content (stringlengths 7-928k) | avg_line_length (float64 3.5-33.8k) | max_line_length (int64 6-139k) | alphanum_fraction (float64 0.08-0.96) | licenses (sequence) | repository_name (stringlengths 7-104) | path (stringlengths 4-230) | size (int64 7-928k) | lang (stringclasses, 1 value) |
---|---|---|---|---|---|---|---|---|
# MIT License, Copyright (c) 2020 Bob van den Heuvel
# https://github.com/bheuvel/transip/blob/main/LICENSE
"""Interface with the TransIP API, specifically DNS record management."""
import logging
from enum import Enum
from pathlib import Path
from time import sleep
from typing import Dict, Union
import requests
from transip_dns import __project__, __version__
from transip_dns.accesstoken import AccessToken
logger = logging.getLogger(__name__)
DNS_RECORD_TYPES = ["A", "AAAA", "CNAME", "MX", "NS", "TXT", "SRV", "SSHFP", "TLSA"]
class DnsEntry(object):
"""Class matching the TransIP dnsEntry."""
def __init__(
self,
content: str = None,
expire: int = None,
name: str = None,
rtype: str = None,
):
"""Initialize the DnsEntry object.
Closely represent the TransIP dnsEntry object
:param content: content (rdata) corresponding to the record type
(e.g. ip), defaults to None
:type content: str, optional
:param expire: Time To Live (TTL) of the record, defaults to None
:type expire: int, optional
:param name: name of the record, defaults to None
:type name: str, optional
:param rtype: one of the (allowed) record types (see DNS_RECORD_TYPES),
defaults to None
:type rtype: str, optional
"""
self.content = content
self.expire = expire
self.name = name
        self.rtype = rtype
    def __repr__(self) -> dict:
        """Represent the TransIP definition of a dnsEntry object.
        The dnsEntry object is specified as a JSON object; this method returns
        the equivalent dict, ready to be serialized as the request body.
        :return: dict representation of the record according to the dnsEntry
        :rtype: dict
        """
return {
"dnsEntry": {
"name": self.name,
"expire": self.expire,
"type": self.rtype,
"content": self.content,
}
}
class RecordState(Enum):
"""Enumeration of record states.
When searching for records, these are the possible states.
NOTFOUND: The record is not present
FOUND_SAME: Record is present and the content is (already) the same
FOUND_DIFFERENT: Record is present, but with different content
FOUND_NO_REQUEST_DATA: If the content of the (requested) dns_record is empty.
This may occur when deleting a record (just) by name.
:param Enum: Parent class to create an enumeration
:type Enum: Enum
"""
FOUND_SAME = 1
FOUND_DIFFERENT = 2
FOUND_NO_REQUEST_DATA = 4
NOTFOUND = 3
class DnsRecord(DnsEntry):
"""DNS Record encapsulation with ip query and data checking.
    Initializes the object, potentially searches for the IP address, and
    checks whether the record type is allowed.
:param DnsEntry: Parent class to enhance
:type DnsEntry: DnsEntry
"""
def __init__(
self,
name: str,
rtype: str,
expire: str,
content: str,
zone: str,
query_data: Union[str, None] = None,
) -> None:
"""Initialize the DnsRecord object with safety checks.
:param name: name of the DNS record
:type name: str
:param rtype: type of the DNS record
:type rtype: str
:param expire: TTL of the DNS record
:type expire: str
:param content: content of the DNS record
:type content: str
:param zone: Zone or domain of the DNS record
:type zone: str
:param query_data: url which produces the exact data to be used as
content, defaults to None
:type query_data: Union[str, None], optional
:raises ValueError: Raise an error if an invalid record type is specified
"""
if rtype is not None:
            if rtype.upper() not in DNS_RECORD_TYPES:
raise ValueError(
f"Type '{rtype}' is not one of the "
f"allowed record types ({DNS_RECORD_TYPES})"
)
super().__init__(content=content, expire=expire, name=name, rtype=rtype)
self.zone = zone
self.fqdn = f"{self.name}.{self.zone}"
if query_data:
self.content = DnsRecord.query_for_content(query_data)
logger.info(f"Resolved record data to be used: '{self.content}'")
self.record_state = None
@property
def dnsentry(self):
"""Return the TransIP representation of the dnsEntry object."""
return super().__repr__()
@staticmethod
def query_for_content(query_url: str) -> str:
"""Retrieve the ip address from the "current" location.
By default it will query for an ip (v4/v6) address,
but may be used for other data as well
:param query_url: url which produces the exact data
to be used as content
:type query_url: str
:raises RequestsRaisedException: raised for connection errors with the server
:raises Non200Response: raised when server does not respond "OK" (200)
:return: the resolved ip address, or whatever may be
returned by a custom provided url
:rtype: str
"""
my_ip = None
try:
ip_query = requests.get(query_url)
except Exception as e:
raise RequestsRaisedException(
"Error in request for Internet ip address; "
) from e
if ip_query.status_code == 200:
my_ip = ip_query.text.strip()
else:
raise Non200Response(
(
"Could not resolve Internet ip address (non 200 response); "
f"{ip_query.status_code}: {ip_query.reason}"
)
)
return my_ip
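# --- Illustrative sketch (not part of the original module; values are examples) ---
# A record constructed as
#     DnsRecord(name="www", rtype="A", expire=300, content="203.0.113.10",
#               zone="example.com")
# exposes, via its ``dnsentry`` property, the payload shape sent to TransIP:
#     {"dnsEntry": {"name": "www", "expire": 300, "type": "A",
#                   "content": "203.0.113.10"}}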
class KeyFileLoadException(Exception):
"""Provided private_key is is not a valid path, nor a valid key format."""
pass
class RequestsRaisedException(Exception):
"""Error occurred in requesting an url for the Internet ip address."""
pass
class Non200Response(Exception):
"""Request for the Internet ip address resulted in a non 200 response."""
pass
class TransipInterface:
"""Encapsulation of connection with TransIP."""
def __init__(
self,
login: str = None,
private_key_pem: str = None,
private_key_pem_file: Path = None,
access_token: str = None,
expiration_time: int = 60,
read_only: bool = False,
global_key: bool = False,
label: str = f"{__project__} {__version__}",
authentication_url: str = "https://api.transip.nl/v6/auth",
root_endpoint: str = "https://api.transip.nl/v6",
connection_timeout: int = 30,
retry: int = 3,
retry_delay: float = 5,
):
"""Initialize the interface with TransIP.
:param login: the TransIP login name, defaults to None
:type login: str, optional
:param private_key_pem: the private key as string, defaults to None
:type private_key_pem: str, optional
:param private_key_pem_file: file location of the private key, defaults to None
:type private_key_pem_file: Path, optional
:param access_token: JSON Web Token, defaults to None
:type access_token: str, optional
:param expiration_time: expiration time (TTL) of the access token,
defaults to 60
:type expiration_time: int, optional
:param read_only: key/token allows to change objects or only read,
defaults to False
:type read_only: bool, optional
        :param global_key: whether the key may be used from any ip address
            (otherwise only from whitelisted ip addresses), defaults to False
:type global_key: bool, optional
:param label: textual identifier for the access token,
defaults to "__project__ __version__"
:type label: str, optional
:param authentication_url: TransIP authentication url,
defaults to "https://api.transip.nl/v6/auth"
:type authentication_url: str, optional
:param root_endpoint: TransIP root of endpoints,
defaults to "https://api.transip.nl/v6"
:type root_endpoint: str, optional
:param connection_timeout: timeout for the network response, defaults to 30
:type connection_timeout: int, optional
:param retry: retry when the call fails due to zone
being saved or locked (409), defaults to 3
:type retry: int, optional
:param retry_delay: time in seconds to wait between retries,
defaults to 5
:type retry_delay: float, optional
"""
if login is not None and access_token is not None:
raise ValueError(
"Either login and private_key or access token must be used, not both."
)
self.attempts = retry + 1
self.retry_delay = retry_delay
self.root_endpoint = root_endpoint
self.connection_timeout = connection_timeout
if access_token is None:
self._token = AccessToken(
login=login,
private_key=private_key_pem,
private_key_file=private_key_pem_file,
expiration_time=expiration_time,
read_only=read_only,
global_key=global_key,
label=label,
authentication_url=authentication_url,
connection_timeout=connection_timeout,
)
else:
self._token = access_token
@property
def headers(self) -> Dict:
"""Generate the default headers.
        Note that the reference to "self._token" will always
        provide a valid (and renewed if needed) token.
:return: default headers, including the authentication token
:rtype: Dict
"""
return {
"Content-Type": "application/json",
"Authorization": f"Bearer {self._token}",
"User-Agent": f"{__project__} {__version__}",
}
def execute_dns_entry(self, method: str, rest_path: str, dnsentry: dict):
"""Execute the requested action, with retry on 409.
409: ~ "DNS Entries are currently being saved"
409: ~ "is locked"
:param method: get, post, patch, delete
:type method: str
        :param rest_path: REST path of the respective DNS endpoint
        :type rest_path: str
:param dnsentry: DNS entry to manage
:type dnsentry: dict
:raises requests.exceptions.HTTPError: Raise an error
if a 400 or 500 response is returned
:return: the requests response
:rtype: requests.models.Response
"""
endpoint = f"{self.root_endpoint}{rest_path}"
request = getattr(requests, method)
response = None
for attempt in range(1, self.attempts + 1):
response = request(
url=endpoint,
json=dnsentry,
headers=self.headers,
timeout=self.connection_timeout,
)
if response.status_code != 409:
response.raise_for_status()
logger.debug(f"API request returned {response.status_code}")
return response
logger.debug(
(
f"API request returned {response.status_code}: "
f"{response.text}, atttempt {attempt} of {self.attempts}"
)
)
sleep(self.retry_delay)
# raises requests.exceptions.HTTPError
response.raise_for_status()
def domains(self) -> list:
"""Get a listing of all available domains.
:return: List of available domains
:rtype: list
"""
return self.execute_dns_entry("get", "/domains", None)
def get_dns_entry(self, dns_zone_name: str) -> Dict:
"""Get a listing of the respective domain."""
response = self.execute_dns_entry(
"get", rest_path=f"/domains/{dns_zone_name}/dns", dnsentry=None
)
return response
def post_dns_entry(self, dns_record: DnsRecord):
"""Add a dnsEntry to the respective domain."""
return self.execute_dns_entry(
"post",
rest_path=f"/domains/{dns_record.zone}/dns",
dnsentry=dns_record.dnsentry,
)
def patch_dns_entry(self, dns_record: DnsRecord):
"""Adjust a record in the respective domain."""
return self.execute_dns_entry(
"patch",
rest_path=f"/domains/{dns_record.zone}/dns",
dnsentry=dns_record.dnsentry,
)
def delete_dns_entry(self, dns_record: DnsRecord):
"""Delete an entry in the respective domain."""
return self.execute_dns_entry(
"delete",
rest_path=f"/domains/{dns_record.zone}/dns",
dnsentry=dns_record.dnsentry,
)
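# --- Hedged usage sketch: a minimal way to drive the interface, assuming a valid
# TransIP login and private key file; the login, key path, zone and record values
# below are illustrative only and this helper is never called. ---
def _example_update_record():  # pragma: no cover - documentation sketch
    record = DnsRecord(
        name="www",
        rtype="A",
        expire="300",
        content="203.0.113.10",
        zone="example.com",
    )
    api = TransipInterface(
        login="demo-user",
        private_key_pem_file=Path("/path/to/private_key.pem"),
    )
    api.patch_dns_entry(record)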
| 34.411458 | 87 | 0.595278 | [
"MIT"
] | bheuvel/transip_dns | transip_dns/transip_interface.py | 13,214 | Python |
# Generated by Django 2.2.1 on 2019-09-07 11:44
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('incidents', '0008_auto_20190829_1231'),
]
operations = [
migrations.AlterModelOptions(
name='incident',
options={'ordering': ('created_date',), 'permissions': (('can_change_assignee', 'Can directly change assignee'), ('can_review_incidents', 'Can review created incidents'), ('can_view_incident_reports', 'Can view inciddent reports'))},
),
]
| 30.611111 | 245 | 0.658802 | [
"Apache-2.0"
] | ECLK/IncidentManagement | backend/src/incidents/migrations/0009_auto_20190907_1144.py | 551 | Python |
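# Emits SQL INSERT statements for a Pokemon database: species, item, ability and
# move names are read line by line from the local sumo_script_* files and written
# out for local language ids 7 and 9, with ids continuing from fixed offsets.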
id=732
f = open('sumo_script_pokes', 'r')
for lines in f:
print 'INSERT INTO pokemon_species_names (pokemon_species_id,local_language_id,name) VALUES('+str(id)+',7,"'+lines.strip('\n\r')+'");'
print 'INSERT INTO pokemon_species_names (pokemon_species_id,local_language_id,name) VALUES('+str(id)+',9,"'+lines.strip('\n\r')+'");'
id+=1
f = open('sumo_script_items', 'r')
id=763
for lines in f:
print 'INSERT INTO item_names (item_id,local_language_id,name) VALUES('+str(id)+',7,"'+lines.strip('\n\r')+'");'
print 'INSERT INTO item_names (item_id,local_language_id,name) VALUES('+str(id)+',9,"'+lines.strip('\n\r')+'");'
id+=1
id=189
f = open('sumo_script_abilities', 'r')
for lines in f:
print 'INSERT INTO ability_names (ability_id,local_language_id,name) VALUES('+str(id)+',7,"'+lines.strip('\n\r')+'");'
print 'INSERT INTO ability_names (ability_id,local_language_id,name) VALUES('+str(id)+',9,"'+lines.strip('\n\r')+'");'
id+=1
id=622
f = open('sumo_script_moves', 'r')
for lines in f:
print 'INSERT INTO move_names (move_id,local_language_id,name) VALUES('+str(id)+',7,"'+lines.strip('\n\r')+'");'
print 'INSERT INTO move_names (move_id,local_language_id,name) VALUES('+str(id)+',9,"'+lines.strip('\n\r')+'");'
id+=1
| 49.76 | 136 | 0.678457 | [
"MIT"
] | thecano/VGCsets | script_utiles/sumo_script.py | 1,244 | Python |
# -*- coding: utf-8 -*-
from django import forms
from django.forms.widgets import RadioSelect
from .models import Rating
class FlagForm(forms.ModelForm):
def __init__(self, *args, **kwargs):
self.user = kwargs.pop('user')
self.object = kwargs.pop('object')
super().__init__(*args, **kwargs)
if not self.instance or self.instance.marked_flag == Rating.FLAG_NONE:
self.fields['marked_flag'].choices = Rating.FLAG_CHOICES[1:]
self.fields['marked_flag'].required = True
self.initial['marked_flag'] = Rating.FLAG_SPAM
if self.data.get(self.add_prefix('marked_flag')) == Rating.FLAG_OTHER:
self.fields['comment'].required = True
class Meta:
model = Rating
fields = ('marked_flag', 'comment')
widgets = {
'marked_flag': RadioSelect(),
}
def clean(self):
cleaned_data = super().clean()
if cleaned_data.get('marked_flag') == Rating.FLAG_NONE:
cleaned_data['comment'] = ''
return cleaned_data
def save(self):
return Rating.objects.rate(
self.object,
self.user,
marked_flag=self.cleaned_data['marked_flag'],
comment=self.cleaned_data['comment']
)
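# --- Hedged usage sketch: how a view might bind this form; ``request`` and
# ``rated_object`` are assumed to come from the surrounding view code and this
# helper is illustrative only, not part of the original app. ---
def _example_flag_view(request, rated_object):  # pragma: no cover - documentation sketch
    form = FlagForm(data=request.POST, user=request.user, object=rated_object)
    if form.is_valid():
        return form.save()
    return None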
| 26.95122 | 72 | 0.703167 | [
"MIT"
] | LinuxOSsk/Shakal-NG | rating/forms.py | 1,105 | Python |
from unittest import TestCase
from unittest.mock import patch
import boto3
from botocore.exceptions import ClientError
from moto import mock_iam
from altimeter.aws.resource.iam.iam_oidc_provider import IAMOIDCProviderResourceSpec
from altimeter.aws.scan.aws_accessor import AWSAccessor
from altimeter.core.graph.links import LinkCollection, ResourceLink, SimpleLink
from altimeter.core.resource.resource import Resource
class TestIAMOIDCProvider(TestCase):
@mock_iam
def test_scan(self):
account_id = "123456789012"
region_name = "us-east-1"
session = boto3.Session()
client = session.client("iam")
oidc_url = "https://oidc.eks.us-east-1.amazonaws.com/id/EXAMPLED539D4633E53DE1B716D3041E"
oidc_client_ids = ["sts.amazonaws.com"]
oidc_thumbprints = ["9999999999999999999999999999999999999999"]
_ = client.create_open_id_connect_provider(
Url=oidc_url, ClientIDList=oidc_client_ids, ThumbprintList=oidc_thumbprints,
)
scan_accessor = AWSAccessor(session=session, account_id=account_id, region_name=region_name)
resources = IAMOIDCProviderResourceSpec.scan(scan_accessor=scan_accessor)
expected_resources = [
Resource(
resource_id="arn:aws:iam::123456789012:oidc-provider/oidc.eks.us-east-1.amazonaws.com/id/EXAMPLED539D4633E53DE1B716D3041E",
type="aws:iam:oidc-provider",
link_collection=LinkCollection(
simple_links=(
SimpleLink(
pred="url",
obj="oidc.eks.us-east-1.amazonaws.com/id/EXAMPLED539D4633E53DE1B716D3041E",
),
SimpleLink(
pred="create_date",
obj=resources[0].link_collection.simple_links[1].obj,
),
SimpleLink(pred="client_id", obj="sts.amazonaws.com"),
SimpleLink(
pred="thumbprint", obj="9999999999999999999999999999999999999999"
),
),
multi_links=None,
tag_links=None,
resource_links=(
ResourceLink(pred="account", obj="arn:aws::::account/123456789012"),
),
transient_resource_links=None,
),
)
]
self.assertEqual(resources, expected_resources)
@mock_iam
def test_disappearing_oidc_provider_race_condition(self):
account_id = "123456789012"
region_name = "us-east-1"
oidc_url = "https://oidc.eks.us-east-1.amazonaws.com/id/EXAMPLED539D4633E53DE1B716D3041E"
oidc_client_ids = ["sts.amazonaws.com"]
oidc_thumbprints = ["9999999999999999999999999999999999999999"]
session = boto3.Session()
client = session.client("iam")
oidc_provider_resp = client.create_open_id_connect_provider(
Url=oidc_url, ClientIDList=oidc_client_ids, ThumbprintList=oidc_thumbprints,
)
oidc_provider_arn = oidc_provider_resp["OpenIDConnectProviderArn"]
scan_accessor = AWSAccessor(session=session, account_id=account_id, region_name=region_name)
with patch(
"altimeter.aws.resource.iam.iam_oidc_provider.IAMOIDCProviderResourceSpec"
".get_oidc_provider_details"
) as mock_get_oidc_provider_details:
mock_get_oidc_provider_details.side_effect = ClientError(
operation_name="GetOIDCProvider",
error_response={
"Error": {
"Code": "NoSuchEntity",
"Message": f"OpenIDConnect Provider not found for arn {oidc_provider_arn}",
}
},
)
resources = IAMOIDCProviderResourceSpec.scan(scan_accessor=scan_accessor)
self.assertEqual(resources, [])
| 41.793814 | 139 | 0.614208 | [
"MIT"
] | Cemito/altimeter | tests/unit/altimeter/aws/resource/iam/test_oidc_provider.py | 4,054 | Python |
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Project information -----------------------------------------------------
project = u"imfpy"
copyright = u"2021, Liam Tay Kearney"
author = u"Liam Tay Kearney"
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
"myst_nb",
"autoapi.extension",
"sphinx.ext.napoleon",
"sphinx.ext.viewcode",
]
autoapi_dirs = ["../src"]
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ["_build", "Thumbs.db", ".DS_Store"]
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = "sphinx_rtd_theme"
| 33.324324 | 78 | 0.642336 | [
"MIT"
] | ltk2118/imfpy | docs/conf.py | 1,233 | Python |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import nonebot
from nonebot.adapters.cqhttp import Bot as CQHTTPBot, message
# Initialize NoneBot
nonebot.init()
# Connect the driver
driver = nonebot.get_driver()
driver.register_adapter("cqhttp", CQHTTPBot)
# Load plugins (changing configuration other than this is not recommended)
nonebot.load_builtin_plugins()
nonebot.load_plugins('src/plugins')
nonebot.load_plugin('nonebot_plugin_help')
nonebot.load_plugin('nonebot_plugin_apscheduler')
app = nonebot.get_asgi()
from nonebot import Bot
@driver.on_bot_connect
async def do_something(bot: Bot):
from utils import NAME, ONWER
await bot.send_private_msg(
user_id=ONWER,
        message=f'{NAME} started'
)
if __name__ == "__main__":
nonebot.run()
| 21.625 | 61 | 0.738439 | [
"MIT"
] | syhanjin/chizi_bot | bot.py | 744 | Python |
# -*- encoding: utf-8 -*-
"""
Created by eniocc at 11/10/2020
"""
from py_dss_interface.models.XYCurves.XYCurvesF import XYCurvesF
from py_dss_interface.models.XYCurves.XYCurvesI import XYCurvesI
from py_dss_interface.models.XYCurves.XYCurvesS import XYCurvesS
from py_dss_interface.models.XYCurves.XYCurvesV import XYCurvesV
class XYCurves(XYCurvesS, XYCurvesI, XYCurvesF, XYCurvesV):
"""
This interface implements the XYCurves (IXYCurves) interface of OpenDSS by declaring 4 procedures for accessing
the different properties included in this interface: XYCurvesS, XYCurvesI, XYCurvesF, XYCurvesV.
"""
pass
| 35.111111 | 115 | 0.787975 | [
"MIT"
] | PauloRadatz/py_dss_interface | src/py_dss_interface/models/XYCurves/XYCurves.py | 632 | Python |
import unittest
from typing import cast
from dataladmetadatamodel.common import get_top_level_metadata_objects
from dataladmetadatamodel.datasettree import DatasetTree
from dataladmetadatamodel.metadata import Metadata
from dataladmetadatamodel.metadatapath import MetadataPath
class TestRemote(unittest.TestCase):
def test_basic_remote(self):
tree_version_list, uuid_set = get_top_level_metadata_objects(
"git",
"https://github.com/datalad/test_metadata"
)
self.assertEqual(len(tree_version_list.version_set), 1)
self.assertEqual(len(uuid_set.uuid_set), 1)
for version, element_info in tree_version_list.get_versioned_elements():
time_stamp, dataset_path, dataset_tree = element_info
dataset_tree = cast(DatasetTree, dataset_tree)
dataset_tree.read_in()
dataset_paths = dataset_tree.get_dataset_paths()
self.assertEqual(27, len(dataset_paths))
mrr = dataset_tree.get_metadata_root_record(MetadataPath("study-104"))
file_tree = mrr.get_file_tree()
file_paths = list(file_tree.get_paths_recursive())
self.assertEqual(7, len(file_paths))
file_tree.add_metadata(MetadataPath("a/b/c"), Metadata())
self.assertRaises(RuntimeError, file_tree.write_out)
| 39.823529 | 82 | 0.714919 | [
"MIT"
] | christian-monch/metadata-model | dataladmetadatamodel/tests/test_remote.py | 1,354 | Python |
from test.integration.base import DBTIntegrationTest, use_profile
class BaseTestSimpleDependencyWithConfigs(DBTIntegrationTest):
def setUp(self):
DBTIntegrationTest.setUp(self)
self.run_sql_file("seed.sql")
@property
def schema(self):
return "simple_dependency_006"
@property
def models(self):
return "models"
class TestSimpleDependencyWithConfigs(BaseTestSimpleDependencyWithConfigs):
@property
def packages_config(self):
return {
"packages": [
{
'git': 'https://github.com/fishtown-analytics/dbt-integration-project',
'revision': 'with-configs-0.17.0',
},
]
}
@property
def project_config(self):
return {
'config-version': 2,
'vars': {
'dbt_integration_project': {
'bool_config': True
},
},
}
@use_profile('postgres')
def test_postgres_simple_dependency(self):
self.run_dbt(["deps"])
results = self.run_dbt(["run"])
self.assertEqual(len(results), 5)
self.assertTablesEqual('seed_config_expected_1', "config")
self.assertTablesEqual("seed", "table_model")
self.assertTablesEqual("seed", "view_model")
self.assertTablesEqual("seed", "incremental")
class TestSimpleDependencyWithOverriddenConfigs(BaseTestSimpleDependencyWithConfigs):
@property
def packages_config(self):
return {
"packages": [
{
'git': 'https://github.com/fishtown-analytics/dbt-integration-project',
'revision': 'with-configs-0.17.0',
},
]
}
@property
def project_config(self):
return {
'config-version': 2,
"vars": {
# project-level configs
"dbt_integration_project": {
"config_1": "abc",
"config_2": "def",
"bool_config": True
},
},
}
@use_profile('postgres')
def test_postgres_simple_dependency(self):
self.run_dbt(["deps"])
results = self.run_dbt(["run"])
self.assertEqual(len(results), 5)
self.assertTablesEqual('seed_config_expected_2', "config")
self.assertTablesEqual("seed", "table_model")
self.assertTablesEqual("seed", "view_model")
self.assertTablesEqual("seed", "incremental")
class TestSimpleDependencyWithModelSpecificOverriddenConfigs(BaseTestSimpleDependencyWithConfigs):
@property
def packages_config(self):
return {
"packages": [
{
'git': 'https://github.com/fishtown-analytics/dbt-integration-project',
'revision': 'with-configs-0.17.0',
},
]
}
@property
def project_config(self):
# This feature doesn't exist in v2!
return {
'config-version': 1,
"models": {
"dbt_integration_project": {
"config": {
# model-level configs
"vars": {
"config_1": "ghi",
"config_2": "jkl",
"bool_config": True,
}
}
}
},
}
@use_profile('postgres')
def test_postgres_simple_dependency(self):
self.use_default_project()
self.run_dbt(["deps"])
results = self.run_dbt(["run"], strict=False) # config is v1, can't use strict here
self.assertEqual(len(results), 5)
self.assertTablesEqual('seed_config_expected_3', "config")
self.assertTablesEqual("seed", "table_model")
self.assertTablesEqual("seed", "view_model")
self.assertTablesEqual("seed", "incremental")
class TestSimpleDependencyWithModelSpecificOverriddenConfigsAndMaterializations(BaseTestSimpleDependencyWithConfigs):
@property
def packages_config(self):
return {
"packages": [
{
'git': 'https://github.com/fishtown-analytics/dbt-integration-project',
'revision': 'with-configs-0.17.0',
},
]
}
@property
def project_config(self):
return {
'config-version': 1,
"models": {
"dbt_integration_project": {
# disable config model, but supply vars
"config": {
"enabled": False,
"vars": {
"config_1": "ghi",
"config_2": "jkl",
"bool_config": True
}
},
# disable the table model
"table_model": {
"enabled": False,
},
# override materialization settings
"view_model": {
"materialized": "table"
}
}
},
}
@use_profile('postgres')
def test_postgres_simple_dependency(self):
self.run_dbt(["deps"])
results = self.run_dbt(["run"], strict=False) # config is v1, can't use strict here
self.assertEqual(len(results), 3)
self.assertTablesEqual("seed", "view_model")
self.assertTablesEqual("seed", "incremental")
created_models = self.get_models_in_schema()
# config, table are disabled
self.assertFalse('config' in created_models)
self.assertFalse('table_model' in created_models)
self.assertTrue('view_model' in created_models)
self.assertEqual(created_models['view_model'], 'table')
self.assertTrue('incremental' in created_models)
self.assertEqual(created_models['incremental'], 'table')
| 30.356436 | 117 | 0.520059 | [
"Apache-2.0"
] | ChristianKohlberg/dbt | test/integration/006_simple_dependency_test/test_simple_dependency_with_configs.py | 6,132 | Python |
import requests
from news_api.settings.Vespa_config import VESPA_IP, VESPA_PORT
import json
from ast import literal_eval
def GenerateDateParamYql(params):
"""[Check consistency in date parameterGenerate the date yql parameters]
Arguments:
params {[type]} -- [description]
Returns:
[type] -- [description]
"""
yql = ""
if "fromDate" in params:
if params["fromDate"].isdigit():
yql += " and published_date > " + str(params["fromDate"])
if "toDate" in params:
if params["toDate"].isdigit():
yql += " and published_date < " + str(params["toDate"])
if "fromDate" in params and "toDate" in params:
if params["fromDate"].isdigit() and params["toDate"].isdigit():
if params["fromDate"] > params["toDate"]:
return ""
return yql
def GenerateNewsYql(params):
"""[Generator of YQL vespa query, to have a refine request on the vespa cluster]
In this case, the YQL depends on the search definition of the document type in the vespa cluster
Modify with risk, some parameters position are important, such as limit
Returns:
yql [string] -- [String that select documents based on userquery
"""
yql = "&yql=select * from sources * where userQuery()"
if "source" in params:
yql += ' and hostsite contains " ' + params["source"] + ' "'
if "language" in params:
yql += ' and country contains "' + params["language"] + '"'
yql += GenerateDateParamYql(params)
if "count" in params:
if params["count"].isdigit():
yql += " limit " + str(params["count"])
if "offset" in params:
if params["offset"].isdigit():
yql += " offset " + str(params["offset"])
return yql
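# --- Illustrative example (values are assumptions, not part of the original module) ---
# GenerateNewsYql({"query": "climate", "source": "bbc.com", "count": "5"})
# returns:
#     '&yql=select * from sources * where userQuery() and hostsite contains " bbc.com " limit 5'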
def vespaSearch(params):
"""Search Function for Vespa:
Arguments:
params {dict} -- [Dict containings all parameters for the Vespa Search]
List of accepted params:
:param query: User query to search (required)
:param toDate: Maximum datelimit for the publication date (optionnal, default = now() )
:param fromDate: Minimum datelimit for the publication date (optionnal)
:param count: Number of document to retrieve (optionnal, default = 10)
:param offset: Offset for the retrieved documents ( optionnal, default = 0)
:param source: Filter for the accepter hostsites (optionnal)
"""
result = None
if "query" not in params:
return None
else:
yql = GenerateNewsYql(params)
try:
print(
"http://"
+ VESPA_IP
+ ":"
+ VESPA_PORT
+ "/search/?query="
+ params["query"]
+ yql
+ ";"
)
result_request = requests.get(
"http://"
+ VESPA_IP
+ ":"
+ VESPA_PORT
+ "/search/?query="
+ params["query"]
+ yql
+ ";"
)
if result_request.status_code == 200:
result = result_request.json()
except Exception as e:
print(e)
return None
return result
| 32.75 | 107 | 0.539342 | [
"MIT"
] | rdoume/News_API | news_api/endpoints/vespaSearcher.py | 3,406 | Python |
from Good_Boids_module.Update_Boids import Boids
import numpy as np
from nose.tools import assert_almost_equal, assert_greater
from nose.tools import assert_less, assert_equal
from numpy.testing import assert_array_equal
import os
import yaml
from Good_Boids_module.tests.record_fixtures import configuration_file
fixtures = yaml.load(open('fixture.yaml'))
configuration_file_data = yaml.load(open(configuration_file))
def test_good_boids_for_regression():
before_positions = list(fixtures["before_positions"])
before_velocities = list(fixtures["before_velocities"])
new_positions = list(Boids(configuration_file).get_raw_positions(before_positions, before_velocities))
after_positions = list(fixtures["after_positions"])
new_velocities = list(Boids(configuration_file).get_raw_velocities(before_positions, before_velocities))
after_velocities = list(fixtures["after_velocities"])
for i in range(len(new_positions)):
assert_almost_equal(new_positions[0][i], after_positions[0][i], delta=0.1)
assert_almost_equal(new_positions[1][i], after_positions[1][i], delta=0.1)
assert_almost_equal(new_velocities[0][i], after_velocities[0][i], delta=15)
assert_almost_equal(new_velocities[1][i], after_velocities[1][i], delta=15)
test_good_boids_for_regression()
def test_good_boids_initialization():
boids_positions = Boids(configuration_file).positions
boids_velocities = Boids(configuration_file).velocities
assert_equal(configuration_file_data['birds_number'], len(boids_positions[0]))
assert_equal(configuration_file_data['birds_number'], Boids(configuration_file).birds_num)
for boid in range(Boids(configuration_file).birds_num):
assert_less(boids_positions[0][boid], configuration_file_data['position_upper_limits'][0])
assert_greater(boids_positions[0][boid], configuration_file_data['position_lower_limits'][0])
assert_less(boids_positions[1][boid], configuration_file_data['position_upper_limits'][1])
assert_greater(boids_positions[1][boid], configuration_file_data['position_lower_limits'][1])
assert_less(boids_velocities[0][boid], configuration_file_data['velocity_upper_limits'][0])
assert_greater(boids_velocities[0][boid], configuration_file_data['velocity_lower_limits'][0])
assert_less(boids_velocities[1][boid], configuration_file_data['velocity_upper_limits'][1])
assert_greater(boids_velocities[1][boid], configuration_file_data['velocity_lower_limits'][1])
test_good_boids_initialization() | 51.16 | 108 | 0.788898 | [
"MIT"
] | anest1s/Refactoring_the_Bad_Boids | Good_Boids_module/tests/test_the_Good_Boids.py | 2,558 | Python |
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#Original code can be found at
#https://colab.research.google.com/github/tensorflow/examples/blob/master/community/en/flowers_tf_lite.ipynb#scrollTo=aCLb_yV5JfF3
import tensorflow as tf
import os
import numpy as np
import matplotlib.pyplot as plt
IMAGE_SIZE = 224
BATCH_SIZE = 64
def download_flower_dataset():
_URL = "https://storage.googleapis.com/download.tensorflow.org/example_images/flower_photos.tgz"
zip_file = tf.keras.utils.get_file(origin=_URL,
fname="flower_photos.tgz",
extract=True)
return os.path.join(os.path.dirname(zip_file), 'flower_photos')
def create_image_batch_generator(base_dir):
datagen = tf.keras.preprocessing.image.ImageDataGenerator(
rescale=1./255,
validation_split=0.2)
train_generator = datagen.flow_from_directory(
base_dir,
target_size=(IMAGE_SIZE, IMAGE_SIZE),
batch_size=BATCH_SIZE,
subset='training')
val_generator = datagen.flow_from_directory(
base_dir,
target_size=(IMAGE_SIZE, IMAGE_SIZE),
batch_size=BATCH_SIZE,
subset='validation')
return train_generator, val_generator
def save_labels(train_generator):
for image_batch, label_batch in train_generator:
break
print(image_batch.shape, label_batch.shape)
print (train_generator.class_indices)
labels = '\n'.join(sorted(train_generator.class_indices.keys()))
with open('labels.txt', 'w') as f:
f.write(labels)
def download_mobilenet_v2_model():
# Create the base model from the pre-trained model MobileNet V2
IMG_SHAPE = (IMAGE_SIZE, IMAGE_SIZE, 3)
base_model = tf.keras.applications.MobileNetV2(input_shape=IMG_SHAPE,
include_top=False,
weights='imagenet')
model = tf.keras.Sequential([
base_model,
tf.keras.layers.Conv2D(32, 3, activation='relu'),
tf.keras.layers.Dropout(0.2),
tf.keras.layers.GlobalAveragePooling2D(),
tf.keras.layers.Dense(5, activation='softmax')
])
# Let's take a look to see how many layers are in the base model
print("Number of layers in the base model: ", len(base_model.layers))
return base_model, model
def run_transfer_learning(base_model, model, train_generator, val_generator):
base_model.trainable = False
model.compile(optimizer=tf.keras.optimizers.Adam(),
loss='categorical_crossentropy',
metrics=['accuracy'])
model.summary()
print('Number of trainable variables = {}'.format(len(model.trainable_variables)))
epochs = 10
history = model.fit(train_generator,
epochs=epochs,
validation_data=val_generator)
return history
def run_fine_tuning(base_model, model, train_generator, val_generator):
base_model.trainable = True
# Fine tune from this layer onwards
fine_tune_at = 100
# Freeze all the layers before the `fine_tune_at` layer
for layer in base_model.layers[:fine_tune_at]:
layer.trainable = False
model.compile(loss='categorical_crossentropy',
optimizer = tf.keras.optimizers.Adam(1e-5),
metrics=['accuracy'])
model.summary()
print('Number of trainable variables = {}'.format(len(model.trainable_variables)))
history = model.fit(train_generator,
epochs=5,
validation_data=val_generator)
return history
def save_model_as_tflite(model):
saved_model_dir = 'fine_tuning'
tf.saved_model.save(model, saved_model_dir)
converter = tf.lite.TFLiteConverter.from_saved_model(saved_model_dir)
tflite_model = converter.convert()
with open('model.tflite', 'wb') as f:
f.write(tflite_model)
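# --- Hedged sketch: reloading the exported TFLite model for a quick local check;
# this helper is illustrative only and is not invoked by the __main__ block. ---
def load_tflite_model(path='model.tflite'):
    interpreter = tf.lite.Interpreter(model_path=path)
    interpreter.allocate_tensors()
    return interpreter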
def plot_figure(history, fig_name):
acc = history.history['accuracy']
val_acc = history.history['val_accuracy']
loss = history.history['loss']
val_loss = history.history['val_loss']
plt.figure(figsize=(8, 8))
plt.subplot(2, 1, 1)
plt.plot(acc, label='Training Accuracy')
plt.plot(val_acc, label='Validation Accuracy')
plt.legend(loc='lower right')
plt.ylabel('Accuracy')
plt.ylim([min(plt.ylim()),1])
plt.title('Training and Validation Accuracy')
plt.subplot(2, 1, 2)
plt.plot(loss, label='Training Loss')
plt.plot(val_loss, label='Validation Loss')
plt.legend(loc='upper right')
plt.ylabel('Cross Entropy')
plt.ylim([0,1.0])
plt.title('Training and Validation Loss')
plt.xlabel('epoch')
plt.show()
plt.savefig(fig_name)
if __name__ == '__main__':
print(tf.__version__)
base_dir = download_flower_dataset()
train_generator, val_generator = create_image_batch_generator(base_dir)
save_labels(train_generator)
base_model, model = download_mobilenet_v2_model() #download without top layer and add top layer
history = run_transfer_learning(base_model, model, train_generator, val_generator)
plot_figure(history, 'transfer_learning.png')
history_fine = run_fine_tuning(base_model, model, train_generator, val_generator)
save_model_as_tflite(model)
plot_figure(history_fine, 'fine_tuning.png')
| 35.203488 | 131 | 0.663088 | [
"Apache-2.0"
] | Hammer7/Flowers-TF-Lite | flowers_tf_lite.py | 6,055 | Python |
import importlib
import json
from inspect import iscoroutine
from json import JSONDecodeError
from typing import Any, Callable, Dict, List, Union
from loguru import logger
from parse import parse
from pydantic import BaseModel, validator
class Moshi(BaseModel):
call: Union[str, Callable]
args: List[Any]
kwargs: Dict[str, Any]
@validator("call")
def convert2str(cls, value: Union[str, Callable]):
if callable(value):
            # TODO: after reading up on this, still in doubt about what is best here
value = f"{value.__module__}:{value.__qualname__}"
return value
class moshi:
def __new__(cls, to: str, *args, fallback: Callable = None, **kwargs):
function_path = a_json = to
# test hypothesis if to is a json
try:
call_detail = json.loads(a_json)
function_path = call_detail["call"]
# prioritize args and kwargs
args = [*call_detail.get("args", tuple()), *args]
kwargs = {**call_detail.get("kwargs", dict()), **kwargs}
except JSONDecodeError:
pass
parsed = parse(r"{import_path}:{function_name:w}", function_path)
if parsed is None:
if fallback:
return fallback(*args, **kwargs)
else:
raise Exception("argument `to` is invalid.")
import_path = parsed["import_path"]
function_name = parsed["function_name"]
try:
module = importlib.import_module(f"{import_path}")
function = getattr(module, function_name)
return function(*args, **kwargs)
except ModuleNotFoundError as e:
if fallback:
import os
logger.debug(
"Fallback is about to be returned. This is cwd, {}", os.getcwd()
)
logger.exception("The exception")
return fallback(*args, **kwargs)
else:
raise e
@classmethod
async def moshi(cls, to: str, *args, fallback: Callable = None, **kwargs):
ret = cls(to, *args, fallback=fallback, **kwargs)
if iscoroutine(ret):
return await ret
else:
return ret
@staticmethod
def to_json(to: Union[str, Callable], *args, **kwargs):
return Moshi(call=to, args=args, kwargs=kwargs).json()
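# --- Hedged usage sketch (illustrative only) ---
# ``to`` is an "importable.module:callable" string, e.g. with the stdlib json module:
#     moshi("json:dumps", {"a": 1})            # -> '{"a": 1}'
#     moshi("json:dumps", {"a": 1}, indent=2)  # extra args/kwargs are forwarded
#     moshi.to_json("json:dumps", {"a": 1})    # serialize the call description itself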
| 29.73494 | 85 | 0.561183 | [
"ISC"
] | CircleOnCircles/moshimoshi | moshimoshi/__init__.py | 2,468 | Python |
#!/usr/bin/python
r"""
Contains PLDM-related constants.
"""
PLDM_SUPPORTED_TYPES = ['base', 'platform', 'bios', 'fru', 'oem-ibm']
# PLDM types.
PLDM_TYPE_BASE = {'VALUE': '00', 'STRING': 'base'}
PLDM_TYPE_PLATFORM = {'VALUE': '02', 'STRING': 'platform'}
PLDM_TYPE_BIOS = {'VALUE': '03', 'STRING': 'bios'}
PLDM_TYPE_FRU = {'VALUE': '04', 'STRING': 'fru'}
PLDM_TYPE_OEM = {'VALUE': '63', 'STRING': 'oem-ibm'}
PLDM_SUPPORTED_TYPES = ['0(base)', '2(platform)', '3(bios)', '4(fru)', '63(oem-ibm)']
VERSION_BASE = {'VALUE': ['f1', 'f0', 'f0', '00'], 'STRING': '1.0.0'}
VERSION_PLATFORM = {'VALUE': ['f1', 'f2', 'f0', '00'], 'STRING': '1.2.0'}
VERSION_BIOS = {'VALUE': ['f1', 'f1', 'f1', '00'], 'STRING': '1.0.0'}
VERSION_FRU = {'VALUE': ['f1', 'f0', 'f0', '00'], 'STRING': '1.0.0'}
VERSION_OEM = {'VALUE': ['f1', 'f0', 'f0', '00'], 'STRING': '1.0.0'}
PLDM_BASE_CMDS = ['2(GetTID)', '3(GetPLDMVersion)', '4(GetPLDMTypes)', '5(GetPLDMCommands)']
PLDM_PLATFORM_CMDS = ['57(SetStateEffecterStates)', '81(GetPDR)']
PLDM_BIOS_CMDS = ['1(GetBIOSTable)', '7(SetBIOSAttributeCurrentValue)',
'8(GetBIOSAttributeCurrentValueByHandle)', '12(GetDateTime)',
'13(SetDateTime)']
PLDM_FRU_CMDS = ['1(GetFRURecordTableMetadata)', '2(GetFRURecordTable)', '4(GetFRURecordByOption)']
PLDM_OEM_CMDS = ['1(GetFileTable)', '4(ReadFile)', '5(WriteFile)', '6(ReadFileInToMemory)',
'7(WriteFileFromMemory)', '8(ReadFileByTypeIntoMemory)',
'9(WriteFileByTypeFromMemory)', '10(NewFileAvailable)',
'11(ReadFileByType)', '12(WriteFileByType)', '13(FileAck)',
'240(GetAlertStatus)']
# PLDM command format.
'''
e.g. : GetPLDMVersion usage
pldmtool base GetPLDMVersion -t <pldm_type>
pldm supported types
base->0,platform->2,bios->3,fru->4
'''
CMD_GETPLDMVERSION = 'base GetPLDMVersion -t %s'
'''
e.g. : PLDM raw command usage
pldmtool raw -d 0x80 0x00 0x03 0x00 0x00 0x00 0x00 0x01 0x00
pldm raw -d 0x<header> 0x<pldm_type> 0x<pldm_cmd_type> 0x<payload_data>
'''
CMD_PLDMTOOL_RAW = 'raw -d 0x80' + '0x%s' + ' ' + '0x%s'
# PLDM command payload data.
PAYLOAD_GetPLDMVersion = \
' 0x00 0x00 0x00 0x00 0x%s 0x%s' # %(TransferOperationFlag, PLDMType)
'''
e.g. : SetDateTime usage
pldmtool bios SetDateTime -d <YYYYMMDDHHMMSS>
'''
CMD_SETDATETIME = 'bios SetDateTime -d %s'
CMD_GETPDR = 'platform GetPDR -d %s'
'''
e.g. : SetStateEffecterStates usage
pldmtool platform GetPDR -i <effter_handle> -c <count> -d <effecterID, effecterState>
pldmtool platform SetStateEffecterStates -i 1 -c 1 -d 1 1
'''
CMD_SETSTATEEFFECTERSTATES = 'platform SetStateEffecterStates -i %s -c %s -d %s'
# GetPDR parsed response message for record handle.
# Dictionary value array holds the expected output for record handle 1, 2.
#
# Note :
# Record handle - 0 is default & has same behaviour as record handle 1
# Only record handle 0, 1, 2 are supported as of now.
RESPONSE_DICT_GETPDR_SETSTATEEFFECTER = {
'PDRHeaderVersion': [1],
'PDRType': ['State Effecter PDR'],
'recordChangeNumber': [0],
'PLDMTerminusHandle': [0, 1, 2],
'effecterID': [0, 1, 2, 3, 4],
'entityType': ['Virtual Machine Manager', 'System chassis (main enclosure)',
'System Firmware', 'Processor Module', '32801(OEM)'],
'entityInstanceNumber': [0, 1, 2],
'containerID': [0, 1],
'effecterSemanticID': [0],
'effecterInit': ['noInit'],
'effecterDescriptionPDR': [False],
'compositeEffecterCount': [1]}
RESPONSE_DICT_GETPDR_FRURECORDSETIDENTIFIER = {
'PDRHeaderVersion': [1],
'PDRType': ['FRU Record Set PDR'],
'recordChangeNumber': [0],
'dataLength': [10],
'PLDMTerminusHandle': [0, 2],
'entityType': ['System Board', 'Chassis front panel board (control panel)',
'Management Controller', 'OEM', 'Power converter',
'System (logical)', 'System chassis (main enclosure)',
'Chassis front panel board (control panel)',
'Processor Module', 'Memory Module', 'Power Supply',
'24576(OEM)', '60(OEM)', 'Processor', '142(OEM)'],
'containerID': [0, 1, 2, 3]}
RESPONSE_DICT_GETPDR_PDRENTITYASSOCIATION = {
'PDRHeaderVersion': [1],
'PDRType': ['Entity Association PDR'],
'recordChangeNumber': [0],
'containerID': [1, 2, 3],
'associationtype': ['Physical'],
'containerentityType': ['System Board', 'System (logical)',
'System chassis (main enclosure)']}
RESPONSE_DICT_GETPDR_STATESENSORPDR = {
'entityType': ['Communication Channel', 'Connector', 'Processor Module',
'32774(OEM)', '57346(OEM)', '57347(OEM)', '32801(OEM)'],
'sensorInit': ['noInit'],
'sensorAuxiliaryNamesPDR': [False]}
RESPONSE_DICT_GETPDR_TERMINUSLOCATORPDR = {
'PDRHeaderVersion': [1],
'PDRType': ['Terminus Locator PDR'],
'recordChangeNumber': [0],
'PLDMTerminusHandle': [1],
'validity': ['valid'],
'TID': [1, 208],
'containerID': [0, 1],
'terminusLocatorType': ['MCTP_EID'],
'terminusLocatorValueSize': [1]}
RESPONSE_DICT_GETPDR_NUMERICEFFECTERPDR = {
'PDRHeaderVersion': [1],
'PDRType': ['Numeric Effecter PDR'],
'recordChangeNumber': [0],
'PLDMTerminusHandle': [0, 1],
'entityInstanceNumber': [0, 1],
'containerID': [0],
'effecterSemanticID': [0],
'effecterInit': [0],
'effecterAuxiliaryNames': [False],
'baseUnit': [0, 72],
'unitModifier': [0],
'rateUnit': [0],
'baseOEMUnitHandle': [0],
'auxUnit': [0],
'auxUnitModifier': [0],
'auxrateUnit': [0],
'auxOEMUnitHandle': [0],
'resolution': [1, 0],
'offset': [0],
'accuracy': [0],
'plusTolerance': [0],
'minusTolerance': [0],
'stateTransitionInterval': [0],
'TransitionInterval': [0],
'minSettable': [0],
'rangeFieldSupport': [0],
'nominalValue': [0],
'normalMax': [0],
'normalMin': [0],
'ratedMax': [0],
'ratedMin': [0]}
PLDM_PDR_TYPES = {
'PLDM_STATE_EFFECTER_PDR': 'State Effecter PDR',
'PLDM_PDR_FRU_RECORD_SET': 'FRU Record Set PDR',
'PLDM_PDR_ENTITY_ASSOCIATION': 'Entity Association PDR',
'PLDM_STATE_SENSOR_PDR': 'State Sensor PDR',
'PLDM_NUMERIC_EFFECTER_PDR': 'Numeric Effecter PDR',
'PLDM_TERMINUS_LOCATOR_PDR': 'Terminus Locator PDR',
'PLDM_COMPACT_NUMERIC_SENSOR_PDR': '21'}
RESPONSE_LIST_GETBIOSTABLE_STRTABLE = [
'Allowed', 'Disabled', 'Enabled', 'IPv4DHCP', 'IPv4Static', 'Not Allowed',
'Perm', 'Temp', 'pvm_fw_boot_side', 'pvm_inband_code_update', 'pvm_os_boot_side',
'pvm_pcie_error_inject', 'pvm_surveillance', 'pvm_system_name', 'vmi_hostname',
'vmi_if_count', 'vmi_if0_ipv4_ipaddr', 'vmi_if0_ipv4_method',
'vmi_if0_ipv4_prefix_length', 'vmi_if1_ipv4_ipaddr', 'vmi_if1_ipv4_method',
'vmi_if1_ipv4_prefix_length']
RESPONSE_LIST_GETBIOSTABLE_ATTRTABLE = [
'pvm_fw_boot_side', 'pvm_inband_code_update', 'pvm_os_boot_side',
'pvm_pcie_error_inject', 'pvm_surveillance', 'pvm_system_name', 'vmi_hostname',
'vmi_if_count', 'vmi_if0_ipv4_ipaddr', 'vmi_if0_ipv4_method',
'vmi_if0_ipv4_prefix_length', 'vmi_if1_ipv4_ipaddr', 'vmi_if1_ipv4_method',
'vmi_if1_ipv4_prefix_length']
RESPONSE_LIST_GETBIOSTABLE_ATTRVALTABLE = [
'BIOSString', 'BIOSInteger', 'BIOSEnumeration']
| 35.028571 | 99 | 0.643828 | [
"Apache-2.0"
] | jcsteven/openbmc-test-automation | data/pldm_variables.py | 7,356 | Python |
from __future__ import annotations
from jsonclasses import jsonclass, types
@jsonclass
class SuperOneOf:
const: list[str] = types.listof(str).default(['abc', 'def', 'ghi']).required
valconst: str | None = types.str.oneof(['abc', 'def', 'ghi'])
valcallable: str | None = types.str.oneof(lambda o: o.const)
valtypes: str | None = types.str.oneof(types.this.fval('const'))
| 35.272727 | 80 | 0.688144 | [
"MIT"
] | Wiosoft-Crafts/jsonclasses | tests/classes/super_oneof.py | 388 | Python |
import logging
import pytest
from collections import namedtuple, Counter
from tests.platform_tests.counterpoll.cpu_memory_helper import restore_counter_poll # lgtm [py/unused-import]
from tests.platform_tests.counterpoll.cpu_memory_helper import counterpoll_type # lgtm [py/unused-import]
from tests.platform_tests.counterpoll.counterpoll_helper import ConterpollHelper
from tests.platform_tests.counterpoll.counterpoll_constants import CounterpollConstants
from tests.common.mellanox_data import is_mellanox_device
pytestmark = [
pytest.mark.topology('any'),
pytest.mark.device_type('physical'),
]
def is_asan_image(duthosts, enum_rand_one_per_hwsku_hostname):
duthost = duthosts[enum_rand_one_per_hwsku_hostname]
asan_val_from_sonic_ver_cmd = "sonic-cfggen -y /etc/sonic/sonic_version.yml -v asan"
asan_val = duthost.command(asan_val_from_sonic_ver_cmd)['stdout']
is_asan = False
if asan_val == "yes":
logging.info("The current sonic image is a ASAN image")
is_asan = True
return is_asan
@pytest.fixture(scope='module')
def setup_thresholds(duthosts, enum_rand_one_per_hwsku_hostname):
duthost = duthosts[enum_rand_one_per_hwsku_hostname]
cpu_threshold = 50
memory_threshold = 60
high_cpu_consume_procs = {}
is_asan = is_asan_image(duthosts, enum_rand_one_per_hwsku_hostname)
if duthost.facts['platform'] in ('x86_64-arista_7050_qx32', 'x86_64-kvm_x86_64-r0') or is_asan:
memory_threshold = 90
if duthost.facts['platform'] in ('x86_64-arista_7260cx3_64'):
high_cpu_consume_procs['syncd'] = 80
# The CPU usage of `sx_sdk` on mellanox is expected to be higher, and the actual CPU usage
# is correlated with the number of ports. So we ignore the check of CPU for sx_sdk
if duthost.facts["asic_type"] == 'mellanox':
high_cpu_consume_procs['sx_sdk'] = 90
return memory_threshold, cpu_threshold, high_cpu_consume_procs
def test_cpu_memory_usage(duthosts, enum_rand_one_per_hwsku_hostname, setup_thresholds):
"""Check DUT memory usage and process cpu usage are within threshold."""
duthost = duthosts[enum_rand_one_per_hwsku_hostname]
MonitResult = namedtuple('MonitResult', ['processes', 'memory'])
monit_results = duthost.monit_process(iterations=24)['monit_results']
memory_threshold, normal_cpu_threshold, high_cpu_consume_procs = setup_thresholds
persist_threshold = 8
outstanding_mem_polls = {}
outstanding_procs = {}
outstanding_procs_counter = Counter()
for i, monit_result in enumerate(MonitResult(*_) for _ in monit_results):
logging.debug("------ Iteration %d ------", i)
check_memory(i, memory_threshold, monit_result, outstanding_mem_polls)
for proc in monit_result.processes:
cpu_threshold = normal_cpu_threshold
if proc['name'] in high_cpu_consume_procs:
cpu_threshold = high_cpu_consume_procs[proc['name']]
check_cpu_usage(cpu_threshold, outstanding_procs, outstanding_procs_counter, proc)
analyse_monitoring_results(cpu_threshold, memory_threshold, outstanding_mem_polls, outstanding_procs,
outstanding_procs_counter, persist_threshold)
def analyse_monitoring_results(cpu_threshold, memory_threshold, outstanding_mem_polls, outstanding_procs,
outstanding_procs_counter, persist_threshold):
persist_outstanding_procs = []
for pid, freq in outstanding_procs_counter.most_common():
if freq <= persist_threshold:
break
persist_outstanding_procs.append(pid)
if outstanding_mem_polls or persist_outstanding_procs:
if outstanding_mem_polls:
logging.error("system memory usage exceeds %d%%", memory_threshold)
if persist_outstanding_procs:
logging.error(
"processes that persistently exceeds cpu usage %d%%: %s",
cpu_threshold,
[outstanding_procs[p] for p in persist_outstanding_procs]
)
pytest.fail("system cpu and memory usage check fails")
@pytest.fixture(scope='module')
def counterpoll_cpu_threshold(duthosts, request):
counterpoll_cpu_usage_threshold = {"port-buffer-drop": request.config.getoption("--port_buffer_drop_cpu_usage_threshold")}
return counterpoll_cpu_usage_threshold
def test_cpu_memory_usage_counterpoll(duthosts, enum_rand_one_per_hwsku_hostname,
setup_thresholds, restore_counter_poll, counterpoll_type, counterpoll_cpu_threshold):
"""Check DUT memory usage and process cpu usage are within threshold.
Disable all counterpoll types except tested one
Collect memory and CPUs usage for 60 secs
Compare the memory usage with the memory threshold
Compare the average cpu usage with the cpu threshold for the specified progress
Restore counterpolls status
"""
duthost = duthosts[enum_rand_one_per_hwsku_hostname]
program_to_check = get_manufacturer_program_to_check(duthost)
if program_to_check is None:
pytest.skip("Skip no program is offered to check")
memory_threshold, _, _ = setup_thresholds
counterpoll_cpu_usage_threshold = counterpoll_cpu_threshold[counterpoll_type]
MonitResult = namedtuple('MonitResult', ['processes', 'memory'])
disable_all_counterpoll_type_except_tested(duthost, counterpoll_type)
monit_results = duthost.monit_process(iterations=60, delay_interval=1)['monit_results']
poll_interval = CounterpollConstants.COUNTERPOLL_INTERVAL[counterpoll_type] // 1000
outstanding_mem_polls = {}
outstanding_procs = {}
outstanding_procs_counter = Counter()
cpu_usage_program_to_check = []
prepare_ram_cpu_usage_results(MonitResult, counterpoll_cpu_usage_threshold, memory_threshold, monit_results, outstanding_mem_polls,
outstanding_procs, outstanding_procs_counter, program_to_check,
cpu_usage_program_to_check)
log_cpu_usage_by_vendor(cpu_usage_program_to_check, counterpoll_type)
cpu_usage_average = caculate_cpu_usge_average_value(extract_valid_cpu_usage_data(cpu_usage_program_to_check, poll_interval), cpu_usage_program_to_check)
logging.info("Average cpu_usage is {}".format(cpu_usage_average))
assert cpu_usage_average < counterpoll_cpu_usage_threshold, "cpu_usage_average of {} exceeds the cpu threshold:{}".format(program_to_check, counterpoll_cpu_usage_threshold)
assert not outstanding_mem_polls, " Memory {} exceeds the memory threshold {} ".format(outstanding_mem_polls, memory_threshold)
def log_cpu_usage_by_vendor(cpu_usage_program_to_check, counterpoll_type):
if cpu_usage_program_to_check:
logging.info('CPU usage for counterpoll type {} : {}'.format(counterpoll_type, cpu_usage_program_to_check))
def get_manufacturer_program_to_check(duthost):
if is_mellanox_device(duthost):
return CounterpollConstants.SX_SDK
def prepare_ram_cpu_usage_results(MonitResult, cpu_threshold, memory_threshold, monit_results, outstanding_mem_polls,
outstanding_procs, outstanding_procs_counter, program_to_check,
program_to_check_cpu_usage):
for i, monit_result in enumerate(MonitResult(*_) for _ in monit_results):
logging.debug("------ Iteration %d ------", i)
check_memory(i, memory_threshold, monit_result, outstanding_mem_polls)
for proc in monit_result.processes:
update_cpu_usage_desired_program(proc, program_to_check, program_to_check_cpu_usage)
def extract_valid_cpu_usage_data(program_to_check_cpu_usage, poll_interval):
"""
    This method is to extract the valid cpu usage data according to the poll_interval
    1. Find the index of the max value for every poll interval,
    2. Discard the data if the index is on the edge (0 or the length of program_to_check_cpu_usage - 1)
    3. If the index is adjacent to the one kept for the previous interval, only keep the former one
4. Return all indexes
For example:
poll_interval = 10
7, 1, 0, 1, 0, 1, 5, 1, 1,2, 0, 1, 0, 1, 0, 6, 1, 1, 1,2
return [15]
0, 1, 0, 1, 0, 1, 0, 1, 0, 8, 7, 1, 0, 1, 0, 6, 1, 1, 1,2
return [9]
"""
valid_cpu_usage_center_index_list = []
poll_number = len(program_to_check_cpu_usage) // poll_interval
def find_max_cpu_usage(cpu_usage_list, poll_times):
max_cpu_usage = cpu_usage_list[0]
max_cpu_usage_index = 0
for i, cpu_usage in enumerate(cpu_usage_list):
if cpu_usage > max_cpu_usage:
max_cpu_usage = cpu_usage
max_cpu_usage_index = i
return [max_cpu_usage, max_cpu_usage_index + poll_times * poll_interval]
for i in range(0, poll_number):
max_cpu_usage, max_cpu_usage_index = find_max_cpu_usage(
program_to_check_cpu_usage[poll_interval * i:poll_interval * (i + 1)], i)
if max_cpu_usage_index == 0 or max_cpu_usage_index == len(program_to_check_cpu_usage) - 1:
logging.info("The data is on the edge:{}, discard it ".format(max_cpu_usage_index))
else:
if valid_cpu_usage_center_index_list and valid_cpu_usage_center_index_list[-1] + 1 == max_cpu_usage_index:
continue
valid_cpu_usage_center_index_list.append(max_cpu_usage_index)
return valid_cpu_usage_center_index_list
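# Averages the three-sample window (center index +/- 1) around every valid center
# returned by extract_valid_cpu_usage_data, pooled over all centers.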
def caculate_cpu_usge_average_value(valid_cpu_usage_center_index_list, program_to_check_cpu_usage):
len_valid_cpu_usage = len(valid_cpu_usage_center_index_list)
cpu_usage_average = 0.0
for i in valid_cpu_usage_center_index_list:
cpu_usage_average += sum(program_to_check_cpu_usage[i - 1: i + 2])
logging.info("cpu usage center index:{}: cpu usage:{}".format(i, program_to_check_cpu_usage[i - 1:i + 2]))
return cpu_usage_average / len_valid_cpu_usage / 3.0 if len_valid_cpu_usage != 0 else 0
def check_cpu_usage(cpu_threshold, outstanding_procs, outstanding_procs_counter, proc):
if proc['cpu_percent'] >= cpu_threshold:
logging.debug("process %s(%d) cpu usage exceeds %d%%.",
proc['name'], proc['pid'], cpu_threshold)
outstanding_procs[proc['pid']] = proc.get('cmdline', proc['name'])
outstanding_procs_counter[proc['pid']] += 1
def update_cpu_usage_desired_program(proc, program_to_check, program_to_check_cpu_usage):
if program_to_check:
if proc['name'] == program_to_check:
program_to_check_cpu_usage.append(proc['cpu_percent'])
def check_memory(i, memory_threshold, monit_result, outstanding_mem_polls):
if monit_result.memory['used_percent'] > memory_threshold:
logging.debug("system memory usage exceeds %d%%: %s",
memory_threshold, monit_result.memory)
outstanding_mem_polls[i] = monit_result.memory
def disable_all_counterpoll_type_except_tested(duthost, counterpoll_type):
available_types = ConterpollHelper.get_available_counterpoll_types(duthost)
available_types.remove(counterpoll_type)
ConterpollHelper.disable_counterpoll(duthost, available_types)
| 48.786026 | 176 | 0.735858 | [
"Apache-2.0"
] | jsanghra/sonic-mgmt | tests/platform_tests/test_cpu_memory_usage.py | 11,172 | Python |
#!/usr/bin/python
import sys
import os
import tkinter
import joblib
import pathlib
from PIL import Image, ImageTk
from PIL.ExifTags import TAGS
from pathlib import Path
from collections import deque
# Built based off of: https://github.com/Lexing/pyImageCropper
# ================================================================
#
# Module scope functions
#
# ================================================================
def get_current_folder():
return str(pathlib.Path(__file__).parent.absolute())
def _get_filename(filepath):
""" get filename from path """
return str(Path(filepath).name)
def _scale_image(img, maxLen, maxHeight):
""" scale image to under the specified maxLen and maxHeight """
scale = 1
resized_img = img.copy()
# if > maxLen width, resize to maxLen
if resized_img.size[0] > maxLen:
resize = resized_img.size[0] / maxLen
answer = (int(resized_img.size[0] / resize), int(resized_img.size[1] / resize))
scale = resize
resized_img = resized_img.resize(answer, Image.ANTIALIAS)
# if > maxHeight height, resize to maxHeight
if resized_img.size[1] > maxHeight:
resize = (resized_img.size[1] / maxHeight)
answer = (int(resized_img.size[0] / resize), int(resized_img.size[1] / resize))
scale = scale * resize
resized_img = resized_img.resize(answer, Image.ANTIALIAS)
return resized_img, scale
def _point_on_image(point, img):
""" check if point is on the image """
x, y = point
if x >= 0 and x <= img.size[0]:
if y >= 0 and y <= img.size[1]:
return True
return False
# ================================================================
#
# Module scope classes
#
# ================================================================
class _DataStore:
"""
Stores data about the current state
"""
FOLDER_PROG_KEY = "FOLDER_PROG"
KEY_TO_STORE = {FOLDER_PROG_KEY: {}}
def __init__(self, filepath):
self._filepath = filepath
self._store = self._load_store()
for key in _DataStore.KEY_TO_STORE:
self.build_store(key)
def save_value(self, key, value):
self._store[key] = value
def get_value(self, key):
if key in self._store:
return self._store[key]
def build_store(self, key):
if key in self._store:
return self._store[key]
else:
self._store[key] = _DataStore.KEY_TO_STORE[key]
return self._store[key]
def save_store(self, delete=False):
self._delete_store() if delete else self._write_store()
def _load_store(self):
if os.path.exists(self._filepath):
return joblib.load(self._filepath)
else:
return {}
def _delete_store(self):
os.remove(self._filepath)
def _write_store(self):
joblib.dump(self._store, self._filepath, compress=9)
class ImageCanvas:
"""
Image canvas area of the GUI
"""
def __init__(self, tkRoot, height=800, width=1200, boxBasePx=32):
# vals
self.canvas_image = None # TK image on the canvas
self.canvasHeight = height # TK canvas height
self.canvasWidth = width # TK canvas width
self.rectangle = None # TK rectangle on the canvas
self.box = [0, 0, 0, 0] # Need to turn this into array of points
self.boxBasePx = boxBasePx # base of the rectangle to build crop box
        self.img = None                # curr original image
self.resized_img = None # curr resized image
self.movingCrop = False # crop is currently moving
self.lastLocation = [0, 0] # last location the mouse was clicked down at
self.scale = 1 # scale image was reduced by
self.currImage = None # current TK image in canvas. Need reference to it, or garbage coll cleans it up.
# objs
self.canvas = tkinter.Canvas(tkRoot,
highlightthickness=0,
bd=0)
# movement button binds
tkRoot.bind("<Button-1>", self._on_mouse_down)
tkRoot.bind("<ButtonRelease-1>", self._on_mouse_release)
tkRoot.bind("<B1-Motion>", self._on_mouse_move)
# primary methods
# ================================================================
def roll_image(self, imgLoc):
""" changes canvas to a new image """
# open image
self.img = Image.open(imgLoc)
# scale to fit area
self.resized_img, self.scale = _scale_image(self.img, self.canvasWidth, self.canvasHeight)
self.currImage = ImageTk.PhotoImage(self.resized_img)
# setup canvas
self.canvas.delete("all")
self.canvas.config(width=self.resized_img.size[0], height=self.resized_img.size[1])
self.canvas_image = self.canvas.create_image(0, 0, anchor=tkinter.NW, image=self.currImage)
self.canvas.pack(fill=tkinter.BOTH, expand=tkinter.YES)
# build pre-req objs
self._build_crop_box()
self._refresh_crop_rectangle()
def crop_image(self):
# scale box back from the viewed area
box = [self.box[0] * self.scale, self.box[1] * self.scale,
self.box[2] * self.scale, self.box[3] * self.scale]
# make the crop
cropped = self.img.crop(box)
# error?
if cropped.size[0] == 0 and cropped.size[1] == 0:
print('image has no size!!!')
        # edge case from resizing. Should change this to resize crop to fix diff? Only occurs every 1 in like 600 so far
# Possible fix: if remainder above half of base, then resize HIGHER else resize LOWER?
if not((cropped.size[0] * cropped.size[1]) % self.boxBasePx == 0):
return None
return cropped
# helper methods
# ================================================================
def _build_crop_box(self):
""" creates the box for the crop rectangle x1,y1,x2,y2"""
# get min side length of image
boxMax = min(self.resized_img.size[0], self.resized_img.size[1])
# (length of side - (remainder of side left after removing area divisible by boxBase))
newImgLen = (boxMax - (boxMax % (self.boxBasePx / self.scale)))
# build box from side length
self.box = [0, 0, newImgLen, newImgLen]
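    # Illustrative sketch, not part of the original module, with hypothetical
    # numbers: for a resized image of 500x700, boxBasePx=32 and scale=2.0,
    # boxMax = min(500, 700) = 500 and boxBasePx / scale = 16.0, so
    # newImgLen = 500 - (500 % 16.0) = 496.0 and the crop box becomes
    # [0, 0, 496.0, 496.0].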
def _refresh_crop_rectangle(self, deltaX=0, deltaY=0):
""" re-builds the crop rectangle based on the specified box """
        if self.rectangle and (deltaX > 0 or deltaY > 0):
self.canvas.move(self.rectangle, deltaX, deltaY)
else:
self.canvas.delete(self.rectangle)
self.rectangle = self.canvas.create_rectangle(self.box[0],
self.box[1],
self.box[2],
self.box[3],
outline='red',
width=2)
# movement methods
# ================================================================
def _on_mouse_down(self, event):
""" if mouse clicked on crop area, allow moving crop """
if event.x >= self.box[0] and event.x <= self.box[2]:
if event.y >= self.box[1] and event.y <= self.box[3]:
self.movingCrop = True
self.lastLocation = [event.x, event.y]
def _on_mouse_release(self, event):
""" stop allowing movement of crop area """
self._on_mouse_move(event)
self.movingCrop = False
def _on_mouse_move(self, event):
""" move crop along with the user's mouse """
if self.movingCrop:
if _point_on_image((event.x, event.y), self.resized_img):
# build delta from last spot
deltaX = event.x - self.lastLocation[0]
deltaY = event.y - self.lastLocation[1]
# force area of box to conform to area of image
if self.box[0] + deltaX < 0:
deltaX = 0
if self.box[1] + deltaY < 0:
deltaY = 0
if self.box[2] + deltaX > self.resized_img.size[0]:
deltaX = self.box[2] - self.resized_img.size[0]
if self.box[3] + deltaY > self.resized_img.size[1]:
deltaY = self.box[3] - self.resized_img.size[1]
# calc
self.box = [self.box[0] + deltaX, self.box[1] + deltaY,
self.box[2] + deltaX, self.box[3] + deltaY]
# move box
self._refresh_crop_rectangle(deltaX, deltaY)
self.lastLocation = [event.x, event.y]
class ImageCropper:
"""
Main module class
"""
def __init__(self, inputDir, outputDir, canvasHeight=800, canvasWidth=1200, cropBasePx=32):
# vals
self.inputDir = inputDir
self.outputDir = outputDir
self.currImage = None
self.fileQueue = deque()
self.queueIndex = 0
self.doneSet = set()
self.canvasHeight = canvasHeight
self.canvasWidth = canvasWidth
self.cropBasePx = cropBasePx
# objs
self._datasource = _DataStore(get_current_folder() + '/dataStore')
self._folderProgDict = self._datasource.get_value(_DataStore.FOLDER_PROG_KEY)
self.tkroot = tkinter.Tk()
self.imageCanvas = ImageCanvas(self.tkroot)
# movement button binds
self.tkroot.bind("<Key>", self._on_key_down)
# primary methods
# ================================================================
def run(self):
self._setup_store()
self._pull_files()
self._roll_image()
self.tkroot.geometry(str(self.canvasWidth) + 'x' + str(self.canvasHeight))
self.tkroot.mainloop()
def _pull_files(self):
if not os.path.isdir(self.inputDir):
raise IOError(self.inputDir + ' is not a directory')
files = os.listdir(self.inputDir)
if len(files) == 0:
print('No files found in ' + self.inputDir)
for filename in files:
self.fileQueue.append(os.path.join(self.inputDir, filename))
def _roll_image(self):
breakOut = False
        while not breakOut:
            if self.fileQueue:
                self.currImage = self.fileQueue.popleft()
                if self.currImage not in self.doneSet:
print('Index in queue ' + str(self.queueIndex))
try:
self.imageCanvas.roll_image(self.currImage)
breakOut = True
except IOError:
print('Ignore: ' + self.currImage + ' cannot be opened as an image')
breakOut = False
self.queueIndex += 1
else:
breakOut = True
self.tkroot.quit()
self.tkroot.update()
# helper methods
# ================================================================
def _setup_store(self):
if self.inputDir in self._folderProgDict:
self.doneSet = self._folderProgDict[self.inputDir]
else:
self._folderProgDict[self.inputDir] = self.doneSet
def _save_image(self, img):
if img:
outputName = _get_filename(self.currImage)
outputLoc = self.outputDir + '/' + outputName[:outputName.rfind('.')] + '_cropped'
img.save(outputLoc + '.png', 'png')
# movement methods
# ================================================================
def _on_key_down(self, event):
if event.char == ' ':
self._save_image(self.imageCanvas.crop_image())
self.doneSet.add(self.currImage)
self._datasource.save_store()
self._roll_image()
elif event.char == 's':
self.doneSet.add(self.currImage)
self._datasource.save_store()
self._roll_image()
elif event.char == 'q':
self.tkroot.destroy() | 34.855114 | 121 | 0.544706 | [
"MIT"
] | Writ3r/pyImageCropper | pyImageCropper/pyImageCropper.py | 12,269 | Python |
# -*- coding: utf-8 -*-
from gluon import *
from s3 import S3CustomController
THEME = "historic.CERT"
# =============================================================================
class index(S3CustomController):
""" Custom Home Page """
def __call__(self):
response = current.response
response.s3.stylesheets.append("../themes/CERT/homepage.css")
title = current.deployment_settings.get_system_name()
response.title = title
T = current.T
# Check logged in and permissions
#auth = current.auth
#roles = current.session.s3.roles
#system_roles = auth.get_system_roles()
#ADMIN = system_roles.ADMIN
#AUTHENTICATED = system_roles.AUTHENTICATED
#has_role = auth.s3_has_role
menus = [{"title": T("Volunteers"),
"icon": "user",
"description": T("Manage people who have volunteered for your organization, their contact details, certicates and trainings."),
"module": "vol",
"function": "volunteer",
"buttons": [{"args": "summary",
"icon": "list",
"label": T("View"),
},
{"args": "create",
"icon": "plus",
"label": T("Create"),
}]
},
{"title": T("Trainings"),
"icon": "book",
"description": T("Catalog of Training Courses which your Volunteers can attend."),
"module": "vol",
"function": "course",
"buttons": [{"args": "summary",
"icon": "list",
"label": T("View"),
},
{"args": "create",
"icon": "plus",
"label": T("Create"),
}]
},
{"title": T("Certificates"),
"icon": "certificate",
"description": T("Catalog of Certificates which your Volunteers can get."),
"module": "vol",
"function": "certificate",
"buttons": [{"args": "summary",
"icon": "list",
"label": T("View"),
},
{"args": "create",
"icon": "plus",
"label": T("Create"),
}]
},
{"title": T("Messaging"),
"icon": "envelope-o",
"description": T("Send Email, SMS and Twitter messages to your Volunteers."),
"module": "msg",
"function": "Index",
"args": None,
"buttons": [{"function": "inbox",
"args": None,
"icon": "inbox",
"label": T("Inbox"),
},
{"function": "compose",
"args": None,
"icon": "plus",
"label": T("Compose"),
}]
},
]
self._view(THEME, "index.html")
return dict(title = title,
menus = menus,
)
# END =========================================================================
| 39.177083 | 145 | 0.346716 | [
"MIT"
] | AlexanderLaughlin/eden | modules/templates/historic/CERT/controllers.py | 3,761 | Python |
import pandas as pd
import gc
import transformers
from transformers import BertForSequenceClassification, BertTokenizerFast, Trainer, TrainingArguments
from nlp import load_dataset, Dataset
import torch
import numpy as np
from sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer
from sklearn.linear_model import SGDClassifier
from sklearn.metrics import accuracy_score, precision_recall_fscore_support, classification_report, hamming_loss
from sklearn.model_selection import train_test_split, GridSearchCV
from sklearn.naive_bayes import MultinomialNB
from sklearn.pipeline import Pipeline
from random import sample, choices
from joblib import dump, load
def string_labels_to_int(Y):
keys={}
new_Y=[]
for item in Y:
if item in keys:
new_Y.append(keys[item])
else:
keys.update({item:len(keys)+1})
new_Y.append(keys[item])
return new_Y, keys
def int_labels_to_list(Y,keys):
new_Y=[]
for item in Y:
sublist=[0] * len(keys)
sublist[item-1]=1
sublist=torch.tensor(sublist)
new_Y.append(sublist)
return new_Y
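# Illustrative sketch, not part of the original module, with hypothetical data:
#   string_labels_to_int(["cat", "dog", "cat"])
# returns ([1, 2, 1], {"cat": 1, "dog": 2}), and
#   int_labels_to_list([1, 2, 1], {"cat": 1, "dog": 2})
# yields the one-hot tensors [tensor([1, 0]), tensor([0, 1]), tensor([1, 0])].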
class LTP:
def __init__ (self, Xdata=None, Ydata=None, csv=None,xlsx=None,x_col='X',y_col='Y',models='all',test_frac=0.1,train_frac=0.9):
if models=='all':
self.model_list = [
'bert-base-uncased',
'albert-base-v2',
'roberta-base',
'linear_SVM',
'multinomial_naive_bayesian',]
elif models=='count-vectorizer':
self.model_list = [
'linear_SVM',
'multinomial_naive_bayesian',]
elif models=='transformers':
self.model_list = [
'bert-base-uncased',
'albert-base-v2',
'roberta-base',]
else:
print('Models not recognized, the available options are currently "all", "count-vectorizer", and "transformers"')
return
        if sum(arg is not None for arg in (Xdata, csv, xlsx)) > 1:
            print("You have provided too much data, give just x and y data, or a csv or xlsx file!")
            return
if csv!=None:
csv_data=pd.read_csv(csv)
Xdata=csv_data[x_col]
Ydata=csv_data[y_col]
if xlsx!=None:
xlsx_data=pd.read_excel(xlsx)
Xdata=xlsx_data[x_col]
Ydata=xlsx_data[y_col]
if isinstance(Xdata, pd.Series):
print('converting pandas series to list')
Xdata=list(Xdata)
if isinstance(Ydata, pd.Series):
print('converting pandas series to list')
Ydata=list(Ydata)
        if Xdata is None or Ydata is None:
print('Either you have not put in your own data, or you have only put in X or Y data, loading default dataset...')
self.train_dataset_raw, self.test_dataset_raw = load_dataset('imdb', split=['train', 'test'])
X=self.train_dataset_raw['text']+self.test_dataset_raw['text']
Xdata = X
Y=self.train_dataset_raw['label']+self.test_dataset_raw['label']
Ydata = Y
keys=set(Y)
else:
X=Xdata
Y=Ydata
if all(isinstance(n, int) for n in Y):
keys=set(Y)
else:
Y,keys=string_labels_to_int(Y)
#add method to make min label 0
if min(Y)>=1:
Y=[y-min(Y) for y in Y]
if len(Xdata)<20:
print('dataset is really small, using default test/train split (0.25)')
test_frac=None
train_frac=None
if len(Xdata)<8:
print('dataset is really too small, using default test/train split (0.5)')
test_frac=0.5
train_frac=0.5
if len(Xdata)!=len(Ydata):
print('ERROR: X data and Y data lengths are not the same size, they need to be!')
return
X_train, X_test, Y_train, Y_test = train_test_split(X, Y,
stratify=Y,
test_size=test_frac,
train_size=train_frac)
self.num_labels=len(keys)
#self.train_dataset_raw_CNN = TensorDataset(X_train, int_labels_to_list(Y_train,keys))
#self.test_dataset_raw_CNN = TensorDataset(X_test, int_labels_to_list(Y_test,keys))
print('X_train length: ' + str(len(X_train)))
print('X_test length: ' + str(len(X_test)))
print('Y_train length: ' + str(len(Y_train)))
print('Y_test length: ' + str(len(Y_test)))
self.train_dataset_raw = Dataset.from_pandas(pd.DataFrame({'text':X_train, 'labels': Y_train}))
self.test_dataset_raw = Dataset.from_pandas(pd.DataFrame({'text':X_test, 'labels': Y_test}))
self.all_metrics = {}
def compute_metrics(self, pred):
labels = pred.label_ids
preds = pred.predictions.argmax(-1)
precision, recall, f1, _ = precision_recall_fscore_support(labels, preds, average='weighted')
full_report = classification_report(labels, preds, output_dict=True)
acc = accuracy_score(labels, preds)
return {
'accuracy': acc,
'f1': f1,
'precision': precision,
'recall': recall,
'full_report': full_report
}
def get_metrics(self):
return self.all_metrics
def get_metrics_df(self):
dic = self.get_metrics()
df = pd.DataFrame.from_dict(dic)
df = df.rename_axis("model_name", axis="columns").T
df.reset_index(inplace=True)
df.rename_axis()
return df
def print_metrics_table(self):
dic = self.get_metrics()
print("{:>25} {:>15} {:>15} {:>15} {:>15} {:>15}".format('Model', 'loss', 'accuracy', 'f1', 'precision', 'recall'))
for k, v in dic.items():
print("{:>25} {:15.5} {:15.5} {:15.5} {:15.5} {:15.5}".format(k, v['eval_loss'], v['eval_accuracy'], v['eval_f1'], v['eval_precision'], v['eval_recall']))
def run(self, focused=False, focused_model=None, training_epochs=5):
if focused==True:
self.model_list=[focused_model]
else:
pass
for model_name in self.model_list:
training_args = TrainingArguments(
output_dir='./results/'+model_name,
num_train_epochs=training_epochs,
per_device_train_batch_size=16,
per_device_eval_batch_size=64,
warmup_steps=500,
weight_decay=0.01,
#evaluate_during_training=True,
logging_dir='./logs/'+model_name,
)
model = None
tokenizer = None
print('Training on a dataset with ' +str(self.num_labels)+ ' labels')
if model_name == "bert-base-uncased":
model = BertForSequenceClassification.from_pretrained(model_name, num_labels=self.num_labels)
tokenizer = BertTokenizerFast.from_pretrained(model_name)
elif model_name == "albert-base-v2":
tokenizer = transformers.AlbertTokenizer.from_pretrained('albert-base-v2')
model = transformers.AlbertForSequenceClassification.from_pretrained('albert-base-v2', return_dict=True, num_labels=self.num_labels)
elif model_name == "roberta-base":
tokenizer = transformers.RobertaTokenizer.from_pretrained('roberta-base')
model = transformers.RobertaForSequenceClassification.from_pretrained('roberta-base', return_dict=True, num_labels=self.num_labels)
elif model_name == "linear_SVM":
tokenizer = None
model = 'linear_SVM'
parameters={
'vect__ngram_range': [(1, 1), (1, 2)],
'tfidf__use_idf': (True, False),
'clf__alpha': (5e-2, 1e-2,5e-3, 1e-3,5e-3),
'clf__penalty': ('l2', 'l1', 'elasticnet')
}
classifier=SGDClassifier(loss='hinge',random_state=42,max_iter=5,tol=None)
elif model_name == "multinomial_naive_bayesian":
tokenizer = None
model = 'multinomial_naive_bayesian'
parameters= {
'vect__ngram_range': [(1, 1), (1, 2)],
'tfidf__use_idf': (True, False),
'clf__alpha': (1,1e-1,1e-2, 1e-3,1e-4),
'clf__fit_prior': (True, False),
}
classifier=MultinomialNB()
            if model is None:  # tokenizer is legitimately None for the sklearn models
                print("ERROR")
def tokenize(batch):
return tokenizer(batch['text'], padding='max_length', truncation=True)
if tokenizer is not None:
train_dataset = self.train_dataset_raw.map(tokenize, batched=True, batch_size=len(self.train_dataset_raw))
test_dataset = self.test_dataset_raw.map(tokenize, batched=True, batch_size=len(self.train_dataset_raw))
train_dataset.set_format('torch', columns=['input_ids', 'attention_mask', 'labels'])
test_dataset.set_format('torch', columns=['input_ids', 'attention_mask', 'labels'])
else:
train_dataset = self.train_dataset_raw
test_dataset = self.test_dataset_raw
if model_name== "linear_SVM" or model_name== "multinomial_naive_bayesian":
trainer=None
pipeline = Pipeline([('vect', CountVectorizer()),
('tfidf', TfidfTransformer()),
('clf', classifier),
])
gs_clf = GridSearchCV(pipeline, parameters, cv=5, n_jobs=-1)
if len(train_dataset['labels'])<25:
print('not enough data to use a count vectorizer, sorry!')
else:
gs_ind=int(len(train_dataset['labels'])/10) #use a tenth of the training dataset to do gridsearch
gs_clf = gs_clf.fit(train_dataset['text'][:gs_ind], train_dataset['labels'][:gs_ind])
best_params=gs_clf.best_params_
pipeline.set_params(**best_params)
pipeline.fit(train_dataset['text'], train_dataset['labels'])
prediction=pipeline.predict(test_dataset['text'])
precision, recall, f1, _ = precision_recall_fscore_support(test_dataset['labels'], prediction, average=None)
full_report=classification_report(test_dataset['labels'], prediction)
acc = accuracy_score(test_dataset['labels'], prediction)
loss=hamming_loss(test_dataset['labels'], prediction)
curr_metrics={
'eval_loss': loss,
'eval_accuracy': np.mean(acc),
'eval_f1': np.mean(f1),
'eval_precision': np.mean(precision),
'eval_recall': np.mean(recall),
'eval_full_report': full_report
}
dump(pipeline, model_name + "_model.joblib")
print('best parameters are:')
print(best_params)
else:
trainer = Trainer(model=model,
args=training_args,
compute_metrics=self.compute_metrics,
train_dataset=train_dataset,
eval_dataset=test_dataset
)
trainer.train()
curr_metrics = trainer.evaluate()
trainer.save_model(model_name+"_model")
self.all_metrics[model_name] = curr_metrics
print(curr_metrics)
# adding this fully solves the out of memory (OOM) error; https://github.com/huggingface/transformers/issues/1742
del model, tokenizer, trainer
# these 2 lines may not be needed
gc.collect()
torch.cuda.empty_cache()
def predict(self,model_name=None,focused=False,text=None):
if text == None:
print('you did not enter any text to classify, sorry')
return
if focused==True:
if model_name == "linear_SVM" or model_name == "multinomial_naive_bayesian":
clf = load('/content/'+model_name+'_model.joblib')
y=clf.predict([text])
else:
if model_name == "bert-base-uncased":
model=BertForSequenceClassification.from_pretrained('/content/bert-base-uncased_model')
tokenizer=BertTokenizerFast.from_pretrained('bert-base-uncased')
text_classification= transformers.pipeline('sentiment-analysis', model=model, tokenizer=tokenizer)
y=text_classification(text)[0]
elif model_name == "albert-base-v2":
model=transformers.AlbertForSequenceClassification.from_pretrained('/content/albert-base-v2_model')
tokenizer=transformers.AlbertTokenizer.from_pretrained('albert-base-v2')
text_classification= transformers.pipeline('sentiment-analysis', model=model, tokenizer=tokenizer)
y=text_classification(text)[0]
elif model_name == "roberta-base":
model=transformers.RobertaForSequenceClassification.from_pretrained('/content/roberta-base_model')
tokenizer=transformers.RobertaTokenizer.from_pretrained('roberta-base')
text_classification= transformers.pipeline('sentiment-analysis', model=model, tokenizer=tokenizer)
y=text_classification(text)[0]
print(model_name)
print(y)
else:
for model_name in self.model_list:
if model_name == "linear_SVM" or model_name == "multinomial_naive_bayesian":
clf = load('/content/'+model_name+'_model.joblib')
y=clf.predict([text])
else:
if model_name == "bert-base-uncased":
model=BertForSequenceClassification.from_pretrained('/content/bert-base-uncased_model')
tokenizer=BertTokenizerFast.from_pretrained('bert-base-uncased')
text_classification= transformers.pipeline('sentiment-analysis', model=model, tokenizer=tokenizer)
y=text_classification(text)[0]
elif model_name == "albert-base-v2":
model=transformers.AlbertForSequenceClassification.from_pretrained('/content/albert-base-v2_model')
tokenizer=transformers.AlbertTokenizer.from_pretrained('albert-base-v2')
text_classification= transformers.pipeline('sentiment-analysis', model=model, tokenizer=tokenizer)
y=text_classification(text)[0]
elif model_name == "roberta-base":
model=transformers.RobertaForSequenceClassification.from_pretrained('/content/roberta-base_model')
tokenizer=transformers.RobertaTokenizer.from_pretrained('roberta-base')
text_classification= transformers.pipeline('sentiment-analysis', model=model, tokenizer=tokenizer)
y=text_classification(text)[0]
print(model_name)
print(y)
| 39.38024 | 157 | 0.69239 | [
"MIT"
] | lemay-ai/lazyTextPredict | lazytextpredict/basic_classification.py | 13,153 | Python |
import ray
from ray import workflow
import requests
@ray.remote
def compute_large_fib(M: int, n: int = 1, fib: int = 1):
next_fib = requests.post(
"https://nemo.api.stdlib.com/[email protected]/", data={"nth": n}
).json()
if next_fib > M:
return fib
else:
return workflow.continuation(compute_large_fib.bind(M, n + 1, next_fib))
if __name__ == "__main__":
workflow.init()
assert workflow.create(compute_large_fib.bind(100)).run() == 89
| 24.3 | 80 | 0.648148 | [
"Apache-2.0"
] | Anhmike/ray | python/ray/workflow/examples/comparisons/prefect/compute_fib_workflow.py | 486 | Python |
# blender imports
import bpy
# utility imports
import numpy as np
import csv
import random
import importlib
from src.TSSBase import TSSBase
class TSSMeshHandle(TSSBase):
"""docstring for TSSMeshHandle"""
def __init__(self):
super(TSSMeshHandle, self).__init__()
# class vars ###################################################################################################
self._mesh_list = [] # list of mesh [list]
self._mesh_obj_list = [] # list of mesh nodes [list]
############################################################################################ end of class vars #
def reset_module(self):
""" reset all local vars
Args:
None
Returns:
None
"""
# reset all mesh ############################################################################################
for mesh in self._mesh_obj_list:
# reset mesh
mesh.reset_module()
# maybe obsolete in future versions
del mesh
##################################################################################### end of reset all mesh #
self.reset_base()
self._mesh_list = []
self._mesh_obj_list = []
def activate_pass(self,pass_name, pass_cfg, keyframe=-1):
""" enables specific pass
Args:
pass_name: name of pass to activate [string]
pass_cfg: specific parameters for the pass [dict]
keyframe: current frame number; if value > -1, this should enable also the setting of a keyframe [int]
Returns:
None
"""
for mesh in self._mesh_obj_list:
mesh.activate_pass(pass_name=pass_name,pass_cfg=pass_cfg,keyframe=keyframe)
def create(self,stage_dict):
""" create function
Args:
stage_dict: dict of stages [dict]
Returns:
None
"""
self._create_meshes(cfg=self._cfg["MESHES"],
general_cfg=self._cfg["GENERAL"],
stage_dict=stage_dict)
def _create_meshes(self,cfg,general_cfg,stage_dict):
""" create function
Args:
cfg: list of mesh cfgs [list]
general_cfg: general cfg [dict]
stage_dict: dict of stages [dict]
Returns:
success code [boolean]
"""
_current_instance_label_count = 0
for ii, mesh in enumerate(cfg):
try:
# import module and create class #######################################################################
_module_name = "src.assets.meshes." + mesh["type"]
_module = importlib.import_module(_module_name)
_class = getattr(_module, mesh["type"])
_mesh = _class()
################################################################ end of import module and create class #
# set pass params and create pass ######################################################################
# set general cfg
_mesh.set_general_cfg(cfg=general_cfg)
_mesh.set_stage_dict(stage_dict=stage_dict)
# save name of material
mesh['meshParams']['name'] = mesh["name"]
# update mesh cfg
_mesh.update_cfg(cfg=mesh["meshParams"])
# create material
_instance_count, _instance_label_count = _mesh.create(instance_id_offset=_current_instance_label_count)
_current_instance_label_count += _instance_label_count
############################################################### end of set pass params and create pass #
# add pass to list
self._mesh_obj_list.append(_mesh)
self._mesh_list.append(_mesh.get_meshes())
except ImportError:
# manage import error
                raise Exception("Cannot add mesh")
return 0
def get_meshes(self):
""" get all meshes
Args:
None
Returns:
list of meshes [list]
"""
return self._mesh_list
def get_mesh_objs(self):
""" get all mesh objects
Args:
None
Returns:
list of mesh objects [list]
"""
return self._mesh_obj_list | 32.173611 | 120 | 0.455213 | [
"MIT"
] | 5trobl/oaisys | src/assets/handle/TSSMeshHandle.py | 4,633 | Python |
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import logging
import unittest
from unittest import mock
from flask_appbuilder import SQLA, Model, expose, has_access
from flask_appbuilder.security.sqla import models as sqla_models
from flask_appbuilder.views import BaseView, ModelView
from sqlalchemy import Column, Date, Float, Integer, String
from airflow import settings
from airflow.exceptions import AirflowException
from airflow.models import DagModel
from airflow.security import permissions
from airflow.www import app as application
from airflow.www.utils import CustomSQLAInterface
from tests.test_utils import fab_utils
from tests.test_utils.db import clear_db_dags, clear_db_runs
from tests.test_utils.mock_security_manager import MockSecurityManager
READ_WRITE = {permissions.ACTION_CAN_READ, permissions.ACTION_CAN_EDIT}
READ_ONLY = {permissions.ACTION_CAN_READ}
logging.basicConfig(format='%(asctime)s:%(levelname)s:%(name)s:%(message)s')
logging.getLogger().setLevel(logging.DEBUG)
log = logging.getLogger(__name__)
class SomeModel(Model):
id = Column(Integer, primary_key=True)
field_string = Column(String(50), unique=True, nullable=False)
field_integer = Column(Integer())
field_float = Column(Float())
field_date = Column(Date())
def __repr__(self):
return str(self.field_string)
class SomeModelView(ModelView):
datamodel = CustomSQLAInterface(SomeModel)
base_permissions = [
'can_list',
'can_show',
'can_add',
permissions.ACTION_CAN_EDIT,
permissions.ACTION_CAN_DELETE,
]
list_columns = ['field_string', 'field_integer', 'field_float', 'field_date']
class SomeBaseView(BaseView):
route_base = ''
@expose('/some_action')
@has_access
def some_action(self):
return "action!"
class TestSecurity(unittest.TestCase):
@classmethod
def setUpClass(cls):
settings.configure_orm()
cls.session = settings.Session
cls.app = application.create_app(testing=True)
cls.appbuilder = cls.app.appbuilder # pylint: disable=no-member
cls.app.config['WTF_CSRF_ENABLED'] = False
cls.security_manager = cls.appbuilder.sm
cls.delete_roles()
def setUp(self):
clear_db_runs()
clear_db_dags()
self.db = SQLA(self.app)
self.appbuilder.add_view(SomeBaseView, "SomeBaseView", category="BaseViews")
self.appbuilder.add_view(SomeModelView, "SomeModelView", category="ModelViews")
log.debug("Complete setup!")
@classmethod
def delete_roles(cls):
for role_name in ['team-a', 'MyRole1', 'MyRole5', 'Test_Role', 'MyRole3', 'MyRole2']:
fab_utils.delete_role(cls.app, role_name)
def expect_user_is_in_role(self, user, rolename):
self.security_manager.init_role(rolename, [])
role = self.security_manager.find_role(rolename)
if not role:
self.security_manager.add_role(rolename)
role = self.security_manager.find_role(rolename)
user.roles = [role]
self.security_manager.update_user(user)
def assert_user_has_dag_perms(self, perms, dag_id, user=None):
for perm in perms:
self.assertTrue(
self._has_dag_perm(perm, dag_id, user),
f"User should have '{perm}' on DAG '{dag_id}'",
)
def assert_user_does_not_have_dag_perms(self, dag_id, perms, user=None):
for perm in perms:
self.assertFalse(
self._has_dag_perm(perm, dag_id, user),
f"User should not have '{perm}' on DAG '{dag_id}'",
)
def _has_dag_perm(self, perm, dag_id, user):
# if not user:
# user = self.user
return self.security_manager.has_access(perm, self.security_manager.prefixed_dag_id(dag_id), user)
def tearDown(self):
clear_db_runs()
clear_db_dags()
self.appbuilder = None
self.app = None
self.db = None
log.debug("Complete teardown!")
def test_init_role_baseview(self):
role_name = 'MyRole3'
role_perms = [('can_some_action', 'SomeBaseView')]
self.security_manager.init_role(role_name, perms=role_perms)
role = self.appbuilder.sm.find_role(role_name)
self.assertIsNotNone(role)
self.assertEqual(len(role_perms), len(role.permissions))
def test_init_role_modelview(self):
role_name = 'MyRole2'
role_perms = [
('can_list', 'SomeModelView'),
('can_show', 'SomeModelView'),
('can_add', 'SomeModelView'),
(permissions.ACTION_CAN_EDIT, 'SomeModelView'),
(permissions.ACTION_CAN_DELETE, 'SomeModelView'),
]
self.security_manager.init_role(role_name, role_perms)
role = self.appbuilder.sm.find_role(role_name)
self.assertIsNotNone(role)
self.assertEqual(len(role_perms), len(role.permissions))
def test_update_and_verify_permission_role(self):
role_name = 'Test_Role'
self.security_manager.init_role(role_name, [])
role = self.security_manager.find_role(role_name)
perm = self.security_manager.find_permission_view_menu(permissions.ACTION_CAN_EDIT, 'RoleModelView')
self.security_manager.add_permission_role(role, perm)
role_perms_len = len(role.permissions)
self.security_manager.init_role(role_name, [])
new_role_perms_len = len(role.permissions)
self.assertEqual(role_perms_len, new_role_perms_len)
def test_get_user_roles(self):
user = mock.MagicMock()
user.is_anonymous = False
roles = self.appbuilder.sm.find_role('Admin')
user.roles = roles
self.assertEqual(self.security_manager.get_user_roles(user), roles)
def test_get_user_roles_for_anonymous_user(self):
viewer_role_perms = {
(permissions.ACTION_CAN_READ, permissions.RESOURCE_CONFIG),
(permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG),
(permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG_CODE),
(permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG_RUN),
(permissions.ACTION_CAN_READ, permissions.RESOURCE_IMPORT_ERROR),
(permissions.ACTION_CAN_READ, permissions.RESOURCE_AUDIT_LOG),
(permissions.ACTION_CAN_READ, permissions.RESOURCE_JOB),
(permissions.ACTION_CAN_READ, permissions.RESOURCE_PLUGIN),
(permissions.ACTION_CAN_READ, permissions.RESOURCE_SLA_MISS),
(permissions.ACTION_CAN_READ, permissions.RESOURCE_TASK_INSTANCE),
(permissions.ACTION_CAN_READ, permissions.RESOURCE_TASK_LOG),
(permissions.ACTION_CAN_READ, permissions.RESOURCE_XCOM),
(permissions.ACTION_CAN_READ, permissions.RESOURCE_WEBSITE),
(permissions.ACTION_CAN_ACCESS_MENU, permissions.RESOURCE_BROWSE_MENU),
(permissions.ACTION_CAN_ACCESS_MENU, permissions.RESOURCE_DAG_RUN),
(permissions.ACTION_CAN_ACCESS_MENU, permissions.RESOURCE_DOCS_LINK),
(permissions.ACTION_CAN_ACCESS_MENU, permissions.RESOURCE_DOCS_MENU),
(permissions.ACTION_CAN_ACCESS_MENU, permissions.RESOURCE_JOB),
(permissions.ACTION_CAN_ACCESS_MENU, permissions.RESOURCE_AUDIT_LOG),
(permissions.ACTION_CAN_ACCESS_MENU, permissions.RESOURCE_PLUGIN),
(permissions.ACTION_CAN_ACCESS_MENU, permissions.RESOURCE_SLA_MISS),
(permissions.ACTION_CAN_ACCESS_MENU, permissions.RESOURCE_TASK_INSTANCE),
(permissions.ACTION_CAN_THIS_FORM_GET, permissions.RESOURCE_RESET_MY_PASSWORD_VIEW),
(permissions.ACTION_CAN_THIS_FORM_POST, permissions.RESOURCE_RESET_MY_PASSWORD_VIEW),
(permissions.ACTION_RESETMYPASSWORD, permissions.RESOURCE_USER_DB_MODELVIEW),
(permissions.ACTION_CAN_THIS_FORM_GET, permissions.RESOURCE_USERINFO_EDIT_VIEW),
(permissions.ACTION_CAN_THIS_FORM_POST, permissions.RESOURCE_USERINFO_EDIT_VIEW),
(permissions.ACTION_USERINFOEDIT, permissions.RESOURCE_USER_DB_MODELVIEW),
(permissions.ACTION_CAN_USERINFO, permissions.RESOURCE_USER_DB_MODELVIEW),
(permissions.ACTION_CAN_USERINFO, permissions.RESOURCE_USER_OID_MODELVIEW),
(permissions.ACTION_CAN_USERINFO, permissions.RESOURCE_USER_LDAP_MODELVIEW),
(permissions.ACTION_CAN_USERINFO, permissions.RESOURCE_USER_OAUTH_MODELVIEW),
(permissions.ACTION_CAN_USERINFO, permissions.RESOURCE_USER_REMOTEUSER_MODELVIEW),
}
self.app.config['AUTH_ROLE_PUBLIC'] = 'Viewer'
with self.app.app_context():
user = mock.MagicMock()
user.is_anonymous = True
perms_views = set()
for role in self.security_manager.get_user_roles(user):
perms_views.update(
{(perm_view.permission.name, perm_view.view_menu.name) for perm_view in role.permissions}
)
self.assertEqual(perms_views, viewer_role_perms)
@mock.patch('airflow.www.security.AirflowSecurityManager.get_user_roles')
def test_get_all_permissions_views(self, mock_get_user_roles):
role_name = 'MyRole5'
role_perm = 'can_some_action'
role_vm = 'SomeBaseView'
username = 'get_all_permissions_views'
with self.app.app_context():
user = fab_utils.create_user(
self.app,
username,
role_name,
permissions=[
(role_perm, role_vm),
],
)
role = user.roles[0]
mock_get_user_roles.return_value = [role]
self.assertEqual(self.security_manager.get_all_permissions_views(), {(role_perm, role_vm)})
mock_get_user_roles.return_value = []
self.assertEqual(len(self.security_manager.get_all_permissions_views()), 0)
def test_get_accessible_dag_ids(self):
role_name = 'MyRole1'
permission_action = [permissions.ACTION_CAN_READ]
dag_id = 'dag_id'
username = "ElUser"
user = fab_utils.create_user(
self.app,
username,
role_name,
permissions=[
(permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG),
(permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG),
],
)
dag_model = DagModel(dag_id=dag_id, fileloc="/tmp/dag_.py", schedule_interval="2 2 * * *")
self.session.add(dag_model)
self.session.commit()
self.security_manager.sync_perm_for_dag( # type: ignore # pylint: disable=no-member
dag_id, access_control={role_name: permission_action}
)
self.assertEqual(self.security_manager.get_accessible_dag_ids(user), {'dag_id'})
def test_dont_get_inaccessible_dag_ids_for_dag_resource_permission(self):
# In this test case,
# get_readable_dag_ids() don't return DAGs to which the user has CAN_EDIT permission
username = "Monsieur User"
role_name = "MyRole1"
permission_action = [permissions.ACTION_CAN_EDIT]
dag_id = "dag_id"
user = fab_utils.create_user(
self.app,
username,
role_name,
permissions=[
(permissions.ACTION_CAN_EDIT, permissions.RESOURCE_DAG),
],
)
dag_model = DagModel(dag_id=dag_id, fileloc="/tmp/dag_.py", schedule_interval="2 2 * * *")
self.session.add(dag_model)
self.session.commit()
self.security_manager.sync_perm_for_dag( # type: ignore # pylint: disable=no-member
dag_id, access_control={role_name: permission_action}
)
self.assertEqual(self.security_manager.get_readable_dag_ids(user), set())
@mock.patch('airflow.www.security.AirflowSecurityManager._has_view_access')
def test_has_access(self, mock_has_view_access):
user = mock.MagicMock()
user.is_anonymous = False
mock_has_view_access.return_value = True
self.assertTrue(self.security_manager.has_access('perm', 'view', user))
def test_sync_perm_for_dag_creates_permissions_on_view_menus(self):
test_dag_id = 'TEST_DAG'
prefixed_test_dag_id = f'DAG:{test_dag_id}'
self.security_manager.sync_perm_for_dag(test_dag_id, access_control=None)
self.assertIsNotNone(
self.security_manager.find_permission_view_menu(permissions.ACTION_CAN_READ, prefixed_test_dag_id)
)
self.assertIsNotNone(
self.security_manager.find_permission_view_menu(permissions.ACTION_CAN_EDIT, prefixed_test_dag_id)
)
@mock.patch('airflow.www.security.AirflowSecurityManager._has_perm')
@mock.patch('airflow.www.security.AirflowSecurityManager._has_role')
def test_has_all_dag_access(self, mock_has_role, mock_has_perm):
mock_has_role.return_value = True
self.assertTrue(self.security_manager.has_all_dags_access())
mock_has_role.return_value = False
mock_has_perm.return_value = False
self.assertFalse(self.security_manager.has_all_dags_access())
mock_has_perm.return_value = True
self.assertTrue(self.security_manager.has_all_dags_access())
def test_access_control_with_non_existent_role(self):
with self.assertRaises(AirflowException) as context:
self.security_manager.sync_perm_for_dag(
dag_id='access-control-test',
access_control={
'this-role-does-not-exist': [permissions.ACTION_CAN_EDIT, permissions.ACTION_CAN_READ]
},
)
self.assertIn("role does not exist", str(context.exception))
def test_all_dag_access_doesnt_give_non_dag_access(self):
username = 'dag_access_user'
role_name = 'dag_access_role'
with self.app.app_context():
user = fab_utils.create_user(
self.app,
username,
role_name,
permissions=[
(permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG),
(permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG),
],
)
self.assertTrue(
self.security_manager.has_access(permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG, user)
)
self.assertFalse(
self.security_manager.has_access(
permissions.ACTION_CAN_READ, permissions.RESOURCE_TASK_INSTANCE, user
)
)
def test_access_control_with_invalid_permission(self):
invalid_permissions = [
'can_varimport', # a real permission, but not a member of DAG_PERMS
'can_eat_pudding', # clearly not a real permission
]
username = "LaUser"
user = fab_utils.create_user(
self.app,
username=username,
role_name='team-a',
)
for permission in invalid_permissions:
self.expect_user_is_in_role(user, rolename='team-a')
with self.assertRaises(AirflowException) as context:
self.security_manager.sync_perm_for_dag(
'access_control_test', access_control={'team-a': {permission}}
)
self.assertIn("invalid permissions", str(context.exception))
def test_access_control_is_set_on_init(self):
username = 'access_control_is_set_on_init'
role_name = 'team-a'
with self.app.app_context():
user = fab_utils.create_user(
self.app,
username,
role_name,
permissions=[],
)
self.expect_user_is_in_role(user, rolename='team-a')
self.security_manager.sync_perm_for_dag(
'access_control_test',
access_control={'team-a': [permissions.ACTION_CAN_EDIT, permissions.ACTION_CAN_READ]},
)
self.assert_user_has_dag_perms(
perms=[permissions.ACTION_CAN_EDIT, permissions.ACTION_CAN_READ],
dag_id='access_control_test',
user=user,
)
self.expect_user_is_in_role(user, rolename='NOT-team-a')
self.assert_user_does_not_have_dag_perms(
perms=[permissions.ACTION_CAN_EDIT, permissions.ACTION_CAN_READ],
dag_id='access_control_test',
user=user,
)
def test_access_control_stale_perms_are_revoked(self):
username = 'access_control_stale_perms_are_revoked'
role_name = 'team-a'
with self.app.app_context():
user = fab_utils.create_user(
self.app,
username,
role_name,
permissions=[],
)
self.expect_user_is_in_role(user, rolename='team-a')
self.security_manager.sync_perm_for_dag(
'access_control_test', access_control={'team-a': READ_WRITE}
)
self.assert_user_has_dag_perms(perms=READ_WRITE, dag_id='access_control_test', user=user)
self.security_manager.sync_perm_for_dag(
'access_control_test', access_control={'team-a': READ_ONLY}
)
self.assert_user_has_dag_perms(
perms=[permissions.ACTION_CAN_READ], dag_id='access_control_test', user=user
)
self.assert_user_does_not_have_dag_perms(
perms=[permissions.ACTION_CAN_EDIT], dag_id='access_control_test', user=user
)
def test_no_additional_dag_permission_views_created(self):
ab_perm_view_role = sqla_models.assoc_permissionview_role
self.security_manager.sync_roles()
num_pv_before = self.db.session().query(ab_perm_view_role).count()
self.security_manager.sync_roles()
num_pv_after = self.db.session().query(ab_perm_view_role).count()
self.assertEqual(num_pv_before, num_pv_after)
def test_override_role_vm(self):
test_security_manager = MockSecurityManager(appbuilder=self.appbuilder)
self.assertEqual(len(test_security_manager.VIEWER_VMS), 1)
self.assertEqual(test_security_manager.VIEWER_VMS, {'Airflow'})
| 41.936543 | 110 | 0.669032 | [
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | 0x68/airflow | tests/www/test_security.py | 19,165 | Python |
from typing import Any, Dict, Set
import orjson
from zerver.lib.test_classes import ZulipTestCase
from zerver.lib.test_helpers import EventInfo, capture_event
from zerver.lib.user_status import get_user_info_dict, update_user_status
from zerver.models import UserProfile, UserStatus, get_client
def get_away_user_ids(realm_id: int) -> Set[int]:
user_dict = get_user_info_dict(realm_id)
return {
int(user_id)
for user_id in user_dict
if user_dict[user_id].get('away')
}
def user_info(user: UserProfile) -> Dict[str, Any]:
user_dict = get_user_info_dict(user.realm_id)
return user_dict.get(str(user.id), dict())
class UserStatusTest(ZulipTestCase):
def test_basics(self) -> None:
cordelia = self.example_user('cordelia')
hamlet = self.example_user('hamlet')
king_lear = self.lear_user('king')
realm_id = hamlet.realm_id
away_user_ids = get_away_user_ids(realm_id=realm_id)
self.assertEqual(away_user_ids, set())
client1 = get_client('web')
client2 = get_client('ZT')
update_user_status(
user_profile_id=hamlet.id,
status=UserStatus.AWAY,
status_text=None,
client_id=client1.id,
)
away_user_ids = get_away_user_ids(realm_id=realm_id)
self.assertEqual(away_user_ids, {hamlet.id})
# Test that second client just updates
# the record. We only store one record
# per user. The user's status transcends
# clients; we only store the client for
# reference and to maybe reconcile timeout
# situations.
update_user_status(
user_profile_id=hamlet.id,
status=UserStatus.AWAY,
status_text='out to lunch',
client_id=client2.id,
)
self.assertEqual(
user_info(hamlet),
dict(away=True, status_text='out to lunch'),
)
away_user_ids = get_away_user_ids(realm_id=realm_id)
self.assertEqual(away_user_ids, {hamlet.id})
rec_count = UserStatus.objects.filter(user_profile_id=hamlet.id).count()
self.assertEqual(rec_count, 1)
# Setting status_text to None causes it be ignored.
update_user_status(
user_profile_id=hamlet.id,
status=UserStatus.NORMAL,
status_text=None,
client_id=client2.id,
)
self.assertEqual(
user_info(hamlet),
dict(status_text='out to lunch'),
)
# Clear the status_text now.
update_user_status(
user_profile_id=hamlet.id,
status=None,
status_text='',
client_id=client2.id,
)
self.assertEqual(
user_info(hamlet),
dict(),
)
away_user_ids = get_away_user_ids(realm_id=realm_id)
self.assertEqual(away_user_ids, set())
# Now set away status for three different users across
# two realms.
update_user_status(
user_profile_id=hamlet.id,
status=UserStatus.AWAY,
status_text=None,
client_id=client1.id,
)
update_user_status(
user_profile_id=cordelia.id,
status=UserStatus.AWAY,
status_text=None,
client_id=client2.id,
)
update_user_status(
user_profile_id=king_lear.id,
status=UserStatus.AWAY,
status_text=None,
client_id=client2.id,
)
away_user_ids = get_away_user_ids(realm_id=realm_id)
self.assertEqual(away_user_ids, {cordelia.id, hamlet.id})
away_user_ids = get_away_user_ids(realm_id=king_lear.realm.id)
self.assertEqual(away_user_ids, {king_lear.id})
# Set Hamlet to NORMAL but in a meeting.
update_user_status(
user_profile_id=hamlet.id,
status=UserStatus.NORMAL,
status_text='in a meeting',
client_id=client2.id,
)
self.assertEqual(
user_info(hamlet),
dict(status_text='in a meeting'),
)
away_user_ids = get_away_user_ids(realm_id=realm_id)
self.assertEqual(away_user_ids, {cordelia.id})
def test_endpoints(self) -> None:
hamlet = self.example_user('hamlet')
realm_id = hamlet.realm_id
self.login_user(hamlet)
# Try to omit parameter--this should be an error.
payload: Dict[str, Any] = dict()
result = self.client_post('/json/users/me/status', payload)
self.assert_json_error(result, "Client did not pass any new values.")
# Try a long message.
long_text = 'x' * 61
payload = dict(status_text=long_text)
result = self.client_post('/json/users/me/status', payload)
self.assert_json_error(result, "status_text is too long (limit: 60 characters)")
payload = dict(
away=orjson.dumps(True).decode(),
status_text='on vacation',
)
event_info = EventInfo()
with capture_event(event_info):
result = self.client_post('/json/users/me/status', payload)
self.assert_json_success(result)
self.assertEqual(
event_info.payload,
dict(type='user_status', user_id=hamlet.id, away=True, status_text='on vacation'),
)
self.assertEqual(
user_info(hamlet),
dict(away=True, status_text='on vacation'),
)
# Now revoke "away" status.
payload = dict(away=orjson.dumps(False).decode())
event_info = EventInfo()
with capture_event(event_info):
result = self.client_post('/json/users/me/status', payload)
self.assert_json_success(result)
self.assertEqual(
event_info.payload,
dict(type='user_status', user_id=hamlet.id, away=False),
)
away_user_ids = get_away_user_ids(realm_id=realm_id)
self.assertEqual(away_user_ids, set())
# And now just update your info.
# The server will trim the whitespace here.
payload = dict(status_text=' in office ')
event_info = EventInfo()
with capture_event(event_info):
result = self.client_post('/json/users/me/status', payload)
self.assert_json_success(result)
self.assertEqual(
event_info.payload,
dict(type='user_status', user_id=hamlet.id, status_text='in office'),
)
self.assertEqual(
user_info(hamlet),
dict(status_text='in office'),
)
# And finally clear your info.
payload = dict(status_text='')
event_info = EventInfo()
with capture_event(event_info):
result = self.client_post('/json/users/me/status', payload)
self.assert_json_success(result)
self.assertEqual(
event_info.payload,
dict(type='user_status', user_id=hamlet.id, status_text=''),
)
self.assertEqual(
get_user_info_dict(realm_id=realm_id),
{},
)
| 30.854077 | 94 | 0.607317 | [
"Apache-2.0"
] | Benocs/zulip | zerver/tests/test_user_status.py | 7,189 | Python |
'''
DCGAN - MNIST Grayscale Handwritten Digits.
Ref: https://machinelearningmastery.com/generative_adversarial_networks/
'''
# import tensorflow.python.ops.numpy_ops.np_config
from data import loadRealSamples
from generator import createGenerator
from discriminator import createDiscriminator
from gan import createGan, train
if __name__ == '__main__':
latentDim = 100
dataset = loadRealSamples()
discriminator = createDiscriminator()
generator = createGenerator(latentDim)
gan = createGan(discriminator, generator)
train(discriminator, generator, gan, dataset, latentDim)
| 30 | 72 | 0.786667 | [
"MIT"
] | sem-onyalo/mlm-2-dcgan-digits | main.py | 600 | Python |
# -*- coding: utf-8 -*-
import io
import json
import re
import os
import urllib.error, urllib.parse
from collections import defaultdict
from contextlib import closing
from .. import biblio
from .. import config
from .. import constants
from ..h import *
from ..DefaultOrderedDict import DefaultOrderedDict
from ..messages import *
from ..Spec import Spec
TEST_DIR = os.path.abspath(os.path.join(config.scriptPath(), "..", "tests"))
def findTestFiles():
for root, dirnames, filenames in os.walk(TEST_DIR):
for filename in filenames:
if filename.endswith(".bs") and "/github/" in root:
yield os.path.join(root, filename)
def testNameForPath(path):
if path.startswith(TEST_DIR):
return path[len(TEST_DIR)+1:]
return path
def update(path, dryRun=False):
return # early exit while working on this...
say("Downloading backref data...")
constants.quiet = float("inf")
if not dryRun:
specs = defaultdict(dict)
backrefs = defaultdict(lambda: defaultdict(list))
for i,testPath in enumerate(findTestFiles()):
if i > 1:
break
print(i,testNameForPath(testPath))
doc = Spec(inputFilename=testPath)
doc.preprocess()
if doc.md.ED:
url = doc.md.ED
elif doc.md.TR:
url = doc.md.TR
else:
continue
referencingShortname = doc.md.vshortname
for ref in processRefs(doc.externalRefsUsed):
_,_,referencedID = ref.url.partition("#")
referencedID = urllib.parse.unquote(referencedID)
referencedShortname = ref.spec
referencingLinks = findAll("[href='{0}']".format(ref.url), doc)
referencingIDs = [link.get("id") for link in referencingLinks if link.get("id")]
referencingURLs = ["{0}#{1}".format(url, id) for id in referencingIDs]
backrefs[referencedShortname][referencedID].append({
"shortname": referencingShortname,
"urls": referencingURLs
})
print(config.printjson(backrefs))
def processRefs(refs):
seenRefs = set()
# shape is {spec: {reftext: {forKey: ref}}}, just collect all the refs
for keysByText in refs.values():
for refsByKey in keysByText.values():
for ref in refsByKey.values():
key = (ref.url, ref.spec)
if key not in seenRefs:
yield ref
seenRefs.add(key)
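# Illustrative sketch, not part of the original module: `refs` is shaped like
# {spec: {reftext: {forKey: ref}}}, so with hypothetical values
#   refs = {"css-fonts-3": {"font-weight": {"property": ref1, "descriptor": ref2}}}
# processRefs(refs) walks the nested dicts and yields each ref at most once per
# unique (ref.url, ref.spec) pair, skipping duplicates.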
ignoredSpecs = {
"css-foo-1",
"css-typed-om-2",
"css-2015-0",
"d0???-1",
"svg-color-1",
"{{repo}}-1"
} | 30.322222 | 96 | 0.584463 | [
"CC0-1.0"
] | agl/bikeshed | bikeshed/update/updateBackRefs.py | 2,729 | Python |
#!/usr/bin/env python3
# Modified by contributors from Intel Labs
"""
Create json with config tag
"""
import os
import math
def create_json_file(config):
config_file = os.path.join(os.environ['TVM_HOME'],'3rdparty/vta-hw/config/vta_config.json')
vta_params = config.split('_')
gemm_params = vta_params[0].split('x')
batch = int(math.log(int(gemm_params[0]),2))
blockIn = int(math.log(int(gemm_params[1]),2))
blockOut = int(math.log(int(gemm_params[2]),2))
try:
fin = open(config_file, 'rt')
data = fin.read()
fin.close()
data = data.replace('"LOG_BATCH" : 0', f'"LOG_BATCH" : {batch}')
data = data.replace('"LOG_BLOCK_IN" : 4', f'"LOG_BLOCK_IN" : {blockIn}')
data = data.replace('"LOG_BLOCK_OUT" : 4', f'"LOG_BLOCK_OUT" : {blockOut}')
data = data.replace('"LOG_UOP_BUFF_SIZE" : 15', f'"LOG_UOP_BUFF_SIZE" : {vta_params[1]}')
data = data.replace('"LOG_INP_BUFF_SIZE" : 15', f'"LOG_INP_BUFF_SIZE" : {vta_params[2]}')
data = data.replace('"LOG_WGT_BUFF_SIZE" : 18', f'"LOG_WGT_BUFF_SIZE" : {vta_params[3]}')
data = data.replace('"LOG_ACC_BUFF_SIZE" : 17', f'"LOG_ACC_BUFF_SIZE" : {vta_params[4]}')
except IOError:
print(f'Cannot open config file {config_file} for reading default config')
new_config_file = os.path.join(os.environ['TVM_HOME'], '3rdparty/vta-hw/config', f'{config}.json')
try:
json_file = open(new_config_file, 'wt')
json_file.write(data)
json_file.close()
print(f'New config written to {new_config_file}')
except IOError:
print(f'Cannot open config file {new_config_file} for writing new config')
if __name__ == '__main__':
import sys
from argparse import ArgumentParser
if len(sys.argv) < 2:
sys.exit("At least 1 argument is required")
else:
if sys.argv[0].endswith('create_json.py'):
ap = ArgumentParser(description='Create VTA json files with config and target')
ap.add_argument('-c', '--config', type=str, default='1x16x16_15_15_18_17',
help='VTA config (default: %(default)s)')
args=ap.parse_args()
create_json_file(args.config)
| 42 | 102 | 0.649725 | [
"Apache-2.0"
] | ekiwi/incubator-tvm-vta | apps/params/create_json.py | 2,184 | Python |
# Generated by Django 2.2.20 on 2021-04-27 18:12
from django.db import migrations
class Migration(migrations.Migration):
"""
Clean records in the `CERTIFICATES_GENERATEDCERTIFICATE` table that are in the downloadable state but also have
old errors still part of their certificate record.
As part of this migration we are also altering the Managers for the GeneratedCertificate model. We need the ability
to access the `objects` attribute for the cleanup.
"""
def cleanup_certificate_records(apps, schema_editor):
GeneratedCertificate = apps.get_model('certificates', 'GeneratedCertificate')
GeneratedCertificate.objects.filter(status='downloadable').exclude(error_reason='').update(error_reason='')
dependencies = [
('certificates', '0024_delete_allowlistgenerationconfiguration'),
]
operations = [
migrations.AlterModelManagers(
name='generatedcertificate',
managers=[
],
),
migrations.RunPython(cleanup_certificate_records, reverse_code=migrations.RunPython.noop)
]
| 35.580645 | 119 | 0.714415 | [
"MIT"
] | luque/better-ways-of-thinking-about-software | Part-03-Understanding-Software-Crafting-Your-Own-Tools/models/edx-platform/lms/djangoapps/certificates/migrations/0025_cleanup_certificate_errors.py | 1,103 | Python |
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Volume manager manages creating, attaching, detaching, and persistent storage.
Persistent storage volumes keep their state independent of instances. You can
attach to an instance, terminate the instance, spawn a new instance (even
one from a different image) and re-attach the volume with the same data
intact.
**Related Flags**
:volume_topic: What :mod:`rpc` topic to listen to (default: `cinder-volume`).
:volume_manager: The module name of a class derived from
:class:`manager.Manager` (default:
:class:`cinder.volume.manager.Manager`).
:volume_driver: Used by :class:`Manager`. Defaults to
:class:`cinder.volume.drivers.lvm.LVMVolumeDriver`.
:volume_group: Name of the group that will contain exported volumes (default:
`cinder-volumes`)
:num_shell_tries: Number of times to attempt to run commands (default: 3)
"""
import requests
import time
from oslo_config import cfg
from oslo_log import log as logging
import oslo_messaging as messaging
from oslo_serialization import jsonutils
from oslo_service import periodic_task
from oslo_utils import excutils
from oslo_utils import importutils
from oslo_utils import timeutils
from oslo_utils import units
from oslo_utils import uuidutils
profiler = importutils.try_import('osprofiler.profiler')
import six
from taskflow import exceptions as tfe
from cinder import compute
from cinder import context
from cinder import exception
from cinder import flow_utils
from cinder.i18n import _, _LE, _LI, _LW
from cinder.image import cache as image_cache
from cinder.image import glance
from cinder import manager
from cinder.message import api as message_api
from cinder.message import defined_messages
from cinder.message import resource_types
from cinder import objects
from cinder.objects import fields
from cinder import quota
from cinder import utils
from cinder import volume as cinder_volume
from cinder.volume import configuration as config
from cinder.volume.flows.manager import create_volume
from cinder.volume.flows.manager import manage_existing
from cinder.volume.flows.manager import manage_existing_snapshot
from cinder.volume import rpcapi as volume_rpcapi
from cinder.volume import utils as vol_utils
from cinder.volume import volume_types
LOG = logging.getLogger(__name__)
QUOTAS = quota.QUOTAS
CGQUOTAS = quota.CGQUOTAS
VALID_REMOVE_VOL_FROM_CG_STATUS = (
'available',
'in-use',
'error',
'error_deleting')
VALID_ADD_VOL_TO_CG_STATUS = (
'available',
'in-use')
VALID_CREATE_CG_SRC_SNAP_STATUS = (fields.SnapshotStatus.AVAILABLE,)
VALID_CREATE_CG_SRC_CG_STATUS = ('available',)
volume_manager_opts = [
cfg.StrOpt('volume_driver',
default='cinder.volume.drivers.lvm.LVMVolumeDriver',
help='Driver to use for volume creation'),
cfg.IntOpt('migration_create_volume_timeout_secs',
default=300,
help='Timeout for creating the volume to migrate to '
'when performing volume migration (seconds)'),
cfg.BoolOpt('volume_service_inithost_offload',
default=False,
help='Offload pending volume delete during '
'volume service startup'),
cfg.StrOpt('zoning_mode',
help='FC Zoning mode configured'),
cfg.StrOpt('extra_capabilities',
default='{}',
help='User defined capabilities, a JSON formatted string '
'specifying key/value pairs. The key/value pairs can '
'be used by the CapabilitiesFilter to select between '
'backends when requests specify volume types. For '
'example, specifying a service level or the geographical '
'location of a backend, then creating a volume type to '
'allow the user to select by these different '
'properties.'),
cfg.BoolOpt('suppress_requests_ssl_warnings',
default=False,
help='Suppress requests library SSL certificate warnings.'),
]
CONF = cfg.CONF
CONF.register_opts(volume_manager_opts)
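# Purely illustrative sketch of how the options registered above might be set
# in a cinder.conf backend section; the section name and all values below are
# assumptions chosen only to show the expected formats, not taken from this
# module:
#
#   [lvm-backend]
#   volume_driver = cinder.volume.drivers.lvm.LVMVolumeDriver
#   migration_create_volume_timeout_secs = 300
#   volume_service_inithost_offload = True
#   extra_capabilities = {"service_level": "gold", "region": "eu-west"}
#   suppress_requests_ssl_warnings = False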
MAPPING = {
'cinder.volume.drivers.huawei.huawei_18000.Huawei18000ISCSIDriver':
'cinder.volume.drivers.huawei.huawei_driver.HuaweiISCSIDriver',
'cinder.volume.drivers.huawei.huawei_driver.Huawei18000ISCSIDriver':
'cinder.volume.drivers.huawei.huawei_driver.HuaweiISCSIDriver',
'cinder.volume.drivers.huawei.huawei_18000.Huawei18000FCDriver':
'cinder.volume.drivers.huawei.huawei_driver.HuaweiFCDriver',
'cinder.volume.drivers.huawei.huawei_driver.Huawei18000FCDriver':
'cinder.volume.drivers.huawei.huawei_driver.HuaweiFCDriver',
'cinder.volume.drivers.fujitsu_eternus_dx_fc.FJDXFCDriver':
'cinder.volume.drivers.fujitsu.eternus_dx_fc.FJDXFCDriver',
'cinder.volume.drivers.fujitsu_eternus_dx_iscsi.FJDXISCSIDriver':
'cinder.volume.drivers.fujitsu.eternus_dx_iscsi.FJDXISCSIDriver',
'cinder.volume.drivers.hds.nfs.HDSNFSDriver':
'cinder.volume.drivers.hitachi.hnas_nfs.HDSNFSDriver',
'cinder.volume.drivers.hds.iscsi.HDSISCSIDriver':
'cinder.volume.drivers.hitachi.hnas_iscsi.HDSISCSIDriver',
'cinder.volume.drivers.san.hp.hp_3par_fc.HP3PARFCDriver':
'cinder.volume.drivers.hpe.hpe_3par_fc.HPE3PARFCDriver',
'cinder.volume.drivers.san.hp.hp_3par_iscsi.HP3PARISCSIDriver':
'cinder.volume.drivers.hpe.hpe_3par_iscsi.HPE3PARISCSIDriver',
'cinder.volume.drivers.san.hp.hp_lefthand_iscsi.HPLeftHandISCSIDriver':
'cinder.volume.drivers.hpe.hpe_lefthand_iscsi.HPELeftHandISCSIDriver',
'cinder.volume.drivers.san.hp.hp_xp_fc.HPXPFCDriver':
'cinder.volume.drivers.hpe.hpe_xp_fc.HPEXPFCDriver',
}
def locked_volume_operation(f):
"""Lock decorator for volume operations.
Takes a named lock prior to executing the operation. The lock is named with
the operation executed and the id of the volume. This lock can then be used
by other operations to avoid operation conflicts on shared volumes.
Example use:
If a volume operation uses this decorator, it will block until the named
lock is free. This is used to protect concurrent operations on the same
volume e.g. delete VolA while create volume VolB from VolA is in progress.
"""
def lvo_inner1(inst, context, volume_id, **kwargs):
@utils.synchronized("%s-%s" % (volume_id, f.__name__), external=True)
def lvo_inner2(*_args, **_kwargs):
return f(*_args, **_kwargs)
return lvo_inner2(inst, context, volume_id, **kwargs)
return lvo_inner1
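# Hypothetical usage sketch for the decorator above; the method shown is only
# an illustration (the real decorated methods, e.g. delete_volume, appear
# further down in VolumeManager):
#
#   @locked_volume_operation
#   def delete_volume(self, context, volume_id, **kwargs):
#       # Runs under the external lock named "<volume_id>-delete_volume",
#       # so concurrent operations on the same volume are serialized.
#       ...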
def locked_detach_operation(f):
"""Lock decorator for volume detach operations.
Takes a named lock prior to executing the detach call. The lock is named
with the operation executed and the id of the volume. This lock can then
be used by other operations to avoid operation conflicts on shared volumes.
This locking mechanism is only for detach calls. We can't use the
locked_volume_operation, because detach requires an additional
attachment_id in the parameter list.
"""
def ldo_inner1(inst, context, volume_id, attachment_id=None, **kwargs):
@utils.synchronized("%s-%s" % (volume_id, f.__name__), external=True)
def ldo_inner2(*_args, **_kwargs):
return f(*_args, **_kwargs)
return ldo_inner2(inst, context, volume_id, attachment_id, **kwargs)
return ldo_inner1
def locked_snapshot_operation(f):
"""Lock decorator for snapshot operations.
Takes a named lock prior to executing the operation. The lock is named with
the operation executed and the id of the snapshot. This lock can then be
used by other operations to avoid operation conflicts on shared snapshots.
Example use:
If a snapshot operation uses this decorator, it will block until the named
lock is free. This is used to protect concurrent operations on the same
snapshot e.g. delete SnapA while create volume VolA from SnapA is in
progress.
"""
def lso_inner1(inst, context, snapshot, **kwargs):
@utils.synchronized("%s-%s" % (snapshot.id, f.__name__), external=True)
def lso_inner2(*_args, **_kwargs):
return f(*_args, **_kwargs)
return lso_inner2(inst, context, snapshot, **kwargs)
return lso_inner1
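# Hypothetical usage sketch for the decorator above; the real decorated method
# is VolumeManager.delete_snapshot below. The resulting lock name
# "<snapshot.id>-delete_snapshot" is the same string that create_volume
# reserves via its locked_action when building a volume from a snapshot:
#
#   @locked_snapshot_operation
#   def delete_snapshot(self, context, snapshot, **kwargs):
#       ...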
class VolumeManager(manager.SchedulerDependentManager):
"""Manages attachable block storage devices."""
RPC_API_VERSION = '2.0'
target = messaging.Target(version=RPC_API_VERSION)
# On cloning a volume, we shouldn't copy volume_type, consistencygroup
# and volume_attachment, because the db sets that according to [field]_id,
# which we do copy. We also skip some other values that are set during
# creation of Volume object.
_VOLUME_CLONE_SKIP_PROPERTIES = {
'id', '_name_id', 'name_id', 'name', 'status',
'attach_status', 'migration_status', 'volume_type',
'consistencygroup', 'volume_attachment'}
def __init__(self, volume_driver=None, service_name=None,
*args, **kwargs):
"""Load the driver from the one specified in args, or from flags."""
# update_service_capabilities needs service_name to be volume
super(VolumeManager, self).__init__(service_name='volume',
*args, **kwargs)
self.configuration = config.Configuration(volume_manager_opts,
config_group=service_name)
self.stats = {}
if not volume_driver:
# Get from configuration, which will get the default
            # if it's not using multi-backend
volume_driver = self.configuration.volume_driver
if volume_driver in MAPPING:
LOG.warning(_LW("Driver path %s is deprecated, update your "
"configuration to the new path."), volume_driver)
volume_driver = MAPPING[volume_driver]
vol_db_empty = self._set_voldb_empty_at_startup_indicator(
context.get_admin_context())
LOG.debug("Cinder Volume DB check: vol_db_empty=%s", vol_db_empty)
# We pass the current setting for service.active_backend_id to
        # the driver on init, in case there was a restart or something
curr_active_backend_id = None
svc_host = vol_utils.extract_host(self.host, 'backend')
try:
service = objects.Service.get_by_args(
context.get_admin_context(),
svc_host,
'cinder-volume')
except exception.ServiceNotFound:
# NOTE(jdg): This is to solve problems with unit tests
LOG.info(_LI("Service not found for updating "
"active_backend_id, assuming default "
"for driver init."))
else:
curr_active_backend_id = service.active_backend_id
if self.configuration.suppress_requests_ssl_warnings:
LOG.warning(_LW("Suppressing requests library SSL Warnings"))
requests.packages.urllib3.disable_warnings(
requests.packages.urllib3.exceptions.InsecureRequestWarning)
requests.packages.urllib3.disable_warnings(
requests.packages.urllib3.exceptions.InsecurePlatformWarning)
self.driver = importutils.import_object(
volume_driver,
configuration=self.configuration,
db=self.db,
host=self.host,
is_vol_db_empty=vol_db_empty,
active_backend_id=curr_active_backend_id)
self.message_api = message_api.API()
if CONF.profiler.enabled and profiler is not None:
self.driver = profiler.trace_cls("driver")(self.driver)
try:
self.extra_capabilities = jsonutils.loads(
self.driver.configuration.extra_capabilities)
except AttributeError:
self.extra_capabilities = {}
except Exception:
with excutils.save_and_reraise_exception():
LOG.error(_LE("Invalid JSON: %s"),
self.driver.configuration.extra_capabilities)
if self.driver.configuration.safe_get(
'image_volume_cache_enabled'):
max_cache_size = self.driver.configuration.safe_get(
'image_volume_cache_max_size_gb')
max_cache_entries = self.driver.configuration.safe_get(
'image_volume_cache_max_count')
self.image_volume_cache = image_cache.ImageVolumeCache(
self.db,
cinder_volume.API(),
max_cache_size,
max_cache_entries
)
LOG.info(_LI('Image-volume cache enabled for host %(host)s.'),
{'host': self.host})
else:
LOG.info(_LI('Image-volume cache disabled for host %(host)s.'),
{'host': self.host})
self.image_volume_cache = None
def _count_allocated_capacity(self, ctxt, volume):
pool = vol_utils.extract_host(volume['host'], 'pool')
if pool is None:
            # No pool name is encoded in the host, so this is a legacy
            # volume created before pools were introduced. Ask the
            # driver to provide pool info if it has such
            # knowledge, and update the DB.
try:
pool = self.driver.get_pool(volume)
except Exception:
LOG.exception(_LE('Fetch volume pool name failed.'),
resource=volume)
return
if pool:
new_host = vol_utils.append_host(volume['host'],
pool)
self.db.volume_update(ctxt, volume['id'],
{'host': new_host})
else:
            # Otherwise, put them into a special fixed pool with
            # volume_backend_name being the pool name; if
            # volume_backend_name is None, use the default pool name.
            # This is only for counting purposes; it doesn't update the DB.
pool = (self.driver.configuration.safe_get(
'volume_backend_name') or vol_utils.extract_host(
volume['host'], 'pool', True))
try:
pool_stat = self.stats['pools'][pool]
except KeyError:
# First volume in the pool
self.stats['pools'][pool] = dict(
allocated_capacity_gb=0)
pool_stat = self.stats['pools'][pool]
pool_sum = pool_stat['allocated_capacity_gb']
pool_sum += volume['size']
self.stats['pools'][pool]['allocated_capacity_gb'] = pool_sum
self.stats['allocated_capacity_gb'] += volume['size']
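    # Rough sketch (values are assumptions) of the host strings the code
    # above relies on: for volume['host'] == 'node1@lvm#pool_a',
    # vol_utils.extract_host(host, 'pool') would give 'pool_a', whereas a
    # legacy host such as 'node1@lvm' carries no pool, so the pool name is
    # asked from the driver or falls back to volume_backend_name.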
def _set_voldb_empty_at_startup_indicator(self, ctxt):
"""Determine if the Cinder volume DB is empty.
A check of the volume DB is done to determine whether it is empty or
not at this point.
:param ctxt: our working context
"""
vol_entries = self.db.volume_get_all(ctxt, None, 1, filters=None)
if len(vol_entries) == 0:
LOG.info(_LI("Determined volume DB was empty at startup."))
return True
else:
LOG.info(_LI("Determined volume DB was not empty at startup."))
return False
def _sync_provider_info(self, ctxt, volumes, snapshots):
        # NOTE(jdg): For now this just updates provider_id; we can add more
        # items to the update if they're relevant, but we need to be safe in
        # what we allow and add a list of allowed keys. Things that make
        # sense are provider_*, replication_status etc.
updates, snapshot_updates = self.driver.update_provider_info(
volumes, snapshots)
if updates:
for volume in volumes:
                # NOTE(JDG): Make sure the returned item is in this host's volumes
update = (
[updt for updt in updates if updt['id'] ==
volume['id']][0])
if update:
self.db.volume_update(
ctxt,
update['id'],
{'provider_id': update['provider_id']})
        # NOTE(jdg): snapshots are slightly harder, because
# we do not have a host column and of course no get
# all by host, so we use a get_all and bounce our
# response off of it
if snapshot_updates:
cinder_snaps = self.db.snapshot_get_all(ctxt)
for snap in cinder_snaps:
# NOTE(jdg): For now we only update those that have no entry
if not snap.get('provider_id', None):
update = (
[updt for updt in snapshot_updates if updt['id'] ==
snap['id']][0])
if update:
self.db.snapshot_update(
ctxt,
update['id'],
{'provider_id': update['provider_id']})
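    # Based on how the updates are consumed above, update_provider_info() is
    # expected to return lists shaped roughly like this illustrative example:
    #   updates          = [{'id': '<volume-uuid>', 'provider_id': '<backend-ref>'}]
    #   snapshot_updates = [{'id': '<snapshot-uuid>', 'provider_id': '<backend-ref>'}]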
def init_host(self):
"""Perform any required initialization."""
ctxt = context.get_admin_context()
LOG.info(_LI("Starting volume driver %(driver_name)s (%(version)s)"),
{'driver_name': self.driver.__class__.__name__,
'version': self.driver.get_version()})
try:
self.driver.do_setup(ctxt)
self.driver.check_for_setup_error()
except Exception:
LOG.exception(_LE("Failed to initialize driver."),
resource={'type': 'driver',
'id': self.__class__.__name__})
# we don't want to continue since we failed
# to initialize the driver correctly.
return
# Initialize backend capabilities list
self.driver.init_capabilities()
volumes = objects.VolumeList.get_all_by_host(ctxt, self.host)
snapshots = self.db.snapshot_get_by_host(ctxt, self.host)
self._sync_provider_info(ctxt, volumes, snapshots)
# FIXME volume count for exporting is wrong
try:
self.stats['pools'] = {}
self.stats.update({'allocated_capacity_gb': 0})
for volume in volumes:
                # Available volumes should also be counted as allocated.
if volume['status'] in ['in-use', 'available']:
# calculate allocated capacity for driver
self._count_allocated_capacity(ctxt, volume)
try:
if volume['status'] in ['in-use']:
self.driver.ensure_export(ctxt, volume)
except Exception:
LOG.exception(_LE("Failed to re-export volume, "
"setting to ERROR."),
resource=volume)
volume.status = 'error'
volume.save()
elif volume['status'] in ('downloading', 'creating'):
LOG.warning(_LW("Detected volume stuck "
"in %(curr_status)s "
"status, setting to ERROR."),
{'curr_status': volume['status']},
resource=volume)
if volume['status'] == 'downloading':
self.driver.clear_download(ctxt, volume)
volume.status = 'error'
volume.save()
elif volume.status == 'uploading':
# Set volume status to available or in-use.
self.db.volume_update_status_based_on_attachment(
ctxt, volume.id)
else:
pass
snapshots = objects.SnapshotList.get_by_host(
ctxt, self.host, {'status': fields.SnapshotStatus.CREATING})
for snapshot in snapshots:
LOG.warning(_LW("Detected snapshot stuck in creating "
"status, setting to ERROR."), resource=snapshot)
snapshot.status = fields.SnapshotStatus.ERROR
snapshot.save()
except Exception:
LOG.exception(_LE("Error during re-export on driver init."),
resource=volume)
return
self.driver.set_throttle()
# at this point the driver is considered initialized.
# NOTE(jdg): Careful though because that doesn't mean
# that an entry exists in the service table
self.driver.set_initialized()
for volume in volumes:
if volume['status'] == 'deleting':
if CONF.volume_service_inithost_offload:
# Offload all the pending volume delete operations to the
# threadpool to prevent the main volume service thread
# from being blocked.
self._add_to_threadpool(self.delete_volume, ctxt,
volume['id'], volume=volume)
else:
# By default, delete volumes sequentially
self.delete_volume(ctxt, volume['id'], volume=volume)
LOG.info(_LI("Resume volume delete completed successfully."),
resource=volume)
# collect and publish service capabilities
self.publish_service_capabilities(ctxt)
LOG.info(_LI("Driver initialization completed successfully."),
resource={'type': 'driver',
'id': self.driver.__class__.__name__})
def init_host_with_rpc(self):
LOG.info(_LI("Initializing RPC dependent components of volume "
"driver %(driver_name)s (%(version)s)"),
{'driver_name': self.driver.__class__.__name__,
'version': self.driver.get_version()})
stats = self.driver.get_volume_stats(refresh=True)
svc_host = vol_utils.extract_host(self.host, 'backend')
try:
service = objects.Service.get_by_args(
context.get_admin_context(),
svc_host,
'cinder-volume')
except exception.ServiceNotFound:
with excutils.save_and_reraise_exception():
LOG.error(_LE("Service not found for updating "
"replication_status."))
if service.replication_status != (
fields.ReplicationStatus.FAILED_OVER):
if stats and stats.get('replication_enabled', False):
service.replication_status = fields.ReplicationStatus.ENABLED
else:
service.replication_status = fields.ReplicationStatus.DISABLED
service.save()
LOG.info(_LI("Driver post RPC initialization completed successfully."),
resource={'type': 'driver',
'id': self.driver.__class__.__name__})
def is_working(self):
"""Return if Manager is ready to accept requests.
This is to inform Service class that in case of volume driver
initialization failure the manager is actually down and not ready to
accept any requests.
"""
return self.driver.initialized
def create_volume(self, context, volume_id, request_spec=None,
filter_properties=None, allow_reschedule=True,
volume=None):
"""Creates the volume."""
# FIXME(dulek): Remove this in v3.0 of RPC API.
if volume is None:
# For older clients, mimic the old behavior and look up the volume
# by its volume_id.
volume = objects.Volume.get_by_id(context, volume_id)
context_elevated = context.elevated()
if filter_properties is None:
filter_properties = {}
if request_spec is None:
request_spec = {}
try:
# NOTE(flaper87): Driver initialization is
# verified by the task itself.
flow_engine = create_volume.get_flow(
context_elevated,
self,
self.db,
self.driver,
self.scheduler_rpcapi,
self.host,
volume.id,
allow_reschedule,
context,
request_spec,
filter_properties,
image_volume_cache=self.image_volume_cache,
)
except Exception:
msg = _("Create manager volume flow failed.")
LOG.exception(msg, resource={'type': 'volume', 'id': volume.id})
raise exception.CinderException(msg)
snapshot_id = request_spec.get('snapshot_id')
source_volid = request_spec.get('source_volid')
source_replicaid = request_spec.get('source_replicaid')
if snapshot_id is not None:
# Make sure the snapshot is not deleted until we are done with it.
locked_action = "%s-%s" % (snapshot_id, 'delete_snapshot')
elif source_volid is not None:
# Make sure the volume is not deleted until we are done with it.
locked_action = "%s-%s" % (source_volid, 'delete_volume')
elif source_replicaid is not None:
# Make sure the volume is not deleted until we are done with it.
locked_action = "%s-%s" % (source_replicaid, 'delete_volume')
else:
locked_action = None
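        # Note: the "<id>-delete_volume" / "<id>-delete_snapshot" strings
        # built above intentionally match the "%s-%s" % (id, f.__name__)
        # lock names taken by the locked_volume_operation and
        # locked_snapshot_operation decorators, so the source volume or
        # snapshot cannot be deleted while this flow is running.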
def _run_flow():
            # This code executes the create volume flow. If something goes
            # wrong, the flow reverts all the work that was done and reraises
            # an exception. Otherwise, all data generated by the flow becomes
            # available in the flow engine's storage.
with flow_utils.DynamicLogListener(flow_engine, logger=LOG):
flow_engine.run()
@utils.synchronized(locked_action, external=True)
def _run_flow_locked():
_run_flow()
# NOTE(dulek): Flag to indicate if volume was rescheduled. Used to
# decide if allocated_capacity should be incremented.
rescheduled = False
vol_ref = None
try:
if locked_action is None:
_run_flow()
else:
_run_flow_locked()
finally:
try:
vol_ref = flow_engine.storage.fetch('volume_ref')
except tfe.NotFound:
                # If there's no vol_ref, then the flow was reverted. Let's
                # check whether rescheduling occurred.
try:
rescheduled = flow_engine.storage.get_revert_result(
create_volume.OnFailureRescheduleTask.make_name(
[create_volume.ACTION]))
except tfe.NotFound:
pass
if not rescheduled:
if not vol_ref:
# Flow was reverted and not rescheduled, fetching
# volume_ref from the DB, because it will be needed.
vol_ref = objects.Volume.get_by_id(context, volume.id)
# NOTE(dulek): Volume wasn't rescheduled so we need to update
# volume stats as these are decremented on delete.
self._update_allocated_capacity(vol_ref)
LOG.info(_LI("Created volume successfully."), resource=vol_ref)
return vol_ref.id
@locked_volume_operation
def delete_volume(self, context, volume_id,
unmanage_only=False,
volume=None,
cascade=False):
"""Deletes and unexports volume.
1. Delete a volume (normal case)
Delete a volume and update quotas.
2. Delete a migration volume
If deleting the volume in a migration, we want to skip
quotas but we need database updates for the volume.
"""
context = context.elevated()
try:
# FIXME(dulek): Remove this in v3.0 of RPC API.
if volume is None:
volume = objects.Volume.get_by_id(context, volume_id)
else:
volume.refresh()
except exception.VolumeNotFound:
# NOTE(thingee): It could be possible for a volume to
# be deleted when resuming deletes from init_host().
LOG.debug("Attempted delete of non-existent volume: %s", volume_id)
return
if context.project_id != volume.project_id:
project_id = volume.project_id
else:
project_id = context.project_id
if volume['attach_status'] == "attached":
# Volume is still attached, need to detach first
raise exception.VolumeAttached(volume_id=volume_id)
if vol_utils.extract_host(volume.host) != self.host:
raise exception.InvalidVolume(
reason=_("volume is not local to this node"))
if unmanage_only and cascade:
# This could be done, but is ruled out for now just
# for simplicity.
raise exception.Invalid(
reason=_("Unmanage and cascade delete options "
"are mutually exclusive."))
# The status 'deleting' is not included, because it only applies to
# the source volume to be deleted after a migration. No quota
# needs to be handled for it.
is_migrating = volume.migration_status not in (None, 'error',
'success')
is_migrating_dest = (is_migrating and
volume.migration_status.startswith(
'target:'))
self._notify_about_volume_usage(context, volume, "delete.start")
try:
# NOTE(flaper87): Verify the driver is enabled
# before going forward. The exception will be caught
# and the volume status updated.
utils.require_driver_initialized(self.driver)
self.driver.remove_export(context, volume)
if unmanage_only:
self.driver.unmanage(volume)
elif cascade:
LOG.debug('Performing cascade delete.')
snapshots = objects.SnapshotList.get_all_for_volume(context,
volume.id)
for s in snapshots:
if s.status != 'deleting':
self._clear_db(context, is_migrating_dest, volume,
'error_deleting')
msg = (_("Snapshot %(id)s was found in state "
"%(state)s rather than 'deleting' during "
"cascade delete.") % {'id': s.id,
'state': s.status})
raise exception.InvalidSnapshot(reason=msg)
self.delete_snapshot(context, s)
LOG.debug('Snapshots deleted, issuing volume delete')
self.driver.delete_volume(volume)
else:
self.driver.delete_volume(volume)
except exception.VolumeIsBusy:
LOG.error(_LE("Unable to delete busy volume."),
resource=volume)
# If this is a destination volume, we have to clear the database
# record to avoid user confusion.
self._clear_db(context, is_migrating_dest, volume,
'available')
return
except Exception:
with excutils.save_and_reraise_exception():
# If this is a destination volume, we have to clear the
# database record to avoid user confusion.
self._clear_db(context, is_migrating_dest, volume,
'error_deleting')
# If deleting source/destination volume in a migration, we should
# skip quotas.
if not is_migrating:
# Get reservations
try:
reserve_opts = {'volumes': -1,
'gigabytes': -volume.size}
QUOTAS.add_volume_type_opts(context,
reserve_opts,
volume.volume_type_id)
reservations = QUOTAS.reserve(context,
project_id=project_id,
**reserve_opts)
except Exception:
reservations = None
LOG.exception(_LE("Failed to update usages deleting volume."),
resource=volume)
# Delete glance metadata if it exists
self.db.volume_glance_metadata_delete_by_volume(context, volume_id)
volume.destroy()
# If deleting source/destination volume in a migration, we should
# skip quotas.
if not is_migrating:
self._notify_about_volume_usage(context, volume, "delete.end")
# Commit the reservations
if reservations:
QUOTAS.commit(context, reservations, project_id=project_id)
pool = vol_utils.extract_host(volume.host, 'pool')
if pool is None:
# Legacy volume, put them into default pool
pool = self.driver.configuration.safe_get(
'volume_backend_name') or vol_utils.extract_host(
volume.host, 'pool', True)
size = volume.size
try:
self.stats['pools'][pool]['allocated_capacity_gb'] -= size
except KeyError:
self.stats['pools'][pool] = dict(
allocated_capacity_gb=-size)
self.publish_service_capabilities(context)
LOG.info(_LI("Deleted volume successfully."), resource=volume)
def _clear_db(self, context, is_migrating_dest, volume_ref, status):
# This method is called when driver.unmanage() or
# driver.delete_volume() fails in delete_volume(), so it is already
# in the exception handling part.
if is_migrating_dest:
volume_ref.destroy()
LOG.error(_LE("Unable to delete the destination volume "
"during volume migration, (NOTE: database "
"record needs to be deleted)."), resource=volume_ref)
else:
volume_ref.status = status
volume_ref.save()
def create_snapshot(self, context, volume_id, snapshot):
"""Creates and exports the snapshot."""
context = context.elevated()
self._notify_about_snapshot_usage(
context, snapshot, "create.start")
try:
# NOTE(flaper87): Verify the driver is enabled
# before going forward. The exception will be caught
# and the snapshot status updated.
utils.require_driver_initialized(self.driver)
# Pass context so that drivers that want to use it, can,
# but it is not a requirement for all drivers.
snapshot.context = context
model_update = self.driver.create_snapshot(snapshot)
if model_update:
snapshot.update(model_update)
snapshot.save()
except Exception:
with excutils.save_and_reraise_exception():
snapshot.status = fields.SnapshotStatus.ERROR
snapshot.save()
vol_ref = self.db.volume_get(context, volume_id)
if vol_ref.bootable:
try:
self.db.volume_glance_metadata_copy_to_snapshot(
context, snapshot.id, volume_id)
except exception.GlanceMetadataNotFound:
                # If the volume was not created from an image, no glance
                # metadata would be available for that volume in the
                # volume glance metadata table.
pass
except exception.CinderException as ex:
LOG.exception(_LE("Failed updating snapshot"
" metadata using the provided volumes"
" %(volume_id)s metadata"),
{'volume_id': volume_id}, resource=snapshot)
snapshot.status = fields.SnapshotStatus.ERROR
snapshot.save()
raise exception.MetadataCopyFailure(reason=six.text_type(ex))
snapshot.status = fields.SnapshotStatus.AVAILABLE
snapshot.progress = '100%'
snapshot.save()
self._notify_about_snapshot_usage(context, snapshot, "create.end")
LOG.info(_LI("Create snapshot completed successfully"),
resource=snapshot)
return snapshot.id
@locked_snapshot_operation
def delete_snapshot(self, context, snapshot, unmanage_only=False):
"""Deletes and unexports snapshot."""
context = context.elevated()
snapshot._context = context
project_id = snapshot.project_id
self._notify_about_snapshot_usage(
context, snapshot, "delete.start")
try:
# NOTE(flaper87): Verify the driver is enabled
# before going forward. The exception will be caught
# and the snapshot status updated.
utils.require_driver_initialized(self.driver)
# Pass context so that drivers that want to use it, can,
# but it is not a requirement for all drivers.
snapshot.context = context
snapshot.save()
if unmanage_only:
self.driver.unmanage_snapshot(snapshot)
else:
self.driver.delete_snapshot(snapshot)
except exception.SnapshotIsBusy:
LOG.error(_LE("Delete snapshot failed, due to snapshot busy."),
resource=snapshot)
snapshot.status = fields.SnapshotStatus.AVAILABLE
snapshot.save()
return
except Exception:
with excutils.save_and_reraise_exception():
snapshot.status = fields.SnapshotStatus.ERROR_DELETING
snapshot.save()
# Get reservations
try:
if CONF.no_snapshot_gb_quota:
reserve_opts = {'snapshots': -1}
else:
reserve_opts = {
'snapshots': -1,
'gigabytes': -snapshot.volume_size,
}
volume_ref = self.db.volume_get(context, snapshot.volume_id)
QUOTAS.add_volume_type_opts(context,
reserve_opts,
volume_ref.get('volume_type_id'))
reservations = QUOTAS.reserve(context,
project_id=project_id,
**reserve_opts)
except Exception:
reservations = None
LOG.exception(_LE("Update snapshot usages failed."),
resource=snapshot)
self.db.volume_glance_metadata_delete_by_snapshot(context, snapshot.id)
snapshot.destroy()
self._notify_about_snapshot_usage(context, snapshot, "delete.end")
# Commit the reservations
if reservations:
QUOTAS.commit(context, reservations, project_id=project_id)
LOG.info(_LI("Delete snapshot completed successfully"),
resource=snapshot)
def attach_volume(self, context, volume_id, instance_uuid, host_name,
mountpoint, mode):
"""Updates db to show volume is attached."""
@utils.synchronized(volume_id, external=True)
def do_attach():
# check the volume status before attaching
volume = self.db.volume_get(context, volume_id)
volume_metadata = self.db.volume_admin_metadata_get(
context.elevated(), volume_id)
if volume['status'] == 'attaching':
if (volume_metadata.get('attached_mode') and
volume_metadata.get('attached_mode') != mode):
raise exception.InvalidVolume(
reason=_("being attached by different mode"))
if (volume['status'] == 'in-use' and not volume['multiattach']
and not volume['migration_status']):
raise exception.InvalidVolume(
reason=_("volume is already attached"))
host_name_sanitized = utils.sanitize_hostname(
host_name) if host_name else None
if instance_uuid:
attachments = \
self.db.volume_attachment_get_all_by_instance_uuid(
context, volume_id, instance_uuid)
else:
attachments = (
self.db.volume_attachment_get_all_by_host(
context,
volume_id,
host_name_sanitized))
if attachments:
self.db.volume_update(context, volume_id,
{'status': 'in-use'})
return
self._notify_about_volume_usage(context, volume,
"attach.start")
values = {'volume_id': volume_id,
'attach_status': 'attaching', }
attachment = self.db.volume_attach(context.elevated(), values)
volume_metadata = self.db.volume_admin_metadata_update(
context.elevated(), volume_id,
{"attached_mode": mode}, False)
attachment_id = attachment['id']
if instance_uuid and not uuidutils.is_uuid_like(instance_uuid):
self.db.volume_attachment_update(context, attachment_id,
{'attach_status':
'error_attaching'})
raise exception.InvalidUUID(uuid=instance_uuid)
volume = self.db.volume_get(context, volume_id)
if volume_metadata.get('readonly') == 'True' and mode != 'ro':
self.db.volume_update(context, volume_id,
{'status': 'error_attaching'})
self.message_api.create(
context, defined_messages.ATTACH_READONLY_VOLUME,
context.project_id, resource_type=resource_types.VOLUME,
resource_uuid=volume_id)
raise exception.InvalidVolumeAttachMode(mode=mode,
volume_id=volume_id)
try:
# NOTE(flaper87): Verify the driver is enabled
# before going forward. The exception will be caught
# and the volume status updated.
utils.require_driver_initialized(self.driver)
LOG.debug('Attaching volume %(volume_id)s to instance '
'%(instance)s at mountpoint %(mount)s on host '
'%(host)s.',
{'volume_id': volume_id, 'instance': instance_uuid,
'mount': mountpoint, 'host': host_name_sanitized},
resource=volume)
self.driver.attach_volume(context,
volume,
instance_uuid,
host_name_sanitized,
mountpoint)
except Exception:
with excutils.save_and_reraise_exception():
self.db.volume_attachment_update(
context, attachment_id,
{'attach_status': 'error_attaching'})
volume = self.db.volume_attached(context.elevated(),
attachment_id,
instance_uuid,
host_name_sanitized,
mountpoint,
mode)
self._notify_about_volume_usage(context, volume, "attach.end")
LOG.info(_LI("Attach volume completed successfully."),
resource=volume)
return self.db.volume_attachment_get(context, attachment_id)
return do_attach()
@locked_detach_operation
def detach_volume(self, context, volume_id, attachment_id=None):
"""Updates db to show volume is detached."""
# TODO(vish): refactor this into a more general "unreserve"
volume = self.db.volume_get(context, volume_id)
attachment = None
if attachment_id:
try:
attachment = self.db.volume_attachment_get(context,
attachment_id)
except exception.VolumeAttachmentNotFound:
LOG.info(_LI("Volume detach called, but volume not attached."),
resource=volume)
# We need to make sure the volume status is set to the correct
# status. It could be in detaching status now, and we don't
# want to leave it there.
self.db.volume_detached(context, volume_id, attachment_id)
return
else:
            # We can try to degrade gracefully here by detaching a volume
            # without the attachment_id if the volume only has one
            # attachment. This is for backwards compatibility.
attachments = self.db.volume_attachment_get_all_by_volume_id(
context, volume_id)
if len(attachments) > 1:
                # There is more than one attachment for this volume,
                # so we must have an attachment_id.
msg = _("Detach volume failed: More than one attachment, "
"but no attachment_id provided.")
LOG.error(msg, resource=volume)
raise exception.InvalidVolume(reason=msg)
elif len(attachments) == 1:
attachment = attachments[0]
else:
                # There aren't any attachments for this volume,
                # so set the status to available and move on.
LOG.info(_LI("Volume detach called, but volume not attached."),
resource=volume)
self.db.volume_update(context, volume_id,
{'status': 'available',
'attach_status': 'detached'})
return
self._notify_about_volume_usage(context, volume, "detach.start")
try:
# NOTE(flaper87): Verify the driver is enabled
# before going forward. The exception will be caught
# and the volume status updated.
utils.require_driver_initialized(self.driver)
LOG.debug('Detaching volume %(volume_id)s from instance '
'%(instance)s.',
{'volume_id': volume_id,
'instance': attachment.get('instance_uuid')},
resource=volume)
self.driver.detach_volume(context, volume, attachment)
except Exception:
with excutils.save_and_reraise_exception():
self.db.volume_attachment_update(
context, attachment.get('id'),
{'attach_status': 'error_detaching'})
self.db.volume_detached(context.elevated(), volume_id,
attachment.get('id'))
self.db.volume_admin_metadata_delete(context.elevated(), volume_id,
'attached_mode')
        # NOTE(jdg): We used to do an ensure export here to
        # catch upgrades while volumes were attached (E->F);
        # this was necessary to convert in-use volumes from
        # int IDs to UUIDs. We don't need this any longer.
        # We're going to remove the export here
        # (delete the iscsi target).
volume = self.db.volume_get(context, volume_id)
try:
utils.require_driver_initialized(self.driver)
self.driver.remove_export(context.elevated(), volume)
except exception.DriverNotInitialized:
with excutils.save_and_reraise_exception():
LOG.exception(_LE("Detach volume failed, due to "
"uninitialized driver."),
resource=volume)
except Exception as ex:
LOG.exception(_LE("Detach volume failed, due to "
"remove-export failure."),
resource=volume)
raise exception.RemoveExportException(volume=volume_id,
reason=six.text_type(ex))
self._notify_about_volume_usage(context, volume, "detach.end")
LOG.info(_LI("Detach volume completed successfully."), resource=volume)
def _create_image_cache_volume_entry(self, ctx, volume_ref,
image_id, image_meta):
"""Create a new image-volume and cache entry for it.
This assumes that the image has already been downloaded and stored
in the volume described by the volume_ref.
"""
image_volume = None
try:
if not self.image_volume_cache.ensure_space(
ctx,
volume_ref['size'],
volume_ref['host']):
LOG.warning(_LW('Unable to ensure space for image-volume in'
' cache. Will skip creating entry for image'
' %(image)s on host %(host)s.'),
{'image': image_id, 'host': volume_ref['host']})
return
image_volume = self._clone_image_volume(ctx,
volume_ref,
image_meta)
if not image_volume:
LOG.warning(_LW('Unable to clone image_volume for image '
'%(image_id)s will not create cache entry.'),
{'image_id': image_id})
return
self.image_volume_cache.create_cache_entry(
ctx,
image_volume,
image_id,
image_meta
)
except exception.CinderException as e:
LOG.warning(_LW('Failed to create new image-volume cache entry.'
' Error: %(exception)s'), {'exception': e})
if image_volume:
self.delete_volume(ctx, image_volume.id)
def _clone_image_volume(self, ctx, volume, image_meta):
volume_type_id = volume.get('volume_type_id')
reserve_opts = {'volumes': 1, 'gigabytes': volume.size}
QUOTAS.add_volume_type_opts(ctx, reserve_opts, volume_type_id)
reservations = QUOTAS.reserve(ctx, **reserve_opts)
try:
new_vol_values = {k: volume[k] for k in set(volume.keys()) -
self._VOLUME_CLONE_SKIP_PROPERTIES}
new_vol_values['volume_type_id'] = volume_type_id
new_vol_values['attach_status'] = 'detached'
new_vol_values['status'] = 'creating'
new_vol_values['project_id'] = ctx.project_id
new_vol_values['display_name'] = 'image-%s' % image_meta['id']
new_vol_values['source_volid'] = volume.id
LOG.debug('Creating image volume entry: %s.', new_vol_values)
image_volume = objects.Volume(context=ctx, **new_vol_values)
image_volume.create()
except Exception as ex:
            LOG.exception(_LE('Create clone_image_volume: %(volume_id)s '
'for image %(image_id)s, '
'failed (Exception: %(except)s)'),
{'volume_id': volume.id,
'image_id': image_meta['id'],
'except': ex})
QUOTAS.rollback(ctx, reservations)
return
QUOTAS.commit(ctx, reservations,
project_id=new_vol_values['project_id'])
try:
self.create_volume(ctx, image_volume.id,
allow_reschedule=False, volume=image_volume)
image_volume = self.db.volume_get(ctx, image_volume.id)
if image_volume.status != 'available':
raise exception.InvalidVolume(_('Volume is not available.'))
self.db.volume_admin_metadata_update(ctx.elevated(),
image_volume.id,
{'readonly': 'True'},
False)
return image_volume
except exception.CinderException:
LOG.exception(_LE('Failed to clone volume %(volume_id)s for '
'image %(image_id)s.'),
{'volume_id': volume.id,
'image_id': image_meta['id']})
try:
self.delete_volume(ctx, image_volume.id)
except exception.CinderException:
LOG.exception(_LE('Could not delete the image volume %(id)s.'),
                              {'id': image_volume.id})
return
def _clone_image_volume_and_add_location(self, ctx, volume, image_service,
image_meta):
"""Create a cloned volume and register its location to the image."""
if (image_meta['disk_format'] != 'raw' or
image_meta['container_format'] != 'bare'):
return False
image_volume_context = ctx
if self.driver.configuration.image_upload_use_internal_tenant:
internal_ctx = context.get_internal_tenant_context()
if internal_ctx:
image_volume_context = internal_ctx
image_volume = self._clone_image_volume(image_volume_context,
volume,
image_meta)
if not image_volume:
return False
uri = 'cinder://%s' % image_volume.id
image_registered = None
try:
image_registered = image_service.add_location(
ctx, image_meta['id'], uri, {})
except (exception.NotAuthorized, exception.Invalid,
exception.NotFound):
LOG.exception(_LE('Failed to register image volume location '
'%(uri)s.'), {'uri': uri})
if not image_registered:
LOG.warning(_LW('Registration of image volume URI %(uri)s '
'to image %(image_id)s failed.'),
{'uri': uri, 'image_id': image_meta['id']})
try:
self.delete_volume(image_volume_context, image_volume)
except exception.CinderException:
LOG.exception(_LE('Could not delete failed image volume '
'%(id)s.'), {'id': image_volume.id})
return False
image_volume_meta = {'glance_image_id': image_meta['id'],
'image_owner': ctx.project_id}
self.db.volume_metadata_update(image_volume_context,
image_volume.id,
image_volume_meta,
False)
return True
def copy_volume_to_image(self, context, volume_id, image_meta):
"""Uploads the specified volume to Glance.
image_meta is a dictionary containing the following keys:
'id', 'container_format', 'disk_format'
"""
payload = {'volume_id': volume_id, 'image_id': image_meta['id']}
image_service = None
try:
volume = self.db.volume_get(context, volume_id)
# NOTE(flaper87): Verify the driver is enabled
# before going forward. The exception will be caught
# and the volume status updated.
utils.require_driver_initialized(self.driver)
image_service, image_id = \
glance.get_remote_image_service(context, image_meta['id'])
if (self.driver.configuration.image_upload_use_cinder_backend
and self._clone_image_volume_and_add_location(
context, volume, image_service, image_meta)):
LOG.debug("Registered image volume location to glance "
"image-id: %(image_id)s.",
{'image_id': image_meta['id']},
resource=volume)
else:
self.driver.copy_volume_to_image(context, volume,
image_service, image_meta)
LOG.debug("Uploaded volume to glance image-id: %(image_id)s.",
{'image_id': image_meta['id']},
resource=volume)
except Exception as error:
LOG.error(_LE("Upload volume to image encountered an error "
"(image-id: %(image_id)s)."),
{'image_id': image_meta['id']},
resource=volume)
if image_service is not None:
# Deletes the image if it is in queued or saving state
self._delete_image(context, image_meta['id'], image_service)
with excutils.save_and_reraise_exception():
payload['message'] = six.text_type(error)
if isinstance(error, exception.ImageLimitExceeded):
self.message_api.create(
context, defined_messages.IMAGE_FROM_VOLUME_OVER_QUOTA,
context.project_id,
resource_type=resource_types.VOLUME,
resource_uuid=volume_id)
finally:
self.db.volume_update_status_based_on_attachment(context,
volume_id)
LOG.info(_LI("Copy volume to image completed successfully."),
resource=volume)
def _delete_image(self, context, image_id, image_service):
"""Deletes an image stuck in queued or saving state."""
try:
image_meta = image_service.show(context, image_id)
image_status = image_meta.get('status')
if image_status == 'queued' or image_status == 'saving':
LOG.warning(_LW("Deleting image in unexpected status: "
"%(image_status)s."),
{'image_status': image_status},
resource={'type': 'image', 'id': image_id})
image_service.delete(context, image_id)
except Exception:
LOG.warning(_LW("Image delete encountered an error."),
exc_info=True, resource={'type': 'image',
'id': image_id})
def initialize_connection(self, context, volume_id, connector):
"""Prepare volume for connection from host represented by connector.
This method calls the driver initialize_connection and returns
it to the caller. The connector parameter is a dictionary with
information about the host that will connect to the volume in the
following format::
{
'ip': ip,
'initiator': initiator,
}
ip: the ip address of the connecting machine
initiator: the iscsi initiator name of the connecting machine.
This can be None if the connecting machine does not support iscsi
connections.
driver is responsible for doing any necessary security setup and
returning a connection_info dictionary in the following format::
{
'driver_volume_type': driver_volume_type,
'data': data,
}
driver_volume_type: a string to identify the type of volume. This
can be used by the calling code to determine the
strategy for connecting to the volume. This could
be 'iscsi', 'rbd', 'sheepdog', etc.
data: this is the data that the calling code will use to connect
to the volume. Keep in mind that this will be serialized to
json in various places, so it should not contain any non-json
data types.
"""
# NOTE(flaper87): Verify the driver is enabled
# before going forward. The exception will be caught
# and the volume status updated.
utils.require_driver_initialized(self.driver)
volume = self.db.volume_get(context, volume_id)
model_update = None
try:
self.driver.validate_connector(connector)
except exception.InvalidConnectorException as err:
raise exception.InvalidInput(reason=six.text_type(err))
except Exception as err:
err_msg = (_("Validate volume connection failed "
"(error: %(err)s).") % {'err': six.text_type(err)})
LOG.error(err_msg, resource=volume)
raise exception.VolumeBackendAPIException(data=err_msg)
try:
model_update = self.driver.create_export(context.elevated(),
volume, connector)
except exception.CinderException:
err_msg = (_("Create export for volume failed."))
LOG.exception(err_msg, resource=volume)
raise exception.VolumeBackendAPIException(data=err_msg)
try:
if model_update:
volume = self.db.volume_update(context,
volume_id,
model_update)
except exception.CinderException as ex:
LOG.exception(_LE("Model update failed."), resource=volume)
raise exception.ExportFailure(reason=six.text_type(ex))
try:
conn_info = self.driver.initialize_connection(volume, connector)
except Exception as err:
err_msg = (_("Driver initialize connection failed "
"(error: %(err)s).") % {'err': six.text_type(err)})
LOG.error(err_msg, resource=volume)
self.driver.remove_export(context.elevated(), volume)
raise exception.VolumeBackendAPIException(data=err_msg)
# Add qos_specs to connection info
typeid = volume['volume_type_id']
specs = None
if typeid:
res = volume_types.get_volume_type_qos_specs(typeid)
qos = res['qos_specs']
# only pass qos_specs that is designated to be consumed by
# front-end, or both front-end and back-end.
if qos and qos.get('consumer') in ['front-end', 'both']:
specs = qos.get('specs')
qos_spec = dict(qos_specs=specs)
conn_info['data'].update(qos_spec)
# Add access_mode to connection info
volume_metadata = self.db.volume_admin_metadata_get(context.elevated(),
volume_id)
access_mode = volume_metadata.get('attached_mode')
if access_mode is None:
# NOTE(zhiyan): client didn't call 'os-attach' before
access_mode = ('ro'
if volume_metadata.get('readonly') == 'True'
else 'rw')
conn_info['data']['access_mode'] = access_mode
# Add encrypted flag to connection_info if not set in the driver.
if conn_info['data'].get('encrypted') is None:
encrypted = bool(volume.get('encryption_key_id'))
conn_info['data']['encrypted'] = encrypted
# Add discard flag to connection_info if not set in the driver and
# configured to be reported.
if conn_info['data'].get('discard') is None:
discard_supported = (self.driver.configuration
.safe_get('report_discard_supported'))
if discard_supported:
conn_info['data']['discard'] = True
LOG.info(_LI("Initialize volume connection completed successfully."),
resource=volume)
return conn_info
def terminate_connection(self, context, volume_id, connector, force=False):
"""Cleanup connection from host represented by connector.
The format of connector is the same as for initialize_connection.
"""
# NOTE(flaper87): Verify the driver is enabled
# before going forward. The exception will be caught
# and the volume status updated.
utils.require_driver_initialized(self.driver)
volume_ref = self.db.volume_get(context, volume_id)
try:
self.driver.terminate_connection(volume_ref, connector,
force=force)
except Exception as err:
err_msg = (_('Terminate volume connection failed: %(err)s')
% {'err': six.text_type(err)})
LOG.error(err_msg, resource=volume_ref)
raise exception.VolumeBackendAPIException(data=err_msg)
LOG.info(_LI("Terminate volume connection completed successfully."),
resource=volume_ref)
def remove_export(self, context, volume_id):
"""Removes an export for a volume."""
utils.require_driver_initialized(self.driver)
volume_ref = self.db.volume_get(context, volume_id)
try:
self.driver.remove_export(context, volume_ref)
except Exception:
msg = _("Remove volume export failed.")
LOG.exception(msg, resource=volume_ref)
raise exception.VolumeBackendAPIException(data=msg)
LOG.info(_LI("Remove volume export completed successfully."),
resource=volume_ref)
def accept_transfer(self, context, volume_id, new_user, new_project):
# NOTE(flaper87): Verify the driver is enabled
# before going forward. The exception will be caught
# and the volume status updated.
utils.require_driver_initialized(self.driver)
# NOTE(jdg): need elevated context as we haven't "given" the vol
# yet
volume_ref = self.db.volume_get(context.elevated(), volume_id)
# NOTE(jdg): Some drivers tie provider info (CHAP) to tenant
# for those that do allow them to return updated model info
model_update = self.driver.accept_transfer(context,
volume_ref,
new_user,
new_project)
if model_update:
try:
self.db.volume_update(context.elevated(),
volume_id,
model_update)
except exception.CinderException:
with excutils.save_and_reraise_exception():
LOG.exception(_LE("Update volume model for "
"transfer operation failed."),
resource=volume_ref)
self.db.volume_update(context.elevated(),
volume_id,
{'status': 'error'})
LOG.info(_LI("Transfer volume completed successfully."),
resource=volume_ref)
return model_update
def _connect_device(self, conn):
use_multipath = self.configuration.use_multipath_for_image_xfer
device_scan_attempts = self.configuration.num_volume_device_scan_tries
protocol = conn['driver_volume_type']
connector = utils.brick_get_connector(
protocol,
use_multipath=use_multipath,
device_scan_attempts=device_scan_attempts,
conn=conn)
vol_handle = connector.connect_volume(conn['data'])
root_access = True
if not connector.check_valid_device(vol_handle['path'], root_access):
if isinstance(vol_handle['path'], six.string_types):
raise exception.DeviceUnavailable(
path=vol_handle['path'],
reason=(_("Unable to access the backend storage via the "
"path %(path)s.") %
{'path': vol_handle['path']}))
else:
raise exception.DeviceUnavailable(
path=None,
reason=(_("Unable to access the backend storage via file "
"handle.")))
return {'conn': conn, 'device': vol_handle, 'connector': connector}
def _attach_volume(self, ctxt, volume, properties, remote=False):
status = volume['status']
if remote:
rpcapi = volume_rpcapi.VolumeAPI()
try:
conn = rpcapi.initialize_connection(ctxt, volume, properties)
except Exception:
with excutils.save_and_reraise_exception():
LOG.error(_LE("Failed to attach volume %(vol)s."),
{'vol': volume['id']})
self.db.volume_update(ctxt, volume['id'],
{'status': status})
else:
conn = self.initialize_connection(ctxt, volume['id'], properties)
return self._connect_device(conn)
def _detach_volume(self, ctxt, attach_info, volume, properties,
force=False, remote=False):
connector = attach_info['connector']
connector.disconnect_volume(attach_info['conn']['data'],
attach_info['device'])
if remote:
rpcapi = volume_rpcapi.VolumeAPI()
rpcapi.terminate_connection(ctxt, volume, properties, force=force)
rpcapi.remove_export(ctxt, volume)
else:
try:
self.terminate_connection(ctxt, volume['id'], properties,
force=force)
self.remove_export(ctxt, volume['id'])
except Exception as err:
with excutils.save_and_reraise_exception():
LOG.error(_LE('Unable to terminate volume connection: '
'%(err)s.') % {'err': err})
def _copy_volume_data(self, ctxt, src_vol, dest_vol, remote=None):
"""Copy data from src_vol to dest_vol."""
LOG.debug('copy_data_between_volumes %(src)s -> %(dest)s.',
{'src': src_vol['name'], 'dest': dest_vol['name']})
properties = utils.brick_get_connector_properties()
dest_remote = remote in ['dest', 'both']
dest_attach_info = self._attach_volume(ctxt, dest_vol, properties,
remote=dest_remote)
try:
src_remote = remote in ['src', 'both']
src_attach_info = self._attach_volume(ctxt, src_vol, properties,
remote=src_remote)
except Exception:
with excutils.save_and_reraise_exception():
LOG.error(_LE("Failed to attach source volume for copy."))
self._detach_volume(ctxt, dest_attach_info, dest_vol,
properties, remote=dest_remote)
# Check the backend capabilities of migration destination host.
rpcapi = volume_rpcapi.VolumeAPI()
capabilities = rpcapi.get_capabilities(ctxt, dest_vol['host'],
False)
sparse_copy_volume = bool(capabilities and
capabilities.get('sparse_copy_volume',
False))
copy_error = True
try:
size_in_mb = int(src_vol['size']) * units.Ki # vol size is in GB
vol_utils.copy_volume(src_attach_info['device']['path'],
dest_attach_info['device']['path'],
size_in_mb,
self.configuration.volume_dd_blocksize,
sparse=sparse_copy_volume)
copy_error = False
except Exception:
with excutils.save_and_reraise_exception():
LOG.error(_LE("Failed to copy volume %(src)s to %(dest)s."),
{'src': src_vol['id'], 'dest': dest_vol['id']})
finally:
try:
self._detach_volume(ctxt, dest_attach_info, dest_vol,
properties, force=copy_error,
remote=dest_remote)
finally:
self._detach_volume(ctxt, src_attach_info, src_vol,
properties, force=copy_error,
remote=src_remote)
def _migrate_volume_generic(self, ctxt, volume, host, new_type_id):
rpcapi = volume_rpcapi.VolumeAPI()
# Create new volume on remote host
skip = self._VOLUME_CLONE_SKIP_PROPERTIES | {'host'}
new_vol_values = {k: volume[k] for k in set(volume.keys()) - skip}
if new_type_id:
new_vol_values['volume_type_id'] = new_type_id
new_volume = objects.Volume(
context=ctxt,
host=host['host'],
status='creating',
attach_status='detached',
migration_status='target:%s' % volume['id'],
**new_vol_values
)
new_volume.create()
rpcapi.create_volume(ctxt, new_volume, host['host'],
None, None, allow_reschedule=False)
# Wait for new_volume to become ready
starttime = time.time()
deadline = starttime + CONF.migration_create_volume_timeout_secs
# TODO(thangp): Replace get_by_id with refresh when it is available
new_volume = objects.Volume.get_by_id(ctxt, new_volume.id)
tries = 0
while new_volume.status != 'available':
tries += 1
now = time.time()
if new_volume.status == 'error':
msg = _("failed to create new_volume on destination host")
self._clean_temporary_volume(ctxt, volume,
new_volume,
clean_db_only=True)
raise exception.VolumeMigrationFailed(reason=msg)
elif now > deadline:
msg = _("timeout creating new_volume on destination host")
self._clean_temporary_volume(ctxt, volume,
new_volume,
clean_db_only=True)
raise exception.VolumeMigrationFailed(reason=msg)
else:
time.sleep(tries ** 2)
# TODO(thangp): Replace get_by_id with refresh when it is
# available
new_volume = objects.Volume.get_by_id(ctxt, new_volume.id)
# Copy the source volume to the destination volume
try:
attachments = volume.volume_attachment
if not attachments:
# Pre- and post-copy driver-specific actions
self.driver.before_volume_copy(ctxt, volume, new_volume,
remote='dest')
self._copy_volume_data(ctxt, volume, new_volume, remote='dest')
self.driver.after_volume_copy(ctxt, volume, new_volume,
remote='dest')
# The above call is synchronous so we complete the migration
self.migrate_volume_completion(ctxt, volume.id,
new_volume.id,
error=False,
volume=volume,
new_volume=new_volume)
else:
nova_api = compute.API()
# This is an async call to Nova, which will call the completion
# when it's done
for attachment in attachments:
instance_uuid = attachment['instance_uuid']
nova_api.update_server_volume(ctxt, instance_uuid,
volume.id,
new_volume.id)
except Exception:
with excutils.save_and_reraise_exception():
LOG.exception(_LE(
"Failed to copy volume %(vol1)s to %(vol2)s"), {
'vol1': volume.id, 'vol2': new_volume.id})
self._clean_temporary_volume(ctxt, volume,
new_volume)
def _clean_temporary_volume(self, ctxt, volume, new_volume,
clean_db_only=False):
        # If we're in the migrating phase, we need to clean up the
        # destination volume because the source volume remains.
if volume.migration_status == 'migrating':
try:
if clean_db_only:
# The temporary volume is not created, only DB data
# is created
new_volume.destroy()
else:
# The temporary volume is already created
rpcapi = volume_rpcapi.VolumeAPI()
rpcapi.delete_volume(ctxt, new_volume)
except exception.VolumeNotFound:
LOG.info(_LI("Couldn't find the temporary volume "
"%(vol)s in the database. There is no need "
"to clean up this volume."),
{'vol': new_volume.id})
else:
            # If we're in the completing phase, don't delete the
            # destination because we may have already deleted the
            # source! But the migration_status in the database should
            # be cleared so the volume can be handled after a migration
            # failure.
try:
new_volume.migration_status = None
new_volume.save()
except exception.VolumeNotFound:
LOG.info(_LI("Couldn't find destination volume "
"%(vol)s in the database. The entry might be "
"successfully deleted during migration "
"completion phase."),
{'vol': new_volume.id})
LOG.warning(_LW("Failed to migrate volume. The destination "
"volume %(vol)s is not deleted since the "
"source volume may have been deleted."),
{'vol': new_volume.id})
def migrate_volume_completion(self, ctxt, volume_id, new_volume_id,
error=False, volume=None, new_volume=None):
# FIXME(dulek): Remove this in v3.0 of RPC API.
if volume is None or new_volume is None:
# For older clients, mimic the old behavior and look up the volume
# by its volume_id.
volume = objects.Volume.get_by_id(ctxt, volume_id)
new_volume = objects.Volume.get_by_id(ctxt, new_volume_id)
try:
# NOTE(flaper87): Verify the driver is enabled
# before going forward. The exception will be caught
# and the migration status updated.
utils.require_driver_initialized(self.driver)
except exception.DriverNotInitialized:
with excutils.save_and_reraise_exception():
volume.migration_status = 'error'
volume.save()
LOG.debug("migrate_volume_completion: completing migration for "
"volume %(vol1)s (temporary volume %(vol2)s",
{'vol1': volume.id, 'vol2': new_volume.id})
rpcapi = volume_rpcapi.VolumeAPI()
orig_volume_status = volume.previous_status
if error:
LOG.info(_LI("migrate_volume_completion is cleaning up an error "
"for volume %(vol1)s (temporary volume %(vol2)s"),
{'vol1': volume['id'], 'vol2': new_volume.id})
rpcapi.delete_volume(ctxt, new_volume)
updates = {'migration_status': 'error',
'status': orig_volume_status}
volume.update(updates)
volume.save()
return volume.id
volume.migration_status = 'completing'
volume.save()
# Detach the source volume (if it fails, don't fail the migration)
try:
if orig_volume_status == 'in-use':
attachments = volume.volume_attachment
for attachment in attachments:
self.detach_volume(ctxt, volume.id, attachment['id'])
except Exception as ex:
LOG.error(_LE("Detach migration source volume failed: %(err)s"),
{'err': ex}, resource=volume)
# Give driver (new_volume) a chance to update things as needed
# after a successful migration.
        # Note this needs to go through rpc to the host of the new volume;
        # the current host and driver object is for the "existing" volume.
rpcapi.update_migrated_volume(ctxt, volume, new_volume,
orig_volume_status)
volume.refresh()
new_volume.refresh()
# Swap src and dest DB records so we can continue using the src id and
# asynchronously delete the destination id
updated_new = volume.finish_volume_migration(new_volume)
updates = {'status': orig_volume_status,
'previous_status': volume.status,
'migration_status': 'success'}
if orig_volume_status == 'in-use':
attachments = volume.volume_attachment
for attachment in attachments:
rpcapi.attach_volume(ctxt, volume,
attachment['instance_uuid'],
attachment['attached_host'],
attachment['mountpoint'],
'rw')
volume.update(updates)
volume.save()
# Asynchronous deletion of the source volume in the back-end (now
# pointed by the target volume id)
try:
rpcapi.delete_volume(ctxt, updated_new)
except Exception as ex:
LOG.error(_LE('Failed to request async delete of migration source '
'vol %(vol)s: %(err)s'),
{'vol': volume.id, 'err': ex})
LOG.info(_LI("Complete-Migrate volume completed successfully."),
resource=volume)
return volume.id
def migrate_volume(self, ctxt, volume_id, host, force_host_copy=False,
new_type_id=None, volume=None):
"""Migrate the volume to the specified host (called on source host)."""
# FIXME(dulek): Remove this in v3.0 of RPC API.
if volume is None:
# For older clients, mimic the old behavior and look up the volume
# by its volume_id.
volume = objects.Volume.get_by_id(ctxt, volume_id)
try:
# NOTE(flaper87): Verify the driver is enabled
# before going forward. The exception will be caught
# and the migration status updated.
utils.require_driver_initialized(self.driver)
except exception.DriverNotInitialized:
with excutils.save_and_reraise_exception():
volume.migration_status = 'error'
volume.save()
model_update = None
moved = False
status_update = None
if volume.status in ('retyping', 'maintenance'):
status_update = {'status': volume.previous_status}
volume.migration_status = 'migrating'
volume.save()
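# Two migration paths follow: first try a driver-assisted migration via
# driver.migrate_volume() (skipped when a host copy is forced or a new volume
# type is requested); if the driver does not move the volume, fall back to the
# generic host-copy migration in _migrate_volume_generic().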
if not force_host_copy and new_type_id is None:
try:
LOG.debug("Issue driver.migrate_volume.", resource=volume)
moved, model_update = self.driver.migrate_volume(ctxt,
volume,
host)
if moved:
updates = {'host': host['host'],
'migration_status': 'success',
'previous_status': volume.status}
if status_update:
updates.update(status_update)
if model_update:
updates.update(model_update)
volume.update(updates)
volume.save()
except Exception:
with excutils.save_and_reraise_exception():
updates = {'migration_status': 'error'}
if status_update:
updates.update(status_update)
volume.update(updates)
volume.save()
if not moved:
try:
self._migrate_volume_generic(ctxt, volume, host,
new_type_id)
except Exception:
with excutils.save_and_reraise_exception():
updates = {'migration_status': 'error'}
if status_update:
updates.update(status_update)
volume.update(updates)
volume.save()
LOG.info(_LI("Migrate volume completed successfully."),
resource=volume)
@periodic_task.periodic_task
def _report_driver_status(self, context):
if not self.driver.initialized:
if self.driver.configuration.config_group is None:
config_group = ''
else:
config_group = ('(config name %s)' %
self.driver.configuration.config_group)
LOG.warning(_LW("Update driver status failed: %(config_group)s "
"is uninitialized."),
{'config_group': config_group},
resource={'type': 'driver',
'id': self.driver.__class__.__name__})
else:
volume_stats = self.driver.get_volume_stats(refresh=True)
if self.extra_capabilities:
volume_stats.update(self.extra_capabilities)
if volume_stats:
# Append volume stats with 'allocated_capacity_gb'
self._append_volume_stats(volume_stats)
# Append filter and goodness function if needed
volume_stats = (
self._append_filter_goodness_functions(volume_stats))
# queue it to be sent to the Schedulers.
self.update_service_capabilities(volume_stats)
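# Illustrative sketch of the per-pool stats handled below (only 'pools',
# 'pool_name' and 'allocated_capacity_gb' are used here; the other fields are
# typical driver-reported examples, not guaranteed by every driver):
#
#   volume_stats = {
#       'volume_backend_name': 'backend-1',
#       'pools': [
#           {'pool_name': 'pool-a', 'total_capacity_gb': 500,
#            'free_capacity_gb': 120, 'allocated_capacity_gb': 380},
#       ],
#   }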
def _append_volume_stats(self, vol_stats):
pools = vol_stats.get('pools', None)
if pools and isinstance(pools, list):
for pool in pools:
pool_name = pool['pool_name']
try:
pool_stats = self.stats['pools'][pool_name]
except KeyError:
# Pool not found in volume manager
pool_stats = dict(allocated_capacity_gb=0)
pool.update(pool_stats)
def _append_filter_goodness_functions(self, volume_stats):
"""Returns volume_stats updated as needed."""
# Append filter_function if needed
if 'filter_function' not in volume_stats:
volume_stats['filter_function'] = (
self.driver.get_filter_function())
# Append goodness_function if needed
if 'goodness_function' not in volume_stats:
volume_stats['goodness_function'] = (
self.driver.get_goodness_function())
return volume_stats
def publish_service_capabilities(self, context):
"""Collect driver status and then publish."""
self._report_driver_status(context)
self._publish_service_capabilities(context)
def _notify_about_volume_usage(self,
context,
volume,
event_suffix,
extra_usage_info=None):
vol_utils.notify_about_volume_usage(
context, volume, event_suffix,
extra_usage_info=extra_usage_info, host=self.host)
def _notify_about_snapshot_usage(self,
context,
snapshot,
event_suffix,
extra_usage_info=None):
vol_utils.notify_about_snapshot_usage(
context, snapshot, event_suffix,
extra_usage_info=extra_usage_info, host=self.host)
def _notify_about_consistencygroup_usage(self,
context,
group,
event_suffix,
volumes=None,
extra_usage_info=None):
vol_utils.notify_about_consistencygroup_usage(
context, group, event_suffix,
extra_usage_info=extra_usage_info, host=self.host)
if not volumes:
volumes = self.db.volume_get_all_by_group(context, group.id)
if volumes:
for volume in volumes:
vol_utils.notify_about_volume_usage(
context, volume, event_suffix,
extra_usage_info=extra_usage_info, host=self.host)
def _notify_about_cgsnapshot_usage(self,
context,
cgsnapshot,
event_suffix,
snapshots=None,
extra_usage_info=None):
vol_utils.notify_about_cgsnapshot_usage(
context, cgsnapshot, event_suffix,
extra_usage_info=extra_usage_info, host=self.host)
if not snapshots:
snapshots = objects.SnapshotList.get_all_for_cgsnapshot(
context, cgsnapshot.id)
if snapshots:
for snapshot in snapshots:
vol_utils.notify_about_snapshot_usage(
context, snapshot, event_suffix,
extra_usage_info=extra_usage_info, host=self.host)
def extend_volume(self, context, volume_id, new_size, reservations,
volume=None):
# FIXME(dulek): Remove this in v3.0 of RPC API.
if volume is None:
# For older clients, mimic the old behavior and look up the volume
# by its volume_id.
volume = objects.Volume.get_by_id(context, volume_id)
try:
# NOTE(flaper87): Verify the driver is enabled
# before going forward. The exception will be caught
# and the volume status updated.
utils.require_driver_initialized(self.driver)
except exception.DriverNotInitialized:
with excutils.save_and_reraise_exception():
volume.status = 'error_extending'
volume.save()
project_id = volume.project_id
size_increase = (int(new_size)) - volume.size
self._notify_about_volume_usage(context, volume, "resize.start")
try:
self.driver.extend_volume(volume, new_size)
except Exception:
LOG.exception(_LE("Extend volume failed."),
resource=volume)
try:
self.db.volume_update(context, volume.id,
{'status': 'error_extending'})
raise exception.CinderException(_("Volume %s: Error trying "
"to extend volume") %
volume.id)
finally:
QUOTAS.rollback(context, reservations, project_id=project_id)
return
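# Note: the bare return in the finally block above swallows the
# CinderException raised in the except handler, so a failed extend is
# reported through the 'error_extending' status and the quota rollback
# rather than as an exception to the RPC caller.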
QUOTAS.commit(context, reservations, project_id=project_id)
volume.update({'size': int(new_size), 'status': 'available'})
volume.save()
pool = vol_utils.extract_host(volume.host, 'pool')
if pool is None:
# Legacy volume, put them into default pool
pool = self.driver.configuration.safe_get(
'volume_backend_name') or vol_utils.extract_host(
volume.host, 'pool', True)
try:
self.stats['pools'][pool]['allocated_capacity_gb'] += size_increase
except KeyError:
self.stats['pools'][pool] = dict(
allocated_capacity_gb=size_increase)
self._notify_about_volume_usage(
context, volume, "resize.end",
extra_usage_info={'size': int(new_size)})
LOG.info(_LI("Extend volume completed successfully."),
resource=volume)
def retype(self, ctxt, volume_id, new_type_id, host,
migration_policy='never', reservations=None,
volume=None, old_reservations=None):
def _retype_error(context, volume, old_reservations,
new_reservations, status_update):
try:
volume.update(status_update)
volume.save()
finally:
QUOTAS.rollback(context, old_reservations)
QUOTAS.rollback(context, new_reservations)
context = ctxt.elevated()
# FIXME(dulek): Remove this in v3.0 of RPC API.
if volume is None:
# For older clients, mimic the old behavior and look up the volume
# by its volume_id.
volume = objects.Volume.get_by_id(context, volume_id)
status_update = {'status': volume.previous_status}
if context.project_id != volume.project_id:
project_id = volume.project_id
else:
project_id = context.project_id
try:
# NOTE(flaper87): Verify the driver is enabled
# before going forward. The exception will be caught
# and the volume status updated.
utils.require_driver_initialized(self.driver)
except exception.DriverNotInitialized:
with excutils.save_and_reraise_exception():
# NOTE(flaper87): Other exceptions in this method don't
# set the volume status to error. Should that be done
# here? Setting the volume back to its original status
# for now.
volume.update(status_update)
volume.save()
# If old_reservations has been passed in from the API, we should
# skip quotas.
# TODO(ntpttr): These reservation checks are left in to be backwards
# compatible with Liberty and can be removed in N.
if not old_reservations:
# Get old reservations
try:
reserve_opts = {'volumes': -1, 'gigabytes': -volume.size}
QUOTAS.add_volume_type_opts(context,
reserve_opts,
volume.volume_type_id)
# NOTE(wanghao): We don't need to reserve volumes and gigabytes
# quota for the retype operation since they didn't change; just
# reserving volume_type and type gigabytes is fine.
reserve_opts.pop('volumes')
reserve_opts.pop('gigabytes')
old_reservations = QUOTAS.reserve(context,
project_id=project_id,
**reserve_opts)
except Exception:
volume.update(status_update)
volume.save()
msg = _("Failed to update quota usage while retyping volume.")
LOG.exception(msg, resource=volume)
raise exception.CinderException(msg)
# We already got the new reservations
new_reservations = reservations
# If volume types have the same contents, no need to do anything
retyped = False
diff, all_equal = volume_types.volume_types_diff(
context, volume.volume_type_id, new_type_id)
if all_equal:
retyped = True
# Call driver to try and change the type
retype_model_update = None
# NOTE(jdg): Check to see if the destination host is the same
# as the current. If it's not don't call the driver.retype
# method, otherwise drivers that implement retype may report
# success, but it's invalid in the case of a migrate.
# We assume that those that support pools do this internally
# so we strip off the pools designation
if (not retyped and
vol_utils.hosts_are_equivalent(self.driver.host,
host['host'])):
try:
new_type = volume_types.get_volume_type(context, new_type_id)
ret = self.driver.retype(context,
volume,
new_type,
diff,
host)
# Check if the driver retype provided a model update or
# just a retype indication
if type(ret) == tuple:
retyped, retype_model_update = ret
else:
retyped = ret
if retyped:
LOG.info(_LI("Volume %s: retyped successfully"), volume.id)
except Exception:
retyped = False
LOG.exception(_LE("Volume %s: driver error when trying to "
"retype, falling back to generic "
"mechanism."), volume.id)
# We could not change the type, so we need to migrate the volume, where
# the destination volume will be of the new type
if not retyped:
if migration_policy == 'never':
_retype_error(context, volume, old_reservations,
new_reservations, status_update)
msg = _("Retype requires migration but is not allowed.")
raise exception.VolumeMigrationFailed(reason=msg)
snaps = objects.SnapshotList.get_all_for_volume(context,
volume.id)
if snaps:
_retype_error(context, volume, old_reservations,
new_reservations, status_update)
msg = _("Volume must not have snapshots.")
LOG.error(msg)
raise exception.InvalidVolume(reason=msg)
# Don't allow volume with replicas to be migrated
rep_status = volume.replication_status
if rep_status is not None and rep_status != 'disabled':
_retype_error(context, volume, old_reservations,
new_reservations, status_update)
msg = _("Volume must not be replicated.")
LOG.error(msg)
raise exception.InvalidVolume(reason=msg)
volume.migration_status = 'starting'
volume.save()
try:
self.migrate_volume(context, volume.id, host,
new_type_id=new_type_id)
except Exception:
with excutils.save_and_reraise_exception():
_retype_error(context, volume, old_reservations,
new_reservations, status_update)
else:
model_update = {'volume_type_id': new_type_id,
'host': host['host'],
'status': status_update['status']}
if retype_model_update:
model_update.update(retype_model_update)
volume.update(model_update)
volume.save()
if old_reservations:
QUOTAS.commit(context, old_reservations, project_id=project_id)
if new_reservations:
QUOTAS.commit(context, new_reservations, project_id=project_id)
self._notify_about_volume_usage(
context, volume, "retype",
extra_usage_info={'volume_type': new_type_id})
self.publish_service_capabilities(context)
LOG.info(_LI("Retype volume completed successfully."),
resource=volume)
def manage_existing(self, ctxt, volume_id, ref=None):
try:
flow_engine = manage_existing.get_flow(
ctxt,
self.db,
self.driver,
self.host,
volume_id,
ref)
except Exception:
msg = _("Failed to create manage_existing flow.")
LOG.exception(msg, resource={'type': 'volume', 'id': volume_id})
raise exception.CinderException(msg)
with flow_utils.DynamicLogListener(flow_engine, logger=LOG):
flow_engine.run()
# Fetch created volume from storage
vol_ref = flow_engine.storage.fetch('volume')
# Update volume stats
pool = vol_utils.extract_host(vol_ref['host'], 'pool')
if pool is None:
# Legacy volume, put them into default pool
pool = self.driver.configuration.safe_get(
'volume_backend_name') or vol_utils.extract_host(
vol_ref['host'], 'pool', True)
try:
self.stats['pools'][pool]['allocated_capacity_gb'] \
+= vol_ref['size']
except KeyError:
self.stats['pools'][pool] = dict(
allocated_capacity_gb=vol_ref['size'])
LOG.info(_LI("Manage existing volume completed successfully."),
resource=vol_ref)
return vol_ref['id']
def promote_replica(self, ctxt, volume_id):
"""Promote volume replica secondary to be the primary volume."""
volume = self.db.volume_get(ctxt, volume_id)
model_update = None
try:
utils.require_driver_initialized(self.driver)
except exception.DriverNotInitialized:
with excutils.save_and_reraise_exception():
LOG.exception(_LE("Promote volume replica failed."),
resource=volume)
try:
model_update = self.driver.promote_replica(ctxt, volume)
except exception.CinderException:
err_msg = (_('Error promoting secondary volume to primary'))
raise exception.ReplicationError(reason=err_msg,
volume_id=volume_id)
try:
if model_update:
volume = self.db.volume_update(ctxt,
volume_id,
model_update)
except exception.CinderException:
err_msg = (_("Failed updating model"
" with driver provided model %(model)s") %
{'model': model_update})
raise exception.ReplicationError(reason=err_msg,
volume_id=volume_id)
LOG.info(_LI("Promote volume replica completed successfully."),
resource=volume)
def reenable_replication(self, ctxt, volume_id):
"""Re-enable replication of secondary volume with primary volumes."""
volume = self.db.volume_get(ctxt, volume_id)
model_update = None
try:
utils.require_driver_initialized(self.driver)
except exception.DriverNotInitialized:
with excutils.save_and_reraise_exception():
LOG.exception(_LE("Sync volume replica failed."),
resource=volume)
try:
model_update = self.driver.reenable_replication(ctxt, volume)
except exception.CinderException:
err_msg = (_("Synchronizing secondary volume to primary failed."))
raise exception.ReplicationError(reason=err_msg,
volume_id=volume_id)
try:
if model_update:
volume = self.db.volume_update(ctxt,
volume_id,
model_update)
except exception.CinderException:
err_msg = (_("Failed updating model"
" with driver provided model %(model)s") %
{'model': model_update})
raise exception.ReplicationError(reason=err_msg,
volume_id=volume_id)
def _update_replication_relationship_status(self, ctxt):
# Only want volumes that do not have a 'disabled' replication status
filters = {'replication_status': ['active', 'copying', 'error',
'active-stopped', 'inactive']}
volumes = self.db.volume_get_all_by_host(ctxt, self.host,
filters=filters)
for vol in volumes:
model_update = None
try:
model_update = self.driver.get_replication_status(
ctxt, vol)
if model_update:
self.db.volume_update(ctxt, vol['id'], model_update)
except Exception:
LOG.exception(_LE("Get replication status for volume failed."),
resource=vol)
def create_consistencygroup(self, context, group):
"""Creates the consistency group."""
context = context.elevated()
status = fields.ConsistencyGroupStatus.AVAILABLE
model_update = None
self._notify_about_consistencygroup_usage(
context, group, "create.start")
try:
utils.require_driver_initialized(self.driver)
LOG.info(_LI("Consistency group %s: creating"), group.name)
model_update = self.driver.create_consistencygroup(context,
group)
if model_update:
if (model_update['status'] ==
fields.ConsistencyGroupStatus.ERROR):
msg = (_('Create consistency group failed.'))
LOG.error(msg,
resource={'type': 'consistency_group',
'id': group.id})
raise exception.VolumeDriverException(message=msg)
else:
group.update(model_update)
group.save()
except Exception:
with excutils.save_and_reraise_exception():
group.status = fields.ConsistencyGroupStatus.ERROR
group.save()
LOG.error(_LE("Consistency group %s: create failed"),
group.name)
group.status = status
group.created_at = timeutils.utcnow()
group.save()
LOG.info(_LI("Consistency group %s: created successfully"),
group.name)
self._notify_about_consistencygroup_usage(
context, group, "create.end")
LOG.info(_LI("Create consistency group completed successfully."),
resource={'type': 'consistency_group',
'id': group.id})
return group
def create_consistencygroup_from_src(self, context, group,
cgsnapshot=None, source_cg=None):
"""Creates the consistency group from source.
The source can be a CG snapshot or a source CG.
"""
source_name = None
snapshots = None
source_vols = None
try:
volumes = self.db.volume_get_all_by_group(context, group.id)
if cgsnapshot:
try:
# Check if cgsnapshot still exists
cgsnapshot = objects.CGSnapshot.get_by_id(
context, cgsnapshot.id)
except exception.CgSnapshotNotFound:
LOG.error(_LE("Create consistency group "
"from snapshot-%(snap)s failed: "
"SnapshotNotFound."),
{'snap': cgsnapshot.id},
resource={'type': 'consistency_group',
'id': group.id})
raise
source_name = _("snapshot-%s") % cgsnapshot.id
snapshots = objects.SnapshotList.get_all_for_cgsnapshot(
context, cgsnapshot.id)
for snap in snapshots:
if (snap.status not in
VALID_CREATE_CG_SRC_SNAP_STATUS):
msg = (_("Cannot create consistency group "
"%(group)s because snapshot %(snap)s is "
"not in a valid state. Valid states are: "
"%(valid)s.") %
{'group': group.id,
'snap': snap['id'],
'valid': VALID_CREATE_CG_SRC_SNAP_STATUS})
raise exception.InvalidConsistencyGroup(reason=msg)
if source_cg:
try:
source_cg = objects.ConsistencyGroup.get_by_id(
context, source_cg.id)
except exception.ConsistencyGroupNotFound:
LOG.error(_LE("Create consistency group "
"from source cg-%(cg)s failed: "
"ConsistencyGroupNotFound."),
{'cg': source_cg.id},
resource={'type': 'consistency_group',
'id': group.id})
raise
source_name = _("cg-%s") % source_cg.id
source_vols = self.db.volume_get_all_by_group(
context, source_cg.id)
for source_vol in source_vols:
if (source_vol['status'] not in
VALID_CREATE_CG_SRC_CG_STATUS):
msg = (_("Cannot create consistency group "
"%(group)s because source volume "
"%(source_vol)s is not in a valid "
"state. Valid states are: "
"%(valid)s.") %
{'group': group.id,
'source_vol': source_vol['id'],
'valid': VALID_CREATE_CG_SRC_CG_STATUS})
raise exception.InvalidConsistencyGroup(reason=msg)
# Sort source snapshots so that they are in the same order as their
# corresponding target volumes.
sorted_snapshots = None
if cgsnapshot and snapshots:
sorted_snapshots = self._sort_snapshots(volumes, snapshots)
# Sort source volumes so that they are in the same order as their
# corresponding target volumes.
sorted_source_vols = None
if source_cg and source_vols:
sorted_source_vols = self._sort_source_vols(volumes,
source_vols)
self._notify_about_consistencygroup_usage(
context, group, "create.start")
utils.require_driver_initialized(self.driver)
model_update, volumes_model_update = (
self.driver.create_consistencygroup_from_src(
context, group, volumes, cgsnapshot,
sorted_snapshots, source_cg, sorted_source_vols))
if volumes_model_update:
for update in volumes_model_update:
self.db.volume_update(context, update['id'], update)
if model_update:
group.update(model_update)
group.save()
except Exception:
with excutils.save_and_reraise_exception():
group.status = 'error'
group.save()
LOG.error(_LE("Create consistency group "
"from source %(source)s failed."),
{'source': source_name},
resource={'type': 'consistency_group',
'id': group.id})
# Update volume status to 'error' as well.
for vol in volumes:
self.db.volume_update(
context, vol['id'], {'status': 'error'})
now = timeutils.utcnow()
status = 'available'
for vol in volumes:
update = {'status': status, 'created_at': now}
self._update_volume_from_src(context, vol, update, group=group)
self._update_allocated_capacity(vol)
group.status = status
group.created_at = now
group.save()
self._notify_about_consistencygroup_usage(
context, group, "create.end")
LOG.info(_LI("Create consistency group "
"from source-%(source)s completed successfully."),
{'source': source_name},
resource={'type': 'consistency_group',
'id': group.id})
return group
def _sort_snapshots(self, volumes, snapshots):
# Sort source snapshots so that they are in the same order as their
# corresponding target volumes. Each source snapshot in the snapshots
# list should have a corresponding target volume in the volumes list.
if not volumes or not snapshots or len(volumes) != len(snapshots):
msg = _("Input volumes or snapshots are invalid.")
LOG.error(msg)
raise exception.InvalidInput(reason=msg)
sorted_snapshots = []
for vol in volumes:
found_snaps = [snap for snap in snapshots
if snap['id'] == vol['snapshot_id']]
if not found_snaps:
LOG.error(_LE("Source snapshot cannot be found for target "
"volume %(volume_id)s."),
{'volume_id': vol['id']})
raise exception.SnapshotNotFound(
snapshot_id=vol['snapshot_id'])
sorted_snapshots.extend(found_snaps)
return sorted_snapshots
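# Illustrative sketch (ids are made up): with
#   volumes   = [{'id': 'v1', 'snapshot_id': 's2'}, {'id': 'v2', 'snapshot_id': 's1'}]
#   snapshots = [{'id': 's1'}, {'id': 's2'}]
# the method returns [s2, s1], i.e. the snapshots reordered to line up with
# their corresponding target volumes.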
def _sort_source_vols(self, volumes, source_vols):
# Sort source volumes so that they are in the same order as their
# corresponding target volumes. Each source volume in the source_vols
# list should have a corresponding target volume in the volumes list.
if not volumes or not source_vols or len(volumes) != len(source_vols):
msg = _("Input volumes or source volumes are invalid.")
LOG.error(msg)
raise exception.InvalidInput(reason=msg)
sorted_source_vols = []
for vol in volumes:
found_source_vols = [source_vol for source_vol in source_vols
if source_vol['id'] == vol['source_volid']]
if not found_source_vols:
LOG.error(_LE("Source volumes cannot be found for target "
"volume %(volume_id)s."),
{'volume_id': vol['id']})
raise exception.VolumeNotFound(
volume_id=vol['source_volid'])
sorted_source_vols.extend(found_source_vols)
return sorted_source_vols
def _update_volume_from_src(self, context, vol, update, group=None):
try:
snapshot_id = vol.get('snapshot_id')
if snapshot_id:
snapshot = objects.Snapshot.get_by_id(context, snapshot_id)
orig_vref = self.db.volume_get(context,
snapshot.volume_id)
if orig_vref.bootable:
update['bootable'] = True
self.db.volume_glance_metadata_copy_to_volume(
context, vol['id'], snapshot_id)
except exception.SnapshotNotFound:
LOG.error(_LE("Source snapshot %(snapshot_id)s cannot be found."),
{'snapshot_id': vol['snapshot_id']})
self.db.volume_update(context, vol['id'],
{'status': 'error'})
if group:
group.status = 'error'
group.save()
raise
except exception.VolumeNotFound:
LOG.error(_LE("The source volume %(volume_id)s "
"cannot be found."),
{'volume_id': snapshot.volume_id})
self.db.volume_update(context, vol['id'],
{'status': 'error'})
if group:
group.status = 'error'
group.save()
raise
except exception.CinderException as ex:
LOG.error(_LE("Failed to update %(volume_id)s"
" metadata using the provided snapshot"
" %(snapshot_id)s metadata."),
{'volume_id': vol['id'],
'snapshot_id': vol['snapshot_id']})
self.db.volume_update(context, vol['id'],
{'status': 'error'})
if group:
group.status = 'error'
group.save()
raise exception.MetadataCopyFailure(reason=six.text_type(ex))
self.db.volume_update(context, vol['id'], update)
def _update_allocated_capacity(self, vol):
# Update allocated capacity in volume stats
pool = vol_utils.extract_host(vol['host'], 'pool')
if pool is None:
# Legacy volume, put them into default pool
pool = self.driver.configuration.safe_get(
'volume_backend_name') or vol_utils.extract_host(
vol['host'], 'pool', True)
try:
self.stats['pools'][pool]['allocated_capacity_gb'] += (
vol['size'])
except KeyError:
self.stats['pools'][pool] = dict(
allocated_capacity_gb=vol['size'])
def delete_consistencygroup(self, context, group):
"""Deletes consistency group and the volumes in the group."""
context = context.elevated()
project_id = group.project_id
if context.project_id != group.project_id:
project_id = group.project_id
else:
project_id = context.project_id
volumes = self.db.volume_get_all_by_group(context, group.id)
for volume_ref in volumes:
if volume_ref['attach_status'] == "attached":
# Volume is still attached, need to detach first
raise exception.VolumeAttached(volume_id=volume_ref['id'])
# self.host is 'host@backend'
# volume_ref['host'] is 'host@backend#pool'
# Extract host before doing comparison
if volume_ref['host']:
new_host = vol_utils.extract_host(volume_ref['host'])
if new_host != self.host:
raise exception.InvalidVolume(
reason=_("Volume is not local to this node"))
self._notify_about_consistencygroup_usage(
context, group, "delete.start")
volumes_model_update = None
model_update = None
try:
utils.require_driver_initialized(self.driver)
model_update, volumes_model_update = (
self.driver.delete_consistencygroup(context, group, volumes))
if volumes_model_update:
for volume in volumes_model_update:
update = {'status': volume['status']}
self.db.volume_update(context, volume['id'],
update)
# If we failed to delete a volume, make sure the status
# for the cg is set to error as well
if (volume['status'] in ['error_deleting', 'error'] and
model_update['status'] not in
['error_deleting', 'error']):
model_update['status'] = volume['status']
if model_update:
if model_update['status'] in ['error_deleting', 'error']:
msg = (_('Delete consistency group failed.'))
LOG.error(msg,
resource={'type': 'consistency_group',
'id': group.id})
raise exception.VolumeDriverException(message=msg)
else:
group.update(model_update)
group.save()
except Exception:
with excutils.save_and_reraise_exception():
group.status = 'error'
group.save()
# Update volume status to 'error' if driver returns
# None for volumes_model_update.
if not volumes_model_update:
for vol in volumes:
self.db.volume_update(
context, vol['id'], {'status': 'error'})
# Get reservations for group
try:
reserve_opts = {'consistencygroups': -1}
cgreservations = CGQUOTAS.reserve(context,
project_id=project_id,
**reserve_opts)
except Exception:
cgreservations = None
LOG.exception(_LE("Delete consistency group "
"failed to update usages."),
resource={'type': 'consistency_group',
'id': group.id})
for volume_ref in volumes:
# Get reservations for volume
try:
volume_id = volume_ref['id']
reserve_opts = {'volumes': -1,
'gigabytes': -volume_ref['size']}
QUOTAS.add_volume_type_opts(context,
reserve_opts,
volume_ref.get('volume_type_id'))
reservations = QUOTAS.reserve(context,
project_id=project_id,
**reserve_opts)
except Exception:
reservations = None
LOG.exception(_LE("Delete consistency group "
"failed to update usages."),
resource={'type': 'consistency_group',
'id': group.id})
# Delete glance metadata if it exists
self.db.volume_glance_metadata_delete_by_volume(context, volume_id)
self.db.volume_destroy(context, volume_id)
# Commit the reservations
if reservations:
QUOTAS.commit(context, reservations, project_id=project_id)
self.stats['allocated_capacity_gb'] -= volume_ref['size']
if cgreservations:
CGQUOTAS.commit(context, cgreservations,
project_id=project_id)
group.destroy()
self._notify_about_consistencygroup_usage(
context, group, "delete.end", volumes)
self.publish_service_capabilities(context)
LOG.info(_LI("Delete consistency group "
"completed successfully."),
resource={'type': 'consistency_group',
'id': group.id})
def update_consistencygroup(self, context, group,
add_volumes=None, remove_volumes=None):
"""Updates consistency group.
Update consistency group by adding volumes to the group,
or removing volumes from the group.
"""
add_volumes_ref = []
remove_volumes_ref = []
add_volumes_list = []
remove_volumes_list = []
if add_volumes:
add_volumes_list = add_volumes.split(',')
if remove_volumes:
remove_volumes_list = remove_volumes.split(',')
for add_vol in add_volumes_list:
try:
add_vol_ref = self.db.volume_get(context, add_vol)
except exception.VolumeNotFound:
LOG.error(_LE("Update consistency group "
"failed to add volume-%(volume_id)s: "
"VolumeNotFound."),
{'volume_id': add_vol},
resource={'type': 'consistency_group',
'id': group.id})
raise
if add_vol_ref['status'] not in VALID_ADD_VOL_TO_CG_STATUS:
msg = (_("Cannot add volume %(volume_id)s to consistency "
"group %(group_id)s because volume is in an invalid "
"state: %(status)s. Valid states are: %(valid)s.") %
{'volume_id': add_vol_ref['id'],
'group_id': group.id,
'status': add_vol_ref['status'],
'valid': VALID_ADD_VOL_TO_CG_STATUS})
raise exception.InvalidVolume(reason=msg)
# self.host is 'host@backend'
# volume_ref['host'] is 'host@backend#pool'
# Extract host before doing comparison
new_host = vol_utils.extract_host(add_vol_ref['host'])
if new_host != self.host:
raise exception.InvalidVolume(
reason=_("Volume is not local to this node."))
add_volumes_ref.append(add_vol_ref)
for remove_vol in remove_volumes_list:
try:
remove_vol_ref = self.db.volume_get(context, remove_vol)
except exception.VolumeNotFound:
LOG.error(_LE("Update consistency group "
"failed to remove volume-%(volume_id)s: "
"VolumeNotFound."),
{'volume_id': remove_vol},
resource={'type': 'consistency_group',
'id': group.id})
raise
if remove_vol_ref['status'] not in VALID_REMOVE_VOL_FROM_CG_STATUS:
msg = (_("Cannot remove volume %(volume_id)s from consistency "
"group %(group_id)s because volume is in an invalid "
"state: %(status)s. Valid states are: %(valid)s.") %
{'volume_id': remove_vol_ref['id'],
'group_id': group.id,
'status': remove_vol_ref['status'],
'valid': VALID_REMOVE_VOL_FROM_CG_STATUS})
raise exception.InvalidVolume(reason=msg)
remove_volumes_ref.append(remove_vol_ref)
self._notify_about_consistencygroup_usage(
context, group, "update.start")
try:
utils.require_driver_initialized(self.driver)
model_update, add_volumes_update, remove_volumes_update = (
self.driver.update_consistencygroup(
context, group,
add_volumes=add_volumes_ref,
remove_volumes=remove_volumes_ref))
if add_volumes_update:
for update in add_volumes_update:
self.db.volume_update(context, update['id'], update)
if remove_volumes_update:
for update in remove_volumes_update:
self.db.volume_update(context, update['id'], update)
if model_update:
if model_update['status'] in (
[fields.ConsistencyGroupStatus.ERROR]):
msg = (_('Error occurred when updating consistency group '
'%s.') % group.id)
LOG.error(msg)
raise exception.VolumeDriverException(message=msg)
group.update(model_update)
group.save()
except exception.VolumeDriverException:
with excutils.save_and_reraise_exception():
LOG.error(_LE("Error occurred in the volume driver when "
"updating consistency group %(group_id)s."),
{'group_id': group.id})
group.status = 'error'
group.save()
for add_vol in add_volumes_ref:
self.db.volume_update(context, add_vol['id'],
{'status': 'error'})
for rem_vol in remove_volumes_ref:
self.db.volume_update(context, rem_vol['id'],
{'status': 'error'})
except Exception:
with excutils.save_and_reraise_exception():
LOG.error(_LE("Error occurred when updating consistency "
"group %(group_id)s."),
{'group_id': group.id})
group.status = 'error'
group.save()
for add_vol in add_volumes_ref:
self.db.volume_update(context, add_vol['id'],
{'status': 'error'})
for rem_vol in remove_volumes_ref:
self.db.volume_update(context, rem_vol['id'],
{'status': 'error'})
now = timeutils.utcnow()
group.status = 'available'
group.updated_at = now
group.save()
for add_vol in add_volumes_ref:
self.db.volume_update(context, add_vol['id'],
{'consistencygroup_id': group.id,
'updated_at': now})
for rem_vol in remove_volumes_ref:
self.db.volume_update(context, rem_vol['id'],
{'consistencygroup_id': None,
'updated_at': now})
self._notify_about_consistencygroup_usage(
context, group, "update.end")
LOG.info(_LI("Update consistency group "
"completed successfully."),
resource={'type': 'consistency_group',
'id': group.id})
def create_cgsnapshot(self, context, cgsnapshot):
"""Creates the cgsnapshot."""
caller_context = context
context = context.elevated()
LOG.info(_LI("Cgsnapshot %s: creating."), cgsnapshot.id)
snapshots = objects.SnapshotList.get_all_for_cgsnapshot(
context, cgsnapshot.id)
self._notify_about_cgsnapshot_usage(
context, cgsnapshot, "create.start")
snapshots_model_update = None
model_update = None
try:
utils.require_driver_initialized(self.driver)
LOG.debug("Cgsnapshot %(cgsnap_id)s: creating.",
{'cgsnap_id': cgsnapshot.id})
# Pass context so that drivers that want to use it, can,
# but it is not a requirement for all drivers.
cgsnapshot.context = caller_context
for snapshot in snapshots:
snapshot.context = caller_context
model_update, snapshots_model_update = (
self.driver.create_cgsnapshot(context, cgsnapshot,
snapshots))
if snapshots_model_update:
for snap_model in snapshots_model_update:
# Update db for snapshot.
# NOTE(xyang): snapshots is a list of snapshot objects.
# snapshots_model_update should be a list of dicts.
self.db.snapshot_update(context,
snap_model['id'],
snap_model)
if (snap_model['status'] in [
fields.SnapshotStatus.ERROR_DELETING,
fields.SnapshotStatus.ERROR] and
model_update['status'] not in
['error_deleting', 'error']):
model_update['status'] = snap_model['status']
if model_update:
if model_update['status'] == 'error':
msg = (_('Error occurred when creating cgsnapshot '
'%s.') % cgsnapshot.id)
LOG.error(msg)
raise exception.VolumeDriverException(message=msg)
cgsnapshot.update(model_update)
cgsnapshot.save()
except exception.CinderException:
with excutils.save_and_reraise_exception():
cgsnapshot.status = 'error'
cgsnapshot.save()
# Update snapshot status to 'error' if driver returns
# None for snapshots_model_update.
if not snapshots_model_update:
for snapshot in snapshots:
snapshot.status = fields.SnapshotStatus.ERROR
snapshot.save()
for snapshot in snapshots:
volume_id = snapshot['volume_id']
snapshot_id = snapshot['id']
vol_ref = self.db.volume_get(context, volume_id)
if vol_ref.bootable:
try:
self.db.volume_glance_metadata_copy_to_snapshot(
context, snapshot_id, volume_id)
except exception.CinderException as ex:
LOG.error(_LE("Failed updating %(snapshot_id)s"
" metadata using the provided volumes"
" %(volume_id)s metadata"),
{'volume_id': volume_id,
'snapshot_id': snapshot_id})
# TODO(thangp): Switch over to use snapshot.update()
# after cgsnapshot-objects bugs are fixed
self.db.snapshot_update(
context, snapshot_id, {
'status': fields.SnapshotStatus.ERROR})
raise exception.MetadataCopyFailure(
reason=six.text_type(ex))
self.db.snapshot_update(context,
snapshot['id'],
{'status': fields.SnapshotStatus.AVAILABLE,
'progress': '100%'})
cgsnapshot.status = 'available'
cgsnapshot.save()
LOG.info(_LI("cgsnapshot %s: created successfully"),
cgsnapshot.id)
self._notify_about_cgsnapshot_usage(
context, cgsnapshot, "create.end")
return cgsnapshot
def delete_cgsnapshot(self, context, cgsnapshot):
"""Deletes cgsnapshot."""
caller_context = context
context = context.elevated()
project_id = cgsnapshot.project_id
LOG.info(_LI("cgsnapshot %s: deleting"), cgsnapshot.id)
snapshots = objects.SnapshotList.get_all_for_cgsnapshot(
context, cgsnapshot.id)
self._notify_about_cgsnapshot_usage(
context, cgsnapshot, "delete.start")
snapshots_model_update = None
model_update = None
try:
utils.require_driver_initialized(self.driver)
LOG.debug("cgsnapshot %(cgsnap_id)s: deleting",
{'cgsnap_id': cgsnapshot.id})
# Pass context so that drivers that want to use it, can,
# but it is not a requirement for all drivers.
cgsnapshot.context = caller_context
for snapshot in snapshots:
snapshot.context = caller_context
model_update, snapshots_model_update = (
self.driver.delete_cgsnapshot(context, cgsnapshot,
snapshots))
if snapshots_model_update:
for snap_model in snapshots_model_update:
# NOTE(xyang): snapshots is a list of snapshot objects.
# snapshots_model_update should be a list of dicts.
snap = next((item for item in snapshots if
item.id == snap_model['id']), None)
if snap:
snap.status = snap_model['status']
snap.save()
if (snap_model['status'] in
[fields.SnapshotStatus.ERROR_DELETING,
fields.SnapshotStatus.ERROR] and
model_update['status'] not in
['error_deleting', 'error']):
model_update['status'] = snap_model['status']
if model_update:
if model_update['status'] in ['error_deleting', 'error']:
msg = (_('Error occurred when deleting cgsnapshot '
'%s.') % cgsnapshot.id)
LOG.error(msg)
raise exception.VolumeDriverException(message=msg)
else:
cgsnapshot.update(model_update)
cgsnapshot.save()
except exception.CinderException:
with excutils.save_and_reraise_exception():
cgsnapshot.status = 'error'
cgsnapshot.save()
# Update snapshot status to 'error' if driver returns
# None for snapshots_model_update.
if not snapshots_model_update:
for snapshot in snapshots:
snapshot.status = fields.SnapshotStatus.ERROR
snapshot.save()
for snapshot in snapshots:
# Get reservations
try:
if CONF.no_snapshot_gb_quota:
reserve_opts = {'snapshots': -1}
else:
reserve_opts = {
'snapshots': -1,
'gigabytes': -snapshot['volume_size'],
}
volume_ref = self.db.volume_get(context, snapshot['volume_id'])
QUOTAS.add_volume_type_opts(context,
reserve_opts,
volume_ref.get('volume_type_id'))
reservations = QUOTAS.reserve(context,
project_id=project_id,
**reserve_opts)
except Exception:
reservations = None
LOG.exception(_LE("Failed to update usages deleting snapshot"))
self.db.volume_glance_metadata_delete_by_snapshot(context,
snapshot['id'])
# TODO(thangp): Switch over to use snapshot.destroy()
# after cgsnapshot-objects bugs are fixed
self.db.snapshot_destroy(context, snapshot['id'])
# Commit the reservations
if reservations:
QUOTAS.commit(context, reservations, project_id=project_id)
cgsnapshot.destroy()
LOG.info(_LI("cgsnapshot %s: deleted successfully"), cgsnapshot.id)
self._notify_about_cgsnapshot_usage(context, cgsnapshot, "delete.end",
snapshots)
def update_migrated_volume(self, ctxt, volume, new_volume, volume_status):
"""Finalize migration process on backend device."""
model_update = None
model_update_default = {'_name_id': new_volume.name_id,
'provider_location':
new_volume.provider_location}
try:
model_update = self.driver.update_migrated_volume(ctxt,
volume,
new_volume,
volume_status)
except NotImplementedError:
# If update_migrated_volume is not implemented for the driver,
# _name_id and provider_location will be set with the values
# from new_volume.
model_update = model_update_default
if model_update:
model_update_default.update(model_update)
# Swap keys that were changed in the source so we keep their values
# in the temporary volume's DB record.
# Need to convert 'metadata' and 'admin_metadata' since
# they are not keys of volume, their corresponding keys are
# 'volume_metadata' and 'volume_admin_metadata'.
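# Illustrative sketch (keys/values are made up): a source volume whose
# volume_metadata is [{'key': 'k1', 'value': 'v1'}] contributes
# {'metadata': {'k1': 'v1'}} to model_update_new for the temporary volume.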
model_update_new = dict()
for key in model_update:
if key == 'metadata':
if volume.get('volume_metadata'):
model_update_new[key] = {
metadata['key']: metadata['value']
for metadata in volume.volume_metadata}
elif key == 'admin_metadata':
model_update_new[key] = {
metadata['key']: metadata['value']
for metadata in volume.volume_admin_metadata}
else:
model_update_new[key] = volume[key]
with new_volume.obj_as_admin():
new_volume.update(model_update_new)
new_volume.save()
with volume.obj_as_admin():
volume.update(model_update_default)
volume.save()
# Replication V2.1 methods
def failover_host(self, context,
secondary_backend_id=None):
"""Failover a backend to a secondary replication target.
Instructs a replication capable/configured backend to failover
to one of its secondary replication targets. host=None is
an acceptable input, and leaves it to the driver to failover
to the only configured target, or to choose a target on its
own. All of the host's volumes will be passed on to the driver
in order for it to determine the replicated volumes on the host,
if needed.
:param context: security context
:param secondary_backend_id: Specifies backend_id to fail over to
"""
svc_host = vol_utils.extract_host(self.host, 'backend')
service = objects.Service.get_by_args(
context,
svc_host,
'cinder-volume')
volumes = objects.VolumeList.get_all_by_host(context, self.host)
exception_encountered = False
try:
# expected form of volume_update_list:
# [{volume_id: <cinder-volid>, updates: {'provider_id': xxxx....}},
# {volume_id: <cinder-volid>, updates: {'provider_id': xxxx....}}]
(active_backend_id, volume_update_list) = (
self.driver.failover_host(
context,
volumes,
secondary_id=secondary_backend_id))
except exception.UnableToFailOver:
LOG.exception(_LE("Failed to perform replication failover"))
service.replication_status = (
fields.ReplicationStatus.FAILOVER_ERROR)
service.save()
exception_encountered = True
except exception.InvalidReplicationTarget:
LOG.exception(_LE("Invalid replication target specified "
"for failover"))
# Preserve the replication_status
if secondary_backend_id == "default":
service.replication_status = (
fields.ReplicationStatus.FAILED_OVER)
else:
service.replication_status = fields.ReplicationStatus.ENABLED
service.save()
exception_encountered = True
except exception.VolumeDriverException:
# NOTE(jdg): Drivers need to be aware if they fail during
# a failover sequence, we're expecting them to cleanup
# and make sure the driver state is such that the original
# backend is still set as primary as per driver memory
LOG.error(_LE("Driver reported error during "
"replication failover."))
service.status = 'error'
service.save()
exception_encountered = True
if exception_encountered:
LOG.error(
_LE("Error encountered during failover on host: "
"%(host)s invalid target ID %(backend_id)s"),
{'host': self.host, 'backend_id':
secondary_backend_id})
return
if secondary_backend_id == "default":
service.replication_status = fields.ReplicationStatus.ENABLED
service.active_backend_id = ""
if service.frozen:
service.disabled = True
service.disabled_reason = "frozen"
else:
service.disabled = False
service.disabled_reason = ""
service.save()
else:
service.replication_status = fields.ReplicationStatus.FAILED_OVER
service.active_backend_id = active_backend_id
service.disabled = True
service.disabled_reason = "failed-over"
service.save()
for update in volume_update_list:
# Response must include an id key: {volume_id: <cinder-uuid>}
if not update.get('volume_id'):
raise exception.UnableToFailOver(
reason=_("Update list, doesn't include volume_id"))
# Key things to consider (attaching failed-over volumes):
# provider_location
# provider_auth
# provider_id
# replication_status
vobj = objects.Volume.get_by_id(context, update['volume_id'])
vobj.update(update.get('updates', {}))
vobj.save()
LOG.info(_LI("Failed over to replication target successfully."))
def freeze_host(self, context):
"""Freeze management plane on this backend.
Basically puts the control/management plane into a
Read Only state. We should handle this in the scheduler,
however this is provided to let the driver know in case it
needs/wants to do something specific on the backend.
:param context: security context
"""
# TODO(jdg): Return from driver? or catch?
# Update status column in service entry
try:
self.driver.freeze_backend(context)
except exception.VolumeDriverException:
# NOTE(jdg): In the case of freeze, we don't really
# need the backend's consent or anything, we'll just
# disable the service, so we can just log this and
# go about our business
LOG.warning(_LW('Error encountered on Cinder backend during '
'freeze operation, service is frozen, however '
'notification to driver has failed.'))
svc_host = vol_utils.extract_host(self.host, 'backend')
service = objects.Service.get_by_args(
context,
svc_host,
'cinder-volume')
service.disabled = True
service.disabled_reason = "frozen"
service.save()
LOG.info(_LI("Set backend status to frozen successfully."))
return True
def thaw_host(self, context):
"""UnFreeze management plane on this backend.
Basically puts the control/management plane back into
a normal state. We should handle this in the scheduler,
however this is provided to let the driver know in case it
needs/wants to do something specific on the backend.
:param context: security context
"""
# TODO(jdg): Return from driver? or catch?
# Update status column in service entry
try:
self.driver.thaw_backend(context)
except exception.VolumeDriverException:
# NOTE(jdg): Thaw actually matters, if this call
# to the backend fails, we're stuck and can't re-enable
LOG.error(_LE('Error encountered on Cinder backend during '
'thaw operation, service will remain frozen.'))
return False
svc_host = vol_utils.extract_host(self.host, 'backend')
service = objects.Service.get_by_args(
context,
svc_host,
'cinder-volume')
service.disabled = False
service.disabled_reason = ""
service.save()
LOG.info(_LI("Thawed backend successfully."))
return True
def manage_existing_snapshot(self, ctxt, snapshot, ref=None):
LOG.debug('manage_existing_snapshot: managing %s.', ref)
try:
flow_engine = manage_existing_snapshot.get_flow(
ctxt,
self.db,
self.driver,
self.host,
snapshot.id,
ref)
except Exception:
msg = _LE("Failed to create manage_existing flow: "
"%(object_type)s %(object_id)s.")
LOG.exception(msg, {'object_type': 'snapshot',
'object_id': snapshot.id})
raise exception.CinderException(
_("Failed to create manage existing flow."))
with flow_utils.DynamicLogListener(flow_engine, logger=LOG):
flow_engine.run()
return snapshot.id
def get_capabilities(self, context, discover):
"""Get capabilities of backend storage."""
if discover:
self.driver.init_capabilities()
capabilities = self.driver.capabilities
LOG.debug("Obtained capabilities list: %s.", capabilities)
return capabilities
def get_backup_device(self, ctxt, backup):
(backup_device, is_snapshot) = (
self.driver.get_backup_device(ctxt, backup))
secure_enabled = self.driver.secure_file_operations_enabled()
backup_device_dict = {'backup_device': backup_device,
'secure_enabled': secure_enabled,
'is_snapshot': is_snapshot, }
return backup_device_dict
def secure_file_operations_enabled(self, ctxt, volume):
secure_enabled = self.driver.secure_file_operations_enabled()
return secure_enabled
| 44.743448 | 79 | 0.549317 | ["Apache-2.0"] | ISCAS-VDI/cinder-base | cinder/volume/manager.py | 153,649 | Python |
from __future__ import absolute_import, division, print_function
from six.moves import range
from psana import *
import os
import sys
import numpy as np
from libtbx.utils import Sorry
from xfel.amo.pnccd_ana import pnccd_tbx
from xfel.amo.pnccd_ana import pnccd_hit
from xfel.amo.pnccd_ana import fxs
import matplotlib.pyplot as plt
from six.moves import zip
plt.ion()
########################################
# Because the mask sometimes contains zero values,
# we're bound to get divisions by zero at times.
# Those errors are ignored here.
np.seterr(divide='ignore', invalid='ignore')
from mpi4py import MPI
comm = MPI.COMM_WORLD
rank = comm.Get_rank()
size = comm.Get_size()
def h5gen(run,timestamps = None, first = None, last = None):
# Single CPU
if size == 1:
nom = rank
denom = size
# MPI
else:
nom = rank - 1
denom = size - 1
times = timestamps
nevents = len(times)
mytimes,myevents = zip(*[(times[i],i) for i in range(nevents) if (i+nom)%denom == 0])
for j in range(len(mytimes)):
yield myevents[j],mytimes[j]
def idxgen(run,timestamps = None, first = None, last = None):
#print "idx mode"
# Use timestamps from index file
if timestamps is None:
timestamps = run.times()
if first is None :
first = 0
if last is None :
last = len(timestamps)
else:
last = min(last,len(timestamps)) # Check that last time-stamp exists
# Single CPU
if size == 1:
nom = rank
denom = size
# MPI
else:
nom = rank - 1
denom = size - 1
times = timestamps[first:last]
nevents = len(times)
mytimes,myevents = zip(*[(times[i],i) for i in range(nevents) if (i+nom)%denom == 0])
for j in range(len(mytimes)):
yield myevents[j],run.event(mytimes[j])
def smdgen(run,timestamps = None, first = None, last = None):
#print "smd mode"
if first is None :
first = 0
if last is None :
last = 1e20 # We typically don't know what the last event is, so for now use a large number
# Single CPU
if size == 1:
nom = rank
denom = size
# MPI
else:
nom = rank - 1
denom = size - 1
if timestamps is None :
for nevent,evt in enumerate(run.events()):
if nevent < first : continue
elif nevent == last : return
elif nevent%denom == nom:
yield nevent-first,evt
else : # Only applicable for xtc format
ct = 0
for nevent,evt in enumerate(run.events()):
t = pnccd_tbx.get_psana_time(evt)
# Check if event exists in timestamps
if np.equal(t, timestamps).all(axis=1).any() :
if ct < first : continue
elif ct == last : return
elif ct%denom == nom:
yield ct,evt
ct += 1
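# All three event generators above distribute events round-robin over the MPI
# workers: with more than one rank, rank 0 acts as a listening server only, so
# workers use nom = rank - 1 and denom = size - 1 and each takes the events
# where (index + nom) % denom == 0.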
def compute_mask(argv=None) :
"""Function to compute a mask of non-resposive pixels from FXS images
extracted from xtc (smd,idx,xtc format) or h5 files.
Works for Single CPU, Multi-Processor interactive jobs and MPI batch jobs
For a definition of input arguments argv and batch processing instructions see *** mpi_fxs_launch.py ***
compute_mask produces the following output files:
* Index file : Information about the events processed including time-stamps, beam center, total and peak intensities, streak locations, particle size etc
* Average : Average image in cartesian coordinates
* Variance : Variance map of intensities in cartesian coordinates
* Mask : Mask image in cartesian coordinates
"""
if argv is None:
argv = sys.argv[1:]
try:
from mpi4py import MPI
except ImportError:
raise Sorry("MPI not found")
comm = MPI.COMM_WORLD
rank = comm.Get_rank()
size = comm.Get_size()
if argv.hit is None :
hit = -1.0e20 # Process everything
else:
hit = argv.hit # Process everything > hit
ftype = argv.ftype
if argv.param_path is not None :
if ftype == 'h5' :
param_file = np.genfromtxt(argv.param_path,skip_header=1,dtype=None)
timestamps,filestamps = pnccd_tbx.get_h5_event(param_file)
elif ftype == 'xtc' :
param_file = np.genfromtxt(argv.param_path,skip_header=1,dtype=None)
timestamps = pnccd_tbx.get_time(param_file)
else :
param_file = np.genfromtxt(argv.param_path,skip_header=1)
timestamps = pnccd_tbx.get_psana_event(param_file)
else:
timestamps = None
# The first and last events to processed
first = argv.first
last = argv.last
# Check data format
if ftype == 'h5' :
import h5py
run = int(argv.run)
# Get time-stamps from all h5-files
if argv.param_path is None :
timestamps = []
filestamps = []
# Loop over all h5-files and store the time-stamps
for i in os.listdir(argv.xtc_dir):
if i.endswith(".h5"):
f = h5py.File(i,'r')
filestamps.append(i[-7:-4])
timestamps.append(list(f.keys()))
continue
else:
continue
dataset_name = "%s-r%s"%(argv.experiment, str(argv.run).zfill(4)) # Ascert 4 digit run number
exprun = os.path.join(argv.xtc_dir,dataset_name)
if argv.first is None :
first = 0
if argv.last is None :
last = len(timestamps)
else:
last = min(last,len(timestamps)) # Check that last time-stamp exists
timestamps = timestamps[first:last]
filestamps = filestamps[first:last]
evtgen = h5gen
else :
exprun = "exp=%s:run=%d"%(argv.experiment, argv.run)
if (ftype == 'xtc') :
dataset_name = exprun+':xtc'
elif (ftype == 'idx') :
dataset_name = exprun+':idx'
elif(ftype == 'idx_ffb') :
dataset_name = exprun+':idx'
# as ffb is only at SLAC, ok to hardcode /reg/d here
dataset_name += ":dir=/reg/d/ffb/%s/%s/xtc"%(argv.experiment[0:3],argv.experiment)
elif(ftype == 'smd') :
dataset_name = exprun+':smd'
elif(ftype == 'smd_ffb') :
dataset_name = exprun+':smd'
# as ffb is only at SLAC, ok to hardcode /reg/d here ADD live!
dataset_name += ":dir=/reg/d/ffb/%s/%s/xtc:live"%(argv.experiment[0:3],argv.experiment)
exprun = dataset_name
ds = DataSource(dataset_name)
run = next(ds.runs())
# Select event generator
if (ftype=='smd') or (ftype == 'smd_ffb') or (ftype == 'xtc'):
evtgen = smdgen
elif (ftype=='idx') or (ftype == 'idx_ffb'):
evtgen = idxgen
if size == 1:
plot = argv.plot
else:
plot = 0
FXS = fxs.fluctuation_scattering(dataset_name = exprun,
detector_address = argv.address,
data_type = argv.ftype,
mask_path = argv.mask_path,
mask_angles = None, #np.array([88, 270]), # static masking at 88 and 270 deg
mask_widths = None, #np.array([6, 10]), # +/- degree
backimg_path = argv.bg_img_path,
backmsk_path = argv.bg_msk_path,
geom_path = argv.geom_path,
det_dist = argv.det_distance,
det_pix = argv.det_pixel,
beam_l = argv.lambda_b,
mask_thr = argv.thr,
nQ = argv.nQ,
nPhi = argv.nPhi,
dQ = argv.dQ,
dPhi = argv.dP,
cent0 = [argv.x,argv.y],
r_max = argv.r_max,
dr = argv.dr,
dx = argv.dx,
dy = argv.dy,
r_0 = argv.r0,
q_bound = argv.q_bound,
peak = [0.037, 0.064],
dpeak = [0.002, 0.002])
# Initialize iterator
FXS.cnt = np.array([0.])
# Initialize Index variables
if argv.param_path is None :
maxevents = 400000 # We don't always know the total nr of events. Therefore set to large value
else:
maxevents = min(len(timestamps),len(timestamps[first:last]))
FXS.get_index(maxevents, flag = 1)
# chop the list into pieces, depending on rank. This assigns each process
# events such that they get every Nth event where N is the number of processes
if size > 1 :
if rank > 0 :
hd=pnccd_hit.hit()
# MPI process. Here we set rank 0 to work as a listening server only.
for j,evt in evtgen(run,timestamps = timestamps, first = first, last = last):
#print '***',rank,j,evt.get(EventId).fiducials()
if j%10==0: print('Rank',rank,'processing event',j)
if ftype == 'h5' :
FXS.get_h5(filestamps[j],evt)
else :
FXS.get_image(evt) # Geometry applied image (FXS.img)
FXS.image = np.copy(FXS.img)
# Process hits
if (FXS.image is not None) and (float(FXS.image.sum()) > hit) :
FXS.get_beam(plot = plot) # Beam center refinement
FXS.get_polar(plot = plot) # Polar transform
FXS.get_streak_mask(plot = plot) # Get info on streak
FXS.store_image(j) # Store raw images
if ftype == 'h5' :
FXS.store_index_h5(evt, j, flag = 0)
else:
######################################
# Ugly way to get the time-stamps. Fix!!
time = evt.get(EventId).time()
fid = evt.get(EventId).fiducials()
sec = time[0]
nsec = time[1]
et = EventTime(int((sec<<32)|nsec),fid)
#######################################
FXS.store_index(et, j, flag = 0) # Store index
if int(FXS.cnt)%10==0: print('Rank',rank,'processed events: ', int(FXS.cnt))
# Send partial results to master (rank 0)
if int(FXS.cnt) % 50 == 0: # Send every 50 events
# C2 and Saxs data
tmp_n = int(FXS.cnt)
# Total intensity, Size and Score
tmp_ind = np.column_stack((FXS.tot_int,FXS.tot_size,FXS.tot_score))
hd.send(tmp_n,ind=tmp_ind)
hd.endrun()
print('Rank',rank,'total events: ', int(FXS.cnt),' * ')
else:
if ftype == 'h5' :
FXS.run_nr = run
else:
FXS.run_nr = int(run.run())
hd = pnccd_hit.hit()
idim = (maxevents,3)
hd.total_ind = [np.zeros(idim)]*(size-1)
hd.total_ev_i = [0.0]*(size-1)
nClients = size - 1
while nClients > 0:
# Remove client if the run ended
if hd.recv():
nClients -= 1
else:
ns = sum(hd.total_ev_s)
ni = sum(hd.total_ev_i)
if (ns % 100 == 0) : # Publish every 100 events
IND = np.zeros(idim)
for i in range(size-1) :
IND = IND + hd.total_ind[i]
FXS.publish(ind=IND, n_i=ni)
else :
# Single CPU
for j,evt in evtgen(run,timestamps = timestamps, first = first, last = last):
#print '***',rank,j,evt.get(EventId).fiducials()
if j%10==0: print('Rank',rank,'processing event',j)
if ftype == 'h5' :
FXS.get_h5(filestamps[j],evt)
else :
FXS.get_image(evt) # Geometry applied image (FXS.img)
FXS.image = np.copy(FXS.img)
# Process hits
if (FXS.image is not None) and (float(FXS.image.sum()) > hit) :
FXS.get_beam(plot=plot) # Beam center refinement
FXS.get_polar() # Polar transform
FXS.get_streak_mask(plot=0) # Get info on streak
FXS.store_image(j) # Store raw images
if ftype == 'h5' :
FXS.store_index_h5(evt, j, flag = 0)
else:
######################################
# Ugly way to get the time-stamps. Fix!!
time = evt.get(EventId).time()
fid = evt.get(EventId).fiducials()
sec = time[0]
nsec = time[1]
et = EventTime(int((sec<<32)|nsec),fid)
#######################################
FXS.store_index(et, j, flag = 0) # Store index
print('Rank',rank,'total events: ', int(FXS.cnt),' * ')
#sum the images across mpi cores
if size > 1:
print("Synchronizing rank", rank)
Tot = np.zeros(FXS.cnt.shape)
comm.Reduce(FXS.cnt,Tot)
if rank == 0 and Tot[0] == 0:
raise Sorry("No events found in the run")
# Collect Variables
Images = np.zeros(FXS.images.shape)
comm.Reduce(FXS.images,Images)
# Collect Indexing variables
Tot_t = np.zeros(FXS.tot_t.shape)
comm.Reduce(FXS.tot_t,Tot_t)
Tot_s = np.zeros(FXS.tot_s.shape)
comm.Reduce(FXS.tot_s,Tot_s)
Tot_ns = np.zeros(FXS.tot_ns.shape)
comm.Reduce(FXS.tot_ns,Tot_ns)
Tot_fd = np.zeros(FXS.tot_fd.shape)
comm.Reduce(FXS.tot_fd,Tot_fd)
Tot_int = np.zeros(FXS.tot_int.shape)
comm.Reduce(FXS.tot_int,Tot_int)
Tot_peak1 = np.zeros(FXS.tot_peak1_int.shape)
comm.Reduce(FXS.tot_peak1_int,Tot_peak1)
Tot_peak2 = np.zeros(FXS.tot_peak2_int.shape)
comm.Reduce(FXS.tot_peak2_int,Tot_peak2)
Tot_s_m = np.zeros(FXS.tot_streak_m.shape)
comm.Reduce(FXS.tot_streak_m,Tot_s_m)
Tot_s_s = np.zeros(FXS.tot_streak_s.shape)
comm.Reduce(FXS.tot_streak_s,Tot_s_s)
Tot_cx = np.zeros(FXS.tot_cx.shape)
comm.Reduce(FXS.tot_cx,Tot_cx)
Tot_cy = np.zeros(FXS.tot_cy.shape)
comm.Reduce(FXS.tot_cy,Tot_cy)
Tot_size = np.zeros(FXS.tot_size.shape)
comm.Reduce(FXS.tot_size,Tot_size)
Tot_score = np.zeros(FXS.tot_score.shape)
comm.Reduce(FXS.tot_score,Tot_score)
# Reduce results
if rank==0:
if size > 1:
print("Synchronized")
# Identify dead lines and pixels, get binary pixel mask
Ave,Var,Mask = pnccd_tbx.pixel_mask(Images, thr = 0.12)
# Write out data
if argv.outputdir is None:
opath = os.getcwd()
else:
opath = argv.outputdir
f_index = os.path.join(opath,'Index_run' + str(argv.run) + '.dat')
stamps = ['Time','Seconds','Nanoseconds','Fiducial','Total Intensity','Peak1, q='+str(FXS.peak[0])+'+/-'+str(FXS.dpeak[0]),'Peak2, q='+str(FXS.peak[1])+'+/-'+str(FXS.dpeak[1]),'Mean streak angle','Std streak angle','Beam X','Beam Y','Radius [Ang]','Score']
head =" ".join(stamps)
f_ave = os.path.join(opath,'Average_map_' + str(argv.run) + '.dat')
f_var = os.path.join(opath,'Variance_map_' + str(argv.run) + '.dat')
f_mask = os.path.join(opath,'Mask_map_' + str(argv.run) + '.dat')
# Get rid of zero lines at the end
# Last non-zero intensity
nz = np.nonzero(Tot_t)
fend = nz[0][-1]+1
f = open(f_index,'w')
np.savetxt(f,np.c_[Tot_t[:fend],Tot_s[:fend],Tot_ns[:fend],Tot_fd[:fend],Tot_int[:fend],Tot_peak1[:fend],Tot_peak2[:fend],Tot_s_m[:fend],Tot_s_s[:fend],Tot_cx[:fend],Tot_cy[:fend],Tot_size[:fend],Tot_score[:fend]],header = head, comments='' )
f.close()
f = open(f_ave,'w')
np.savetxt(f,Ave)
f.close()
f = open(f_var,'w')
np.savetxt(f,Var)
f.close()
f = open(f_mask,'w')
np.savetxt(f,Mask)
f.close()
| 32.700743 | 265 | 0.493776 | [
"BSD-3-Clause"
] | jorgediazjr/dials-dev20191018 | modules/cctbx_project/xfel/amo/pnccd_ana/mpi_fxs_mask.py | 17,593 | Python |
"""
Provides `to_ltx` to convert numpy arrays to LaTeX.
"""
import numpy as np
from numpyarray_to_latex.utils import (
math_form,
)
def to_ltx(a,
fmt='{:6.4f}',
latexarraytype='array',
imstring='i',
is_row_vector=True,
mathform=True,
brackets='()',
mark_elements=[],
mark_color='pink',
separate_columns=[],
separate_rows=[],
):
r"""
Return a LaTeX array given a numpy array.
Parameters
----------
a : numpy.ndarray
    fmt : str, default = '{:6.4f}'
        Python 3 format string (optional); see
https://mkaz.tech/python-string-format.html
latexarraytype : str, default = 'array'
Any of
.. code:: python
"array"
"pmatrix"
"bmatrix"
"vmatrix"
"Vmatrix"
"Bmatrix"
if "array", you can specifiy the brackets
with the keyword ``brackets``.
imstring : str, default = 'i'
Character to use to represent the imaginary unit.
Usually ``'i'`` or ``'j'``
is_row_vector : bool, default = True
If the array is 1D, should the output be
a row (True) or column (False) vector?
mathform : bool, default = True
        whether to convert strings like ``1e+05``
to ``1\times10^{5}``.
brackets : iterable, default = '()'
which brackets to use to wrap the matrix
(must be two elements long).
Use ``brackets = None`` if you don't want
any brackets around the array.
mark_elements : list, default = []
list of tuples containing element indices that
should be marked with a colorbox.
mark_color : str, default = 'pink'
The color with which to mark matrix elements.
separate_columns : list, default = []
list of column indices before which a vertical
line should be drawn
separate_rows : list, default = []
list of row indices before which a horizontal
line should be drawn
Returns
-------
out: str
Formatted LaTeX string
Examples
--------
>>> from numpyarray_to_latex import to_ltx
>>> tex = to_ltx([[2.,2.],[2.,2.]])
>>> print(tex)
\left(
\begin{array}{}
2.00 & 2.00\\
2.00 & 2.00
\end{array}
\right)
"""
a = np.array(a)
if len(a.shape) > 2:
raise NotImplementedError('Arrays having more than two dimensions cannot be converted.')
if mark_elements is None:
mark_elements = []
if a.ndim == 2 and len(mark_elements)>0 and not all([hasattr(mark,'__len__') for mark in mark_elements]):
raise ValueError("If the array is 2D, ``mark_elements`` should be 2D as well, but isn't")
if len(a.shape) == 1:
if len(mark_elements)>0 and hasattr(mark_elements[0],'__len__'):
raise ValueError("If the array is 1D, ``mark_elements`` should be 1D as well, but isn't.")
a = np.array([a])
if is_row_vector is False:
a = a.T
mark_elements = [ (mark,0) for mark in mark_elements]
else:
mark_elements = [ (0,mark) for mark in mark_elements]
if isinstance(mark_elements, np.ndarray):
mark_elements = mark_elements.tolist()
mark_elements = [ tuple(row) for row in mark_elements ]
nrow, ncol = a.shape
out = ''
if brackets is not None and latexarraytype not in [
"bmatrix",
"pmatrix",
"vmatrix",
"Bmatrix",
"Vmatrix",
]:
out = r'\left' + brackets[0] + '\n'
if len(separate_columns) > 0:
if latexarraytype != 'array':
raise ValueError('column separators can only be used for `latexarraytype = "array"`')
colstr = '{'
for i in range(ncol):
if i in separate_columns and i > 0:
colstr += '|'
colstr += 'c'
colstr += '}'
else:
colstr = '{}'
out += r'\begin{' + latexarraytype + '}' +colstr+'\n'
for i in np.arange(nrow):
if i in separate_rows and i > 0:
out += ' \\hline\n'
out = out + ' '
for j in np.arange(ncol):
this_element = ''
if np.real(a[i, j]) < 0:
leadstr = ''
else:
leadstr = ' '
if '.' not in fmt.format(a[i, j]):
dot_space = ' '
else:
dot_space = ''
if np.iscomplexobj(a[i, j]):
real = math_form(fmt.format(np.real(a[i, j])),
mathform=mathform)
real = real.lstrip(' ')
imag = math_form(fmt.format(np.imag(a[i, j])),
is_imaginary=True,
mathform=mathform)
imag = imag.lstrip(' ')
if not (imag.startswith('-') or imag.startswith('+')):
number = real + '+' + imag
else:
number = real + imag
this_element = (
this_element
+ leadstr
+ number
+ imstring
+ dot_space
)
else:
this_element = (
this_element
+ leadstr
+ math_form(fmt.format(np.real(a[i, j])),
mathform=mathform)
+ dot_space
)
if (i,j) in mark_elements:
this_element = r'\colorbox{'+ mark_color +'}{$'+ this_element+'$} '
if j < ncol-1:
this_element += r' & '
out += this_element
if i < nrow-1:
out = out + '\\\\\n'
out = out + '\n' + r'\end{' + latexarraytype + '}'
if brackets is not None and latexarraytype not in [
"bmatrix",
"pmatrix",
"vmatrix",
"Bmatrix",
"Vmatrix",
]:
out += '\n\\right' + brackets[1]
return out
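# Minimal usage sketch for the marking/separator options (illustrative only;
# the exact LaTeX output is abridged here):
#
# >>> tex = to_ltx([[1., 2.], [3., 4.]],
# ...              fmt='{:.0f}',
# ...              mark_elements=[(0, 1)],   # wrap element (0, 1) in a \colorbox
# ...              separate_columns=[1])     # vertical rule before column 1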
| 29.115741 | 109 | 0.480204 | [
"MIT"
] | psychemedia/numpyarray_to_latex | numpyarray_to_latex/main.py | 6,289 | Python |
# -*- test-case-name: xmantissa.test.test_offering -*-
# Copyright 2008 Divmod, Inc. See LICENSE file for details
"""
Axiomatic commands for manipulating Mantissa offerings.
"""
from twisted.python import usage
from axiom.scripts import axiomatic
from xmantissa import offering, publicweb
class Install(axiomatic.AxiomaticSubCommand):
synopsis = "<offering>"
def parseArgs(self, offering):
self["offering"] = self.decodeCommandLine(offering)
def postOptions(self):
for o in offering.getOfferings():
if o.name == self["offering"]:
offering.installOffering(self.store, o, None)
break
else:
raise usage.UsageError("No such offering")
class List(axiomatic.AxiomaticSubCommand):
def postOptions(self):
for o in offering.getOfferings():
print "%s: %s" % (o.name, o.description)
class SetFrontPage(axiomatic.AxiomaticSubCommand):
"""
Command for selecting the site front page.
"""
def parseArgs(self, offering):
"""
Collect an installed offering's name.
"""
self["name"] = self.decodeCommandLine(offering)
def postOptions(self):
"""
Find an installed offering and set the site front page to its
application's front page.
"""
o = self.store.findFirst(
offering.InstalledOffering,
(offering.InstalledOffering.offeringName ==
self["name"]))
if o is None:
raise usage.UsageError("No offering of that name"
" is installed.")
fp = self.store.findUnique(publicweb.FrontPage)
fp.defaultApplication = o.application
class OfferingCommand(axiomatic.AxiomaticCommand):
name = "offering"
description = "View and accept the offerings of puny mortals."
subCommands = [
("install", None, Install, "Install an offering."),
("list", None, List, "List available offerings."),
("frontpage", None, SetFrontPage,
"Select an application for the front page."),
]
def getStore(self):
return self.parent.getStore()
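# Invocation sketch (not part of this module; the exact axiomatic flags are an
# assumption and depend on the installed Axiom version):
#
#   axiomatic --dbdir site.axiom offering list
#   axiomatic --dbdir site.axiom offering install <offering-name>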
| 28.697368 | 69 | 0.624484 | [
"MIT"
] | jonathanj/mantissa | axiom/plugins/offeringcmd.py | 2,181 | Python |
# -*- coding: utf-8 -*-
#
# RequestsThrottler documentation build configuration file, created by
# sphinx-quickstart on Tue Dec 31 13:40:59 2013.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import requests_throttler
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
sys.path.insert(0, os.path.abspath('..'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'RequestsThrottler'
copyright = u'2013, Lou Marvin Caraig'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = requests_throttler.__version__
# The full version, including alpha/beta/rc tags.
release = version
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'RequestsThrottlerdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'RequestsThrottler.tex', u'RequestsThrottler Documentation',
u'Lou Marvin Caraig', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'requeststhrottler', u'RequestsThrottler Documentation',
[u'Lou Marvin Caraig'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'RequestsThrottler', u'RequestsThrottler Documentation',
u'Lou Marvin Caraig', 'RequestsThrottler', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
autodoc_member_order = 'bysource'
| 31.758491 | 81 | 0.72279 | [
"Apache-2.0"
] | a-tal/requests-throttler | docs/conf.py | 8,416 | Python |
# -*- coding: utf-8 -*-
from datetime import date
import json
from operator import itemgetter
import os
import warnings
from django.core.urlresolvers import NoReverseMatch
from django.core.exceptions import ValidationError, ObjectDoesNotExist
from django.db import models
from django.db.models import signals, Model
from django.db.models.base import model_unpickle, ModelBase
from django.db.models.query_utils import DeferredAttribute
from django.utils import six, timezone
from django.utils.encoding import force_text, python_2_unicode_compatible
from django.utils.safestring import mark_safe
from django.utils.six.moves import filter
from django.utils.translation import ugettext_lazy as _
from cms.exceptions import DontUsePageAttributeWarning
from cms.models.placeholdermodel import Placeholder
from cms.plugin_rendering import PluginContext, render_plugin
from cms.utils import get_cms_setting
from cms.utils.helpers import reversion_register
from cms.utils.urlutils import admin_reverse
from treebeard.mp_tree import MP_Node
class BoundRenderMeta(object):
def __init__(self, meta):
self.index = 0
self.total = 1
self.text_enabled = getattr(meta, 'text_enabled', False)
class PluginModelBase(ModelBase):
"""
Metaclass for all CMSPlugin subclasses. This class should not be used for
any other type of models.
"""
def __new__(cls, name, bases, attrs):
# remove RenderMeta from the plugin class
attr_meta = attrs.pop('RenderMeta', None)
# create a new class (using the super-metaclass)
new_class = super(PluginModelBase, cls).__new__(cls, name, bases, attrs)
# if there is a RenderMeta in attrs, use this one
# else try to use the one from the superclass (if present)
meta = attr_meta or getattr(new_class, '_render_meta', None)
treebeard_view_fields = (f for f in new_class._meta.fields
if f.name in ('depth', 'numchild', 'path'))
for field in treebeard_view_fields:
field.editable = False
# set a new BoundRenderMeta to prevent leaking of state
new_class._render_meta = BoundRenderMeta(meta)
return new_class
@python_2_unicode_compatible
class CMSPlugin(six.with_metaclass(PluginModelBase, MP_Node)):
'''
The base class for a CMS plugin model. When defining a new custom plugin, you should
store plugin-instance specific information on a subclass of this class.
    An example for this would be to store the number of pictures to display in a gallery.
Two restrictions apply when subclassing this to use in your own models:
1. Subclasses of CMSPlugin *cannot be further subclassed*
2. Subclasses of CMSPlugin cannot define a "text" field.
'''
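    # Illustrative sketch only (not part of this module): a concrete plugin model
    # would subclass CMSPlugin and add its own fields, e.g.
    #
    #     class GalleryPluginModel(CMSPlugin):
    #         picture_count = models.PositiveIntegerField(default=5)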
placeholder = models.ForeignKey(Placeholder, editable=False, null=True)
parent = models.ForeignKey('self', blank=True, null=True, editable=False)
position = models.PositiveSmallIntegerField(_("position"), blank=True, null=True, editable=False)
language = models.CharField(_("language"), max_length=15, blank=False, db_index=True, editable=False)
plugin_type = models.CharField(_("plugin_name"), max_length=50, db_index=True, editable=False)
creation_date = models.DateTimeField(_("creation date"), editable=False, default=timezone.now)
changed_date = models.DateTimeField(auto_now=True)
child_plugin_instances = None
translatable_content_excluded_fields = []
class Meta:
app_label = 'cms'
class RenderMeta:
index = 0
total = 1
text_enabled = False
def __reduce__(self):
"""
Provide pickling support. Normally, this just dispatches to Python's
standard handling. However, for models with deferred field loading, we
need to do things manually, as they're dynamically created classes and
only module-level classes can be pickled by the default path.
"""
data = self.__dict__
# The obvious thing to do here is to invoke super().__reduce__()
# for the non-deferred case. Don't do that.
        # On Python 2.4, there is something weird with __reduce__,
# and as a result, the super call will cause an infinite recursion.
# See #10547 and #12121.
deferred_fields = [f for f in self._meta.fields
if isinstance(self.__class__.__dict__.get(f.attname),
DeferredAttribute)]
model = self._meta.proxy_for_model
return (model_unpickle, (model, deferred_fields), data)
def __str__(self):
return force_text(self.pk)
def get_plugin_name(self):
from cms.plugin_pool import plugin_pool
return plugin_pool.get_plugin(self.plugin_type).name
def get_short_description(self):
instance = self.get_plugin_instance()[0]
if instance is not None:
return force_text(instance)
return _("<Empty>")
def get_plugin_class(self):
from cms.plugin_pool import plugin_pool
return plugin_pool.get_plugin(self.plugin_type)
def get_plugin_class_instance(self, admin=None):
plugin_class = self.get_plugin_class()
# needed so we have the same signature as the original ModelAdmin
return plugin_class(plugin_class.model, admin)
def get_plugin_instance(self, admin=None):
'''
Given a plugin instance (usually as a CMSPluginBase), this method
returns a tuple containing:
instance - The instance AS THE APPROPRIATE SUBCLASS OF
CMSPluginBase and not necessarily just 'self', which is
often just a CMSPluginBase,
plugin - the associated plugin class instance (subclass
of CMSPlugin)
'''
plugin = self.get_plugin_class_instance(admin)
if hasattr(self, "_inst"):
return self._inst, plugin
if plugin.model != self.__class__: # and self.__class__ == CMSPlugin:
# (if self is actually a subclass, getattr below would break)
try:
instance = plugin.model.objects.get(cmsplugin_ptr=self)
instance._render_meta = self._render_meta
except (AttributeError, ObjectDoesNotExist):
instance = None
else:
instance = self
self._inst = instance
return self._inst, plugin
def render_plugin(self, context=None, placeholder=None, admin=False, processors=None):
instance, plugin = self.get_plugin_instance()
if instance and not (admin and not plugin.admin_preview):
if not placeholder or not isinstance(placeholder, Placeholder):
placeholder = instance.placeholder
placeholder_slot = placeholder.slot
current_app = context.current_app if context else None
context = PluginContext(context, instance, placeholder, current_app=current_app)
context = plugin.render(context, instance, placeholder_slot)
request = context.get('request', None)
page = None
if request:
page = request.current_page
context['allowed_child_classes'] = plugin.get_child_classes(placeholder_slot, page)
if plugin.render_plugin:
template = plugin._get_render_template(context, instance, placeholder)
if not template:
raise ValidationError("plugin has no render_template: %s" % plugin.__class__)
else:
template = None
return render_plugin(context, instance, placeholder, template, processors, context.current_app)
else:
from cms.middleware.toolbar import toolbar_plugin_processor
if processors and toolbar_plugin_processor in processors:
if not placeholder:
placeholder = self.placeholder
current_app = context.current_app if context else None
context = PluginContext(context, self, placeholder, current_app=current_app)
template = None
return render_plugin(context, self, placeholder, template, processors, context.current_app)
return ""
def get_media_path(self, filename):
pages = self.placeholder.page_set.all()
if pages.count():
return pages[0].get_media_path(filename)
else: # django 1.0.2 compatibility
today = date.today()
return os.path.join(get_cms_setting('PAGE_MEDIA_PATH'),
str(today.year), str(today.month), str(today.day), filename)
@property
def page(self):
warnings.warn(
"Don't use the page attribute on CMSPlugins! CMSPlugins are not "
"guaranteed to have a page associated with them!",
DontUsePageAttributeWarning)
return self.placeholder.page if self.placeholder_id else None
def get_instance_icon_src(self):
"""
Get src URL for instance's icon
"""
instance, plugin = self.get_plugin_instance()
return plugin.icon_src(instance) if instance else u''
def get_instance_icon_alt(self):
"""
Get alt text for instance's icon
"""
instance, plugin = self.get_plugin_instance()
return force_text(plugin.icon_alt(instance)) if instance else u''
def save(self, no_signals=False, *args, **kwargs):
if not self.depth:
if self.parent_id or self.parent:
self.parent.add_child(instance=self)
else:
if not self.position and not self.position == 0:
                    self.position = CMSPlugin.objects.filter(parent__isnull=True,
placeholder_id=self.placeholder_id).count()
self.add_root(instance=self)
return
super(CMSPlugin, self).save()
def reload(self):
return CMSPlugin.objects.get(pk=self.pk)
def move(self, target, pos=None):
super(CMSPlugin, self).move(target, pos)
return self.reload()
def set_base_attr(self, plugin):
for attr in ['parent_id', 'placeholder', 'language', 'plugin_type', 'creation_date', 'depth', 'path',
'numchild', 'pk', 'position']:
setattr(plugin, attr, getattr(self, attr))
def copy_plugin(self, target_placeholder, target_language, parent_cache, no_signals=False):
"""
Copy this plugin and return the new plugin.
The logic of this method is the following:
# get a new generic plugin instance
# assign the position in the plugin tree
# save it to let mptt/treebeard calculate the tree attributes
# then get a copy of the current plugin instance
# assign to it the id of the generic plugin instance above;
this will effectively change the generic plugin created above
into a concrete one
# copy the tree related attributes from the generic plugin to
the concrete one
# save the concrete plugin
# trigger the copy relations
# return the generic plugin instance
This copy logic is required because we don't know what the fields of
the real plugin are. By getting another instance of it at step 4 and
then overwriting its ID at step 5, the ORM will copy the custom
fields for us.
"""
try:
plugin_instance, cls = self.get_plugin_instance()
except KeyError: # plugin type not found anymore
return
# set up some basic attributes on the new_plugin
new_plugin = CMSPlugin()
new_plugin.placeholder = target_placeholder
# we assign a parent to our new plugin
parent_cache[self.pk] = new_plugin
if self.parent:
parent = parent_cache[self.parent_id]
parent = CMSPlugin.objects.get(pk=parent.pk)
new_plugin.parent_id = parent.pk
new_plugin.parent = parent
new_plugin.language = target_language
new_plugin.plugin_type = self.plugin_type
if no_signals:
from cms.signals import pre_save_plugins
signals.pre_save.disconnect(pre_save_plugins, sender=CMSPlugin, dispatch_uid='cms_pre_save_plugin')
signals.pre_save.disconnect(pre_save_plugins, sender=CMSPlugin)
new_plugin._no_reorder = True
new_plugin.save()
if plugin_instance:
# get a new instance so references do not get mixed up
plugin_instance = plugin_instance.__class__.objects.get(pk=plugin_instance.pk)
plugin_instance.pk = new_plugin.pk
plugin_instance.id = new_plugin.pk
plugin_instance.placeholder = target_placeholder
plugin_instance.cmsplugin_ptr = new_plugin
plugin_instance.language = target_language
plugin_instance.parent = new_plugin.parent
plugin_instance.depth = new_plugin.depth
plugin_instance.path = new_plugin.path
plugin_instance.numchild = new_plugin.numchild
plugin_instance._no_reorder = True
plugin_instance.save()
old_instance = plugin_instance.__class__.objects.get(pk=self.pk)
plugin_instance.copy_relations(old_instance)
if no_signals:
signals.pre_save.connect(pre_save_plugins, sender=CMSPlugin, dispatch_uid='cms_pre_save_plugin')
return new_plugin
def post_copy(self, old_instance, new_old_ziplist):
"""
Handle more advanced cases (eg Text Plugins) after the original is
copied
"""
pass
def copy_relations(self, old_instance):
"""
Handle copying of any relations attached to this plugin. Custom plugins
have to do this themselves!
"""
pass
def has_change_permission(self, request):
page = self.placeholder.page if self.placeholder else None
if page:
return page.has_change_permission(request)
elif self.placeholder:
return self.placeholder.has_change_permission(request)
return False
def get_position_in_placeholder(self):
"""
1 based position!
"""
return self.position + 1
def get_breadcrumb(self):
from cms.models import Page
model = self.placeholder._get_attached_model() or Page
breadcrumb = []
if not self.parent_id:
try:
url = force_text(
admin_reverse("%s_%s_edit_plugin" % (model._meta.app_label, model._meta.model_name),
args=[self.pk]))
except NoReverseMatch:
url = force_text(
admin_reverse("%s_%s_edit_plugin" % (Page._meta.app_label, Page._meta.model_name),
args=[self.pk]))
breadcrumb.append({'title': force_text(self.get_plugin_name()), 'url': url})
return breadcrumb
for parent in self.get_ancestors().reverse():
try:
url = force_text(
admin_reverse("%s_%s_edit_plugin" % (model._meta.app_label, model._meta.model_name),
args=[parent.pk]))
except NoReverseMatch:
url = force_text(
admin_reverse("%s_%s_edit_plugin" % (Page._meta.app_label, Page._meta.model_name),
args=[parent.pk]))
breadcrumb.append({'title': force_text(parent.get_plugin_name()), 'url': url})
return breadcrumb
def get_breadcrumb_json(self):
result = json.dumps(self.get_breadcrumb())
result = mark_safe(result)
return result
def num_children(self):
return self.numchild
def notify_on_autoadd(self, request, conf):
"""
Method called when we auto add this plugin via default_plugins in
CMS_PLACEHOLDER_CONF.
Some specific plugins may have some special stuff to do when they are
auto added.
"""
pass
def notify_on_autoadd_children(self, request, conf, children):
"""
Method called when we auto add children to this plugin via
default_plugins/<plugin>/children in CMS_PLACEHOLDER_CONF.
Some specific plugins may have some special stuff to do when we add
children to them. ie : TextPlugin must update its content to add HTML
tags to be able to see his children in WYSIWYG.
"""
pass
def get_translatable_content(self):
"""
Returns {field_name: field_contents} for translatable fields, where
field_contents > ''
"""
fields = (f for f in self._meta.fields
if isinstance(f, (models.CharField, models.TextField)) and
f.editable and not f.choices and
f.name not in self.translatable_content_excluded_fields)
return dict(filter(itemgetter(1),
((f.name, getattr(self, f.name)) for f in fields)))
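    # For example, a text-like plugin with CharField "title" and TextField "body"
    # would yield something like {"title": "...", "body": "..."} (field names are
    # hypothetical; only non-empty, editable, non-choice fields are included).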
def set_translatable_content(self, fields):
for field, value in fields.items():
setattr(self, field, value)
self.save()
return all(getattr(self, field) == value
for field, value in fields.items())
def delete(self, no_mp=False, *args, **kwargs):
if no_mp:
Model.delete(self, *args, **kwargs)
else:
super(CMSPlugin, self).delete(*args, **kwargs)
@property
def add_url(self):
"""
Returns a custom url to add plugin instances
"""
return None
@property
def edit_url(self):
"""
Returns a custom url to edit plugin instances
"""
return None
@property
def move_url(self):
"""
Returns a custom url to move plugin instances
"""
return None
@property
def delete_url(self):
"""
Returns a custom url to delete plugin instances
"""
return None
@property
def copy_url(self):
"""
Returns a custom url to copy plugin instances
"""
return None
reversion_register(CMSPlugin)
def get_plugin_media_path(instance, filename):
"""
Django 1.7 requires that unbound function used in fields' definitions are defined outside the parent class
(see https://docs.djangoproject.com/en/dev/topics/migrations/#serializing-values)
This function is used withing field definition:
file = models.FileField(_("file"), upload_to=get_plugin_media_path)
and it invokes the bounded method on the given instance at runtime
"""
return instance.get_media_path(filename)
| 39.557411 | 111 | 0.639751 | [
"BSD-3-Clause"
] | stefanw/django-cms | cms/models/pluginmodel.py | 18,948 | Python |
from numpy import *
from scipy import ndimage
class RansacModel(object):
""" Class for testing homography fit with ransac.py from
http://www.scipy.org/Cookbook/RANSAC"""
def __init__(self,debug=False):
self.debug = debug
def fit(self, data):
""" Fit homography to four selected correspondences. """
# transpose to fit H_from_points()
data = data.T
# from points
fp = data[:3,:4]
# target points
tp = data[3:,:4]
# fit homography and return
return H_from_points(fp,tp)
def get_error( self, data, H):
""" Apply homography to all correspondences,
return error for each transformed point. """
data = data.T
# from points
fp = data[:3]
# target points
tp = data[3:]
# transform fp
fp_transformed = dot(H,fp)
# normalize hom. coordinates
fp_transformed = normalize(fp_transformed)
# return error per point
return sqrt( sum((tp-fp_transformed)**2,axis=0) )
def H_from_ransac(fp,tp,model,maxiter=1000,match_theshold=10):
""" Robust estimation of homography H from point
correspondences using RANSAC (ransac.py from
http://www.scipy.org/Cookbook/RANSAC).
input: fp,tp (3*n arrays) points in hom. coordinates. """
from PCV.tools import ransac
# group corresponding points
data = vstack((fp,tp))
# compute H and return
H,ransac_data = ransac.ransac(data.T,model,4,maxiter,match_theshold,10,return_all=True)
return H,ransac_data['inliers']
def H_from_points(fp,tp):
""" Find homography H, such that fp is mapped to tp
using the linear DLT method. Points are conditioned
automatically. """
if fp.shape != tp.shape:
raise RuntimeError('number of points do not match')
# condition points (important for numerical reasons)
# --from points--
m = mean(fp[:2], axis=1)
maxstd = max(std(fp[:2], axis=1)) + 1e-9
C1 = diag([1/maxstd, 1/maxstd, 1])
C1[0][2] = -m[0]/maxstd
C1[1][2] = -m[1]/maxstd
fp = dot(C1,fp)
# --to points--
m = mean(tp[:2], axis=1)
maxstd = max(std(tp[:2], axis=1)) + 1e-9
C2 = diag([1/maxstd, 1/maxstd, 1])
C2[0][2] = -m[0]/maxstd
C2[1][2] = -m[1]/maxstd
tp = dot(C2,tp)
# create matrix for linear method, 2 rows for each correspondence pair
nbr_correspondences = fp.shape[1]
A = zeros((2*nbr_correspondences,9))
for i in range(nbr_correspondences):
A[2*i] = [-fp[0][i],-fp[1][i],-1,0,0,0,
tp[0][i]*fp[0][i],tp[0][i]*fp[1][i],tp[0][i]]
A[2*i+1] = [0,0,0,-fp[0][i],-fp[1][i],-1,
tp[1][i]*fp[0][i],tp[1][i]*fp[1][i],tp[1][i]]
U,S,V = linalg.svd(A)
H = V[8].reshape((3,3))
# decondition
H = dot(linalg.inv(C2),dot(H,C1))
# normalize and return
return H / H[2,2]
def Haffine_from_points(fp,tp):
""" Find H, affine transformation, such that
tp is affine transf of fp. """
if fp.shape != tp.shape:
raise RuntimeError('number of points do not match')
# condition points
# --from points--
m = mean(fp[:2], axis=1)
maxstd = max(std(fp[:2], axis=1)) + 1e-9
C1 = diag([1/maxstd, 1/maxstd, 1])
C1[0][2] = -m[0]/maxstd
C1[1][2] = -m[1]/maxstd
fp_cond = dot(C1,fp)
# --to points--
m = mean(tp[:2], axis=1)
C2 = C1.copy() #must use same scaling for both point sets
C2[0][2] = -m[0]/maxstd
C2[1][2] = -m[1]/maxstd
tp_cond = dot(C2,tp)
# conditioned points have mean zero, so translation is zero
A = concatenate((fp_cond[:2],tp_cond[:2]), axis=0)
U,S,V = linalg.svd(A.T)
# create B and C matrices as Hartley-Zisserman (2:nd ed) p 130.
tmp = V[:2].T
B = tmp[:2]
C = tmp[2:4]
tmp2 = concatenate((dot(C,linalg.pinv(B)),zeros((2,1))), axis=1)
H = vstack((tmp2,[0,0,1]))
# decondition
H = dot(linalg.inv(C2),dot(H,C1))
return H / H[2,2]
def normalize(points):
""" Normalize a collection of points in
homogeneous coordinates so that last row = 1. """
for row in points:
row /= points[-1]
return points
def make_homog(points):
""" Convert a set of points (dim*n array) to
homogeneous coordinates. """
return vstack((points,ones((1,points.shape[1]))))
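# Minimal usage sketch (illustrative; fp and tp are assumed to be 2 x n arrays of
# matched image points):
#
# >>> fp_h = make_homog(fp)
# >>> tp_h = make_homog(tp)
# >>> model = RansacModel()
# >>> H, inliers = H_from_ransac(fp_h, tp_h, model)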
| 28.349693 | 91 | 0.550747 | [
"BSD-2-Clause"
] | BeToMeve/PCV | PCV/geometry/homography.py | 4,621 | Python |
from django.core.paginator import InvalidPage, EmptyPage, Paginator, PageNotAnInteger
from django.http import HttpResponse
from django.shortcuts import render
from youtube.models import Youtube
# Create your views here.
def youtube(request):
is_login = None
try:
is_login = request.session['is_login']
except:
pass
if not is_login:
return render(request, "youtube.html", {})
elif request.session['user_level'] < 0:
return render(request, "youtube.html", {})
# user_list = models.UserDetails.objects.all().values('user_name','user_password')
youtubes = Youtube.objects.using('youtube_db').all().values('title', 'description', 'cover',
'uploadtime', 'channeltitle', 'video'
).order_by('-uploadtime')
paginator = Paginator(youtubes, 10)
res = []
if request.method == "GET":
        # Get the 'page' parameter from the URL; the home page is shown without a page parameter, so default to 1.
page = request.GET.get('page')
        if page is None:
            res_page = 1
        else:
            try:
                res_page = int(page)
            except ValueError:
                res_page = 1
try:
res = paginator.page(page).object_list
pages = paginator.page(page)
        # todo: be careful to catch the paginator exceptions in the right order
        except PageNotAnInteger:
            # If the requested page number is not an integer, return the first page.
            res = paginator.page(1).object_list
            pages = paginator.page(1)
            res_page = 1
        except EmptyPage:
            # If the requested page number is out of the valid range, return the last page of results.
            res = paginator.page(paginator.num_pages).object_list
            pages = paginator.page(paginator.num_pages)
        except InvalidPage:
            # If the requested page does not exist, return an error response.
            return HttpResponse('找不到页面的内容')
return render(request, "youtube.html", {"data": res, "pages": pages, "current_page": res_page})
| 38.3 | 102 | 0.56658 | [
"MIT"
] | amdone/DjangoWebs | PythonWeb/apps/youtube/views.py | 2,103 | Python |
# ----------------------------------------------------------------------------
# Copyright 2021 MonaLabs.io
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ----------------------------------------------------------------------------
"""
This module holds all authentication information and related functions. For a given
api_key, it can provide a new access token, refresh an expired access token or give
authentication status information.
"""
import os
import time
import datetime
from functools import wraps
from threading import Lock
import requests
from requests.models import Response
from .logger import get_logger
from .client_util import get_boolean_value_for_env_var
from .client_exceptions import MonaAuthenticationException
# A new token expires after 22 hours, REFRESH_TOKEN_SAFETY_MARGIN is the safety gap of
# time to refresh the token before it expires (i.e. - in case
# REFRESH_TOKEN_SAFETY_MARGIN = 2, and the token is about to expire in 2 hours or less,
# the client will automatically refresh the token to a new one).
REFRESH_TOKEN_SAFETY_MARGIN = datetime.timedelta(
hours=int(os.environ.get("REFRESH_TOKEN_SAFETY_MARGIN", 12))
)
AUTH_API_TOKEN_URL = os.environ.get(
"AUTH_API_TOKEN_URL",
"https://monalabs.frontegg.com/identity/resources/auth/v1/api-token",
)
REFRESH_TOKEN_URL = os.environ.get(
"REFRESH_TOKEN_URL",
"https://monalabs.frontegg.com/identity/resources/auth/v1/api-token/"
"token/refresh",
)
BASIC_HEADER = {"Content-Type": "application/json"}
TOKEN_EXPIRED_DATE_FORMAT = "%a, %d %b %Y %H:%M:%S GMT"
# Number of retries to authenticate in case the authentication server failed to
# respond.
NUM_OF_RETRIES_FOR_AUTHENTICATION = int(
os.environ.get("NUM_OF_RETRIES_FOR_AUTHENTICATION", 3)
)
# Time to wait (in seconds) between retries in case the authentication server failed to
# respond.
WAIT_TIME_FOR_AUTHENTICATION_RETRIES_SEC = int(
os.environ.get("WAIT_TIME_FOR_AUTHENTICATION_RETRIES_SEC", 2)
)
# Note: if RAISE_AUTHENTICATION_EXCEPTIONS = False and the client could not
# authenticate, every function call will return false.
# Use client.is_active() in order to check authentication status.
RAISE_AUTHENTICATION_EXCEPTIONS = get_boolean_value_for_env_var(
"RAISE_AUTHENTICATION_EXCEPTIONS", False
)
# This dict maps between every api_key (each api_key is saved only once in this dict)
# and its access token info (if the given api_key is authenticated it will contain the
# token itself, its expiration date and the key to refresh it, otherwise it will contain
# the errors that occurred while trying to authenticate).
API_KEYS_TO_TOKEN_DATA = {}
# Token data args names:
ERRORS = "errors"
EXPIRES = "expires"
ACCESS_TOKEN = "accessToken"
REFRESH_TOKEN = "refreshToken"
TIME_TO_REFRESH = "timeToRefresh"
IS_AUTHENTICATED = "isAuthenticated"
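# For illustration (values are made up): a successfully authenticated entry in
# API_KEYS_TO_TOKEN_DATA looks roughly like
#   {
#       "accessToken": "...", "refreshToken": "...",
#       "expires": "Mon, 01 Jan 2024 00:00:00 GMT",
#       "isAuthenticated": True,
#       "timeToRefresh": datetime.datetime(...),
#   }
# while a failed authentication keeps the server's "errors" list instead.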
# TODO(anat): consider initializing a different lock for each api_key.
authentication_lock = Lock()
def first_authentication(api_key, secret):
# TODO(anat): Support non-authenticated init.
if not is_authenticated(api_key):
# Make sure only one instance of the client (with the given api_key) can get a
# new token. That token will be shared between all instances that share an
# api_key.
with authentication_lock:
# The inner check is needed to avoid multiple redundant authentications.
if not is_authenticated(api_key):
response = _request_access_token_with_retries(api_key, secret)
API_KEYS_TO_TOKEN_DATA[api_key] = response.json()
# response.ok will be True if authentication was successful and
# false if not.
_set_api_key_authentication_status(api_key, response.ok)
_calculate_and_set_time_to_refresh(api_key)
# If the authentication failed, handle error and return false.
if not is_authenticated(api_key):
return _handle_authentications_error(
f"Mona's client could not authenticate. "
f"errors: {_get_error_string_from_token_info(api_key)}"
)
else:
get_logger().info(f"New client token info: {API_KEYS_TO_TOKEN_DATA[api_key]}")
return True
def _get_error_string_from_token_info(api_key):
error_list = _get_token_info_by_api_key(api_key, ERRORS)
return ", ".join(_get_token_info_by_api_key(api_key, ERRORS)) if error_list else ""
def _request_access_token_with_retries(api_key, secret):
return _get_auth_response_with_retries(
lambda: _request_access_token_once(api_key, secret)
)
def _request_refresh_token_with_retries(refresh_token_key):
return _get_auth_response_with_retries(
lambda: _request_refresh_token_once(refresh_token_key)
)
def _get_auth_response_with_retries(
response_generator,
num_of_retries=NUM_OF_RETRIES_FOR_AUTHENTICATION,
auth_wait_time_sec=WAIT_TIME_FOR_AUTHENTICATION_RETRIES_SEC,
):
"""
Sends an authentication request (first time/refresh) with a retry mechanism.
:param response_generator (lambda)
A function call that sends the wanted REST request.
:return: The response received from the authentication server.
"""
for i in range(num_of_retries + 1):
try:
response = response_generator()
# Check that response is json-serializable.
response.json()
# Got a response, log and break the retry loop.
get_logger().info(f"Got an authentication response after {i} retries.")
break
except Exception:
if i == num_of_retries:
# Retried to authenticate num_of_retries times and failed due to
# authentications server problems, return a response with the relevant
# info.
response = _create_a_bad_response(
'{"errors": ["Could not connect to authentication server",'
' "Number of retries: ' + str(i) + '"]}'
)
else:
# TODO(anat): support exponential growth in wait times between retries.
# Has more retries, sleep before trying again.
time.sleep(auth_wait_time_sec)
return response
def _request_access_token_once(api_key, secret):
"""
Sends an access token REST request and returns the response.
"""
return requests.request(
"POST",
AUTH_API_TOKEN_URL,
headers=BASIC_HEADER,
json={"clientId": api_key, "secret": secret},
)
def _request_refresh_token_once(refresh_token_key):
"""
Sends a refresh token REST request and returns the response.
"""
return requests.request(
"POST",
REFRESH_TOKEN_URL,
headers=BASIC_HEADER,
json={"refreshToken": refresh_token_key},
)
def _create_a_bad_response(content):
"""
:param: content (str)
The content of the response.
:return: A functioning bad REST response instance with the given content.
"""
response = Response()
response.status_code = 400
if type(content) is str:
# _content expect bytes.
response._content = bytes(content, "utf8")
return response
def get_current_token_by_api_key(api_key):
"""
:return: The given api_key's current access token.
"""
return _get_token_info_by_api_key(api_key, ACCESS_TOKEN)
def _get_token_info_by_api_key(api_key, token_data_arg):
"""
Returns the value of the wanted data for the given api_key.
Returns None if the api_key or the arg does not exist.
"""
return API_KEYS_TO_TOKEN_DATA.get(api_key, {}).get(token_data_arg)
def is_authenticated(api_key):
"""
:return: True if Mona's client holds a valid token and can communicate with Mona's
servers (or can refresh the token in order to), False otherwise.
"""
return _get_token_info_by_api_key(api_key, IS_AUTHENTICATED)
def _set_api_key_authentication_status(api_key, bool_value):
"""
Sets the IS_AUTHENTICATED arg in the token data dict of the given api_key, this
setter is only needed to spare redundant calls for authentication.
"""
API_KEYS_TO_TOKEN_DATA[api_key][IS_AUTHENTICATED] = bool_value
def _calculate_and_set_time_to_refresh(api_key):
"""
Calculates the time the access token needs to be refreshed and updates the relevant
api_key token data.
"""
if is_authenticated(api_key):
token_expires = datetime.datetime.strptime(
_get_token_info_by_api_key(api_key, EXPIRES), TOKEN_EXPIRED_DATE_FORMAT
)
# Set the found value in the clients token info.
API_KEYS_TO_TOKEN_DATA[api_key][TIME_TO_REFRESH] = (
token_expires - REFRESH_TOKEN_SAFETY_MARGIN
)
def _handle_authentications_error(error_message):
"""
Logs an error and raises MonaAuthenticationException if
RAISE_AUTHENTICATION_EXCEPTIONS is true, else returns false.
"""
get_logger().error(error_message)
if RAISE_AUTHENTICATION_EXCEPTIONS:
raise MonaAuthenticationException(error_message)
return False
def _should_refresh_token(api_key):
"""
:return: True if the token has expired, or is about to expire in
REFRESH_TOKEN_SAFETY_MARGIN hours or less, False otherwise.
"""
return (
_get_token_info_by_api_key(api_key, TIME_TO_REFRESH) < datetime.datetime.now()
)
def _refresh_token(api_key):
"""
Gets a new token and sets the needed fields.
"""
refresh_token_key = _get_token_info_by_api_key(api_key, REFRESH_TOKEN)
response = _request_refresh_token_with_retries(refresh_token_key)
authentications_response_info = response.json()
# Log or raise an error in case one occurred.
# The current client token info will not change so that on the next function call
# the client will try to refresh the token again.
if not response.ok:
return _handle_authentications_error(
f"Could not refresh token: {response.text}"
)
# Update the client's new token info.
API_KEYS_TO_TOKEN_DATA[api_key] = authentications_response_info
_set_api_key_authentication_status(api_key, True)
_calculate_and_set_time_to_refresh(api_key)
get_logger().info(
f"Refreshed access token, the new token info:"
f" {API_KEYS_TO_TOKEN_DATA[api_key]}"
)
return True
def get_basic_auth_header(api_key):
return {
"Content-Type": "application/json",
"Authorization": f"Bearer "
f"{get_current_token_by_api_key(api_key)}",
}
class Decorators(object):
@classmethod
def refresh_token_if_needed(cls, decorated):
"""
This decorator checks if the current client's access token is about to
be expired/already expired, and if so, updates to a new one.
"""
@wraps(decorated)
def inner(*args, **kwargs):
# args[0] is the current client instance.
api_key = args[0]._api_key
if not is_authenticated(api_key):
get_logger().warn("Mona's client is not authenticated")
return False
if _should_refresh_token(api_key):
with authentication_lock:
# The inner check is needed to avoid double token refresh.
if _should_refresh_token(api_key):
did_refresh_token = _refresh_token(api_key)
if not did_refresh_token:
# TODO(anat): Check if the current token is still valid to
# call the function anyway.
return False
return decorated(*args, **kwargs)
return inner
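# Usage sketch (illustrative only; not part of this module): a client would
# typically authenticate once and then rely on the refresh decorator.
#
#   from mona_sdk.authentication import (
#       first_authentication, get_basic_auth_header, Decorators)
#
#   if first_authentication(api_key, secret):
#       headers = get_basic_auth_header(api_key)  # carries the Bearer token
#       # ... methods decorated with Decorators.refresh_token_if_needed will
#       # refresh the token transparently when it is close to expiring ...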
| 35.72093 | 88 | 0.688232 | [
"Apache-2.0"
] | TalzMona/mona-sdk | mona_sdk/authentication.py | 12,288 | Python |
/usr/lib64/python3.5/_dummy_thread.py | 37 | 37 | 0.837838 | [
"MIT"
] | ndebuhr/thermo-state-solver | thermo-env/lib/python3.5/_dummy_thread.py | 37 | Python |
# Generated by Django 3.0.4 on 2020-04-08 18:23
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('core', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Tag',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=255)),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
| 28.416667 | 118 | 0.615836 | [
"MIT"
] | bertelsm/recipe-app-api | app/core/migrations/0002_tag.py | 682 | Python |
"""
Exceptions interface.
Exceptions allow for ignoring detected issues. This is commonly done to
suppress false positives or to ignore issues that a group has no intention
of addressing.
The two types of exceptions are a list of filenames or regular expressions.
If using filename matching for the exception it is required that the
reported issue contain the absolute path to the file containing the issue
to be ignored. The path for the issue is set in the tool plugin that
generates the issues.
"""
from __future__ import print_function
import fnmatch
import os
import re
import yaml
class Exceptions(object):
"""Interface for applying exceptions."""
def __init__(self, filename):
"""Initialize exceptions interface."""
with open(filename) as fname:
self.exceptions = yaml.safe_load(fname)
def get_ignore_packages(self):
"""Get list of packages to skip when scanning a workspace."""
ignore = []
if "ignore_packages" in self.exceptions and self.exceptions["ignore_packages"] is not None:
ignore = self.exceptions["ignore_packages"]
return ignore
def get_exceptions(self, package):
"""Get specific exceptions for given package."""
exceptions = {"file": [], "message_regex": []}
if "global" in self.exceptions and "exceptions" in self.exceptions["global"]:
global_exceptions = self.exceptions["global"]["exceptions"]
if "file" in global_exceptions:
exceptions["file"] += global_exceptions["file"]
if "message_regex" in global_exceptions:
exceptions["message_regex"] += global_exceptions["message_regex"]
# pylint: disable=too-many-boolean-expressions
if self.exceptions and "packages" in self.exceptions \
and self.exceptions["packages"] \
and package.name in self.exceptions["packages"] \
and self.exceptions["packages"][package.name] \
and "exceptions" in self.exceptions["packages"][package.name]:
package_exceptions = self.exceptions["packages"][package.name]["exceptions"]
if "file" in package_exceptions:
exceptions["file"] += package_exceptions["file"]
if "message_regex" in package_exceptions:
exceptions["message_regex"] += package_exceptions["message_regex"]
# pylint: enable=too-many-boolean-expressions
return exceptions
def filter_file_exceptions_early(self, package, file_list):
"""
Filter files based on file pattern exceptions list.
Only filters files which have tools=all, intended for use after the
discovery plugins have been run (so that Statick doesn't run the tool
plugins against files which will be ignored anyway).
"""
exceptions = self.get_exceptions(package)
to_remove = []
for filename in file_list:
removed = False
for exception in exceptions["file"]:
if exception["tools"] == 'all':
for pattern in exception["globs"]:
# Hack to avoid exceptions for everything on Travis CI.
fname = filename
prefix = '/home/travis/build/'
if pattern == '*/build/*' and fname.startswith(prefix):
fname = fname[len(prefix):]
if fnmatch.fnmatch(fname, pattern):
to_remove.append(filename)
removed = True
break
if removed:
break
file_list = [filename for filename in file_list if filename not in
to_remove]
return file_list
def filter_file_exceptions(self, package, exceptions, issues):
"""Filter issues based on file pattern exceptions list."""
for tool, tool_issues in list(issues.items()): # pylint: disable=too-many-nested-blocks
warning_printed = False
to_remove = []
for issue in tool_issues:
if not os.path.isabs(issue.filename):
if not warning_printed:
self.print_exception_warning(tool)
warning_printed = True
continue
rel_path = os.path.relpath(issue.filename, package.path)
for exception in exceptions:
if exception["tools"] == 'all' or tool in exception["tools"]:
for pattern in exception["globs"]:
# Hack to avoid exceptions for everything on Travis CI.
fname = issue.filename
prefix = '/home/travis/build/'
if pattern == '*/build/*' and fname.startswith(prefix):
fname = fname[len(prefix):]
if fnmatch.fnmatch(fname, pattern) or \
fnmatch.fnmatch(rel_path, pattern):
to_remove.append(issue)
issues[tool] = [issue for issue in tool_issues if issue not in
to_remove]
return issues
@classmethod
def filter_regex_exceptions(cls, exceptions, issues):
"""Filter issues based on message regex exceptions list."""
for exception in exceptions:
exception_re = exception["regex"]
exception_tools = exception["tools"]
compiled_re = re.compile(exception_re)
for tool, tool_issues in list(issues.items()):
to_remove = []
if exception_tools == "all" or tool in exception_tools:
for issue in tool_issues:
match = compiled_re.match(issue.message)
if match:
to_remove.append(issue)
issues[tool] = [issue for issue in tool_issues if issue not in
to_remove]
return issues
def filter_nolint(self, issues):
"""
Filter out lines that have an explicit NOLINT on them.
Sometimes the tools themselves don't properly filter these out if
there is a complex macro or something.
"""
for tool, tool_issues in list(issues.items()):
warning_printed = False
to_remove = []
for issue in tool_issues:
if not os.path.isabs(issue.filename):
if not warning_printed:
self.print_exception_warning(tool)
warning_printed = True
continue
                with open(issue.filename, "r", encoding="utf-8") as issue_file:
                    lines = issue_file.readlines()
line_number = int(issue.line_number) - 1
if line_number < len(lines) and "NOLINT" in lines[line_number]:
to_remove.append(issue)
issues[tool] = [issue for issue in tool_issues if issue not in to_remove]
return issues
def filter_issues(self, package, issues):
"""Filter issues based on exceptions list."""
exceptions = self.get_exceptions(package)
if exceptions["file"]:
issues = self.filter_file_exceptions(package,
exceptions["file"],
issues)
if exceptions["message_regex"]:
issues = self.filter_regex_exceptions(exceptions["message_regex"],
issues)
issues = self.filter_nolint(issues)
return issues
@classmethod
def print_exception_warning(cls, tool):
"""
Print warning about exception not being applied for an issue.
Warning will only be printed once per tool.
"""
print("[WARNING] File exceptions not available for {} tool "
"plugin due to lack of absolute paths for issues.".format(tool))
| 42.747368 | 99 | 0.571903 | [
"CC0-1.0"
] | axydes/statick | statick_tool/exceptions.py | 8,122 | Python |
#!/usr/bin/env python
# Decodes a given Caesar Cipher encoded string, provided on stdin
# Credit: http://stackoverflow.com/a/10792382
import sys
import string
# Frequency of each character (English)
frequency = dict(zip(string.ascii_uppercase,
[.0817,.0149,.0278,.0425,.1270,.0223,.0202,
.0609,.0697,.0015,.0077,.0402,.0241,.0675,
.0751,.0193,.0009,.0599,.0633,.0906,.0276,
.0098,.0236,.0015,.0197,.0007]))
# Create 26 translation tables, one for each rotation 0 through 25
# eg: ABCDEFGHIJKLMNOPQRSTUVWXYZ, BCDEFGHIJKLMNOPQRSTUVWXYZA...
trans_tables = [ string.maketrans(string.ascii_uppercase,
string.ascii_uppercase[i:]+string.ascii_uppercase[:i])
for i in range(26)]
def fitness(msg):
# Sum all the frequencies of each character in a string
return sum(frequency[char] for char in msg)
def all_shifts(msg):
# Try every rotation using the translation tables generated earlier
# Returns a generator with three values
msg = msg.upper()
for index, table in enumerate(trans_tables):
output = msg.translate(table)
yield fitness(output), index, output
# Main code - accept input from stdin, find rotation with highest fitness value
ciphertext = raw_input().replace(" ", "")
(score, index, output) = max(all_shifts(ciphertext))
print "Rotation by {:d} (key {:d}) yields decoded text:\n{}".format(index, 26-index, output)
| 36.926829 | 92 | 0.655878 | [
"MIT"
] | AkenSec/scripts | caesar.py | 1,514 | Python |
#The 'os' module lets us check whether a file exists.
import os
def mostrar_bienvenida():
print("Bienvenido a ... ")
print("""
_ __
____ ___ (_) ________ ____/ /
/ __ `__ \/ / / ___/ _ \/ __ /
/ / / / / / / / / / __/ /_/ /
/_/ /_/ /_/_/ /_/ \___/\__,_/
""")
def obtener_nombre():
nombre = input("Para empezar, dime como te llamas. ")
return nombre
def obtener_edad():
    agno = int(input("Para preparar tu perfil, dime en qué año naciste. "))
return 2017-agno-1
def obtener_estatura():
    estatura = float(input("Cuéntame más de ti, para agregarlo a tu perfil. ¿Cuánto mides? Dímelo en metros. "))
metros = int(estatura)
centimetros = int( (estatura - metros)*100 )
return (metros, centimetros)
def obtener_sexo():
sexo = input("Por favor, ingresa tu sexo (M=Masculino, F=Femenino): ")
while sexo != 'M' and sexo != 'F':
sexo = input("Por favor, ingresa tu sexo (M=Masculino, F=Femenino): ")
return sexo
def obtener_pais():
    pais = input("Indica tu país de nacimiento: ")
return pais
def obtener_lista_amigos():
linea = input("Muy bien. Finalmente, escribe una lista con los nombres de tus amigos, separados por una ',': ")
amigos = linea.split(",")
return amigos
def mostrar_perfil(nombre, edad, estatura_m, estatura_cm, sexo, pais, amigos):
print("--------------------------------------------------")
print("Nombre: ", nombre)
    print("Edad: ", edad, "años")
    print("Estatura: ", estatura_m, "m y ", estatura_cm, "centímetros")
    print("Sexo: ", sexo)
    print("País: ", pais)
print("Amigos: ", len(amigos))
print("--------------------------------------------------")
def opcion_menu():
print("Acciones disponibles:")
print(" 1. Escribir un mensaje")
print(" 2. Mostrar mi muro")
print(" 3. Mostrar los datos de perfil")
print(" 4. Actualizar el perfil de usuario")
print(" 0. Salir")
    opcion = int(input("Ingresa una opción: "))
    while opcion < 0 or opcion > 4:
        print("No conozco la opción que has ingresado. Inténtalo otra vez.")
        opcion = int(input("Ingresa una opción: "))
return opcion
def obtener_mensaje():
    mensaje = input("Ahora vamos a publicar un mensaje. ¿Qué piensas hoy? ")
return mensaje
def mostrar_mensaje(origen, mensaje):
print("--------------------------------------------------")
print(origen+":", mensaje)
print("--------------------------------------------------")
#Shows the messages that have been received on the user's wall
def mostrar_muro(muro):
print("------ MURO ("+str(len(muro))+" mensajes) ---------")
for mensaje in muro:
print(mensaje)
print("--------------------------------------------------")
#Publishes a message on the personal timeline and on each friend's timeline
def publicar_mensaje(origen, amigos, mensaje, muro):
print("--------------------------------------------------")
print(origen, "dice:", mensaje)
print("--------------------------------------------------")
    #Append the message to the end of the local timeline
muro.append(mensaje)
    #Append the published message to the end of each friend's file
for amigo in amigos:
if existe_archivo(amigo+".user"):
archivo = open(amigo+".user","a")
archivo.write(origen+":"+mensaje+"\n")
archivo.close()
def existe_archivo(ruta):
return os.path.isfile(ruta)
def leer_usuario(nombre):
archivo_usuario = open(nombre+".user","r")
nombre = archivo_usuario.readline().rstrip()
edad = int(archivo_usuario.readline())
estatura = float(archivo_usuario.readline())
estatura_m = int(estatura)
estatura_cm = int( (estatura - estatura_m)*100 )
sexo = archivo_usuario.readline().rstrip()
pais = archivo_usuario.readline().rstrip()
amigos = archivo_usuario.readline().rstrip().split(",")
estado = archivo_usuario.readline().rstrip()
    #Read the 'wall', i.e. every message that has been published on the user's timeline.
muro = []
mensaje = archivo_usuario.readline().rstrip()
while mensaje != "":
muro.append(mensaje)
mensaje = archivo_usuario.readline().rstrip()
    #Once the user's data has been read, do not forget to close the file
archivo_usuario.close()
return(nombre, edad, estatura_m, estatura_cm, sexo, pais, amigos, estado, muro)
def escribir_usuario(nombre, edad, estatura_m, estatura_cm, sexo, pais, amigos, estado, muro):
archivo_usuario = open(nombre+".user","w")
archivo_usuario.write(nombre+"\n")
archivo_usuario.write(str(edad)+"\n")
archivo_usuario.write(str(estatura_m + estatura_cm/100)+"\n")
archivo_usuario.write(sexo+"\n")
archivo_usuario.write(pais+"\n")
archivo_usuario.write(",".join(amigos)+"\n")
archivo_usuario.write(estado+"\n")
    #Write the 'timeline' to the file, right after the last status
for mensaje in muro:
archivo_usuario.write(mensaje+"\n")
    #Once all the user's data has been written to the file, do not forget to close it
archivo_usuario.close()
| 38.029197 | 117 | 0.597889 | [
"MIT"
] | rociof/community-starter-kit | s6red.py | 5,252 | Python |
# coding: utf-8
"""
Kubernetes
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
The version of the OpenAPI document: v1.20.7
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from kubernetes.client.configuration import Configuration
class IoXK8sClusterInfrastructureV1alpha4AWSClusterTemplateList(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'api_version': 'str',
'items': 'list[IoXK8sClusterInfrastructureV1alpha4AWSClusterTemplate]',
'kind': 'str',
'metadata': 'V1ListMeta'
}
attribute_map = {
'api_version': 'apiVersion',
'items': 'items',
'kind': 'kind',
'metadata': 'metadata'
}
def __init__(self, api_version=None, items=None, kind=None, metadata=None, local_vars_configuration=None): # noqa: E501
"""IoXK8sClusterInfrastructureV1alpha4AWSClusterTemplateList - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._api_version = None
self._items = None
self._kind = None
self._metadata = None
self.discriminator = None
if api_version is not None:
self.api_version = api_version
self.items = items
if kind is not None:
self.kind = kind
if metadata is not None:
self.metadata = metadata
@property
def api_version(self):
"""Gets the api_version of this IoXK8sClusterInfrastructureV1alpha4AWSClusterTemplateList. # noqa: E501
APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
:return: The api_version of this IoXK8sClusterInfrastructureV1alpha4AWSClusterTemplateList. # noqa: E501
:rtype: str
"""
return self._api_version
@api_version.setter
def api_version(self, api_version):
"""Sets the api_version of this IoXK8sClusterInfrastructureV1alpha4AWSClusterTemplateList.
APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
:param api_version: The api_version of this IoXK8sClusterInfrastructureV1alpha4AWSClusterTemplateList. # noqa: E501
:type: str
"""
self._api_version = api_version
@property
def items(self):
"""Gets the items of this IoXK8sClusterInfrastructureV1alpha4AWSClusterTemplateList. # noqa: E501
List of awsclustertemplates. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md # noqa: E501
:return: The items of this IoXK8sClusterInfrastructureV1alpha4AWSClusterTemplateList. # noqa: E501
:rtype: list[IoXK8sClusterInfrastructureV1alpha4AWSClusterTemplate]
"""
return self._items
@items.setter
def items(self, items):
"""Sets the items of this IoXK8sClusterInfrastructureV1alpha4AWSClusterTemplateList.
List of awsclustertemplates. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md # noqa: E501
:param items: The items of this IoXK8sClusterInfrastructureV1alpha4AWSClusterTemplateList. # noqa: E501
:type: list[IoXK8sClusterInfrastructureV1alpha4AWSClusterTemplate]
"""
if self.local_vars_configuration.client_side_validation and items is None: # noqa: E501
raise ValueError("Invalid value for `items`, must not be `None`") # noqa: E501
self._items = items
@property
def kind(self):
"""Gets the kind of this IoXK8sClusterInfrastructureV1alpha4AWSClusterTemplateList. # noqa: E501
Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
:return: The kind of this IoXK8sClusterInfrastructureV1alpha4AWSClusterTemplateList. # noqa: E501
:rtype: str
"""
return self._kind
@kind.setter
def kind(self, kind):
"""Sets the kind of this IoXK8sClusterInfrastructureV1alpha4AWSClusterTemplateList.
Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
:param kind: The kind of this IoXK8sClusterInfrastructureV1alpha4AWSClusterTemplateList. # noqa: E501
:type: str
"""
self._kind = kind
@property
def metadata(self):
"""Gets the metadata of this IoXK8sClusterInfrastructureV1alpha4AWSClusterTemplateList. # noqa: E501
:return: The metadata of this IoXK8sClusterInfrastructureV1alpha4AWSClusterTemplateList. # noqa: E501
:rtype: V1ListMeta
"""
return self._metadata
@metadata.setter
def metadata(self, metadata):
"""Sets the metadata of this IoXK8sClusterInfrastructureV1alpha4AWSClusterTemplateList.
:param metadata: The metadata of this IoXK8sClusterInfrastructureV1alpha4AWSClusterTemplateList. # noqa: E501
:type: V1ListMeta
"""
self._metadata = metadata
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, IoXK8sClusterInfrastructureV1alpha4AWSClusterTemplateList):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, IoXK8sClusterInfrastructureV1alpha4AWSClusterTemplateList):
return True
return self.to_dict() != other.to_dict()
| 38.927184 | 312 | 0.673027 | [
"Apache-2.0"
] | mariusgheorghies/python | kubernetes/client/models/io_xk8s_cluster_infrastructure_v1alpha4_aws_cluster_template_list.py | 8,019 | Python |
"""Test the submodule “batchelper.py”."""
import unittest
import audiorename
import helper
class TestBatch(unittest.TestCase):
def setUp(self):
self.singles = helper.gen_file_list(
['album', 'compilation'],
helper.get_testfile('files'),
)
self.album_broken = helper.gen_file_list(
['01', '03', '05', '07', '09', '11'],
helper.get_testfile('files', 'album_broken'),
)
self.album_broken_all = helper.gen_file_list(
['01', '02', '03', '04', '05', '06', '07', '08', '09', '10', '11'],
helper.get_testfile('files', 'album_broken'),
)
self.album_complete = helper.gen_file_list(
['01', '02', '03', '04', '05', '06', '07', '08', '09', '10', '11'],
helper.get_testfile('files', 'album_complete'),
)
self.album_incomplete = helper.gen_file_list(
['01', '02', '04', '05', '06', '07', '09', '10', '11'],
helper.get_testfile('files', 'album_incomplete'),
)
self.album_small = helper.gen_file_list(
['01', '02', '03', '04', '05'],
helper.get_testfile('files', 'album_small'),
)
self.all = self.singles + \
self.album_broken_all + \
self.album_complete + \
self.album_incomplete + \
self.album_small
def test_single(self):
single = helper.get_testfile('files', 'album.mp3')
with helper.Capturing() as output:
audiorename.execute('--dry-run', '--verbose', single)
self.assertEqual([single], helper.filter_source(output))
def test_folder_complete(self):
with helper.Capturing() as output:
audiorename.execute('--dry-run', '--verbose',
helper.get_testfile('files'))
self.assertEqual(self.all, helper.filter_source(output))
def test_folder_sub(self):
with helper.Capturing() as output:
audiorename.execute(
'--dry-run', '--verbose',
helper.get_testfile('files', 'album_complete')
)
self.assertEqual(self.album_complete, helper.filter_source(output))
def test_album_min(self):
with helper.Capturing() as output:
audiorename.execute(
'--dry-run', '--verbose',
'--album-min',
'7',
helper.get_testfile('files')
)
self.assertEqual(self.album_complete + self.album_incomplete,
helper.filter_source(output))
def test_album_min_no_match(self):
with helper.Capturing() as output:
audiorename.execute(
'--dry-run', '--verbose',
'--album-min',
'23',
helper.get_testfile('files')
)
self.assertEqual([], helper.filter_source(output))
def test_album_complete(self):
with helper.Capturing() as output:
audiorename.execute(
'--dry-run', '--verbose',
'--album-complete',
helper.get_testfile('files')
)
self.assertEqual(
self.singles + self.album_complete + self.album_small,
helper.filter_source(output)
)
def test_filter_all(self):
with helper.Capturing() as output:
audiorename.execute(
'--dry-run', '--verbose',
'--album-min',
'7',
'--album-complete',
helper.get_testfile('files')
)
self.assertEqual(self.album_complete, helper.filter_source(output))
class TestExtension(unittest.TestCase):
def setUp(self):
self.test_files = helper.get_testfile('mixed_formats')
def test_default(self):
with helper.Capturing() as output:
audiorename.execute(
'--dry-run', '--verbose',
self.test_files,
)
self.assertEqual(
helper.filter_source(output),
helper.gen_file_list(
['01.flac', '02.m4a', '03.mp3'],
self.test_files,
extension=False
)
)
def test_one(self):
with helper.Capturing() as output:
audiorename.execute(
'--dry-run', '--verbose',
'--extension',
'mp3,flac',
self.test_files
)
self.assertEqual(
helper.filter_source(output),
helper.gen_file_list(
['01.flac', '03.mp3'],
self.test_files,
extension=False
)
)
def test_two(self):
with helper.Capturing() as output:
audiorename.execute(
'--dry-run', '--verbose',
'--extension',
'mp3',
self.test_files
)
self.assertEqual(
helper.filter_source(output),
helper.gen_file_list(['03.mp3'], self.test_files,
extension=False)
)
class TestSkip(unittest.TestCase):
def setUp(self):
self.file = helper.get_testfile('broken', 'binary.mp3')
with helper.Capturing() as output:
audiorename.execute('-d', '--verbose', self.file)
self.output = helper.join(output)
def test_message(self):
self.assertTrue('Broken file' in self.output)
def test_file_in_message(self):
self.assertTrue('Broken file' in self.output)
self.assertTrue(self.file in self.output)
def test_continuation(self):
path = helper.get_testfile('broken')
with helper.Capturing() as output:
audiorename.execute(
'--dry-run', '--verbose',
path
)
output = helper.filter_source(output)
self.assertTrue(output[1])
if __name__ == '__main__':
unittest.main()
| 31.427083 | 79 | 0.521545 | [
"MIT"
] | Josef-Friedrich/audiorename | test/test_batch.py | 6,038 | Python |
#!/usr/bin/env python
from __future__ import print_function
import unittest
import sys
import hashlib
import os
import numpy as np
import cv2
import cv2.cv as cv
# Python 3 moved urlopen/urlretrieve to urllib.request
try:
    from urllib.request import urlopen, urlretrieve
except ImportError:
    from urllib import urlopen, urlretrieve
class OpenCVTests(unittest.TestCase):
# path to local repository folder containing 'samples' folder
repoPath = None
# github repository url
repoUrl = 'https://raw.github.com/opencv/opencv/2.4'
# path to local folder containing 'camera_calibration.tar.gz'
dataPath = None
# data url
dataUrl = 'http://docs.opencv.org/data'
depths = [ cv.IPL_DEPTH_8U, cv.IPL_DEPTH_8S, cv.IPL_DEPTH_16U, cv.IPL_DEPTH_16S, cv.IPL_DEPTH_32S, cv.IPL_DEPTH_32F, cv.IPL_DEPTH_64F ]
mat_types = [
cv.CV_8UC1,
cv.CV_8UC2,
cv.CV_8UC3,
cv.CV_8UC4,
cv.CV_8SC1,
cv.CV_8SC2,
cv.CV_8SC3,
cv.CV_8SC4,
cv.CV_16UC1,
cv.CV_16UC2,
cv.CV_16UC3,
cv.CV_16UC4,
cv.CV_16SC1,
cv.CV_16SC2,
cv.CV_16SC3,
cv.CV_16SC4,
cv.CV_32SC1,
cv.CV_32SC2,
cv.CV_32SC3,
cv.CV_32SC4,
cv.CV_32FC1,
cv.CV_32FC2,
cv.CV_32FC3,
cv.CV_32FC4,
cv.CV_64FC1,
cv.CV_64FC2,
cv.CV_64FC3,
cv.CV_64FC4,
]
mat_types_single = [
cv.CV_8UC1,
cv.CV_8SC1,
cv.CV_16UC1,
cv.CV_16SC1,
cv.CV_32SC1,
cv.CV_32FC1,
cv.CV_64FC1,
]
def depthsize(self, d):
return { cv.IPL_DEPTH_8U : 1,
cv.IPL_DEPTH_8S : 1,
cv.IPL_DEPTH_16U : 2,
cv.IPL_DEPTH_16S : 2,
cv.IPL_DEPTH_32S : 4,
cv.IPL_DEPTH_32F : 4,
cv.IPL_DEPTH_64F : 8 }[d]
def get_sample(self, filename, iscolor = cv.CV_LOAD_IMAGE_COLOR):
if not filename in self.image_cache:
filedata = None
if OpenCVTests.repoPath is not None:
candidate = OpenCVTests.repoPath + '/' + filename
if os.path.isfile(candidate):
with open(candidate, 'rb') as f:
filedata = f.read()
if filedata is None:
                filedata = urlopen(OpenCVTests.repoUrl + '/' + filename).read()
imagefiledata = cv.CreateMatHeader(1, len(filedata), cv.CV_8UC1)
cv.SetData(imagefiledata, filedata, len(filedata))
self.image_cache[filename] = cv.DecodeImageM(imagefiledata, iscolor)
return self.image_cache[filename]
def get_data(self, filename, urlbase):
if (not os.path.isfile(filename)):
if OpenCVTests.dataPath is not None:
candidate = OpenCVTests.dataPath + '/' + filename
if os.path.isfile(candidate):
return candidate
            urlretrieve(urlbase + '/' + filename, filename)
return filename
def setUp(self):
self.image_cache = {}
def snap(self, img):
self.snapL([img])
def snapL(self, L):
for i,img in enumerate(L):
cv.NamedWindow("snap-%d" % i, 1)
cv.ShowImage("snap-%d" % i, img)
cv.WaitKey()
cv.DestroyAllWindows()
def hashimg(self, im):
""" Compute a hash for an image, useful for image comparisons """
return hashlib.md5(im.tostring()).digest()
class NewOpenCVTests(unittest.TestCase):
# path to local repository folder containing 'samples' folder
repoPath = None
extraTestDataPath = None
# github repository url
repoUrl = 'https://raw.github.com/opencv/opencv/master'
def get_sample(self, filename, iscolor = cv2.IMREAD_COLOR):
if not filename in self.image_cache:
filedata = None
if NewOpenCVTests.repoPath is not None:
candidate = NewOpenCVTests.repoPath + '/' + filename
if os.path.isfile(candidate):
with open(candidate, 'rb') as f:
filedata = f.read()
if NewOpenCVTests.extraTestDataPath is not None:
candidate = NewOpenCVTests.extraTestDataPath + '/' + filename
if os.path.isfile(candidate):
with open(candidate, 'rb') as f:
filedata = f.read()
if filedata is None:
                return None  # filedata = urlopen(NewOpenCVTests.repoUrl + '/' + filename).read()
self.image_cache[filename] = cv2.imdecode(np.fromstring(filedata, dtype=np.uint8), iscolor)
return self.image_cache[filename]
def setUp(self):
cv2.setRNGSeed(10)
self.image_cache = {}
def hashimg(self, im):
""" Compute a hash for an image, useful for image comparisons """
return hashlib.md5(im.tostring()).hexdigest()
if sys.version_info[:2] == (2, 6):
def assertLess(self, a, b, msg=None):
if not a < b:
self.fail('%s not less than %s' % (repr(a), repr(b)))
def assertLessEqual(self, a, b, msg=None):
if not a <= b:
self.fail('%s not less than or equal to %s' % (repr(a), repr(b)))
def assertGreater(self, a, b, msg=None):
if not a > b:
self.fail('%s not greater than %s' % (repr(a), repr(b)))
def intersectionRate(s1, s2):
x1, y1, x2, y2 = s1
s1 = np.array([[x1, y1], [x2,y1], [x2, y2], [x1, y2]])
x1, y1, x2, y2 = s2
s2 = np.array([[x1, y1], [x2,y1], [x2, y2], [x1, y2]])
area, intersection = cv2.intersectConvexConvex(s1, s2)
return 2 * area / (cv2.contourArea(s1) + cv2.contourArea(s2))
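# Note: two identical rectangles give a rate of 1.0 and disjoint rectangles give 0.0,
# since the shared area is normalised by the sum of both rectangle areas.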
def isPointInRect(p, rect):
    return rect[0] <= p[0] and rect[1] <= p[1] and p[0] <= rect[2] and p[1] <= rect[3]
| 31.860215 | 139 | 0.570368 | [
"BSD-3-Clause"
] | 552103917/opcv3.4 | modules/python/test/tests_common.py | 5,926 | Python |
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PyPycmd(PythonPackage):
"""pycmd is a collection of command line tools for helping with Python
development."""
homepage = "https://pypi.org/project/pycmd/"
url = "https://pypi.io/packages/source/p/pycmd/pycmd-1.2.tar.gz"
version('1.2', sha256='adc1976c0106919e9338db20102b91009256dcfec924a66928d7297026f72477')
depends_on('[email protected]:', type=('build', 'run'))
depends_on('py-setuptools', type='build')
| 33.25 | 93 | 0.718797 | [
"ECL-2.0",
"Apache-2.0",
"MIT"
] | 0t1s1/spack | var/spack/repos/builtin/packages/py-pycmd/package.py | 665 | Python |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Generates a dictionary file from the training data.
## Examples
```bash
# learn the vocabulary from one task, then train on another task.
parlai build_dict -t convai2 --dict-file premade.dict
parlai train_model -t squad --dict-file premade.dict -m seq2seq
```
"""
from parlai.core.dict import DictionaryAgent
from parlai.core.params import ParlaiParser, str2class
from parlai.core.worlds import create_task
from parlai.utils.misc import TimeLogger
from parlai.utils.distributed import is_distributed
from parlai.core.script import ParlaiScript, register_script
from parlai.utils.io import PathManager
import parlai.utils.logging as logging
import copy
import tqdm
def setup_args(parser=None, hidden=True):
if parser is None:
parser = ParlaiParser(True, True, 'Build a dictionary.')
dict_loop = parser.add_argument_group('Dictionary Loop Arguments')
dict_loop.add_argument(
'--dict-maxexs',
default=-1,
type=int,
help='max number of examples to build dict on',
hidden=hidden,
)
dict_loop.add_argument(
'--dict-include-valid',
default=False,
type='bool',
help='Include validation set in dictionary building ' 'for task.',
hidden=hidden,
)
dict_loop.add_argument(
'--dict-include-test',
default=False,
type='bool',
help='Include test set in dictionary building for task.',
hidden=hidden,
)
dict_loop.add_argument(
'-ltim', '--log-every-n-secs', type=float, default=10, hidden=hidden
)
DictionaryAgent.add_cmdline_args(parser)
return parser
def build_dict(opt, skip_if_built=False):
if isinstance(opt, ParlaiParser):
logging.error('Should be passed opt not Parser')
opt = opt.parse_args()
if not opt.get('dict_file'):
logging.error(
'Tried to build dictionary but `--dict-file` is not set. Set '
'this param so the dictionary can be saved.'
)
return
if skip_if_built and PathManager.exists(opt['dict_file']):
# Dictionary already built, skip all loading or setup
logging.debug("dictionary already built.")
return None
if opt.get('dict_class'):
# Custom dictionary class
dictionary = str2class(opt['dict_class'])(opt)
else:
# Default dictionary class
dictionary = DictionaryAgent(opt)
if PathManager.exists(opt['dict_file']) or (
hasattr(dictionary, 'is_prebuilt') and dictionary.is_prebuilt()
):
# Dictionary already built, return loaded dictionary agent
logging.debug("dictionary already built.")
return dictionary
if is_distributed():
raise ValueError('Dictionaries should be pre-built before distributed train.')
ordered_opt = copy.deepcopy(opt)
cnt = 0
# we use train set to build dictionary
ordered_opt['batchsize'] = 1
# Set this to none so that image features are not calculated when Teacher is
# instantiated while building the dict
ordered_opt['image_mode'] = 'no_image_model'
ordered_opt.log()
datatypes = ['train:ordered:stream']
if opt.get('dict_include_valid'):
datatypes.append('valid:stream')
if opt.get('dict_include_test'):
datatypes.append('test:stream')
cnt = 0
for dt in datatypes:
ordered_opt['datatype'] = dt
world_dict = create_task(ordered_opt, dictionary)
# pass examples to dictionary
log_time = TimeLogger()
total = world_dict.num_examples()
if opt['dict_maxexs'] >= 0:
total = min(total, opt['dict_maxexs'])
log_every_n_secs = opt.get('log_every_n_secs', None)
if log_every_n_secs:
pbar = tqdm.tqdm(
total=total, desc='Building dictionary', unit='ex', unit_scale=True
)
else:
pbar = None
while not world_dict.epoch_done():
cnt += 1
if cnt > opt['dict_maxexs'] and opt['dict_maxexs'] >= 0:
logging.info('Processed {} exs, moving on.'.format(opt['dict_maxexs']))
# don't wait too long...
break
world_dict.parley()
if pbar:
pbar.update(1)
if pbar:
pbar.close()
dictionary.save(opt['dict_file'], sort=True)
logging.info(
f'dictionary built with {len(dictionary)} tokens '
f'in {log_time.total_time():.1f}s'
)
return dictionary
@register_script('build_dict', hidden=True)
class BuildDict(ParlaiScript):
@classmethod
def setup_args(cls):
return setup_args(hidden=False)
def run(self):
return build_dict(self.opt)
if __name__ == '__main__':
BuildDict.main()
| 31.339623 | 87 | 0.645194 | [
"MIT"
] | 418sec/ParlAI | parlai/scripts/build_dict.py | 4,983 | Python |
import pytest
from lcs import Perception
from lcs.agents.acs2 import Configuration, ClassifiersList, \
Classifier
class TestClassifierList:
@pytest.fixture
def cfg(self):
return Configuration(8, 8)
def test_should_deny_insertion_illegal_types(self, cfg):
population = ClassifiersList()
with pytest.raises(TypeError):
# Try to insert an integer instead of classifier object
population.append(4)
def test_should_insert_classifier(self, cfg):
# given
population = ClassifiersList()
cl = Classifier(cfg=cfg)
# when
population.append(cl)
# then
assert len(population) == 1
def test_should_form_match_set(self, cfg):
# given
cl_1 = Classifier(cfg=cfg)
cl_2 = Classifier(condition='1###0###', cfg=cfg)
cl_3 = Classifier(condition='0###1###', cfg=cfg)
population = ClassifiersList(*[cl_1, cl_2, cl_3])
p0 = Perception('11110000')
# when
match_set = ClassifiersList.form_match_set(population, p0)
# then
assert len(match_set) == 2
assert cl_1 in match_set
assert cl_2 in match_set
def test_should_form_action_set(self, cfg):
# given
cl_1 = Classifier(action=0, cfg=cfg)
cl_2 = Classifier(action=0, cfg=cfg)
cl_3 = Classifier(action=1, cfg=cfg)
population = ClassifiersList(*[cl_1, cl_2, cl_3])
action = 0
# when
action_set = ClassifiersList.form_action_set(population, action)
# then
assert len(action_set) == 2
assert cl_1 in action_set
assert cl_2 in action_set
def test_should_expand(self, cfg):
# given
cl_1 = Classifier(action=0, cfg=cfg)
cl_2 = Classifier(action=1, numerosity=2, cfg=cfg)
cl_3 = Classifier(action=2, numerosity=3, cfg=cfg)
population = ClassifiersList(*[cl_1, cl_2, cl_3])
# when
expanded = population.expand()
# then
assert len(expanded) == 6
assert cl_1 in expanded
assert cl_2 in expanded
assert cl_3 in expanded
def test_should_calculate_maximum_fitness(self, cfg):
# given
population = ClassifiersList()
# when & then
# C1 - does not anticipate change
c1 = Classifier(cfg=cfg)
population.append(c1)
assert 0.0 == population.get_maximum_fitness()
# when & then
# C2 - does anticipate some change
c2 = Classifier(effect='1###0###',
reward=0.25,
cfg=cfg)
population.append(c2)
assert 0.125 == population.get_maximum_fitness()
# when & then
# C3 - does anticipate change and is quite good
c3 = Classifier(effect='1#######',
quality=0.8,
reward=5,
cfg=cfg)
population.append(c3)
assert 4 == population.get_maximum_fitness()
def test_should_apply_reinforcement_learning(self, cfg):
# given
cl = Classifier(reward=34.29, immediate_reward=11.29, cfg=cfg)
population = ClassifiersList(*[cl])
# when
ClassifiersList.apply_reinforcement_learning(
population, 0, 28.79, cfg.beta, cfg.gamma)
# then
assert abs(33.94 - cl.r) < 0.1
assert abs(10.74 - cl.ir) < 0.1
def test_should_form_match_set_backwards(self, cfg):
# given
population = ClassifiersList()
situation = Perception('11110000')
# C1 - general condition
c1 = Classifier(cfg=cfg)
# C2 - matching
c2 = Classifier(condition='0##0####', effect='1##1####', cfg=cfg)
# C3 - non-matching
c3 = Classifier(condition='0###1###', effect='1######0', cfg=cfg)
# C4 - non-matching
c4 = Classifier(condition='0###0###', effect='1###1###', cfg=cfg)
population.append(c1)
population.append(c2)
population.append(c3)
population.append(c4)
# when
match_set = ClassifiersList.form_match_set_backwards(population,
situation)
# then
assert 2 == len(match_set)
assert c1 in match_set
assert c2 in match_set
| 29.013245 | 73 | 0.575439 | [
"MIT"
] | Gab0/pyalcs | tests/lcs/agents/acs2/test_ClassifierList.py | 4,381 | Python |
from django.contrib import admin
from adminsortable.admin import SortableAdmin, SortableTabularInline
from .models import SearchTermCategory, SearchTermItem
class SearchTermItemInline(SortableTabularInline):
model = SearchTermItem
extra = 1
@admin.register(SearchTermCategory)
class SearchTermCategoryAdmin(SortableAdmin):
list_display = ('name', 'description', 'order_number')
ordering = ('order_number',)
inlines = [SearchTermItemInline]
@admin.register(SearchTermItem)
class SearchTermItemAdmin(SortableAdmin):
list_display = (
'slug', 'name', 'category', 'call_to_action_type', 'call_to_action_text', 'description', 'order_number'
)
list_filter = ('category', 'call_to_action_type')
search_fields = ('name', 'slug', 'description')
ordering = ('order_number',)
| 29.285714 | 111 | 0.743902 | [
"Apache-2.0"
] | invinst/CPDBv2_backend | cpdb/search_terms/admin.py | 820 | Python |
# Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved.
import csv
import os
import numpy as np
import PIL.Image
class ObjectType:
Dontcare, Car, Van, Truck, Bus, Pickup, VehicleWithTrailer, SpecialVehicle,\
Person, Person_fa, Person_unsure, People, Cyclist, Tram, Person_Sitting,\
Misc = range(16)
def __init__(self):
pass
class Bbox:
def __init__(self, x_left=0, y_top=0, x_right=0, y_bottom=0):
self.xl = x_left
self.yt = y_top
self.xr = x_right
self.yb = y_bottom
def area(self):
return (self.xr - self.xl) * (self.yb - self.yt)
def width(self):
return self.xr - self.xl
def height(self):
return self.yb - self.yt
def get_array(self):
return [self.xl, self.yt, self.xr, self.yb]
class GroundTruthObj:
""" This class is the data ground-truth
#Values Name Description
----------------------------------------------------------------------------
1 type Class ID
1 truncated Float from 0 (non-truncated) to 1 (truncated), where
truncated refers to the object leaving image boundaries.
-1 corresponds to a don't care region.
1 occluded Integer (-1,0,1,2) indicating occlusion state:
-1 = unknown, 0 = fully visible,
1 = partly occluded, 2 = largely occluded
1 alpha Observation angle of object, ranging [-pi..pi]
4 bbox 2D bounding box of object in the image (0-based index):
contains left, top, right, bottom pixel coordinates
3 dimensions 3D object dimensions: height, width, length (in meters)
3 location 3D object location x,y,z in camera coordinates (in meters)
1 rotation_y Rotation ry around Y-axis in camera coordinates [-pi..pi]
1 score Only for results: Float, indicating confidence in
detection, needed for p/r curves, higher is better.
Here, 'DontCare' labels denote regions in which objects have not been labeled,
for example because they have been too far away from the laser scanner.
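    An illustrative (made-up) example of one space-delimited label line in this layout,
    i.e. type, truncated, occluded, alpha, bbox (4), dimensions (3), location (3), rotation_y:
        Car 0.00 0 -1.58 587.01 173.33 614.12 200.12 1.65 1.67 3.64 -0.65 1.71 46.70 -1.59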
"""
# default class mappings
OBJECT_TYPES = {
'bus': ObjectType.Bus,
'car': ObjectType.Car,
'cyclist': ObjectType.Cyclist,
'pedestrian': ObjectType.Person,
'people': ObjectType.People,
'person': ObjectType.Person,
'person_sitting': ObjectType.Person_Sitting,
'person-fa': ObjectType.Person_fa,
'person?': ObjectType.Person_unsure,
'pickup': ObjectType.Pickup,
'misc': ObjectType.Misc,
'special-vehicle': ObjectType.SpecialVehicle,
'tram': ObjectType.Tram,
'truck': ObjectType.Truck,
'van': ObjectType.Van,
'vehicle-with-trailer': ObjectType.VehicleWithTrailer}
def __init__(self):
self.stype = ''
self.truncated = 0
self.occlusion = 0
self.angle = 0
self.height = 0
self.width = 0
self.length = 0
self.locx = 0
self.locy = 0
self.locz = 0
self.roty = 0
self.bbox = Bbox()
self.object = ObjectType.Dontcare
@classmethod
def lmdb_format_length(cls):
"""
width of an LMDB datafield returned by the gt_to_lmdb_format function.
:return:
"""
return 16
def gt_to_lmdb_format(self):
"""
For storage of a bbox ground truth object into a float32 LMDB.
Sort-by attribute is always the last value in the array.
"""
result = [
# bbox in x,y,w,h format:
self.bbox.xl,
self.bbox.yt,
self.bbox.xr - self.bbox.xl,
self.bbox.yb - self.bbox.yt,
# alpha angle:
self.angle,
# class number:
self.object,
0,
# Y axis rotation:
self.roty,
# bounding box attributes:
self.truncated,
self.occlusion,
# object dimensions:
self.length,
self.width,
self.height,
self.locx,
self.locy,
# depth (sort-by attribute):
self.locz,
]
        assert len(result) == self.lmdb_format_length()
return result
def set_type(self):
self.object = self.OBJECT_TYPES.get(self.stype, ObjectType.Dontcare)
class GroundTruth:
"""
this class loads the ground truth
"""
def __init__(self,
label_dir,
label_ext='.txt',
label_delimiter=' ',
min_box_size=None,
class_mappings=None):
self.label_dir = label_dir
self.label_ext = label_ext # extension of label files
self.label_delimiter = label_delimiter # space is used as delimiter in label files
self._objects_all = dict() # positive bboxes across images
self.min_box_size = min_box_size
if class_mappings is not None:
GroundTruthObj.OBJECT_TYPES = class_mappings
def update_objects_all(self, _key, _bboxes):
if _bboxes:
self._objects_all[_key] = _bboxes
else:
self._objects_all[_key] = []
def load_gt_obj(self):
""" load bbox ground truth from files either via the provided label directory or list of label files"""
files = os.listdir(self.label_dir)
files = list(filter(lambda x: x.endswith(self.label_ext), files))
if len(files) == 0:
raise RuntimeError('error: no label files found in %s' % self.label_dir)
for label_file in files:
objects_per_image = list()
with open(os.path.join(self.label_dir, label_file), 'rt') as flabel:
for row in csv.reader(flabel, delimiter=self.label_delimiter):
if len(row) == 0:
# This can happen when you open an empty file
continue
if len(row) < 15:
raise ValueError('Invalid label format in "%s"'
% os.path.join(self.label_dir, label_file))
# load data
gt = GroundTruthObj()
gt.stype = row[0].lower()
gt.truncated = float(row[1])
gt.occlusion = int(row[2])
gt.angle = float(row[3])
gt.bbox.xl = float(row[4])
gt.bbox.yt = float(row[5])
gt.bbox.xr = float(row[6])
gt.bbox.yb = float(row[7])
gt.height = float(row[8])
gt.width = float(row[9])
gt.length = float(row[10])
gt.locx = float(row[11])
gt.locy = float(row[12])
gt.locz = float(row[13])
gt.roty = float(row[14])
gt.set_type()
box_dimensions = [gt.bbox.xr - gt.bbox.xl, gt.bbox.yb - gt.bbox.yt]
if self.min_box_size is not None:
if not all(x >= self.min_box_size for x in box_dimensions):
# object is smaller than threshold => set to "DontCare"
gt.stype = ''
gt.object = ObjectType.Dontcare
objects_per_image.append(gt)
key = os.path.splitext(label_file)[0]
self.update_objects_all(key, objects_per_image)
@property
def objects_all(self):
return self._objects_all
# Pad the bbox array with zero rows up to a fixed size for LMDB storage.
def pad_bbox(arr, max_bboxes=64, bbox_width=16):
if arr.shape[0] > max_bboxes:
raise ValueError(
'Too many bounding boxes (%d > %d)' % arr.shape[0], max_bboxes
)
# fill remainder with zeroes:
data = np.zeros((max_bboxes + 1, bbox_width), dtype='float')
# number of bounding boxes:
data[0][0] = arr.shape[0]
# width of a bounding box:
data[0][1] = bbox_width
# bounding box data. Merge nothing if no bounding boxes exist.
if arr.shape[0] > 0:
data[1:1 + arr.shape[0]] = arr
return data
def bbox_to_array(arr, label=0, max_bboxes=64, bbox_width=16):
"""
Converts a 1-dimensional bbox array to an image-like
3-dimensional array CHW array
"""
arr = pad_bbox(arr, max_bboxes, bbox_width)
return arr[np.newaxis, :, :]
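# Example (shapes only): with the defaults, an (N, 16) array of boxes becomes a
# (1, 65, 16) array whose row 0 stores [N, 16, 0, ...] and whose rows 1..N hold
# the per-object records produced by GroundTruthObj.gt_to_lmdb_format().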
def bbox_overlap(abox, bbox):
# the abox box
x11 = abox[0]
y11 = abox[1]
x12 = abox[0] + abox[2] - 1
y12 = abox[1] + abox[3] - 1
# the closer box
x21 = bbox[0]
y21 = bbox[1]
x22 = bbox[0] + bbox[2] - 1
y22 = bbox[1] + bbox[3] - 1
overlap_box_x2 = min(x12, x22)
overlap_box_x1 = max(x11, x21)
overlap_box_y2 = min(y12, y22)
overlap_box_y1 = max(y11, y21)
# make sure we preserve any non-bbox components
overlap_box = list(bbox)
overlap_box[0] = overlap_box_x1
overlap_box[1] = overlap_box_y1
overlap_box[2] = overlap_box_x2 - overlap_box_x1 + 1
overlap_box[3] = overlap_box_y2 - overlap_box_y1 + 1
xoverlap = max(0, overlap_box_x2 - overlap_box_x1)
yoverlap = max(0, overlap_box_y2 - overlap_box_y1)
overlap_pix = xoverlap * yoverlap
return overlap_pix, overlap_box
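# With boxes given as [x, y, w, h], bbox_overlap([0, 0, 4, 4], [2, 2, 4, 4]) returns
# (1, [2, 2, 2, 2]); note that overlap_pix uses x2 - x1 without the +1 applied to the
# overlap_box width and height.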
def pad_image(img, padding_image_height, padding_image_width):
"""
pad a single image to the specified dimensions
"""
src_width = img.size[0]
src_height = img.size[1]
if padding_image_width < src_width:
raise ValueError("Source image width %d is greater than padding width %d" % (src_width, padding_image_width))
if padding_image_height < src_height:
raise ValueError("Source image height %d is greater than padding height %d" %
(src_height, padding_image_height))
padded_img = PIL.Image.new(
img.mode,
(padding_image_width, padding_image_height),
"black")
padded_img.paste(img, (0, 0)) # copy to top-left corner
return padded_img
def resize_bbox_list(bboxlist, rescale_x=1, rescale_y=1):
# this is expecting x1,y1,w,h:
bboxListNew = []
for bbox in bboxlist:
abox = bbox
abox[0] *= rescale_x
abox[1] *= rescale_y
abox[2] *= rescale_x
abox[3] *= rescale_y
bboxListNew.append(abox)
return bboxListNew
| 33.561514 | 117 | 0.560391 | [
"BSD-3-Clause"
] | dcmartin/digits | digits/extensions/data/objectDetection/utils.py | 10,639 | Python |
# -*- coding: utf-8 -*-
"""
This Python module provides various service functions.
Updated since version 1.1:
1. Added support for postprocess and visualization.
2. Added file path validation for parameters of all related methods.
Updated since version 1.2: Merge Code and Update GUI
1. Integrate New Nemoh using hdf5 and python.
"""
__author__ = "caoweiquan322, TCSASSEMBLER"
__copyright__ = "Copyright (C) 2014-2015 TopCoder Inc. All rights reserved."
__version__ = "1.2"
import collections
import uuid
from settings import *
import os
import time
import subprocess
from multiprocessing import Process, Manager
import logging
from openwarp import helper
from nemoh import utility
from nemoh import preprocessor
from nemoh import postprocessor
from nemoh import solver
import warnings
import fnmatch
import h5py
# This class represents parameters used in the meshing process.
# This class is a subclass of "tuple", and is created using collections.namedtuple factory function.
MeshingParameters = collections.namedtuple('MeshingParameters',
'infile outfile maxh minh fineness grading usetolerance tolerance')
# This class represents parameters used in the simulation process.
# This class is a subclass of "tuple", and is created using collections.namedtuple factory function.
SimulationParameters = collections.namedtuple('SimulationParameters', 'rho g depth xeff yeff wave_frequencies ' +
'min_wave_frequencies max_wave_frequencies wave_directions ' +
'max_wave_direction min_wave_directions floating_bodies ' +
'indiq_solver ires tol_gmres max_iterations save_potential ' +
'green_tabulation_numx green_tabulation_numz ' +
'green_tabulation_simpson_npoints use_ode_influence_coefficients ' +
'use_higher_order num_panel_higher_order b_spline_order ' +
'use_dipoles_implementation thin_panels compute_drift_forces ' +
'compute_yaw_moment remove_irregular_frequencies')
# This class represents a floating body used in the SimulationParameters.
# This class is a subclass of "tuple", and is created using collections.namedtuple factory function.
FloatingBody = collections.namedtuple('FloatingBody', 'mesh_file points panels degrees_of_freedom surge sway ' +
'heave roll_about_cdg pitch_about_cdg yaw_about_cdg ' +
'resulting_generalised_forces force_in_x_direction force_in_y_direction ' +
'force_in_z_direction moment_cdg_force_in_x_direction ' +
'moment_cdg_force_in_y_direction moment_cdg_force_in_z_direction ' +
'additional_info_lines')
# This class represents parameters used in the post-proessing.
# This class is a subclass of "tuple", and is created using collections.namedtuple factory function.
PostprocessingParameters = collections.namedtuple('PostprocessingParameters', 'irf show_pressure ' +
'kochin_function free_surface_elevation')
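# Example (illustrative values only) of how a caller is expected to build one of these
# tuples before invoking the services below:
#   MeshingParameters(infile='hull.stl', outfile='hull.dat', maxh='1.0', minh='0.1',
#                     fineness='0.5', grading='0.3', usetolerance='0', tolerance=None)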
# The pre-defined config file name used by MESH_GENERATOR_BIN.
_CONFIG_FILE_NAME = 'config.txt'
# The pre-defined stdout log file name.
_LOG_FILE_NAME = 'log.txt'
# The logger object for logging.
_LOGGER = logging.getLogger(__name__)
class ServiceError(Exception):
'''
This exception indicates a service error.
It will be raised by methods of this module.
'''
pass
def generate_mesh(meshing_dir, params):
'''
Launch Mesh Generator to generate mesh.
@param meshing_dir: the meshing directory
@param params: the meshing parameters
@return: the mesh generation log content
@raise TypeError: if any input parameter is not of required type
@raise ValueError: if any input parameter is None/empty, or any field of MeshingParameters is not
of valid value
@raise ServiceError: if error occurs during generating mesh
'''
signature = __name__ + '.generate_mesh()'
helper.log_entrance(_LOGGER, signature,
{'meshing_dir': meshing_dir,
'params': params})
# Checking parameters
helper.check_not_none_nor_empty(meshing_dir, 'meshing_dir')
helper.check_is_directory(meshing_dir, 'meshing_dir')
helper.check_type_value(params, 'params', MeshingParameters, False)
helper.check_not_none_nor_empty(params.infile, 'params.infile')
helper.check_is_file(params.infile, 'params.infile')
helper.check_not_none_nor_empty(params.outfile, 'params.outfile')
helper.check_not_none_nor_empty(params.maxh, 'params.maxh')
helper.check_not_none_nor_empty(params.minh, 'params.minh')
helper.check_not_none_nor_empty(params.fineness, 'params.fineness')
helper.check_not_none_nor_empty(params.grading, 'params.grading')
helper.check_not_none_nor_empty(params.usetolerance, 'params.usetolerance')
if params.usetolerance == '1':
helper.check_not_none_nor_empty(params.tolerance, 'params.tolerance')
try:
config_file_path = os.path.join(meshing_dir, _CONFIG_FILE_NAME)
log_file_path = os.path.join(meshing_dir, _LOG_FILE_NAME)
# Generate config.txt according to given parameters
with open(config_file_path, 'w') as f:
f.write('\n'.join("%s: %s" % item for item in vars(params).items() if item[1] is not None))
# Launch mesh generator
with open(log_file_path, 'w') as log_file:
_LOGGER.debug('Start mesh generator in subprocess.')
subprocess.call(MESH_GENERATOR_BIN, cwd=meshing_dir, stdout=log_file)
_LOGGER.debug('End mesh generator in subprocess.')
# Read and return the log file content
with open(log_file_path, 'r') as log_file:
ret = log_file.read()
helper.log_exit(_LOGGER, signature, [ret])
return ret
except Exception as e:
helper.log_exception(_LOGGER, signature, e)
raise ServiceError('Error occurs when generating mesh. Caused by:\n' + unicode(str(e)))
def simulate(simulation_dir, params):
'''
Run simulation.
@param simulation_dir: the simulation directory
@param params: the simulation parameters
@return: the simulation log content
@raise TypeError: if any input parameter is not of required type
@raise ValueError: if any input parameter is None/empty, or any field of SimulationParameters is not
of valid value
@raise ServiceError: if any other error occurred when launching the simulation
'''
signature = __name__ + '.simulate()'
helper.log_entrance(_LOGGER, signature,
{'simulation_dir': simulation_dir,
'params': params})
# Checking parameters
helper.check_not_none_nor_empty(simulation_dir, 'simulation_dir')
helper.check_is_directory(simulation_dir, 'simulation_dir')
helper.check_type_value(params, 'params', SimulationParameters, False)
helper.check_not_none_nor_empty(params.rho, 'params.rho')
helper.check_not_none_nor_empty(params.g, 'params.g')
helper.check_not_none_nor_empty(params.depth, 'params.depth')
helper.check_not_none_nor_empty(params.xeff, 'params.xeff')
helper.check_not_none_nor_empty(params.yeff, 'params.yeff')
helper.check_not_none_nor_empty(params.wave_frequencies, 'params.wave_frequencies')
helper.check_not_none_nor_empty(params.min_wave_frequencies, 'params.min_wave_frequencies')
helper.check_not_none_nor_empty(params.max_wave_frequencies, 'params.max_wave_frequencies')
helper.check_not_none_nor_empty(params.wave_directions, 'params.wave_directions')
helper.check_not_none_nor_empty(params.min_wave_directions, 'params.min_wave_directions')
helper.check_not_none_nor_empty(params.max_wave_direction, 'params.max_wave_direction')
helper.check_not_none_nor_empty(params.indiq_solver, 'params.indiq_solver')
helper.check_not_none_nor_empty(params.ires, 'params.ires')
helper.check_not_none_nor_empty(params.tol_gmres, 'params.tol_gmres')
helper.check_not_none_nor_empty(params.max_iterations, 'params.max_iterations')
helper.check_not_none_nor_empty(params.save_potential, 'params.save_potential')
helper.check_not_none_nor_empty(params.green_tabulation_numx, 'params.green_tabulation_numx')
helper.check_not_none_nor_empty(params.green_tabulation_numz, 'params.green_tabulation_numz')
helper.check_not_none_nor_empty(params.green_tabulation_simpson_npoints, 'params.green_tabulation_simpson_npoints')
helper.check_not_none_nor_empty(params.use_ode_influence_coefficients, 'params.use_ode_influence_coefficients')
helper.check_not_none_nor_empty(params.use_higher_order, 'params.use_higher_order')
helper.check_not_none_nor_empty(params.num_panel_higher_order, 'params.num_panel_higher_order')
helper.check_not_none_nor_empty(params.b_spline_order, 'params.b_spline_order')
helper.check_not_none_nor_empty(params.use_dipoles_implementation, 'params.use_dipoles_implementation')
helper.check_not_none_nor_empty(params.thin_panels, 'params.thin_panels')
helper.check_not_none_nor_empty(params.compute_drift_forces, 'params.compute_drift_forces')
helper.check_not_none_nor_empty(params.remove_irregular_frequencies, 'params.remove_irregular_frequencies')
helper.check_not_none_nor_empty(params.compute_yaw_moment, 'params.compute_yaw_moment')
helper.check_type_value(params.floating_bodies, 'params.floating_bodies', list, True)
if params.floating_bodies is not None:
for body in params.floating_bodies:
helper.check_type_value(body, 'params.floating_bodies item', FloatingBody, False)
helper.check_not_none_nor_empty(body.mesh_file, 'body.mesh_file')
helper.check_not_none_nor_empty(body.points, 'body.points')
helper.check_not_none_nor_empty(body.panels, 'body.panels')
helper.check_not_none_nor_empty(body.degrees_of_freedom, 'body.degrees_of_freedom')
helper.check_not_none_nor_empty(body.resulting_generalised_forces, 'body.resulting_generalised_forces')
helper.check_not_none_nor_empty(body.additional_info_lines, 'body.additional_info_lines')
try:
# Write the hdf5 inputs according to given parameters
with h5py.File(os.path.join(simulation_dir, 'db.hdf5'), "a") as hdf5_data:
utility.write_calculations(params, hdf5_data)
# Launch preProcessor and Solver
# A prepared 'results' folder is necessary for the Nemoh software suite
os.mkdir(os.path.join(simulation_dir, 'results'))
simulation_log_path = os.path.join(simulation_dir, 'simulation_log.txt')
custom_config = {
'HDF5_FILE': os.path.join(simulation_dir, 'db.hdf5'),
'NEMOH_CALCULATIONS_FILE': None,
'NEMOH_INPUT_FILE': None,
'MESH_TEC_FILE': os.path.join(simulation_dir, 'mesh', 'mesh.tec'),
'FK_FORCE_TEC_FILE': os.path.join(simulation_dir, 'results', 'fkforce.tec'),
'RADIATION_COEFFICIENTS_TEC_FILE': os.path.join(simulation_dir, 'results', 'radiationcoefficients.tec'),
'DIFFRACTION_FORCE_TEC_FILE': os.path.join(simulation_dir, 'results', 'diffractionforce.tec'),
'EXCITATION_FORCE_TEC_FILE': os.path.join(simulation_dir, 'results', 'excitationforce.tec'),
'IRF_TEC_FILE': os.path.join(simulation_dir, 'results', 'irf.tec'),
'WAVE_FIELD_TEC_FILE': os.path.join(simulation_dir, 'results', 'WaveField.tec'),
'GREEN_TABULATION_NUMX' : int(params.green_tabulation_numx),
'GREEN_TABULATION_NUMZ' : int(params.green_tabulation_numz),
'GREEN_TABULATION_SIMPSON_NPOINTS' : int(params.green_tabulation_simpson_npoints),
'USE_ODE_INFLUENCE_COEFFICIENTS': bool(int(params.use_ode_influence_coefficients)),
'USE_HIGHER_ORDER' : bool(int(params.use_higher_order)),
'NUM_PANEL_HIGHER_ORDER' : int(params.num_panel_higher_order),
'B_SPLINE_ORDER': int(params.b_spline_order),
'USE_DIPOLES_IMPLEMENTATION': bool(int(params.use_dipoles_implementation)),
'THIN_PANELS': [int(i) for i in params.thin_panels.split()],
'COMPUTE_DRIFT_FORCES' : bool(int(params.compute_drift_forces)),
'COMPUTE_YAW_MOMENT': bool(int(params.compute_yaw_moment)),
'REMOVE_IRREGULAR_FREQUENCIES' : bool(int(params.remove_irregular_frequencies))
}
_LOGGER.debug('Start preProcessor function.')
run_thread(preprocessor.preprocess, (custom_config,), simulation_log_path)
_LOGGER.debug('End preProcessor function.')
_LOGGER.debug('Start solver function.')
output = run_thread(solver.solve, (custom_config,), None)
with open(simulation_log_path, 'a') as log_file:
log_file.write(output)
_LOGGER.debug('End solver function.')
with open(simulation_log_path, 'r') as log_file:
ret = log_file.read()
helper.log_exit(_LOGGER, signature, [ret])
return ret
except Exception as e:
helper.log_exception(_LOGGER, signature, e)
raise ServiceError('Error occurs when doing simulation. Caused by:\n' + unicode(str(e)))
def postprocess(simulation_dir, params):
'''
Run post-processing.
@param simulation_dir: the simulation directory
@param params: the post-processing parameters
@return: the post-processing log content
@raise TypeError: if any input parameter is not of required type
@raise ValueError: if any input parameter is None/empty, or any field of PostprocessingParameters is not
of valid value
@raise ServiceError: if error occurs during launching the post-processing
'''
signature = __name__ + '.postprocess()'
helper.log_entrance(_LOGGER, signature,
{'simulation_dir': simulation_dir,
'params': params})
# Checking parameters
helper.check_not_none_nor_empty(simulation_dir, 'simulation_dir')
helper.check_is_directory(simulation_dir, 'simulation_dir')
helper.check_type_value(params, 'params', PostprocessingParameters, False)
helper.check_type_value(params.irf, 'params.irf', list, False)
for irf_item in params.irf:
helper.check_not_none_nor_empty(irf_item, 'irf_item')
helper.check_not_none_nor_empty(params.show_pressure, 'params.show_pressure')
helper.check_type_value(params.kochin_function, 'params.kochin_function', list, False)
for kochin_function_item in params.kochin_function:
helper.check_not_none_nor_empty(kochin_function_item, 'kochin_function_item')
helper.check_type_value(params.free_surface_elevation, 'params.free_surface_elevation', list, False)
for elevation_item in params.free_surface_elevation:
helper.check_not_none_nor_empty(elevation_item, 'elevation_item')
try:
with h5py.File(os.path.join(simulation_dir, 'db.hdf5'), "a") as hdf5_data:
utility.write_postprocessing_section(params, hdf5_data)
# Launch postProcessor
postprocessing_log_path = os.path.join(simulation_dir, 'postprocessing_log.txt')
custom_config = {
'HDF5_FILE': os.path.join(simulation_dir, 'db.hdf5'),
'NEMOH_CALCULATIONS_FILE': None,
'NEMOH_INPUT_FILE': None,
'MESH_TEC_FILE': os.path.join(simulation_dir, 'mesh', 'mesh.tec'),
'FK_FORCE_TEC_FILE': os.path.join(simulation_dir, 'results', 'fkforce.tec'),
'RADIATION_COEFFICIENTS_TEC_FILE': os.path.join(simulation_dir, 'results', 'radiationcoefficients.tec'),
'DIFFRACTION_FORCE_TEC_FILE': os.path.join(simulation_dir, 'results', 'diffractionforce.tec'),
'EXCITATION_FORCE_TEC_FILE': os.path.join(simulation_dir, 'results', 'excitationforce.tec'),
'IRF_TEC_FILE': os.path.join(simulation_dir, 'results', 'irf.tec'),
'WAVE_FIELD_TEC_FILE': os.path.join(simulation_dir, 'results', 'WaveField.tec'),
'GREEN_TABULATION_NUMX' : 328,
'GREEN_TABULATION_NUMZ' : 46,
'GREEN_TABULATION_SIMPSON_NPOINTS' : 251,
'USE_ODE_INFLUENCE_COEFFICIENTS': False,
'USE_HIGHER_ORDER' : False,
'NUM_PANEL_HIGHER_ORDER' : 1,
'B_SPLINE_ORDER': 1,
'USE_DIPOLES_IMPLEMENTATION': False,
'THIN_PANELS': [-1],
'COMPUTE_DRIFT_FORCES' : False,
'COMPUTE_YAW_MOMENT': False,
'REMOVE_IRREGULAR_FREQUENCIES' : False
}
_LOGGER.debug('Start postProcessor function.')
run_thread(postprocessor.postprocess, (custom_config,), postprocessing_log_path)
_LOGGER.debug('End postProcessor in subprocess.')
with open(postprocessing_log_path, 'r') as log_file:
ret = log_file.read()
helper.log_exit(_LOGGER, signature, [ret])
return ret
except Exception as e:
helper.log_exception(_LOGGER, signature, e)
raise ServiceError('Error occurs when running postprocess. Caused by:\n' + unicode(str(e)))
def visualize(simulation_dir):
'''
Launch ParaView to visualize simulation results.
@param simulation_dir: the simulation directory
@raise TypeError: if any input parameter is not of required type
@raise ValueError: if any input parameter is None/empty
@raise ServiceError: if error occurs during launching the ParaView
'''
signature = __name__ + '.visualize()'
helper.log_entrance(_LOGGER, signature, {'simulation_dir': simulation_dir})
# Checking parameters
helper.check_not_none_nor_empty(simulation_dir, 'simulation_dir')
helper.check_is_directory(simulation_dir, 'simulation_dir')
try:
# Filter files to be opened in ParaView
files = []
for f in os.listdir(os.path.join(simulation_dir, 'results')):
for ext in VISUALIZATION_FILE_EXTENSIONS:
if fnmatch.fnmatch(f, '*.' + ext):
files.append(os.path.join(simulation_dir, 'results', f))
# Check if there's tec/vtk/stl file to visualize
if len(files) == 0:
raise ServiceError('There is no accepted file to visualize.')
_LOGGER.debug('List of files to load:')
_LOGGER.debug(str(files))
# Prepare script to run by ParaView
paraview_script = os.path.join(os.path.join(simulation_dir, 'results'), 'load_data.py')
prepare_paraview_script(paraview_script, files)
# Launch ParaView without waiting for the ParaView to exit
_LOGGER.debug('Start launching ParaView in subprocess.')
subprocess.Popen([PARAVIEW_BIN, '--script=' + paraview_script + ''])
_LOGGER.debug('End launching ParaView in subprocess.')
helper.log_exit(_LOGGER, signature, None)
except Exception as e:
helper.log_exception(_LOGGER, signature, e)
raise ServiceError('Error occurs when launching the ParaView. Caused by:\n' + unicode(str(e)))
def prepare_paraview_script(script_path, files):
'''
Prepare a script to be run by ParaView from a template.
@param script_path: path of the new script to create
@param files: a list of data files path
@raise Exception: to its caller if any error occurs
'''
    # Since this is an internal helper function, no entrance/exit information is logged.
with open(PARAVIEW_SCRIPT_TEMPLATE, 'r') as fin:
with open(script_path, 'w') as fout:
for line in fin.readlines():
fout.write(line.rstrip().replace('<parameter_files>', str(files)) + '\n')
# From http://code.activestate.com/recipes/577564-context-manager-for-low-level-redirection-of-stdou/
class Silence:
"""
Context manager which uses low-level file descriptors to suppress
output to stdout/stderr, optionally redirecting to the named file(s).
Example usage
with Silence(stderr='output.txt', mode='a'):
... # appending to existing file
... print >> sys.stderr, "Hello from stderr"
... print "Stdout redirected to os.devnull"
=== contents of 'output.txt' ===
"""
def __init__(self, stdout=os.devnull, stderr=os.devnull, mode='w'):
"""
Initialize
Args:
self: The class itself
stdout: the descriptor or file name where to redirect stdout
            stderr: the descriptor or file name where to redirect stderr
mode: the output descriptor or file mode
"""
self.outfiles = stdout, stderr
self.combine = (stdout == stderr)
self.mode = mode
def __enter__(self):
"""
Enter the context
Args:
self: The class itself
"""
import sys
self.sys = sys
# save previous stdout/stderr
self.saved_streams = saved_streams = sys.__stdout__, sys.__stderr__
self.fds = fds = [s.fileno() for s in saved_streams]
self.saved_fds = map(os.dup, fds)
# flush any pending output
for s in saved_streams: s.flush()
# open surrogate files
if self.combine:
null_streams = [open(self.outfiles[0], self.mode, 0)] * 2
if self.outfiles[0] != os.devnull:
# disable buffering so output is merged immediately
sys.stdout, sys.stderr = map(os.fdopen, fds, ['w']*2, [0]*2)
else: null_streams = [open(f, self.mode, 0) for f in self.outfiles]
self.null_fds = null_fds = [s.fileno() for s in null_streams]
self.null_streams = null_streams
# overwrite file objects and low-level file descriptors
map(os.dup2, null_fds, fds)
def __exit__(self, *args):
"""
Exit the context
Args:
self: The class itself
args: other arguments
"""
sys = self.sys
# flush any pending output
for s in self.saved_streams: s.flush()
# restore original streams and file descriptors
map(os.dup2, self.saved_fds, self.fds)
sys.stdout, sys.stderr = self.saved_streams
# clean up
for s in self.null_streams: s.close()
for fd in self.saved_fds: os.close(fd)
return False
def wrapper_io(func, fd, args, return_dict):
"""
    Run a function while redirecting its output to a file (given by name or descriptor)
    Args:
        func: A python function to run
        fd: a file name or descriptor to which stdout is redirected (stderr is silenced)
        args: A tuple containing the arguments for the function
        return_dict: Dictionary in which to store the result of the function
"""
return_dict["output"] = ''
with warnings.catch_warnings():
warnings.simplefilter("ignore")
if fd:
with Silence(stdout=fd, stderr=os.devnull, mode='a'):
return_dict["output"] = func(*args)
else:
return_dict["output"] = func(*args)
def run_thread(func, args, fd):
"""
    Run a python function in a separate process and wait for it to complete.
    Redirect its output to fd
    Args:
        func: A python function to run
        args: A tuple containing the arguments for the function
        fd: a file name or descriptor to which the output is redirected
"""
manager = Manager()
return_dict = manager.dict()
p = Process(target=wrapper_io, args=(func, fd, args, return_dict))
p.start()
p.join()
return return_dict["output"]
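# Illustrative usage sketch (not part of the original service code; the job
# function and log path below are made up for the example):
#     def noisy_job(x, y):
#         print 'working...'
#         return x + y
#     result = run_thread(noisy_job, (2, 3), '/tmp/noisy_job.log')
#     # result == 5, and everything printed by noisy_job is appended to the log file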
def writeline_if_not_none(fout, data):
'''
Write one line to the specified file if data is not None.
@param fout: the file object to write line in
@param data: the data to write as line
'''
    # Since this is an inner function, no entrance/exit information is logged.
if data is not None:
fout.write(str(data) + '\n')
def prepare_dir(prefix):
'''
    Prepare a directory. The directory will be a sub-directory of USER_DATA_DIRECTORY whose name is the given
    prefix followed by the current timestamp and a uuid suffix.
@param prefix: the directory prefix
@return: the meshing/simulation directory full path
@raise TypeError: if any input parameter is not of required type
@raise ValueError: if any input parameter is None/empty
@raise ServiceError: if any error occurred when preparing the directory
'''
signature = __name__ + '.prepare_dir()'
helper.log_entrance(_LOGGER, signature, {'prefix': prefix})
# Checking parameters
helper.check_not_none_nor_empty(prefix, 'prefix')
try:
        # Create a directory for this run (sub-directory name in format <prefix>YYYYMMDDhhmmss_<uuid hex>)
        # The uuid suffix allows concurrent requests within the same second.
run_dir = os.path.join(USER_DATA_DIRECTORY, prefix + time.strftime('%Y%m%d%H%M%S') + '_' + uuid.uuid1().hex)
os.makedirs(run_dir)
helper.log_exit(_LOGGER, signature, [run_dir])
return run_dir
except Exception as e:
helper.log_exception(_LOGGER, signature, e)
raise ServiceError('Error occurs when preparing the directory. Caused by:\n' + unicode(str(e)))
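# Illustrative call (the generated name below only shows the format, the timestamp and uuid are examples):
#     run_dir = prepare_dir('simulation_')
#     # -> <USER_DATA_DIRECTORY>/simulation_20240101120000_<uuid1 hex>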
| 49.40038 | 120 | 0.671814 | [
"Apache-2.0"
] | rhydar/Test | source/openwarpgui/openwarp/services.py | 26,034 | Python |
import scrapy
from scrapy.spiders import Rule, CrawlSpider
from scrapy.linkextractors import LinkExtractor
from sulekha.items import SulekhaItem, AddressItem, AddressItemLoader, SulekhaItemLoader
class SulekhaScrapy(CrawlSpider):
name = 'sulekha'
    allowed_domains = ['sulekha.com', 'yellowpages.sulekha.com']
start_urls = ['http://yellowpages.sulekha.com/']
    rules = [
        Rule(LinkExtractor(restrict_xpaths=["//div[@class='sc-belt']/div/ul/li/a"]),
             'parse_next', follow=True),
        Rule(LinkExtractor(restrict_xpaths=["//h3[@data-ypcid]/a[@itemprop='url']"]), 'parse_details'),
        Rule(LinkExtractor(restrict_xpaths=["//li[@class='next']/a"]),
             'parse_next', follow=True),
    ]
def parse_details(self, response):
self.logger.info('Parse item called on %s', response.url)
loader = SulekhaItemLoader(item=SulekhaItem(), response=response)
        loader.add_xpath('category', '//div[@itemprop="breadcrumb"]/a[3]/text()')
        loader.add_xpath('name', '//span[@itemprop="name"]/text()')
        loader.add_xpath('phone', '//em[@itemprop="telephone"]/text()')
        loader.add_value('address', self.parse_address_item(response))
        loader.add_xpath('email', '//span[@itemprop="email"]/a/text()')
        loader.add_xpath('website', '//a[@id="websitelink"]/text()')
        loader.add_xpath('contact_preson', '//div[@class="profile-child"]/text()')
        loader.add_xpath('working_hours', '//time[@itemprop="openingHours"]/em/text()')
return loader.load_item()
def parse_address_item(self, response):
address_loader = AddressItemLoader(item=AddressItem(), response=response)
        address_loader.add_xpath('street_address', '//span[@itemprop="streetAddress"]/text()')
        address_loader.add_xpath('address_locality', '//span[@itemprop="addressLocality"]/a/text()')
        address_loader.add_xpath('address_region', '//span[@itemprop="addressRegion"]/text()')
        address_loader.add_xpath('postal_code', '//span[@itemprop="postalCode"]/text()')
        address_loader.add_xpath('land_mark', '//span[@class="land-mark"]/text()')
return address_loader.load_item()
def parse_next(self, response):
self.logger.info('Parse next called on %s', response.url)
yield scrapy.Request(response.url, callback=self.parse_details)
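# Illustrative run (assuming the usual Scrapy project layout around this spider):
#     scrapy crawl sulekha -o listings.json
# This follows the category and pagination links declared in `rules` and yields one
# SulekhaItem per business listing parsed by parse_details().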
| 54.022727 | 103 | 0.668069 | [
"BSD-2-Clause"
] | Dalibisi/Scraping-India-YellowPages | sulekha/sulekha/spiders/sulekha_spider.py | 2,377 | Python |
from datetime import datetime, timedelta
from uuid import uuid4
class Uploader:
def generate_token(self):
pass
    def generate_download_link(self, object_name, filename) -> str:
pass
def object_name(self) -> str:
return str(uuid4())
class MockUploader(Uploader):
def __init__(self, config):
self.config = config
def get_token(self):
return ({}, self.object_name())
def generate_download_link(self, object_name, filename):
return ""
class AzureUploader(Uploader):
def __init__(self, config):
self.account_name = config["AZURE_ACCOUNT_NAME"]
self.storage_key = config["AZURE_STORAGE_KEY"]
self.container_name = config["AZURE_TO_BUCKET_NAME"]
self.timeout = timedelta(seconds=config["PERMANENT_SESSION_LIFETIME"])
from azure.storage.common import CloudStorageAccount
from azure.storage.blob import BlobPermissions
self.CloudStorageAccount = CloudStorageAccount
self.BlobPermissions = BlobPermissions
def get_token(self):
"""
Generates an Azure SAS token for pre-authorizing a file upload.
Returns a tuple in the following format: (token_dict, object_name), where
- token_dict has a `token` key which contains the SAS token as a string
- object_name is a string
"""
account = self.CloudStorageAccount(
account_name=self.account_name, account_key=self.storage_key
)
bbs = account.create_block_blob_service()
object_name = self.object_name()
sas_token = bbs.generate_blob_shared_access_signature(
self.container_name,
object_name,
permission=self.BlobPermissions.CREATE,
expiry=datetime.utcnow() + self.timeout,
protocol="https",
)
return ({"token": sas_token}, object_name)
def generate_download_link(self, object_name, filename):
account = self.CloudStorageAccount(
account_name=self.account_name, account_key=self.storage_key
)
bbs = account.create_block_blob_service()
sas_token = bbs.generate_blob_shared_access_signature(
self.container_name,
object_name,
permission=self.BlobPermissions.READ,
expiry=datetime.utcnow() + self.timeout,
content_disposition=f"attachment; filename={filename}",
protocol="https",
)
return bbs.make_blob_url(
self.container_name, object_name, protocol="https", sas_token=sas_token
)
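# Illustrative usage sketch (not part of the module; the config object and file
# name below are assumptions):
#     uploader = AzureUploader(app.config)  # must contain the AZURE_* and lifetime keys read in __init__
#     token_dict, object_name = uploader.get_token()
#     # hand token_dict["token"] and object_name to the client performing the upload
#     url = uploader.generate_download_link(object_name, "report.pdf")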
| 33.525641 | 83 | 0.656214 | [
"MIT"
] | philip-dds/atst | atst/domain/csp/file_uploads.py | 2,615 | Python |
from Bio.Seq import Seq
def cMers(tSeq, k):
    """Count the frequency of every k-mer (substring of length k) in tSeq."""
kFreq = {}
for x in range(0, len(tSeq) - k + 1):
kMer = tSeq[x : x+k]
if kMer in kFreq:
kFreq[kMer] += 1
else:
kFreq[kMer] = 1
return kFreq
# Read the sequence, stripping the trailing newline so it does not end up inside the k-mers
with open('dataSeq.txt', 'r') as f:
    mySeq = Seq(f.read().strip())
cKmer = cMers(mySeq, 3)
print(cKmer) | 16.85 | 41 | 0.510386 | [
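# Illustrative example (not part of the script): for the plain string "ATCGATC"
# and k=3, cMers returns {'ATC': 2, 'TCG': 1, 'CGA': 1, 'GAT': 1}.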
"MIT"
] | AhmedNasser1601/AUG-Problem-Solving-For-Bioinformatics-Level-1- | Session 7/K-mers.py | 337 | Python |
import os
import os.path
import torch
import numpy as np
import pandas
import csv
import random
from collections import OrderedDict
from .base_video_dataset import BaseVideoDataset
from ltr.data.image_loader import jpeg4py_loader
from ltr.admin.environment import env_settings
class Lasot(BaseVideoDataset):
""" LaSOT dataset.
Publication:
LaSOT: A High-quality Benchmark for Large-scale Single Object Tracking
Heng Fan, Liting Lin, Fan Yang, Peng Chu, Ge Deng, Sijia Yu, Hexin Bai, Yong Xu, Chunyuan Liao and Haibin Ling
CVPR, 2019
https://arxiv.org/pdf/1809.07845.pdf
Download the dataset from https://cis.temple.edu/lasot/download.html
"""
def __init__(self, root=None, image_loader=jpeg4py_loader, vid_ids=None, split=None, data_fraction=None):
"""
args:
root - path to the lasot dataset.
image_loader (jpeg4py_loader) - The function to read the images. jpeg4py (https://github.com/ajkxyz/jpeg4py)
is used by default.
vid_ids - List containing the ids of the videos (1 - 20) used for training. If vid_ids = [1, 3, 5], then the
videos with subscripts -1, -3, and -5 from each class will be used for training.
split - If split='train', the official train split (protocol-II) is used for training. Note: Only one of
vid_ids or split option can be used at a time.
data_fraction - Fraction of dataset to be used. The complete dataset is used by default
"""
root = env_settings().lasot_dir if root is None else root
super().__init__('LaSOT', root, image_loader)
# Keep a list of all classes
self.class_list = [f for f in os.listdir(self.root)]
self.class_to_id = {cls_name: cls_id for cls_id, cls_name in enumerate(self.class_list)}
self.sequence_list = self._build_sequence_list(vid_ids, split)
if data_fraction is not None:
self.sequence_list = random.sample(self.sequence_list, int(len(self.sequence_list)*data_fraction))
self.seq_per_class = self._build_class_list()
def _build_sequence_list(self, vid_ids=None, split=None):
if split is not None:
if vid_ids is not None:
raise ValueError('Cannot set both split_name and vid_ids.')
ltr_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), '..')
if split == 'train':
file_path = os.path.join(ltr_path, 'data_specs', 'lasot_train_split.txt')
else:
raise ValueError('Unknown split name.')
sequence_list = pandas.read_csv(file_path, header=None, squeeze=True).values.tolist()
elif vid_ids is not None:
sequence_list = [c+'-'+str(v) for c in self.class_list for v in vid_ids]
else:
raise ValueError('Set either split_name or vid_ids.')
return sequence_list
def _build_class_list(self):
seq_per_class = {}
for seq_id, seq_name in enumerate(self.sequence_list):
class_name = seq_name.split('-')[0]
if class_name in seq_per_class:
seq_per_class[class_name].append(seq_id)
else:
seq_per_class[class_name] = [seq_id]
return seq_per_class
def get_name(self):
return 'lasot'
def has_class_info(self):
return True
def has_occlusion_info(self):
return True
def get_num_sequences(self):
return len(self.sequence_list)
def get_num_classes(self):
return len(self.class_list)
def get_sequences_in_class(self, class_name):
return self.seq_per_class[class_name]
def _read_bb_anno(self, seq_path):
bb_anno_file = os.path.join(seq_path, "groundtruth.txt")
gt = pandas.read_csv(bb_anno_file, delimiter=',', header=None, dtype=np.float32, na_filter=False, low_memory=False).values
return torch.tensor(gt)
def _read_target_visible(self, seq_path):
# Read full occlusion and out_of_view
occlusion_file = os.path.join(seq_path, "full_occlusion.txt")
out_of_view_file = os.path.join(seq_path, "out_of_view.txt")
with open(occlusion_file, 'r', newline='') as f:
occlusion = torch.ByteTensor([int(v) for v in list(csv.reader(f))[0]])
with open(out_of_view_file, 'r') as f:
out_of_view = torch.ByteTensor([int(v) for v in list(csv.reader(f))[0]])
target_visible = ~occlusion & ~out_of_view
return target_visible
def _get_sequence_path(self, seq_id):
seq_name = self.sequence_list[seq_id]
class_name = seq_name.split('-')[0]
vid_id = seq_name.split('-')[1]
return os.path.join(self.root, class_name, class_name + '-' + vid_id)
def get_sequence_info(self, seq_id):
seq_path = self._get_sequence_path(seq_id)
bbox = self._read_bb_anno(seq_path)
valid = (bbox[:, 2] > 0) & (bbox[:, 3] > 0)
visible = self._read_target_visible(seq_path) & valid.byte()
return {'bbox': bbox, 'valid': valid, 'visible': visible}
def _get_frame_path(self, seq_path, frame_id):
return os.path.join(seq_path, 'img', '{:08}.jpg'.format(frame_id+1)) # frames start from 1
def _get_frame(self, seq_path, frame_id):
return self.image_loader(self._get_frame_path(seq_path, frame_id))
def _get_class(self, seq_path):
raw_class = seq_path.split('/')[-2]
return raw_class
def get_class_name(self, seq_id):
seq_path = self._get_sequence_path(seq_id)
obj_class = self._get_class(seq_path)
return obj_class
def get_frames(self, seq_id, frame_ids, anno=None):
seq_path = self._get_sequence_path(seq_id)
obj_class = self._get_class(seq_path)
frame_list = [self._get_frame(seq_path, f_id) for f_id in frame_ids]
if anno is None:
anno = self.get_sequence_info(seq_id)
anno_frames = {}
for key, value in anno.items():
anno_frames[key] = [value[f_id, ...].clone() for f_id in frame_ids]
object_meta = OrderedDict({'object_class_name': obj_class,
'motion_class': None,
'major_class': None,
'root_class': None,
'motion_adverb': None})
return frame_list, anno_frames, object_meta
| 38.680473 | 130 | 0.634389 | [
"Apache-2.0"
] | 2021-DGSW-Ensemble/Ensemble-AI | Stark-main/external/AR/ltr/dataset/lasot.py | 6,537 | Python |
## 2. Introduction to the Data ##
import pandas as pd
all_ages = pd.read_csv('all-ages.csv')
recent_grads = pd.read_csv('recent-grads.csv')
print(all_ages.head())
print(recent_grads.head())
## 3. Summarizing Major Categories ##
# Unique values in Major_category column.
print(all_ages['Major_category'].unique())
aa_cat_counts = dict()
rg_cat_counts = dict()
def cat_summary(data,category):
subset = data[data['Major_category']==category]
total = subset['Total'].sum()
return(total)
for cat in all_ages['Major_category'].unique():
aa_cat_counts[cat] = cat_summary(all_ages,cat)
for cat in recent_grads['Major_category'].unique():
rg_cat_counts[cat] = cat_summary(recent_grads,cat)
## 4. Low-Wage Job Rates ##
low_wage_percent = 0.0
low_wage_percent = recent_grads['Low_wage_jobs'].sum()/recent_grads['Total'].sum()
## 5. Comparing Data Sets ##
# All majors, common to both DataFrames
majors = recent_grads['Major'].unique()
rg_lower_count = 0
for item in majors:
grad_subset = recent_grads[recent_grads['Major']==item]
all_subset = all_ages[all_ages['Major']==item]
if(grad_subset['Unemployment_rate'].values[0] < all_subset['Unemployment_rate'].values[0]):
rg_lower_count +=1
print(rg_lower_count) | 30.707317 | 95 | 0.719619 | [
"MIT"
] | vipmunot/Data-Analysis-using-Python | Data Analysis with Pandas Intermediate/Challenge_ Summarizing Data-112.py | 1,259 | Python |
import pytest
from chispa import assert_df_equality
from cishouseholds.pipeline.generate_outputs import configure_outputs
def test_configure_outputs(spark_session):
input_df = spark_session.createDataFrame(
data=[
("England", 6, 2, "02-6SY"),
("NI", 9, 5, "02-6SY"),
("Scotland", 11, 7, "07SY-11SY"),
("Wales", 15, 10, "07SY-11SY"),
("Wales", 15, 6, "07SY-11SY"),
("Scotland", 15, 6, None),
("England", 17, 12, "12SY-24"),
("NI", 18, 13, "12SY-24"),
("England", 25, 12, "25-34"),
("NI", 55, 79, "50-69"),
("NI", 88, 1, "70+"),
],
schema="country string, age integer, school_year integer, output string",
)
expected_df1 = spark_session.createDataFrame(
data=[
("England", 6, 2, "trumpet"),
("NI", 9, 5, "trumpet"),
("Scotland", 11, 7, "07SY-11SY"),
("Wales", 15, 10, "07SY-11SY"),
("Wales", 15, 6, "07SY-11SY"),
("Scotland", 15, 6, None),
("England", 17, 12, "12SY-24"),
("NI", 18, 13, "12SY-24"),
("England", 25, 12, "25-34"),
("NI", 55, 79, "50-69"),
("NI", 88, 1, "gibberish"),
],
schema="country string, age integer, renamed integer, output string",
)
expected_df2 = spark_session.createDataFrame(
data=[
("Wales", 2),
("NI", 4),
("England", 3),
("Scotland", 2),
],
schema="country string, test long",
)
expected_df3 = spark_session.createDataFrame(
data=[
(3, 2),
(1, 4),
(2, 3),
(4, 2),
],
schema="country integer, test long",
)
# test mapping functionality with complete map off
output_df1 = configure_outputs(
input_df,
selection_columns=["country", "age", "school_year", "output"],
name_map={"school_year": "renamed"},
value_map={"output": {"70+": "gibberish", "02-6SY": "trumpet"}},
)
output_df5 = configure_outputs(
input_df,
selection_columns=["country", "age", "school_year"],
group_by_columns="country",
aggregate_function="count",
aggregate_column_name="test",
value_map={"country": {"NI": 1, "England": 2, "Wales": 3, "Scotland": 4}},
complete_map=True,
)
# test correct grouping functionality
output_df2 = configure_outputs(
input_df, group_by_columns="country", aggregate_function="count", aggregate_column_name="test"
)
assert_df_equality(output_df1, expected_df1, ignore_nullable=True, ignore_row_order=True)
assert_df_equality(output_df2, expected_df2, ignore_nullable=True, ignore_row_order=True)
assert_df_equality(output_df5, expected_df3, ignore_nullable=True, ignore_row_order=True)
# test function dissalows using functions on non-selected columns
with pytest.raises(IndexError):
configure_outputs(input_df, group_by_columns="country", selection_columns="age")
# test function raises readable error for column not existing on dataframe
with pytest.raises(AttributeError):
configure_outputs(input_df, selection_columns="nothing")
# test incomplete maps raise index error
with pytest.raises(LookupError):
configure_outputs(
input_df,
selection_columns=["country", "age", "school_year", "output"],
name_map={"school_year": "renamed"},
value_map={"output": {"70+": "gibberish", "02-6SY": "trumpet"}},
complete_map=True,
)
| 36.95 | 102 | 0.567524 | [
"MIT"
] | ONS-SST/cis_households | tests/pipeline/test_configure_outputs.py | 3,695 | Python |
#!/usr/bin/python
from __future__ import absolute_import, division, print_function
# Copyright 2019-2020 Fortinet, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
__metaclass__ = type
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'metadata_version': '1.1'}
DOCUMENTATION = '''
---
module: fmgr_application_group
short_description: Configure firewall application groups.
description:
- This module is able to configure a FortiManager device.
- Examples include all parameters and values which need to be adjusted to data sources before usage.
version_added: "2.10"
author:
- Link Zheng (@chillancezen)
- Jie Xue (@JieX19)
- Frank Shen (@fshen01)
- Hongbin Lu (@fgtdev-hblu)
notes:
- Running in workspace locking mode is supported in this FortiManager module, the top
level parameters workspace_locking_adom and workspace_locking_timeout help do the work.
- To create or update an object, use state present directive.
- To delete an object, use state absent directive.
- Normally, running one module can fail when a non-zero rc is returned. you can also override
the conditions to fail or succeed with parameters rc_failed and rc_succeeded
options:
bypass_validation:
description: only set to True when module schema diffs with FortiManager API structure, module continues to execute without validating parameters
required: false
type: bool
default: false
workspace_locking_adom:
description: the adom to lock for FortiManager running in workspace mode, the value can be global and others including root
required: false
type: str
workspace_locking_timeout:
description: the maximum time in seconds to wait for other user to release the workspace lock
required: false
type: int
default: 300
state:
description: the directive to create, update or delete an object
type: str
required: true
choices:
- present
- absent
rc_succeeded:
description: the rc codes list with which the conditions to succeed will be overriden
type: list
required: false
rc_failed:
description: the rc codes list with which the conditions to fail will be overriden
type: list
required: false
adom:
description: the parameter (adom) in requested url
type: str
required: true
application_group:
description: the top level parameters set
required: false
type: dict
suboptions:
application:
description: no description
type: int
category:
description: no description
type: int
comment:
type: str
description: 'Comment'
name:
type: str
description: 'Application group name.'
type:
type: str
description: 'Application group type.'
choices:
- 'application'
- 'category'
'''
EXAMPLES = '''
- hosts: fortimanager-inventory
collections:
- fortinet.fortimanager
connection: httpapi
vars:
ansible_httpapi_use_ssl: True
ansible_httpapi_validate_certs: False
ansible_httpapi_port: 443
tasks:
- name: Configure firewall application groups.
fmgr_application_group:
bypass_validation: False
workspace_locking_adom: <value in [global, custom adom including root]>
workspace_locking_timeout: 300
rc_succeeded: [0, -2, -3, ...]
rc_failed: [-2, -3, ...]
adom: <your own value>
state: <value in [present, absent]>
application_group:
application: <value of integer>
category: <value of integer>
comment: <value of string>
name: <value of string>
type: <value in [application, category]>
'''
RETURN = '''
request_url:
description: The full url requested
returned: always
type: str
sample: /sys/login/user
response_code:
description: The status of api request
returned: always
type: int
sample: 0
response_message:
description: The descriptive message of the api response
type: str
returned: always
sample: OK.
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.connection import Connection
from ansible_collections.fortinet.fortimanager.plugins.module_utils.napi import NAPIManager
from ansible_collections.fortinet.fortimanager.plugins.module_utils.napi import check_galaxy_version
from ansible_collections.fortinet.fortimanager.plugins.module_utils.napi import check_parameter_bypass
def main():
jrpc_urls = [
'/pm/config/adom/{adom}/obj/application/group',
'/pm/config/global/obj/application/group'
]
perobject_jrpc_urls = [
'/pm/config/adom/{adom}/obj/application/group/{group}',
'/pm/config/global/obj/application/group/{group}'
]
url_params = ['adom']
module_primary_key = 'name'
module_arg_spec = {
'bypass_validation': {
'type': 'bool',
'required': False,
'default': False
},
'workspace_locking_adom': {
'type': 'str',
'required': False
},
'workspace_locking_timeout': {
'type': 'int',
'required': False,
'default': 300
},
'rc_succeeded': {
'required': False,
'type': 'list'
},
'rc_failed': {
'required': False,
'type': 'list'
},
'state': {
'type': 'str',
'required': True,
'choices': [
'present',
'absent'
]
},
'adom': {
'required': True,
'type': 'str'
},
'application_group': {
'required': False,
'type': 'dict',
'options': {
'application': {
'required': False,
'type': 'int'
},
'category': {
'required': False,
'type': 'int'
},
'comment': {
'required': False,
'type': 'str'
},
'name': {
'required': True,
'type': 'str'
},
'type': {
'required': False,
'choices': [
'application',
'category'
],
'type': 'str'
}
}
}
}
params_validation_blob = []
check_galaxy_version(module_arg_spec)
module = AnsibleModule(argument_spec=check_parameter_bypass(module_arg_spec, 'application_group'),
supports_check_mode=False)
fmgr = None
if module._socket_path:
connection = Connection(module._socket_path)
fmgr = NAPIManager(jrpc_urls, perobject_jrpc_urls, module_primary_key, url_params, module, connection, top_level_schema_name='data')
fmgr.validate_parameters(params_validation_blob)
fmgr.process_curd()
else:
module.fail_json(msg='MUST RUN IN HTTPAPI MODE')
module.exit_json(meta=module.params)
if __name__ == '__main__':
main()
| 31.968992 | 153 | 0.590204 | [
"MIT"
] | DiptoChakrabarty/nexus | venv/lib/python3.7/site-packages/ansible_collections/fortinet/fortimanager/plugins/modules/fmgr_application_group.py | 8,248 | Python |
from __future__ import print_function, division
import os
from PIL import Image
import numpy as np
from torch.utils.data import Dataset
from mypath import Path
from torchvision import transforms
from dataloaders import custom_transforms as tr
from dataloaders import sp_transforms as tr_sp
class VOCSegmentation(Dataset):
"""
PascalVoc dataset
"""
NUM_CLASSES = 21
def __init__(self,
args,
base_dir=Path.db_root_dir('pascal'),
split='train',
):
"""
:param base_dir: path to VOC dataset directory
:param split: train/val
:param transform: transform to apply
"""
super().__init__()
self._base_dir = base_dir
self._image_dir = os.path.join(self._base_dir, 'JPEGImages')
self._cat_dir = os.path.join(self._base_dir, 'SegmentationClass')
self._sp_dir = os.path.join(self._base_dir, 'super_pixel')
if isinstance(split, str):
self.split = [split]
else:
split.sort()
self.split = split
self.args = args
_splits_dir = os.path.join(self._base_dir, 'ImageSets', 'Segmentation')
self.im_ids = []
self.images = []
self.categories = []
self.super_pixel = []
for splt in self.split:
with open(os.path.join(os.path.join(_splits_dir, splt + '.txt')), "r") as f:
lines = f.read().splitlines()
for ii, line in enumerate(lines):
_image = os.path.join(self._image_dir, line + ".jpg")
_cat = os.path.join(self._cat_dir, line + ".png")
_sp = os.path.join(self._sp_dir, line + ".ppm.jpg")
assert os.path.isfile(_image)
assert os.path.isfile(_cat)
assert os.path.isfile(_sp)
self.im_ids.append(line)
self.images.append(_image)
self.categories.append(_cat)
self.super_pixel.append(_sp)
assert (len(self.images) == len(self.categories))
# Display stats
print('Number of images in {}: {:d}'.format(split, len(self.images)))
def __len__(self):
return len(self.images)
def __getitem__(self, index):
_img, _target, _sp = self._make_img_gt_point_pair(index)
sample = {'image': _img, 'label': _target, 'super_pixel':_sp}
for split in self.split:
if split == "train":
return self.transform_tr(sample)
elif split == 'val':
return self.transform_val(sample)
def _make_img_gt_point_pair(self, index):
_img = Image.open(self.images[index]).convert('RGB')
_target = Image.open(self.categories[index])
_sp = Image.open(self.super_pixel[index])
return _img, _target, _sp
def transform_tr(self, sample):
if len(sample) == 2:
composed_transforms = transforms.Compose([
tr.RandomHorizontalFlip(),
tr.RandomScaleCrop(base_size=self.args.base_size, crop_size=self.args.crop_size),
tr.RandomGaussianBlur(),
tr.Normalize(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225)),
tr.ToTensor()])
return composed_transforms(sample)
else:
composed_transforms = transforms.Compose([
tr_sp.RandomHorizontalFlip(),
tr_sp.RandomScaleCrop(base_size=self.args.base_size, crop_size=self.args.crop_size),
tr_sp.RandomGaussianBlur(),
tr_sp.Normalize(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225)),
tr_sp.ToTensor()])
return composed_transforms(sample)
def transform_val(self, sample):
if len(sample) == 2:
composed_transforms = transforms.Compose([
tr.FixScaleCrop(crop_size=self.args.crop_size),
tr.Normalize(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225)),
tr.ToTensor()])
return composed_transforms(sample)
else:
composed_transforms = transforms.Compose([
tr_sp.FixScaleCrop(crop_size=self.args.crop_size),
tr_sp.Normalize(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225)),
tr_sp.ToTensor()])
return composed_transforms(sample)
def __str__(self):
return 'VOC2012(split=' + str(self.split) + ')'
if __name__ == '__main__':
from dataloaders.utils import decode_segmap
from torch.utils.data import DataLoader
import matplotlib.pyplot as plt
import argparse
parser = argparse.ArgumentParser()
args = parser.parse_args()
args.base_size = 513
args.crop_size = 513
voc_train = VOCSegmentation(args, split='train')
dataloader = DataLoader(voc_train, batch_size=5, shuffle=True, num_workers=0)
for ii, sample in enumerate(dataloader):
for jj in range(sample["image"].size()[0]):
img = sample['image'].numpy()
gt = sample['label'].numpy()
sps = sample['super_pixel'].numpy()
tmp = np.array(gt[jj]).astype(np.uint8)
segmap = decode_segmap(tmp, dataset='pascal')
img_tmp = np.transpose(img[jj], axes=[1, 2, 0])
img_tmp *= (0.229, 0.224, 0.225)
img_tmp += (0.485, 0.456, 0.406)
img_tmp *= 255.0
img_tmp = img_tmp.astype(np.uint8)
sp = np.array(sps[jj])
sp *= 255.0
sp = sp.astype(np.uint8)
plt.figure()
plt.title('display')
plt.subplot(211)
plt.imshow(img_tmp)
plt.subplot(212)
plt.imshow(sp)
if ii == 1:
break
plt.show(block=True)
| 33.643678 | 100 | 0.571575 | [
"MIT"
] | ChenyanWu/seg_super_pixel | dataloaders/datasets/pascal.py | 5,854 | Python |
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
"""spaCy ANN Linker, a pipeline component for generating spaCy KnowledgeBase Alias Candidates for Entity Linking."""
__version__ = '0.1.10'
from .ann_linker import AnnLinker
from .remote_ann_linker import RemoteAnnLinker
# TODO: Uncomment (and probably fix a bit) once this PR is merged upstream
# https://github.com/explosion/spaCy/pull/4988 to enable kb registry with
# customizable `get_candidates` function
#
# from spacy.kb import KnowledgeBase
# from spacy.tokens import Span
# from spacy.util import registry
# @registry.kb.register("get_candidates")
# def get_candidates(kb: KnowledgeBase, ent: Span):
# alias = ent._.alias_candidates[0] if ent._.alias_candidates else ent.text
# return kb.get_candidates(alias)
| 34.583333 | 116 | 0.772289 | [
"MIT"
] | jjjamie/spacy-ann-linker | spacy_ann/__init__.py | 830 | Python |
from scipy.optimize import minimize
import warnings
import numpy as np
from astropy.io import fits
from astropy.table import Table
from astropy.nddata import NDData
from photutils.psf import extract_stars
from astropy.stats import gaussian_sigma_to_fwhm
from ..core import Block
import matplotlib.pyplot as plt
from collections import OrderedDict
from ..utils import fast_binning
def image_psf(image, stars, size=15, normalize=False, return_cutouts=False):
"""
Get global psf from image using photutils routines
Parameters
----------
    image: np.ndarray or path
        image data or path to the FITS file
    stars: np.ndarray
        stars positions with shape (n,2)
    size: int
        size of the cuts around stars (in pixels)
    normalize: bool, optional
        whether to normalize each cutout, default is False
    return_cutouts: bool, optional
        whether to also return the individual cutouts, default is False
    Returns
    -------
    np.ndarray of shape (size, size)
"""
_, cuts = cutouts(image, stars, size=size)
cuts = cuts.data
if normalize:
cuts = [c/np.sum(c) for c in cuts]
if return_cutouts:
return np.median(cuts, axis=0), cuts
else:
return np.median(cuts, axis=0)
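# Illustrative usage sketch (the star positions and image below are assumptions):
#     stars = np.array([[120.3, 85.1], [340.7, 210.2], [56.0, 400.9]])
#     epsf = image_psf(image_data, stars, size=15)  # image_data: 2D numpy array
#     epsf.shape                                    # -> (15, 15)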
def cutouts(image, stars, size=15):
"""Custom version to extract stars cutouts
    Parameters
    ----------
    image: np.ndarray or path
        image data or path to the FITS file
    stars: np.ndarray
        stars positions with shape (n,2)
    size: int
        size of the cuts around stars (in pixels), by default 15
    Returns
    -------
    photutils EPSFStars holding the cutouts; when more than one star position is
    given, the indices of the extracted stars are returned as well
"""
if isinstance(image, str):
image = fits.getdata(image)
warnings.simplefilter("ignore")
if np.shape(stars) > (1,2):
stars_tbl = Table(
[stars[:, 0], stars[:, 1], np.arange(len(stars))],
names=["x", "y", "id"])
stars = extract_stars(NDData(data=image), stars_tbl, size=size)
idxs = np.array([s.id_label for s in stars])
return idxs, stars
else:
stars_tbl = Table(
data=np.array([stars[0][0], stars[0][1]]),
names=["x", "y"])
stars = extract_stars(NDData(data=image), stars_tbl, size=size)
return stars
def good_cutouts(image, xy, r=30, upper=40000, lower=1000, trim=100):
    """Return an OrderedDict of star cutouts keyed by their index in xy,
    discarding saturated, faint, border and blended stars."""
idxs, _cuts = cutouts(image, xy, r)
cuts = OrderedDict(zip(idxs, _cuts))
peaks = [cutout.data.max() for cutout in cuts.values()]
for i, cutout in cuts.copy().items():
if i in cuts:
peak = cutout.data.max()
center = cutout.center
# removing saturated and faint stars
if peak > upper or peak < lower:
del cuts[i]
# removing stars on borders
elif np.any(center < [trim, trim]) or np.any(center > np.array(image.shape) - trim):
del cuts[i]
# removing close stars
closest = idxs[np.nonzero(np.linalg.norm(center - xy[idxs], axis=1) < r)[0]]
if len(closest) > 1:
for j in closest:
if j in cuts:
del cuts[j]
return cuts
def moments(data):
    """Returns (height, x, y, width_x, width_y, theta, background),
    an estimate of the gaussian parameters of a 2D distribution computed
    from its moments (theta is always returned as 0.0)"""
height = data.max()
background = data.min()
data = data-np.min(data)
total = data.sum()
x, y = np.indices(data.shape)
x = (x * data).sum() / total
y = (y * data).sum() / total
col = data[:, int(y)]
width_x = np.sqrt(abs((np.arange(col.size) - y) ** 2 * col).sum() / col.sum())
row = data[int(x), :]
width_y = np.sqrt(abs((np.arange(row.size) - x) ** 2 * row).sum() / row.sum())
width_x /= gaussian_sigma_to_fwhm
width_y /= gaussian_sigma_to_fwhm
return height, x, y, width_x, width_y, 0.0, background
class PSFModel(Block):
def __init__(self, cutout_size=21, save_cutouts=False, **kwargs):
super().__init__(**kwargs)
self.cutout_size = cutout_size
self.save_cutouts = save_cutouts
self.x, self.y = np.indices((self.cutout_size, self.cutout_size))
self.epsf = None
@property
def optimized_model(self):
return self.model(*self.optimized_params)
def build_epsf(self, image, stars):
return image_psf(image, stars.copy(), size=self.cutout_size, return_cutouts=self.save_cutouts)
def model(self):
raise NotImplementedError("")
def nll(self, p):
ll = np.sum(np.power((self.model(*p) - self.epsf), 2) * self.epsf)
return ll if np.isfinite(ll) else 1e25
def optimize(self):
raise NotImplementedError("")
def sigma_to_fwhm(self, *args):
return gaussian_sigma_to_fwhm
def run(self, image):
if self.save_cutouts:
self.epsf, image.cutouts = self.build_epsf(image.data, image.stars_coords)
else:
self.epsf = self.build_epsf(image.data, image.stars_coords)
image.fwhmx, image.fwhmy, image.theta = self.optimize()
image.fwhm = np.mean([image.fwhmx, image.fwhmy])
image.psf_sigma_x = image.fwhmx / self.sigma_to_fwhm()
image.psf_sigma_y = image.fwhmy / self.sigma_to_fwhm()
image.header["FWHM"] = image.fwhm
image.header["FWHMX"] = image.fwhmx
image.header["FWHMY"] = image.fwhmy
image.header["PSFANGLE"] = image.theta
image.header["FWHMALG"] = self.__class__.__name__
def show_residuals(self):
plt.imshow(self.epsf - self.optimized_model)
plt.colorbar()
ax = plt.gca()
plt.text(0.05, 0.05, "$\Delta f=$ {:.2f}%".format(100*np.sum(np.abs(self.epsf - self.optimized_model))/np.sum(self.epsf)),
fontsize=14, horizontalalignment='left', verticalalignment='bottom', transform=ax.transAxes, c="w")
def __call__(self, data):
self.epsf = data
return self.optimize()
class FWHM(PSFModel):
"""
Fast empirical FWHM (based on Arielle Bertrou-Cantou's idea)
"""
def __init__(self, cutout_size=51, **kwargs):
super().__init__(cutout_size=cutout_size, **kwargs)
Y, X = np.indices((self.cutout_size,self.cutout_size))
x = y = self.cutout_size/2
self.radii = (np.sqrt((X - x) ** 2 + (Y - y) ** 2)).flatten()
def optimize(self):
psf = self.epsf.copy()
psf -= np.min(psf)
pixels = psf.flatten()
binned_radii, binned_pixels, _ = fast_binning(self.radii, pixels, bins=1)
fwhm = 2*binned_radii[np.flatnonzero(binned_pixels > np.max(binned_pixels)/2)[-1]]
return fwhm, fwhm, 0
class FastGaussian(PSFModel):
"""
Fit a symetric 2D Gaussian model to an image effective PSF
"""
def __init__(self, cutout_size=21, **kwargs):
super().__init__(cutout_size=cutout_size, **kwargs)
def model(self, height, s, m):
dx = self.x - self.cutout_size/2
dy = self.y - self.cutout_size/2
psf = height * np.exp(-((dx/(2*s))**2 + (dy/(2*s))**2))
return psf + m
def optimize(self):
p0 = [np.max(self.epsf), 4, np.min(self.epsf)]
min_sigma = 0.5
bounds = [
(0, np.infty),
(min_sigma, np.infty),
(0, np.mean(self.epsf)),
]
with warnings.catch_warnings():
warnings.simplefilter("ignore")
params = minimize(self.nll, p0, bounds=bounds).x
self.optimized_params = params
return params[1]*self.sigma_to_fwhm(), params[1]*self.sigma_to_fwhm(), 0
def citations(self):
return "scipy", "photutils"
class Gaussian2D(PSFModel):
"""
Fit an elliptical 2D Gaussian model to an image effective PSF
"""
def __init__(self, cutout_size=21, **kwargs):
super().__init__(cutout_size=cutout_size, **kwargs)
def model(self, height, xo, yo, sx, sy, theta, m):
dx = self.x - xo
dy = self.y - yo
a = (np.cos(theta)**2)/(2*sx**2) + (np.sin(theta)**2)/(2*sy**2)
b = -(np.sin(2*theta))/(4*sx**2) + (np.sin(2*theta))/(4*sy**2)
c = (np.sin(theta)**2)/(2*sx**2) + (np.cos(theta)**2)/(2*sy**2)
psf = height * np.exp(-(a * dx ** 2 + 2 * b * dx * dy + c * dy ** 2))
return psf + m
def optimize(self):
p0 = moments(self.epsf)
x0, y0 = p0[1], p0[2]
min_sigma = 0.5
bounds = [
(0, np.infty),
(x0 - 3, x0 + 3),
(y0 - 3, y0 + 3),
(min_sigma, np.infty),
(min_sigma, np.infty),
(0, 4),
(0, np.mean(self.epsf)),
]
with warnings.catch_warnings():
warnings.simplefilter("ignore")
params = minimize(self.nll, p0, bounds=bounds).x
self.optimized_params = params
return params[3]*self.sigma_to_fwhm(), params[4]*self.sigma_to_fwhm(), params[-2]
def citations(self):
return "scipy", "photutils"
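# Illustrative usage sketch (epsf is assumed to be a 2D numpy array of the
# effective PSF with shape (cutout_size, cutout_size), e.g. built with image_psf):
#     fwhmx, fwhmy, theta = Gaussian2D(cutout_size=21)(epsf)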
class Moffat2D(PSFModel):
"""
Fit an elliptical 2D Moffat model to an image effective PSF
"""
def __init__(self, cutout_size=21, **kwargs):
super().__init__(cutout_size=cutout_size, **kwargs)
def model(self, a, x0, y0, sx, sy, theta, b, beta):
# https://pixinsight.com/doc/tools/DynamicPSF/DynamicPSF.html
dx_ = self.x - x0
dy_ = self.y - y0
dx = dx_*np.cos(theta) + dy_*np.sin(theta)
dy = -dx_*np.sin(theta) + dy_*np.cos(theta)
return b + a / np.power(1 + (dx/sx)**2 + (dy/sy)**2, beta)
def sigma_to_fwhm(self):
return 2*np.sqrt(np.power(2, 1/self.optimized_params[-1]) - 1)
def optimize(self):
p0 = list(moments(self.epsf))
p0.append(1)
x0, y0 = p0[1], p0[2]
min_sigma = 0.5
bounds = [
(0, np.infty),
(x0 - 3, x0 + 3),
(y0 - 3, y0 + 3),
(min_sigma, np.infty),
(min_sigma, np.infty),
(0, 4),
(0, np.mean(self.epsf)),
(1, 8),
]
with warnings.catch_warnings():
warnings.simplefilter("ignore")
params = minimize(self.nll, p0, bounds=bounds).x
self.optimized_params = params
sm = self.sigma_to_fwhm()
return params[3]*sm, params[4]*sm, params[-2]
def citations(self):
return "scipy", "photutils"
class KeepGoodStars(Block):
def __init__(self, n=-1, **kwargs):
super().__init__(**kwargs)
self.n = n
def run(self, image, n=-1):
good_stars = self(image.data, image.stars_coords)
image.stars_coords = good_stars
def __call__(self, data, stars):
i, _stars = cutouts(data, stars, size=21)
#good = np.array([shapiro(s.data).statistic for s in _stars]) > 0.33
good = np.array([np.std(s.data) for s in _stars]) > 1000
return stars[i][np.argwhere(good).squeeze()][0:self.n] | 32.358209 | 131 | 0.577768 | [
"MIT"
] | lgrcia/prose | prose/blocks/psf.py | 10,840 | Python |
import sys
from solc import link_code
from os.path import join
from web3 import Web3
from cobra.project.configuration import Configuration
from cobra.utils import file_reader, yaml_loader, json_loader
from cobra.utils.console_log import console_log
class Interfaces(Configuration):
def __init__(self, web3: Web3, yaml_file, more=False):
super().__init__()
self.web3 = web3
self.contracts = dict()
self.yaml_file = yaml_file
self.more = more
def get_interfaces(self):
readied_yaml = file_reader(self.yaml_file)
loaded_yaml = yaml_loader(readied_yaml)
if 'test' in loaded_yaml:
test_yaml = loaded_yaml['test']
configurations_yaml = self.test(test_yaml)
for configuration_yaml in configurations_yaml:
artifact_json = join(configuration_yaml['artifact_path'],
configuration_yaml['artifact'])
readied_artifact_json = file_reader(artifact_json)
loaded_artifact_json = json_loader(readied_artifact_json)
if configuration_yaml['links'] is None:
self.test_with_out_link(loaded_artifact_json)
else:
self.test_with_link(loaded_artifact_json, configuration_yaml['links'])
return self.contracts
else:
console_log("test in cobra.yaml", "error", "NotFound")
sys.exit()
def get_links_address(self, links):
contract_name_and_address = dict()
for link in links:
for contract in self.contracts.keys():
contract = contract.split(":")
if contract[0] == link[:-5]:
contract_name_and_address.setdefault(link[:-5], contract[1])
elif contract[0] == link:
contract_name_and_address.setdefault(link, contract[1])
else:
continue
return contract_name_and_address
def test_with_link(self, artifact, links):
unlinked_bytecode = artifact['bin']
get_link_address = self.get_links_address(links)
linked_bytecode = link_code(unlinked_bytecode, get_link_address)
try:
contract_factory = self.web3.eth.contract(abi=artifact['abi'], bytecode=linked_bytecode)
except ValueError as valueError:
value_error = str(valueError.args.__getitem__(0))
if "'" in value_error and not self.more:
error = str(value_error).split("'")
console_log(str(error[0]), "error", "ValueError")
elif "'" in value_error and self.more:
console_log(
str(value_error), "error", "ValueError")
elif not self.more:
console_log(
str(value_error).strip('\n')[0], "error", "ValueError")
elif self.more:
console_log(
str(value_error), "error", "ValueError")
sys.exit()
# Get transaction hash
tx_hash = contract_factory.constructor().transact()
address = self.web3.eth.getTransactionReceipt(tx_hash)['contractAddress']
contract = {"abi": artifact['abi'], "bytecode": linked_bytecode}
contract_name_and_address = artifact['contractName'] + ":" + str(address)
self.contracts.setdefault(contract_name_and_address, contract)
def test_with_out_link(self, artifact):
try:
contract_factory = self.web3.eth.contract(abi=artifact['abi'],
bytecode=artifact['bin'])
except ValueError as valueError:
value_error = str(valueError.args.__getitem__(0))
if "'" in value_error and not self.more:
error = str(value_error).split("'")
console_log(str(error[0]), "error", "ValueError")
elif "'" in value_error and self.more:
console_log(
str(value_error), "error", "ValueError")
elif not self.more:
console_log(
str(value_error).strip('\n')[0], "error", "ValueError")
elif self.more:
console_log(
str(value_error), "error", "ValueError")
sys.exit()
# Get transaction hash
tx_hash = contract_factory.constructor().transact()
address = self.web3.eth.getTransactionReceipt(tx_hash)['contractAddress']
contract = {"abi": artifact['abi'], "bytecode": artifact['bin']}
contract_name_and_address = artifact['contractName'] + ":" + str(address)
self.contracts.setdefault(contract_name_and_address, contract)
| 42.801802 | 100 | 0.594401 | [
"MIT"
] | Koritsuki/cobra | cobra/test/interfaces.py | 4,751 | Python |
from django.contrib import admin
from .models import PhoneModel, Phone, Part, Storage, Device
admin.site.register(PhoneModel)
admin.site.register(Phone)
admin.site.register(Part)
admin.site.register(Storage)
admin.site.register(Device)
| 23.8 | 60 | 0.810924 | [
"Unlicense",
"MIT"
] | SashaPoraiko/academy-storage | storage/admin.py | 238 | Python |
connection = {
"id": "1",
"name": "sample name",
"resource_name": "resource name",
"schema_name": "salesforce",
"db_key": "DATABASE_URL",
"state": "IDLE",
"mappings": [{"id": "XYZ", "object_name": "Account", "state": "SCHEMA_CHANGED"}],
}
connections = {"count": 1, "results": [connection]}
| 26.666667 | 85 | 0.575 | [
"Apache-2.0"
] | Thermondo/django-heroku-connect | tests/fixtures.py | 320 | Python |
# -*- coding: utf-8 -*-
from raw._ebutts import *
| 16.666667 | 25 | 0.6 | [
"BSD-3-Clause"
] | ebu/ebu-tt-live-toolk | ebu_tt_live/bindings/_ebutts.py | 50 | Python |
import glob, os
from .exac_parser import load_data
import biothings.hub.dataload.uploader as uploader
from hub.dataload.uploader import SnpeffPostUpdateUploader
class ExacBaseUploader(SnpeffPostUpdateUploader):
__metadata__ = {"mapper" : 'observed',
"assembly" : "hg19",
"src_meta" : {
"url" : "http://exac.broadinstitute.org/",
"license" : "ODbL",
"license_url" : "http://exac.broadinstitute.org/terms",
"license_url_short": "https://goo.gl/MH8b34"
}
}
class ExacUploader(ExacBaseUploader):
name = "exac"
main_source= "exac"
def load_data(self,data_folder):
content = glob.glob(os.path.join(data_folder,"ExAC.r*.vcf"))
if len(content) != 1:
raise uploader.ResourceError("Expecting one single vcf file, got: %s" % repr(content))
input_file = content.pop()
self.logger.info("Load data from file '%s'" % input_file)
return load_data(self.__class__.name, input_file)
@classmethod
def get_mapping(klass):
mapping = {
"exac" : {
"properties": {
"chrom": {
"type": "text",
"analyzer": "string_lowercase"
},
"pos": {
"type": "long"
},
"ref": {
"type": "text",
"analyzer": "string_lowercase"
                    },
"alt": {
"type": "text",
"analyzer": "string_lowercase"
},
"multi-allelic": {
"type": "text",
"analyzer": "string_lowercase"
},
"alleles": {
"type": "text",
"analyzer": "string_lowercase"
},
"type": {
"type": "text",
"analyzer": "string_lowercase"
},
"qual": {
"type": "float"
},
"filter": {
"type": "text",
"analyzer": "string_lowercase"
},
"ac": {
"properties": {
"ac": {
"type": "integer"
},
"ac_afr": {
"type": "integer"
},
"ac_amr": {
"type": "integer"
},
"ac_adj": {
"type": "integer"
},
"ac_eas": {
"type": "integer"
},
"ac_fin": {
"type": "integer"
},
"ac_nfe": {
"type": "integer"
},
"ac_oth": {
"type": "integer"
},
"ac_sas": {
"type": "integer"
},
"ac_male": {
"type": "integer"
},
"ac_female": {
"type": "integer"
},
"ac_hom": {
"type": "integer"
}
}
},
"af": {
"type": "float"
},
"an": {
"properties": {
"an": {
"type": "integer"
},
"an_afr": {
"type": "integer"
},
"an_amr": {
"type": "integer"
},
"an_adj": {
"type": "integer"
},
"an_eas": {
"type": "integer"
},
"an_fin": {
"type": "integer"
},
"an_nfe": {
"type": "integer"
},
"an_oth": {
"type": "integer"
},
"an_sas": {
"type": "integer"
},
"an_female": {
"type": "integer"
},
"an_male": {
"type": "integer"
}
}
},
"baseqranksum": {
"type": "float"
},
"clippingranksum": {
"type": "float"
},
"fs": {
"type": "float"
},
"dp": {
"type": "long"
},
"het": {
"properties": {
"het_afr": {
"type": "integer"
},
"het_amr": {
"type": "integer"
},
"het_eas": {
"type": "integer"
},
"het_fin": {
"type": "integer"
},
"het_nfe": {
"type": "integer"
},
"het_oth": {
"type": "integer"
},
"het_sas": {
"type": "integer"
},
"ac_het": {
"type": "integer"
}
}
},
"hom": {
"properties": {
"hom_afr": {
"type": "integer"
},
"hom_amr": {
"type": "integer"
},
"hom_eas": {
"type": "integer"
},
"hom_fin": {
"type": "integer"
},
"hom_nfe": {
"type": "integer"
},
"hom_oth": {
"type": "integer"
},
"hom_sas": {
"type": "integer"
}
}
},
"inbreedingcoeff": {
"type": "float"
},
"mq": {
"properties": {
"mq": {
"type": "float"
},
"mq0": {
"type": "integer"
},
"mqranksum": {
"type": "float"
}
}
},
"ncc": {
"type": "long"
},
"qd": {
"type": "float"
},
"readposranksum": {
"type": "float"
},
"vqslod": {
"type": "float"
},
"culprit": {
"type": "text",
"analyzer": "string_lowercase"
}
}
},
}
return mapping
class ExacNonTCGAUploader(ExacBaseUploader):
name = "exac_nontcga"
main_source= "exac"
def load_data(self,data_folder):
content = glob.glob(os.path.join(data_folder,"ExAC_nonTCGA.r*.vcf"))
if len(content) != 1:
raise uploader.ResourceError("Expecting one single vcf file, got: %s" % repr(content))
input_file = content.pop()
self.logger.info("Load data from file '%s'" % input_file)
return load_data(self.__class__.name, input_file)
@classmethod
def get_mapping(klass):
mapping = {
"exac_nontcga": {
"properties": {
"chrom": {
"type": "text",
"analyzer": "string_lowercase"
},
"pos": {
"type": "long"
},
"ref": {
"type": "text",
"analyzer": "string_lowercase"
},
"alt": {
"type": "text",
"analyzer": "string_lowercase"
},
"multi-allelic": {
"type": "text",
"analyzer": "string_lowercase"
},
"alleles": {
"type": "text",
"analyzer": "string_lowercase"
},
"type": {
"type": "text",
"analyzer": "string_lowercase"
},
"qual": {
"type": "float"
},
"filter": {
"type": "text",
"analyzer": "string_lowercase"
},
"ac": {
"properties": {
"ac": {
"type": "integer"
},
"ac_afr": {
"type": "integer"
},
"ac_amr": {
"type": "integer"
},
"ac_adj": {
"type": "integer"
},
"ac_eas": {
"type": "integer"
},
"ac_fin": {
"type": "integer"
},
"ac_nfe": {
"type": "integer"
},
"ac_oth": {
"type": "integer"
},
"ac_sas": {
"type": "integer"
},
"ac_male": {
"type": "integer"
},
"ac_female": {
"type": "integer"
},
"ac_hom": {
"type": "integer"
}
}
},
"af": {
"type": "float"
},
"an": {
"properties": {
"an": {
"type": "integer"
},
"an_afr": {
"type": "integer"
},
"an_amr": {
"type": "integer"
},
"an_adj": {
"type": "integer"
},
"an_eas": {
"type": "integer"
},
"an_fin": {
"type": "integer"
},
"an_nfe": {
"type": "integer"
},
"an_oth": {
"type": "integer"
},
"an_sas": {
"type": "integer"
},
"an_female": {
"type": "integer"
},
"an_male": {
"type": "integer"
}
}
},
"baseqranksum": {
"type": "float"
},
"clippingranksum": {
"type": "float"
},
"fs": {
"type": "float"
},
"dp": {
"type": "long"
},
"het": {
"properties": {
"het_afr": {
"type": "integer"
},
"het_amr": {
"type": "integer"
},
"het_eas": {
"type": "integer"
},
"het_fin": {
"type": "integer"
},
"het_nfe": {
"type": "integer"
},
"het_oth": {
"type": "integer"
},
"het_sas": {
"type": "integer"
},
"ac_het": {
"type": "integer"
}
}
},
"hom": {
"properties": {
"hom_afr": {
"type": "integer"
},
"hom_amr": {
"type": "integer"
},
"hom_eas": {
"type": "integer"
},
"hom_fin": {
"type": "integer"
},
"hom_nfe": {
"type": "integer"
},
"hom_oth": {
"type": "integer"
},
"hom_sas": {
"type": "integer"
}
}
},
"inbreedingcoeff": {
"type": "float"
},
"mq": {
"properties": {
"mq": {
"type": "float"
},
"mq0": {
"type": "integer"
},
"mqranksum": {
"type": "float"
}
}
},
"ncc": {
"type": "long"
},
"qd": {
"type": "float"
},
"readposranksum": {
"type": "float"
},
"vqslod": {
"type": "float"
},
"culprit": {
"type": "text",
"analyzer": "string_lowercase"
}
}
}
}
return mapping
| 36.25355 | 98 | 0.205002 | [
"Apache-2.0"
] | raymond301/myvariant.info | src/hub/dataload/sources/exac/exac_upload.py | 17,873 | Python |
"""
"run_HAC.py" executes the training schedule for the agent. By default, the agent will alternate between exploration and testing phases. The number of episodes in the exploration phase can be configured in section 3 of the "design_agent_and_env2.py" file. If the user prefers to only explore or only test, the user can enter the command-line option "--train_only" or "--test", respectively. The full list of command-line options is available in the "options.py" file.
"""
import pickle as cpickle
import agent as Agent
from utils import print_summary
NUM_BATCH = 1000
TEST_FREQ = 2
num_test_episodes = 100
def run_HAC(FLAGS,env,agent):
# Print task summary
print_summary(FLAGS,env)
# Determine training mode. If not testing and not solely training, interleave training and testing to track progress
mix_train_test = False
if not FLAGS.test and not FLAGS.train_only:
mix_train_test = True
for batch in range(NUM_BATCH):
num_episodes = agent.other_params["num_exploration_episodes"]
# Evaluate policy every TEST_FREQ batches if interleaving training and testing
if mix_train_test and batch % TEST_FREQ == 0:
print("\n--- TESTING ---")
agent.FLAGS.test = True
num_episodes = num_test_episodes
# Reset successful episode counter
successful_episodes = 0
for episode in range(num_episodes):
print("\nBatch %d, Episode %d" % (batch, episode))
# Train for an episode
success = agent.train(env, episode)
if success:
print("Batch %d, Episode %d End Goal Achieved\n" % (batch, episode))
# Increment successful episode counter if applicable
if mix_train_test and batch % TEST_FREQ == 0:
successful_episodes += 1
# Save agent
agent.save_model(episode)
# Finish evaluating policy if tested prior batch
if mix_train_test and batch % TEST_FREQ == 0:
# Log performance
success_rate = successful_episodes / num_test_episodes * 100
print("\nTesting Success Rate %.2f%%" % success_rate)
agent.log_performance(success_rate)
agent.FLAGS.test = False
print("\n--- END TESTING ---\n")
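# Illustrative entry point (the helper names below are assumptions; see the
# "design_agent_and_env2.py" and "options.py" files mentioned in the docstring):
#     # FLAGS = parse the command-line options (options.py)
#     # agent, env = build the agent and environment (design_agent_and_env2.py)
#     # run_HAC(FLAGS, env, agent)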
| 37.84375 | 468 | 0.631296 | [
"MIT"
] | erick-alv/Hierarchical-Actor-Critc-HAC- | run_HAC.py | 2,422 | Python |
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import re
from marionette.by import By
from gaiatest.apps.base import Base
class Ftu(Base):
name = 'FTU'
_next_button_locator = (By.ID, 'forward')
# Step Languages section
_section_languages_locator = (By.ID, 'languages')
_listed_languages_locator = (By.CSS_SELECTOR, "#languages ul li input[name='language.current']")
_language_locator = (By.CSS_SELECTOR, "#languages ul li input[name='language.current'][value='%s'] ~ p")
_language_input_locator = (By.CSS_SELECTOR,
"#languages ul li input[name='language.current'][value='%s']")
_selected_language_input_locator = (By.CSS_SELECTOR, "#languages ul li input:checked")
# Step Cell data section
_section_cell_data_locator = (By.ID, 'data_3g')
_enable_data_checkbox_locator = (By.ID, 'data-connection-switch')
# Step Wifi
_section_wifi_locator = (By.ID, 'wifi')
_found_wifi_networks_locator = (By.CSS_SELECTOR, 'ul#networks-list li')
_password_input_locator = (By.ID, 'wifi_password')
_join_network_locator = (By.ID, 'wifi-join-button')
_progress_activity_locator = (By.ID, 'progress-activity')
# Step Date & Time
_section_date_time_locator = (By.ID, 'date_and_time')
_timezone_continent_locator = (By.CSS_SELECTOR, '#time-form li:nth-child(1) > .change.icon.icon-dialog')
_timezone_city_locator = (By.CSS_SELECTOR, '#time-form li:nth-child(2) > .change.icon.icon-dialog')
_time_zone_title_locator = (By.ID, 'time-zone-title')
# Step Geolocation
_section_geolocation_locator = (By.ID, 'geolocation')
_enable_geolocation_checkbox_locator = (By.CSS_SELECTOR, '#geolocation-switch > label')
# Section Import contacts
_section_import_contacts_locator = (By.ID, 'import_contacts')
_import_from_sim_locator = (By.ID, 'sim-import-button')
_sim_import_feedback_locator = (By.ID, 'statusMsg')
# Step Firefox Accounts
_section_firefox_accounts_locator = (By.ID, 'firefox_accounts')
# Section Welcome Browser
_section_welcome_browser_locator = (By.ID, 'welcome_browser')
_enable_statistic_checkbox_locator = (By.ID, 'form_share_statistics')
_statistic_checkbox_locator = (By.ID, 'share-performance')
# Section Privacy Choices
_section_browser_privacy_locator = (By.ID, 'browser_privacy')
_email_field_locator = (By.CSS_SELECTOR, 'input[type="email"]')
# Section Finish
_section_finish_locator = (By.ID, 'finish-screen')
_skip_tour_button_locator = (By.ID, 'skip-tutorial-button')
_take_tour_button_locator = (By.ID, 'lets-go-button')
# Section Tour
_step_header_locator = (By.ID, 'tutorial-step-title')
_tour_next_button_locator = (By.ID, 'forward-tutorial')
_tour_back_button_locator = (By.ID, 'back-tutorial')
# Section Tutorial Finish
_section_tutorial_finish_locator = (By.ID, 'tutorial-finish-tiny')
_lets_go_button_locator = (By.ID, 'tutorialFinished')
# Pattern for import sim contacts message
_pattern_contacts = re.compile("^No contacts detected on SIM to import$|^Imported one contact$|^Imported [0-9]+ contacts$")
_pattern_contacts_0 = re.compile("^No contacts detected on SIM to import$")
_pattern_contacts_1 = re.compile("^Imported one contact$")
_pattern_contacts_N = re.compile("^Imported ([0-9]+) contacts$")
def launch(self):
Base.launch(self)
self.wait_for_element_displayed(*self._section_languages_locator)
@property
def languages_list(self):
return len(self.marionette.find_elements(*self._listed_languages_locator))
@property
def selected_language(self):
return self.marionette.find_element(*self._selected_language_input_locator).get_attribute(
'value')
def tap_language(self, language):
self.marionette.find_element(self._language_locator[0], self._language_locator[1] % language).tap()
def a11y_click_language(self, language):
self.accessibility.click(self.marionette.find_element(self._language_input_locator[0],
self._language_input_locator[1] % language))
def tap_next(self):
self.marionette.find_element(*self._next_button_locator).tap()
def a11y_click_next(self):
self.accessibility.click(self.marionette.find_element(*self._next_button_locator))
def tap_next_to_cell_data_section(self):
self.tap_next()
self.wait_for_element_displayed(*self._section_cell_data_locator)
def a11y_click_next_to_cell_data_section(self):
self.a11y_click_next()
self.wait_for_element_displayed(*self._section_cell_data_locator)
def enable_data(self):
self.wait_for_element_displayed(*self._enable_data_checkbox_locator)
self.marionette.find_element(*self._enable_data_checkbox_locator).tap()
def a11y_enable_data(self):
self.wait_for_element_displayed(*self._enable_data_checkbox_locator)
self.accessibility.click(self.marionette.find_element(*self._enable_data_checkbox_locator))
def tap_next_to_wifi_section(self):
self.tap_next()
self.wait_for_condition(lambda m: not self.is_element_displayed(*self._progress_activity_locator))
self.wait_for_element_displayed(*self._section_wifi_locator)
def a11y_click_next_to_wifi_section(self):
self.a11y_click_next()
self.wait_for_condition(lambda m: not self.is_element_displayed(
*self._progress_activity_locator))
self.wait_for_element_displayed(*self._section_wifi_locator)
def wait_for_networks_available(self):
self.wait_for_condition(lambda m: len(m.find_elements(*self._found_wifi_networks_locator)) > 0, message='No networks listed on screen')
def find_wifi_network(self, network_ssid):
wifi_network_locator = (By.CSS_SELECTOR, '#networks-list li[data-ssid="%s"]' % network_ssid)
wifi_network = self.wait_for_element_present(*wifi_network_locator)
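        # Scroll the matched network item into view so the subsequent tap lands on it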
self.marionette.execute_script("arguments[0].scrollIntoView(false);", [wifi_network])
return wifi_network
def connect_to_wifi(self, network_ssid, password, key_management=None):
wifi_network = self.find_wifi_network(network_ssid)
wifi_network.tap()
        # This handles the case where the Wi-Fi network requires a password;
        # since we cannot know that in advance, the password step is conditional
if key_management:
self.wait_for_element_displayed(*self._password_input_locator)
self.marionette.find_element(*self._password_input_locator).send_keys(password)
self.marionette.find_element(*self._join_network_locator).tap()
def a11y_connect_to_wifi(self, network_ssid, password, key_management=None):
wifi_network = self.find_wifi_network(network_ssid)
self.accessibility.click(wifi_network)
        # This handles the case where the Wi-Fi network requires a password;
        # since we cannot know that in advance, the password step is conditional
if key_management:
self.wait_for_element_displayed(*self._password_input_locator)
self.marionette.find_element(*self._password_input_locator).send_keys(password)
self.accessibility.click(self.marionette.find_element(*self._join_network_locator))
def tap_next_to_timezone_section(self):
self.tap_next()
self.wait_for_element_displayed(*self._section_date_time_locator)
def a11y_click_next_to_timezone_section(self):
self.a11y_click_next()
self.wait_for_element_displayed(*self._section_date_time_locator)
def set_timezone_continent(self, continent):
self.wait_for_element_displayed(*self._timezone_continent_locator)
self.marionette.find_element(*self._timezone_continent_locator).tap()
self.select(continent)
def a11y_set_timezone_continent(self, continent):
self.wait_for_element_displayed(*self._timezone_continent_locator)
self.accessibility.click(self.marionette.find_element(*self._timezone_continent_locator))
self.a11y_select(continent)
def set_timezone_city(self, city):
self.wait_for_element_displayed(*self._timezone_city_locator)
self.marionette.find_element(*self._timezone_city_locator).tap()
self.select(city)
def a11y_set_timezone_city(self, city):
self.wait_for_element_displayed(*self._timezone_city_locator)
self.accessibility.click(self.marionette.find_element(*self._timezone_city_locator))
self.a11y_select(city)
@property
def timezone_title(self):
return self.marionette.find_element(*self._time_zone_title_locator).text
def tap_next_to_geolocation_section(self):
self.tap_next()
self.wait_for_element_displayed(*self._section_geolocation_locator)
def a11y_click_next_to_geolocation_section(self):
self.a11y_click_next()
self.wait_for_element_displayed(*self._section_geolocation_locator)
def disable_geolocation(self):
self.wait_for_element_displayed(*self._enable_geolocation_checkbox_locator)
# TODO: Remove y parameter when Bug 932804 is fixed
self.marionette.find_element(*self._enable_geolocation_checkbox_locator).tap(y=30)
def a11y_disable_geolocation(self):
self.wait_for_element_displayed(*self._enable_geolocation_checkbox_locator)
self.accessibility.click(self.marionette.find_element(
*self._enable_geolocation_checkbox_locator))
def tap_next_to_import_contacts_section(self):
self.tap_next()
self.wait_for_element_displayed(*self._section_import_contacts_locator)
def a11y_click_next_to_import_contacts_section(self):
self.a11y_click_next()
self.wait_for_element_displayed(*self._section_import_contacts_locator)
def tap_import_from_sim(self):
self.marionette.find_element(*self._import_from_sim_locator).tap()
def wait_for_contacts_imported(self):
self.wait_for_condition(lambda m: self._pattern_contacts.match(m.find_element(*self._sim_import_feedback_locator).text) is not None,
message='Contact did not import from sim before timeout')
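    # Parse the SIM import feedback message and return the number of imported
    # contacts (0, 1 or N), or None if the message does not match any pattern.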
@property
def count_imported_contacts(self):
import_sim_message = self.marionette.find_element(*self._sim_import_feedback_locator).text
import_sim_count = None
if self._pattern_contacts_0.match(import_sim_message) is not None:
import_sim_count = 0
elif self._pattern_contacts_1.match(import_sim_message) is not None:
import_sim_count = 1
elif self._pattern_contacts_N.match(import_sim_message) is not None:
count = self._pattern_contacts_N.match(import_sim_message).group(1)
import_sim_count = int(count)
return import_sim_count
def tap_next_to_firefox_accounts_section(self):
self.tap_next()
self.wait_for_element_displayed(*self._section_firefox_accounts_locator)
def a11y_click_next_to_firefox_accounts_section(self):
self.a11y_click_next()
self.wait_for_element_displayed(*self._section_firefox_accounts_locator)
def tap_next_to_welcome_browser_section(self):
self.tap_next()
self.wait_for_element_displayed(*self._section_welcome_browser_locator)
def a11y_click_next_to_welcome_browser_section(self):
self.a11y_click_next()
self.wait_for_element_displayed(*self._section_welcome_browser_locator)
def tap_statistics_checkbox(self):
self.marionette.find_element(*self._enable_statistic_checkbox_locator).tap()
def a11y_click_statistics_checkbox(self):
self.accessibility.click(self.marionette.find_element(*self._statistic_checkbox_locator))
def tap_next_to_privacy_browser_section(self):
self.tap_next()
self.wait_for_element_displayed(*self._section_browser_privacy_locator)
def a11y_click_next_to_privacy_browser_section(self):
self.a11y_click_next()
self.wait_for_element_displayed(*self._section_browser_privacy_locator)
def enter_email_address(self, email):
# TODO assert that this is preserved in the system somewhere. Currently it is not used
self.marionette.find_element(*self._email_field_locator).send_keys(email)
def tap_next_to_finish_section(self):
self.tap_next()
self.wait_for_element_displayed(*self._section_finish_locator)
def a11y_click_next_to_finish_section(self):
self.a11y_click_next()
self.wait_for_element_displayed(*self._section_finish_locator)
def tap_skip_tour(self):
self.marionette.find_element(*self._skip_tour_button_locator).tap()
def a11y_click_skip_tour(self):
self.accessibility.click(self.marionette.find_element(*self._skip_tour_button_locator))
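    # Step through the FTU wizard by tapping Next until the tour prompt shows up,
    # giving up after several consecutive checks without a visible Next button.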
def run_ftu_setup_with_default_values(self):
        count = 0
while not self.is_element_displayed(*self._take_tour_button_locator):
if self.is_element_displayed(*self._next_button_locator):
self.tap_next()
else:
                count += 1
if count > 5:
break
def tap_take_tour(self):
self.marionette.find_element(*self._take_tour_button_locator).tap()
@property
def step1_header_text(self):
self.wait_for_element_displayed(*self._step_header_locator)
return self.marionette.find_element(*self._step_header_locator).text
def tap_tour_next(self):
self.wait_for_element_displayed(*self._tour_next_button_locator)
self.marionette.find_element(*self._tour_next_button_locator).tap()
def tap_back(self):
self.wait_for_element_displayed(*self._tour_next_button_locator)
self.marionette.find_element(*self._tour_back_button_locator).tap()
@property
def step2_header_text(self):
self.wait_for_element_displayed(*self._step_header_locator)
return self.marionette.find_element(*self._step_header_locator).text
@property
def step3_header_text(self):
self.wait_for_element_displayed(*self._step_header_locator)
return self.marionette.find_element(*self._step_header_locator).text
@property
def step4_header_text(self):
self.wait_for_element_displayed(*self._step_header_locator)
return self.marionette.find_element(*self._step_header_locator).text
@property
def step5_header_text(self):
self.wait_for_element_displayed(*self._step_header_locator)
return self.marionette.find_element(*self._step_header_locator).text
@property
def step6_header_text(self):
self.wait_for_element_displayed(*self._step_header_locator)
return self.marionette.find_element(*self._step_header_locator).text
def wait_for_finish_tutorial_section(self):
self.wait_for_element_displayed(*self._section_tutorial_finish_locator)
def tap_lets_go_button(self):
self.marionette.find_element(*self._lets_go_button_locator).tap()
| 43.454286 | 143 | 0.738444 | ["Apache-2.0"] | EragonJ/gaia | tests/python/gaia-ui-tests/gaiatest/apps/ftu/app.py | 15,209 | Python |
import json
import os
from pyramid.settings import aslist
from kinto.core.decorators import cache_forever
HERE = os.path.abspath(os.path.dirname(__file__))
# Configured home page
@cache_forever
def admin_home_view(request):
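    # Expose selected server settings (the enabled auth policies) to the admin
    # single-page app by injecting them as a window.globalSettings script tag.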
settings = {
"authMethods": aslist(request.registry.settings.get('multiauth.policies'))
}
globalSettings = "<script>window.globalSettings = %s;</script>" % json.dumps(settings)
# Update the file built by react-scripts to load the globalSettings.
with open(os.path.join(HERE, 'build/index.html')) as f:
return f.read().replace('<script', globalSettings + '<script')
| 28.681818 | 90 | 0.721078 | ["Apache-2.0"] | g-k/kinto | kinto/plugins/admin/views.py | 631 | Python |
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
import json
import unittest
import mock
import os
import time
import tempfile
import requests
import datetime
from azure_devtools.scenario_tests import AllowLargeResponse, record_only
from azure.cli.testsdk import (ScenarioTest, LocalContextScenarioTest, LiveScenarioTest, ResourceGroupPreparer,
StorageAccountPreparer, JMESPathCheck, live_only)
TEST_DIR = os.path.abspath(os.path.join(os.path.abspath(__file__), '..'))
# pylint: disable=line-too-long
# In case the repository ever gets removed, the source code is kept under "sample-repo-for-deployment-test",
# which you can use to rebuild the repository
TEST_REPO_URL = 'https://github.com/yugangw-msft/azure-site-test.git'
WINDOWS_ASP_LOCATION_WEBAPP = 'japanwest'
WINDOWS_ASP_LOCATION_FUNCTIONAPP = 'francecentral'
LINUX_ASP_LOCATION_WEBAPP = 'eastus2'
LINUX_ASP_LOCATION_FUNCTIONAPP = 'ukwest'
class WebappBasicE2ETest(ScenarioTest):
@AllowLargeResponse()
@ResourceGroupPreparer(location=WINDOWS_ASP_LOCATION_WEBAPP)
def test_webapp_e2e(self, resource_group):
webapp_name = self.create_random_name(prefix='webapp-e2e', length=24)
plan = self.create_random_name(prefix='webapp-e2e-plan', length=24)
self.cmd('appservice plan create -g {} -n {}'.format(resource_group, plan))
self.cmd('appservice plan list -g {}'.format(resource_group), checks=[
JMESPathCheck('length(@)', 1),
JMESPathCheck('[0].name', plan),
JMESPathCheck('[0].perSiteScaling', False)
])
# test idempotency
self.cmd(
'appservice plan create -g {} -n {} --per-site-scaling'.format(resource_group, plan))
self.cmd('appservice plan list -g {}'.format(resource_group), checks=[
JMESPathCheck('length(@)', 1),
JMESPathCheck('[0].name', plan),
JMESPathCheck('[0].sku.tier', 'Basic'),
JMESPathCheck('[0].sku.name', 'B1'),
JMESPathCheck('[0].perSiteScaling', True)
])
self.cmd('appservice plan list -g {}'.format(resource_group), checks=[
JMESPathCheck("length([?name=='{}' && resourceGroup=='{}'])".format(
plan, resource_group), 1)
])
self.cmd('appservice plan show -g {} -n {}'.format(resource_group, plan), checks=[
JMESPathCheck('name', plan)
])
self.cmd('webapp create -g {} -n {} --plan {}'.format(resource_group, webapp_name, plan), checks=[
JMESPathCheck('state', 'Running'),
JMESPathCheck('name', webapp_name),
JMESPathCheck('hostNames[0]', webapp_name + '.azurewebsites.net')
])
self.cmd('webapp create -g {} -n {} --plan {}'.format(resource_group,
webapp_name, plan)) # test idempotency
self.cmd('webapp list -g {}'.format(resource_group), checks=[
JMESPathCheck('length(@)', 1),
JMESPathCheck('[0].name', webapp_name),
JMESPathCheck('[0].hostNames[0]', webapp_name +
'.azurewebsites.net')
])
self.cmd('webapp show -g {} -n {}'.format(resource_group, webapp_name), checks=[
JMESPathCheck('name', webapp_name),
JMESPathCheck('hostNames[0]', webapp_name + '.azurewebsites.net')
])
result = self.cmd('webapp deployment source config-local-git -g {} -n {}'.format(
resource_group, webapp_name)).get_output_in_json()
self.assertTrue(result['url'].endswith(webapp_name + '.git'))
self.cmd('webapp deployment source show -g {} -n {}'.format(resource_group, webapp_name), checks=[
JMESPathCheck(
'repoUrl', 'https://{}.scm.azurewebsites.net'.format(webapp_name))
])
# turn on diagnostics
test_cmd = ('webapp log config -g {} -n {} --level verbose'.format(resource_group, webapp_name) + ' '
'--application-logging true --detailed-error-messages true --failed-request-tracing true --web-server-logging filesystem')
self.cmd(test_cmd)
self.cmd('webapp log show -g {} -n {}'.format(resource_group, webapp_name), checks=[
JMESPathCheck('detailedErrorMessages.enabled', True),
JMESPathCheck('failedRequestsTracing.enabled', True)
])
self.cmd('webapp config show -g {} -n {}'.format(resource_group, webapp_name), checks=[
JMESPathCheck('detailedErrorLoggingEnabled', True),
JMESPathCheck('httpLoggingEnabled', True),
JMESPathCheck('scmType', 'LocalGit'),
JMESPathCheck('requestTracingEnabled', True)
# TODO: contact webapp team for where to retrieve 'level'
])
# show publish profile info
result = self.cmd('webapp deployment list-publishing-profiles -g {} -n {}'.format(
resource_group, webapp_name)).get_output_in_json()
self.assertTrue(result[1]['publishUrl'].startswith('ftp://'))
self.cmd('webapp stop -g {} -n {}'.format(resource_group, webapp_name))
self.cmd('webapp show -g {} -n {}'.format(resource_group, webapp_name), checks=[
JMESPathCheck('state', 'Stopped'),
JMESPathCheck('name', webapp_name)
])
self.cmd('webapp start -g {} -n {}'.format(resource_group, webapp_name))
self.cmd('webapp show -g {} -n {}'.format(resource_group, webapp_name), checks=[
JMESPathCheck('state', 'Running'),
JMESPathCheck('name', webapp_name)
])
# show publishing credentials
result = self.cmd('webapp deployment list-publishing-credentials -g {} -n {}'.format(
resource_group, webapp_name)).get_output_in_json()
self.assertTrue('scm' in result['scmUri'])
# verify httpsOnly is false
self.cmd('webapp show -g {} -n {}'.format(resource_group, webapp_name), checks=[
JMESPathCheck('httpsOnly', False),
])
        # verify creating a non-node app using --runtime
self.cmd(
'webapp create -g {} -n {} --plan {} -r "php|7.3"'.format(resource_group, webapp_name, plan))
# TODO: Bug #14409. Re-enable check after fixing https://github.com/Azure/azure-cli/issues/14409
# self.cmd('webapp config show -g {} -n {}'.format(resource_group, webapp_name), checks=[
# JMESPathCheck('phpVersion', '7.3')
# ])
def test_webapp_runtimes(self):
self.cmd('webapp list-runtimes')
class WebappQuickCreateTest(ScenarioTest):
@ResourceGroupPreparer(location=WINDOWS_ASP_LOCATION_WEBAPP)
def test_win_webapp_quick_create(self, resource_group):
webapp_name = self.create_random_name(prefix='webapp-quick', length=24)
plan = self.create_random_name(prefix='plan-quick', length=24)
self.cmd('appservice plan create -g {} -n {}'.format(resource_group, plan))
r = self.cmd('webapp create -g {} -n {} --plan {} --deployment-local-git'.format(
resource_group, webapp_name, plan)).get_output_in_json()
self.assertTrue(r['ftpPublishingUrl'].startswith('ftp://'))
self.cmd('webapp config appsettings list -g {} -n {}'.format(resource_group, webapp_name), checks=[
JMESPathCheck('[0].name', 'WEBSITE_NODE_DEFAULT_VERSION'),
JMESPathCheck('[0].value', '10.14'),
])
@ResourceGroupPreparer(location=WINDOWS_ASP_LOCATION_WEBAPP)
def test_win_webapp_quick_create_runtime(self, resource_group):
webapp_name = self.create_random_name(prefix='webapp-quick', length=24)
plan = self.create_random_name(prefix='plan-quick', length=24)
self.cmd('appservice plan create -g {} -n {}'.format(resource_group, plan))
r = self.cmd('webapp create -g {} -n {} --plan {} --deployment-local-git -r "node|10.15"'.format(
resource_group, webapp_name, plan)).get_output_in_json()
self.assertTrue(r['ftpPublishingUrl'].startswith('ftp://'))
self.cmd('webapp config appsettings list -g {} -n {}'.format(resource_group, webapp_name), checks=[
JMESPathCheck('[0].name', 'WEBSITE_NODE_DEFAULT_VERSION'),
JMESPathCheck('[0].value', '10.14'),
])
@ResourceGroupPreparer(location=WINDOWS_ASP_LOCATION_WEBAPP)
def test_win_webapp_quick_create_cd(self, resource_group):
webapp_name = self.create_random_name(prefix='webapp-quick-cd', length=24)
plan = self.create_random_name(prefix='plan-quick', length=24)
self.cmd('appservice plan create -g {} -n {}'.format(resource_group, plan))
self.cmd('webapp create -g {} -n {} --plan {} --deployment-source-url {} -r "node|10.15"'.format(
resource_group, webapp_name, plan, TEST_REPO_URL))
        # 30 seconds should be enough for the deployment to finish (skipped under playback mode)
time.sleep(30)
r = requests.get('http://{}.azurewebsites.net'.format(webapp_name))
# verify the web page
self.assertTrue('Hello world' in str(r.content))
@ResourceGroupPreparer(location='canadacentral')
def test_linux_webapp_quick_create(self, resource_group):
webapp_name = self.create_random_name(
prefix='webapp-quick-linux', length=24)
plan = self.create_random_name(prefix='plan-quick-linux', length=24)
self.cmd(
'appservice plan create -g {} -n {} --is-linux'.format(resource_group, plan))
self.cmd('webapp create -g {} -n {} --plan {} -i patle/ruby-hello'.format(
resource_group, webapp_name, plan))
r = requests.get(
'http://{}.azurewebsites.net'.format(webapp_name), timeout=240)
# verify the web page
self.assertTrue('Ruby on Rails in Web Apps on Linux' in str(r.content))
# verify app settings
self.cmd('webapp config appsettings list -g {} -n {}'.format(resource_group, webapp_name), checks=[
JMESPathCheck('[0].name', 'WEBSITES_ENABLE_APP_SERVICE_STORAGE'),
JMESPathCheck('[0].value', 'false'),
])
@ResourceGroupPreparer(location=LINUX_ASP_LOCATION_WEBAPP)
def test_linux_webapp_multicontainer_create(self, resource_group):
webapp_name = self.create_random_name(
prefix='webapp-linux-multi', length=24)
plan = self.create_random_name(prefix='plan-linux-multi', length=24)
config_file = os.path.join(TEST_DIR, 'sample-compose.yml')
self.cmd(
'appservice plan create -g {} -n {} --is-linux'.format(resource_group, plan))
self.cmd("webapp create -g {} -n {} --plan {} --multicontainer-config-file \"{}\" "
"--multicontainer-config-type COMPOSE".format(resource_group, webapp_name, plan, config_file))
last_number_seen = 99999999
for x in range(0, 10):
r = requests.get(
'http://{}.azurewebsites.net'.format(webapp_name), timeout=240)
# verify the web page
self.assertTrue('Hello World! I have been seen' in str(r.content))
current_number = [int(s)
for s in r.content.split() if s.isdigit()][0]
self.assertNotEqual(current_number, last_number_seen)
last_number_seen = current_number
@ResourceGroupPreparer(location=LINUX_ASP_LOCATION_WEBAPP)
def test_linux_webapp_quick_create_cd(self, resource_group):
webapp_name = self.create_random_name(
prefix='webapp-linux-cd', length=24)
plan = 'plan-quick-linux-cd'
self.cmd(
'appservice plan create -g {} -n {} --is-linux'.format(resource_group, plan))
self.cmd('webapp create -g {} -n {} --plan {} -u {} -r "node|10.14"'.format(resource_group, webapp_name,
plan, TEST_REPO_URL))
        # 45 seconds should be enough for the deployment to finish (skipped under playback mode)
time.sleep(45)
r = requests.get(
'http://{}.azurewebsites.net'.format(webapp_name), timeout=240)
# verify the web page
if 'Hello world' not in str(r.content):
            # dump out more info for diagnosis
self.fail(
"'Hello world' is not found in the web page. We get instead:" + str(r.content))
@ResourceGroupPreparer(parameter_name='resource_group', parameter_name_for_location='resource_group_location', location=WINDOWS_ASP_LOCATION_WEBAPP)
@ResourceGroupPreparer(parameter_name='resource_group2', parameter_name_for_location='resource_group_location2', location=WINDOWS_ASP_LOCATION_WEBAPP)
def test_create_in_different_group(self, resource_group, resource_group_location, resource_group2, resource_group_location2):
plan = 'planInOneRG'
self.cmd('group create -n {} -l {}'.format(resource_group2,
resource_group_location))
plan_id = self.cmd('appservice plan create -g {} -n {}'.format(
resource_group, plan)).get_output_in_json()['id']
self.cmd('webapp create -g {} -n webInOtherRG --plan {}'.format(resource_group2, plan_id), checks=[
JMESPathCheck('name', 'webInOtherRG')
])
class BackupWithName(ScenarioTest):
@ResourceGroupPreparer(parameter_name='resource_group', location=WINDOWS_ASP_LOCATION_WEBAPP)
def test_backup_with_name(self, resource_group):
plan = self.create_random_name(prefix='plan-backup', length=24)
self.cmd('appservice plan create -g {} -n {} --sku S1'.format(resource_group, plan))
webapp = self.create_random_name(prefix='backup-webapp', length=24)
self.cmd('webapp create -g {} -n {} --plan {}'.format(resource_group, webapp, plan))
storage_Account = self.create_random_name(prefix='backup', length=24)
self.cmd('storage account create -n {} -g {} --location {}'.format(storage_Account, resource_group, WINDOWS_ASP_LOCATION_WEBAPP))
container = self.create_random_name(prefix='backupcontainer', length=24)
self.cmd('storage container create --account-name {} --name {}'.format(storage_Account, container))
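        # Generate a SAS token valid for roughly a day so the backup can be written to the container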
expirydate = (datetime.datetime.now() + datetime.timedelta(days=1, hours=3)).strftime("\"%Y-%m-%dT%H:%MZ\"")
sastoken = self.cmd('storage container generate-sas --account-name {} --name {} --expiry {} --permissions rwdl'.format(storage_Account, container, expirydate))
sasurl = '\"https://{}.blob.core.windows.net/{}?{}\"'.format(storage_Account, container, sastoken)
backup_name = self.create_random_name(prefix='backup-name', length=24)
self.cmd('webapp config backup create -g {} --webapp-name {} --backup-name {} --container-url {}'.format(resource_group, webapp, backup_name, sasurl), checks=[
JMESPathCheck('backupItemName', backup_name)
])
# The test framework is not able to handle binary file formats, hence only run live
class AppServiceLogTest(ScenarioTest):
@ResourceGroupPreparer(location=WINDOWS_ASP_LOCATION_WEBAPP)
def test_download_win_web_log(self, resource_group):
import zipfile
webapp_name = self.create_random_name(
prefix='webapp-win-log', length=24)
plan = self.create_random_name(prefix='win-log', length=24)
self.cmd('appservice plan create -g {} -n {}'.format(resource_group, plan))
self.cmd('webapp create -g {} -n {} --plan {} --deployment-source-url {} -r "node|10.15"'.format(
resource_group, webapp_name, plan, TEST_REPO_URL))
        # 30 seconds should be enough for the deployment to finish (skipped under playback mode)
time.sleep(30)
# sanity check the traces
_, log_file = tempfile.mkstemp()
log_dir = log_file + '-dir'
self.cmd('webapp log download -g {} -n {} --log-file "{}"'.format(
resource_group, webapp_name, log_file))
zip_ref = zipfile.ZipFile(log_file, 'r')
zip_ref.extractall(log_dir)
self.assertTrue(os.path.isdir(os.path.join(
log_dir, 'LogFiles', 'kudu', 'trace')))
@unittest.skip("Cannot pass under python3. Needs fixing.")
@ResourceGroupPreparer(location='canadacentral')
def test_download_linux_web_log(self, resource_group):
import zipfile
webapp_name = self.create_random_name(
prefix='webapp-linux-log', length=24)
plan = self.create_random_name(prefix='linux-log', length=24)
self.cmd('appservice plan create -g {} -n {} --is-linux'.format(resource_group, plan))
self.cmd('webapp create -g {} -n {} --plan {} -i patle/ruby-hello'.format(
resource_group, webapp_name, plan))
# load the site to produce a few traces
requests.get(
'http://{}.azurewebsites.net'.format(webapp_name), timeout=240)
# sanity check the traces
_, log_file = tempfile.mkstemp()
log_dir = log_file + '-dir'
self.cmd('webapp log download -g {} -n {} --log-file "{}"'.format(
resource_group, webapp_name, log_file))
zip_ref = zipfile.ZipFile(log_file, 'r')
zip_ref.extractall(log_dir)
self.assertTrue(os.path.isdir(os.path.join(
log_dir, 'LogFiles', 'kudu', 'trace')))
class AppServicePlanScenarioTest(ScenarioTest):
@ResourceGroupPreparer(location=WINDOWS_ASP_LOCATION_WEBAPP)
def test_retain_plan(self, resource_group):
webapp_name = self.create_random_name('web', 24)
plan = self.create_random_name('web-plan', 24)
self.cmd('appservice plan create -g {} -n {}'.format(resource_group, plan))
self.cmd(
'webapp create -g {} -n {} --plan {}'.format(resource_group, webapp_name, plan))
self.cmd('webapp delete -g {} -n {} --keep-dns-registration --keep-empty-plan --keep-metrics'.format(resource_group, webapp_name))
self.cmd('appservice plan list -g {}'.format(resource_group), checks=[
JMESPathCheck('[0].name', plan)
])
@ResourceGroupPreparer(location=WINDOWS_ASP_LOCATION_WEBAPP)
def test_auto_delete_plan(self, resource_group):
webapp_name = self.create_random_name('web-del-test', 24)
plan = self.create_random_name('web-del-plan', 24)
self.cmd(
'appservice plan create -g {} -n {} -l {}'.format(resource_group, plan, WINDOWS_ASP_LOCATION_WEBAPP))
self.cmd('appservice plan update -g {} -n {} --sku S1'.format(resource_group, plan),
checks=[JMESPathCheck('name', plan),
JMESPathCheck('sku.tier', 'Standard'),
JMESPathCheck('sku.name', 'S1')])
self.cmd(
'webapp create -g {} -n {} --plan {}'.format(resource_group, webapp_name, plan))
self.cmd('webapp delete -g {} -n {}'.format(resource_group, webapp_name))
        # test that an empty service plan is automatically deleted
self.cmd('appservice plan list -g {}'.format(resource_group),
checks=[JMESPathCheck('length(@)', 0)])
class WebappConfigureTest(ScenarioTest):
@ResourceGroupPreparer(name_prefix='cli_test_webapp_config', location=WINDOWS_ASP_LOCATION_WEBAPP)
def test_webapp_config(self, resource_group):
webapp_name = self.create_random_name('webapp-config-test', 40)
plan_name = self.create_random_name('webapp-config-plan', 40)
self.cmd(
'appservice plan create -g {} -n {} --sku S1'.format(resource_group, plan_name))
self.cmd(
'webapp create -g {} -n {} --plan {}'.format(resource_group, webapp_name, plan_name))
# verify the baseline
self.cmd('webapp config show -g {} -n {}'.format(resource_group, webapp_name)).assert_with_checks([
JMESPathCheck('alwaysOn', True),
JMESPathCheck('autoHealEnabled', False),
JMESPathCheck('phpVersion', '5.6'),
JMESPathCheck('netFrameworkVersion', 'v4.0'),
JMESPathCheck('pythonVersion', ''),
JMESPathCheck('use32BitWorkerProcess', True),
JMESPathCheck('webSocketsEnabled', False),
JMESPathCheck('minTlsVersion', '1.2'),
JMESPathCheck('ftpsState', 'AllAllowed')])
# update and verify
checks = [
JMESPathCheck('alwaysOn', True),
JMESPathCheck('autoHealEnabled', True),
JMESPathCheck('phpVersion', '7.2'),
JMESPathCheck('netFrameworkVersion', 'v3.0'),
JMESPathCheck('pythonVersion', '3.4'),
JMESPathCheck('use32BitWorkerProcess', False),
JMESPathCheck('webSocketsEnabled', True),
JMESPathCheck('minTlsVersion', '1.0'),
JMESPathCheck('http20Enabled', True),
JMESPathCheck('ftpsState', 'Disabled')]
self.cmd('webapp config set -g {} -n {} --always-on true --auto-heal-enabled true --php-version 7.2 '
'--net-framework-version v3.5 --python-version 3.4 --use-32bit-worker-process=false '
'--web-sockets-enabled=true --http20-enabled true --min-tls-version 1.0 --ftps-state Disabled'.format(resource_group, webapp_name)).assert_with_checks(checks)
self.cmd('webapp config show -g {} -n {}'.format(resource_group, webapp_name)) \
.assert_with_checks(checks)
# site appsettings testing
# update through key value pairs
self.cmd('webapp config appsettings set -g {} -n {} --settings s1=foo s2=bar s3=bar2'.format(resource_group, webapp_name)).assert_with_checks([
JMESPathCheck("length([?name=='s1'])", 1),
JMESPathCheck("length([?name=='s2'])", 1),
JMESPathCheck("length([?name=='s3'])", 1),
JMESPathCheck("length([?value=='foo'])", 1),
JMESPathCheck("length([?value=='bar'])", 1),
JMESPathCheck("length([?value=='bar2'])", 1)
])
# show
result = self.cmd('webapp config appsettings list -g {} -n {}'.format(
resource_group, webapp_name)).get_output_in_json()
s2 = next((x for x in result if x['name'] == 's2'))
self.assertEqual(s2['name'], 's2')
self.assertEqual(s2['slotSetting'], False)
self.assertEqual(s2['value'], 'bar')
self.assertEqual(set([x['name'] for x in result]), set(
['s1', 's2', 's3', 'WEBSITE_NODE_DEFAULT_VERSION']))
# delete
self.cmd('webapp config appsettings delete -g {} -n {} --setting-names s1 s2'
.format(resource_group, webapp_name)).assert_with_checks([
JMESPathCheck("length([?name=='s3'])", 1),
JMESPathCheck("length([?name=='s1'])", 0),
JMESPathCheck("length([?name=='s2'])", 0)])
# hostnames
self.cmd('webapp config hostname list -g {} --webapp-name {}'
.format(resource_group, webapp_name)).assert_with_checks([
JMESPathCheck('length(@)', 1),
JMESPathCheck('[0].name', '{0}.azurewebsites.net'.format(webapp_name))])
# site azure storage account configurations tests
runtime = 'node|10.16'
linux_plan = self.create_random_name(
prefix='webapp-linux-plan', length=24)
linux_webapp = self.create_random_name(
prefix='webapp-linux', length=24)
self.cmd('appservice plan create -g {} -n {} -l eastus2 --sku S1 --is-linux'.format(resource_group, linux_plan),
checks=[
                     # this oddly named field indicates a Linux plan
JMESPathCheck('reserved', True),
JMESPathCheck('sku.name', 'S1'),
])
self.cmd('webapp create -g {} -n {} --plan {} --runtime {}'.format(resource_group, linux_webapp, linux_plan, runtime),
checks=[
JMESPathCheck('name', linux_webapp),
])
# add
self.cmd(('webapp config storage-account add -g {} -n {} --custom-id Id --storage-type AzureFiles --account-name name '
'--share-name sharename --access-key key --mount-path /path/to/mount').format(resource_group, linux_webapp))
self.cmd('webapp config storage-account list -g {} -n {}'.format(resource_group, linux_webapp)).assert_with_checks([
JMESPathCheck('length(@)', 1),
JMESPathCheck("[?name=='Id']|[0].value.type", "AzureFiles"),
JMESPathCheck("[?name=='Id']|[0].value.accountName", "name"),
JMESPathCheck("[?name=='Id']|[0].value.shareName", "sharename"),
JMESPathCheck("[?name=='Id']|[0].value.accessKey", "key"),
JMESPathCheck("[?name=='Id']|[0].value.mountPath", "/path/to/mount")])
# update
self.cmd('webapp config storage-account update -g {} -n {} --custom-id Id --mount-path /different/path'
.format(resource_group, linux_webapp))
self.cmd('webapp config storage-account list -g {} -n {}'.format(resource_group, linux_webapp)).assert_with_checks([
JMESPathCheck("length(@)", 1),
JMESPathCheck("[?name=='Id']|[0].value.type", "AzureFiles"),
JMESPathCheck("[?name=='Id']|[0].value.accountName", "name"),
JMESPathCheck("[?name=='Id']|[0].value.shareName", "sharename"),
JMESPathCheck("[?name=='Id']|[0].value.accessKey", "key"),
JMESPathCheck("[?name=='Id']|[0].value.mountPath", "/different/path")])
# list
self.cmd('webapp config storage-account list -g {} -n {}'.format(resource_group, linux_webapp)).assert_with_checks([
JMESPathCheck("length(@)", 1),
JMESPathCheck('[0].name', 'Id')])
# delete
self.cmd('webapp config storage-account delete -g {} -n {} --custom-id Id'.format(resource_group, linux_webapp)).assert_with_checks([
JMESPathCheck("length(@)", 0)])
# site connection string tests
self.cmd('webapp config connection-string set -t mysql -g {} -n {} --settings c1="conn1" c2=conn2 '
'--slot-settings c3=conn3'.format(resource_group, linux_webapp))
self.cmd('webapp config connection-string list -g {} -n {}'
.format(resource_group, linux_webapp)).assert_with_checks([
JMESPathCheck('length([])', 3),
JMESPathCheck("[?name=='c1']|[0].slotSetting", False),
JMESPathCheck("[?name=='c1']|[0].type", 'MySql'),
JMESPathCheck("[?name=='c1']|[0].value", 'conn1'),
JMESPathCheck("[?name=='c2']|[0].slotSetting", False),
JMESPathCheck("[?name=='c3']|[0].slotSetting", True)])
self.cmd('webapp config connection-string delete -g {} -n {} --setting-names c1 c3'
.format(resource_group, linux_webapp))
self.cmd('webapp config connection-string list -g {} -n {}'
.format(resource_group, linux_webapp)).assert_with_checks([
JMESPathCheck('length([])', 1),
JMESPathCheck('[0].slotSetting', False),
JMESPathCheck('[0].name', 'c2')])
# see deployment user; just make sure the command does return something
self.assertTrue(
self.cmd('webapp deployment user show').get_output_in_json()['type'])
@AllowLargeResponse()
@ResourceGroupPreparer(name_prefix='cli_test_webapp_config_appsettings', location=WINDOWS_ASP_LOCATION_WEBAPP)
def test_webapp_config_appsettings(self, resource_group):
webapp_name = self.create_random_name('webapp-config-appsettings-test', 40)
plan_name = self.create_random_name('webapp-config-appsettings-plan', 40)
self.cmd(
'appservice plan create -g {} -n {} --sku S1'.format(resource_group, plan_name))
self.cmd(
'webapp create -g {} -n {} --plan {}'.format(resource_group, webapp_name, plan_name))
# site appsettings testing
# update through key value pairs
self.cmd('webapp config appsettings set -g {} -n {} --settings s1=foo s2=bar s3=bar2'.format(resource_group, webapp_name)).assert_with_checks([
JMESPathCheck("length([?name=='s1'])", 1),
JMESPathCheck("length([?name=='s2'])", 1),
JMESPathCheck("length([?name=='s3'])", 1),
JMESPathCheck("length([?value=='foo'])", 1),
JMESPathCheck("length([?value=='bar'])", 1),
JMESPathCheck("length([?value=='bar2'])", 1)
])
# show
result = self.cmd('webapp config appsettings list -g {} -n {}'.format(
resource_group, webapp_name)).get_output_in_json()
s2 = next((x for x in result if x['name'] == 's2'))
self.assertEqual(s2['name'], 's2')
self.assertEqual(s2['slotSetting'], False)
self.assertEqual(s2['value'], 'bar')
self.assertEqual(set([x['name'] for x in result]), set(
['s1', 's2', 's3', 'WEBSITE_NODE_DEFAULT_VERSION']))
self.cmd(
'webapp create -g {} -n {} --plan {}'.format(resource_group, webapp_name, plan_name))
# show
result = self.cmd('webapp config appsettings list -g {} -n {}'.format(
resource_group, webapp_name)).get_output_in_json()
s2 = next((x for x in result if x['name'] == 's2'))
self.assertEqual(s2['name'], 's2')
self.assertEqual(s2['slotSetting'], False)
self.assertEqual(s2['value'], 'bar')
self.assertEqual(set([x['name'] for x in result]), set(
['s1', 's2', 's3', 'WEBSITE_NODE_DEFAULT_VERSION']))
@ResourceGroupPreparer(name_prefix='cli_test_webapp_json', location=WINDOWS_ASP_LOCATION_WEBAPP)
def test_update_webapp_settings_thru_json(self, resource_group):
webapp_name = self.create_random_name('webapp-config-test', 40)
plan_name = self.create_random_name('webapp-config-plan', 40)
# update through a json file with key value pair
_, settings_file = tempfile.mkstemp()
with open(settings_file, 'w+') as file:
file.write(json.dumps({'s2': 'value2'}))
self.cmd(
'appservice plan create -g {} -n {} --sku S1'.format(resource_group, plan_name))
self.cmd(
'webapp create -g {} -n {} --plan {}'.format(resource_group, webapp_name, plan_name))
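        # Settings can be passed both inline (s=value) and via the '@file' form,
        # which loads them from the JSON file written above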
output = self.cmd('webapp config appsettings set -g {} -n {} --settings s=value "@{}"'.format(
resource_group, webapp_name, settings_file)).get_output_in_json()
output = [s for s in output if s['name'] in ['s', 's2']]
output.sort(key=lambda s: s['name'])
self.assertEqual(output[0], {
'name': 's',
'value': 'value',
'slotSetting': False
})
self.assertEqual(output[1], {
'name': 's2',
'value': 'value2',
'slotSetting': False
})
        # now reuse the output of the set/list command as the input file
output.append({
'name': 's3',
'value': 'value3',
'slotSetting': True
})
with open(settings_file, 'w') as file:
file.write(json.dumps(output))
output = self.cmd('webapp config appsettings set -g {} -n {} --settings "@{}"'.format(
resource_group, webapp_name, settings_file)).get_output_in_json()
output = [s for s in output if s['name'] in ['s', 's2', 's3']]
output.sort(key=lambda s: s['name'])
self.assertEqual(output[0], {
'name': 's',
'value': 'value',
'slotSetting': False
})
self.assertEqual(output[1], {
'name': 's2',
'value': 'value2',
'slotSetting': False
})
self.assertEqual(output[2], {
'name': 's3',
'value': 'value3',
'slotSetting': True
})
# update site config
site_configs = {
"requestTracingEnabled": True,
"alwaysOn": True
}
with open(settings_file, 'w') as file:
file.write(json.dumps(site_configs))
self.cmd('webapp config set -g {} -n {} --generic-configurations "@{}"'.format(resource_group, webapp_name, settings_file)).assert_with_checks([
JMESPathCheck("requestTracingEnabled", True),
JMESPathCheck("alwaysOn", True),
])
class WebappScaleTest(ScenarioTest):
@ResourceGroupPreparer(location=WINDOWS_ASP_LOCATION_WEBAPP)
def test_webapp_scale(self, resource_group):
plan = self.create_random_name(prefix='scale-plan', length=24)
# start with shared sku
self.cmd('appservice plan create -g {} -n {} --sku SHARED'.format(resource_group, plan), checks=[
JMESPathCheck('sku.name', 'D1'),
JMESPathCheck('sku.tier', 'Shared'),
JMESPathCheck('sku.size', 'D1'),
JMESPathCheck('sku.family', 'D'),
# 0 means the default value: 1 instance
JMESPathCheck('sku.capacity', 0)
])
# scale up
self.cmd(
'appservice plan update -g {} -n {} --sku S2'.format(resource_group, plan))
self.cmd('appservice plan show -g {} -n {}'.format(resource_group, plan), checks=[
JMESPathCheck('sku.name', 'S2'),
JMESPathCheck('sku.tier', 'Standard'),
JMESPathCheck('sku.size', 'S2'),
JMESPathCheck('sku.family', 'S')
])
# scale down
self.cmd(
'appservice plan update -g {} -n {} --sku B1'.format(resource_group, plan))
self.cmd('appservice plan show -g {} -n {}'.format(resource_group, plan), checks=[
JMESPathCheck('sku.name', 'B1'),
JMESPathCheck('sku.tier', 'Basic'),
JMESPathCheck('sku.size', 'B1'),
JMESPathCheck('sku.family', 'B')
])
# scale out
self.cmd(
'appservice plan update -g {} -n {} --number-of-workers 2'.format(resource_group, plan))
self.cmd('appservice plan show -g {} -n {}'.format(resource_group, plan), checks=[
JMESPathCheck('sku.name', 'B1'),
JMESPathCheck('sku.tier', 'Basic'),
JMESPathCheck('sku.size', 'B1'),
JMESPathCheck('sku.family', 'B'),
JMESPathCheck('sku.capacity', 2)
])
class AppServiceBadErrorPolishTest(ScenarioTest):
@ResourceGroupPreparer(parameter_name='resource_group', location=WINDOWS_ASP_LOCATION_WEBAPP)
@ResourceGroupPreparer(parameter_name='resource_group2', location=WINDOWS_ASP_LOCATION_WEBAPP)
def test_appservice_error_polish(self, resource_group, resource_group2):
plan = self.create_random_name(prefix='web-error-plan', length=24)
webapp_name = self.create_random_name(prefix='web-error', length=24)
self.cmd('group create -n {} -l {}'.format(resource_group2, WINDOWS_ASP_LOCATION_WEBAPP))
self.cmd(
'appservice plan create -g {} -n {} --sku b1'.format(resource_group, plan))
self.cmd(
'webapp create -g {} -n {} --plan {}'.format(resource_group, webapp_name, plan))
self.cmd(
'appservice plan create -g {} -n {} --sku b1'.format(resource_group2, plan))
        # we try to produce an error by creating 2 webapps with the same name in different groups
self.cmd('webapp create -g {} -n {} --plan {}'.format(resource_group2,
webapp_name, plan), expect_failure=True)
# TODO: ensure test fx can capture error details for us to verify
# allowed_exceptions='Website with given name {} already exists'.format(webapp_name)
# this test doesn't contain the ultimate verification, which requires manually loading the front page in a browser
class LinuxWebappScenarioTest(ScenarioTest):
@ResourceGroupPreparer(location=LINUX_ASP_LOCATION_WEBAPP)
def test_linux_webapp(self, resource_group):
runtime = 'node|10.16'
plan = self.create_random_name(prefix='webapp-linux-plan', length=24)
webapp = self.create_random_name(prefix='webapp-linux', length=24)
self.cmd('appservice plan create -g {} -n {} --sku S1 --is-linux' .format(resource_group, plan), checks=[
            # this oddly named field indicates a Linux plan
JMESPathCheck('reserved', True),
JMESPathCheck('sku.name', 'S1'),
])
self.cmd('webapp create -g {} -n {} --plan {} --runtime {}'.format(resource_group, webapp, plan, runtime), checks=[
JMESPathCheck('name', webapp),
])
self.cmd('webapp config show -g {} -n {}'.format(resource_group, webapp), checks=[
JMESPathCheck('windowsFxVersion', None)
])
        # workaround the fact that a new linux web's "kind" won't be settled instantaneously
time.sleep(30)
self.cmd('webapp list -g {}'.format(resource_group), checks=[
JMESPathCheck('length([])', 1),
JMESPathCheck('[0].name', webapp),
JMESPathCheck('[0].kind', 'app,linux')
])
self.cmd('webapp show -g {} -n {}'.format(resource_group, webapp), checks=[
JMESPathCheck('name', webapp),
JMESPathCheck('kind', 'app,linux')
])
self.cmd('webapp config set -g {} -n {} --startup-file {}'.format(resource_group, webapp, 'process.json'), checks=[
JMESPathCheck('appCommandLine', 'process.json')
])
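        # Enabling continuous deployment should return a webhook URL that a container registry can call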
result = self.cmd('webapp deployment container config -g {} -n {} --enable-cd true'.format(
resource_group, webapp)).get_output_in_json()
self.assertTrue(result['CI_CD_URL'].startswith('https://'))
self.assertTrue(result['CI_CD_URL'].endswith(
'.scm.azurewebsites.net/docker/hook'))
result = self.cmd('webapp config container set -g {} -n {} --docker-custom-image-name {} --docker-registry-server-password {} --docker-registry-server-user {} --docker-registry-server-url {} --enable-app-service-storage {}'.format(
resource_group, webapp, 'foo-image', 'foo-password', 'foo-user', 'foo-url', 'false')).get_output_in_json()
self.assertEqual(set(x['value'] for x in result if x['name'] ==
'DOCKER_REGISTRY_SERVER_PASSWORD'), set([None])) # we mask the password
result = self.cmd('webapp config container show -g {} -n {} '.format(
resource_group, webapp)).get_output_in_json()
self.assertEqual(set(x['name'] for x in result), set(['DOCKER_REGISTRY_SERVER_URL', 'DOCKER_REGISTRY_SERVER_USERNAME',
'DOCKER_CUSTOM_IMAGE_NAME', 'DOCKER_REGISTRY_SERVER_PASSWORD', 'WEBSITES_ENABLE_APP_SERVICE_STORAGE']))
self.assertEqual(set(x['value'] for x in result if x['name'] ==
'DOCKER_REGISTRY_SERVER_PASSWORD'), set([None])) # we mask the password
sample = next(
(x for x in result if x['name'] == 'DOCKER_REGISTRY_SERVER_URL'))
self.assertEqual(sample, {
'name': 'DOCKER_REGISTRY_SERVER_URL', 'slotSetting': False, 'value': 'foo-url'})
sample = next(
(x for x in result if x['name'] == 'WEBSITES_ENABLE_APP_SERVICE_STORAGE'))
self.assertEqual(sample, {
'name': 'WEBSITES_ENABLE_APP_SERVICE_STORAGE', 'slotSetting': False, 'value': 'false'})
self.cmd(
'webapp config container delete -g {} -n {}'.format(resource_group, webapp))
result2 = self.cmd('webapp config container show -g {} -n {} '.format(
resource_group, webapp)).get_output_in_json()
self.assertEqual(result2, [])
@unittest.skip('This is failing on Windows OS. Raised bug #12844 to fix in future releases')
class LinuxWebappSSHScenarioTest(ScenarioTest):
@ResourceGroupPreparer(location=LINUX_ASP_LOCATION_WEBAPP)
def test_linux_webapp_ssh(self, resource_group):
        # On Windows, test 'webapp ssh' throws an error
import platform
if platform.system() == "Windows":
from azure.cli.core.util import CLIError
with self.assertRaises(CLIError):
self.cmd('webapp ssh -g {} -n {} --timeout 5'.format("foo", "bar"))
return
runtime = 'node|12-lts'
plan = self.create_random_name(prefix='webapp-ssh-plan', length=24)
webapp = self.create_random_name(prefix='webapp-ssh', length=24)
self.cmd(
'appservice plan create -g {} -n {} --sku S1 --is-linux' .format(resource_group, plan))
self.cmd('webapp create -g {} -n {} --plan {} --runtime {}'.format(
resource_group, webapp, plan, runtime))
time.sleep(30)
requests.get('http://{}.azurewebsites.net'.format(webapp), timeout=240)
time.sleep(30)
self.cmd('webapp ssh -g {} -n {} --timeout 5'.format(resource_group, webapp))
time.sleep(30)
class LinuxWebappRemoteSSHScenarioTest(ScenarioTest):
@ResourceGroupPreparer(location=LINUX_ASP_LOCATION_WEBAPP)
def test_linux_webapp_remote_ssh(self, resource_group):
runtime = 'node|12-lts'
plan = self.create_random_name(
prefix='webapp-remote-ssh-plan', length=40)
webapp = self.create_random_name(prefix='webapp-remote-ssh', length=40)
self.cmd(
'appservice plan create -g {} -n {} --sku S1 --is-linux' .format(resource_group, plan))
self.cmd('webapp create -g {} -n {} --plan {} --runtime {}'.format(
resource_group, webapp, plan, runtime))
time.sleep(30)
requests.get('http://{}.azurewebsites.net'.format(webapp), timeout=240)
time.sleep(30)
self.cmd(
'webapp create-remote-connection -g {} -n {} --timeout 5'.format(resource_group, webapp))
time.sleep(30)
class LinuxWebappRemoteDebugScenarioTest(ScenarioTest):
@unittest.skip("Bug #14427. Re-enable test after fixing https://github.com/Azure/azure-cli/issues/14427")
@ResourceGroupPreparer(location=LINUX_ASP_LOCATION_WEBAPP)
def test_linux_webapp_remote_debug(self, resource_group):
runtime = 'node|12-lts'
plan = self.create_random_name(
prefix='webapp-remote-debug-plan', length=40)
webapp = self.create_random_name(
prefix='webapp-remote-debug', length=40)
self.cmd(
'appservice plan create -g {} -n {} --sku S1 --is-linux' .format(resource_group, plan))
self.cmd('webapp create -g {} -n {} --plan {} --runtime {}'.format(
resource_group, webapp, plan, runtime))
time.sleep(30)
self.cmd(
'webapp config set --remote-debugging-enabled true -g {} -n {}'.format(resource_group, webapp))
requests.get('http://{}.azurewebsites.net'.format(webapp), timeout=240)
time.sleep(30)
self.cmd(
'webapp create-remote-connection -g {} -n {} --timeout 5'.format(resource_group, webapp))
time.sleep(30)
class LinuxWebappMulticontainerSlotScenarioTest(ScenarioTest):
@ResourceGroupPreparer(location=LINUX_ASP_LOCATION_WEBAPP)
def test_linux_webapp_multicontainer_slot(self, resource_group):
webapp_name = self.create_random_name(
prefix='webapp-linux-multi', length=24)
plan = self.create_random_name(prefix='plan-linux-multi', length=24)
config_file = os.path.join(TEST_DIR, 'sample-compose.yml')
slot = "stage"
slot_webapp_name = "{}-{}".format(webapp_name, slot)
slot_config_file = os.path.join(TEST_DIR, 'sample-compose-slot.yml')
self.cmd(
'appservice plan create -g {} -n {} --is-linux --sku S1'.format(resource_group, plan))
self.cmd("webapp create -g {} -n {} --plan {} --multicontainer-config-file \"{}\" "
"--multicontainer-config-type COMPOSE".format(resource_group, webapp_name, plan, config_file))
last_number_seen = 99999999
for x in range(0, 10):
r = requests.get(
'http://{}.azurewebsites.net'.format(webapp_name), timeout=240)
# verify the web page
self.assertTrue('Hello World! I have been seen' in str(r.content))
current_number = [int(s)
for s in r.content.split() if s.isdigit()][0]
self.assertNotEqual(current_number, last_number_seen)
last_number_seen = current_number
self.cmd('webapp deployment slot create -g {} -n {} --slot {}'.format(
resource_group, webapp_name, slot))
self.cmd("webapp config container set -g {} -n {} --slot {} --multicontainer-config-file \"{}\" "
"--multicontainer-config-type COMPOSE".format(resource_group, webapp_name, slot, slot_config_file))
last_number_seen = 99999999
for x in range(0, 10):
r = requests.get(
'http://{}.azurewebsites.net'.format(slot_webapp_name), timeout=240)
# verify the web page
self.assertTrue(
'Hello from a slot! I have been seen' in str(r.content))
current_number = [int(s)
for s in r.content.split() if s.isdigit()][0]
self.assertNotEqual(current_number, last_number_seen)
last_number_seen = current_number
class WebappACRScenarioTest(ScenarioTest):
@ResourceGroupPreparer(location=LINUX_ASP_LOCATION_WEBAPP)
def test_acr_integration(self, resource_group):
plan = self.create_random_name(prefix='acrtestplan', length=24)
webapp = self.create_random_name(prefix='webappacrtest', length=24)
runtime = 'node|10.16'
acr_registry_name = webapp
self.cmd('acr create --admin-enabled -g {} -n {} --sku Basic'.format(
resource_group, acr_registry_name))
self.cmd(
'appservice plan create -g {} -n {} --sku S1 --is-linux' .format(resource_group, plan))
self.cmd('webapp create -g {} -n {} --plan {} --runtime {}'.format(
resource_group, webapp, plan, runtime))
creds = self.cmd('acr credential show -n {} -g {}'.format(
acr_registry_name, resource_group)).get_output_in_json()
self.cmd('webapp config container set -g {0} -n {1} --docker-custom-image-name {2}.azurecr.io/image-name:latest --docker-registry-server-url https://{2}.azurecr.io'.format(
resource_group, webapp, acr_registry_name), checks=[
JMESPathCheck(
"[?name=='DOCKER_REGISTRY_SERVER_USERNAME']|[0].value", creds['username'])
])
class FunctionappACRScenarioTest(ScenarioTest):
@ResourceGroupPreparer(location='northeurope')
@StorageAccountPreparer()
@AllowLargeResponse()
def test_acr_integration_function_app(self, resource_group, storage_account):
plan = self.create_random_name(prefix='acrtestplanfunction', length=24)
functionapp = self.create_random_name(
prefix='functionappacrtest', length=24)
runtime = 'node'
acr_registry_name = functionapp
self.cmd('acr create --admin-enabled -g {} -n {} --sku Basic'.format(
resource_group, acr_registry_name))
self.cmd(
'appservice plan create -g {} -n {} --sku S1 --is-linux' .format(resource_group, plan))
self.cmd('functionapp create -g {} -n {} -s {} --plan {} --runtime {}'.format(
resource_group, functionapp, storage_account, plan, runtime))
creds = self.cmd('acr credential show -n {} -g {}'.format(
acr_registry_name, resource_group)).get_output_in_json()
self.cmd('functionapp config container set -g {0} -n {1} --docker-custom-image-name {2}.azurecr.io/image-name:latest --docker-registry-server-url https://{2}.azurecr.io'.format(
resource_group, functionapp, acr_registry_name), checks=[
JMESPathCheck(
"[?name=='DOCKER_REGISTRY_SERVER_USERNAME']|[0].value", creds['username'])
])
self.cmd('functionapp config container show -g {} -n {} '.format(resource_group, functionapp), checks=[
JMESPathCheck(
"[?name=='DOCKER_REGISTRY_SERVER_USERNAME']|[0].value", creds['username']),
JMESPathCheck(
"[?name=='DOCKER_REGISTRY_SERVER_URL']|[0].name", 'DOCKER_REGISTRY_SERVER_URL')
])
self.cmd('functionapp config appsettings list -g {} -n {}'.format(resource_group, functionapp), checks=[
JMESPathCheck(
"[?name=='FUNCTIONS_WORKER_RUNTIME'].value|[0]", 'node'),
JMESPathCheck(
"[?name=='DOCKER_REGISTRY_SERVER_USERNAME'].value|[0]", creds['username'])
])
self.cmd(
'functionapp config container delete -g {} -n {} '.format(resource_group, functionapp))
json_result = self.cmd('functionapp config appsettings list -g {} -n {}'.format(
resource_group, functionapp)).get_output_in_json()
all_settings = [setting['name'] for setting in json_result]
# Make sure the related settings are deleted
self.assertNotIn('DOCKER_REGISTRY_SERVER_USERNAME', all_settings)
self.assertNotIn('DOCKER_REGISTRY_SERVER_URL', all_settings)
self.assertNotIn('DOCKER_REGISTRY_SERVER_PASSWORD', all_settings)
self.assertIn('FUNCTIONS_WORKER_RUNTIME', all_settings)
self.cmd('functionapp delete -g {} -n {}'.format(resource_group, functionapp))
class FunctionAppCreateUsingACR(ScenarioTest):
@ResourceGroupPreparer(location='brazilsouth')
@StorageAccountPreparer(name_prefix='clitestacr')
@AllowLargeResponse()
def test_acr_create_function_app(self, resource_group, storage_account):
plan = self.create_random_name(prefix='acrtestplanfunction', length=24)
functionapp = self.create_random_name(
prefix='functionappacrtest', length=24)
runtime = 'node'
acr_registry_name = functionapp
self.cmd('acr create --admin-enabled -g {} -n {} --sku Basic'.format(
resource_group, acr_registry_name))
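        # Fetch the registry admin credentials so the function app can pull the private image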
acr_creds = self.cmd('acr credential show -n {} -g {}'.format(
acr_registry_name, resource_group)).get_output_in_json()
username = acr_creds['username']
password = acr_creds['passwords'][0]['value']
self.cmd(
'functionapp plan create -g {} -n {} --sku S1 --is-linux'.format(resource_group, plan))
self.cmd('functionapp create -g {} -n {} -s {} --plan {} --runtime {}'
' --deployment-container-image-name {}.azurecr.io/image-name:latest --docker-registry-server-user {}'
' --docker-registry-server-password {}'.format(resource_group, functionapp, storage_account, plan, runtime,
acr_registry_name, username, password))
self.cmd('functionapp config container show -g {} -n {} '.format(resource_group, functionapp), checks=[
JMESPathCheck(
"[?name=='DOCKER_REGISTRY_SERVER_USERNAME']|[0].value", username),
JMESPathCheck(
"[?name=='DOCKER_REGISTRY_SERVER_URL']|[0].name", 'DOCKER_REGISTRY_SERVER_URL')
])
self.cmd('functionapp config appsettings list -g {} -n {}'.format(resource_group, functionapp), checks=[
JMESPathCheck(
"[?name=='FUNCTIONS_WORKER_RUNTIME'].value|[0]", 'node'),
JMESPathCheck(
"[?name=='DOCKER_REGISTRY_SERVER_USERNAME'].value|[0]", username)
])
self.cmd('functionapp config show -g {} -n {}'.format(resource_group, functionapp), checks=[
JMESPathCheck('linuxFxVersion', 'DOCKER|{}.azurecr.io/image-name:latest'.format(acr_registry_name))])
self.cmd(
'functionapp config container delete -g {} -n {} '.format(resource_group, functionapp))
json_result = self.cmd(
'functionapp config appsettings list -g {} -n {}'.format(resource_group, functionapp)).get_output_in_json()
all_settings = [setting['name'] for setting in json_result]
# Make sure the related settings are deleted
self.assertNotIn('DOCKER_REGISTRY_SERVER_USERNAME', all_settings)
self.assertNotIn('DOCKER_REGISTRY_SERVER_URL', all_settings)
self.assertNotIn('DOCKER_REGISTRY_SERVER_PASSWORD', all_settings)
self.assertIn('FUNCTIONS_WORKER_RUNTIME', all_settings)
class FunctionappACRDeploymentScenarioTest(ScenarioTest):
@ResourceGroupPreparer(location='brazilsouth')
@StorageAccountPreparer(name_prefix='clitestacrdeploy')
def test_acr_deployment_function_app(self, resource_group, storage_account):
plan = self.create_random_name(prefix='acrtestplanfunction', length=24)
functionapp = self.create_random_name(
prefix='functionappacrtest', length=24)
runtime = 'node'
acr_registry_name = functionapp
self.cmd('acr create --admin-enabled -g {} -n {} --sku Basic'.format(
resource_group, acr_registry_name))
self.cmd(
'appservice plan create -g {} -n {} --sku S1 --is-linux' .format(resource_group, plan))
self.cmd('functionapp create -g {} -n {} -s {} --plan {} --runtime {}'.format(
resource_group, functionapp, storage_account, plan, runtime))
creds = self.cmd('acr credential show -g {} -n {}'.format(
resource_group, acr_registry_name)).get_output_in_json()
self.cmd('functionapp config container set -g {0} -n {1} --docker-custom-image-name {2}.azurecr.io/image-name:latest --docker-registry-server-url https://{2}.azurecr.io'.format(
resource_group, functionapp, acr_registry_name), checks=[
JMESPathCheck(
"[?name=='DOCKER_REGISTRY_SERVER_USERNAME']|[0].value", creds['username'])
])
result = self.cmd('functionapp deployment container config -g {} -n {} --enable-cd true'.format(resource_group,
functionapp)).get_output_in_json()
self.assertTrue(result['CI_CD_URL'].startswith('https://'))
self.assertTrue(result['CI_CD_URL'].endswith(
'.scm.azurewebsites.net/docker/hook'))
# verify that show-cd-url works the same way
show_result = self.cmd('functionapp deployment container show-cd-url -g {} -n {}'.format(resource_group,
functionapp)).get_output_in_json()
self.assertTrue(show_result['CI_CD_URL'].startswith('https://'))
self.assertTrue(show_result['CI_CD_URL'].endswith(
'.scm.azurewebsites.net/docker/hook'))
self.cmd('functionapp delete -g {} -n {}'.format(resource_group, functionapp))
class FunctionAppReservedInstanceTest(ScenarioTest):
@ResourceGroupPreparer(location=WINDOWS_ASP_LOCATION_FUNCTIONAPP)
@StorageAccountPreparer()
def test_functionapp_reserved_instance(self, resource_group, storage_account):
functionapp_name = self.create_random_name(
'functionappwithreservedinstance', 40)
self.cmd('functionapp create -g {} -n {} -c {} -s {} --os-type Windows'
.format(resource_group, functionapp_name, WINDOWS_ASP_LOCATION_FUNCTIONAPP, storage_account)).assert_with_checks([
JMESPathCheck('state', 'Running'),
JMESPathCheck('name', functionapp_name),
JMESPathCheck('kind', 'functionapp'),
JMESPathCheck('hostNames[0]', functionapp_name + '.azurewebsites.net')])
self.cmd('functionapp config set -g {} -n {} --prewarmed-instance-count 4'
.format(resource_group, functionapp_name)).assert_with_checks([
JMESPathCheck('preWarmedInstanceCount', 4)])
self.cmd(
'functionapp delete -g {} -n {}'.format(resource_group, functionapp_name))


class WebappGitScenarioTest(ScenarioTest):
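    """Configure, show and delete a manual-integration git deployment source for a web app."""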
@ResourceGroupPreparer(location=WINDOWS_ASP_LOCATION_WEBAPP)
def test_webapp_git(self, resource_group):
plan = self.create_random_name(prefix='webapp-git-plan5', length=24)
webapp = self.create_random_name(prefix='web-git-test2', length=24)
        # You can create and use any repo containing the 3 files under "./sample_web"
test_git_repo = 'https://github.com/yugangw-msft/azure-site-test'
self.cmd(
'appservice plan create -g {} -n {} --sku S1'.format(resource_group, plan))
self.cmd(
'webapp create -g {} -n {} --plan {}'.format(resource_group, webapp, plan))
self.cmd('webapp deployment source config -g {} -n {} --repo-url {} --branch {} --manual-integration'.format(resource_group, webapp, test_git_repo, 'master'), checks=[
JMESPathCheck('repoUrl', test_git_repo),
JMESPathCheck('isMercurial', False),
JMESPathCheck('branch', 'master')
])
self.cmd('webapp deployment source show -g {} -n {}'.format(resource_group, webapp), checks=[
JMESPathCheck('repoUrl', test_git_repo),
JMESPathCheck('isMercurial', False),
JMESPathCheck('branch', 'master')
])
self.cmd(
'webapp deployment source delete -g {} -n {}'.format(resource_group, webapp))
self.cmd('webapp deployment source show -g {} -n {}'.format(resource_group, webapp),
checks=JMESPathCheck('repoUrl', None))


class WebappSlotScenarioTest(ScenarioTest):
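    """Exercise deployment slots: create, clone config from production, manage settings, swap and delete."""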
@ResourceGroupPreparer(location=WINDOWS_ASP_LOCATION_WEBAPP)
def test_webapp_slot(self, resource_group):
plan = self.create_random_name(prefix='slot-test-plan', length=24)
webapp = self.create_random_name(prefix='slot-test-web', length=24)
plan_result = self.cmd(
'appservice plan create -g {} -n {} --sku S1'.format(resource_group, plan)).get_output_in_json()
self.cmd('webapp create -g {} -n {} --plan {}'.format(resource_group,
webapp, plan_result['name']))
        # You can create and use any repo containing the 3 files under "./sample_web" and a 'staging' branch
slot = 'staging'
slot2 = 'dev'
test_git_repo = 'https://github.com/yugangw-msft/azure-site-test'
test_php_version = '5.6'
# create a few app-settings to test they can be cloned
self.cmd('webapp config appsettings set -g {} -n {} --settings s1=v1 --slot-settings s2=v2'.format(resource_group, webapp))
# create an empty slot
self.cmd('webapp deployment slot create -g {} -n {} --slot {}'.format(resource_group, webapp, slot), checks=[
JMESPathCheck('name', slot)
])
self.cmd('webapp deployment source config -g {} -n {} --repo-url {} --branch {} -s {} --manual-integration'.format(resource_group, webapp, test_git_repo, slot, slot), checks=[
JMESPathCheck('repoUrl', test_git_repo),
JMESPathCheck('branch', slot)
])
# swap with prod and verify the git branch also switched
self.cmd(
'webapp deployment slot swap -g {} -n {} -s {}'.format(resource_group, webapp, slot))
result = self.cmd('webapp config appsettings list -g {} -n {} -s {}'.format(
resource_group, webapp, slot)).get_output_in_json()
self.assertEqual(set([x['name'] for x in result]), set(
['s1', 'WEBSITE_NODE_DEFAULT_VERSION']))
# create a new slot by cloning from prod slot
self.cmd('webapp config set -g {} -n {} --php-version {}'.format(
resource_group, webapp, test_php_version))
self.cmd('webapp deployment slot create -g {} -n {} --slot {} --configuration-source {}'.format(
resource_group, webapp, slot2, webapp))
self.cmd('webapp config show -g {} -n {} --slot {}'.format(resource_group, webapp, slot2), checks=[
JMESPathCheck("phpVersion", test_php_version),
])
self.cmd('webapp config appsettings set -g {} -n {} --slot {} --settings s3=v3 --slot-settings s4=v4'.format(resource_group, webapp, slot2), checks=[
JMESPathCheck("[?name=='s4']|[0].slotSetting", True),
JMESPathCheck("[?name=='s3']|[0].slotSetting", False),
])
self.cmd('webapp config connection-string set -g {} -n {} -t mysql --slot {} --settings c1=connection1 --slot-settings c2=connection2'.format(resource_group, webapp, slot2))
        # verify we can swap with a non-production slot
self.cmd('webapp deployment slot swap -g {} -n {} --slot {} --target-slot {}'.format(
resource_group, webapp, slot, slot2))
result = self.cmd('webapp config appsettings list -g {} -n {} --slot {}'.format(
resource_group, webapp, slot2)).get_output_in_json()
self.assertEqual(set([x['name'] for x in result]), set(
['s1', 's4', 'WEBSITE_NODE_DEFAULT_VERSION']))
result = self.cmd('webapp config connection-string list -g {} -n {} --slot {}'.format(
resource_group, webapp, slot2)).get_output_in_json()
self.assertEqual(set([x['name'] for x in result]), set(['c2']))
result = self.cmd('webapp config appsettings list -g {} -n {} --slot {}'.format(
resource_group, webapp, slot)).get_output_in_json()
self.assertTrue(set(['s3']).issubset(set([x['name'] for x in result])))
result = self.cmd('webapp config connection-string list -g {} -n {} --slot {}'.format(
resource_group, webapp, slot)).get_output_in_json()
self.assertEqual(set([x['name'] for x in result]), set(['c1']))
self.cmd('webapp deployment slot list -g {} -n {}'.format(resource_group, webapp), checks=[
JMESPathCheck("length([])", 2),
JMESPathCheck("length([?name=='{}'])".format(slot2), 1),
JMESPathCheck("length([?name=='{}'])".format(slot), 1),
])
self.cmd(
'webapp deployment slot delete -g {} -n {} --slot {}'.format(resource_group, webapp, slot))
# try another way to delete a slot and exercise all options
self.cmd('webapp delete -g {} -n {} --slot {} --keep-dns-registration --keep-empty-plan --keep-metrics'.format(resource_group, webapp, slot2))


class WebappSlotTrafficRouting(ScenarioTest):
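    """Set, show and clear traffic-routing rules that send a percentage of traffic to a staging slot."""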
@ResourceGroupPreparer(location=WINDOWS_ASP_LOCATION_WEBAPP)
def test_traffic_routing(self, resource_group):
plan = self.create_random_name(prefix='slot-traffic-plan', length=24)
webapp = self.create_random_name(prefix='slot-traffic-web', length=24)
plan_result = self.cmd(
'appservice plan create -g {} -n {} --sku S1'.format(resource_group, plan)).get_output_in_json()
self.cmd('webapp create -g {} -n {} --plan {}'.format(resource_group,
webapp, plan_result['name']))
        # You can create and use any repo containing the 3 files under "./sample_web" and a 'staging' branch
slot = 'staging'
# create an empty slot
self.cmd(
'webapp deployment slot create -g {} -n {} --slot {}'.format(resource_group, webapp, slot))
self.cmd('webapp traffic-routing set -g {} -n {} -d {}=15'.format(resource_group, webapp, slot), checks=[
JMESPathCheck("[0].actionHostName", webapp +
'-' + slot + '.azurewebsites.net'),
JMESPathCheck("[0].reroutePercentage", 15.0)
])
self.cmd('webapp traffic-routing show -g {} -n {}'.format(resource_group, webapp), checks=[
JMESPathCheck("[0].actionHostName", webapp +
'-' + slot + '.azurewebsites.net'),
JMESPathCheck("[0].reroutePercentage", 15.0)
])
self.cmd(
'webapp traffic-routing clear -g {} -n {}'.format(resource_group, webapp))


class AppServiceCors(ScenarioTest):
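    """Add, show and remove CORS allowed origins for web apps (including slots) and function apps."""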
@ResourceGroupPreparer(location=WINDOWS_ASP_LOCATION_WEBAPP)
def test_webapp_cors(self, resource_group):
self.kwargs.update({
'plan': self.create_random_name(prefix='slot-traffic-plan', length=24),
'web': self.create_random_name(prefix='slot-traffic-web', length=24),
'slot': 'slot1'
})
self.cmd('appservice plan create -g {rg} -n {plan} --sku S1')
self.cmd('webapp create -g {rg} -n {web} --plan {plan}')
self.cmd(
'webapp cors add -g {rg} -n {web} --allowed-origins https://msdn.com https://msn.com')
self.cmd('webapp cors show -g {rg} -n {web}',
checks=self.check('allowedOrigins', ['https://msdn.com', 'https://msn.com']))
self.cmd(
'webapp cors remove -g {rg} -n {web} --allowed-origins https://msn.com')
self.cmd('webapp cors show -g {rg} -n {web}',
checks=self.check('allowedOrigins', ['https://msdn.com']))
self.cmd(
'webapp deployment slot create -g {rg} -n {web} --slot {slot}')
self.cmd(
'webapp cors add -g {rg} -n {web} --slot {slot} --allowed-origins https://foo.com')
self.cmd('webapp cors show -g {rg} -n {web} --slot {slot}',
checks=self.check('allowedOrigins', ['https://foo.com']))
self.cmd(
'webapp cors remove -g {rg} -n {web} --slot {slot} --allowed-origins https://foo.com')
self.cmd('webapp cors show -g {rg} -n {web} --slot {slot}',
checks=self.check('allowedOrigins', []))
@ResourceGroupPreparer(location=WINDOWS_ASP_LOCATION_WEBAPP)
@StorageAccountPreparer()
def test_functionapp_cors(self, resource_group, storage_account):
self.kwargs.update({
'plan': self.create_random_name(prefix='slot-traffic-plan', length=24),
'function': self.create_random_name(prefix='slot-traffic-web', length=24),
'storage': self.create_random_name(prefix='storage', length=24)
})
self.cmd('appservice plan create -g {rg} -n {plan} --sku S1')
self.cmd(
'storage account create --name {storage} -g {rg} --sku Standard_LRS')
self.cmd(
'functionapp create -g {rg} -n {function} --plan {plan} -s {storage}')
self.cmd(
'functionapp cors add -g {rg} -n {function} --allowed-origins https://msdn.com https://msn.com')
result = self.cmd(
'functionapp cors show -g {rg} -n {function}').get_output_in_json()['allowedOrigins']
        # function apps come with pre-defined CORS origins; verify the ones we added are in the list
self.assertTrue(
set(['https://msdn.com', 'https://msn.com']).issubset(set(result)))


class WebappSlotSwapScenarioTest(ScenarioTest):
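    """Swap a deployment slot with preview, complete the swap, then reset it, verifying slot-sticky settings."""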
@ResourceGroupPreparer(location=WINDOWS_ASP_LOCATION_WEBAPP)
def test_webapp_slot_swap(self, resource_group):
plan = self.create_random_name(prefix='slot-swap-plan', length=24)
webapp = self.create_random_name(prefix='slot-swap-web', length=24)
plan_result = self.cmd(
'appservice plan create -g {} -n {} --sku S1'.format(resource_group, plan)).get_output_in_json()
self.cmd('webapp create -g {} -n {} --plan {}'.format(resource_group,
webapp, plan_result['name']))
        # You can create and use any repo containing the 3 files under "./sample_web" and a 'staging' branch
slot = 'staging'
self.cmd(
'webapp config appsettings set -g {} -n {} --slot-settings s1=prod'.format(resource_group, webapp))
# create an empty slot
self.cmd(
'webapp deployment slot create -g {} -n {} --slot {}'.format(resource_group, webapp, slot))
self.cmd('webapp config appsettings set -g {} -n {} --slot-settings s1=slot --slot {}'.format(
resource_group, webapp, slot))
# swap with preview
self.cmd('webapp deployment slot swap -g {} -n {} -s {} --action preview'.format(
resource_group, webapp, slot))
self.cmd('webapp config appsettings list -g {} -n {} --slot {}'.format(resource_group, webapp, slot), checks=[
JMESPathCheck("[?name=='s1']|[0].value", 'prod')
])
# complete the swap
self.cmd(
'webapp deployment slot swap -g {} -n {} -s {}'.format(resource_group, webapp, slot))
self.cmd('webapp config appsettings list -g {} -n {} --slot {}'.format(resource_group, webapp, slot), checks=[
JMESPathCheck("[?name=='s1']|[0].value", 'slot')
])
# reset
self.cmd('webapp deployment slot swap -g {} -n {} -s {} --action reset'.format(
resource_group, webapp, slot))
self.cmd('webapp config appsettings list -g {} -n {} --slot {}'.format(resource_group, webapp, slot), checks=[
JMESPathCheck("[?name=='s1']|[0].value", 'slot')
])


class WebappSSLCertTest(ScenarioTest):
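    """Upload, bind, unbind and delete an SSL certificate on a web app and on a deployment slot."""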
@ResourceGroupPreparer(location=WINDOWS_ASP_LOCATION_WEBAPP)
def test_webapp_ssl(self, resource_group, resource_group_location):
plan = self.create_random_name(prefix='ssl-test-plan', length=24)
webapp_name = self.create_random_name(prefix='web-ssl-test', length=20)
slot_name = self.create_random_name(prefix='slot-ssl-test', length=20)
# Cert Generated using
# https://docs.microsoft.com/azure/app-service-web/web-sites-configure-ssl-certificate#bkmk_ssopenssl
pfx_file = os.path.join(TEST_DIR, 'server.pfx')
cert_password = 'test'
cert_thumbprint = '9E9735C45C792B03B3FFCCA614852B32EE71AD6B'
        # we configure tags here in the hope of capturing a repro for https://github.com/Azure/azure-cli/issues/6929
self.cmd(
'appservice plan create -g {} -n {} --sku S1 --tags plan=plan1'.format(resource_group, plan))
self.cmd('appservice plan show -g {} -n {}'.format(resource_group,
plan), self.check('tags.plan', 'plan1'))
self.cmd('webapp create -g {} -n {} --plan {} --tags web=web1'.format(
resource_group, webapp_name, plan))
self.cmd('webapp config ssl upload -g {} -n {} --certificate-file "{}" --certificate-password {}'.format(resource_group, webapp_name, pfx_file, cert_password), checks=[
JMESPathCheck('thumbprint', cert_thumbprint)
])
self.cmd('webapp show -g {} -n {}'.format(resource_group,
webapp_name), self.check('tags.web', 'web1'))
self.cmd('webapp config ssl bind -g {} -n {} --certificate-thumbprint {} --ssl-type {}'.format(resource_group, webapp_name, cert_thumbprint, 'SNI'), checks=[
JMESPathCheck("hostNameSslStates|[?name=='{}.azurewebsites.net']|[0].sslState".format(
webapp_name), 'SniEnabled'),
JMESPathCheck("hostNameSslStates|[?name=='{}.azurewebsites.net']|[0].thumbprint".format(
webapp_name), cert_thumbprint)
])
self.cmd('webapp show -g {} -n {}'.format(resource_group,
webapp_name), self.check('tags.web', 'web1'))
self.cmd('webapp config ssl unbind -g {} -n {} --certificate-thumbprint {}'.format(resource_group, webapp_name, cert_thumbprint), checks=[
JMESPathCheck("hostNameSslStates|[?name=='{}.azurewebsites.net']|[0].sslState".format(
webapp_name), 'Disabled'),
])
self.cmd('webapp show -g {} -n {}'.format(resource_group,
webapp_name), self.check('tags.web', 'web1'))
self.cmd('webapp config ssl delete -g {} --certificate-thumbprint {}'.format(
resource_group, cert_thumbprint))
self.cmd('webapp show -g {} -n {}'.format(resource_group,
webapp_name), self.check('tags.web', 'web1'))
# test with slot
self.cmd('webapp deployment slot create -g {} -n {} --slot {}'.format(
resource_group, webapp_name, slot_name))
self.cmd('webapp config ssl upload -g {} -n {} --certificate-file "{}" --certificate-password {} -s {}'.format(resource_group, webapp_name, pfx_file, cert_password, slot_name), checks=[
JMESPathCheck('thumbprint', cert_thumbprint)
])
self.cmd(
'webapp show -g {} -n {} -s {}'.format(resource_group, webapp_name, slot_name))
self.cmd('webapp config ssl bind -g {} -n {} --certificate-thumbprint {} --ssl-type {} -s {}'.format(resource_group, webapp_name, cert_thumbprint, 'SNI', slot_name), checks=[
JMESPathCheck("hostNameSslStates|[?name=='{}-{}.azurewebsites.net']|[0].sslState".format(
webapp_name, slot_name), 'SniEnabled'),
JMESPathCheck("hostNameSslStates|[?name=='{}-{}.azurewebsites.net']|[0].thumbprint".format(
webapp_name, slot_name), cert_thumbprint)
])
self.cmd(
'webapp show -g {} -n {} -s {}'.format(resource_group, webapp_name, slot_name))
self.cmd('webapp config ssl unbind -g {} -n {} --certificate-thumbprint {} -s {}'.format(resource_group, webapp_name, cert_thumbprint, slot_name), checks=[
JMESPathCheck("hostNameSslStates|[?name=='{}-{}.azurewebsites.net']|[0].sslState".format(
webapp_name, slot_name), 'Disabled'),
])
self.cmd(
'webapp show -g {} -n {} -s {}'.format(resource_group, webapp_name, slot_name))
self.cmd('webapp config ssl delete -g {} --certificate-thumbprint {}'.format(
resource_group, cert_thumbprint))
self.cmd(
'webapp show -g {} -n {} -s {}'.format(resource_group, webapp_name, slot_name))
self.cmd('webapp delete -g {} -n {}'.format(resource_group, webapp_name))


class WebappSSLImportCertTest(ScenarioTest):
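    """Import an SSL certificate from Key Vault, in the same and in a different resource group, and bind it."""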
@ResourceGroupPreparer(location=WINDOWS_ASP_LOCATION_WEBAPP)
def test_webapp_ssl_import(self, resource_group):
plan_name = self.create_random_name(prefix='ssl-test-plan', length=24)
webapp_name = self.create_random_name(prefix='web-ssl-test', length=20)
kv_name = self.create_random_name(prefix='kv-ssl-test', length=20)
# Cert Generated using
# https://docs.microsoft.com/azure/app-service-web/web-sites-configure-ssl-certificate#bkmk_ssopenssl
pfx_file = os.path.join(TEST_DIR, 'server.pfx')
cert_password = 'test'
cert_thumbprint = '9E9735C45C792B03B3FFCCA614852B32EE71AD6B'
cert_name = 'test-cert'
        # we configure tags here in the hope of capturing a repro for https://github.com/Azure/azure-cli/issues/6929
self.cmd(
'appservice plan create -g {} -n {} --sku B1'.format(resource_group, plan_name))
self.cmd(
'webapp create -g {} -n {} --plan {}'.format(resource_group, webapp_name, plan_name))
self.cmd('keyvault create -g {} -n {}'.format(resource_group, kv_name))
self.cmd('keyvault set-policy -g {} --name {} --spn {} --secret-permissions get'.format(
resource_group, kv_name, 'Microsoft.Azure.WebSites'))
self.cmd('keyvault certificate import --name {} --vault-name {} --file "{}" --password {}'.format(
cert_name, kv_name, pfx_file, cert_password))
self.cmd('webapp config ssl import --resource-group {} --name {} --key-vault {} --key-vault-certificate-name {}'.format(resource_group, webapp_name, kv_name, cert_name), checks=[
JMESPathCheck('keyVaultSecretStatus', 'Initialized'),
JMESPathCheck('thumbprint', cert_thumbprint)
])
self.cmd('webapp config ssl bind -g {} -n {} --certificate-thumbprint {} --ssl-type {}'.format(resource_group, webapp_name, cert_thumbprint, 'SNI'), checks=[
JMESPathCheck("hostNameSslStates|[?name=='{}.azurewebsites.net']|[0].sslState".format(
webapp_name), 'SniEnabled'),
JMESPathCheck("hostNameSslStates|[?name=='{}.azurewebsites.net']|[0].thumbprint".format(
webapp_name), cert_thumbprint)
])
@ResourceGroupPreparer(parameter_name='kv_resource_group', location=WINDOWS_ASP_LOCATION_WEBAPP)
@ResourceGroupPreparer(location=WINDOWS_ASP_LOCATION_WEBAPP)
def test_webapp_ssl_import_crossrg(self, resource_group, kv_resource_group):
plan_name = self.create_random_name(prefix='ssl-test-plan', length=24)
webapp_name = self.create_random_name(prefix='web-ssl-test', length=20)
kv_name = self.create_random_name(prefix='kv-ssl-test', length=20)
# Cert Generated using
# https://docs.microsoft.com/azure/app-service-web/web-sites-configure-ssl-certificate#bkmk_ssopenssl
pfx_file = os.path.join(TEST_DIR, 'server.pfx')
cert_password = 'test'
cert_thumbprint = '9E9735C45C792B03B3FFCCA614852B32EE71AD6B'
cert_name = 'test-cert'
        # we configure tags here in the hope of capturing a repro for https://github.com/Azure/azure-cli/issues/6929
self.cmd(
'appservice plan create -g {} -n {} --sku B1'.format(resource_group, plan_name))
self.cmd(
'webapp create -g {} -n {} --plan {}'.format(resource_group, webapp_name, plan_name))
kv_id = self.cmd('keyvault create -g {} -n {}'.format(kv_resource_group, kv_name)).get_output_in_json()['id']
self.cmd('keyvault set-policy -g {} --name {} --spn {} --secret-permissions get'.format(
kv_resource_group, kv_name, 'Microsoft.Azure.WebSites'))
self.cmd('keyvault certificate import --name {} --vault-name {} --file "{}" --password {}'.format(
cert_name, kv_name, pfx_file, cert_password))
self.cmd('webapp config ssl import --resource-group {} --name {} --key-vault {} --key-vault-certificate-name {}'.format(resource_group, webapp_name, kv_id, cert_name), checks=[
JMESPathCheck('keyVaultSecretStatus', 'Initialized'),
JMESPathCheck('thumbprint', cert_thumbprint)
])
self.cmd('webapp config ssl bind -g {} -n {} --certificate-thumbprint {} --ssl-type {}'.format(resource_group, webapp_name, cert_thumbprint, 'SNI'), checks=[
JMESPathCheck("hostNameSslStates|[?name=='{}.azurewebsites.net']|[0].sslState".format(
webapp_name), 'SniEnabled'),
JMESPathCheck("hostNameSslStates|[?name=='{}.azurewebsites.net']|[0].thumbprint".format(
webapp_name), cert_thumbprint)
])


class WebappUndeleteTest(ScenarioTest):
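    """Verify that a deleted web app shows up in 'webapp deleted list'."""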
@AllowLargeResponse(8192)
@ResourceGroupPreparer(location=WINDOWS_ASP_LOCATION_WEBAPP)
def test_webapp_deleted_list(self, resource_group):
plan = self.create_random_name(prefix='delete-me-plan', length=24)
webapp_name = self.create_random_name(
prefix='delete-me-web', length=24)
self.cmd(
'appservice plan create -g {} -n {} --sku B1 --tags plan=plan1'.format(resource_group, plan))
self.cmd(
'webapp create -g {} -n {} --plan {}'.format(resource_group, webapp_name, plan))
self.cmd('webapp delete -g {} -n {}'.format(resource_group, webapp_name))
self.cmd('webapp deleted list -g {}'.format(resource_group), checks=[
JMESPathCheck('[0].deletedSiteName', webapp_name)
])


class FunctionAppWithPlanE2ETest(ScenarioTest):
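    """End-to-end tests for function apps on App Service plans, including Linux plans with the Java runtime."""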
@ResourceGroupPreparer(location=WINDOWS_ASP_LOCATION_FUNCTIONAPP)
@ResourceGroupPreparer(parameter_name='resource_group2', location=WINDOWS_ASP_LOCATION_FUNCTIONAPP)
def test_functionapp_e2e(self, resource_group, resource_group2):
functionapp_name, functionapp_name2 = self.create_random_name(
'func-e2e', 24), self.create_random_name('func-e2e', 24)
plan = self.create_random_name('func-e2e-plan', 24)
storage, storage2 = 'functionappplanstorage', 'functionappplanstorage2'
plan_id = self.cmd('appservice plan create -g {} -n {}'.format(
resource_group, plan)).get_output_in_json()['id']
self.cmd('appservice plan list -g {}'.format(resource_group))
self.cmd(
'storage account create --name {} -g {} -l {} --sku Standard_LRS'.format(storage, resource_group, WINDOWS_ASP_LOCATION_FUNCTIONAPP))
storage_account_id2 = self.cmd('storage account create --name {} -g {} -l {} --sku Standard_LRS'.format(
storage2, resource_group2, WINDOWS_ASP_LOCATION_FUNCTIONAPP)).get_output_in_json()['id']
self.cmd('functionapp create -g {} -n {} -p {} -s {}'.format(resource_group, functionapp_name, plan, storage), checks=[
JMESPathCheck('state', 'Running'),
JMESPathCheck('name', functionapp_name),
JMESPathCheck('hostNames[0]',
functionapp_name + '.azurewebsites.net')
])
self.cmd('functionapp create -g {} -n {} -p {} -s {}'.format(resource_group2,
functionapp_name2, plan_id, storage_account_id2))
self.cmd(
'functionapp delete -g {} -n {}'.format(resource_group, functionapp_name))
@ResourceGroupPreparer(location=LINUX_ASP_LOCATION_FUNCTIONAPP)
@StorageAccountPreparer()
def test_functionapp_on_linux_app_service_java(self, resource_group, storage_account):
plan = self.create_random_name(prefix='funcapplinplan', length=24)
functionapp = self.create_random_name(
prefix='functionapp-linux', length=24)
self.cmd('functionapp plan create -g {} -n {} --sku S1 --is-linux'.format(resource_group, plan), checks=[
            # the 'reserved' field being True indicates a Linux plan
JMESPathCheck('reserved', True),
JMESPathCheck('sku.name', 'S1'),
])
self.cmd('functionapp create -g {} -n {} --plan {} -s {} --runtime java --functions-version 3'
.format(resource_group, functionapp, plan, storage_account),
checks=[
JMESPathCheck('name', functionapp)
])
result = self.cmd('functionapp list -g {}'.format(resource_group), checks=[
JMESPathCheck('length([])', 1),
JMESPathCheck('[0].name', functionapp)
]).get_output_in_json()
self.assertTrue('functionapp,linux' in result[0]['kind'])
self.cmd('functionapp config show -g {} -n {}'.format(resource_group, functionapp), checks=[
JMESPathCheck('linuxFxVersion', 'Java|8')])
@ResourceGroupPreparer(location=LINUX_ASP_LOCATION_FUNCTIONAPP)
@StorageAccountPreparer()
def test_functionapp_on_linux_app_service_java_with_runtime_version(self, resource_group, storage_account):
plan = self.create_random_name(prefix='funcapplinplan', length=24)
functionapp = self.create_random_name(
prefix='functionapp-linux', length=24)
self.cmd('functionapp plan create -g {} -n {} --sku S1 --is-linux'.format(resource_group, plan), checks=[
            # the 'reserved' field being True indicates a Linux plan
JMESPathCheck('reserved', True),
JMESPathCheck('sku.name', 'S1'),
])
self.cmd('functionapp create -g {} -n {} --plan {} -s {} --runtime java --runtime-version 11 --functions-version 3'
.format(resource_group, functionapp, plan, storage_account),
checks=[
JMESPathCheck('name', functionapp)
])
result = self.cmd('functionapp list -g {}'.format(resource_group), checks=[
JMESPathCheck('length([])', 1),
JMESPathCheck('[0].name', functionapp)
]).get_output_in_json()
self.assertTrue('functionapp,linux' in result[0]['kind'])
self.cmd('functionapp config show -g {} -n {}'.format(resource_group, functionapp), checks=[
JMESPathCheck('linuxFxVersion', 'Java|11')])


class FunctionUpdatePlan(ScenarioTest):
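    """Move a function app between Elastic Premium plans and verify that moving to a dedicated plan fails."""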
@ResourceGroupPreparer(location=WINDOWS_ASP_LOCATION_FUNCTIONAPP)
@StorageAccountPreparer()
def test_move_plan_to_elastic(self, resource_group, storage_account):
functionapp_name = self.create_random_name('functionappelastic', 40)
ep_plan_name = self.create_random_name('somerandomplan', 40)
second_plan_name = self.create_random_name('secondplan', 40)
s1_plan_name = self.create_random_name('ab1planname', 40)
plan_result = self.cmd('functionapp plan create -g {} -n {} --sku EP1'.format(resource_group, ep_plan_name), checks=[
JMESPathCheck('sku.name', 'EP1')
]).get_output_in_json()
self.cmd('functionapp plan create -g {} -n {} --sku EP1'.format(resource_group, second_plan_name), checks=[
JMESPathCheck('sku.name', 'EP1')
]).get_output_in_json()
self.cmd('functionapp plan create -g {} -n {} --sku S1'.format(resource_group, s1_plan_name), checks=[
JMESPathCheck('sku.name', 'S1')
])
self.cmd('functionapp create -g {} -n {} --plan {} -s {}'
.format(resource_group, functionapp_name, second_plan_name, storage_account)).assert_with_checks([
JMESPathCheck('state', 'Running'),
JMESPathCheck('name', functionapp_name),
JMESPathCheck('hostNames[0]', functionapp_name + '.azurewebsites.net')])
self.cmd('functionapp update -g {} -n {} --plan {}'
.format(resource_group, functionapp_name, ep_plan_name)).assert_with_checks([
JMESPathCheck('state', 'Running'),
JMESPathCheck('name', functionapp_name),
JMESPathCheck('serverFarmId', plan_result['id']),
JMESPathCheck('hostNames[0]', functionapp_name + '.azurewebsites.net')])
# Moving to and from an App Service plan (not Elastic Premium) is not allowed right now
self.cmd('functionapp update -g {} -n {} --plan {}'
.format(resource_group, functionapp_name, s1_plan_name), expect_failure=True)


class FunctionAppWithConsumptionPlanE2ETest(ScenarioTest):
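    """End-to-end tests for Windows consumption-plan function apps, including one backed by RA-GRS storage."""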
@ResourceGroupPreparer(name_prefix='azurecli-functionapp-c-e2e', location=WINDOWS_ASP_LOCATION_FUNCTIONAPP)
@StorageAccountPreparer()
def test_functionapp_consumption_e2e(self, resource_group, storage_account):
functionapp_name = self.create_random_name(
'functionappconsumption', 40)
self.cmd('functionapp create -g {} -n {} -c {} -s {}'
.format(resource_group, functionapp_name, WINDOWS_ASP_LOCATION_FUNCTIONAPP, storage_account)).assert_with_checks([
JMESPathCheck('state', 'Running'),
JMESPathCheck('name', functionapp_name),
JMESPathCheck('hostNames[0]', functionapp_name + '.azurewebsites.net')])
self.cmd('functionapp list -g {}'.format(resource_group), checks=[
JMESPathCheck('[0].kind', 'functionapp'),
JMESPathCheck('[0].name', functionapp_name)
])
self.cmd('functionapp show -g {} -n {}'.format(resource_group, functionapp_name), checks=[
JMESPathCheck('kind', 'functionapp'),
JMESPathCheck('name', functionapp_name)
])
self.cmd('functionapp update -g {} -n {} --set clientAffinityEnabled=true'.format(resource_group, functionapp_name),
checks=[self.check('clientAffinityEnabled', True)]
)
self.cmd(
'functionapp delete -g {} -n {}'.format(resource_group, functionapp_name))
@ResourceGroupPreparer(name_prefix='azurecli-functionapp-c-e2e-ragrs', location=WINDOWS_ASP_LOCATION_FUNCTIONAPP)
@StorageAccountPreparer(sku='Standard_RAGRS')
def test_functionapp_consumption_ragrs_storage_e2e(self, resource_group, storage_account):
functionapp_name = self.create_random_name(
'functionappconsumption', 40)
self.cmd('functionapp create -g {} -n {} -c {} -s {}'
.format(resource_group, functionapp_name, WINDOWS_ASP_LOCATION_FUNCTIONAPP, storage_account)).assert_with_checks([
JMESPathCheck('state', 'Running'),
JMESPathCheck('name', functionapp_name),
JMESPathCheck('hostNames[0]', functionapp_name + '.azurewebsites.net')])
self.cmd('functionapp show -g {} -n {}'.format(resource_group, functionapp_name), checks=[
JMESPathCheck('kind', 'functionapp'),
JMESPathCheck('name', functionapp_name)
])


class FunctionAppWithLinuxConsumptionPlanTest(ScenarioTest):
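    """Create Linux consumption-plan function apps with the node and java runtimes."""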
@ResourceGroupPreparer(name_prefix='azurecli-functionapp-linux', location=LINUX_ASP_LOCATION_FUNCTIONAPP)
@StorageAccountPreparer()
def test_functionapp_consumption_linux(self, resource_group, storage_account):
functionapp_name = self.create_random_name(
'functionapplinuxconsumption', 40)
self.cmd('functionapp create -g {} -n {} -c {} -s {} --os-type Linux --runtime node'
.format(resource_group, functionapp_name, LINUX_ASP_LOCATION_FUNCTIONAPP, storage_account)).assert_with_checks([
JMESPathCheck('state', 'Running'),
JMESPathCheck('name', functionapp_name),
JMESPathCheck('reserved', True),
JMESPathCheck('kind', 'functionapp,linux'),
JMESPathCheck('hostNames[0]', functionapp_name + '.azurewebsites.net')])
self.cmd('functionapp config appsettings list -g {} -n {}'.format(resource_group, functionapp_name), checks=[
JMESPathCheck("[?name=='FUNCTIONS_WORKER_RUNTIME'].value|[0]", 'node')])
@ResourceGroupPreparer(name_prefix='azurecli-functionapp-linux', location=LINUX_ASP_LOCATION_FUNCTIONAPP)
@StorageAccountPreparer()
def test_functionapp_consumption_linux_java(self, resource_group, storage_account):
functionapp_name = self.create_random_name(
'functionapplinuxconsumption', 40)
self.cmd('functionapp create -g {} -n {} -c {} -s {} --os-type Linux --runtime java --functions-version 3'
.format(resource_group, functionapp_name, LINUX_ASP_LOCATION_FUNCTIONAPP, storage_account)).assert_with_checks([
JMESPathCheck('state', 'Running'),
JMESPathCheck('name', functionapp_name),
JMESPathCheck('reserved', True),
JMESPathCheck('kind', 'functionapp,linux'),
JMESPathCheck('hostNames[0]', functionapp_name + '.azurewebsites.net')])
self.cmd('functionapp config appsettings list -g {} -n {}'.format(resource_group, functionapp_name), checks=[
JMESPathCheck("[?name=='FUNCTIONS_WORKER_RUNTIME'].value|[0]", 'java')])


class FunctionAppOnWindowsWithRuntime(ScenarioTest):
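    """Create Windows consumption function apps with explicit runtimes, runtime versions and functions versions."""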
@ResourceGroupPreparer(location=WINDOWS_ASP_LOCATION_FUNCTIONAPP)
@StorageAccountPreparer()
def test_functionapp_windows_runtime(self, resource_group, storage_account):
functionapp_name = self.create_random_name(
'functionappwindowsruntime', 40)
self.cmd('functionapp create -g {} -n {} -c {} -s {} --os-type Windows --runtime node'
.format(resource_group, functionapp_name, WINDOWS_ASP_LOCATION_FUNCTIONAPP, storage_account)).assert_with_checks([
JMESPathCheck('state', 'Running'),
JMESPathCheck('name', functionapp_name),
JMESPathCheck('kind', 'functionapp'),
JMESPathCheck('hostNames[0]', functionapp_name + '.azurewebsites.net')])
self.cmd('functionapp config appsettings list -g {} -n {}'.format(resource_group, functionapp_name), checks=[
JMESPathCheck("[?name=='FUNCTIONS_WORKER_RUNTIME'].value|[0]", 'node')])
@ResourceGroupPreparer(location=WINDOWS_ASP_LOCATION_FUNCTIONAPP)
@StorageAccountPreparer()
def test_functionapp_windows_runtime_java(self, resource_group, storage_account):
functionapp_name = self.create_random_name(
'functionappwindowsruntime', 40)
self.cmd('functionapp create -g {} -n {} -c {} -s {} --os-type Windows --runtime java'
.format(resource_group, functionapp_name, WINDOWS_ASP_LOCATION_FUNCTIONAPP, storage_account)).assert_with_checks([
JMESPathCheck('state', 'Running'),
JMESPathCheck('name', functionapp_name),
JMESPathCheck('kind', 'functionapp'),
JMESPathCheck('hostNames[0]', functionapp_name + '.azurewebsites.net')])
self.cmd('functionapp config appsettings list -g {} -n {}'.format(resource_group, functionapp_name), checks=[
JMESPathCheck("[?name=='FUNCTIONS_WORKER_RUNTIME'].value|[0]", 'java')])
self.cmd('functionapp config show -g {} -n {}'.format(resource_group, functionapp_name), checks=[
JMESPathCheck('javaVersion', '1.8')])
@ResourceGroupPreparer(location=WINDOWS_ASP_LOCATION_FUNCTIONAPP)
@StorageAccountPreparer()
def test_functionapp_windows_runtime_powershell(self, resource_group, storage_account):
functionapp_name = self.create_random_name(
'functionappwindowsruntime', 40)
self.cmd('functionapp create -g {} -n {} -c {} -s {} --os-type Windows --runtime powershell'
.format(resource_group, functionapp_name, WINDOWS_ASP_LOCATION_FUNCTIONAPP, storage_account)).assert_with_checks([
JMESPathCheck('state', 'Running'),
JMESPathCheck('name', functionapp_name),
JMESPathCheck('kind', 'functionapp'),
JMESPathCheck('hostNames[0]', functionapp_name + '.azurewebsites.net')])
self.cmd('functionapp config appsettings list -g {} -n {}'.format(resource_group, functionapp_name), checks=[
JMESPathCheck("[?name=='FUNCTIONS_WORKER_RUNTIME'].value|[0]", 'powershell')])
self.cmd('functionapp config show -g {} -n {}'.format(resource_group, functionapp_name), checks=[
JMESPathCheck('powerShellVersion', '~6')])
@ResourceGroupPreparer(location=WINDOWS_ASP_LOCATION_FUNCTIONAPP)
@StorageAccountPreparer()
def test_functionapp_windows_runtime_version(self, resource_group, storage_account):
functionapp_name = self.create_random_name(
'functionappwindowsruntime', 40)
self.cmd('functionapp create -g {} -n {} -c {} -s {} --os-type Windows --runtime node --runtime-version 8'
.format(resource_group, functionapp_name, WINDOWS_ASP_LOCATION_FUNCTIONAPP, storage_account)).assert_with_checks([
JMESPathCheck('state', 'Running'),
JMESPathCheck('name', functionapp_name),
JMESPathCheck('kind', 'functionapp'),
JMESPathCheck('hostNames[0]', functionapp_name + '.azurewebsites.net')])
self.cmd('functionapp config appsettings list -g {} -n {}'.format(resource_group, functionapp_name), checks=[
JMESPathCheck(
"[?name=='FUNCTIONS_WORKER_RUNTIME'].value|[0]", 'node'),
JMESPathCheck("[?name=='WEBSITE_NODE_DEFAULT_VERSION'].value|[0]", '~8')])
self.cmd(
'functionapp delete -g {} -n {}'.format(resource_group, functionapp_name))
@ResourceGroupPreparer(location=WINDOWS_ASP_LOCATION_FUNCTIONAPP)
@StorageAccountPreparer()
def test_functionapp_windows_runtime_version_invalid(self, resource_group, storage_account):
functionapp_name = self.create_random_name(
'functionappwindowsruntime', 40)
self.cmd('functionapp create -g {} -n {} -c {} -s {} '
'--os-type Windows --runtime node --runtime-version 8.2'
.format(resource_group, functionapp_name, WINDOWS_ASP_LOCATION_FUNCTIONAPP, storage_account), expect_failure=True)
@ResourceGroupPreparer(location=WINDOWS_ASP_LOCATION_FUNCTIONAPP)
@StorageAccountPreparer()
def test_functionapp_windows_runtime_functions_version(self, resource_group, storage_account):
functionapp_name = self.create_random_name(
'functionappwindowsruntime', 40)
self.cmd('functionapp create -g {} -n {} -c {} -s {} --functions-version 3 --os-type Windows --runtime node'
.format(resource_group, functionapp_name, WINDOWS_ASP_LOCATION_FUNCTIONAPP, storage_account)).assert_with_checks([
JMESPathCheck('state', 'Running'),
JMESPathCheck('name', functionapp_name),
JMESPathCheck('kind', 'functionapp'),
JMESPathCheck('hostNames[0]', functionapp_name + '.azurewebsites.net')])
self.cmd('functionapp config appsettings list -g {} -n {}'.format(resource_group, functionapp_name), checks=[
JMESPathCheck(
"[?name=='FUNCTIONS_EXTENSION_VERSION'].value|[0]", '~3'),
JMESPathCheck("[?name=='WEBSITE_NODE_DEFAULT_VERSION'].value|[0]", '~12')])


class FunctionAppOnWindowsWithoutRuntime(ScenarioTest):
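    """Create a Windows consumption function app without specifying a runtime."""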
@ResourceGroupPreparer(location=WINDOWS_ASP_LOCATION_FUNCTIONAPP)
@StorageAccountPreparer()
def test_functionapp_windows_without_runtime(self, resource_group, storage_account):
functionapp_name = self.create_random_name(
'functionappwindowswithoutruntime', 40)
self.cmd('functionapp create -g {} -n {} -c {} -s {} --os-type Windows'
.format(resource_group, functionapp_name, WINDOWS_ASP_LOCATION_FUNCTIONAPP, storage_account)).assert_with_checks([
JMESPathCheck('state', 'Running'),
JMESPathCheck('name', functionapp_name),
JMESPathCheck('kind', 'functionapp'),
JMESPathCheck('hostNames[0]', functionapp_name + '.azurewebsites.net')])
self.cmd(
'functionapp delete -g {} -n {}'.format(resource_group, functionapp_name))


class FunctionAppWithAppInsightsKey(ScenarioTest):
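    """Create a function app with an explicit Application Insights instrumentation key."""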
@ResourceGroupPreparer(location=WINDOWS_ASP_LOCATION_FUNCTIONAPP)
@StorageAccountPreparer()
def test_functionapp_with_app_insights_key(self, resource_group, storage_account):
functionapp_name = self.create_random_name(
'functionappwithappinsights', 40)
app_insights_key = '00000000-0000-0000-0000-123456789123'
self.cmd('functionapp create -g {} -n {} -c {} -s {} --os-type Windows'
' --app-insights-key {}'
.format(resource_group, functionapp_name, WINDOWS_ASP_LOCATION_FUNCTIONAPP, storage_account, app_insights_key)).assert_with_checks([
JMESPathCheck('state', 'Running'),
JMESPathCheck('name', functionapp_name),
JMESPathCheck('kind', 'functionapp'),
JMESPathCheck('hostNames[0]', functionapp_name + '.azurewebsites.net')])
self.cmd('functionapp config appsettings list -g {} -n {}'.format(resource_group, functionapp_name)).assert_with_checks([
JMESPathCheck(
"[?name=='APPINSIGHTS_INSTRUMENTATIONKEY'].value|[0]", app_insights_key)
])
self.cmd(
'functionapp delete -g {} -n {}'.format(resource_group, functionapp_name))


class FunctionAppWithAppInsightsDefault(ScenarioTest):
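    """Verify the default Application Insights behavior, with and without --disable-app-insights."""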
@ResourceGroupPreparer(location=WINDOWS_ASP_LOCATION_FUNCTIONAPP)
@StorageAccountPreparer()
def test_functionapp_with_default_app_insights(self, resource_group, storage_account):
functionapp_name = self.create_random_name(
'functionappwithappinsights', 40)
self.cmd('functionapp create -g {} -n {} -c {} -s {} --os-type Windows'
.format(resource_group, functionapp_name, WINDOWS_ASP_LOCATION_FUNCTIONAPP, storage_account)).assert_with_checks([
JMESPathCheck('state', 'Running'),
JMESPathCheck('name', functionapp_name),
JMESPathCheck('kind', 'functionapp'),
JMESPathCheck('hostNames[0]', functionapp_name + '.azurewebsites.net')])
app_set = self.cmd('functionapp config appsettings list -g {} -n {}'.format(resource_group,
functionapp_name)).get_output_in_json()
self.assertTrue('APPINSIGHTS_INSTRUMENTATIONKEY' in [
kp['name'] for kp in app_set])
self.assertTrue('AzureWebJobsDashboard' not in [
kp['name'] for kp in app_set])
@ResourceGroupPreparer(location=WINDOWS_ASP_LOCATION_FUNCTIONAPP)
@StorageAccountPreparer()
def test_functionapp_with_no_default_app_insights(self, resource_group, storage_account):
functionapp_name = self.create_random_name(
'functionappwithappinsights', 40)
self.cmd('functionapp create -g {} -n {} -c {} -s {} --os-type Windows --disable-app-insights'
.format(resource_group, functionapp_name, WINDOWS_ASP_LOCATION_FUNCTIONAPP, storage_account)).assert_with_checks([
JMESPathCheck('state', 'Running'),
JMESPathCheck('name', functionapp_name),
JMESPathCheck('kind', 'functionapp'),
JMESPathCheck('hostNames[0]', functionapp_name + '.azurewebsites.net')])
app_set = self.cmd('functionapp config appsettings list -g {} -n {}'.format(resource_group,
functionapp_name)).get_output_in_json()
self.assertTrue('APPINSIGHTS_INSTRUMENTATIONKEY' not in [
kp['name'] for kp in app_set])
self.assertTrue('AzureWebJobsDashboard' in [
kp['name'] for kp in app_set])


class FunctionAppOnLinux(ScenarioTest):
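    """Create Linux function apps on dedicated and consumption plans and verify the resulting linuxFxVersion."""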
@ResourceGroupPreparer(location=LINUX_ASP_LOCATION_FUNCTIONAPP)
@StorageAccountPreparer()
def test_functionapp_on_linux(self, resource_group, storage_account):
plan = self.create_random_name(prefix='funcapplinplan', length=24)
functionapp = self.create_random_name(
prefix='functionapp-linux', length=24)
self.cmd('appservice plan create -g {} -n {} --sku S1 --is-linux' .format(resource_group, plan), checks=[
            # the 'reserved' field being True indicates a Linux plan
JMESPathCheck('reserved', True),
JMESPathCheck('sku.name', 'S1'),
])
self.cmd('functionapp create -g {} -n {} --plan {} -s {} --runtime node'.format(resource_group, functionapp, plan, storage_account), checks=[
JMESPathCheck('name', functionapp)
])
result = self.cmd('functionapp list -g {}'.format(resource_group), checks=[
JMESPathCheck('length([])', 1),
JMESPathCheck('[0].name', functionapp)
]).get_output_in_json()
self.assertTrue('functionapp,linux' in result[0]['kind'])
self.cmd('functionapp config show -g {} -n {}'.format(resource_group, functionapp), checks=[
JMESPathCheck('linuxFxVersion', 'Node|10')])
self.cmd('functionapp delete -g {} -n {}'.format(resource_group, functionapp))
@ResourceGroupPreparer(location=LINUX_ASP_LOCATION_FUNCTIONAPP)
@StorageAccountPreparer()
def test_functionapp_on_linux_version(self, resource_group, storage_account):
plan = self.create_random_name(prefix='funcapplinplan', length=24)
functionapp = self.create_random_name(
prefix='functionapp-linux', length=24)
self.cmd('functionapp plan create -g {} -n {} --sku S1 --is-linux'.format(resource_group, plan), checks=[
            # the 'reserved' field being True indicates a Linux plan
JMESPathCheck('reserved', True),
JMESPathCheck('sku.name', 'S1'),
])
self.cmd('functionapp create -g {} -n {} --plan {} -s {} --runtime node --runtime-version 10'
.format(resource_group, functionapp, plan, storage_account),
checks=[
JMESPathCheck('name', functionapp)
])
result = self.cmd('functionapp list -g {}'.format(resource_group), checks=[
JMESPathCheck('length([])', 1),
JMESPathCheck('[0].name', functionapp)
]).get_output_in_json()
self.assertTrue('functionapp,linux' in result[0]['kind'])
self.cmd('functionapp config show -g {} -n {}'.format(resource_group, functionapp), checks=[
JMESPathCheck('linuxFxVersion', 'Node|10')])
@ResourceGroupPreparer(location=LINUX_ASP_LOCATION_FUNCTIONAPP)
@StorageAccountPreparer()
def test_functionapp_on_linux_version_consumption(self, resource_group, storage_account):
functionapp = self.create_random_name(
prefix='functionapp-linux', length=24)
self.cmd('functionapp create -g {} -n {} -c {} -s {} --os-type linux --runtime python --runtime-version 3.7'
.format(resource_group, functionapp, LINUX_ASP_LOCATION_FUNCTIONAPP, storage_account), checks=[
JMESPathCheck('name', functionapp)
])
self.cmd('functionapp config show -g {} -n {}'.format(resource_group, functionapp), checks=[
JMESPathCheck('linuxFxVersion', 'Python|3.7')])
@ResourceGroupPreparer(location=LINUX_ASP_LOCATION_FUNCTIONAPP)
@StorageAccountPreparer()
def test_functionapp_on_linux_version_error(self, resource_group, storage_account):
plan = self.create_random_name(prefix='funcapplinplan', length=24)
functionapp = self.create_random_name(
prefix='functionapp-linux', length=24)
self.cmd('functionapp plan create -g {} -n {} --sku S1 --is-linux'.format(resource_group, plan), checks=[
JMESPathCheck('reserved', True),
JMESPathCheck('sku.name', 'S1'),
])
self.cmd('functionapp create -g {} -n {} --plan {} -s {} --runtime python --runtime-version 3.8'
.format(resource_group, functionapp, plan, storage_account), expect_failure=True)
@ResourceGroupPreparer(location=LINUX_ASP_LOCATION_FUNCTIONAPP)
@StorageAccountPreparer()
def test_functionapp_on_linux_functions_version(self, resource_group, storage_account):
plan = self.create_random_name(prefix='funcapplinplan', length=24)
functionapp = self.create_random_name(
prefix='functionapp-linux', length=24)
self.cmd('appservice plan create -g {} -n {} --sku S1 --is-linux' .format(resource_group, plan), checks=[
            # the 'reserved' field being True indicates a Linux plan
JMESPathCheck('reserved', True),
JMESPathCheck('sku.name', 'S1')
])
self.cmd('functionapp create -g {} -n {} --plan {} -s {} --functions-version 3 --runtime node'
.format(resource_group, functionapp, plan, storage_account), checks=[
JMESPathCheck('name', functionapp)
])
self.cmd('functionapp config show -g {} -n {}'.format(resource_group, functionapp), checks=[
JMESPathCheck('linuxFxVersion', 'Node|12')
])
self.cmd('functionapp config appsettings list -g {} -n {}'.format(resource_group, functionapp)).assert_with_checks([
JMESPathCheck(
"[?name=='FUNCTIONS_EXTENSION_VERSION'].value|[0]", '~3')
])
@ResourceGroupPreparer(location=LINUX_ASP_LOCATION_FUNCTIONAPP)
@StorageAccountPreparer()
def test_functionapp_on_linux_functions_version_consumption(self, resource_group, storage_account):
functionapp = self.create_random_name(
prefix='functionapp-linux', length=24)
self.cmd('functionapp create -g {} -n {} -c {} -s {} --functions-version 3 --runtime node --os-type linux'
.format(resource_group, functionapp, LINUX_ASP_LOCATION_FUNCTIONAPP, storage_account), checks=[
JMESPathCheck('name', functionapp)
])
self.cmd('functionapp config show -g {} -n {}'.format(resource_group, functionapp), checks=[
JMESPathCheck('linuxFxVersion', 'Node|12')
])
self.cmd('functionapp config appsettings list -g {} -n {}'.format(resource_group, functionapp)).assert_with_checks([
JMESPathCheck(
"[?name=='FUNCTIONS_EXTENSION_VERSION'].value|[0]", '~3')
])
@ResourceGroupPreparer(location=LINUX_ASP_LOCATION_FUNCTIONAPP)
@StorageAccountPreparer()
def test_functionapp_on_linux_dotnet_consumption(self, resource_group, storage_account):
functionapp = self.create_random_name(
prefix='functionapp-linux', length=24)
self.cmd('functionapp create -g {} -n {} -c {} -s {} --functions-version 3 --runtime dotnet --os-type linux'
.format(resource_group, functionapp, LINUX_ASP_LOCATION_FUNCTIONAPP, storage_account), checks=[
JMESPathCheck('name', functionapp)
])
self.cmd('functionapp config show -g {} -n {}'.format(resource_group, functionapp), checks=[
JMESPathCheck('linuxFxVersion', 'dotnet|3.1')
])
self.cmd('functionapp config appsettings list -g {} -n {}'.format(resource_group, functionapp)).assert_with_checks([
JMESPathCheck(
"[?name=='FUNCTIONS_EXTENSION_VERSION'].value|[0]", '~3')
])


class FunctionAppServicePlan(ScenarioTest):
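    """Create, update and show function app plans, including Elastic Premium scale-out settings."""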
@ResourceGroupPreparer(location=WINDOWS_ASP_LOCATION_FUNCTIONAPP)
def test_functionapp_app_service_plan(self, resource_group):
plan = self.create_random_name(prefix='funcappplan', length=24)
self.cmd('functionapp plan create -g {} -n {} --sku S1' .format(resource_group, plan), checks=[
JMESPathCheck('sku.name', 'S1')
])
@ResourceGroupPreparer(location=WINDOWS_ASP_LOCATION_FUNCTIONAPP)
def test_functionapp_elastic_plan(self, resource_group):
plan = self.create_random_name(prefix='funcappplan', length=24)
self.cmd('functionapp plan create -g {} -n {} --sku EP1 --min-instances 4 --max-burst 12' .format(resource_group, plan), checks=[
JMESPathCheck('maximumElasticWorkerCount', 12),
JMESPathCheck('sku.name', 'EP1'),
JMESPathCheck('sku.capacity', 4)
])
self.cmd('functionapp plan update -g {} -n {} --min-instances 5 --max-burst 11' .format(resource_group, plan), checks=[
JMESPathCheck('maximumElasticWorkerCount', 11),
JMESPathCheck('sku.name', 'EP1'),
JMESPathCheck('sku.capacity', 5)
])
self.cmd('functionapp plan show -g {} -n {} '.format(resource_group, plan), checks=[
JMESPathCheck('maximumElasticWorkerCount', 11),
JMESPathCheck('sku.name', 'EP1'),
JMESPathCheck('sku.capacity', 5)
])
        # clean up the plan (no function app was created in this test)
        self.cmd('functionapp plan delete -g {} -n {}'.format(resource_group, plan))


class FunctionAppServicePlanLinux(ScenarioTest):
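    """Create a Linux function app plan and verify its kind."""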
@ResourceGroupPreparer(location=LINUX_ASP_LOCATION_FUNCTIONAPP)
def test_functionapp_app_service_plan_linux(self, resource_group):
plan = self.create_random_name(prefix='funcappplan', length=24)
self.cmd('functionapp plan create -g {} -n {} --sku S1 --is-linux' .format(resource_group, plan), checks=[
JMESPathCheck('sku.name', 'S1'),
JMESPathCheck('kind', 'linux')
])


class FunctionAppSlotTests(ScenarioTest):
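    """Create function app deployment slots, update slot app settings, and swap a slot into production."""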
@ResourceGroupPreparer(location=WINDOWS_ASP_LOCATION_FUNCTIONAPP)
@StorageAccountPreparer()
def test_functionapp_slot_creation(self, resource_group, storage_account):
plan = self.create_random_name(prefix='funcappplan', length=24)
functionapp = self.create_random_name(
prefix='functionapp-slot', length=24)
slotname = self.create_random_name(prefix='slotname', length=24)
self.cmd('functionapp plan create -g {} -n {} --sku S1'.format(resource_group, plan), checks=[
JMESPathCheck('sku.name', 'S1'),
])
self.cmd('functionapp create -g {} -n {} --plan {} -s {} --runtime node'.format(resource_group, functionapp, plan,
storage_account), checks=[
JMESPathCheck('name', functionapp)
])
self.cmd('functionapp deployment slot create -g {} -n {} --slot {}'.format(resource_group, functionapp, slotname),
checks=[
JMESPathCheck('name', slotname),
JMESPathCheck('type', 'Microsoft.Web/sites/slots'),
])
pre_slot_list = self.cmd('functionapp deployment slot list -g {} -n {}'.format(resource_group, functionapp),
checks=[
JMESPathCheck("[?name=='{}'].type|[0]".format(
slotname), 'Microsoft.Web/sites/slots')
]).get_output_in_json()
self.assertEqual(len(pre_slot_list), 1)
self.cmd('functionapp deployment slot delete -g {} -n {} --slot {}'.format(
resource_group, functionapp, slotname))
deleted_slot_list = self.cmd('functionapp deployment slot list -g {} -n {}'.format(
resource_group, functionapp)).get_output_in_json()
self.assertEqual(len(deleted_slot_list), 0)
self.cmd('functionapp delete -g {} -n {}'.format(resource_group, functionapp))
@ResourceGroupPreparer(location=WINDOWS_ASP_LOCATION_FUNCTIONAPP)
@StorageAccountPreparer()
def test_functionapp_slot_appsetting_update(self, resource_group, storage_account):
plan = self.create_random_name(prefix='funcappplan', length=24)
functionapp = self.create_random_name(
prefix='functionapp-slot', length=24)
slotname = self.create_random_name(prefix='slotname', length=24)
self.cmd('functionapp plan create -g {} -n {} --sku S1'.format(resource_group, plan), checks=[
JMESPathCheck('sku.name', 'S1'),
])
self.cmd('functionapp create -g {} -n {} --plan {} -s {} --runtime node'.format(resource_group, functionapp, plan,
storage_account), checks=[
JMESPathCheck('name', functionapp)
])
self.cmd('functionapp deployment slot create -g {} -n {} --slot {}'.format(resource_group, functionapp, slotname), checks=[
JMESPathCheck('name', slotname)
])
self.cmd('functionapp config appsettings set -g {} -n {} --slot {} --slot-settings FOO=BAR'.format(resource_group, functionapp,
slotname), checks=[
JMESPathCheck("[?name=='FOO'].value|[0]", 'BAR'),
JMESPathCheck("[?name=='FOO'].slotSetting|[0]", True)
])
self.cmd('functionapp config appsettings list -g {} -n {} --slot {}'.format(resource_group, functionapp, slotname), checks=[
JMESPathCheck("[?name=='FOO'].value|[0]", 'BAR'),
JMESPathCheck("[?name=='FOO'].slotSetting|[0]", True)
])
self.cmd('functionapp delete -g {} -n {}'.format(resource_group, functionapp))
@ResourceGroupPreparer(location=WINDOWS_ASP_LOCATION_FUNCTIONAPP)
@StorageAccountPreparer()
def test_functionapp_slot_swap(self, resource_group, storage_account):
plan = self.create_random_name(prefix='funcappplan', length=24)
functionapp = self.create_random_name(
prefix='functionapp-slot', length=24)
slotname = self.create_random_name(prefix='slotname', length=24)
self.cmd('functionapp plan create -g {} -n {} --sku S1'.format(resource_group, plan), checks=[
JMESPathCheck('sku.name', 'S1'),
])
self.cmd('functionapp create -g {} -n {} --plan {} -s {} --runtime node'.format(resource_group, functionapp,
plan,
storage_account), checks=[
JMESPathCheck('name', functionapp)
])
self.cmd('functionapp deployment slot create -g {} -n {} --slot {}'.format(resource_group, functionapp,
slotname), checks=[
JMESPathCheck('name', slotname)
])
self.cmd('functionapp config appsettings set -g {} -n {} --slot {} --settings FOO=BAR'.format(resource_group, functionapp,
slotname), checks=[
JMESPathCheck("[?name=='FOO'].value|[0]", 'BAR')
])
self.cmd('functionapp deployment slot swap -g {} -n {} --slot {} --action swap'.format(
resource_group, functionapp, slotname))
self.cmd('functionapp config appsettings list -g {} -n {}'.format(resource_group, functionapp), checks=[
JMESPathCheck("[?name=='FOO'].value|[0]", 'BAR')
])
self.cmd('functionapp delete -g {} -n {}'.format(resource_group, functionapp))


class WebappAuthenticationTest(ScenarioTest):
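    """Show and update web app authentication (Easy Auth) settings."""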
@ResourceGroupPreparer(name_prefix='cli_test_webapp_authentication', location=WINDOWS_ASP_LOCATION_WEBAPP)
def test_webapp_authentication(self, resource_group):
webapp_name = self.create_random_name('webapp-authentication-test', 40)
plan_name = self.create_random_name('webapp-authentication-plan', 40)
self.cmd(
'appservice plan create -g {} -n {} --sku S1'.format(resource_group, plan_name))
self.cmd(
'webapp create -g {} -n {} --plan {}'.format(resource_group, webapp_name, plan_name))
        # verify the show command returns the default (unset) fields for a newly created app
self.cmd('webapp auth show -g {} -n {}'.format(resource_group, webapp_name)).assert_with_checks([
JMESPathCheck('unauthenticatedClientAction', None),
JMESPathCheck('defaultProvider', None),
JMESPathCheck('enabled', False),
JMESPathCheck('tokenStoreEnabled', None),
JMESPathCheck('allowedExternalRedirectUrls', None),
JMESPathCheck('tokenRefreshExtensionHours', None),
JMESPathCheck('runtimeVersion', None),
JMESPathCheck('clientId', None),
JMESPathCheck('clientSecret', None),
JMESPathCheck('allowedAudiences', None),
JMESPathCheck('issuer', None),
JMESPathCheck('facebookAppId', None),
JMESPathCheck('facebookAppSecret', None),
JMESPathCheck('facebookOauthScopes', None)
])
# update and verify
result = self.cmd('webapp auth update -g {} -n {} --enabled true --action LoginWithFacebook '
'--token-store false --token-refresh-extension-hours 7.2 --runtime-version 1.2.8 '
'--aad-client-id aad_client_id --aad-client-secret aad_secret '
'--aad-allowed-token-audiences https://audience1 --aad-token-issuer-url https://issuer_url '
'--facebook-app-id facebook_id --facebook-app-secret facebook_secret '
'--facebook-oauth-scopes public_profile email'
.format(resource_group, webapp_name)).assert_with_checks([
JMESPathCheck(
'unauthenticatedClientAction', 'RedirectToLoginPage'),
JMESPathCheck('defaultProvider', 'Facebook'),
JMESPathCheck('enabled', True),
JMESPathCheck('tokenStoreEnabled', False),
JMESPathCheck('tokenRefreshExtensionHours', 7.2),
JMESPathCheck('runtimeVersion', '1.2.8'),
JMESPathCheck('clientId', 'aad_client_id'),
JMESPathCheck('clientSecret', 'aad_secret'),
JMESPathCheck('issuer', 'https://issuer_url'),
JMESPathCheck('facebookAppId', 'facebook_id'),
JMESPathCheck('facebookAppSecret', 'facebook_secret')]).get_output_in_json()
self.assertIn('https://audience1', result['allowedAudiences'])
self.assertIn('email', result['facebookOauthScopes'])
self.assertIn('public_profile', result['facebookOauthScopes'])


class WebappUpdateTest(ScenarioTest):
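    """Update web app properties and tags, including on a deployment slot."""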
@ResourceGroupPreparer(location=WINDOWS_ASP_LOCATION_WEBAPP)
def test_webapp_update(self, resource_group):
webapp_name = self.create_random_name('webapp-update-test', 40)
plan_name = self.create_random_name('webapp-update-plan', 40)
self.cmd('appservice plan create -g {} -n {} --sku S1'
.format(resource_group, plan_name))
self.cmd('webapp create -g {} -n {} --plan {}'
.format(resource_group, webapp_name, plan_name)).assert_with_checks([
JMESPathCheck('clientAffinityEnabled', True)])
# testing update command with --set
self.cmd('webapp update -g {} -n {} --client-affinity-enabled false --set tags.foo=bar'
.format(resource_group, webapp_name)).assert_with_checks([
JMESPathCheck('name', webapp_name),
JMESPathCheck('tags.foo', 'bar'),
JMESPathCheck('clientAffinityEnabled', False)])
# try out on slots
self.cmd(
'webapp deployment slot create -g {} -n {} -s s1'.format(resource_group, webapp_name))
self.cmd('webapp update -g {} -n {} -s s1 --client-affinity-enabled true'.format(resource_group, webapp_name), checks=[
self.check('clientAffinityEnabled', True)
])


class WebappZipDeployScenarioTest(ScenarioTest):
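    """Deploy a zip package to a web app via 'webapp deployment source config-zip'."""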
@ResourceGroupPreparer(name_prefix='cli_test_webapp_zipDeploy', location=WINDOWS_ASP_LOCATION_WEBAPP)
def test_deploy_zip(self, resource_group):
webapp_name = self.create_random_name('webapp-zipDeploy-test', 40)
plan_name = self.create_random_name('webapp-zipDeploy-plan', 40)
zip_file = os.path.join(TEST_DIR, 'test.zip')
self.cmd(
'appservice plan create -g {} -n {} --sku S1'.format(resource_group, plan_name))
self.cmd(
'webapp create -g {} -n {} --plan {}'.format(resource_group, webapp_name, plan_name))
self.cmd('webapp deployment source config-zip -g {} -n {} --src "{}"'.format(resource_group, webapp_name, zip_file)).assert_with_checks([
JMESPathCheck('status', 4),
JMESPathCheck('deployer', 'ZipDeploy'),
JMESPathCheck('message', 'Created via a push deployment'),
JMESPathCheck('complete', True)
])


# Disabled due to issue https://github.com/Azure/azure-cli/issues/10705
# class FunctionappRemoteBuildScenarioTest(ScenarioTest):
# @ResourceGroupPreparer()
# @StorageAccountPreparer()
# def test_functionapp_remote_build(self, resource_group, storage_account):
# functionapp_name = self.create_random_name(prefix='faremotebuildapp', length=24)
# plan_name = self.create_random_name(prefix='faremotebuildplan', length=24)
# zip_file = os.path.join(TEST_DIR, 'test_remote_build.zip')
# self.cmd('functionapp plan create -g {} -n {} --sku S1 --is-linux true'.format(resource_group, plan_name))
# self.cmd('functionapp create -g {} -n {} --plan {} -s {} --os-type Linux --runtime python'.format(resource_group, functionapp_name, plan_name, storage_account))
# self.cmd('functionapp deployment source config-zip -g {} -n {} --src "{}"'.format(resource_group, functionapp_name, zip_file)).assert_with_checks([
# JMESPathCheck('status', 4),
# JMESPathCheck('deployer', 'Push-Deployer'),
# JMESPathCheck('message', 'Created via a push deployment'),
# JMESPathCheck('complete', True)
# ])


class WebappImplictIdentityTest(ScenarioTest):
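    """Assign a system-assigned managed identity with a role assignment, verify it, then remove it."""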
@AllowLargeResponse(8192)
@ResourceGroupPreparer(location=WINDOWS_ASP_LOCATION_WEBAPP)
def test_webapp_assign_system_identity(self, resource_group):
scope = '/subscriptions/{}/resourcegroups/{}'.format(
self.get_subscription_id(), resource_group)
role = 'Reader'
plan_name = self.create_random_name('web-msi-plan', 20)
webapp_name = self.create_random_name('web-msi', 20)
self.cmd(
'appservice plan create -g {} -n {}'.format(resource_group, plan_name))
self.cmd(
'webapp create -g {} -n {} --plan {}'.format(resource_group, webapp_name, plan_name))
with mock.patch('azure.cli.core.commands.arm._gen_guid', side_effect=self.create_guid):
result = self.cmd('webapp identity assign -g {} -n {} --role {} --scope {}'.format(
resource_group, webapp_name, role, scope)).get_output_in_json()
self.cmd('webapp identity show -g {} -n {}'.format(resource_group, webapp_name), checks=[
self.check('principalId', result['principalId'])
])
self.cmd('role assignment list -g {} --assignee {}'.format(resource_group, result['principalId']), checks=[
JMESPathCheck('length([])', 1),
JMESPathCheck('[0].roleDefinitionName', role)
])
self.cmd('webapp identity show -g {} -n {}'.format(resource_group,
webapp_name), checks=self.check('principalId', result['principalId']))
self.cmd(
'webapp identity remove -g {} -n {}'.format(resource_group, webapp_name))
self.cmd('webapp identity show -g {} -n {}'.format(resource_group,
webapp_name), checks=self.is_empty())
class WebappListLocationsFreeSKUTest(ScenarioTest):
@ResourceGroupPreparer(name_prefix='cli_test_webapp_list-locations-free-sku-test')
def test_webapp_list_locations_free_sku(self, resource_group):
asp_F1 = self.cmd(
'appservice list-locations --sku F1').get_output_in_json()
result = self.cmd(
'appservice list-locations --sku Free').get_output_in_json()
self.assertEqual(asp_F1, result)
class WebappTriggeredWebJobListTest(ScenarioTest):
@record_only()
@ResourceGroupPreparer()
def test_webapp_triggeredWebjob_list(self, resource_group):
        # Testing this using a webjob that was already created.
        # Given there is no create command, in order to re-record please create a webjob
        # before recording this. Once the create command is available, remove the "record_only" flag.
resource_group_name = 'cliTestApp'
webapp_name = 'cliTestApp'
webjob_name = 'test-triggered'
# list test
self.cmd('webapp webjob triggered list -g {} -n {}'
.format(resource_group_name, webapp_name)).assert_with_checks([
JMESPathCheck('length(@)', 1),
JMESPathCheck(
'[0].name', '{}/{}'.format(webapp_name, webjob_name)),
JMESPathCheck('[0].type', 'Microsoft.Web/sites/triggeredwebjobs')])
class WebappContinuousWebJobE2ETest(ScenarioTest):
@ResourceGroupPreparer()
@record_only()
def test_webapp_continuousWebjob_e2e(self, resource_group):
        # Testing this using a webjob that was already created.
        # Given there is no create command, in order to re-record please create a webjob
        # before recording this. Once the create command is available, remove the "record_only" flag.
resource_group_name = 'cliTestApp'
webapp_name = 'cliTestApp'
webjob_name = 'test-continuous'
# list test
self.cmd('webapp webjob continuous list -g {} -n {}'
.format(resource_group_name, webapp_name)).assert_with_checks([
JMESPathCheck('length(@)', 1),
JMESPathCheck(
'[0].name', '{}/{}'.format(webapp_name, webjob_name)),
JMESPathCheck('[0].type', 'Microsoft.Web/sites/continuouswebjobs')])
# start
self.cmd('webapp webjob continuous start -g {} -n {} -w {}'
.format(resource_group_name, webapp_name, webjob_name)).assert_with_checks([
JMESPathCheck('status', 'Running')])
# stop
self.cmd('webapp webjob continuous stop -g {} -n {} -w {}'
.format(resource_group_name, webapp_name, webjob_name)).assert_with_checks([
JMESPathCheck('status', 'Disabling')])
class WebappWindowsContainerBasicE2ETest(ScenarioTest):
@AllowLargeResponse()
@ResourceGroupPreparer(name_prefix='webapp_hyperv_e2e', location='eastus')
def test_webapp_hyperv_e2e(self, resource_group):
webapp_name = self.create_random_name(
prefix='webapp-hyperv-e2e', length=24)
plan = self.create_random_name(prefix='webapp-hyperv-plan', length=24)
self.cmd(
'appservice plan create -g {} -n {} --hyper-v --sku PC2'.format(resource_group, plan))
self.cmd('appservice plan list -g {}'.format(resource_group), checks=[
JMESPathCheck('length(@)', 1),
JMESPathCheck('[0].name', plan),
JMESPathCheck('[0].sku.tier', 'PremiumContainer'),
JMESPathCheck('[0].sku.name', 'PC2')
])
self.cmd('appservice plan list -g {}'.format(resource_group), checks=[
JMESPathCheck("length([?name=='{}' && resourceGroup=='{}'])".format(
plan, resource_group), 1)
])
self.cmd('appservice plan show -g {} -n {}'.format(resource_group, plan), checks=[
JMESPathCheck('name', plan)
])
self.cmd('webapp create -g {} -n {} --plan {} --deployment-container-image-name "DOCKER|microsoft/iis:nanoserver-sac2016"'.format(resource_group, webapp_name, plan), checks=[
JMESPathCheck('state', 'Running'),
JMESPathCheck('name', webapp_name),
JMESPathCheck('hostNames[0]', webapp_name + '.azurewebsites.net')
])
self.cmd('webapp list -g {}'.format(resource_group), checks=[
JMESPathCheck('length(@)', 1),
JMESPathCheck('[0].name', webapp_name),
JMESPathCheck('[0].hostNames[0]', webapp_name +
'.azurewebsites.net')
])
self.cmd('webapp config show -g {} -n {}'.format(resource_group, webapp_name), checks=[
JMESPathCheck('windowsFxVersion',
"DOCKER|microsoft/iis:nanoserver-sac2016"),
JMESPathCheck('linuxFxVersion', "")
])
self.cmd('webapp config set -g {} -n {} --windows-fx-version "DOCKER|microsoft/iis"'.format(
resource_group, webapp_name))
self.cmd('webapp config show -g {} -n {}'.format(resource_group, webapp_name), checks=[
JMESPathCheck('windowsFxVersion', "DOCKER|microsoft/iis"),
JMESPathCheck('linuxFxVersion', "")
])
    # Always On is not supported on all SKUs; this tests that webapp create does not fail while trying to enable AlwaysOn
@ResourceGroupPreparer(name_prefix='cli_test_webapp_alwaysOn', location=WINDOWS_ASP_LOCATION_WEBAPP)
def test_webapp_create_noAlwaysOn(self, resource_group):
webapp_name = self.create_random_name('webapp-create-alwaysOn-e2e', 44)
plan = self.create_random_name('plan-create-alwaysOn-e2e', 44)
self.cmd(
'appservice plan create -g {} -n {} --sku SHARED'.format(resource_group, plan))
self.cmd(
'webapp create -g {} -n {} --plan {}'.format(resource_group, webapp_name, plan))
# verify alwaysOn
self.cmd('webapp config show -g {} -n {}'.format(resource_group, webapp_name)).assert_with_checks([
JMESPathCheck('alwaysOn', False)])
@ResourceGroupPreparer(name_prefix='cli_test_webapp_linux_free', location=LINUX_ASP_LOCATION_WEBAPP)
def test_webapp_create_linux_free(self, resource_group):
webapp_name = self.create_random_name('webapp-linux-free', 24)
plan = self.create_random_name('plan-linux-free', 24)
self.cmd('appservice plan create -g {} -n {} --sku F1 --is-linux'.format(resource_group, plan), checks=[
            # the "reserved" flag set to True indicates a Linux plan
JMESPathCheck('reserved', True),
JMESPathCheck('sku.name', 'F1')])
self.cmd('webapp create -g {} -n {} --plan {} -u {} -r "node|10.14"'.format(resource_group, webapp_name, plan,
TEST_REPO_URL))
# verify alwaysOn
self.cmd('webapp config show -g {} -n {}'.format(resource_group, webapp_name)).assert_with_checks([
JMESPathCheck('alwaysOn', False)])
class WebappNetworkConnectionTests(ScenarioTest):
@AllowLargeResponse()
@ResourceGroupPreparer(location=WINDOWS_ASP_LOCATION_WEBAPP)
def test_webapp_hybridconnectionE2E(self, resource_group):
webapp_name = self.create_random_name('hcwebapp', 24)
plan = self.create_random_name('hcplan', 24)
namespace_name = self.create_random_name('hcnamespace', 24)
hyco_name = self.create_random_name('hcname', 24)
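        # User metadata for the hybrid connection: braces are doubled and quotes
        # escaped so that the JSON-style payload (key "endpoint", value "vmsq1:80")
        # survives the extra formatting and argument parsing applied by self.cmd.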
um = "[{{\\\"key\\\":\\\"endpoint\\\",\\\"value\\\":\\\"vmsq1:80\\\"}}]"
self.cmd(
'appservice plan create -g {} -n {} --sku S1'.format(resource_group, plan))
self.cmd(
'webapp create -g {} -n {} --plan {}'.format(resource_group, webapp_name, plan))
self.cmd(
'relay namespace create -g {} --name {}'.format(resource_group, namespace_name))
self.cmd('relay hyco create -g {} --namespace-name {} --name {} --user-metadata {}'.format(
resource_group, namespace_name, hyco_name, um))
self.cmd('webapp hybrid-connection add -g {} -n {} --namespace {} --hybrid-connection {}'.format(
resource_group, webapp_name, namespace_name, hyco_name))
self.cmd('webapp hybrid-connection list -g {} -n {}'.format(resource_group, webapp_name), checks=[
JMESPathCheck('length(@)', 1),
JMESPathCheck('[0].name', hyco_name)
])
self.cmd('webapp hybrid-connection remove -g {} -n {} --namespace {} --hybrid-connection {}'.format(
resource_group, webapp_name, namespace_name, hyco_name))
self.cmd('webapp hybrid-connection list -g {} -n {}'.format(resource_group, webapp_name), checks=[
JMESPathCheck('length(@)', 0)
])
@ResourceGroupPreparer(location=WINDOWS_ASP_LOCATION_WEBAPP)
def test_webapp_vnetE2E(self, resource_group):
webapp_name = self.create_random_name('swiftwebapp', 24)
plan = self.create_random_name('swiftplan', 24)
subnet_name = self.create_random_name('swiftsubnet', 24)
vnet_name = self.create_random_name('swiftname', 24)
self.cmd('network vnet create -g {} -n {} --address-prefix 10.0.0.0/16 --subnet-name {} --subnet-prefix 10.0.0.0/24'.format(
resource_group, vnet_name, subnet_name))
self.cmd(
'appservice plan create -g {} -n {} --sku P1V2'.format(resource_group, plan))
self.cmd(
'webapp create -g {} -n {} --plan {}'.format(resource_group, webapp_name, plan))
self.cmd('webapp vnet-integration add -g {} -n {} --vnet {} --subnet {}'.format(
resource_group, webapp_name, vnet_name, subnet_name))
self.cmd('webapp vnet-integration list -g {} -n {}'.format(resource_group, webapp_name), checks=[
JMESPathCheck('length(@)', 1),
JMESPathCheck('[0].name', subnet_name)
])
self.cmd(
'webapp vnet-integration remove -g {} -n {}'.format(resource_group, webapp_name))
self.cmd('webapp vnet-integration list -g {} -n {}'.format(resource_group, webapp_name), checks=[
JMESPathCheck('length(@)', 0)
])
@ResourceGroupPreparer(location=WINDOWS_ASP_LOCATION_WEBAPP)
def test_webapp_vnetDelegation(self, resource_group):
webapp_name = self.create_random_name('swiftwebapp', 24)
plan = self.create_random_name('swiftplan', 24)
subnet_name = self.create_random_name('swiftsubnet', 24)
vnet_name = self.create_random_name('swiftname', 24)
self.cmd('network vnet create -g {} -n {} --address-prefix 10.0.0.0/16 --subnet-name {} --subnet-prefix 10.0.0.0/24'.format(
resource_group, vnet_name, subnet_name))
self.cmd('network vnet subnet update -g {} --vnet {} --name {} --delegations Microsoft.Web/serverfarms --service-endpoints Microsoft.Storage'.format(
resource_group, vnet_name, subnet_name))
self.cmd(
'appservice plan create -g {} -n {} --sku P1V2'.format(resource_group, plan))
self.cmd(
'webapp create -g {} -n {} --plan {}'.format(resource_group, webapp_name, plan))
self.cmd('webapp vnet-integration add -g {} -n {} --vnet {} --subnet {}'.format(
resource_group, webapp_name, vnet_name, subnet_name))
self.cmd('webapp vnet-integration list -g {} -n {}'.format(resource_group, webapp_name), checks=[
JMESPathCheck('length(@)', 1),
JMESPathCheck('[0].name', subnet_name)
])
        self.cmd('network vnet subnet show -g {} -n {} --vnet-name {}'.format(resource_group, subnet_name, vnet_name), checks=[
JMESPathCheck('serviceEndpoints[0].service', "Microsoft.Storage")
])
self.cmd(
'webapp vnet-integration remove -g {} -n {}'.format(resource_group, webapp_name))
self.cmd('webapp vnet-integration list -g {} -n {}'.format(resource_group, webapp_name), checks=[
JMESPathCheck('length(@)', 0)
])
@ResourceGroupPreparer(location=WINDOWS_ASP_LOCATION_WEBAPP)
def test_webapp_vnetSameName(self, resource_group):
resource_group_2 = self.create_random_name('swiftwebapp', 24)
webapp_name = self.create_random_name('swiftwebapp', 24)
plan = self.create_random_name('swiftplan', 24)
subnet_name = self.create_random_name('swiftsubnet', 24)
subnet_name_2 = self.create_random_name('swiftsubnet', 24)
vnet_name = self.create_random_name('swiftname', 24)
self.cmd('network vnet create -g {} -n {} --address-prefix 10.0.0.0/16 --subnet-name {} --subnet-prefix 10.0.0.0/24'.format(
resource_group, vnet_name, subnet_name))
self.cmd('group create -n {} -l {}'.format(resource_group_2, WINDOWS_ASP_LOCATION_WEBAPP))
vnet = self.cmd('network vnet create -g {} -n {} --address-prefix 10.0.0.0/16 --subnet-name {} --subnet-prefix 10.0.0.0/24'.format(
resource_group_2, vnet_name, subnet_name_2)).get_output_in_json()
self.cmd(
'appservice plan create -g {} -n {} --sku P1V2'.format(resource_group, plan))
self.cmd(
'webapp create -g {} -n {} --plan {}'.format(resource_group, webapp_name, plan))
        # Add vnet integration where there are two vnets with the same name. The chosen vnet should default to the one in the same RG
self.cmd('webapp vnet-integration add -g {} -n {} --vnet {} --subnet {}'.format(
resource_group, webapp_name, vnet_name, subnet_name))
self.cmd('webapp vnet-integration list -g {} -n {}'.format(resource_group, webapp_name), checks=[
JMESPathCheck('length(@)', 1),
JMESPathCheck('[0].name', subnet_name)
])
self.cmd(
'webapp vnet-integration remove -g {} -n {}'.format(resource_group, webapp_name))
self.cmd('webapp vnet-integration list -g {} -n {}'.format(resource_group, webapp_name), checks=[
JMESPathCheck('length(@)', 0)
])
# Add vnet integration using vnet resource ID
self.cmd('webapp vnet-integration add -g {} -n {} --vnet {} --subnet {}'.format(
resource_group, webapp_name, vnet['newVNet']['id'], subnet_name_2))
self.cmd('webapp vnet-integration list -g {} -n {}'.format(resource_group, webapp_name), checks=[
JMESPathCheck('length(@)', 1),
JMESPathCheck('[0].name', subnet_name_2)
])
# self.cmd(
# 'webapp vnet-integration remove -g {} -n {}'.format(resource_group, webapp_name))
# self.cmd('webapp vnet-integration list -g {} -n {}'.format(resource_group, webapp_name), checks=[
# JMESPathCheck('length(@)', 0)
# ])
# LiveScenarioTest due to issue https://github.com/Azure/azure-cli/issues/10705
class FunctionappDeploymentLogsScenarioTest(LiveScenarioTest):
@ResourceGroupPreparer(location=WINDOWS_ASP_LOCATION_FUNCTIONAPP)
@StorageAccountPreparer()
def test_functionapp_show_deployment_logs(self, resource_group, storage_account):
functionapp_name = self.create_random_name(prefix='show-deployment-functionapp', length=40)
plan_name = self.create_random_name(prefix='show-deployment-functionapp', length=40)
zip_file = os.path.join(TEST_DIR, 'sample_dotnet_function/sample_dotnet_function.zip')
self.cmd('functionapp plan create -g {} -n {} --sku S1'.format(resource_group, plan_name))
self.cmd('functionapp create -g {} -n {} --plan {} -s {} --runtime dotnet'.format(resource_group, functionapp_name, plan_name, storage_account))
self.cmd('functionapp log deployment show -g {} -n {}'.format(resource_group, functionapp_name), checks=[
JMESPathCheck('length(@)', 0)
])
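        # Hit the Kudu (scm) endpoint and give it time to come up before pushing the zip.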
requests.get('http://{}.scm.azurewebsites.net'.format(functionapp_name), timeout=240)
time.sleep(30)
deployment_1 = self.cmd('functionapp deployment source config-zip -g {} -n {} --src "{}"'.format(resource_group, functionapp_name, zip_file)).assert_with_checks([
JMESPathCheck('status', 4),
JMESPathCheck('deployer', 'ZipDeploy'),
JMESPathCheck('complete', True)
]).get_output_in_json()
self.cmd('functionapp log deployment show -g {} -n {}'.format(resource_group, functionapp_name), checks=[
JMESPathCheck('length(@) > `0`', True)
])
self.cmd('functionapp log deployment show -g {} -n {} --deployment-id={}'.format(resource_group, functionapp_name, deployment_1['id']), checks=[
JMESPathCheck('length(@) > `0`', True)
])
@ResourceGroupPreparer(location=WINDOWS_ASP_LOCATION_FUNCTIONAPP)
@StorageAccountPreparer()
def test_functionapp_list_deployment_logs(self, resource_group, storage_account):
functionapp_name = self.create_random_name(prefix='show-deployment-funcapp', length=40)
plan_name = self.create_random_name(prefix='show-deployment-funcapp', length=40)
zip_file = os.path.join(TEST_DIR, 'sample_dotnet_function/sample_dotnet_function.zip')
self.cmd('functionapp plan create -g {} -n {} --sku S1'.format(resource_group, plan_name))
self.cmd('functionapp create -g {} -n {} --plan {} -s {} --runtime dotnet'.format(resource_group, functionapp_name, plan_name, storage_account))
self.cmd('functionapp log deployment list -g {} -n {}'.format(resource_group, functionapp_name), checks=[
JMESPathCheck('length(@)', 0)
])
requests.get('http://{}.scm.azurewebsites.net'.format(functionapp_name), timeout=240)
time.sleep(30)
deployment_1 = self.cmd('functionapp deployment source config-zip -g {} -n {} --src "{}"'.format(resource_group, functionapp_name, zip_file)).assert_with_checks([
JMESPathCheck('status', 4),
JMESPathCheck('deployer', 'ZipDeploy'),
JMESPathCheck('complete', True)
]).get_output_in_json()
self.cmd('functionapp log deployment list -g {} -n {}'.format(resource_group, functionapp_name), checks=[
JMESPathCheck('length(@)', 1),
JMESPathCheck('[0].id', deployment_1['id']),
])
requests.get('http://{}.scm.azurewebsites.net'.format(functionapp_name), timeout=240)
time.sleep(30)
self.cmd('functionapp deployment source config-zip -g {} -n {} --src "{}"'.format(resource_group, functionapp_name, zip_file)).assert_with_checks([
JMESPathCheck('status', 4),
JMESPathCheck('deployer', 'ZipDeploy'),
JMESPathCheck('complete', True)
]).get_output_in_json()
self.cmd('functionapp log deployment list -g {} -n {}'.format(resource_group, functionapp_name), checks=[
JMESPathCheck('length(@)', 2)
])
class WebappDeploymentLogsScenarioTest(ScenarioTest):
@ResourceGroupPreparer(location=WINDOWS_ASP_LOCATION_WEBAPP)
def test_webapp_show_deployment_logs(self, resource_group):
webapp_name = self.create_random_name('show-deployment-webapp', 40)
plan_name = self.create_random_name('show-deployment-plan', 40)
zip_file = os.path.join(TEST_DIR, 'test.zip')
self.cmd('appservice plan create -g {} -n {} --sku S1'.format(resource_group, plan_name))
self.cmd('webapp create -g {} -n {} --plan {}'.format(resource_group, webapp_name, plan_name))
self.cmd('webapp log deployment show -g {} -n {}'.format(resource_group, webapp_name), checks=[
JMESPathCheck('length(@)', 0)
])
deployment_1 = self.cmd('webapp deployment source config-zip -g {} -n {} --src "{}"'.format(resource_group, webapp_name, zip_file)).get_output_in_json()
self.cmd('webapp log deployment show -g {} -n {}'.format(resource_group, webapp_name), checks=[
JMESPathCheck('length(@) > `0`', True),
])
self.cmd('webapp log deployment show -g {} -n {} --deployment-id={}'.format(resource_group, webapp_name, deployment_1['id']), checks=[
JMESPathCheck('length(@) > `0`', True),
])
@ResourceGroupPreparer(location=WINDOWS_ASP_LOCATION_WEBAPP)
def test_webapp_list_deployment_logs(self, resource_group):
webapp_name = self.create_random_name('list-deployment-webapp', 40)
plan_name = self.create_random_name('list-deployment-plan', 40)
zip_file = os.path.join(TEST_DIR, 'test.zip')
self.cmd('appservice plan create -g {} -n {} --sku S1'.format(resource_group, plan_name))
self.cmd('webapp create -g {} -n {} --plan {}'.format(resource_group, webapp_name, plan_name))
self.cmd('webapp log deployment list -g {} -n {}'.format(resource_group, webapp_name), checks=[
JMESPathCheck('length(@)', 0)
])
deployment_1 = self.cmd('webapp deployment source config-zip -g {} -n {} --src "{}"'.format(resource_group, webapp_name, zip_file)).get_output_in_json()
self.cmd('webapp log deployment list -g {} -n {}'.format(resource_group, webapp_name), checks=[
JMESPathCheck('length(@)', 1),
JMESPathCheck('[0].id', deployment_1['id']),
])
self.cmd('webapp deployment source config-zip -g {} -n {} --src "{}"'.format(resource_group, webapp_name, zip_file)).get_output_in_json()
self.cmd('webapp log deployment list -g {} -n {}'.format(resource_group, webapp_name), checks=[
JMESPathCheck('length(@)', 2)
])
class WebappLocalContextScenarioTest(LocalContextScenarioTest):
@ResourceGroupPreparer(location=WINDOWS_ASP_LOCATION_WEBAPP)
def test_webapp_local_context(self, resource_group):
from knack.util import CLIError
self.kwargs.update({
'plan_name': self.create_random_name(prefix='webapp-plan-', length=24),
'webapp_name': self.create_random_name(prefix='webapp-', length=24)
})
self.cmd('appservice plan create -g {rg} -n {plan_name}')
self.cmd('appservice plan show')
with self.assertRaises(CLIError):
self.cmd('appservice plan delete')
self.cmd('webapp create -n {webapp_name}')
self.cmd('webapp show')
with self.assertRaises(CLIError):
self.cmd('webapp delete')
self.cmd('webapp delete -n {webapp_name}')
self.cmd('appservice plan delete -n {plan_name} -y')
class FunctionappLocalContextScenarioTest(LocalContextScenarioTest):
@ResourceGroupPreparer(location=WINDOWS_ASP_LOCATION_FUNCTIONAPP)
@StorageAccountPreparer()
def test_functionapp_local_context(self, resource_group, storage_account):
from knack.util import CLIError
self.kwargs.update({
'plan_name': self.create_random_name(prefix='functionapp-plan-', length=24),
'functionapp_name': self.create_random_name(prefix='functionapp-', length=24),
'storage_account': storage_account
})
self.cmd('functionapp plan create -g {rg} -n {plan_name} --sku B2')
self.cmd('functionapp plan show')
with self.assertRaises(CLIError):
self.cmd('functionapp plan delete')
self.cmd('functionapp create -n {functionapp_name} --storage-account {storage_account}')
self.cmd('functionapp show')
with self.assertRaises(CLIError):
self.cmd('functionapp delete')
self.cmd('functionapp delete -n {functionapp_name}')
self.cmd('functionapp plan delete -n {plan_name} -y')
if __name__ == '__main__':
unittest.main()
| 56.776769 | 239 | 0.623646 | [
"MIT"
] | kceiw/azure-cli | src/azure-cli/azure/cli/command_modules/appservice/tests/latest/test_webapp_commands.py | 150,061 | Python |
'''
Timestamp util for parsing logs
'''
import datetime
import sys
from dateutil.parser import parser
class TimeUtil:
def __init__(self, start_win=None, end_win=None):
self.parser = parser()
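        # The window bounds may be given either as numeric Unix timestamps or as
        # date/time strings that dateutil can parse (fuzzy parsing is enabled).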
try:
self.start_win = datetime.datetime.fromtimestamp(start_win)
self.end_win = datetime.datetime.fromtimestamp(end_win)
except TypeError:
try:
self.start_win = self.parser.parse(start_win, fuzzy=True)
self.end_win = self.parser.parse(end_win, fuzzy=True)
            except Exception as err:
sys.stderr.write("Invalid window, start: %s, end: %s, error: %s\n"
% (start_win, end_win, err))
if self.start_win > self.end_win:
sys.stderr.write("Bad window, start: %s, end: %s, start > end\n"
% (start_win, end_win))
def print_window(self):
print "Window start: %s, end: %s" % (self.start_win, self.end_win)
def is_before_window(self, timestamp):
if type(timestamp) is datetime.datetime:
time = timestamp
else:
time = self.parse(timestamp)
if time is not None:
try:
if self.start_win.utctimetuple() > time.utctimetuple():
return True
            except Exception:
return False
return False
def is_after_window(self, timestamp):
if type(timestamp) is datetime.datetime:
time = timestamp
else:
time = self.parse(timestamp)
if time is not None:
try:
if self.end_win.utctimetuple() < time.utctimetuple():
return True
            except Exception:
return False
return False
def is_in_window(self, timestamp):
if type(timestamp) is datetime.datetime:
time = timestamp
else:
time = self.parse(timestamp)
if time is not None:
try:
if self.start_win.utctimetuple() <= time.utctimetuple() \
and time.utctimetuple() <= self.end_win.utctimetuple():
return True
except Exception:
return False
return False
def is_in_window_or_unsure(self, timestamp):
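        # Returns True when the timestamp falls inside the window, and also when it
        # cannot be parsed or compared at all, erring on the side of inclusion.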
if type(timestamp) is datetime.datetime:
time = timestamp
        else:
            time = self.parse(timestamp)
if time is not None:
try:
if self.start_win.utctimetuple() > time.utctimetuple() \
or time.utctimetuple() > self.end_win.utctimetuple():
return False
except Exception:
return True
return True
def is_timestamp(self, timestamp):
try:
res = self.parser.parse(timestamp)
        except Exception:
return False
return True
def parse(self, timestamp):
try:
res = self.parser.parse(timestamp)
        except Exception:
return None
else:
return res | 33.938144 | 83 | 0.523694 | [
"MIT"
] | ksang/error-extractor | lib/timestamp.py | 3,292 | Python |
"""
[1,0,1,0,1] -> correct
[0,0,1,0] -> return 1
[1,1,0,0,1] -> return 2
"""
def solution2(S):
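    # Count mismatches against the two possible alternating patterns
    # (starting with 0 and starting with 1) and return the smaller count.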
ans1, ans2 = 0, 0
check1, check2 = 0, 1
for i in S:
if i != check1:
            ans1 += 1
if i != check2:
ans2 += 1
check1 = 0 if check1 == 1 else 1
check2 = 0 if check2 == 1 else 1
assert(check1 != check2)
print("ans1 : {}, ans2 : {}".format(ans1, ans2))
return min(ans1, ans2)
def solution(S):
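    # Alternative approach: scan forward and backward, greedily flipping coins in
    # place whenever an element and its two neighbours violate the alternating
    # pattern, and return the smaller flip count of the two passes.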
ans1, ans2 = 0, 0
S1 = S.copy()
S2 = S.copy()
leng = len(S)
if leng == 1:
return 0
if leng == 2:
if S[0] != S[1]:
return 0
else:
return 1
# Forward
for idx in range(leng):
if idx == 0 or idx == leng-1:
continue
# [0,0,0] or [1,1,1]
if S1[idx] == S1[idx-1] and S1[idx] == S1[idx+1]:
if S1[idx] == 1:
S1[idx] = 0
else:
S1[idx] = 1
ans1 += 1
# [0, 0, 1]
if S1[idx] == S1[idx-1] and S1[idx] != S1[idx+1]:
if S1[idx-1] == 1:
S1[idx-1] = 0
else:
S1[idx-1] = 1
ans1 += 1
# [1,0,0]
if S1[idx] != S1[idx-1] and S1[idx] == S1[idx+1]:
if S1[idx+1] == 1:
S1[idx+1] = 0
else:
S1[idx+1] = 1
ans1 += 1
# backwards
for idx in range(leng-1,-1,-1):
if idx == 0 or idx == leng-1:
continue
# [0,0,0] or [1,1,1] back
if S2[idx] == S2[idx-1] and S2[idx] == S2[idx+1]:
if S2[idx] == 1:
S2[idx] = 0
else:
S2[idx] = 1
ans2 += 1
# [0, 0, 1]
if S2[idx] == S2[idx-1] and S2[idx] != S2[idx+1]:
if S2[idx-1] == 1:
S2[idx-1] = 0
else:
S2[idx-1] = 1
ans2 += 1
# [1,0,0]
if S2[idx] != S2[idx-1] and S2[idx] == S2[idx+1]:
if S2[idx+1] == 1:
S2[idx+1] = 0
else:
S2[idx+1] = 1
ans2 += 1
return min(ans1, ans2)
# print(solution([0,1,0,1,0,0,0,0,0,1]))
# print(solution([1,0,0,0,0,1,0]))
# print(solution([1,0,0]))
# print(solution([0,0,1]))
# print(solution([1,0,1]))
# print(solution([0,1,0]))
# print(solution2([0,1,0,1,0,0,0,0,0,1]))
print(solution([0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1]))
print(solution2([0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1]))
# print(solution2([1,0,0]))
# print(solution2([0,0,1]))
# print(solution2([1,0,1]))
# print(solution2([0,1,0])) | 25.292453 | 57 | 0.396121 | [
"MIT"
] | joshiaj7/CodingChallenges | python3/coin_flip.py | 2,681 | Python |
DEPS = [
'recipe_engine/context',
'recipe_engine/json',
'recipe_engine/path',
'recipe_engine/python',
'recipe_engine/raw_io',
'recipe_engine/step',
]
| 18 | 26 | 0.697531 | [
"BSD-3-Clause"
] | 1208460349/depot_tools | recipes/recipe_modules/gerrit/__init__.py | 162 | Python |
# Copyright 2015-2021 SWIM.AI inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from swimai.recon._parsers import _OutputMessage
from swimai.recon._writers import _BoolWriter, _IdentWriter, _NumberWriter, _StringWriter, _SlotWriter, _ReconWriter, \
_AttrWriter, _BlockWriter
from swimai.structures import Text, Slot, Attr, Num, Bool
from swimai.structures._structs import _Extant, _Absent, _Record
from test.utils import CustomItem
class TestWriters(unittest.TestCase):
def test_ident_writer_str(self):
# Given
message = 'hello'
# When
actual = _IdentWriter._write(message)
# Then
self.assertIsInstance(actual, _OutputMessage)
self.assertEqual('hello', actual._value)
def test_ident_writer_empty(self):
# Given
message = ''
# When
actual = _IdentWriter._write(message)
# Then
self.assertIsInstance(actual, _OutputMessage)
self.assertEqual('', actual._value)
def test_bool_writer_true(self):
# Given
message = True
# When
actual = _BoolWriter._write(message)
# Then
self.assertIsInstance(actual, _OutputMessage)
self.assertEqual('true', actual._value)
def test_bool_writer_false(self):
# Given
message = False
# When
actual = _BoolWriter._write(message)
# Then
self.assertIsInstance(actual, _OutputMessage)
self.assertEqual('false', actual._value)
def test_bool_writer_none(self):
# Given
message = None
# When
actual = _BoolWriter._write(message)
# Then
self.assertIsInstance(actual, _OutputMessage)
self.assertEqual('false', actual._value)
def test_number_writer_zero(self):
# Given
message = 0
# When
actual = _NumberWriter._write(message)
# Then
self.assertIsInstance(actual, _OutputMessage)
self.assertEqual('0', actual._value)
def test_number_writer_int(self):
# Given
message = 25
# When
actual = _NumberWriter._write(message)
# Then
self.assertIsInstance(actual, _OutputMessage)
self.assertEqual('25', actual._value)
def test_number_writer_float(self):
# Given
message = 0.02
# When
actual = _NumberWriter._write(message)
# Then
self.assertIsInstance(actual, _OutputMessage)
self.assertEqual('0.02', actual._value)
def test_number_writer_none(self):
# Given
message = None
# When
actual = _NumberWriter._write(message)
# Then
self.assertIsInstance(actual, _OutputMessage)
self.assertEqual('', actual._value)
def test_string_writer_str(self):
# Given
message = 'This is dog'
# When
actual = _StringWriter._write(message)
# Then
self.assertIsInstance(actual, _OutputMessage)
self.assertEqual('"This is dog"', actual._value)
def test_string_writer_empty(self):
# Given
message = None
# When
actual = _StringWriter._write(message)
# Then
self.assertIsInstance(actual, _OutputMessage)
self.assertEqual('""', actual._value)
def test_slot_writer_existing_key_and_value(self):
# Given
key = Text.create_from('animal')
value = Text.create_from('dog')
writer = _ReconWriter()
# When
actual = _SlotWriter._write(key=key, writer=writer, value=value)
# Then
self.assertIsInstance(actual, _OutputMessage)
self.assertEqual('animal:dog', actual._value)
def test_slot_writer_existing_key_missing_value(self):
# Given
key = Text.create_from('animal')
writer = _ReconWriter()
# When
actual = _SlotWriter._write(key=key, writer=writer)
# Then
self.assertIsInstance(actual, _OutputMessage)
self.assertEqual('animal:', actual._value)
def test_slot_writer_missing_key_existing_value(self):
# Given
value = Text.create_from('dog')
writer = _ReconWriter()
# When
actual = _SlotWriter._write(value=value, writer=writer)
# Then
self.assertIsInstance(actual, _OutputMessage)
self.assertEqual(':dog', actual._value)
def test_slot_writer_missing_key_and_value(self):
# Given
writer = _ReconWriter()
# When
actual = _SlotWriter._write(writer=writer)
# Then
self.assertIsInstance(actual, _OutputMessage)
self.assertEqual(':', actual._value)
def test_attr_writer_existing_key_and_value_text(self):
# Given
key = Text.create_from('bird')
value = Text.create_from('chirp')
writer = _ReconWriter()
# When
actual = _AttrWriter._write(key=key, writer=writer, value=value)
# Then
self.assertIsInstance(actual, _OutputMessage)
self.assertEqual('@bird(chirp)', actual._message)
def test_attr_writer_existing_key_and_value_slot(self):
# Given
key = Text.create_from('animal')
value = _Record.create()
value.add(Slot.create_slot(Text.create_from('dog'), Text.create_from('bark')))
writer = _ReconWriter()
# When
actual = _AttrWriter._write(key=key, writer=writer, value=value)
# Then
self.assertIsInstance(actual, _OutputMessage)
self.assertEqual('@animal(dog:bark)', actual._message)
def test_attr_writer_missing_key_existing_value(self):
# Given
value = Text.create_from('chirp')
writer = _ReconWriter()
# When
actual = _AttrWriter._write(writer=writer, value=value)
# Then
self.assertIsInstance(actual, _OutputMessage)
self.assertEqual('@(chirp)', actual._message)
def test_attr_writer_existing_key_missing_value(self):
# Given
key = Text.create_from('bird')
writer = _ReconWriter()
# When
actual = _AttrWriter._write(key=key, writer=writer)
# Then
self.assertIsInstance(actual, _OutputMessage)
self.assertEqual('@bird', actual._message)
def test_attr_writer_existing_key_extant_value(self):
# Given
key = Text.create_from('bird')
value = _Extant._get_extant()
writer = _ReconWriter()
# When
actual = _AttrWriter._write(key=key, writer=writer, value=value)
# Then
self.assertIsInstance(actual, _OutputMessage)
self.assertEqual('@bird', actual._message)
def test_attr_writer_missing_key_and_value(self):
# Given
writer = _ReconWriter()
# When
actual = _AttrWriter._write(writer=writer)
# Then
self.assertIsInstance(actual, _OutputMessage)
self.assertEqual('@', actual._message)
def test_block_writer_attr(self):
# Given
items = list()
items.append(Attr.create_attr(Text.create_from('dog'), Text.create_from('bark')))
writer = _ReconWriter()
# When
actual = _BlockWriter._write(items, writer=writer)
# Then
self.assertIsInstance(actual, _OutputMessage)
self.assertEqual('@dog(bark)', actual._message)
def test_block_writer_text_single(self):
# Given
items = list()
items.append(Text.create_from('Dead parrot'))
writer = _ReconWriter()
# When
actual = _BlockWriter._write(items, writer=writer)
# Then
self.assertIsInstance(actual, _OutputMessage)
self.assertEqual('"Dead parrot"', actual._message)
def test_block_writer_text_multiple(self):
# Given
items = list()
items.append(Text.create_from('foo_'))
items.append(Text.create_from('bar'))
writer = _ReconWriter()
# When
actual = _BlockWriter._write(items, writer=writer)
# Then
self.assertIsInstance(actual, _OutputMessage)
self.assertEqual('foo_bar', actual._message)
def test_block_writer_slot_single_first(self):
# Given
items = list()
items.append(Slot.create_slot(Text.create_from('cat'), Text.create_from('meow')))
writer = _ReconWriter()
# When
actual = _BlockWriter._write(items, writer=writer, first=True)
# Then
self.assertIsInstance(actual, _OutputMessage)
self.assertEqual('cat:meow', actual._message)
def test_block_writer_slot_single_not_first(self):
# Given
items = list()
items.append(Slot.create_slot(Text.create_from('cat'), Text.create_from('meow')))
writer = _ReconWriter()
# When
actual = _BlockWriter._write(items, writer=writer, first=False)
# Then
self.assertIsInstance(actual, _OutputMessage)
self.assertEqual(',cat:meow', actual._message)
def test_block_writer_slot_multiple(self):
# Given
items = list()
items.append(Slot.create_slot(Text.create_from('dog'), Text.create_from('bark')))
items.append(Slot.create_slot(Text.create_from('cat'), Text.create_from('meow')))
writer = _ReconWriter()
# When
actual = _BlockWriter._write(items, writer=writer, first=True)
# Then
self.assertIsInstance(actual, _OutputMessage)
self.assertEqual('dog:bark,cat:meow', actual._message)
def test_block_writer_slot_in_attr(self):
# Given
items = list()
record_map = _Record.create()
record_map.add(Slot.create_slot(Text.create_from('cat'), Text.create_from('meow')))
items.append(Attr.create_attr(Text.create_from('animal'), record_map))
writer = _ReconWriter()
# When
actual = _BlockWriter._write(items, writer=writer, first=True)
# Then
self.assertIsInstance(actual, _OutputMessage)
self.assertEqual('@animal(cat:meow)', actual._message)
def test_block_writer_slot_in_attr_and_slot(self):
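        # Items that follow an attribute are serialized inside a braced block,
        # hence the expected '@animal(dog:bark){cat:meow}' below.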
# Given
items = list()
record_map = _Record.create()
record_map.add(Slot.create_slot(Text.create_from('dog'), Text.create_from('bark')))
items.append(Attr.create_attr(Text.create_from('animal'), record_map))
items.append(Slot.create_slot(Text.create_from('cat'), Text.create_from('meow')))
writer = _ReconWriter()
# When
actual = _BlockWriter._write(items, writer=writer, first=True)
# Then
self.assertIsInstance(actual, _OutputMessage)
self.assertEqual('@animal(dog:bark){cat:meow}', actual._message)
def test_block_writer_multiple_attributes(self):
# Given
items = list()
record_map = _Record.create()
dog_map = _Record.create()
dog_map.add(Slot.create_slot(Text.create_from('dog'), Text.create_from('bark')))
record_map.add(Attr.create_attr(Text.create_from('Animal'), dog_map))
cat_map = _Record.create()
cat_map.add(Slot.create_slot(Text.create_from('cat'), Text.create_from('meow')))
record_map.add(Attr.create_attr(Text.create_from('Animal'), cat_map))
bird_map = _Record.create()
bird_map.add(Slot.create_slot(Text.create_from('bird'), Text.create_from('chirp')))
record_map.add(Attr.create_attr(Text.create_from('Animal'), bird_map))
items.append(record_map)
writer = _ReconWriter()
# When
actual = _BlockWriter._write(items, writer=writer, first=True)
# Then
self.assertIsInstance(actual, _OutputMessage)
self.assertEqual('@Animal(dog:bark)@Animal(cat:meow)@Animal(bird:chirp)', actual._message)
def test_block_writer_nested_attributes(self):
# Given
items = list()
name_record = _Record.create()
name_record.add(Slot.create_slot(Text.create_from('Name'), Text.create_from('Collie')))
breed_record = _Record.create()
breed_record.add(Attr.create_attr(Text.create_from('Breed'), name_record))
dog_record = _Record.create()
dog_record.add(Slot.create_slot(Text.create_from('Dog'), breed_record))
species_record = _Record.create()
species_record.add(Attr.create_attr(Text.create_from('Species'), dog_record))
animals_record = _Record.create()
animals_record.add(Slot.create_slot(Text.create_from('Animals'), species_record))
items.append(Attr.create_attr(Text.create_from('Zoo'), animals_record))
writer = _ReconWriter()
# When
actual = _BlockWriter._write(items, writer=writer, first=True)
# Then
self.assertIsInstance(actual, _OutputMessage)
self.assertEqual('@Zoo(Animals:@Species(Dog:@Breed(Name:Collie)))', actual._message)
def test_block_writer_empty(self):
# Given
items = list()
writer = _ReconWriter()
# When
actual = _BlockWriter._write(items, writer=writer)
# Then
self.assertIsInstance(actual, _OutputMessage)
self.assertEqual('', actual._message)
def test_write_record_single(self):
# Given
record = _Record.create()
record.add(Text.create_from('Dog'))
writer = _ReconWriter()
# When
actual = writer._write_record(record)
# Then
self.assertIsInstance(actual, _OutputMessage)
self.assertEqual(3, actual._size)
self.assertEqual('Dog', actual._value)
self.assertEqual('g', actual._last_char)
def test_write_record_multiple(self):
# Given
record = _Record.create()
record.add(Text.create_from('Dog'))
record.add(Text.create_from('Cat'))
writer = _ReconWriter()
# When
actual = writer._write_record(record)
# Then
self.assertIsInstance(actual, _OutputMessage)
self.assertEqual(6, actual._size)
self.assertEqual('DogCat', actual._value)
self.assertEqual('t', actual._last_char)
def test_write_record_empty(self):
# Given
record = _Record.create()
writer = _ReconWriter()
# When
actual = writer._write_record(record)
# Then
self.assertIsNone(actual)
def test_write_value_record(self):
# Given
record = _Record.create()
record.add(Slot.create_slot(Text.create_from('Cow'), Text.create_from('Moo')))
writer = _ReconWriter()
# When
actual = writer._write_value(record)
# Then
self.assertIsInstance(actual, _OutputMessage)
self.assertEqual(7, actual._size)
self.assertEqual('Cow:Moo', actual._value)
self.assertEqual('o', actual._last_char)
def test_write_value_text_ident(self):
# Given
ident = Text.create_from('Duck')
writer = _ReconWriter()
# When
actual = writer._write_value(ident)
# Then
self.assertIsInstance(actual, _OutputMessage)
self.assertEqual(4, actual._size)
self.assertEqual('Duck', actual._value)
self.assertEqual('k', actual._last_char)
def test_write_value_text_string(self):
# Given
string = Text.create_from('$duck')
writer = _ReconWriter()
# When
actual = writer._write_value(string)
# Then
self.assertIsInstance(actual, _OutputMessage)
self.assertEqual(7, actual._size)
self.assertEqual('"$duck"', actual._value)
self.assertEqual('"', actual._last_char)
def test_write_value_number(self):
# Given
number = Num.create_from(-13.1)
writer = _ReconWriter()
# When
actual = writer._write_value(number)
# Then
self.assertIsInstance(actual, _OutputMessage)
self.assertEqual(5, actual._size)
self.assertEqual('-13.1', actual._value)
self.assertEqual('1', actual._last_char)
def test_write_value_bool(self):
# Given
boolean = Bool.create_from(False)
writer = _ReconWriter()
# When
actual = writer._write_value(boolean)
# Then
self.assertIsInstance(actual, _OutputMessage)
self.assertEqual(5, actual._size)
self.assertEqual('false', actual._value)
self.assertEqual('e', actual._last_char)
def test_write_value_absent(self):
# Given
absent = _Absent._get_absent()
writer = _ReconWriter()
# When
actual = writer._write_value(absent)
# Then
self.assertIsInstance(actual, _OutputMessage)
self.assertEqual(0, actual._size)
self.assertEqual('', actual._value)
def test_write_slot(self):
# Given
key = Text.create_from('Hello')
value = Text.create_from('Friend')
writer = _ReconWriter()
# When
actual = writer._write_slot(key, value)
# Then
self.assertIsInstance(actual, _OutputMessage)
self.assertEqual(12, actual._size)
self.assertEqual('Hello:Friend', actual._value)
self.assertEqual('d', actual._last_char)
def test_write_attr(self):
# Given
key = Text.create_from('Hello')
value = Text.create_from('Friend')
writer = _ReconWriter()
# When
actual = writer._write_attr(key, value)
# Then
self.assertIsInstance(actual, _OutputMessage)
self.assertEqual(14, actual._size)
self.assertEqual('@Hello(Friend)', actual._value)
self.assertEqual(')', actual._last_char)
def test_write_item_attr(self):
# Given
item = Attr.create_attr(Text.create_from('Cat'), Text.create_from('Meow'))
writer = _ReconWriter()
# When
actual = writer._write_item(item)
# Then
self.assertIsInstance(actual, str)
self.assertEqual('@Cat(Meow)', actual)
def test_write_item_slot(self):
# Given
item = Slot.create_slot(Text.create_from('Age'), Num.create_from(32))
writer = _ReconWriter()
# When
actual = writer._write_item(item)
# Then
self.assertIsInstance(actual, str)
self.assertEqual('Age:32', actual)
def test_write_item_value(self):
# Given
item = Text.create_from('Horse#')
writer = _ReconWriter()
# When
actual = writer._write_item(item)
# Then
self.assertIsInstance(actual, str)
self.assertEqual('"Horse#"', actual)
def test_write_item_invalid(self):
# Given
item = CustomItem()
writer = _ReconWriter()
# When
with self.assertRaises(TypeError) as error:
writer._write_item(item)
# Then
message = error.exception.args[0]
self.assertEqual('No Recon serialization for CustomItem!', message)
| 34.926259 | 119 | 0.63546 | [
"Apache-2.0"
] | DobromirM/swim-system-python | test/recon/test_writers.py | 19,419 | Python |
from django.contrib.gis import admin
from django import forms
from django.utils.translation import gettext_lazy as _
from treebeard.forms import movenodeform_factory
from froide.helper.admin_utils import ForeignKeyFilter
from froide.helper.forms import get_fk_raw_id_widget
from .models import GeoRegion
class GeoRegionAdminForm(movenodeform_factory(GeoRegion)):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
widget = get_fk_raw_id_widget(GeoRegion, admin.site, field_name='id')
self.fields['_ref_node_id'] = forms.IntegerField(
required=False, label=_("Relative to"),
widget=widget
)
@classmethod
def mk_dropdown_tree(cls, model, for_node=None):
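        # Skip building treebeard's dropdown of all nodes; the raw-id widget set up
        # in __init__ is used to pick the parent region instead.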
return []
class GeoRegionMixin(object):
form = GeoRegionAdminForm
search_fields = ['name', 'region_identifier']
list_display = ('name', 'kind', 'kind_detail', 'region_identifier')
list_filter = (
'kind', 'kind_detail',
('part_of', ForeignKeyFilter),
)
raw_id_fields = ('part_of',)
readonly_fields = ('depth', 'numchild', 'path')
class GeoRegionAdmin(GeoRegionMixin, admin.GeoModelAdmin):
pass
admin.site.register(GeoRegion, GeoRegionAdmin)
| 26.617021 | 77 | 0.704237 | [
"MIT"
] | krmax44/froide | froide/georegion/admin.py | 1,251 | Python |
# coding=utf-8
# Copyright 2021 The TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Github pathlib-like util."""
import dataclasses
import functools
import os
import pathlib
import posixpath
from typing import Iterator, Mapping, MutableMapping, Optional, Set, Tuple
import requests
from tensorflow_datasets.core import utils
JsonValue = utils.JsonValue
_URI_PREFIX = 'github://'
def _get_token():
# Get the secret API token to avoid the 60 calls/hour limit
# To get the current quota or test the token:
# curl -H "Authorization: token ${GITHUB_TOKEN}" https://api.github.com/rate_limit # pylint: disable=line-too-long
return os.environ.get('GITHUB_TOKEN')
def get_content(url: str) -> bytes:
resp = requests.get(url)
if resp.status_code != 200:
raise FileNotFoundError(f'Request failed for {url}\n'
f' Error: {resp.status_code}\n'
f' Reason: {resp.content}')
return resp.content
class GithubApi:
"""Class to issue calls to the Github API."""
def __init__(self, token: Optional[str] = None):
self._token = token or _get_token()
def query(self, url: str) -> JsonValue:
"""Launches a Github API query and returns the result."""
headers = {}
if self._token:
headers['Authorization'] = f'token {self._token}'
resp = requests.get(url, headers=headers)
if resp.status_code != 200:
raise FileNotFoundError(
f'Request failed:\n'
f' Request: {url}\n'
f' Error: {resp.status_code}\n'
f' Reason: {resp.content}',)
return resp.json()
def query_tree(self, repo: str, branch: str) -> JsonValue:
"""Queries a repository tree.
See https://docs.github.com/en/rest/reference/git#trees
Args:
repo: the repository
branch: the branch for which to get the tree
Returns:
JSON dict with the tree.
"""
url = f'https://api.github.com/repos/{repo}/git/trees/{branch}?recursive=1'
return self.query(url)
def _correct_folder(folder: str) -> str:
"""Ensures the folder follows a standard.
Pathlib.parent in the root folder results in '.', whereas in other places
we should use '' for the root folder. This function makes sure the root
folder is always empty string.
Args:
folder: the folder to be corrected.
Returns:
The corrected folder.
"""
if folder == '.':
return ''
return folder
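# For example, _get_parent_folder(PurePosixPath('core/__init__.py')) returns 'core',
# while _get_parent_folder(PurePosixPath('README.md')) returns '' for the root folder.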
def _get_parent_folder(path: pathlib.PurePosixPath) -> str:
return _correct_folder(os.fspath(path.parent))
@dataclasses.dataclass(frozen=True)
class _GithubElement:
"""Representation of an element in a Github tree (a file or folder).
Attributes:
parent_folder: the folder in which this element resides.
name: the name of this element, e.g. the file name or the folder name.
is_folder: whether this element is a folder or not.
"""
parent_folder: str
name: str
is_folder: bool
@classmethod
def from_path(cls, path: pathlib.PurePosixPath,
is_folder: bool) -> '_GithubElement':
parent_folder = _get_parent_folder(path)
name = path.name
return cls(parent_folder=parent_folder, name=name, is_folder=is_folder)
@dataclasses.dataclass(frozen=True)
class _GithubTree:
"""A Github tree of a repository."""
files_per_folder: Mapping[str, Set[_GithubElement]]
def is_folder(self, path: str) -> bool:
return _correct_folder(path) in self.files_per_folder
def is_file(self, path: pathlib.PurePosixPath) -> bool:
parent_folder = _get_parent_folder(path)
files = self.files_per_folder.get(parent_folder)
if not files:
return False
file = _GithubElement(
parent_folder=parent_folder, name=path.name, is_folder=False)
return file in files
@classmethod
def from_json(cls, value) -> '_GithubTree':
"""Parses a GithubTree from the given JSON."""
if not isinstance(value, dict) or 'tree' not in value:
raise ValueError(f'Github API response not supported: {value}')
files_per_folder: MutableMapping[str, Set[str]] = {}
for element in value['tree']:
github_element = _GithubElement.from_path(
path=pathlib.PurePosixPath(element['path']),
is_folder=(element['type'] == 'tree'))
if element['type'] in {'blob', 'tree'}:
files_per_folder.setdefault(github_element.parent_folder, set())
files_per_folder[github_element.parent_folder].add(github_element)
return _GithubTree(files_per_folder=files_per_folder)
@staticmethod
@functools.lru_cache(maxsize=None)
def from_cache(repo: str, branch: str) -> '_GithubTree':
"""Factory which caches the entire Github tree."""
tree_json = GithubApi().query_tree(repo, branch)
# If the tree is truncated, then we'll need a more sophisticated method to
# retrieve the whole tree. Since this is currently not supported, it raises
# an exception.
assert not tree_json.get('truncated', False)
return _GithubTree.from_json(tree_json)
@dataclasses.dataclass(frozen=True, eq=True)
class _PathMetadata:
"""Github metadata of a file or directory."""
path: str
repo: str # e.g. `tensorflow/datasets`
branch: str # e.g. `master`
subpath: str # e.g 'core/__init__.py'
@classmethod
def from_path(cls, path: str) -> '_PathMetadata':
repo, branch, subpath = _parse_github_path(path)
return cls(path=path, repo=repo, branch=branch, subpath=subpath)
@utils.register_pathlike_cls(_URI_PREFIX)
class GithubPath(pathlib.PurePosixPath):
"""`pathlib.Path` like object for manipulating Github paths.
Example:
```
path = GithubPath.from_repo('tensorflow/datasets')
path = path / 'docs' / 'catalog'
assert path.is_dir()
datasets = [
p.name for p in path.iterdir() if p.match('*.md')
]
path = GithubPath('github://tensorflow/datasets/tree/master/docs/README.md')
assert path.subpath == 'docs/README.md'
assert path.repo == 'tensorflow/datasets'
assert path.branch == 'master'
```
"""
def __new__(cls, *parts: utils.PathLike) -> 'GithubPath':
full_path = '/'.join(os.fspath(p) for p in parts)
_parse_github_path(full_path)
return super().__new__(cls, full_path.replace(_URI_PREFIX, '/github/', 1))
@utils.memoized_property
def _path_str(self) -> str:
return posixpath.join(_URI_PREFIX, *self.parts[2:])
def __fspath__(self) -> str:
return self._path_str
def __str__(self) -> str: # pylint: disable=invalid-str-returned
return self._path_str
@classmethod
def from_repo(cls, repo: str, branch: str = 'master') -> 'GithubPath':
"""Factory to creates a GithubPath from a repo name.
Args:
repo: Repo name (e.g. `tensorflow/datasets`)
      branch: Branch name (e.g. `master`, 'v1.2.0', '0d240e8b85c'). Defaults to
        master.
Returns:
github_path: The repository root dir at head
"""
return cls(f'github://{repo}/tree/{branch}')
@utils.memoized_property
def _metadata(self) -> _PathMetadata:
return _PathMetadata.from_path(os.fspath(self))
@property
def subpath(self) -> str:
"""The inner path (e.g. `core/__init__.py`)."""
return self._metadata.subpath
@property
def repo(self) -> str:
"""The repository identifier (e.g. `tensorflow/datasets`)."""
return self._metadata.repo
@property
def branch(self) -> str:
"""The branch (e.g. `master`, `v2`, `43bbad116df`,...)."""
return self._metadata.branch
@property
def github_tree(self) -> _GithubTree:
return _GithubTree.from_cache(self.repo, self.branch)
def as_raw_url(self) -> str:
"""Returns the raw content url (https://raw.githubusercontent.com)."""
return ('https://raw.githubusercontent.com/'
f'{self.repo}/{self.branch}/{self.subpath}')
def as_human_friendly_url(self) -> str:
"""Returns the human friendly url."""
return f'https://github.com/{self.repo}/blob/{self.branch}/{self.subpath}'
def iterdir(self) -> Iterator['GithubPath']:
"""Yields the sub-paths."""
if not self.is_dir():
raise NotADirectoryError(f'{self.subpath} is not a directory.')
for filename in self.github_tree.files_per_folder[self.subpath]:
yield self / filename.name
def is_dir(self) -> bool:
"""Returns True if the path is a directory or submodule."""
return self.github_tree.is_folder(self.subpath)
def is_file(self) -> bool:
"""Returns True if the path is a file."""
return self.github_tree.is_file(pathlib.PurePosixPath(self.subpath))
def exists(self) -> bool:
"""Returns True if the path exists."""
return self.is_dir() or self.is_file()
def read_bytes(self) -> bytes:
"""Returns the file content as bytes."""
# As the content is fetched during the Github API calls, we could cache it
# and return it directly here, rather than using an additional query.
# However this might have significant memory impact if many `GithubPath`
# are used, so would require some additional cleanup (weakref ?).
# Using raw_url doesn't count in the API calls quota and should works with
# arbitrary sized files.
url = self.as_raw_url()
return get_content(url)
def read_text(self, encoding: Optional[str] = None) -> str:
"""Returns the file content as string."""
return self.read_bytes().decode(encoding=encoding or 'utf-8')
def copy(
self,
dst: utils.PathLike,
overwrite: bool = False,
) -> utils.ReadWritePath:
"""Copy the current file to the given destination.
Args:
dst: Target file. It can be any PathLike compatible path (e.g. `gs://...`)
overwrite: Whether the file should be overwritten or not
Returns:
The new created file.
Raises:
FileExistsError: If `overwrite` is false and destination exists.
"""
dst = utils.as_path(dst)
if not overwrite and dst.exists():
raise FileExistsError(f'Cannot copy {self}. Destination {dst} exists.')
# Otherwise, copy src to dst
dst.write_bytes(self.read_bytes())
return dst
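# For example, 'github://tensorflow/datasets/tree/master/docs/README.md' parses to
# ('tensorflow/datasets', 'master', 'docs/README.md').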
def _parse_github_path(path: str) -> Tuple[str, str, str]:
"""Parse the absolute github path.
Args:
path: The full github path.
Returns:
    repo: The repository identifier.
branch: Repository branch.
subpath: The inner path.
Raises:
ValueError: If the path is invalid
"""
err_msg = (f'Invalid github path: {path}. Expected format: '
'`github://<owner>/<name>/tree/<branch>[/<path>]`.')
if not path.startswith(_URI_PREFIX):
raise ValueError(err_msg)
if path.endswith('/'):
raise ValueError(err_msg + ' Trailing `/` not supported.')
parts = path[len(_URI_PREFIX):].split('/')
if len(parts) < 4:
raise ValueError(err_msg)
# 'tensorflow', 'datasets', 'tree', 'master', ...
owner, repo, tree, branch, *subpath = parts
if tree != 'tree':
raise ValueError(err_msg + '. `/blob/` isn\'t accepted. Only `/tree/`.')
return f'{owner}/{repo}', branch, '/'.join(subpath)
| 31.941504 | 117 | 0.680736 | [
"Apache-2.0"
] | YangDong2002/datasets | tensorflow_datasets/core/github_api/github_path.py | 11,467 | Python |
#
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER
#
# Copyright (c) 2015 Juniper Networks, Inc.
# All rights reserved.
#
# Use is subject to license terms.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at http://www.apache.org/licenses/LICENSE-2.0.
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import logging
import re
from django.contrib import messages
from django.http import HttpResponseRedirect
from django.shortcuts import render, get_object_or_404
from common.lib import imageUtils
from common.lib import libvirtUtils
from common.lib import openstackUtils
from common.lib import osUtils
from images.models import Image
from images.models import ImageBlankForm
from images.models import ImageForm
from images.models import ImageLocalForm
from wistar import configuration
from wistar import settings
logger = logging.getLogger(__name__)
def index(request):
image_list = imageUtils.get_local_image_list()
context = {'image_list': image_list}
return render(request, 'images/index.html', context)
def edit(request, image_id):
image = get_object_or_404(Image, pk=image_id)
# template = get_object_or_404(ConfigTemplate, pk=template_id)
# template_form = ConfigTemplateForm(instance=template)
image_form = ImageForm(instance=image)
return render(request, 'images/edit.html', {'image': image, 'image_form': image_form})
def update(request):
if "name" in request.POST:
image_id = request.POST["image_id"]
image_name = request.POST["name"]
image_description = request.POST["description"]
image_type = request.POST["type"]
image = get_object_or_404(Image, pk=image_id)
image.name = image_name
image.description = image_description
image.type = image_type
image.save()
messages.info(request, "Image updated")
return HttpResponseRedirect('/images/')
else:
if "image_id" in request.POST:
image_id = request.POST["image_id"]
image = get_object_or_404(Image, pk=image_id)
messages.info(request, "Image updated")
return render(request, 'edit.html', {'image': image})
else:
messages.info(request, "Could not update image! No name or ID found in request!")
return HttpResponseRedirect('/images/')
def new(request):
image_form = ImageForm()
context = {'image_form': image_form, "vm_types": configuration.vm_image_types}
return render(request, 'images/new.html', context)
def create(request):
try:
logger.debug('---- Create Image ----')
image_form = ImageForm(request.POST, request.FILES)
if not image_form.is_valid():
logger.error("Could not save image for some reason!")
context = {'image_form': image_form}
return render(request, 'images/new.html', context)
# if not osUtils.checkPath(image_form.cleaned_data['path']):
# logger.debug("PATH DOESN'T EXIST")
# context = {'error' : "PATH DOESNT EXIST"}
# return render(request, 'error.html', context)
logger.debug("Saving form")
orig_image = image_form.save()
messages.info(request, "Image uploaded successfully")
image_type = request.POST["type"]
image_name = request.POST["name"]
full_path = orig_image.filePath.path
if re.match(".*\.vmdk$", full_path):
# we need to convert this for KVM based deployments!
converted_image_path = re.sub("\.vmdk$", ".qcow2", full_path)
converted_image_file_name = converted_image_path.split('/')[-1]
if osUtils.convert_vmdk_to_qcow2(full_path, converted_image_path):
logger.info("Converted vmdk image to qcow2!")
orig_image.filePath = "user_images/%s" % converted_image_file_name
orig_image.save()
logger.debug("Removing original vmdk")
osUtils.remove_instance(full_path)
else:
logger.error("Could not convert vmdk!")
if image_type == "junos_vre_15" and "jinstall64-vmx-15.1" in full_path:
logger.debug("Creating RIOT image for Junos vMX 15.1")
# lets replace the last "." with "_riot."
if '.' in full_path:
new_image_path = re.sub(r"(.*)\.(.*)$", r"\1_riot.\2", full_path)
else:
# if there is no '.', let's just add one
new_image_path = full_path + "_riot.img"
new_image_file_name = new_image_path.split('/')[-1]
new_image_name = image_name + ' Riot PFE'
if osUtils.copy_image_to_clone(full_path, new_image_path):
logger.debug("Copied from %s" % full_path)
logger.debug("Copied to %s" % new_image_path)
image = Image()
image.name = new_image_name
image.type = "junos_riot"
image.description = orig_image.description + "\nRiot PFE"
image.filePath = "user_images/" + new_image_file_name
image.save()
return HttpResponseRedirect('/images')
except Exception as e:
logger.error(e)
messages.info(request, "Could not create image!")
return HttpResponseRedirect('/images/')
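# Illustrative sketch (not called by the views above): how the re.sub() in create()
# derives the RIOT image path. The sample path below is hypothetical.
def _riot_path_example():
    sample = "/opt/wistar/user_images/jinstall64-vmx-15.1.qcow2"
    riot = re.sub(r"(.*)\.(.*)$", r"\1_riot.\2", sample)
    # riot == "/opt/wistar/user_images/jinstall64-vmx-15.1_riot.qcow2"
    # only the last '.' is rewritten because the first group is greedy
    return riot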
def blank(request):
image_form = ImageBlankForm()
context = {'image_form': image_form}
return render(request, 'images/new_blank.html', context)
def local(request):
image_form = ImageLocalForm()
context = {'image_form': image_form}
return render(request, 'images/new_local.html', context)
def create_blank(request):
image_form = ImageBlankForm(request.POST)
logger.debug(image_form)
logger.debug(str(image_form))
if image_form.is_valid():
name = request.POST["name"]
size = request.POST["size"]
description = request.POST["description"]
file_path = 'user_images/' + name
if ".img" not in file_path:
file_path += ".img"
full_path = settings.MEDIA_ROOT + "/" + file_path
if osUtils.create_blank_image(full_path, size + 'G'):
image = Image()
image.description = description
image.name = name
image.filePath = file_path
image.type = 'blank'
image.save()
# if not osUtils.checkPath(image_form.cleaned_data['path']):
# logger.debug("PATH DOESN'T EXIST")
# context = {'error' : "PATH DOESNT EXIST"}
# return render(request, 'error.html', context)
logger.debug("Saving form")
# image_form.save()
return HttpResponseRedirect('/images')
else:
context = {'image_form': image_form}
return render(request, 'images/new_blank.html', context)
def create_local(request):
name = request.POST["name"]
file_path = request.POST["filePath"]
description = request.POST["description"]
image_type = request.POST["type"]
try:
imageUtils.create_local_image(name, description, file_path, image_type)
except Exception as e:
context = {'error': str(e)}
return render(request, 'error.html', context)
messages.info(request, "Image Created!")
return HttpResponseRedirect('/images')
def block_pull(request, uuid):
domain = libvirtUtils.get_domain_by_uuid(uuid)
domain_name = domain.name()
image_path = libvirtUtils.get_image_for_domain(domain.UUIDString())
if osUtils.is_image_thin_provisioned(image_path):
logger.debug("Found thinly provisioned image, promoting...")
rv = libvirtUtils.promote_instance_to_image(domain_name)
if rv is None:
messages.info(request, "Image already promoted. Shut down the instance to perform a clone.")
elif rv:
messages.info(request, "Promoting thinly provisioned image")
else:
messages.info(request, "Error Promoting image!")
else:
messages.info(request, "Image is already promoted. You may now shutdown the image and perform a Clone")
logger.debug("Image is already promoted")
return HttpResponseRedirect('/ajax/manageHypervisor/')
def create_from_instance(request, uuid):
logger.debug("Creating new image from instance")
domain = libvirtUtils.get_domain_by_uuid(uuid)
logger.debug("got domain " + domain.name())
domain_image = libvirtUtils.get_image_for_domain(uuid)
logger.debug("got domain_image: " + domain_image)
if osUtils.is_image_thin_provisioned(domain_image):
logger.error("Cannot clone disk that is thinly provisioned! Please perform a block pull before continuing")
context = {'error': "Cannot Clone thinly provisioned disk! Please perform a block pull!"}
return render(request, 'error.html', context)
domain_name = domain.name()
# FIXME - make these variable names a bit more clear about their meaning
# we need to get the path of the image relative to the MEDIA_ROOT
media_root = settings.MEDIA_ROOT
media_root_array = media_root.split("/")
len_media_root = len(media_root_array)
full_path_array = domain_image.split("/")
full_path = "/".join(full_path_array[:full_path_array.index('instances')])
# grab the file path of the domain image without the MEDIA_ROOT prepended
file_path_array = domain_image.split('/')[len_media_root:]
images_dir = "/".join(file_path_array[:file_path_array.index('instances')])
new_relative_image_path = images_dir + "/image_" + str(domain.UUIDString()) + ".img"
new_full_image_path = full_path + "/image_" + str(domain.UUIDString()) + ".img"
if osUtils.check_path(new_full_image_path):
logger.info("Image has already been cloned")
context = {'error': "Instance has already been cloned!"}
return render(request, 'error.html', context)
logger.debug("Copying image from " + domain_image)
logger.debug("To " + new_full_image_path)
osUtils.copy_image_to_clone(domain_image, new_full_image_path)
image = Image()
image.name = "image_" + str(domain.UUIDString())
image.description = "Clone of " + domain_name
image.filePath = new_relative_image_path
image.save()
return HttpResponseRedirect('/images/')
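# Illustrative sketch (not called by the views above): the relative-path arithmetic
# used in create_from_instance(), with hypothetical values and assuming the instance
# disk lives under an 'instances' directory below MEDIA_ROOT.
def _clone_path_example():
    media_root = "/opt/wistar/media"
    domain_image = "/opt/wistar/media/user_images/instances/vmx1/vmx1.img"
    len_media_root = len(media_root.split("/"))
    full_path_array = domain_image.split("/")
    full_path = "/".join(full_path_array[:full_path_array.index('instances')])
    # full_path == "/opt/wistar/media/user_images" (absolute images directory)
    file_path_array = domain_image.split('/')[len_media_root:]
    images_dir = "/".join(file_path_array[:file_path_array.index('instances')])
    # images_dir == "user_images" (same directory, relative to MEDIA_ROOT)
    return full_path, images_dir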
def detail(request, image_id):
image = get_object_or_404(Image, pk=image_id)
vm_type = "N/A"
for vt in configuration.vm_image_types:
if vt["name"] == image.type:
vm_type = vt["description"]
break
glance_id = ""
image_state = ""
if configuration.deployment_backend == "openstack":
openstackUtils.connect_to_openstack()
glance_id = openstackUtils.get_image_id_for_name(image.name)
elif configuration.deployment_backend == "kvm" and image.filePath != "":
image_state = osUtils.is_image_thin_provisioned(image.filePath.path)
return render(request, 'images/details.html', {'image': image,
'state': image_state,
"vm_type": vm_type,
"settings": settings,
"glance_id": glance_id,
"use_openstack": configuration.use_openstack,
"openstack_host": configuration.openstack_host
})
def glance_detail(request):
"""
OpenStack specific action to get image details from Glance
:param request: HTTPRequest
:return: rendered HTML
"""
required_fields = set(['imageId'])
if not required_fields.issubset(request.POST):
return render(request, 'ajax/ajaxError.html', {'error': "Invalid Parameters in POST"})
image_id = request.POST["imageId"]
image = get_object_or_404(Image, pk=image_id)
if openstackUtils.connect_to_openstack():
glance_id = openstackUtils.get_image_id_for_name(image.name)
glance_json = dict()
if glance_id is not None:
glance_json = openstackUtils.get_glance_image_detail(glance_id)
logger.debug("glance json of %s is" % glance_id)
logger.debug(glance_json)
logger.debug("---")
return render(request, 'images/glance_detail.html', {'image': glance_json,
"image_id": image_id,
"glance_id": glance_id,
"openstack_host": configuration.openstack_host
})
else:
return render(request, 'error.html', {'error': "Could not connect to OpenStack"})
def glance_list(request):
image_list = imageUtils.get_glance_image_list()
context = {'image_list': image_list}
return render(request, 'images/glance_list.html', context)
def delete(request, image_id):
imageUtils.delete_image_by_id(image_id)
messages.info(request, "Image deleted!")
return HttpResponseRedirect('/images/')
def list_glance_images(request):
if openstackUtils.connect_to_openstack():
image_list = openstackUtils.list_glance_images()
context = {'error': image_list}
return render(request, 'error.html', context)
context = {'error': "Could not connect to OpenStack"}
return render(request, 'error.html', context)
def upload_to_glance(request, image_id):
if openstackUtils.connect_to_openstack():
image = get_object_or_404(Image, pk=image_id)
logger.debug("Uploading now!")
if osUtils.check_path(image.filePath.path):
openstackUtils.upload_image_to_glance(image.name, image.filePath.path)
logger.debug("All done")
return HttpResponseRedirect('/images/%s' % image_id)
def import_from_glance(request, glance_id):
"""
Creates a local db entry for the glance image
Everything in Wistar depends on a db entry in the Images table
If you have an existing openstack cluster, you may want to import those
images here without having to physically copy the images to local disk
:param request: HTTPRequest object
:param glance_id: id of the glance image to import
:return: redirect to /images/image_id
"""
if openstackUtils.connect_to_openstack():
image_details = openstackUtils.get_glance_image_detail(glance_id)
image = Image()
image.description = "Imported from Glance"
image.name = image_details["name"]
image.type = 'blank'
image.save()
logger.debug("All done")
return HttpResponseRedirect('/images/%s' % image.id)
context = {'error': "Could not connect to OpenStack"}
return render(request, 'error.html', context)
def error(request):
context = {'error': "Unknown Error"}
return render(request, 'error.html', context)
| 37.132212 | 115 | 0.645433 | [
"Apache-2.0"
] | Juniper/wistar | images/views.py | 15,447 | Python |
# ############################################################################
#
# Copyright (c) Microsoft Corporation.
#
# Available under the Microsoft PyKinect 1.0 Alpha license. See LICENSE.txt
# for more information.
#
# ###########################################################################/
"""defines the core data structures used for communicating w/ the Kinect APIs"""
from __future__ import division
from builtins import range
from past.utils import old_div
import ctypes
from ctypes import Array
from pykinect.nui import _NUIDLL
from future.utils import with_metaclass
NUI_SKELETON_COUNT = 6
class _EnumerationType(ctypes.c_int):
"""metaclass for an enumeration like type for ctypes"""
def __new__(metacls, name, bases, dict):
cls = ctypes.c_int.__new__(metacls, name, bases, dict)
for key, value in list(cls.__dict__.items()):
if key.startswith('_') and key.endswith('_'): continue
setattr(cls, key, cls(key, value))
return cls
class _Enumeration(ctypes.c_int):
"""base class for enumerations"""
__metaclass__ = _EnumerationType
def __init__(self, name, value):
self.name = name
ctypes.c_int.__init__(self, value)
def __hash__(self):
return self.value
def __int__(self):
return self.value
def __index__(self):
return self.value
def __repr__(self):
if hasattr(self, 'name'):
return "<%s.%s (%r)>" % (self.__class__.__name__, self.name, self.value)
name = '??'
for x in type(self).__dict__:
if x.startswith('_') and x.endswith('_'): continue
if getattr(self, x, None) == self.value:
name = x
break
return "<%s.%s (%r)>" % (self.__class__.__name__, name, self.value)
def __eq__(self, other):
if type(self) is not type(other):
return self.value == other
return self.value == other.value
def __ne__(self, other):
if type(self) is not type(other):
return self.value != other
return self.value != other.value
class Vector(ctypes.Structure):
"""Represents vector data."""
_fields_ = [('x', ctypes.c_float),
('y', ctypes.c_float),
('z', ctypes.c_float),
('w', ctypes.c_float)
]
def __init__(self, x = 0.0, y = 0.0, z = 0.0, w = 0.0):
self.x = x
self.y = y
self.z = z
self.w = w
def __eq__(self, other):
return (self.x == other.x and
self.y == other.y and
self.z == other.z and
self.w == other.w)
def __ne__(self, other):
return not self.__eq__(other)
def __repr__(self):
return '<x=%r, y=%r, z=%r, w=%r>' % (self.x, self.y, self.z, self.w)
class Matrix4(Array):
"""4x4 matrix. Can be accessed using matrix[0,0] ... matrix[3,3] or can be accessed using
matrix.M11 ... matrix.M44 for similarity to .NET and the C data structures. matrix[0,1] is
the same as matrix.M12.
Used to provide bone rotation information.
"""
_length_ = 16
_type_ = ctypes.c_float
def __getitem__(self, index):
return Array.__getitem__(self, index[1] + index[0] * 4)
def __setitem__(self, index, value):
return Array.__setitem__(self, index[1] + index[0] * 4, value)
    # NOTE: the instance must be passed explicitly; these accessors bypass the
    # (row, column) __getitem__/__setitem__ defined above and use flat indices.
    def get_M11(self): return Array.__getitem__(self, 0)
    def set_M11(self, value): Array.__setitem__(self, 0, value)
    M11 = property(get_M11, set_M11)
    def get_M12(self): return Array.__getitem__(self, 1)
    def set_M12(self, value): Array.__setitem__(self, 1, value)
    M12 = property(get_M12, set_M12)
    def get_M13(self): return Array.__getitem__(self, 2)
    def set_M13(self, value): Array.__setitem__(self, 2, value)
    M13 = property(get_M13, set_M13)
    def get_M14(self): return Array.__getitem__(self, 3)
    def set_M14(self, value): Array.__setitem__(self, 3, value)
    M14 = property(get_M14, set_M14)
    def get_M21(self): return Array.__getitem__(self, 4)
    def set_M21(self, value): Array.__setitem__(self, 4, value)
    M21 = property(get_M21, set_M21)
    def get_M22(self): return Array.__getitem__(self, 5)
    def set_M22(self, value): Array.__setitem__(self, 5, value)
    M22 = property(get_M22, set_M22)
    def get_M23(self): return Array.__getitem__(self, 6)
    def set_M23(self, value): Array.__setitem__(self, 6, value)
    M23 = property(get_M23, set_M23)
    def get_M24(self): return Array.__getitem__(self, 7)
    def set_M24(self, value): Array.__setitem__(self, 7, value)
    M24 = property(get_M24, set_M24)
    def get_M31(self): return Array.__getitem__(self, 8)
    def set_M31(self, value): Array.__setitem__(self, 8, value)
    M31 = property(get_M31, set_M31)
    def get_M32(self): return Array.__getitem__(self, 9)
    def set_M32(self, value): Array.__setitem__(self, 9, value)
    M32 = property(get_M32, set_M32)
    def get_M33(self): return Array.__getitem__(self, 10)
    def set_M33(self, value): Array.__setitem__(self, 10, value)
    M33 = property(get_M33, set_M33)
    def get_M34(self): return Array.__getitem__(self, 11)
    def set_M34(self, value): Array.__setitem__(self, 11, value)
    M34 = property(get_M34, set_M34)
    def get_M41(self): return Array.__getitem__(self, 12)
    def set_M41(self, value): Array.__setitem__(self, 12, value)
    M41 = property(get_M41, set_M41)
    def get_M42(self): return Array.__getitem__(self, 13)
    def set_M42(self, value): Array.__setitem__(self, 13, value)
    M42 = property(get_M42, set_M42)
    def get_M43(self): return Array.__getitem__(self, 14)
    def set_M43(self, value): Array.__setitem__(self, 14, value)
    M43 = property(get_M43, set_M43)
    def get_M44(self): return Array.__getitem__(self, 15)
    def set_M44(self, value): Array.__setitem__(self, 15, value)
    M44 = property(get_M44, set_M44)
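# Minimal usage sketch for Matrix4 (illustrative only; not used elsewhere in this
# module): the (row, column) index pairs and the .NET-style MXY properties address
# the same 16 underlying floats.
def _matrix4_example():
    m = Matrix4()
    m[0, 1] = 2.5          # row 0, column 1
    assert m.M12 == 2.5    # same element through the property accessor
    m.M44 = 1.0
    assert m[3, 3] == 1.0
    return m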
class _NuiLockedRect(ctypes.Structure):
_fields_ = [('pitch', ctypes.c_int32),
('size', ctypes.c_int32),
('bits', ctypes.c_voidp)]
class _NuiSurfaceDesc(ctypes.Structure):
_fields_ = [('width', ctypes.c_uint32),
('height', ctypes.c_uint32)
]
class PlanarImage(ctypes.c_voidp):
"""Represents a video image."""
_BufferLen = ctypes.WINFUNCTYPE(ctypes.HRESULT, ctypes.c_int32)(3, 'BufferLen')
_Pitch = ctypes.WINFUNCTYPE(ctypes.HRESULT, ctypes.c_int32)(4, 'Pitch')
_LockRect = ctypes.WINFUNCTYPE(ctypes.HRESULT, ctypes.c_uint, ctypes.POINTER(_NuiLockedRect), ctypes.c_voidp, ctypes.c_uint32)(5, '_LockRect')
_GetLevelDesc = ctypes.WINFUNCTYPE(ctypes.HRESULT, ctypes.c_uint32, ctypes.POINTER(_NuiSurfaceDesc))(6, '_GetLevelDesc')
_UnlockRect = ctypes.WINFUNCTYPE(ctypes.HRESULT, ctypes.c_uint32)(7, '_UnlockRect')
@property
def width(self):
desc = _NuiSurfaceDesc()
PlanarImage._GetLevelDesc(self, 0, ctypes.byref(desc))
return desc.width
@property
def height(self):
desc = _NuiSurfaceDesc()
PlanarImage._GetLevelDesc(self, 0, ctypes.byref(desc))
        return desc.height  # ctypes already returns the field as an int (cf. width above)
@property
def bytes_per_pixel(self):
return old_div(self.pitch, self.width)
@property
def bits(self):
buffer = (ctypes.c_byte * self.buffer_length)()
self.copy_bits(buffer)
return buffer
def copy_bits(self, dest):
"""copies the bits of the image to the provided destination address"""
desc = _NuiSurfaceDesc()
PlanarImage._GetLevelDesc(self, 0, ctypes.byref(desc))
rect = _NuiLockedRect()
PlanarImage._LockRect(self, 0, ctypes.byref(rect), None, 0)
ctypes.memmove(dest, rect.bits, desc.height * rect.pitch)
PlanarImage._UnlockRect(self, 0)
@property
def buffer_length(self):
return self.width * self.height * self.bytes_per_pixel
@property
def pitch(self):
rect = _NuiLockedRect()
PlanarImage._LockRect(self, 0, ctypes.byref(rect), None, 0)
res = rect.pitch
PlanarImage._UnlockRect(self, 0)
return res
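# Illustrative sketch (assumes a Kinect runtime is delivering frames; acquiring the
# PlanarImage itself happens outside this module): copying the raw pixel bytes into
# a ctypes buffer, mirroring what the 'bits' property does internally.
def _planar_image_bytes(image):
    buffer = (ctypes.c_byte * image.buffer_length)()
    image.copy_bits(buffer)
    return image.width, image.height, buffer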
class ImageType(_Enumeration):
"""Specifies an image type. """
depth_and_player_index = DepthAndPlayerIndex = 0 # USHORT
color = Color = 1 # RGB32 data
color_yuv = ColorYuv = 2 # YUY2 stream from camera h/w, but converted to RGB32 before user getting it.
color_yuv_raw = ColorYuvRaw = 3 # YUY2 stream from camera h/w.
depth = Depth = 4 # USHORT
class ImageResolution(_Enumeration):
"""Specifies image resolution."""
invalid = Invalid = -1
resolution_80x60 = Resolution80x60 = 0
resolution_320x240 = Resolution320x240 = 1
resolution_640x480 = Resolution640x480 = 2
resolution_1280x1024 = Resolution1280x1024 = 3 # for hires color only
class SkeletonTracking(_Enumeration):
suppress_no_frame_data = 0x00000001 # Prevents NuiSkeletonGetNextFrame from returning E_NUI_FRAME_NO_DATA errors. Instead, calls to NuiSkeletonGetNextFrame block until data is available or the timeout period passes.
title_sets_tracked_skeletons = 0x00000002 # Disables the default player selection mode and enables the title to manage which players have tracked skeletons.
enable_seated_support = 0x00000004 # Uses seated skeleton tracking mode. The 10 lower-body joints of each skeleton will not be tracked.
enable_in_near_range = 0x00000008
class ImageDigitalZoom(_Enumeration):
"""Specifies the zoom factor."""
zoom_1x = Zoom1x = 0 # A zoom factor of 1.0.
zoom_2x = Zoom2x = 1 # A zoom factor of 2.0.
class ImageViewArea(ctypes.Structure):
"""Specifies the image view area. """
_fields_ = [('Zoom', ctypes.c_int), # An ImageDigitalZoom value that specifies the zoom factor.
('CenterX', ctypes.c_long), # The horizontal offset from center, for panning.
('CenterY', ctypes.c_long) # The vertical offset from center, for panning.
]
def get_zoom(self):
return self.Zoom
def set_zoom(self, value):
self.Zoom = value
zoom = property(get_zoom, set_zoom)
def get_center_x(self):
return self.CenterX
def set_center_x(self, value):
self.CenterX = value
def get_center_y(self):
return self.CenterY
center_x = property(get_center_x, set_center_x)
def set_center_y(self, value):
self.CenterY = value
center_y = property(get_center_y, set_center_y)
class ImageFrame(ctypes.Structure):
_fields_ = [('timestamp', ctypes.c_longlong), # The timestamp (in milliseconds) of the most recent frame. The clock starts when you call Initialize.
('frame_number', ctypes.c_uint32), # Returns the frame number
('type', ImageType), # An ImageType value that specifies the image type.
('resolution', ImageResolution), # An ImageResolution value that specifies the image resolution.
('image', PlanarImage), # A PlanarImage object that represents the image.
('flags', ctypes.c_uint32), # flags, not used
('view_area', ImageViewArea), # An ImageViewArea value that specifies the view area.
]
class JointId(_Enumeration):
"""Specifies the various skeleton joints. """
hip_center = HipCenter = 0
spine = Spine = 1
shoulder_center = ShoulderCenter = 2
head = Head = 3
shoulder_left = ShoulderLeft = 4
elbow_left = ElbowLeft = 5
wrist_left = WristLeft = 6
hand_left = HandLeft = 7
shoulder_right = ShoulderRight = 8
elbow_right = ElbowRight = 9
wrist_right = WristRight = 10
hand_right = HandRight = 11
hip_left = HipLeft = 12
knee_left = KneeLeft = 13
ankle_left = AnkleLeft = 14
foot_left = FootLeft = 15
hip_right = HipRight = 16
knee_right = KneeRight = 17
ankle_right = AnkleRight = 18
foot_right = FootRight = 19
count = Count = 20
class SkeletonBoneRotation(ctypes.Structure):
_fields_ = [('rotation_matrix', Matrix4),
('rotation_quaternion', Vector)]
def __repr__(self):
return '<SkeletonBoneRotation(%r, %r)>' % (self.rotation_matrix, self.rotation_quaternion)
class SkeletonBoneOrientation(ctypes.Structure):
_fields_ = [('end_joint', JointId),
('start_joint', JointId),
('hierarchical_rotation', SkeletonBoneRotation),
('absolute_rotation', SkeletonBoneRotation),
]
def __repr__(self):
return '<SkeletonBoneOrientation(%r, %r, %r, %r)>' % (self.end_joint, self.start_joint, self.hierarchical_rotation, self.absolute_rotation)
class JointTrackingState(_Enumeration):
"""Specifies the joint tracking state. """
not_tracked = NOT_TRACKED = 0
inferred = INFERRED = 1
tracked = TRACKED = 2
class SkeletonTrackingState(_Enumeration):
"""Specifies a skeleton's tracking state."""
not_tracked = NOT_TRACKED = 0
position_only = POSITION_ONLY = 1
tracked = TRACKED = 2
class SkeletonFrameQuality(_Enumeration):
"""Specifies skeleton frame quality. """
camera_motion = CameraMotion = 0x01
extrapolated_floor = ExtrapolatedFloor = 0x02
upper_body_skeleton = UpperBodySkeleton = 0x04
seated_support_enabled = 0x08
class SkeletonQuality(_Enumeration):
"""Specifies how much of the skeleton is visible. """
clipped_right = ClippedRight = 0x00000001
clipped_left = ClippedLeft = 0x00000002
clipped_top = ClippedTop = 0x00000004
clipped_bottom = ClippedBottom = 0x00000008
NUI_SKELETON_POSITION_COUNT = 20
class SkeletonData(ctypes.Structure):
"""Contains data that characterizes a skeleton."""
_fields_ = [('eTrackingState', SkeletonTrackingState),
('dwTrackingID', ctypes.c_uint32),
('dwEnrollmentIndex', ctypes.c_uint32),
('dwUserIndex', ctypes.c_uint32),
('Position', Vector),
('SkeletonPositions', ctypes.ARRAY(Vector, NUI_SKELETON_POSITION_COUNT)),
('eSkeletonPositionTrackingState', ctypes.ARRAY(JointTrackingState, NUI_SKELETON_POSITION_COUNT)),
('Quality', SkeletonQuality),
]
def get_tracking_state(self):
return self.eTrackingState
def set_tracking_state(self, value):
self.eTrackingState = value
tracking_state = property(get_tracking_state, set_tracking_state)
def get_tracking_id(self):
return self.dwTrackingID
def set_tracking_id(self, value):
self.dwTrackingID = value
tracking_id = property(get_tracking_id, set_tracking_id)
def get_enrollment_index(self):
return self.dwEnrollmentIndex
def set_enrollment_index(self, value):
self.dwEnrollmentIndex = value
enrollment_index = property(get_enrollment_index, set_enrollment_index)
def get_user_index(self):
return self.dwUserIndex
def set_user_index(self, value):
self.dwUserIndex = value
user_index = property(get_user_index, set_user_index)
def get_position(self):
return self.Position
def set_position(self, value):
self.Position = value
position = property(get_position, set_position)
def get_skeleton_positions(self):
return self.SkeletonPositions
def set_skeleton_positions(self, value):
self.SkeletonPositions = value
skeleton_positions = property(get_skeleton_positions, set_skeleton_positions)
def get_skeleton_position_tracking_states(self):
return self.eSkeletonPositionTrackingState
def set_skeleton_position_tracking_states(self, value):
self.eSkeletonPositionTrackingState = value
skeleton_position_tracking_states = property(get_skeleton_position_tracking_states,
set_skeleton_position_tracking_states)
def get_skeleton_quality(self):
return self.Quality
def set_skeleton_quality(self, value):
self.Quality = value
skeleton_quality = property(get_skeleton_quality, set_skeleton_quality)
def calculate_bone_orientations(self):
"""Calculate bone orientations for a skeleton.
The function calculates hierarchical and absolute joint angles for the skeleton, which can
be used in animating an avatar (Avateering). The HipCenter joint is the root of the hierarchy,
and describes an absolute rotation in the right-hand camera coordinate system. All other
joints describe rotations relative to their parent joint orientation. The angles are returned
in the same order as the joints are defined.
Returns a sequence of SkeletonBoneOrientation objects."""
arr = (SkeletonBoneOrientation*JointId.Count)()
_NuiSkeletonCalculateBoneOrientations(self, arr)
return tuple(arr)
def __repr__(self):
return '<Tracking: %r, ID: %r, Position: %r>' % (self.eTrackingState,
self.dwTrackingID,
self.Position)
def __eq__(self, other):
if (self.tracking_state == other.tracking_state and
self.tracking_id == other.tracking_id and
self.enrollment_index == other.enrollment_index and
self.user_index == other.user_index and
self.position == other.position and
self.skeleton_quality == other.skeleton_quality):
for i in range(len(self.skeleton_positions)):
if (self.skeleton_positions[i] != other.skeleton_positions[i] or
self.skeleton_position_tracking_states[i] != other.skeleton_position_tracking_states[i]):
return False
return True
return False
def __ne__(self, other):
return not self.__eq__(other)
def __bool__(self):
return self.tracking_state != SkeletonTrackingState.not_tracked
_NuiSkeletonCalculateBoneOrientations = _NUIDLL.NuiSkeletonCalculateBoneOrientations
_NuiSkeletonCalculateBoneOrientations.argtypes = [ctypes.POINTER(SkeletonData), ctypes.POINTER(SkeletonBoneOrientation)]
_NuiSkeletonCalculateBoneOrientations.restype = ctypes.HRESULT
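# Illustrative sketch (assumes a Kinect runtime and that a SkeletonFrame has already
# been obtained elsewhere; this module only defines the data structures): walking the
# per-joint orientations returned by calculate_bone_orientations().
def _print_bone_orientations(skeleton_frame):
    for data in skeleton_frame.skeleton_data:
        if data.tracking_state != SkeletonTrackingState.TRACKED:
            continue
        for orientation in data.calculate_bone_orientations():
            # hierarchical_rotation is relative to the parent joint;
            # absolute_rotation is expressed in the camera coordinate system
            print("%r -> %r" % (orientation.end_joint,
                                orientation.absolute_rotation.rotation_quaternion))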
class SkeletonFrame(ctypes.Structure):
_pack_ = 16
_fields_ = [('liTimeStamp', ctypes.c_longlong),
('dwFrameNumber', ctypes.c_uint32),
('Quality', SkeletonFrameQuality),
('vFloorClipPlane', Vector),
('vNormalToGravity', Vector),
('SkeletonData', ctypes.ARRAY(SkeletonData, NUI_SKELETON_COUNT)),
]
def get_timestamp(self):
return self.liTimeStamp
def set_timestamp(self, value):
self.liTimeStamp = value
timestamp = property(get_timestamp, set_timestamp)
def get_frame_number(self):
return self.dwFrameNumber
def set_frame_number(self, value):
self.dwFrameNumber = value
frame_number = property(get_frame_number, set_frame_number)
def get_quality(self):
return self.Quality
def set_quality(self, value):
self.Quality = value
quality = property(get_quality, set_quality)
def get_floor_clip_plane(self):
return self.vFloorClipPlane
def set_floor_clip_plane(self, value):
self.vFloorClipPlane = value
floor_clip_plane = property(get_floor_clip_plane, set_floor_clip_plane)
def get_normal_to_gravity(self):
return self.vNormalToGravity
def set_normal_to_gravity(self, value):
self.vNormalToGravity = value
normal_to_gravity = property(get_normal_to_gravity, set_normal_to_gravity)
def get_skeleton_data(self):
return self.SkeletonData
def set_skeleton_data(self, value):
self.SkeletonData = value
skeleton_data = property(get_skeleton_data, set_skeleton_data)
class TransformSmoothParameters(ctypes.Structure):
"""Contains transform smoothing parameters. """
_fields_ = [('fSmoothing', ctypes.c_float),
('fCorrection', ctypes.c_float),
('fPrediction', ctypes.c_float),
('fJitterRadius', ctypes.c_float),
('fMaxDeviationRadius', ctypes.c_float)
]
def get_smoothing(self):
return self.fSmoothing
def set_smoothing(self, value):
self.fSmoothing = value
smoothing = property(get_smoothing, set_smoothing)
def get_correction(self):
return self.fCorrection
def set_correction(self, value):
self.fCorrection = value
correction = property(get_correction, set_correction)
def get_prediction(self):
return self.fPrediction
def set_prediction(self, value):
self.fPrediction = value
prediction = property(get_prediction, set_prediction)
def get_jitter_radius(self):
return self.fJitterRadius
def set_jitter_radius(self, value):
self.fJitterRadius = value
jitter_radius = property(get_jitter_radius, set_jitter_radius)
def get_max_deviation_radius(self):
return self.fMaxDeviationRadius
def set_max_deviation_radius(self, value):
self.fMaxDeviationRadius = value
max_deviation_radius = property(get_max_deviation_radius, set_max_deviation_radius)
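# Illustrative sketch: filling TransformSmoothParameters before handing it to the
# skeleton engine. The numbers below are arbitrary placeholders, not recommended
# defaults.
def _smoothing_params_example():
    params = TransformSmoothParameters()
    params.smoothing = 0.5
    params.correction = 0.5
    params.prediction = 0.5
    params.jitter_radius = 0.05
    params.max_deviation_radius = 0.04
    return params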
| 34.632166 | 227 | 0.646696 | [
"Apache-2.0"
] | howieraem/KinectActionDetection | pykinect/nui/structs.py | 21,749 | Python |
import _plotly_utils.basevalidators
class ArrowcolorValidator(_plotly_utils.basevalidators.ColorValidator):
def __init__(
self, plotly_name="arrowcolor", parent_name="layout.scene.annotation", **kwargs
):
super(ArrowcolorValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "calc"),
**kwargs,
)
| 31.071429 | 87 | 0.664368 | [
"MIT"
] | labaran1/plotly.py | packages/python/plotly/plotly/validators/layout/scene/annotation/_arrowcolor.py | 435 | Python |
from sotd_indicators.indicators import *
from arcgis.gis import GIS
import configparser
import time
class Indicator:
def __init__(self):
# GIS Resources
self.pem = None
self.key = None
self.username = None
self.password = None
self.portal = None
self.debug = None
#if GIS2 is specified
self.pub_pem = None
self.pub_key = None
self.pub_username = None
self.pub_password = None
self.pub_portal = None
self.pub_gis_conn = None
# Selection Drivers
self.grid_url = None
self.feat_url = None
# Positional Accuracy
self.poac_sdf = None
self.poac_url = None
# Completeness
self.cmpl_sdf = None
self.cmpl_url = None
# Logical Consistency
self.logc_sdf = None
self.logc_url = None
# Temporal Currency
self.curr_sdf = None
self.curr_url = None
# Thematic Accuracy
self.them_sdf = None
self.them_url = None
# Source Lineage
self.srln_sdf = None
self.srln_url = None
# Values Derived From Set Functions
self.grid_sdf = None
self.grid_wkid = None
self.features = None
self.selected = None
def load_config(self, config_file):
# Read Incoming Config File
config = configparser.ConfigParser()
config.read_file(open(config_file))
for section in config.sections():
print('Loading Section: {}'.format(section))
for k, v in dict(config.items(section)).items():
self.__setattr__(k, v)
def set_gis(self):
if self.pub_password!=None and self.pub_username!=None:
self.pub_gis_conn = GIS(url=self.pub_portal,
username=self.pub_username, password=self.pub_password)
print((self.pub_gis_conn.users.me.role, self.pub_gis_conn.users.me.username))
else:
self.pub_gis_conn = None
if self.key != None and self.pem != None:
import ssl
ssl._create_default_https_context = ssl._create_unverified_context
self.gis_conn = GIS(url=self.portal,
key_file=self.key,
cert_file=self.pem,
verify_cert=False)
print((self.gis_conn.users.me.role, self.gis_conn.users.me.username))
elif self.username != None and self.password != None:
self.gis_conn = GIS(url=self.portal,
username=self.username, password=self.password)
print((self.gis_conn.users.me.role, self.gis_conn.users.me.username))
else:
self.gis_conn = GIS()
def set_grid_sdf(self, lb_days=1000, use_query=False):
if not self.grid_url:
raise Exception('Grid URL Not Set')
else:
if use_query:
dates = get_dates_in_range(lb_days)
grid_fl = FeatureLayer(url=self.grid_url, gis=self.gis_conn)
self.grid_wkid = grid_fl.properties.extent.spatialReference.wkid
self.grid_sdf = grid_fl.query(where=form_query_string(dates)).df
else:
grid_fl = FeatureLayer(url=self.grid_url, gis=self.gis_conn)
self.grid_wkid = grid_fl.properties.extent.spatialReference.wkid
self.grid_sdf = grid_fl.query(return_all_records=False).df
def set_features(self):
df_list = []
for idx, row in enumerate(self.grid_sdf.iterrows()):
geom = Geometry(row[1].SHAPE)
sp_filter = filters.intersects(geom, self.grid_wkid)
data_fl = FeatureLayer(url=self.feat_url, gis=self.gis_conn)
df_list.append(
data_fl.query(geometry_filter=sp_filter, return_all_records=False).df
)
self.features = df_list
def set_selected(self, indicator):
created = False
out_sdf = None
print(len(self.grid_sdf))
for idx, row in enumerate(self.grid_sdf.iterrows()):
if not self.__getattribute__(indicator + '_url'):
df_current = SpatialDataFrame(
columns=field_schema.get(indicator),
geometry=[Geometry(json.loads(row[1].SHAPE.JSON))]
)
created = True
else:
# Negative Buffer to Select a Single Grid Cell
sp_filter = filters.intersects(
Geometry(row[1].SHAPE).buffer(-.1),
self.grid_wkid
)
data_fl = FeatureLayer(
url=self.__getattribute__(indicator + '_url'),
gis=self.gis_conn
)
df_current = data_fl.query(geometry_filter=sp_filter, return_all_records=False).df
if idx == 0:
out_sdf = df_current
else:
#out_sdf.merge(df_current)
out_sdf = out_sdf.merge_datasets(df_current)
#out_sdf = out_sdf.append(df_current)
self.selected = out_sdf.reset_index(drop=False)
print("Selected: " + str(len(out_sdf)))
return created
def create_layer(self, df, title):
print('Creating New Hosted Feature Layer: {}'.format(title))
if self.pub_gis_conn==None:
new_layer = df.to_featurelayer(
title,
gis=self.gis_conn
)
else:
new_layer = df.to_featurelayer(
title,
gis=self.pub_gis_conn
)
return new_layer.id
def update_layer(self, df, url):
feat_layer = FeatureLayer(url=url, gis=self.gis_conn)
res = feat_layer.edit_features(updates=df.to_featureset())
if 'updateResults' not in res:
raise Exception('Edit Features Returned Issues: {}'.format(res))
else:
return res['updateResults']
def run_poac(self, p1, apply_edits=True):
try:
new_flag = self.set_selected('poac')
df = positional_accuracy(
self.selected,
self.features,
p1
)
if self.debug:
df.to_featureclass(self.debug, 'poac', overwrite=True)
return df
if new_flag:
print(df.to_featureclass)
return [
df,
self.create_layer(
df,
'Positional Accuracy {}'.format(round(time.time()))
)
]
else:
if apply_edits:
return [
df,
self.update_layer(
df,
self.poac_url
)
]
else:
return df
except Exception as e:
print('Exception Running Positional Accuracy: {}'.format(str(e)))
def run_cmpl(self, comparison_sdf, apply_edits=True):
try:
new_flag = self.set_selected('cmpl')
df = completeness(
self.selected,
self.features,
comparison_sdf
)
if self.debug:
df.to_featureclass(self.debug, 'cmpl', overwrite=True)
return df
if new_flag:
return [
df,
self.create_layer(
df,
'Completeness {}'.format(round(time.time()))
)
]
else:
if apply_edits:
return [
df,
self.update_layer(
df,
self.cmpl_url
)
]
else:
return df
except Exception as e:
print('Exception Running Completeness: {}'.format(str(e)))
def run_curr(self, p1, date='1901-1-1', apply_edits=True):
try:
new_flag = self.set_selected('curr')
df = temporal_currency(
self.selected,
self.features,
p1,
date
)
if self.debug:
df.to_featureclass(self.debug, 'curr', overwrite=True)
return df
if new_flag:
print(df)
return [
df,
self.create_layer(
df,
'Temporal Currency {}'.format(round(time.time()))
)
]
else:
if apply_edits:
return [
df,
self.update_layer(
df,
self.curr_url
)
]
else:
return df
except Exception as e:
print('Exception Running Temporal Currency: {}'.format(str(e)))
def run_them(self, p1, apply_edits=True):
try:
new_flag = self.set_selected('them')
df = thematic_accuracy(
self.selected,
self.features,
p1
)
if new_flag:
return [
df,
self.create_layer(
df,
'Thematic Accuracy {}'.format(round(time.time()))
)
]
else:
if apply_edits:
return [
df,
self.update_layer(
df,
self.them_url
)
]
else:
return df
except Exception as e:
print('Exception Running Thematic Accuracy: {}'.format(str(e)))
def run_srln(self, p1, p2=None, search_value=1001, apply_edits=True):
try:
new_flag = self.set_selected('srln')
df = source_lineage(
self.selected,
self.features,
p1,
p2,
search_value
)
if self.debug:
df.to_featureclass(self.debug, 'srln', overwrite=True)
return df
if new_flag:
return [
df,
self.create_layer(
df,
'Source Lineage {}'.format(round(time.time()))
)
]
else:
if apply_edits:
return [
df,
self.update_layer(
df,
self.srln_url
)
]
else:
return df
except Exception as e:
print('Exception Running Source Lineage: {}'.format(str(e)))
def run_logc(self, p1, p2, p3, p4, apply_edits=True):
try:
new_flag = self.set_selected('logc')
df = logical_consistency(
self.selected,
self.features,
self.feat_url,
p1,
p2,
p3,
p4
)
if new_flag:
return [
df,
self.create_layer(
df,
'Logical Consistency {}'.format(round(time.time()))
)
]
else:
if apply_edits:
return [
df,
self.update_layer(
df,
self.logc_url
)
]
else:
return df
except Exception as e:
print('Exception Running Source Lineage: {}'.format(str(e)))
| 28.017897 | 98 | 0.453689 | [
"MIT"
] | twever/state-of-the-data | state-of-the-data-for-webgis/sotd_indicators/Indicator.py | 12,524 | Python |
# Generated by Django 3.1.2 on 2020-10-18 02:42
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('rosters', '0035_auto_20200827_0124'),
]
operations = [
migrations.AlterUniqueTogether(
name='daygroupday',
unique_together={('daygroup', 'day')},
),
]
| 19.944444 | 50 | 0.604457 | [
"MIT"
] | galojix/roster-wizard | rosters/migrations/0036_unique_together_daygroupday.py | 359 | Python |
# -*- coding: utf-8 -*-
from datetime import datetime
import pytest
from AIPscan.conftest import AIP_1_CREATION_DATE, AIP_2_CREATION_DATE
from AIPscan.conftest import ORIGINAL_FILE_SIZE as JPEG_1_01_FILE_SIZE
from AIPscan.conftest import PRESERVATION_FILE_SIZE as JPEG_1_02_FILE_SIZE
from AIPscan.Data import fields, report_data
from AIPscan.Data.tests import (
MOCK_STORAGE_SERVICE,
MOCK_STORAGE_SERVICE_ID,
MOCK_STORAGE_SERVICE_NAME,
)
from AIPscan.helpers import parse_datetime_bound
from AIPscan.test_helpers import create_test_storage_location
TOTAL_FILE_SIZE = JPEG_1_01_FILE_SIZE + JPEG_1_02_FILE_SIZE
DATE_BEFORE_AIP_1 = "2019-01-01"
DATE_AFTER_AIP_1 = "2020-01-02"
DATE_BEFORE_AIP_2 = "2020-05-30"
DATE_AFTER_AIP_2 = "2020-06-02"
class MockFormatsCountQueryResult:
"""Fixture for mocking SQLAlchemy query results."""
def __init__(self, file_format, file_count, total_size):
self.file_format = file_format
self.file_count = file_count
self.total_size = total_size
MOCK_QUERY_RESULTS = [
MockFormatsCountQueryResult(file_format="JPEG", file_count=5, total_size=12345678),
MockFormatsCountQueryResult(file_format="CSV", file_count=3, total_size=123456),
MockFormatsCountQueryResult(
file_format="MPEG-4 Media File", file_count=1, total_size=12345
),
]
@pytest.mark.parametrize(
"query_results, results_count",
[
# Empty result set, count is 0.
([], 0),
# Test the return of complete result set, count is the length
# of all results.
(MOCK_QUERY_RESULTS, len(MOCK_QUERY_RESULTS)),
# Test the return of only the first two results, count is 2.
(MOCK_QUERY_RESULTS[:2], 2),
],
)
def test_formats_count(app_instance, mocker, query_results, results_count):
"""Test that results match high-level expectations."""
query = mocker.patch("AIPscan.Data.report_data._formats_count_query")
query.return_value = query_results
get_ss = mocker.patch("AIPscan.Data._get_storage_service")
get_ss.return_value = MOCK_STORAGE_SERVICE
test_location = create_test_storage_location()
get_location = mocker.patch("AIPscan.Data._get_storage_location")
get_location.return_value = test_location
report = report_data.formats_count(
storage_service_id=MOCK_STORAGE_SERVICE_ID,
start_date=datetime.min,
end_date=datetime.max,
storage_location_id=test_location.id,
)
assert report[fields.FIELD_STORAGE_NAME] == MOCK_STORAGE_SERVICE_NAME
assert report[fields.FIELD_STORAGE_LOCATION] == test_location.description
assert len(report[fields.FIELD_FORMATS]) == results_count
@pytest.mark.parametrize(
"test_format", [mock_result for mock_result in MOCK_QUERY_RESULTS]
)
def test_formats_count_elements(app_instance, mocker, test_format):
"""Test that structure of versions data matches expectations."""
mock_query = mocker.patch("AIPscan.Data.report_data._formats_count_query")
mock_query.return_value = [test_format]
mock_get_ss_name = mocker.patch("AIPscan.Data._get_storage_service")
mock_get_ss_name.return_value = MOCK_STORAGE_SERVICE
report = report_data.formats_count(
MOCK_STORAGE_SERVICE_ID, datetime.min, datetime.max
)
report_format = report[fields.FIELD_FORMATS][0]
assert test_format.file_format == report_format.get(fields.FIELD_FORMAT)
assert test_format.file_count == report_format.get(fields.FIELD_COUNT)
assert test_format.total_size == report_format.get(fields.FIELD_SIZE)
@pytest.mark.parametrize(
"start_date, end_date, format_count, total_file_count, total_file_size",
[
# Not specifying dates should return all files and versions.
(None, None, 2, 3, TOTAL_FILE_SIZE),
# Start date before first AIP was ingested hould return all
# files and versions.
(DATE_BEFORE_AIP_1, None, 2, 3, TOTAL_FILE_SIZE),
# Start date that's the same day our first AIP was ingested
# should return all files and versions.
(AIP_1_CREATION_DATE, None, 2, 3, TOTAL_FILE_SIZE),
# Start date after our first AIP was ingested should return
# only the second JPEG version and ISO disk image.
(DATE_AFTER_AIP_1, None, 2, 2, JPEG_1_02_FILE_SIZE),
# End date before second AIP was ingested should return only
# the first JPEG version.
(None, DATE_BEFORE_AIP_2, 1, 1, JPEG_1_01_FILE_SIZE),
# End date that's the same day our second AIP was ingested
# should return all files and versions.
(None, AIP_2_CREATION_DATE, 2, 3, TOTAL_FILE_SIZE),
# End date that's after our second AIP was ingested should
# return all files and versions.
(None, DATE_AFTER_AIP_2, 2, 3, TOTAL_FILE_SIZE),
# Start and end dates that define a range in which we haven't
# ingested any AIPs should return no files or versions.
("2019-01-01", "2019-01-02", 0, 0, 0),
# Invalid values for start and end dates should be treated as
# None values and return both JPEG versions.
(True, "NOT A DATE", 2, 3, TOTAL_FILE_SIZE),
],
)
def test_formats_count_contents(
app_with_populated_format_versions,
start_date,
end_date,
format_count,
total_file_count,
total_file_size,
):
"""Test that content of response matches expectations.
This integration test uses a pre-populated fixture to verify that
the database access layer of our endpoint returns what we expect.
"""
results = report_data.formats_count(
storage_service_id=1,
start_date=parse_datetime_bound(start_date),
end_date=parse_datetime_bound(end_date, upper=True),
)
formats = results[fields.FIELD_FORMATS]
assert len(formats) == format_count
assert (
sum(format_.get(fields.FIELD_COUNT, 0) for format_ in formats)
== total_file_count
)
assert (
sum(format_.get(fields.FIELD_SIZE, 0) for format_ in formats) == total_file_size
)
| 38.044025 | 88 | 0.723425 | [
"Apache-2.0"
] | artefactual-labs/AIPscan | AIPscan/Data/tests/test_formats_count.py | 6,049 | Python |
import json
from kazoo.client import KazooClient
zk = KazooClient(hosts='127.0.0.1:2181')
zk.start()
value = json.dumps({'host': '127.0.0.2', 'port': 8080}).encode()
zk.ensure_path('/demo')
r = zk.create('/demo/rpc', value, ephemeral=True, sequence=True)
print(r)
zk.stop()
| 23 | 64 | 0.688406 | [
"MIT"
] | ResolveWang/rpc_demo | zk/producer.py | 276 | Python |
import io
import os
import random
import textwrap
from PIL import Image, ImageDraw, ImageFont
from telethon.tl.types import InputMessagesFilterDocument
from uniborg.util import admin_cmd
@borg.on(admin_cmd(pattern="srgb (.*)"))
async def sticklet(event):
R = random.randint(0,256)
G = random.randint(0,256)
B = random.randint(0,256)
sticktext = event.pattern_match.group(1)
await event.delete()
sticktext = textwrap.wrap(sticktext, width=10)
sticktext = '\n'.join(sticktext)
image = Image.new("RGBA", (512, 512), (255, 255, 255, 0))
draw = ImageDraw.Draw(image)
fontsize = 230
FONT_FILE = await get_font_file(event.client, "@FontRes")
font = ImageFont.truetype(FONT_FILE, size=fontsize)
    # shrink the font until both dimensions fit inside the 512x512 sticker canvas
    while max(draw.multiline_textsize(sticktext, font=font)) > 512:
        fontsize -= 3
        font = ImageFont.truetype(FONT_FILE, size=fontsize)
width, height = draw.multiline_textsize(sticktext, font=font)
draw.multiline_text(((512-width)/2,(512-height)/2), sticktext, font=font, fill=(R, G, B))
image_stream = io.BytesIO()
image_stream.name = "@AnonHexo.webp"
image.save(image_stream, "WebP")
image_stream.seek(0)
await event.reply("https://t.me/AnonHexo", file=image_stream)
try:
os.remove(FONT_FILE)
except:
pass
async def get_font_file(client, channel_id):
font_file_message_s = await client.get_messages(
entity=channel_id,
filter=InputMessagesFilterDocument,
limit=None
)
font_file_message = random.choice(font_file_message_s)
return await client.download_media(font_file_message)
| 24.61194 | 93 | 0.688902 | [
"MIT"
] | Fregiant16/fregiantuserbot | userbot/plugins/srgb.py | 1,649 | Python |
import platform
import json
import subprocess
import os
from urllib2 import urlopen
from lbryschema import __version__ as lbryschema_version
from lbryum import __version__ as LBRYUM_VERSION
from lbrynet import build_type, __version__ as lbrynet_version
from lbrynet.conf import ROOT_DIR
def get_lbrynet_version():
if build_type.BUILD == "dev":
try:
with open(os.devnull, 'w') as devnull:
git_dir = ROOT_DIR + '/.git'
return subprocess.check_output(
['git', '--git-dir='+git_dir, 'describe', '--dirty', '--always'],
stderr=devnull
).strip().lstrip('v')
except (subprocess.CalledProcessError, OSError):
print "failed to get version from git"
return lbrynet_version
def get_platform(get_ip=True):
p = {
"processor": platform.processor(),
"python_version": platform.python_version(),
"platform": platform.platform(),
"os_release": platform.release(),
"os_system": platform.system(),
"lbrynet_version": get_lbrynet_version(),
"lbryum_version": LBRYUM_VERSION,
"lbryschema_version": lbryschema_version,
"build": build_type.BUILD, # CI server sets this during build step
}
if get_ip:
try:
p['ip'] = json.load(urlopen('http://jsonip.com'))['ip']
except:
p['ip'] = "Could not determine IP"
return p
| 31.106383 | 85 | 0.621067 | [
"MIT"
] | mrlucky9/lbry | lbrynet/core/system_info.py | 1,462 | Python |
from _std_inplace_ops import *
from _std_construct1_ops import *
from _std_construct2_ops import *
| 24.75 | 33 | 0.848485 | [
"Apache-2.0"
] | Alienmaster/pykaldi | kaldi/fstext/_std_ops.py | 99 | Python |
import gflags
import logging
import threading
import time
FLAGS = gflags.FLAGS
gflags.DEFINE_integer("probe_frequency_secs", 10*60,
"How often to probe the logs for updates")
from ct.client import log_client
from ct.client import monitor
from ct.client import state
from ct.crypto import merkle
from ct.crypto import verify
from twisted.internet import reactor
from twisted.web import client as twisted_client
class ProberThread(threading.Thread):
"""A prober for scheduled updating of the log view."""
def __init__(self, ct_logs, db, cert_db, temp_db_factory, monitor_state_dir,
agent=None, state_keeper_class=None):
"""Initialize from a CtLogs proto."""
threading.Thread.__init__(self)
self.__monitors = []
self.__db = db
if not agent:
agent = twisted_client.Agent(reactor)
if not state_keeper_class:
state_keeper_class = state.StateKeeper
for log in ct_logs.ctlog:
if not log.log_server or not log.log_id or not log.public_key_info:
raise RuntimeError("Cannot start monitor: log proto has "
"missing or empty fields: %s" % log)
temp_db = temp_db_factory.create_storage(log.log_server)
client = log_client.AsyncLogClient(agent,
log.log_server,
temp_db)
hasher = merkle.TreeHasher()
verifier = verify.LogVerifier(log.public_key_info,
merkle.MerkleVerifier(hasher))
# Convert from standard Base64 to URL-safe Base64 so that the log ID
# can be used as part of a file path.
log_id_urlsafe = log.log_id.replace('/', '_').replace('+', '-')
state_keeper = state_keeper_class(monitor_state_dir +
"/" + log_id_urlsafe)
log_key = db.get_log_id(log.log_server)
self.__monitors.append(monitor.Monitor(client, verifier, hasher, db,
cert_db, log_key,
state_keeper))
self.__last_update_start_time = 0
self.__stopped = False
self.__called_later = None
def __repr__(self):
return "%r(%r)" % (self.__class__.__name__, self.__monitors)
def __str__(self):
return "%s(%s)" % (self.__class__.__name__, self.__monitors)
def _log_probed_callback(self, success, monitor):
if success:
logging.info("Data for %s updated: latest timestamp is %s" %
(monitor.servername,
time.strftime("%c", time.localtime(
monitor.data_timestamp/1000))))
else:
logging.error("Failed to update data for %s: latest timestamp "
"is %s" % (monitor.servername,
time.strftime("%c", time.localtime(
monitor.data_timestamp/1000))))
self.__probed += 1
if self.__probed == len(self.__monitors):
self._all_logs_probed()
def _all_logs_probed(self):
logging.info("Probe loop completed in %d seconds" %
(time.time() - self.__start_time))
sleep_time = max(0, self.__start_time +
FLAGS.probe_frequency_secs - time.time())
logging.info("Next probe loop in: %d seconds" % sleep_time)
self.__called_later = reactor.callLater(sleep_time,
self.probe_all_logs)
def _has_outstanding_call_later(self):
return self.__called_later and self.__called_later.active()
def probe_all_logs(self):
logging.info("Starting probe loop...")
self.__called_later = None
self.__start_time = time.time()
self.__probed = 0
"""Loop through all logs in the list and check for updates."""
if self.__monitors:
for monitor_ in self.__monitors:
monitor_result = monitor_.update()
monitor_result.addCallback(self._log_probed_callback, monitor_)
# TODO(hadfieldp): do we need an errback too?
else:
# If we're configured with no monitors we still need to behave
# correctly in order for the reactor to be stoppable.
self._all_logs_probed()
logging.info("Done starting probe loop.")
def run(self):
logging.info("Running reactor...")
self.__called_later = reactor.callLater(0, self.probe_all_logs)
reactor.run(installSignalHandlers=0)
logging.info("Reactor no longer running.")
def stop(self):
logging.info("Stopping reactor...")
if self._has_outstanding_call_later():
self.__called_later.cancel()
self.__called_later = None
reactor.stop()
logging.info("Reactor stopped.")
| 41.268293 | 80 | 0.581757 | [
"Apache-2.0"
] | DavadDi/archon | vendor/github.com/google/certificate-transparency/python/ct/client/prober.py | 5,076 | Python |
#!/usr/bin/python
# -*- coding: utf-8 -*-
from api.tests.base import BaseTest
class IndexTest(BaseTest):
def test_index(self):
# Expected result from server.
expected_result = "Hello Flask Restful Example!"
# Send request to index.
response = self.client.get("/")
# This raises an AssertionError
assert response.status_code == 200
# This raises an AssertionError
assert expected_result == response.json
| 22.761905 | 56 | 0.640167 | [
"MIT"
] | JackyCJ/flask-restful-login | api/tests/tests_index.py | 478 | Python |
# Generated by Django 3.0.10 on 2021-02-01 18:38
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('core', '0054_auto_20210201_1832'),
]
operations = [
migrations.AlterField(
model_name='bcmactivity',
name='MTPD',
field=models.PositiveSmallIntegerField(choices=[(1, '4 godz.'), (2, 'dzień'), (3, '2 dni'), (4, 'tydzień'), (5, '2 tygodnie'), (6, 'miesiąc'), (7, '2 miesiące'), (8, 'do odwołania')]),
),
migrations.AlterField(
model_name='bcmactivity',
name='TTN',
field=models.PositiveSmallIntegerField(blank=True, choices=[(1, '4 godz.'), (2, 'dzień'), (3, '2 dni'), (4, 'tydzień'), (5, '2 tygodnie'), (6, 'miesiąc'), (7, '2 miesiące'), (8, 'do odwołania')]),
),
migrations.AlterField(
model_name='equipment',
name='over_MTPD',
field=models.PositiveSmallIntegerField(choices=[('', '------'), (1, '4 godz.'), (2, 'dzień'), (3, '2 dni'), (4, 'tydzień'), (5, '2 tygodnie'), (6, 'miesiąc'), (7, '2 miesiące'), (8, 'do odwołania')], default=None, null=True),
),
migrations.AlterField(
model_name='information',
name='over_MTPD',
field=models.PositiveSmallIntegerField(choices=[('', '------'), (1, '4 godz.'), (2, 'dzień'), (3, '2 dni'), (4, 'tydzień'), (5, '2 tygodnie'), (6, 'miesiąc'), (7, '2 miesiące'), (8, 'do odwołania')], default=None, null=True),
),
migrations.AlterField(
model_name='location',
name='over_MTPD',
field=models.PositiveSmallIntegerField(choices=[('', '------'), (1, '4 godz.'), (2, 'dzień'), (3, '2 dni'), (4, 'tydzień'), (5, '2 tygodnie'), (6, 'miesiąc'), (7, '2 miesiące'), (8, 'do odwołania')], default=None, null=True),
),
migrations.AlterField(
model_name='position',
name='over_MTPD',
field=models.PositiveSmallIntegerField(choices=[('', '------'), (1, '4 godz.'), (2, 'dzień'), (3, '2 dni'), (4, 'tydzień'), (5, '2 tygodnie'), (6, 'miesiąc'), (7, '2 miesiące'), (8, 'do odwołania')], default=None, null=True),
),
migrations.AlterField(
model_name='supplies',
name='over_MTPD',
field=models.PositiveSmallIntegerField(choices=[('', '------'), (1, '4 godz.'), (2, 'dzień'), (3, '2 dni'), (4, 'tydzień'), (5, '2 tygodnie'), (6, 'miesiąc'), (7, '2 miesiące'), (8, 'do odwołania')], default=None, null=True),
),
]
| 52.55102 | 237 | 0.530874 | [
"MIT"
] | ig0r45ure/recipe-app-api | app/core/migrations/0055_auto_20210201_1838.py | 2,610 | Python |
#!/usr/bin/env python
# -*- coding: utf8 -*-
import sys
from xml.etree import ElementTree
from xml.etree.ElementTree import Element, SubElement
from lxml import etree
import codecs
from libs.constants import DEFAULT_ENCODING
from libs.ustr import ustr
XML_EXT = '.xml'
ENCODE_METHOD = DEFAULT_ENCODING
class PascalVocWriter:
def __init__(self, foldername, filename, imgSize,databaseSrc='Unknown', localImgPath=None):
self.foldername = foldername
self.filename = filename
self.databaseSrc = databaseSrc
self.imgSize = imgSize
self.boxlist = []
self.localImgPath = localImgPath
self.verified = False
def convertPoints2BndBox(self, QPoints):
points=[(p.x(), p.y()) for p in QPoints]
xmin = float('inf')
ymin = float('inf')
xmax = float('-inf')
ymax = float('-inf')
for p in points:
x = p[0]
y = p[1]
xmin = min(x, xmin)
ymin = min(y, ymin)
xmax = max(x, xmax)
ymax = max(y, ymax)
# Martin Kersner, 2015/11/12
# 0-valued coordinates of BB caused an error while
# training faster-rcnn object detector.
if xmin < 1:
xmin = 1
if ymin < 1:
ymin = 1
return (int(xmin), int(ymin), int(xmax), int(ymax))
def prettify(self, elem):
"""
Return a pretty-printed XML string for the Element.
"""
rough_string = ElementTree.tostring(elem, 'utf8')
root = etree.fromstring(rough_string)
return etree.tostring(root, pretty_print=True, encoding=ENCODE_METHOD).replace(" ".encode(), "\t".encode())
# minidom does not support UTF-8
'''reparsed = minidom.parseString(rough_string)
return reparsed.toprettyxml(indent="\t", encoding=ENCODE_METHOD)'''
def genXML(self):
"""
Return XML root
"""
# Check conditions
if self.filename is None or \
self.foldername is None or \
self.imgSize is None:
return None
top = Element('annotation')
if self.verified:
top.set('verified', 'yes')
folder = SubElement(top, 'data_set')
folder.text = self.foldername
# filename = SubElement(top, 'filename')
# filename.text = self.filename
# if self.localImgPath is not None:
# localImgPath = SubElement(top, 'path')
# localImgPath.text = self.localImgPath
# source = SubElement(top, 'source')
# database = SubElement(source, 'database')
# database.text = self.databaseSrc
size_part = SubElement(top, 'size')
width = SubElement(size_part, 'width')
height = SubElement(size_part, 'height')
depth = SubElement(size_part, 'depth')
width.text = str(self.imgSize[1])
height.text = str(self.imgSize[0])
if len(self.imgSize) == 3:
depth.text = str(self.imgSize[2])
else:
depth.text = '1'
# segmented = SubElement(top, 'segmented')
# segmented.text = '0'
return top
def addBndBox(self, xmin, ymin, xmax, ymax, name, difficult, parents, children, self_id):
bndbox = {'xmin': xmin, 'ymin': ymin, 'xmax': xmax, 'ymax': ymax}
bndbox['name'] = name
bndbox['difficult'] = difficult
bndbox['parents'] = parents
bndbox['children'] = children
bndbox['self_id'] = self_id
self.boxlist.append(bndbox)
def addBehavior(self, label, self_id, start_frame, end_frame, shapes=None):
bndbox = {}
bndbox['behavior'] = label
bndbox['self_id'] = self_id
bndbox['start_frame'] = start_frame
bndbox['end_frame'] = end_frame
bndbox['shapes'] = shapes
self.boxlist.append(bndbox)
def appendObjects(self, top):
for each_behavior in self.boxlist:
object_item = SubElement(top, 'behaviors')
object_id = SubElement(object_item, 'behavior_id')
object_id.text = str(each_behavior['self_id'])
name = SubElement(object_item, 'behavior')
name.text = str(each_behavior['behavior'])
start = SubElement(object_item, 'start_frame')
start.text = str(each_behavior['start_frame'])
if start.text == "":
start.text = "undefined"
end = SubElement(object_item, 'end_frame')
end.text = str(each_behavior['end_frame'])
if end.text == "":
end.text = "undefined"
if each_behavior['shapes'] != None:
shapes = SubElement(object_item, 'bounding_boxes')
for each_shape in each_behavior['shapes']:
bounding_box = self.convertPoints2BndBox(each_shape.points)
shape = SubElement(shapes, 'bounding_box')
frame = SubElement(shape, 'frame')
frame.text = str(each_shape.filename)
bndbox = SubElement(shape, 'bndbox')
xmin = SubElement(bndbox, 'xmin')
xmin.text = str(bounding_box[0])
ymin = SubElement(bndbox, 'ymin')
ymin.text = str(bounding_box[1])
xmax = SubElement(bndbox, 'xmax')
xmax.text = str(bounding_box[2])
ymax = SubElement(bndbox, 'ymax')
ymax.text = str(bounding_box[3])
# all_ids = []
# for each_object in self.boxlist:
# all_ids.append(each_object['self_id'])
# for each_object in self.boxlist:
# object_item = SubElement(top, 'object')
# object_id = SubElement(object_item, 'object_id')
# object_id.text = str(each_object['self_id'])
# name = SubElement(object_item, 'name')
# real_name = ustr(each_object['name'])
# name.text = str()
# for letter in real_name:
# if letter != ' ':
# name.text += letter
# else:
# name.text = str()
# if len(each_object['parents']) != 0:
# parents = SubElement(object_item, 'has_parents')
# for each_id in each_object['parents']:
# if each_id in all_ids:
# parent = SubElement(parents, 'parent')
# parent.text = str(each_id)
# if len(each_object['children']) != 0:
# children = SubElement(object_item, 'has_children')
# for each_id in each_object['children']:
# if each_id in all_ids:
# child = SubElement(children, 'child')
# child.text = str(each_id)
# pose = SubElement(object_item, 'pose')
# pose.text = "Unspecified"
# truncated = SubElement(object_item, 'truncated')
# if int(float(each_object['ymax'])) == int(float(self.imgSize[0])) or (int(float(each_object['ymin']))== 1):
# truncated.text = "1" # max == height or min
# elif (int(float(each_object['xmax']))==int(float(self.imgSize[1]))) or (int(float(each_object['xmin']))== 1):
# truncated.text = "1" # max == width or min
# else:
# truncated.text = "0"
# difficult = SubElement(object_item, 'difficult')
# difficult.text = str( bool(each_object['difficult']) & 1 )
# bndbox = SubElement(object_item, 'bndbox')
# xmin = SubElement(bndbox, 'xmin')
# xmin.text = str(each_object['xmin'])
# ymin = SubElement(bndbox, 'ymin')
# ymin.text = str(each_object['ymin'])
# xmax = SubElement(bndbox, 'xmax')
# xmax.text = str(each_object['xmax'])
# ymax = SubElement(bndbox, 'ymax')
# ymax.text = str(each_object['ymax'])
def save(self, targetFile=None):
root = self.genXML()
self.appendObjects(root)
out_file = None
if targetFile is None:
out_file = codecs.open(
self.filename + XML_EXT, 'w', encoding=ENCODE_METHOD)
else:
out_file = codecs.open(targetFile, 'w', encoding=ENCODE_METHOD)
prettifyResult = self.prettify(root)
out_file.write(prettifyResult.decode('utf8'))
out_file.close()
class PascalVocReader:
def __init__(self, filepath):
# shapes type:
        # [label, [(x1,y1), (x2,y2), (x3,y3), (x4,y4)], color, color, difficult]
self.shapes = []
self.filepath = filepath
self.behaviors = []
self.verified = False
try:
self.readBehavior()
except:
pass
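        # readBehavior() is best-effort here: annotation files without behaviour
        # data are tolerated and simply leave self.behaviors empty.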
def getShapes(self):
return self.shapes
def getBehaviors(self):
return self.behaviors
def addBehavior(self, behavior_name, behavior_id, starting_frame, ending_frame, shapes=None):
self.behaviors.append((behavior_name, behavior_id, starting_frame, ending_frame, shapes))
def addShape(self, label, bndbox, difficult, parents, children, object_id):
parent_ids = []
try:
for item in parents.findall('parent'):
parent_ids.append(int(item.text))
except:
pass
child_ids = []
try:
for item in children.findall('child'):
child_ids.append(int(item.text))
except:
pass
xmin = int(float(bndbox.find('xmin').text))
ymin = int(float(bndbox.find('ymin').text))
xmax = int(float(bndbox.find('xmax').text))
ymax = int(float(bndbox.find('ymax').text))
points = [(xmin, ymin), (xmax, ymin), (xmax, ymax), (xmin, ymax)]
self.shapes.append((label, points, parent_ids, child_ids, object_id, None, None, difficult, ))
def readBehavior(self):
        assert self.filepath.endswith(XML_EXT), "Unsupported file format"
parser = etree.XMLParser(encoding=ENCODE_METHOD)
xmltree = ElementTree.parse(self.filepath, parser=parser).getroot()
for object_iter in xmltree.findall('behaviors'):
label = object_iter.find('behavior').text
object_id = int(object_iter.find('behavior_id').text)
start_frame = object_iter.find('start_frame').text
end_frame = object_iter.find('end_frame').text
shapes = object_iter.find('bounding_boxes')
bounding_boxes = []
for shape_tier in shapes.findall('bounding_box'):
box = {}
bndbox = shape_tier.find("bndbox")
box["bndbox"] = bndbox
frame = shape_tier.find("frame")
box["frame"] = frame.text
bounding_boxes.append(box)
self.addBehavior(label, object_id, start_frame, end_frame, bounding_boxes)
return True
def parseXML(self):
        assert self.filepath.endswith(XML_EXT), "Unsupported file format"
parser = etree.XMLParser(encoding=ENCODE_METHOD)
xmltree = ElementTree.parse(self.filepath, parser=parser).getroot()
filename = xmltree.find('filename').text
try:
verified = xmltree.attrib['verified']
if verified == 'yes':
self.verified = True
except KeyError:
self.verified = False
for object_iter in xmltree.findall('object'):
bndbox = object_iter.find("bndbox")
label = object_iter.find('name').text
parents = object_iter.find('has_parents')
children = object_iter.find('has_children')
object_id = int(object_iter.find('object_id').text)
# Add chris
difficult = False
if object_iter.find('difficult') is not None:
difficult = bool(int(object_iter.find('difficult').text))
self.addShape(label, bndbox, difficult, parents, children, object_id)
return True
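# Minimal usage sketch for the reader above. The annotation path is hypothetical;
# it assumes an XML file previously produced by PascalVocWriter.save() exists at
# that location and that XML_EXT (defined at the top of this module) is '.xml'.
def _example_read_annotations(xml_path='frame_0001.xml'):
    reader = PascalVocReader(xml_path)  # behaviors are parsed on construction
    for name, behavior_id, start, end, boxes in reader.getBehaviors():
        print(name, behavior_id, start, end, len(boxes or []))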
| 37.555556 | 123 | 0.559993 | ["MIT"] | yuxluo/umtri_video_label | libs/pascal_voc_io.py | 12,168 | Python
"""
Description: This file implements a wrapper around the original SLCT code in C
Author: LogPAI team
License: MIT
"""
import sys
sys.path.append('../')
import hashlib
import pandas as pd
import re
from datetime import datetime
from ..logmatch import regexmatch
import subprocess
import os
import logging
logger = logging.getLogger(__name__)
class LogParser(object):
def __init__(self, indir, outdir, log_format, support, para_j=True, saveLog=False, rex=[]):
self.outdir = outdir
self.log_format = log_format
self.rex = rex
self.para = {}
self.para['dataPath'] = indir
self.para['para_j'] = para_j
self.para['savePath'] = outdir
self.para['support'] = support
self.para['saveLog'] = saveLog
def parse(self, logname):
self.para['dataName'] = logname
SLCT(self.para, self.log_format, self.rex)
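# Minimal usage sketch for the wrapper above. The directories, log format string
# and support threshold are illustrative values, not defaults shipped with this
# module; parse() expects the named log file to live inside indir.
def _example_parse():
    parser = LogParser(indir='../logs/HDFS/', outdir='SLCT_result/',
                       log_format='<Date> <Time> <Pid> <Level> <Component>: <Content>',
                       support=10, rex=[r'blk_-?\d+'])
    parser.parse('HDFS_2k.log')  # writes <name>_structured.csv and <name>_templates.csv to outdir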
def SLCT(para, log_format, rex):
startTime = datetime.now() # start timing
logname = os.path.join(para['dataPath'], para['dataName'])
logger.info("Parsing file: {}".format(logname))
# SLCT compilation
if not os.path.isfile('../SLCT/slct'):
try:
logger.info('Compile SLCT...\n>> gcc -o ../logparser/SLCT/slct -O2 ../logparser/SLCT/cslct.c')
subprocess.check_output('gcc -o ../logparser/SLCT/slct -O2 ../logparser/SLCT/cslct.c',
stderr=subprocess.STDOUT, shell=True)
except:
logger.info("Compile error! Please check GCC installed.\n")
raise
headers, regex = generate_logformat_regex(log_format)
df_log = log_to_dataframe(logname, regex, headers, log_format)
# Generate input file
with open('slct_input.log', 'w') as fw:
for line in df_log['Content']:
if rex:
for currentRex in rex:
line = re.sub(currentRex, '<*>', line)
fw.write(line + '\n')
# Run SLCT command
SLCT_command = extract_command(para, "slct_input.log")
try:
logger.info ("Run SLCT...\n>> {}".format(SLCT_command))
subprocess.check_call(SLCT_command, shell=True)
except:
logger.info("SLCT executable is invalid! Please compile it using GCC.\n")
raise
# Collect and dump templates
tempParameter = TempPara(path = "./", savePath=para['savePath'], logname="slct_input.log")
tempProcess(tempParameter)
matcher = regexmatch.PatternMatch(outdir=para['savePath'], logformat=log_format)
matched_df = matcher.match(logname, "temp_templates.csv")
# sys.exit()
os.remove("slct_input.log")
os.remove("slct_outliers.log")
os.remove("slct_templates.txt")
os.remove("temp_templates.csv")
for idx, line in matched_df.iterrows():
if line['EventTemplate'] == "None":
content = line['Content']
matched_df.loc[idx, "EventTemplate"] = content
matched_df.loc[idx, "EventId"] = hashlib.md5(content.encode('utf-8')).hexdigest()[0:8]
occ_dict = dict(matched_df['EventTemplate'].value_counts())
df_event = pd.DataFrame()
df_event['EventTemplate'] = matched_df['EventTemplate'].unique()
df_event['EventId'] = df_event['EventTemplate'].map(lambda x: hashlib.md5(x.encode('utf-8')).hexdigest()[0:8])
df_event['Occurrences'] = df_event['EventTemplate'].map(occ_dict)
df_event.to_csv(os.path.join(para['savePath'], para['dataName'] + "_templates.csv"), index=False, columns=["EventId", "EventTemplate", "Occurrences"])
matched_df.to_csv(os.path.join(para['savePath'], para['dataName'] + "_structured.csv"), index=False)
logger.info('Parsing done. [Time: {!s}]'.format(datetime.now() - startTime))
def extract_command(para, logname):
support = para['support']
parajTF = para['para_j']
input = ''
if parajTF:
input = '../logparser/SLCT/slct -j -o ' + 'slct_outliers.log -r -s ' + str(support) + ' ' + logname
else:
input = '../logparser/SLCT/slct -o ' + 'slct_outliers.log -r -s ' + str(support) + ' ' + logname
return input
def log_to_dataframe(log_file, regex, headers, logformat):
''' Function to transform log file to dataframe '''
log_messages = []
linecount = 0
with open(log_file, 'r') as fin:
for line in fin.readlines():
try:
match = regex.search(line.strip())
message = [match.group(header) for header in headers]
log_messages.append(message)
linecount += 1
except Exception as e:
pass
logdf = pd.DataFrame(log_messages, columns=headers)
logdf.insert(0, 'LineId', None)
logdf['LineId'] = [i + 1 for i in range(linecount)]
return logdf
def generate_logformat_regex(logformat):
'''
Function to generate regular expression to split log messages
'''
headers = []
splitters = re.split(r'(<[^<>]+>)', logformat)
regex = ''
for k in range(len(splitters)):
if k % 2 == 0:
            splitter = re.sub(' +', r'\s+', splitters[k])
regex += splitter
else:
header = splitters[k].strip('<').strip('>')
regex += '(?P<%s>.*?)' % header
headers.append(header)
regex = re.compile('^' + regex + '$')
return headers, regex
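# A quick illustration: generate_logformat_regex('<Date> <Time> <Content>') returns
# headers ['Date', 'Time', 'Content'] together with the compiled pattern
# '^(?P<Date>.*?)\s+(?P<Time>.*?)\s+(?P<Content>.*?)$'.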
class TempPara:
def __init__(self, path='./', logname='rawlog.log', savePath='./', templateName='slct_templates.txt', outlierName='slct_outliers.log'):
self.path = path
self.logname = logname
self.savePath = savePath
self.templateName = templateName
self.outlierName = outlierName
def tempProcess(tempPara):
logger.info('Dumping event templates...')
if not os.path.exists(tempPara.savePath):
os.makedirs(tempPara.savePath)
#read the templates
templates = []
with open('./' + tempPara.templateName) as tl:
for line in tl:
templates.append([0, line.strip(), 0])
pd.DataFrame(templates, columns=["EventId","EventTemplate","Occurrences"]).to_csv("temp_templates.csv", index=False)
def matchTempLog(templates, logs):
len_temp = {}
for tidx, temp in enumerate(templates):
tempL = temp.split()
templen = len(tempL)
if templen not in len_temp:
len_temp[templen] = [(tidx, tempL)]
else:
len_temp[templen].append((tidx, tempL))
logid_groupid = []
for idx, log in enumerate(logs):
logL = log.split()
logid = idx+1
if len(logL) in len_temp:
logid_groupid.append([idx + 1, get_groupid(logL, len_temp[len(logL)])])
else:
logid_groupid.append([idx+1, -1])
return logid_groupid
def get_groupid(logL, tempLs):
maxvalue = -1
for templ in tempLs:
starnum = 0
shot = 0
for idx, token in enumerate(logL):
if token == templ[1][idx] or templ[1][idx].count("*"):
shot += 1
if templ[1][idx].count("*"):
starnum += 1
shot = shot - starnum
if shot > maxvalue:
maxvalue = shot
groupid = templ[0]
    return groupid
| 35.660194 | 155 | 0.59284 | ["MIT"] | LogAnalysisTeam/logparser | logparser/SLCT/SLCT.py | 7,346 | Python
from discord.ext import commands, tasks
from collections import Counter, defaultdict
from .utils import checks, db, time, formats
from .utils.paginator import CannotPaginate
import pkg_resources
import logging
import discord
import textwrap
import datetime
import traceback
import itertools
import typing
import asyncpg
import asyncio
import pygit2
import psutil
import json
import os
import re
import io
import gc
log = logging.getLogger(__name__)
LOGGING_CHANNEL = 309632009427222529
class GatewayHandler(logging.Handler):
def __init__(self, cog):
self.cog = cog
super().__init__(logging.INFO)
def filter(self, record):
return record.name == 'discord.gateway' or 'Shard ID' in record.msg or 'Websocket closed ' in record.msg
def emit(self, record):
self.cog.add_record(record)
class Commands(db.Table):
id = db.PrimaryKeyColumn()
guild_id = db.Column(db.Integer(big=True), index=True)
channel_id = db.Column(db.Integer(big=True))
author_id = db.Column(db.Integer(big=True), index=True)
used = db.Column(db.Datetime, index=True)
prefix = db.Column(db.String)
command = db.Column(db.String, index=True)
failed = db.Column(db.Boolean, index=True)
_INVITE_REGEX = re.compile(r'(?:https?:\/\/)?discord(?:\.gg|\.com|app\.com\/invite)?\/[A-Za-z0-9]+')
def censor_invite(obj, *, _regex=_INVITE_REGEX):
return _regex.sub('[censored-invite]', str(obj))
def hex_value(arg):
return int(arg, base=16)
def object_at(addr):
for o in gc.get_objects():
if id(o) == addr:
return o
return None
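# Quick illustration of the helpers above (values are made up):
#   censor_invite('join https://discord.gg/abc123') -> 'join [censored-invite]'
#   hex_value('0x7f3a2c') -> the address parsed as a base-16 int (a 0x prefix is accepted)
#   object_at(addr)       -> the live object whose id() equals addr, or None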
class Stats(commands.Cog):
"""Bot usage statistics."""
def __init__(self, bot):
self.bot = bot
self.process = psutil.Process()
self._batch_lock = asyncio.Lock(loop=bot.loop)
self._data_batch = []
self.bulk_insert_loop.add_exception_type(asyncpg.PostgresConnectionError)
self.bulk_insert_loop.start()
self._gateway_queue = asyncio.Queue(loop=bot.loop)
self.gateway_worker.start()
# This is a datetime list
self._resumes = []
# shard_id: List[datetime]
self._identifies = defaultdict(list)
def _clear_gateway_data(self):
one_week_ago = datetime.datetime.utcnow() - datetime.timedelta(days=7)
to_remove = [index for index, dt in enumerate(self._resumes) if dt < one_week_ago]
for index in reversed(to_remove):
del self._resumes[index]
for shard_id, dates in self._identifies.items():
to_remove = [index for index, dt in enumerate(dates) if dt < one_week_ago]
for index in reversed(to_remove):
del dates[index]
async def bulk_insert(self):
query = """INSERT INTO commands (guild_id, channel_id, author_id, used, prefix, command, failed)
SELECT x.guild, x.channel, x.author, x.used, x.prefix, x.command, x.failed
FROM jsonb_to_recordset($1::jsonb) AS
x(guild BIGINT, channel BIGINT, author BIGINT, used TIMESTAMP, prefix TEXT, command TEXT, failed BOOLEAN)
"""
if self._data_batch:
await self.bot.pool.execute(query, self._data_batch)
total = len(self._data_batch)
if total > 1:
log.info('Registered %s commands to the database.', total)
self._data_batch.clear()
def cog_unload(self):
self.bulk_insert_loop.stop()
        self.gateway_worker.cancel()
@tasks.loop(seconds=10.0)
async def bulk_insert_loop(self):
async with self._batch_lock:
await self.bulk_insert()
@tasks.loop(seconds=0.0)
async def gateway_worker(self):
record = await self._gateway_queue.get()
await self.notify_gateway_status(record)
async def register_command(self, ctx):
if ctx.command is None:
return
command = ctx.command.qualified_name
self.bot.command_stats[command] += 1
message = ctx.message
destination = None
if ctx.guild is None:
destination = 'Private Message'
guild_id = None
else:
destination = f'#{message.channel} ({message.guild})'
guild_id = ctx.guild.id
log.info(f'{message.created_at}: {message.author} in {destination}: {message.content}')
async with self._batch_lock:
self._data_batch.append({
'guild': guild_id,
'channel': ctx.channel.id,
'author': ctx.author.id,
'used': message.created_at.isoformat(),
'prefix': ctx.prefix,
'command': command,
'failed': ctx.command_failed,
})
@commands.Cog.listener()
async def on_command_completion(self, ctx):
await self.register_command(ctx)
@commands.Cog.listener()
async def on_socket_response(self, msg):
self.bot.socket_stats[msg.get('t')] += 1
@property
def webhook(self):
wh_id, wh_token = self.bot.config.stat_webhook
hook = discord.Webhook.partial(id=wh_id, token=wh_token, adapter=discord.AsyncWebhookAdapter(self.bot.session))
return hook
async def log_error(self, *, ctx=None, extra=None):
e = discord.Embed(title='Error', colour=0xdd5f53)
e.description = f'```py\n{traceback.format_exc()}\n```'
e.add_field(name='Extra', value=extra, inline=False)
e.timestamp = datetime.datetime.utcnow()
if ctx is not None:
fmt = '{0} (ID: {0.id})'
author = fmt.format(ctx.author)
channel = fmt.format(ctx.channel)
guild = 'None' if ctx.guild is None else fmt.format(ctx.guild)
e.add_field(name='Author', value=author)
e.add_field(name='Channel', value=channel)
e.add_field(name='Guild', value=guild)
await self.webhook.send(embed=e)
@commands.command(hidden=True)
@commands.is_owner()
async def commandstats(self, ctx, limit=20):
"""Shows command stats.
Use a negative number for bottom instead of top.
This is only for the current session.
"""
counter = self.bot.command_stats
width = len(max(counter, key=len))
total = sum(counter.values())
if limit > 0:
common = counter.most_common(limit)
else:
common = counter.most_common()[limit:]
output = '\n'.join(f'{k:<{width}}: {c}' for k, c in common)
await ctx.send(f'```\n{output}\n```')
@commands.command(hidden=True)
async def socketstats(self, ctx):
delta = datetime.datetime.utcnow() - self.bot.uptime
minutes = delta.total_seconds() / 60
total = sum(self.bot.socket_stats.values())
cpm = total / minutes
await ctx.send(f'{total} socket events observed ({cpm:.2f}/minute):\n{self.bot.socket_stats}')
def get_bot_uptime(self, *, brief=False):
return time.human_timedelta(self.bot.uptime, accuracy=None, brief=brief, suffix=False)
@commands.command()
async def uptime(self, ctx):
"""Tells you how long the bot has been up for."""
await ctx.send(f'Uptime: **{self.get_bot_uptime()}**')
def format_commit(self, commit):
short, _, _ = commit.message.partition('\n')
short_sha2 = commit.hex[0:6]
commit_tz = datetime.timezone(datetime.timedelta(minutes=commit.commit_time_offset))
commit_time = datetime.datetime.fromtimestamp(commit.commit_time).replace(tzinfo=commit_tz)
# [`hash`](url) message (offset)
offset = time.human_timedelta(commit_time.astimezone(datetime.timezone.utc).replace(tzinfo=None), accuracy=1)
return f'[`{short_sha2}`](https://github.com/Rapptz/RoboDanny/commit/{commit.hex}) {short} ({offset})'
def get_last_commits(self, count=3):
repo = pygit2.Repository('.git')
commits = list(itertools.islice(repo.walk(repo.head.target, pygit2.GIT_SORT_TOPOLOGICAL), count))
return '\n'.join(self.format_commit(c) for c in commits)
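    # Each line produced above looks roughly like
    # "[`abc123`](https://github.com/Rapptz/RoboDanny/commit/<full sha>) Commit summary (2 days ago)",
    # where the short sha, summary and offset shown here are illustrative.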
@commands.command()
async def about(self, ctx):
"""Tells you information about the bot itself."""
revision = self.get_last_commits()
embed = discord.Embed(description='Latest Changes:\n' + revision)
embed.title = 'Official Bot Server Invite'
embed.url = 'https://discord.gg/DWEaqMy'
embed.colour = discord.Colour.blurple()
owner = self.bot.get_user(self.bot.owner_id)
embed.set_author(name=str(owner), icon_url=owner.avatar_url)
# statistics
total_members = 0
total_online = 0
offline = discord.Status.offline
for member in self.bot.get_all_members():
total_members += 1
if member.status is not offline:
total_online += 1
total_unique = len(self.bot.users)
text = 0
voice = 0
guilds = 0
for guild in self.bot.guilds:
guilds += 1
for channel in guild.channels:
if isinstance(channel, discord.TextChannel):
text += 1
elif isinstance(channel, discord.VoiceChannel):
voice += 1
embed.add_field(name='Members', value=f'{total_members} total\n{total_unique} unique\n{total_online} unique online')
embed.add_field(name='Channels', value=f'{text + voice} total\n{text} text\n{voice} voice')
memory_usage = self.process.memory_full_info().uss / 1024**2
cpu_usage = self.process.cpu_percent() / psutil.cpu_count()
embed.add_field(name='Process', value=f'{memory_usage:.2f} MiB\n{cpu_usage:.2f}% CPU')
version = pkg_resources.get_distribution('discord.py').version
embed.add_field(name='Guilds', value=guilds)
embed.add_field(name='Commands Run', value=sum(self.bot.command_stats.values()))
embed.add_field(name='Uptime', value=self.get_bot_uptime(brief=True))
embed.set_footer(text=f'Made with discord.py v{version}', icon_url='http://i.imgur.com/5BFecvA.png')
embed.timestamp = datetime.datetime.utcnow()
await ctx.send(embed=embed)
def censor_object(self, obj):
if not isinstance(obj, str) and obj.id in self.bot.blacklist:
return '[censored]'
return censor_invite(obj)
async def show_guild_stats(self, ctx):
lookup = (
'\N{FIRST PLACE MEDAL}',
'\N{SECOND PLACE MEDAL}',
'\N{THIRD PLACE MEDAL}',
'\N{SPORTS MEDAL}',
'\N{SPORTS MEDAL}'
)
embed = discord.Embed(title='Server Command Stats', colour=discord.Colour.blurple())
# total command uses
query = "SELECT COUNT(*), MIN(used) FROM commands WHERE guild_id=$1;"
count = await ctx.db.fetchrow(query, ctx.guild.id)
embed.description = f'{count[0]} commands used.'
embed.set_footer(text='Tracking command usage since').timestamp = count[1] or datetime.datetime.utcnow()
query = """SELECT command,
COUNT(*) as "uses"
FROM commands
WHERE guild_id=$1
GROUP BY command
ORDER BY "uses" DESC
LIMIT 5;
"""
records = await ctx.db.fetch(query, ctx.guild.id)
value = '\n'.join(f'{lookup[index]}: {command} ({uses} uses)'
for (index, (command, uses)) in enumerate(records)) or 'No Commands'
embed.add_field(name='Top Commands', value=value, inline=True)
query = """SELECT command,
COUNT(*) as "uses"
FROM commands
WHERE guild_id=$1
AND used > (CURRENT_TIMESTAMP - INTERVAL '1 day')
GROUP BY command
ORDER BY "uses" DESC
LIMIT 5;
"""
records = await ctx.db.fetch(query, ctx.guild.id)
value = '\n'.join(f'{lookup[index]}: {command} ({uses} uses)'
for (index, (command, uses)) in enumerate(records)) or 'No Commands.'
embed.add_field(name='Top Commands Today', value=value, inline=True)
embed.add_field(name='\u200b', value='\u200b', inline=True)
query = """SELECT author_id,
COUNT(*) AS "uses"
FROM commands
WHERE guild_id=$1
GROUP BY author_id
ORDER BY "uses" DESC
LIMIT 5;
"""
records = await ctx.db.fetch(query, ctx.guild.id)
value = '\n'.join(f'{lookup[index]}: <@!{author_id}> ({uses} bot uses)'
for (index, (author_id, uses)) in enumerate(records)) or 'No bot users.'
embed.add_field(name='Top Command Users', value=value, inline=True)
query = """SELECT author_id,
COUNT(*) AS "uses"
FROM commands
WHERE guild_id=$1
AND used > (CURRENT_TIMESTAMP - INTERVAL '1 day')
GROUP BY author_id
ORDER BY "uses" DESC
LIMIT 5;
"""
records = await ctx.db.fetch(query, ctx.guild.id)
value = '\n'.join(f'{lookup[index]}: <@!{author_id}> ({uses} bot uses)'
for (index, (author_id, uses)) in enumerate(records)) or 'No command users.'
embed.add_field(name='Top Command Users Today', value=value, inline=True)
await ctx.send(embed=embed)
async def show_member_stats(self, ctx, member):
lookup = (
'\N{FIRST PLACE MEDAL}',
'\N{SECOND PLACE MEDAL}',
'\N{THIRD PLACE MEDAL}',
'\N{SPORTS MEDAL}',
'\N{SPORTS MEDAL}'
)
embed = discord.Embed(title='Command Stats', colour=member.colour)
embed.set_author(name=str(member), icon_url=member.avatar_url)
# total command uses
query = "SELECT COUNT(*), MIN(used) FROM commands WHERE guild_id=$1 AND author_id=$2;"
count = await ctx.db.fetchrow(query, ctx.guild.id, member.id)
embed.description = f'{count[0]} commands used.'
embed.set_footer(text='First command used').timestamp = count[1] or datetime.datetime.utcnow()
query = """SELECT command,
COUNT(*) as "uses"
FROM commands
WHERE guild_id=$1 AND author_id=$2
GROUP BY command
ORDER BY "uses" DESC
LIMIT 5;
"""
records = await ctx.db.fetch(query, ctx.guild.id, member.id)
value = '\n'.join(f'{lookup[index]}: {command} ({uses} uses)'
for (index, (command, uses)) in enumerate(records)) or 'No Commands'
embed.add_field(name='Most Used Commands', value=value, inline=False)
query = """SELECT command,
COUNT(*) as "uses"
FROM commands
WHERE guild_id=$1
AND author_id=$2
AND used > (CURRENT_TIMESTAMP - INTERVAL '1 day')
GROUP BY command
ORDER BY "uses" DESC
LIMIT 5;
"""
records = await ctx.db.fetch(query, ctx.guild.id, member.id)
value = '\n'.join(f'{lookup[index]}: {command} ({uses} uses)'
for (index, (command, uses)) in enumerate(records)) or 'No Commands'
embed.add_field(name='Most Used Commands Today', value=value, inline=False)
await ctx.send(embed=embed)
@commands.group(invoke_without_command=True)
@commands.guild_only()
@commands.cooldown(1, 30.0, type=commands.BucketType.member)
async def stats(self, ctx, *, member: discord.Member = None):
"""Tells you command usage stats for the server or a member."""
async with ctx.typing():
if member is None:
await self.show_guild_stats(ctx)
else:
await self.show_member_stats(ctx, member)
@stats.command(name='global')
@commands.is_owner()
async def stats_global(self, ctx):
"""Global all time command statistics."""
query = "SELECT COUNT(*) FROM commands;"
total = await ctx.db.fetchrow(query)
e = discord.Embed(title='Command Stats', colour=discord.Colour.blurple())
e.description = f'{total[0]} commands used.'
lookup = (
'\N{FIRST PLACE MEDAL}',
'\N{SECOND PLACE MEDAL}',
'\N{THIRD PLACE MEDAL}',
'\N{SPORTS MEDAL}',
'\N{SPORTS MEDAL}'
)
query = """SELECT command, COUNT(*) AS "uses"
FROM commands
GROUP BY command
ORDER BY "uses" DESC
LIMIT 5;
"""
records = await ctx.db.fetch(query)
value = '\n'.join(f'{lookup[index]}: {command} ({uses} uses)' for (index, (command, uses)) in enumerate(records))
e.add_field(name='Top Commands', value=value, inline=False)
query = """SELECT guild_id, COUNT(*) AS "uses"
FROM commands
GROUP BY guild_id
ORDER BY "uses" DESC
LIMIT 5;
"""
records = await ctx.db.fetch(query)
value = []
for (index, (guild_id, uses)) in enumerate(records):
if guild_id is None:
guild = 'Private Message'
else:
guild = self.censor_object(self.bot.get_guild(guild_id) or f'<Unknown {guild_id}>')
emoji = lookup[index]
value.append(f'{emoji}: {guild} ({uses} uses)')
e.add_field(name='Top Guilds', value='\n'.join(value), inline=False)
query = """SELECT author_id, COUNT(*) AS "uses"
FROM commands
GROUP BY author_id
ORDER BY "uses" DESC
LIMIT 5;
"""
records = await ctx.db.fetch(query)
value = []
for (index, (author_id, uses)) in enumerate(records):
user = self.censor_object(self.bot.get_user(author_id) or f'<Unknown {author_id}>')
emoji = lookup[index]
value.append(f'{emoji}: {user} ({uses} uses)')
e.add_field(name='Top Users', value='\n'.join(value), inline=False)
await ctx.send(embed=e)
@stats.command(name='today')
@commands.is_owner()
async def stats_today(self, ctx):
"""Global command statistics for the day."""
query = "SELECT failed, COUNT(*) FROM commands WHERE used > (CURRENT_TIMESTAMP - INTERVAL '1 day') GROUP BY failed;"
total = await ctx.db.fetch(query)
failed = 0
success = 0
question = 0
for state, count in total:
if state is False:
success += count
elif state is True:
failed += count
else:
question += count
e = discord.Embed(title='Last 24 Hour Command Stats', colour=discord.Colour.blurple())
e.description = f'{failed + success + question} commands used today. ' \
f'({success} succeeded, {failed} failed, {question} unknown)'
lookup = (
'\N{FIRST PLACE MEDAL}',
'\N{SECOND PLACE MEDAL}',
'\N{THIRD PLACE MEDAL}',
'\N{SPORTS MEDAL}',
'\N{SPORTS MEDAL}'
)
query = """SELECT command, COUNT(*) AS "uses"
FROM commands
WHERE used > (CURRENT_TIMESTAMP - INTERVAL '1 day')
GROUP BY command
ORDER BY "uses" DESC
LIMIT 5;
"""
records = await ctx.db.fetch(query)
value = '\n'.join(f'{lookup[index]}: {command} ({uses} uses)' for (index, (command, uses)) in enumerate(records))
e.add_field(name='Top Commands', value=value, inline=False)
query = """SELECT guild_id, COUNT(*) AS "uses"
FROM commands
WHERE used > (CURRENT_TIMESTAMP - INTERVAL '1 day')
GROUP BY guild_id
ORDER BY "uses" DESC
LIMIT 5;
"""
records = await ctx.db.fetch(query)
value = []
for (index, (guild_id, uses)) in enumerate(records):
if guild_id is None:
guild = 'Private Message'
else:
guild = self.censor_object(self.bot.get_guild(guild_id) or f'<Unknown {guild_id}>')
emoji = lookup[index]
value.append(f'{emoji}: {guild} ({uses} uses)')
e.add_field(name='Top Guilds', value='\n'.join(value), inline=False)
query = """SELECT author_id, COUNT(*) AS "uses"
FROM commands
WHERE used > (CURRENT_TIMESTAMP - INTERVAL '1 day')
GROUP BY author_id
ORDER BY "uses" DESC
LIMIT 5;
"""
records = await ctx.db.fetch(query)
value = []
for (index, (author_id, uses)) in enumerate(records):
user = self.censor_object(self.bot.get_user(author_id) or f'<Unknown {author_id}>')
emoji = lookup[index]
value.append(f'{emoji}: {user} ({uses} uses)')
e.add_field(name='Top Users', value='\n'.join(value), inline=False)
await ctx.send(embed=e)
async def send_guild_stats(self, e, guild):
e.add_field(name='Name', value=guild.name)
e.add_field(name='ID', value=guild.id)
e.add_field(name='Shard ID', value=guild.shard_id or 'N/A')
e.add_field(name='Owner', value=f'{guild.owner} (ID: {guild.owner.id})')
bots = sum(m.bot for m in guild.members)
total = guild.member_count
online = sum(m.status is discord.Status.online for m in guild.members)
e.add_field(name='Members', value=str(total))
e.add_field(name='Bots', value=f'{bots} ({bots/total:.2%})')
e.add_field(name='Online', value=f'{online} ({online/total:.2%})')
if guild.icon:
e.set_thumbnail(url=guild.icon_url)
if guild.me:
e.timestamp = guild.me.joined_at
await self.webhook.send(embed=e)
@stats_today.before_invoke
@stats_global.before_invoke
async def before_stats_invoke(self, ctx):
await ctx.trigger_typing()
@commands.Cog.listener()
async def on_guild_join(self, guild):
e = discord.Embed(colour=0x53dda4, title='New Guild') # green colour
await self.send_guild_stats(e, guild)
@commands.Cog.listener()
async def on_guild_remove(self, guild):
e = discord.Embed(colour=0xdd5f53, title='Left Guild') # red colour
await self.send_guild_stats(e, guild)
@commands.Cog.listener()
async def on_command_error(self, ctx, error):
await self.register_command(ctx)
if not isinstance(error, (commands.CommandInvokeError, commands.ConversionError)):
return
error = error.original
if isinstance(error, (discord.Forbidden, discord.NotFound, CannotPaginate)):
return
e = discord.Embed(title='Command Error', colour=0xcc3366)
e.add_field(name='Name', value=ctx.command.qualified_name)
e.add_field(name='Author', value=f'{ctx.author} (ID: {ctx.author.id})')
fmt = f'Channel: {ctx.channel} (ID: {ctx.channel.id})'
if ctx.guild:
fmt = f'{fmt}\nGuild: {ctx.guild} (ID: {ctx.guild.id})'
e.add_field(name='Location', value=fmt, inline=False)
e.add_field(name='Content', value=textwrap.shorten(ctx.message.content, width=512))
exc = ''.join(traceback.format_exception(type(error), error, error.__traceback__, chain=False))
e.description = f'```py\n{exc}\n```'
e.timestamp = datetime.datetime.utcnow()
await self.webhook.send(embed=e)
@commands.Cog.listener()
async def on_socket_raw_send(self, data):
# kind of weird way to check if we're sending
# IDENTIFY or RESUME
if '"op":2' not in data and '"op":6' not in data:
return
back_to_json = json.loads(data)
if back_to_json['op'] == 2:
payload = back_to_json['d']
inner_shard = payload.get('shard', [0])
self._identifies[inner_shard[0]].append(datetime.datetime.utcnow())
else:
self._resumes.append(datetime.datetime.utcnow())
# don't want to permanently grow memory
self._clear_gateway_data()
def add_record(self, record):
# if self.bot.config.debug:
# return
self._gateway_queue.put_nowait(record)
async def notify_gateway_status(self, record):
attributes = {
'INFO': '\N{INFORMATION SOURCE}',
'WARNING': '\N{WARNING SIGN}'
}
emoji = attributes.get(record.levelname, '\N{CROSS MARK}')
dt = datetime.datetime.utcfromtimestamp(record.created)
msg = f'{emoji} `[{dt:%Y-%m-%d %H:%M:%S}] {record.message}`'
await self.webhook.send(msg, username='Gateway', avatar_url='https://i.imgur.com/4PnCKB3.png')
@commands.command(hidden=True)
@commands.is_owner()
async def bothealth(self, ctx):
"""Various bot health monitoring tools."""
# This uses a lot of private methods because there is no
# clean way of doing this otherwise.
HEALTHY = discord.Colour(value=0x43B581)
UNHEALTHY = discord.Colour(value=0xF04947)
WARNING = discord.Colour(value=0xF09E47)
total_warnings = 0
embed = discord.Embed(title='Bot Health Report', colour=HEALTHY)
# Check the connection pool health.
pool = self.bot.pool
total_waiting = len(pool._queue._getters)
current_generation = pool._generation
description = [
f'Total `Pool.acquire` Waiters: {total_waiting}',
f'Current Pool Generation: {current_generation}',
f'Connections In Use: {len(pool._holders) - pool._queue.qsize()}'
]
questionable_connections = 0
connection_value = []
for index, holder in enumerate(pool._holders, start=1):
generation = holder._generation
in_use = holder._in_use is not None
is_closed = holder._con is None or holder._con.is_closed()
display = f'gen={holder._generation} in_use={in_use} closed={is_closed}'
questionable_connections += any((in_use, generation != current_generation))
connection_value.append(f'<Holder i={index} {display}>')
joined_value = '\n'.join(connection_value)
embed.add_field(name='Connections', value=f'```py\n{joined_value}\n```', inline=False)
spam_control = self.bot.spam_control
being_spammed = [
str(key) for key, value in spam_control._cache.items()
if value._tokens == 0
]
description.append(f'Current Spammers: {", ".join(being_spammed) if being_spammed else "None"}')
description.append(f'Questionable Connections: {questionable_connections}')
total_warnings += questionable_connections
if being_spammed:
embed.colour = WARNING
total_warnings += 1
try:
task_retriever = asyncio.Task.all_tasks
except AttributeError:
# future proofing for 3.9 I guess
task_retriever = asyncio.all_tasks
        all_tasks = task_retriever(loop=self.bot.loop)
event_tasks = [
t for t in all_tasks
if 'Client._run_event' in repr(t) and not t.done()
]
cogs_directory = os.path.dirname(__file__)
tasks_directory = os.path.join('discord', 'ext', 'tasks', '__init__.py')
inner_tasks = [
t for t in all_tasks
if cogs_directory in repr(t) or tasks_directory in repr(t)
]
bad_inner_tasks = ", ".join(hex(id(t)) for t in inner_tasks if t.done() and t._exception is not None)
total_warnings += bool(bad_inner_tasks)
embed.add_field(name='Inner Tasks', value=f'Total: {len(inner_tasks)}\nFailed: {bad_inner_tasks or "None"}')
embed.add_field(name='Events Waiting', value=f'Total: {len(event_tasks)}', inline=False)
command_waiters = len(self._data_batch)
is_locked = self._batch_lock.locked()
description.append(f'Commands Waiting: {command_waiters}, Batch Locked: {is_locked}')
# RESUME/IDENTIFY data
yesterday = datetime.datetime.utcnow() - datetime.timedelta(days=1)
total_resumes = sum(1 for dt in self._resumes if dt > yesterday)
identifies = {
shard_id: sum(1 for dt in dates if dt > yesterday)
for shard_id, dates in self._identifies.items()
}
absolute_total_identifies = sum(identifies.values())
resume_info_builder = [
f'Total RESUMEs: {total_resumes}',
f'Total IDENTIFYs: {absolute_total_identifies}'
]
for shard_id, total in identifies.items():
resume_info_builder.append(f'Shard ID {shard_id} IDENTIFYs: {total}')
if absolute_total_identifies >= (len(self.bot.shards) * 5):
total_warnings += 1
embed.colour = WARNING
embed.add_field(name='Gateway (last 24 hours)', value='\n'.join(resume_info_builder), inline=False)
memory_usage = self.process.memory_full_info().uss / 1024**2
cpu_usage = self.process.cpu_percent() / psutil.cpu_count()
embed.add_field(name='Process', value=f'{memory_usage:.2f} MiB\n{cpu_usage:.2f}% CPU', inline=False)
global_rate_limit = not self.bot.http._global_over.is_set()
description.append(f'Global Rate Limit: {global_rate_limit}')
if command_waiters >= 8:
total_warnings += 1
embed.colour = WARNING
if global_rate_limit or total_warnings >= 9:
embed.colour = UNHEALTHY
embed.set_footer(text=f'{total_warnings} warning(s)')
embed.description = '\n'.join(description)
await ctx.send(embed=embed)
@commands.command(hidden=True, aliases=['cancel_task'])
@commands.is_owner()
async def debug_task(self, ctx, memory_id: hex_value):
"""Debug a task by a memory location."""
task = object_at(memory_id)
if task is None or not isinstance(task, asyncio.Task):
return await ctx.send(f'Could not find Task object at {hex(memory_id)}.')
if ctx.invoked_with == 'cancel_task':
task.cancel()
return await ctx.send(f'Cancelled task object {task!r}.')
paginator = commands.Paginator(prefix='```py')
fp = io.StringIO()
frames = len(task.get_stack())
paginator.add_line(f'# Total Frames: {frames}')
task.print_stack(file=fp)
for line in fp.getvalue().splitlines():
paginator.add_line(line)
for page in paginator.pages:
await ctx.send(page)
async def tabulate_query(self, ctx, query, *args):
records = await ctx.db.fetch(query, *args)
if len(records) == 0:
return await ctx.send('No results found.')
headers = list(records[0].keys())
table = formats.TabularData()
table.set_columns(headers)
table.add_rows(list(r.values()) for r in records)
render = table.render()
fmt = f'```\n{render}\n```'
if len(fmt) > 2000:
fp = io.BytesIO(fmt.encode('utf-8'))
await ctx.send('Too many results...', file=discord.File(fp, 'results.txt'))
else:
await ctx.send(fmt)
@commands.group(hidden=True, invoke_without_command=True)
@commands.is_owner()
async def command_history(self, ctx):
"""Command history."""
query = """SELECT
CASE failed
WHEN TRUE THEN command || ' [!]'
ELSE command
END AS "command",
to_char(used, 'Mon DD HH12:MI:SS AM') AS "invoked",
author_id,
guild_id
FROM commands
ORDER BY used DESC
LIMIT 15;
"""
await self.tabulate_query(ctx, query)
@command_history.command(name='for')
@commands.is_owner()
async def command_history_for(self, ctx, days: typing.Optional[int] = 7, *, command: str):
"""Command history for a command."""
query = """SELECT *, t.success + t.failed AS "total"
FROM (
SELECT guild_id,
SUM(CASE WHEN failed THEN 0 ELSE 1 END) AS "success",
SUM(CASE WHEN failed THEN 1 ELSE 0 END) AS "failed"
FROM commands
WHERE command=$1
AND used > (CURRENT_TIMESTAMP - $2::interval)
GROUP BY guild_id
) AS t
ORDER BY "total" DESC
LIMIT 30;
"""
await self.tabulate_query(ctx, query, command, datetime.timedelta(days=days))
@command_history.command(name='guild', aliases=['server'])
@commands.is_owner()
async def command_history_guild(self, ctx, guild_id: int):
"""Command history for a guild."""
query = """SELECT
CASE failed
WHEN TRUE THEN command || ' [!]'
ELSE command
END AS "command",
channel_id,
author_id,
used
FROM commands
WHERE guild_id=$1
ORDER BY used DESC
LIMIT 15;
"""
await self.tabulate_query(ctx, query, guild_id)
@command_history.command(name='user', aliases=['member'])
@commands.is_owner()
async def command_history_user(self, ctx, user_id: int):
"""Command history for a user."""
query = """SELECT
CASE failed
WHEN TRUE THEN command || ' [!]'
ELSE command
END AS "command",
guild_id,
used
FROM commands
WHERE author_id=$1
ORDER BY used DESC
LIMIT 20;
"""
await self.tabulate_query(ctx, query, user_id)
@command_history.command(name='log')
@commands.is_owner()
async def command_history_log(self, ctx, days=7):
"""Command history log for the last N days."""
query = """SELECT command, COUNT(*)
FROM commands
WHERE used > (CURRENT_TIMESTAMP - $1::interval)
GROUP BY command
ORDER BY 2 DESC
"""
all_commands = {
c.qualified_name: 0
for c in self.bot.walk_commands()
}
records = await ctx.db.fetch(query, datetime.timedelta(days=days))
for name, uses in records:
if name in all_commands:
all_commands[name] = uses
as_data = sorted(all_commands.items(), key=lambda t: t[1], reverse=True)
table = formats.TabularData()
table.set_columns(['Command', 'Uses'])
table.add_rows(tup for tup in as_data)
render = table.render()
embed = discord.Embed(title='Summary', colour=discord.Colour.green())
embed.set_footer(text='Since').timestamp = datetime.datetime.utcnow() - datetime.timedelta(days=days)
top_ten = '\n'.join(f'{command}: {uses}' for command, uses in records[:10])
bottom_ten = '\n'.join(f'{command}: {uses}' for command, uses in records[-10:])
embed.add_field(name='Top 10', value=top_ten)
embed.add_field(name='Bottom 10', value=bottom_ten)
unused = ', '.join(name for name, uses in as_data if uses == 0)
if len(unused) > 1024:
unused = 'Way too many...'
embed.add_field(name='Unused', value=unused, inline=False)
await ctx.send(embed=embed, file=discord.File(io.BytesIO(render.encode()), filename='full_results.txt'))
@command_history.command(name='cog')
@commands.is_owner()
async def command_history_cog(self, ctx, days: typing.Optional[int] = 7, *, cog: str = None):
"""Command history for a cog or grouped by a cog."""
interval = datetime.timedelta(days=days)
if cog is not None:
cog = self.bot.get_cog(cog)
if cog is None:
return await ctx.send(f'Unknown cog: {cog}')
query = """SELECT *, t.success + t.failed AS "total"
FROM (
SELECT command,
SUM(CASE WHEN failed THEN 0 ELSE 1 END) AS "success",
SUM(CASE WHEN failed THEN 1 ELSE 0 END) AS "failed"
FROM commands
WHERE command = any($1::text[])
AND used > (CURRENT_TIMESTAMP - $2::interval)
GROUP BY command
) AS t
ORDER BY "total" DESC
LIMIT 30;
"""
return await self.tabulate_query(ctx, query, [c.qualified_name for c in cog.walk_commands()], interval)
# A more manual query with a manual grouper.
query = """SELECT *, t.success + t.failed AS "total"
FROM (
SELECT command,
SUM(CASE WHEN failed THEN 0 ELSE 1 END) AS "success",
SUM(CASE WHEN failed THEN 1 ELSE 0 END) AS "failed"
FROM commands
WHERE used > (CURRENT_TIMESTAMP - $1::interval)
GROUP BY command
) AS t;
"""
class Count:
__slots__ = ('success', 'failed', 'total')
def __init__(self):
self.success = 0
self.failed = 0
self.total = 0
def add(self, record):
self.success += record['success']
self.failed += record['failed']
self.total += record['total']
data = defaultdict(Count)
records = await ctx.db.fetch(query, interval)
for record in records:
command = self.bot.get_command(record['command'])
if command is None or command.cog is None:
data['No Cog'].add(record)
else:
data[command.cog.qualified_name].add(record)
table = formats.TabularData()
table.set_columns(['Cog', 'Success', 'Failed', 'Total'])
data = sorted([
(cog, e.success, e.failed, e.total)
for cog, e in data.items()
], key=lambda t: t[-1], reverse=True)
table.add_rows(data)
render = table.render()
await ctx.safe_send(f'```\n{render}\n```')
old_on_error = commands.AutoShardedBot.on_error
async def on_error(self, event, *args, **kwargs):
e = discord.Embed(title='Event Error', colour=0xa32952)
e.add_field(name='Event', value=event)
e.description = f'```py\n{traceback.format_exc()}\n```'
e.timestamp = datetime.datetime.utcnow()
args_str = ['```py']
for index, arg in enumerate(args):
args_str.append(f'[{index}]: {arg!r}')
args_str.append('```')
e.add_field(name='Args', value='\n'.join(args_str), inline=False)
hook = self.get_cog('Stats').webhook
try:
await hook.send(embed=e)
except:
pass
def setup(bot):
if not hasattr(bot, 'command_stats'):
bot.command_stats = Counter()
if not hasattr(bot, 'socket_stats'):
bot.socket_stats = Counter()
cog = Stats(bot)
bot.add_cog(cog)
bot._stats_cog_gateway_handler = handler = GatewayHandler(cog)
logging.getLogger().addHandler(handler)
commands.AutoShardedBot.on_error = on_error
def teardown(bot):
commands.AutoShardedBot.on_error = old_on_error
logging.getLogger().removeHandler(bot._stats_cog_gateway_handler)
del bot._stats_cog_gateway_handler
| 37.937674 | 124 | 0.570532 | ["MIT"] | ymypengueni/RoboDanny | cogs/stats.py | 40,783 | Python
# =================================================================
# IMPORT REQUIRED LIBRARIES
# =================================================================
import os
# =================================================================
# READ DATA
# =================================================================
data_location = os.path.join(os.path.abspath(""), '2021/day-06-lanternfish/')
# with open(os.path.join(data_location, 'input_small.txt'), 'r') as f:
with open(os.path.join(data_location, 'input.txt'), 'r') as f:
data = f.read().split(",")
data = [int(fish) for fish in data]
# print(data)
# =================================================================
# LOGIC - PART ONE
# =================================================================
def part_one():
numDays = 18
numFishes = len(data)
# print("Initial state: ", data)
# Each day
for day in range(numDays):
for i in range(numFishes):
fish = data[i]
if fish == 0:
# a 0 becomes a 6
data[i] = 6
# and adds a new 8 to the end of the list
data.append(8)
else:
data[i] = fish-1
numFishes = len(data)
# print("After ", str(day), " day: ", data)
return(numFishes)
# =================================================================
# LOGIC - PART TWO
# =================================================================
def part_two():
numDays = 256
fishesDict = {}
for i in range(9):
fishesDict[i] = 0
fishesDict[i] = data.count(i)
# print(fishesDict)
# print("Initial state: ", fishesDict)
# Each day
for day in range(numDays):
newFishesDict = {}
for i in range(9):
newFishesDict[i] = 0
holder = 0
for i in fishesDict:
if i == 0:
holder += fishesDict[0]
else:
newFishesDict[i-1] = fishesDict[i]
# A 0 becomes a 6
newFishesDict[6] += holder
# and adds a new 8 to the end of the list
newFishesDict[8] += holder
fishesDict = newFishesDict
# print("After ", str(day+1), " day: ", fishesDict)
numFishes = 0
for i in range(9):
numFishes += fishesDict[i]
return(numFishes)
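# Example with the small puzzle input "3,4,3,1,2": the initial bucket counts are
# {1: 1, 2: 1, 3: 2, 4: 1} (all other timers 0). Each simulated day shifts every
# bucket down by one and adds the old 0-bucket count to both bucket 6 (the reset
# parents) and bucket 8 (the newborns); this gives 26 fish after 18 days and
# 26984457539 after 256 days for that sample input.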
# =================================================================
# MAIN
# =================================================================
if __name__ == '__main__':
# print("Part one result is: " , str(part_one()))
print("Part two result is: " , str(part_two())) | 29.41573 | 77 | 0.395722 | [
"MIT"
] | priyalr/advent-of-code | 2021/day-06-lanternfish/day-06.py | 2,618 | Python |
from frappe import _
def get_data():
return [
{
"module_name": "Notice",
"color": "grey",
"icon": "octicon octicon-file-directory",
"type": "module",
"label": _("Notice")
}
]
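# In a Frappe app, config/desktop.py is discovered automatically: the entry above
# is expected to add a grey "Notice" module icon to the desk once the app is
# installed, though the exact rendering depends on the Frappe version in use.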
| 15.153846 | 44 | 0.573604 | ["MIT"] | GreatDevelopers/Notice-gneEdu | noticeboard/config/desktop.py | 197 | Python
from panda3d.core import *
from direct.distributed.DistributedObject import DistributedObject
from direct.interval.IntervalGlobal import *
from toontown.toonbase import ToontownGlobals
from otp.avatar import Emote
from toontown.toontowngui import TTDialog
import webbrowser, SafezoneInvasionGlobals
class DistributedSafezoneInvasion(DistributedObject):
def __init__(self, cr):
DistributedObject.__init__(self, cr)
cr.invasion = self
self.invasionOn = False
self.accept('localPieSplat', self.__localPieSplat)
self.accept('enterSuitAttack', self.__localToonHit)
self.showFloor = base.render.find('**/ShowFloor')
self.geom = base.cr.playGame.hood.loader.geom
self.sky = loader.loadModel(SafezoneInvasionGlobals.CogSkyFile)
self.sky.setBin('background', 100)
self.sky.setColor(0.3, 0.3, 0.28, 1)
self.sky.setTransparency(TransparencyAttrib.MDual, 1)
self.sky.setDepthTest(0)
self.sky.setDepthWrite(0)
self.sky.setFogOff()
self.sky.setZ(-20.0)
ce = CompassEffect.make(NodePath(), CompassEffect.PRot | CompassEffect.PZ)
self.sky.node().setEffect(ce)
self.fadeIn = self.sky.colorScaleInterval(5.0, Vec4(1, 1, 1, 1), startColorScale=Vec4(1, 1, 1, 0), blendType='easeInOut')
self.cogSkyBegin = LerpColorScaleInterval(self.geom, 6.0, Vec4(0.4, 0.4, 0.4, 1), blendType='easeInOut')
self.cogSkyBeginStage = LerpColorScaleInterval(self.showFloor, 6.0, Vec4(0.4, 0.4, 0.4, 1), blendType='easeInOut')
self.beginSkySequence = Sequence(Func(self.fadeIn.start), Func(self.cogSkyBegin.start), Func(self.cogSkyBeginStage.start))
self.fadeOut = self.sky.colorScaleInterval(6.0, Vec4(1, 1, 1, 0), startColorScale=Vec4(1, 1, 1, 1), blendType='easeInOut')
self.cogSkyEnd = LerpColorScaleInterval(self.geom, 7.0, Vec4(1, 1, 1, 1), blendType='easeInOut')
self.cogSkyEndStage = LerpColorScaleInterval(self.showFloor, 7.0, Vec4(1, 1, 1, 1), blendType='easeInOut')
self.endSkySequence = Sequence(Func(self.fadeOut.start), Func(self.cogSkyEnd.start), Func(self.cogSkyEndStage.start), Wait(7), Func(self.sky.removeNode))
base.cr.playGame.hood.loader.music.stop()
self.musicEnter = base.loader.loadMusic(SafezoneInvasionGlobals.InvasionMusicEnter)
self.victoryMusic = base.loader.loadMusic('phase_9/audio/bgm/CogHQ_finale.ogg')
def delete(self):
self.cr.invasion = None
if self.invasionOn:
del self.fadeIn
del self.fadeOut
del self.cogSkyBegin
del self.cogSkyEnd
del self.cogSkyBeginStage
del self.cogSkyEndStage
del self.musicEnter
del self.beginSkySequence
del self.endSkySequence
DistributedObject.delete(self)
self.ignoreAll()
return
def setInvasionStarted(self, started):
if started and not self.invasionOn:
self.sky.reparentTo(camera)
self.beginSkySequence.start()
base.playMusic(self.musicEnter, looping=1, volume=1.0)
else:
if not started and self.invasionOn:
self.endInvasion()
else:
return
self.invasionOn = started
def endInvasion(self):
self.endSkySequence.start()
base.playMusic(self.victoryMusic, looping=0, volume=0.9)
self.victoryIval = Sequence(Func(Emote.globalEmote.disableAll, base.localAvatar, 'dbattle, enterReward'), Func(base.localAvatar.disableAvatarControls), Func(base.localAvatar.b_setEmoteState, 6, 1.0), Wait(5.15), Func(Emote.globalEmote.releaseAll, base.localAvatar, 'dbattle, enterReward'), Func(base.localAvatar.enableAvatarControls))
self.victoryIval.start()
def startCogSky(self):
self.fadeIn.start()
self.cogSkyBegin.start()
self.cogSkyBeginStage.start()
def stopCogSky(self):
if self.invasionOn:
cogSkySequence = Sequence(Func(self.cogSkyEnd.start), Func(self.cogSkyEndStage.start), Func(self.fadeOut.start), Wait(7), Func(self.sky.removeNode))
def stopMusic(self):
self.musicEnter.stop()
def showThanks(self):
self.confirm = TTDialog.TTGlobalDialog(doneEvent='confirmDone', message=SafezoneInvasionGlobals.Thanks, style=TTDialog.Acknowledge, suppressKeys=True)
self.confirm.show()
self.accept('confirmDone', self.handleConfirm)
def handleConfirm(self):
status = self.confirm.doneStatus
self.ignore('confirmDone')
self.confirm.cleanup()
del self.confirm
if status == 'ok':
webbrowser.open('http://toontownrewritten.com')
abort()
def __localPieSplat(self, pieCode, entry):
if pieCode == ToontownGlobals.PieCodeToon:
avatarDoId = entry.getIntoNodePath().getNetTag('avatarDoId')
if avatarDoId == '':
self.notify.warning('Toon %s has no avatarDoId tag.' % repr(entry.getIntoNodePath()))
return
doId = int(avatarDoId)
if doId != localAvatar.doId:
self.d_pieHitToon(doId)
else:
if pieCode == ToontownGlobals.PieCodeInvasionSuit:
avatarDoId = entry.getIntoNodePath().getNetTag('avatarDoId')
if avatarDoId == '':
self.notify.warning('Suit %s has no avatarDoId tag.' % repr(entry.getIntoNodePath()))
return
doId = int(avatarDoId)
if doId != localAvatar.doId:
self.d_pieHitSuit(doId)
def __localToonHit(self, entry):
damage = int(entry.getIntoNode().getTag('damage'))
self.d_takeDamage(damage)
def d_pieHitToon(self, doId):
self.sendUpdate('pieHitToon', [doId])
def d_pieHitSuit(self, doId):
self.sendUpdate('pieHitSuit', [doId])
def d_takeDamage(self, damage):
        self.sendUpdate('takeDamage', [damage])
| 45.923664 | 342 | 0.65625 | ["MIT"] | TTOFFLINE-LEAK/ttoffline | v2.5.7/toontown/election/DistributedSafezoneInvasion.py | 6,016 | Python
from configparser import ConfigParser
# Initialize the Parser.
config = ConfigParser()
# Add the Section.
config.add_section('graph_api')
# Set the Values.
config.set('graph_api', 'client_id', '_______')
print('set client ID')
config.set('graph_api', 'client_secret', '_______')
print('set client secret')
config.set('graph_api', 'redirect_uri', '_______')
print('set redirect url')
# Write the file.
with open(file='config.ini', mode='w+') as f:
config.write(f)
print('config written')
print(config)
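# Reading the values back later (a minimal sketch; assumes config.ini was written
# by the code above and the placeholder values were filled in):
def read_graph_api_config(path='config.ini'):
    reader = ConfigParser()
    reader.read(path)
    return (reader.get('graph_api', 'client_id'),
            reader.get('graph_api', 'client_secret'),
            reader.get('graph_api', 'redirect_uri'))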
| 24.454545 | 52 | 0.684015 | ["Unlicense"] | perrinromney/Teams_Status | write_config.py | 538 | Python
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import odoo.tests
@odoo.tests.tagged('post_install','-at_install')
class TestUi(odoo.tests.HttpCase):
def test_01_crm_tour(self):
self.start_tour("/web", 'crm_tour', login="admin")
| 24.727273 | 74 | 0.724265 | ["MIT"] | LucasBorges-Santos/docker-odoo | odoo/base-addons/crm/tests/test_crm_ui.py | 272 | Python
import battery_test
if __name__ == '__main__':
#assert(EV_test.battery_is_ok(25, 70, 0.7) is True)
#assert(EV_test.battery_is_ok(50, 85, 0) is False)
assert(battery_test.check_battery_is_ok({'temperature' : 25,'soc' : 70,'charge_rate' : 0.9}) is False)
assert(battery_test.check_battery_is_ok({'temperature' : 25,'soc' : 70,'charge_rate' : 0.7}) is True)
assert(battery_test.check_battery_is_ok({'temperature' : 50,'soc' : 85,'charge_rate' : 0}) is False)
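    # The asserts above imply limits roughly like: temperature within 0-45,
    # state of charge within 20-80, and charge rate at most 0.8. The exact
    # bounds live in battery_test.check_battery_is_ok and are assumed here.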
| 53.111111 | 107 | 0.700837 | ["MIT"] | wanderer-soul92/functional-python-wanderer-soul92 | check_limits.py | 478 | Python
"""Unit tests for comparison functions for geometry types.
Authors: Ayush Baid
"""
import unittest
from typing import List
from unittest.mock import patch
import numpy as np
from gtsam import Cal3_S2, Point3, Pose3, Rot3, Similarity3, Unit3
from gtsam.examples import SFMdata
import gtsfm.utils.geometry_comparisons as geometry_comparisons
import tests.data.sample_poses as sample_poses
POSE_LIST = SFMdata.createPoses(Cal3_S2())
ROT3_EULER_ANGLE_ERROR_THRESHOLD = 1e-2
POINT3_RELATIVE_ERROR_THRESH = 1e-1
POINT3_ABS_ERROR_THRESH = 1e-2
def rot3_compare(R: Rot3, R_: Rot3, msg=None) -> bool:
return np.allclose(R.xyz(), R_.xyz(), atol=1e-2)
def point3_compare(t: Point3, t_: Point3, msg=None) -> bool:
return np.allclose(t, t_, rtol=POINT3_RELATIVE_ERROR_THRESH, atol=POINT3_ABS_ERROR_THRESH)
class TestGeometryComparisons(unittest.TestCase):
"""Unit tests for comparison functions for geometry types."""
def __assert_equality_on_rot3s(self, computed: List[Rot3], expected: List[Rot3]) -> None:
self.assertEqual(len(computed), len(expected))
for R, R_ in zip(computed, expected):
self.assertEqual(R, R_)
def __assert_equality_on_point3s(self, computed: List[Point3], expected: List[Point3]) -> None:
self.assertEqual(len(computed), len(expected))
for t, t_ in zip(computed, expected):
np.testing.assert_allclose(t, t_, rtol=POINT3_RELATIVE_ERROR_THRESH, atol=POINT3_ABS_ERROR_THRESH)
def __assert_equality_on_pose3s(self, computed: List[Pose3], expected: List[Pose3]) -> None:
self.assertEqual(len(computed), len(expected))
computed_rot3s = [x.rotation() for x in computed]
computed_point3s = [x.translation() for x in computed]
expected_rot3s = [x.rotation() for x in expected]
expected_point3s = [x.translation() for x in expected]
self.__assert_equality_on_rot3s(computed_rot3s, expected_rot3s)
self.__assert_equality_on_point3s(computed_point3s, expected_point3s)
def setUp(self):
super().setUp()
self.addTypeEqualityFunc(Rot3, rot3_compare)
self.addTypeEqualityFunc(Point3, point3_compare)
def test_align_rotations(self):
"""Tests the alignment of rotations."""
# using rotation along just the Y-axis so that angles can be linearly added.
input_list = [
Rot3.RzRyRx(np.deg2rad(0), np.deg2rad(-10), 0),
Rot3.RzRyRx(np.deg2rad(0), np.deg2rad(30), 0),
]
ref_list = [
Rot3.RzRyRx(np.deg2rad(0), np.deg2rad(80), 0),
Rot3.RzRyRx(np.deg2rad(0), np.deg2rad(-40), 0),
]
computed = geometry_comparisons.align_rotations(input_list, ref_list)
expected = [
Rot3.RzRyRx(0, np.deg2rad(80), 0),
Rot3.RzRyRx(0, np.deg2rad(120), 0),
]
self.__assert_equality_on_rot3s(computed, expected)
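        # With pure Y-axis rotations the alignment in this fixture reduces to a
        # constant offset: mapping the first input (-10 deg) onto the first
        # reference (80 deg) is a +90 deg shift, which sends 30 deg to the
        # expected 120 deg.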
def test_align_poses_after_sim3_transform(self):
"""Test for alignment of poses after applying a SIM3 transformation."""
translation_shift = np.array([5, 10, -5])
rotation_shift = Rot3.RzRyRx(0, 0, np.deg2rad(30))
scaling_factor = 0.7
transform = Similarity3(rotation_shift, translation_shift, scaling_factor)
ref_list = [transform.transformFrom(x) for x in sample_poses.CIRCLE_TWO_EDGES_GLOBAL_POSES]
computed_poses = geometry_comparisons.align_poses_sim3(sample_poses.CIRCLE_TWO_EDGES_GLOBAL_POSES, ref_list)
self.__assert_equality_on_pose3s(computed_poses, sample_poses.CIRCLE_TWO_EDGES_GLOBAL_POSES)
def test_align_poses_on_panorama_after_sim3_transform(self):
"""Test for alignment of poses after applying a forward motion transformation."""
translation_shift = np.array([0, 5, 0])
rotation_shift = Rot3.RzRyRx(0, 0, np.deg2rad(30))
scaling_factor = 1.0
aTi_list = sample_poses.PANORAMA_GLOBAL_POSES
bSa = Similarity3(rotation_shift, translation_shift, scaling_factor)
bTi_list = [bSa.transformFrom(x) for x in aTi_list]
aTi_list_ = geometry_comparisons.align_poses_sim3(aTi_list, bTi_list)
self.__assert_equality_on_pose3s(aTi_list_, aTi_list)
@patch(
"gtsfm.utils.geometry_comparisons.align_rotations",
return_value=[
Rot3.RzRyRx(0, np.deg2rad(32), 0),
Rot3.RzRyRx(0, 0, np.deg2rad(-22)),
Rot3.RzRyRx(0, 0, np.deg2rad(83)),
], # compared with aRi_list
)
def test_compare_rotations_with_all_valid_rot3s_success(self, align_rotations_mocked):
"""Tests the comparison results on list of rotations."""
aRi_list = [
Rot3.RzRyRx(0, np.deg2rad(25), 0),
Rot3.RzRyRx(0, 0, np.deg2rad(-20)),
Rot3.RzRyRx(0, 0, np.deg2rad(80)),
]
bRi_list = [
Rot3.RzRyRx(0, np.deg2rad(31), 0),
Rot3.RzRyRx(0, 0, np.deg2rad(-22)),
Rot3.RzRyRx(0, 0, np.deg2rad(77.5)),
] # meaningless as align function is mocked
# test with threshold of 10 degrees, which satisfies all the rotations.
self.assertTrue(geometry_comparisons.compare_rotations(aRi_list, bRi_list, 10))
align_rotations_mocked.assert_called_once()
@patch(
"gtsfm.utils.geometry_comparisons.align_rotations",
return_value=[
Rot3.RzRyRx(0, np.deg2rad(32), 0),
Rot3.RzRyRx(0, 0, np.deg2rad(-22)),
Rot3.RzRyRx(0, 0, np.deg2rad(83)),
], # compared with aRi_list
)
def test_compare_rotations_with_all_valid_rot3s_failure(self, align_rotations_mocked):
"""Tests the comparison results on list of rotations."""
aRi_list = [
Rot3.RzRyRx(0, np.deg2rad(25), 0),
Rot3.RzRyRx(0, 0, np.deg2rad(-20)),
Rot3.RzRyRx(0, 0, np.deg2rad(80)),
]
bRi_list = [
Rot3.RzRyRx(0, np.deg2rad(31), 0),
Rot3.RzRyRx(0, 0, np.deg2rad(-22)),
Rot3.RzRyRx(0, 0, np.deg2rad(77.5)),
] # meaningless as align function is mocked
# test with threshold of 5 degrees, which fails one rotation and hence the overall comparison
self.assertFalse(geometry_comparisons.compare_rotations(aRi_list, bRi_list, 5))
align_rotations_mocked.assert_called_once()
@patch(
"gtsfm.utils.geometry_comparisons.align_rotations",
return_value=[Rot3.RzRyRx(0, np.deg2rad(25), 0), Rot3.RzRyRx(0, 0, np.deg2rad(-20))], # compared with aRi_list
)
def test_compare_rotations_with_nones_at_same_indices(self, align_rotations_mocked):
"""Tests the comparison results on list of rotations."""
list1 = [
Rot3.RzRyRx(0, np.deg2rad(25), 0),
Rot3.RzRyRx(0, 0, np.deg2rad(-20)),
None,
]
list2 = [
Rot3.RzRyRx(0, np.deg2rad(31), 0),
Rot3.RzRyRx(0, 0, np.deg2rad(-22)),
None,
]
threshold_degrees = 10
# test with threshold of 10 degrees, which satisfies all the rotations.
self.assertTrue(geometry_comparisons.compare_rotations(list1, list2, threshold_degrees))
align_rotations_mocked.assert_called_once()
@patch("gtsfm.utils.geometry_comparisons.align_rotations", return_value=None)
def test_compare_rotations_with_nones_at_different_indices(self, aligned_rotations_mocked):
"""Tests the comparison results on list of rotations."""
list1 = [
Rot3.RzRyRx(0, np.deg2rad(25), 0),
Rot3.RzRyRx(0, 0, np.deg2rad(-20)),
None,
]
list2 = [
Rot3.RzRyRx(0, np.deg2rad(31), 0),
None,
Rot3.RzRyRx(0, 0, np.deg2rad(-22)),
]
        # the comparison must fail because the None entries occur at different indices, regardless of the threshold.
self.assertFalse(geometry_comparisons.compare_rotations(list1, list2, 10))
aligned_rotations_mocked.assert_not_called()
def test_compute_relative_rotation_angle(self):
"""Tests the relative angle between two rotations."""
R_1 = Rot3.RzRyRx(0, np.deg2rad(45), np.deg2rad(22.5))
R_2 = Rot3.RzRyRx(0, np.deg2rad(90), np.deg2rad(22.5))
# returns angle in degrees
computed_deg = geometry_comparisons.compute_relative_rotation_angle(R_1, R_2)
expected_deg = 45
np.testing.assert_allclose(computed_deg, expected_deg, rtol=1e-3, atol=1e-3)
def test_compute_relative_unit_translation_angle(self):
"""Tests the relative angle between two unit-translations."""
U_1 = Unit3(np.array([1, 0, 0]))
U_2 = Unit3(np.array([0.5, 0.5, 0]))
# returns angle in degrees
computed_deg = geometry_comparisons.compute_relative_unit_translation_angle(U_1, U_2)
expected_deg = 45
self.assertAlmostEqual(computed_deg, expected_deg, places=3)
def test_compute_translation_to_direction_angle_is_zero(self):
i2Ui1_measured = Unit3(Point3(1, 0, 0))
wTi2_estimated = Pose3(Rot3(), Point3(0, 0, 0))
wTi1_estimated = Pose3(Rot3(), Point3(2, 0, 0))
self.assertEqual(
geometry_comparisons.compute_translation_to_direction_angle(i2Ui1_measured, wTi2_estimated, wTi1_estimated),
0.0,
)
def test_compute_translation_to_direction_angle_is_nonzero(self):
rz = np.deg2rad(90)
wRi2 = Rot3.RzRyRx(0, 0, rz) # x-axis of i2 points along y in world frame
wTi2_estimated = Pose3(wRi2, Point3(0, 0, 0))
wTi1_estimated = Pose3(Rot3(), Point3(-1, 0, 0)) # At (0, 1, 0) in i2 frame, rotation of i1 is irrelevant here.
i2Ui1_measured = Unit3(Point3(1, 0, 0))
# Estimated relative translation of i1 in i2 frame is (0, 1, 0), and the measurement in i2 frame is (1, 0, 0).
# Expected angle between the two is 90 degrees.
        self.assertEqual(
geometry_comparisons.compute_translation_to_direction_angle(i2Ui1_measured, wTi2_estimated, wTi1_estimated),
90.0,
)
def test_compute_points_distance_l2_is_zero(self):
self.assertEqual(
geometry_comparisons.compute_points_distance_l2(wti1=Point3(1, -2, 3), wti2=Point3(1, -2, 3)), 0.0
)
def test_compute_points_distance_l2_is_none(self):
self.assertEqual(geometry_comparisons.compute_points_distance_l2(wti1=Point3(0, 0, 0), wti2=None), None)
def test_compute_points_distance_l2_is_nonzero(self):
wti1 = Point3(1, 1, 1)
wti2 = Point3(1, 1, -1)
self.assertEqual(geometry_comparisons.compute_points_distance_l2(wti1, wti2), 2)
def test_align_poses_sim3_ignore_missing(self):
"""Consider a simple cases with 4 poses in a line. Suppose SfM only recovers 2 of the 4 poses."""
wT0 = Pose3(Rot3(np.eye(3)), np.zeros(3))
wT1 = Pose3(Rot3(np.eye(3)), np.ones(3))
wT2 = Pose3(Rot3(np.eye(3)), np.ones(3) * 2)
wT3 = Pose3(Rot3(np.eye(3)), np.ones(3) * 3)
# `a` frame is the target/reference frame
aTi_list = [wT0, wT1, wT2, wT3]
# `b` frame contains the estimates
bTi_list = [None, wT1, None, wT3]
aTi_list_ = geometry_comparisons.align_poses_sim3_ignore_missing(aTi_list, bTi_list)
# indices 0 and 2 should still have no estimated pose, even after alignment
assert aTi_list_[0] is None
assert aTi_list_[2] is None
        # the alignment is the identity, so the poses at indices 1 and 3 should still match the GT/target poses
self.__assert_equality_on_pose3s(computed=[aTi_list_[1], aTi_list_[3]], expected=[aTi_list[1], aTi_list[3]])
def test_get_points_within_radius_of_cameras():
"""Verify that points that fall outside of 10 meter radius of two camera poses.
Cameras are placed at (0,0,0) and (10,0,0).
"""
wTi0 = Pose3(Rot3(), np.zeros(3))
wTi1 = Pose3(Rot3(), np.array([10.0, 0, 0]))
wTi_list = [wTi0, wTi1]
points_3d = np.array([[-15, 0, 0], [0, 15, 0], [-5, 0, 0], [15, 0, 0], [25, 0, 0]])
radius = 10.0
nearby_points_3d = geometry_comparisons.get_points_within_radius_of_cameras(wTi_list, points_3d, radius)
expected_nearby_points_3d = np.array([[-5, 0, 0], [15, 0, 0]])
np.testing.assert_allclose(nearby_points_3d, expected_nearby_points_3d)
def test_get_points_within_radius_of_cameras_negative_radius():
"""Catch degenerate input."""
wTi0 = Pose3(Rot3(), np.zeros(3))
wTi1 = Pose3(Rot3(), np.array([10.0, 0, 0]))
wTi_list = [wTi0, wTi1]
points_3d = np.array([[-15, 0, 0], [0, 15, 0], [-5, 0, 0], [15, 0, 0], [25, 0, 0]])
radius = -5
nearby_points_3d = geometry_comparisons.get_points_within_radius_of_cameras(wTi_list, points_3d, radius)
assert nearby_points_3d is None, "Non-positive radius is not allowed"
def test_get_points_within_radius_of_cameras_no_points():
"""Catch degenerate input."""
wTi0 = Pose3(Rot3(), np.zeros(3))
wTi1 = Pose3(Rot3(), np.array([10.0, 0, 0]))
wTi_list = [wTi0, wTi1]
points_3d = np.zeros((0, 3))
radius = 10.0
nearby_points_3d = geometry_comparisons.get_points_within_radius_of_cameras(wTi_list, points_3d, radius)
assert nearby_points_3d is None, "At least one 3d point must be provided"
def test_get_points_within_radius_of_cameras_no_poses():
"""Catch degenerate input."""
wTi_list = []
points_3d = np.array([[-15, 0, 0], [0, 15, 0], [-5, 0, 0], [15, 0, 0], [25, 0, 0]])
radius = 10.0
nearby_points_3d = geometry_comparisons.get_points_within_radius_of_cameras(wTi_list, points_3d, radius)
assert nearby_points_3d is None, "At least one camera pose must be provided"
if __name__ == "__main__":
unittest.main()
| 40.373529 | 120 | 0.663801 | ["Apache-2.0"] | yuancaimaiyi/gtsfm | tests/utils/test_geometry_comparisons.py | 13,727 | Python
#!/usr/bin/python
"""Code for backing up pictures and videos captured to Dropbox"""
import sys
import os
import glob
from os.path import join
import subprocess
from Adafruit_IO import Client
import base64
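# Directory containing this script, used to locate camera_name.cfg regardless of the current working directory.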
__location__ = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__)))
def main(args):
"""Main function"""
if args:
folder_path = args[0]
else:
folder_path = '/var/lib/motion'
if not os.path.exists(folder_path):
print "Folder Path: " + folder_path + " doesn't exist, exiting."
raise ValueError("Incorrect Parameters")
    # Get the camera name from camera_name.cfg, which sits next to this script.
with open(os.path.join(__location__, 'camera_name.cfg'), 'r') as f:
print("open")
camera_name = f.read().strip()
print("Camera name: " + camera_name)
aio = Client('ryhajlo', 'b5fe0936d9a84629a2d49cd45858fc67')
# Start handling pictures
videos = get_videos(folder_path)
pictures = get_pictures(folder_path)
if pictures:
# Upload the files to dropbox
# Build our command to upload files
        command = ['/home/pi/Dropbox-Uploader/dropbox_uploader.sh', 'upload']
        for picture in pictures:
            print("Will upload: " + picture)
            command.append(picture)
        command.append('/camera/pictures/' + camera_name + '/')
        subprocess.call(command)
        print("Finished uploading pictures")
# Do the same for videos
        command = ['/home/pi/Dropbox-Uploader/dropbox_uploader.sh', 'upload']
        for video in videos:
            print("Will upload: " + video)
            command.append(video)
        command.append('/camera/videos/' + camera_name + '/')
        subprocess.call(command)
        print("Finished uploading videos")
        command = ["mogrify", "-resize", "320x240", "/var/lib/motion/*.jpg"]
        subprocess.call(command)
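        # Publish the most recent capture to this camera's Adafruit IO feed.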
latest_picture = max(pictures, key=os.path.getctime)
print "The latest picture is: " + latest_picture
with open(latest_picture, "rb") as imageFile:
image_str = base64.b64encode(imageFile.read())
print "Uploading latest to Adafruit IO"
feed_name = 'pic-' + camera_name
print("Feed Name: " + feed_name)
aio.send(feed_name, image_str )
print "Finished uploading to Adafruit IO"
else:
latest_picture = None
print "No pictures"
# Now that everything is uploaded, delete it all
    for picture in pictures:
        print("Deleting " + picture)
        os.remove(picture)
    for video in videos:
        print("Deleting " + video)
        os.remove(video)
def get_videos(folder_path):
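    """Return the .avi video files found in the given folder."""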
videos = glob.glob(join(folder_path, '*.avi'))
return videos
def get_pictures(folder_path):
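    """Return the captured .jpg pictures in the given folder, excluding latest.jpg."""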
print "Grabbing files from " + folder_path
# Get the list of files in that directory
pictures = glob.glob(join(folder_path, '2*.jpg')) # Use the leading 2 to prevent us from getting 'latest.jpg'
latest_picture = max(pictures, key=os.path.getctime)
return pictures
if __name__ == "__main__":
main(sys.argv[1:])
| 31.59434 | 113 | 0.637802 | ["MIT"] | ryhajlo/camera_backup | camera_backup.py | 3,349 | Python