content (stringlengths 7-928k) | avg_line_length (float64 3.5-33.8k) | max_line_length (int64 6-139k) | alphanum_fraction (float64 0.08-0.96) | licenses (sequence) | repository_name (stringlengths 7-104) | path (stringlengths 4-230) | size (int64 7-928k) | lang (stringclasses, 1 value) |
---|---|---|---|---|---|---|---|---|
#
# Copyright 2014 Google Inc. All rights reserved.
#
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
#
"""Tests for client module."""
import responses
import time
import googlemaps
from googlemaps import client as _client
import test as _test
import requests
class ClientTest(_test.TestCase):
def test_no_api_key(self):
with self.assertRaises(Exception):
client = googlemaps.Client()
client.directions("Sydney", "Melbourne")
def test_invalid_api_key(self):
with self.assertRaises(Exception):
client = googlemaps.Client(key="Invalid key.")
client.directions("Sydney", "Melbourne")
def test_urlencode(self):
# See GH #72.
encoded_params = _client.urlencode_params([("address", "=Sydney ~")])
self.assertEqual("address=%3DSydney+~", encoded_params)
@responses.activate
def test_queries_per_second(self):
# This test assumes that the time to run a mocked query is
# relatively small, e.g. a few milliseconds. We define a rate of
# 3 queries per second, and run double that, which should take at
# least 1 second but no more than 2.
queries_per_second = 3
query_range = range(queries_per_second * 2)
for _ in query_range:
responses.add(responses.GET,
"https://maps.googleapis.com/maps/api/geocode/json",
body='{"status":"OK","results":[]}',
status=200,
content_type="application/json")
client = googlemaps.Client(key="AIzaasdf",
queries_per_second=queries_per_second)
start = time.time()
for _ in query_range:
client.geocode("Sesame St.")
end = time.time()
self.assertTrue(start + 1 < end < start + 2)
@responses.activate
def test_key_sent(self):
responses.add(responses.GET,
"https://maps.googleapis.com/maps/api/geocode/json",
body='{"status":"OK","results":[]}',
status=200,
content_type="application/json")
client = googlemaps.Client(key="AIzaasdf")
client.geocode("Sesame St.")
self.assertEqual(1, len(responses.calls))
self.assertURLEqual("https://maps.googleapis.com/maps/api/geocode/json?"
"key=AIzaasdf&address=Sesame+St.",
responses.calls[0].request.url)
@responses.activate
def test_extra_params(self):
responses.add(responses.GET,
"https://maps.googleapis.com/maps/api/geocode/json",
body='{"status":"OK","results":[]}',
status=200,
content_type="application/json")
client = googlemaps.Client(key="AIzaasdf")
client.geocode("Sesame St.", extra_params={"foo": "bar"})
self.assertEqual(1, len(responses.calls))
self.assertURLEqual("https://maps.googleapis.com/maps/api/geocode/json?"
"key=AIzaasdf&address=Sesame+St.&foo=bar",
responses.calls[0].request.url)
def test_hmac(self):
"""
From http://en.wikipedia.org/wiki/Hash-based_message_authentication_code
HMAC_SHA1("key", "The quick brown fox jumps over the lazy dog")
= 0xde7c9b85b8b78aa6bc8a7a36f70a90701c9db4d9
"""
message = "The quick brown fox jumps over the lazy dog"
key = "a2V5" # "key" -> base64
signature = "3nybhbi3iqa8ino29wqQcBydtNk="
self.assertEqual(signature, _client.sign_hmac(key, message))
@responses.activate
def test_url_signed(self):
responses.add(responses.GET,
"https://maps.googleapis.com/maps/api/geocode/json",
body='{"status":"OK","results":[]}',
status=200,
content_type="application/json")
client = googlemaps.Client(client_id="foo", client_secret="a2V5")
client.geocode("Sesame St.")
self.assertEqual(1, len(responses.calls))
# Check ordering of parameters.
self.assertIn("address=Sesame+St.&client=foo&signature",
responses.calls[0].request.url)
self.assertURLEqual("https://maps.googleapis.com/maps/api/geocode/json?"
"address=Sesame+St.&client=foo&"
"signature=fxbWUIcNPZSekVOhp2ul9LW5TpY=",
responses.calls[0].request.url)
@responses.activate
def test_ua_sent(self):
responses.add(responses.GET,
"https://maps.googleapis.com/maps/api/geocode/json",
body='{"status":"OK","results":[]}',
status=200,
content_type="application/json")
client = googlemaps.Client(key="AIzaasdf")
client.geocode("Sesame St.")
self.assertEqual(1, len(responses.calls))
user_agent = responses.calls[0].request.headers["User-Agent"]
self.assertTrue(user_agent.startswith("GoogleGeoApiClientPython"))
@responses.activate
def test_retry(self):
class request_callback:
def __init__(self):
self.first_req = True
def __call__(self, req):
if self.first_req:
self.first_req = False
return (200, {}, '{"status":"OVER_QUERY_LIMIT"}')
return (200, {}, '{"status":"OK","results":[]}')
responses.add_callback(responses.GET,
"https://maps.googleapis.com/maps/api/geocode/json",
content_type='application/json',
callback=request_callback())
client = googlemaps.Client(key="AIzaasdf")
client.geocode("Sesame St.")
self.assertEqual(2, len(responses.calls))
self.assertEqual(responses.calls[0].request.url, responses.calls[1].request.url)
@responses.activate
def test_transport_error(self):
responses.add(responses.GET,
"https://maps.googleapis.com/maps/api/geocode/json",
status=404,
content_type='application/json')
client = googlemaps.Client(key="AIzaasdf")
with self.assertRaises(googlemaps.exceptions.HTTPError) as e:
client.geocode("Foo")
self.assertEqual(e.exception.status_code, 404)
@responses.activate
def test_host_override(self):
responses.add(responses.GET,
"https://foo.com/bar",
body='{"status":"OK","results":[]}',
status=200,
content_type="application/json")
client = googlemaps.Client(key="AIzaasdf")
client._get("/bar", {}, base_url="https://foo.com")
self.assertEqual(1, len(responses.calls))
@responses.activate
def test_custom_extract(self):
def custom_extract(resp):
return resp.json()
responses.add(responses.GET,
"https://maps.googleapis.com/bar",
body='{"error":"errormessage"}',
status=403,
content_type="application/json")
client = googlemaps.Client(key="AIzaasdf")
b = client._get("/bar", {}, extract_body=custom_extract)
self.assertEqual(1, len(responses.calls))
self.assertEqual("errormessage", b["error"])
@responses.activate
def test_retry_intermittent(self):
class request_callback:
def __init__(self):
self.first_req = True
def __call__(self, req):
if self.first_req:
self.first_req = False
return (500, {}, 'Internal Server Error.')
return (200, {}, '{"status":"OK","results":[]}')
responses.add_callback(responses.GET,
"https://maps.googleapis.com/maps/api/geocode/json",
content_type="application/json",
callback=request_callback())
client = googlemaps.Client(key="AIzaasdf")
client.geocode("Sesame St.")
self.assertEqual(2, len(responses.calls))
def test_channel_without_client_id(self):
with self.assertRaises(ValueError):
client = googlemaps.Client(key="AIzaasdf", channel="mychannel")
def test_invalid_channel(self):
# Cf. limitations here:
# https://developers.google.com/maps/premium/reports
# /usage-reports#channels
with self.assertRaises(ValueError):
client = googlemaps.Client(client_id="foo", client_secret="a2V5",
channel="auieauie$? ")
def test_auth_url_with_channel(self):
client = googlemaps.Client(key="AIzaasdf",
client_id="foo",
client_secret="a2V5",
channel="MyChannel_1")
# Check ordering of parameters + signature.
auth_url = client._generate_auth_url("/test",
{"param": "param"},
accepts_clientid=True)
self.assertEqual(auth_url, "/test?param=param"
"&channel=MyChannel_1"
"&client=foo"
"&signature=OH18GuQto_mEpxj99UimKskvo4k=")
# Check that the API key is used instead when accepts_clientid=False
auth_url = client._generate_auth_url("/test",
{"param": "param"},
accepts_clientid=False)
self.assertEqual(auth_url, "/test?param=param&key=AIzaasdf")
def test_requests_version(self):
client_args_timeout = {
"key": "AIzaasdf",
"client_id": "foo",
"client_secret": "a2V5",
"channel": "MyChannel_1",
"connect_timeout": 5,
"read_timeout": 5
}
client_args = client_args_timeout.copy()
del client_args["connect_timeout"]
del client_args["read_timeout"]
requests.__version__ = '2.3.0'
with self.assertRaises(NotImplementedError):
googlemaps.Client(**client_args_timeout)
googlemaps.Client(**client_args)
requests.__version__ = '2.4.0'
googlemaps.Client(**client_args_timeout)
googlemaps.Client(**client_args)
@responses.activate
def test_no_retry_over_query_limit(self):
responses.add(responses.GET,
"https://maps.googleapis.com/foo",
body='{"status":"OVER_QUERY_LIMIT"}',
status=200,
content_type="application/json")
client = googlemaps.Client(key="AIzaasdf",
retry_over_query_limit=False)
with self.assertRaises(googlemaps.exceptions.ApiError):
client._request("/foo", {})
self.assertEqual(1, len(responses.calls))
| 38.14658 | 88 | 0.571343 | ["Apache-2.0"] | Harkishen-Singh/google-maps-services-python | test/test_client.py | 11,711 | Python |
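The `test_hmac` and `test_url_signed` cases above document the URL-signing scheme: HMAC-SHA1 over the message with a base64-decoded key, with the digest re-encoded as base64. A minimal sketch of that scheme, independent of the library's actual `sign_hmac` implementation (the function name here is hypothetical):

```python
import base64
import hashlib
import hmac

def sign_hmac_sketch(base64_key, message):
    # Decode the URL-safe base64 key, HMAC-SHA1 the message, and return the
    # URL-safe base64 encoding of the digest (the format asserted in test_hmac).
    key = base64.urlsafe_b64decode(base64_key)
    digest = hmac.new(key, message.encode("utf-8"), hashlib.sha1).digest()
    return base64.urlsafe_b64encode(digest).decode("utf-8")

# The Wikipedia HMAC-SHA1 test vector used by test_hmac: "a2V5" is "key" in base64.
assert sign_hmac_sketch("a2V5", "The quick brown fox jumps over the lazy dog") == \
    "3nybhbi3iqa8ino29wqQcBydtNk="
```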
# -*- coding: utf-8 -*-
# Copyright (c) 2010-2017 LE GOFF Vincent
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
# OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Package contenant le paramètre 'créer' de la commande 'banc'."""
from primaires.interpreteur.masque.parametre import Parametre
from primaires.interpreteur.editeur.presentation import Presentation
class PrmCreer(Parametre):
"""Commande 'banc créer'"""
def __init__(self):
"""Constructeur du paramètre."""
Parametre.__init__(self, "creer", "create")
self.schema = "<cle>"
self.aide_courte = "crée un banc de poisson"
self.aide_longue = \
"Cette commande permet de créer un nouveau banc de " \
"poisson. Vous devez préciser en argument la clé identifiant " \
"le banc."
def interpreter(self, personnage, dic_masques):
"""Méthode d'interprétation de commande"""
cle = dic_masques["cle"].cle
if cle in importeur.peche.bancs:
personnage << "|err|Ce banc existe déjà.|ff|"
return
banc = importeur.peche.creer_banc(cle)
editeur = importeur.interpreteur.construire_editeur(
"schooledit", personnage, banc)
personnage.contextes.ajouter(editeur)
editeur.actualiser()
| 44.177419 | 79 | 0.71632 | ["BSD-3-Clause"] | vincent-lg/tsunami | src/secondaires/peche/commandes/banc/creer.py | 2,751 | Python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class MybankPaymentTradeFinancingOrderRefundModel(object):
def __init__(self):
self._amount = None
self._biz_no = None
self._currency_value = None
self._ext_info = None
self._order_no = None
self._refund_type = None
self._remark = None
self._request_no = None
self._request_time = None
self._scene_type = None
@property
def amount(self):
return self._amount
@amount.setter
def amount(self, value):
self._amount = value
@property
def biz_no(self):
return self._biz_no
@biz_no.setter
def biz_no(self, value):
self._biz_no = value
@property
def currency_value(self):
return self._currency_value
@currency_value.setter
def currency_value(self, value):
self._currency_value = value
@property
def ext_info(self):
return self._ext_info
@ext_info.setter
def ext_info(self, value):
self._ext_info = value
@property
def order_no(self):
return self._order_no
@order_no.setter
def order_no(self, value):
self._order_no = value
@property
def refund_type(self):
return self._refund_type
@refund_type.setter
def refund_type(self, value):
self._refund_type = value
@property
def remark(self):
return self._remark
@remark.setter
def remark(self, value):
self._remark = value
@property
def request_no(self):
return self._request_no
@request_no.setter
def request_no(self, value):
self._request_no = value
@property
def request_time(self):
return self._request_time
@request_time.setter
def request_time(self, value):
self._request_time = value
@property
def scene_type(self):
return self._scene_type
@scene_type.setter
def scene_type(self, value):
self._scene_type = value
def to_alipay_dict(self):
params = dict()
if self.amount:
if hasattr(self.amount, 'to_alipay_dict'):
params['amount'] = self.amount.to_alipay_dict()
else:
params['amount'] = self.amount
if self.biz_no:
if hasattr(self.biz_no, 'to_alipay_dict'):
params['biz_no'] = self.biz_no.to_alipay_dict()
else:
params['biz_no'] = self.biz_no
if self.currency_value:
if hasattr(self.currency_value, 'to_alipay_dict'):
params['currency_value'] = self.currency_value.to_alipay_dict()
else:
params['currency_value'] = self.currency_value
if self.ext_info:
if hasattr(self.ext_info, 'to_alipay_dict'):
params['ext_info'] = self.ext_info.to_alipay_dict()
else:
params['ext_info'] = self.ext_info
if self.order_no:
if hasattr(self.order_no, 'to_alipay_dict'):
params['order_no'] = self.order_no.to_alipay_dict()
else:
params['order_no'] = self.order_no
if self.refund_type:
if hasattr(self.refund_type, 'to_alipay_dict'):
params['refund_type'] = self.refund_type.to_alipay_dict()
else:
params['refund_type'] = self.refund_type
if self.remark:
if hasattr(self.remark, 'to_alipay_dict'):
params['remark'] = self.remark.to_alipay_dict()
else:
params['remark'] = self.remark
if self.request_no:
if hasattr(self.request_no, 'to_alipay_dict'):
params['request_no'] = self.request_no.to_alipay_dict()
else:
params['request_no'] = self.request_no
if self.request_time:
if hasattr(self.request_time, 'to_alipay_dict'):
params['request_time'] = self.request_time.to_alipay_dict()
else:
params['request_time'] = self.request_time
if self.scene_type:
if hasattr(self.scene_type, 'to_alipay_dict'):
params['scene_type'] = self.scene_type.to_alipay_dict()
else:
params['scene_type'] = self.scene_type
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = MybankPaymentTradeFinancingOrderRefundModel()
if 'amount' in d:
o.amount = d['amount']
if 'biz_no' in d:
o.biz_no = d['biz_no']
if 'currency_value' in d:
o.currency_value = d['currency_value']
if 'ext_info' in d:
o.ext_info = d['ext_info']
if 'order_no' in d:
o.order_no = d['order_no']
if 'refund_type' in d:
o.refund_type = d['refund_type']
if 'remark' in d:
o.remark = d['remark']
if 'request_no' in d:
o.request_no = d['request_no']
if 'request_time' in d:
o.request_time = d['request_time']
if 'scene_type' in d:
o.scene_type = d['scene_type']
return o
| 30.107955 | 79 | 0.578411 | ["Apache-2.0"] | Anning01/alipay-sdk-python-all | alipay/aop/api/domain/MybankPaymentTradeFinancingOrderRefundModel.py | 5,299 | Python |
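The model above is a plain property bag with `to_alipay_dict`/`from_alipay_dict` converters. A short round-trip sketch (all field values below are hypothetical):

```python
model = MybankPaymentTradeFinancingOrderRefundModel()
model.order_no = "ORDER-0001"   # hypothetical order number
model.amount = "100.00"         # hypothetical amount
model.refund_type = "FULL"      # hypothetical refund type

# Serialize to the dict shape sent with the request, then rebuild a model from it.
params = model.to_alipay_dict()
restored = MybankPaymentTradeFinancingOrderRefundModel.from_alipay_dict(params)
assert restored.order_no == "ORDER-0001"
```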
"""Plugins for CMS"""
from django.utils.translation import ugettext_lazy as _
from cms.plugin_base import CMSPluginBase
from cms.plugin_pool import plugin_pool
from emencia.django.newsletter.cmsplugin_newsletter import settings
from emencia.django.newsletter.cmsplugin_newsletter.models import SubscriptionFormPlugin
from emencia.django.newsletter.forms import MailingListSubscriptionForm
class CMSSubscriptionFormPlugin(CMSPluginBase):
module = _('newsletter')
model = SubscriptionFormPlugin
name = _('Subscription Form')
render_template = 'newsletter/cms/subscription_form.html'
text_enabled = False
admin_preview = False
def render(self, context, instance, placeholder):
request = context['request']
if request.method == "POST" and (settings.FORM_NAME in request.POST.keys()):
form = MailingListSubscriptionForm(data=request.POST)
if form.is_valid():
form.save(instance.mailing_list)
form.saved = True
else:
form = MailingListSubscriptionForm()
context.update({
'object': instance,
'form': form,
'form_name': settings.FORM_NAME,
'placeholder': placeholder,
})
return context
plugin_pool.register_plugin(CMSSubscriptionFormPlugin)
| 34.153846 | 88 | 0.698949 | ["BSD-3-Clause"] | nephila/djangocms-newsletter | djangocms_newsletter/cmsplugin_newsletter/cms_plugins.py | 1,332 | Python |
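The `render` method above binds the subscription form only when the request is a POST that carries this plugin's form name. A framework-agnostic sketch of that flow (the helper name and the `form_name` default are hypothetical, not part of the plugin):

```python
def handle_subscription(request, mailing_list, form_class, form_name="subscription"):
    # Bind and save the form only for POSTs carrying this plugin's marker field;
    # otherwise return an unbound form for display.
    if request.method == "POST" and form_name in request.POST:
        form = form_class(data=request.POST)
        if form.is_valid():
            form.save(mailing_list)
            form.saved = True
    else:
        form = form_class()
    return form
```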
import bpy
track = bpy.context.edit_movieclip.tracking.tracks.active
track.color = (0.0, 1.0, 0.0)
track.use_custom_color = True
| 21.666667 | 57 | 0.761538 | ["Unlicense"] | 1-MillionParanoidTterabytes/Blender-2.79b-blackened | release/scripts/presets/tracking_track_color/near_plane.py | 130 | Python |
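The preset above recolors only the active track. A hedged variation, assuming the same context (an active movie clip with tracking data), that applies the color to every track in the clip:

```python
import bpy

# Apply the preset color to all tracks in the active clip, not just the active one.
for track in bpy.context.edit_movieclip.tracking.tracks:
    track.color = (0.0, 1.0, 0.0)
    track.use_custom_color = True
```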
# GENERATED BY KOMAND SDK - DO NOT EDIT
import komand
import json
class Input:
RESOURCEGROUP = "resourceGroup"
SUBSCRIPTIONID = "subscriptionId"
class Output:
VALUE = "value"
class ListVmInput(komand.Input):
schema = json.loads("""
{
"type": "object",
"title": "Variables",
"properties": {
"resourceGroup": {
"type": "string",
"title": "Resource Group",
"description": "The resource group that will contain the virtual machine",
"order": 2
},
"subscriptionId": {
"type": "string",
"title": "Subscription ID",
"description": "The identifier of your subscription",
"order": 1
}
},
"required": [
"subscriptionId",
"resourceGroup"
]
}
""")
def __init__(self):
super(self.__class__, self).__init__(self.schema)
class ListVmOutput(komand.Output):
schema = json.loads("""
{
"type": "object",
"title": "Variables",
"properties": {
"value": {
"type": "array",
"title": "Value",
"description": "List items virtual machine in a resource group",
"items": {
"$ref": "#/definitions/value_vm"
},
"order": 1
}
},
"definitions": {
"additionalUnattendContent": {
"type": "object",
"title": "additionalUnattendContent",
"properties": {
"component": {
"type": "string",
"title": "Component",
"description": "Specifies the name of the component to configure with the added content",
"order": 1
},
"content": {
"type": "string",
"title": "Content",
"description": "Specifies the xml formatted content that is added to the unattend.xml file for the specified path and component",
"order": 2
},
"pass": {
"type": "string",
"title": "Pass",
"description": "Specifies the name of the pass that the content applies to, the only allowable value is oobeSystem",
"order": 3
},
"settingName": {
"type": "string",
"title": "Setting Name",
"description": "Specifies the name of the setting to which the content applies, possible values are: firstlogoncommands and autologon",
"order": 4
}
}
},
"availabilitySet": {
"type": "object",
"title": "availabilitySet",
"properties": {
"id": {
"type": "string",
"title": "ID",
"description": "Specifies the resource ID",
"order": 1
}
}
},
"bootDiagnostics": {
"type": "object",
"title": "bootDiagnostics",
"properties": {
"enabled": {
"type": "boolean",
"title": "Enabled",
"description": "Specifies if the boot diagnostics is enabled",
"order": 1
},
"storageUri": {
"type": "string",
"title": "Storage Uri",
"description": "Uri of the storage account to use for placing the console output and screenshot",
"order": 2
}
}
},
"diagnosticsProfile": {
"type": "object",
"title": "diagnosticsProfile",
"properties": {
"bootDiagnostics": {
"$ref": "#/definitions/bootDiagnostics",
"title": "Boot Diagnostics",
"description": "Boot diagnostics is a debugging feature which allows you to view console Output and screenshot to diagnose vm status",
"order": 1
}
},
"definitions": {
"bootDiagnostics": {
"type": "object",
"title": "bootDiagnostics",
"properties": {
"enabled": {
"type": "boolean",
"title": "Enabled",
"description": "Specifies if the boot diagnostics is enabled",
"order": 1
},
"storageUri": {
"type": "string",
"title": "Storage Uri",
"description": "Uri of the storage account to use for placing the console output and screenshot",
"order": 2
}
}
}
}
},
"hardwareProfile": {
"type": "object",
"title": "hardwareProfile",
"properties": {
"vmSize": {
"type": "string",
"title": "VM Size",
"description": "Specifies the size of the virtual machine",
"order": 1
}
}
},
"imageReference": {
"type": "object",
"title": "imageReference",
"properties": {
"id": {
"type": "string",
"title": "Image Reference",
"description": "Specifies the resource identifier of a virtual machine image in your subscription",
"order": 1
},
"offer": {
"type": "string",
"title": "Offer",
"description": "Specifies the offer of the platform image or marketplace image used to create the virtual machine",
"order": 2
},
"publisher": {
"type": "string",
"title": "Publisher",
"description": "Specifies the publisher of the platform image or marketplace image used to create the virtual machine",
"order": 3
},
"sku": {
"type": "string",
"title": "SKU",
"description": "Specifies the sku of the platform image or marketplace image used to create the virtual machine",
"order": 4
},
"version": {
"type": "string",
"title": "Version",
"description": "Specifies the version of the platform image or marketplace image used to create the virtual machine",
"order": 5
}
}
},
"linuxConfiguration": {
"type": "object",
"title": "linuxConfiguration",
"properties": {
"disablePasswordAuthentication": {
"type": "boolean",
"title": "Disable Password Authentication",
"description": "Specifies whether password authentication should be disabled",
"order": 1
},
"ssh": {
"$ref": "#/definitions/ssh",
"title": "SSH",
"description": "Specifies a collection of keys to be placed on the virtual machine",
"order": 2
}
},
"definitions": {
"publicKeys": {
"type": "object",
"title": "publicKeys",
"properties": {
"keyData": {
"type": "string",
"title": "Key Data",
"description": "SSH public key certificate used to authenticate with the vm through ssh",
"order": 1
},
"path": {
"type": "string",
"title": "Path",
"description": "Specifies the full path on the created VM where ssh public key is stored",
"order": 2
}
}
},
"ssh": {
"type": "object",
"title": "ssh",
"properties": {
"publicKeys": {
"type": "array",
"title": "Public Keys",
"description": "Specifies a collection of keys to be placed on the virtual machine",
"items": {
"$ref": "#/definitions/publicKeys"
},
"order": 1
}
},
"definitions": {
"publicKeys": {
"type": "object",
"title": "publicKeys",
"properties": {
"keyData": {
"type": "string",
"title": "Key Data",
"description": "SSH public key certificate used to authenticate with the vm through ssh",
"order": 1
},
"path": {
"type": "string",
"title": "Path",
"description": "Specifies the full path on the created VM where ssh public key is stored",
"order": 2
}
}
}
}
}
}
},
"listeners": {
"type": "object",
"title": "listeners",
"properties": {
"certificateUrl": {
"type": "string",
"title": "Certificate Url",
"description": "Specifies url of the certificate with which new virtual machines is provisioned",
"order": 1
},
"protocol": {
"type": "string",
"title": "Protocol",
"description": "Specifies the protocol of listener",
"order": 2
}
}
},
"managedDisk": {
"type": "object",
"title": "managedDisk",
"properties": {
"Id": {
"type": "string",
"title": "ID",
"description": "Specifies the resource identifier of the managed disk",
"order": 1
},
"storageAccountType": {
"type": "string",
"title": "Storage Account Type",
"description": "Specifies the storage account type for the managed disk",
"order": 2
}
}
},
"networkProfile": {
"type": "object",
"title": "networkProfile",
"properties": {
"networkInterfaces": {
"type": "array",
"title": "Network Interfaces",
"description": "Specifies the list of resource ids for the network interfaces associated with the virtual machine",
"items": {
"$ref": "#/definitions/availabilitySet"
},
"order": 1
}
},
"definitions": {
"availabilitySet": {
"type": "object",
"title": "availabilitySet",
"properties": {
"id": {
"type": "string",
"title": "ID",
"description": "Specifies the resource ID",
"order": 1
}
}
}
}
},
"osDisk": {
"type": "object",
"title": "osDisk",
"properties": {
"caching": {
"type": "string",
"title": "Caching",
"description": "Specifies the caching requirements",
"order": 1
},
"createOption": {
"type": "string",
"title": "Create Option",
"description": "Specifies how the virtual machine should be created",
"order": 2
},
"managedDisk": {
"$ref": "#/definitions/managedDisk",
"title": "Managed Disk",
"description": "Specified the identifier and optional storage account type for the disk",
"order": 3
},
"name": {
"type": "string",
"title": "Name",
"description": "Specifies the disk name",
"order": 4
},
"osType": {
"type": "string",
"title": "OS Type",
"description": "This property allows you to specify the type of the os that is included in the disk if creating a vm from user-image or a specialized vhd",
"order": 5
},
"vhd": {
"$ref": "#/definitions/vhd",
"title": "VHD",
"description": "Specifies the uri of the location in storage where the vhd for the virtual machine should be placed",
"order": 6
}
},
"definitions": {
"managedDisk": {
"type": "object",
"title": "managedDisk",
"properties": {
"Id": {
"type": "string",
"title": "ID",
"description": "Specifies the resource identifier of the managed disk",
"order": 1
},
"storageAccountType": {
"type": "string",
"title": "Storage Account Type",
"description": "Specifies the storage account type for the managed disk",
"order": 2
}
}
},
"vhd": {
"type": "object",
"title": "vhd",
"properties": {
"uri": {
"type": "string",
"title": "VHD",
"description": "Specifies the vhd uri",
"order": 1
}
}
}
}
},
"osProfile": {
"type": "object",
"title": "osProfile",
"properties": {
"adminPassword": {
"type": "string",
"title": "Admin Password",
"description": "Specifies the password of the administrator account",
"order": 1
},
"adminUsername": {
"type": "string",
"title": "Admin UserName",
"description": "Specifies the name of the administrator account",
"order": 2
},
"computerName": {
"type": "string",
"title": "Computer Name",
"description": "Specifies the host os name of the virtual machine",
"order": 3
},
"customData": {
"type": "string",
"title": "Custom Data",
"description": "Specifies a base-64 encoded string of custom data",
"order": 4
},
"linuxConfiguration": {
"$ref": "#/definitions/linuxConfiguration",
"title": "Linux Configuration",
"description": "Specifies the linux operating system settings on the virtual machine",
"order": 7
},
"secrets": {
"type": "array",
"title": "Secrets",
"description": "Specifies set of certificates that should be installed onto the virtual machine",
"items": {
"type": "object"
},
"order": 5
},
"windowsConfiguration": {
"$ref": "#/definitions/windowsConfiguration",
"title": "Windows Configuration",
"description": "Specifies windows operating system settings on the virtual machine",
"order": 6
}
},
"definitions": {
"additionalUnattendContent": {
"type": "object",
"title": "additionalUnattendContent",
"properties": {
"component": {
"type": "string",
"title": "Component",
"description": "Specifies the name of the component to configure with the added content",
"order": 1
},
"content": {
"type": "string",
"title": "Content",
"description": "Specifies the xml formatted content that is added to the unattend.xml file for the specified path and component",
"order": 2
},
"pass": {
"type": "string",
"title": "Pass",
"description": "Specifies the name of the pass that the content applies to, the only allowable value is oobeSystem",
"order": 3
},
"settingName": {
"type": "string",
"title": "Setting Name",
"description": "Specifies the name of the setting to which the content applies, possible values are: firstlogoncommands and autologon",
"order": 4
}
}
},
"linuxConfiguration": {
"type": "object",
"title": "linuxConfiguration",
"properties": {
"disablePasswordAuthentication": {
"type": "boolean",
"title": "Disable Password Authentication",
"description": "Specifies whether password authentication should be disabled",
"order": 1
},
"ssh": {
"$ref": "#/definitions/ssh",
"title": "SSH",
"description": "Specifies a collection of keys to be placed on the virtual machine",
"order": 2
}
},
"definitions": {
"publicKeys": {
"type": "object",
"title": "publicKeys",
"properties": {
"keyData": {
"type": "string",
"title": "Key Data",
"description": "SSH public key certificate used to authenticate with the vm through ssh",
"order": 1
},
"path": {
"type": "string",
"title": "Path",
"description": "Specifies the full path on the created VM where ssh public key is stored",
"order": 2
}
}
},
"ssh": {
"type": "object",
"title": "ssh",
"properties": {
"publicKeys": {
"type": "array",
"title": "Public Keys",
"description": "Specifies a collection of keys to be placed on the virtual machine",
"items": {
"$ref": "#/definitions/publicKeys"
},
"order": 1
}
},
"definitions": {
"publicKeys": {
"type": "object",
"title": "publicKeys",
"properties": {
"keyData": {
"type": "string",
"title": "Key Data",
"description": "SSH public key certificate used to authenticate with the vm through ssh",
"order": 1
},
"path": {
"type": "string",
"title": "Path",
"description": "Specifies the full path on the created VM where ssh public key is stored",
"order": 2
}
}
}
}
}
}
},
"listeners": {
"type": "object",
"title": "listeners",
"properties": {
"certificateUrl": {
"type": "string",
"title": "Certificate Url",
"description": "Specifies url of the certificate with which new virtual machines is provisioned",
"order": 1
},
"protocol": {
"type": "string",
"title": "Protocol",
"description": "Specifies the protocol of listener",
"order": 2
}
}
},
"publicKeys": {
"type": "object",
"title": "publicKeys",
"properties": {
"keyData": {
"type": "string",
"title": "Key Data",
"description": "SSH public key certificate used to authenticate with the vm through ssh",
"order": 1
},
"path": {
"type": "string",
"title": "Path",
"description": "Specifies the full path on the created VM where ssh public key is stored",
"order": 2
}
}
},
"ssh": {
"type": "object",
"title": "ssh",
"properties": {
"publicKeys": {
"type": "array",
"title": "Public Keys",
"description": "Specifies a collection of keys to be placed on the virtual machine",
"items": {
"$ref": "#/definitions/publicKeys"
},
"order": 1
}
},
"definitions": {
"publicKeys": {
"type": "object",
"title": "publicKeys",
"properties": {
"keyData": {
"type": "string",
"title": "Key Data",
"description": "SSH public key certificate used to authenticate with the vm through ssh",
"order": 1
},
"path": {
"type": "string",
"title": "Path",
"description": "Specifies the full path on the created VM where ssh public key is stored",
"order": 2
}
}
}
}
},
"winRM": {
"type": "object",
"title": "winRM",
"properties": {
"listeners": {
"type": "array",
"title": "Listeners",
"items": {
"$ref": "#/definitions/listeners"
},
"order": 1
}
},
"definitions": {
"listeners": {
"type": "object",
"title": "listeners",
"properties": {
"certificateUrl": {
"type": "string",
"title": "Certificate Url",
"description": "Specifies url of the certificate with which new virtual machines is provisioned",
"order": 1
},
"protocol": {
"type": "string",
"title": "Protocol",
"description": "Specifies the protocol of listener",
"order": 2
}
}
}
}
},
"windowsConfiguration": {
"type": "object",
"title": "windowsConfiguration",
"properties": {
"additionalUnattendContent": {
"$ref": "#/definitions/additionalUnattendContent",
"title": "Additional Unattend Content",
"description": "Specifies additional xml formatted information that can be included in the unattend.xml file, which is used by windows setup",
"order": 1
},
"enableAutomaticUpdates": {
"type": "boolean",
"title": "Enable Automatic Updates",
"description": "Indicates whether virtual machine is enabled for automatic updates",
"order": 2
},
"provisionVMAgent": {
"type": "boolean",
"title": "Provision VM Agent",
"description": "Indicates whether virtual machine agent should be provisioned on the virtual machine",
"order": 3
},
"winRM": {
"$ref": "#/definitions/winRM",
"title": "Win RM",
"description": "Specifies the windows remote management listeners, this enables remote windows powershell",
"order": 4
},
"winrRMListener": {
"$ref": "#/definitions/listeners",
"title": "WinrRM Listener",
"description": "Contains configuration settings for the windows remote management service on the virtual machine",
"order": 5
}
},
"definitions": {
"additionalUnattendContent": {
"type": "object",
"title": "additionalUnattendContent",
"properties": {
"component": {
"type": "string",
"title": "Component",
"description": "Specifies the name of the component to configure with the added content",
"order": 1
},
"content": {
"type": "string",
"title": "Content",
"description": "Specifies the xml formatted content that is added to the unattend.xml file for the specified path and component",
"order": 2
},
"pass": {
"type": "string",
"title": "Pass",
"description": "Specifies the name of the pass that the content applies to, the only allowable value is oobeSystem",
"order": 3
},
"settingName": {
"type": "string",
"title": "Setting Name",
"description": "Specifies the name of the setting to which the content applies, possible values are: firstlogoncommands and autologon",
"order": 4
}
}
},
"listeners": {
"type": "object",
"title": "listeners",
"properties": {
"certificateUrl": {
"type": "string",
"title": "Certificate Url",
"description": "Specifies url of the certificate with which new virtual machines is provisioned",
"order": 1
},
"protocol": {
"type": "string",
"title": "Protocol",
"description": "Specifies the protocol of listener",
"order": 2
}
}
},
"winRM": {
"type": "object",
"title": "winRM",
"properties": {
"listeners": {
"type": "array",
"title": "Listeners",
"items": {
"$ref": "#/definitions/listeners"
},
"order": 1
}
},
"definitions": {
"listeners": {
"type": "object",
"title": "listeners",
"properties": {
"certificateUrl": {
"type": "string",
"title": "Certificate Url",
"description": "Specifies url of the certificate with which new virtual machines is provisioned",
"order": 1
},
"protocol": {
"type": "string",
"title": "Protocol",
"description": "Specifies the protocol of listener",
"order": 2
}
}
}
}
}
}
}
}
},
"properties": {
"type": "object",
"title": "properties",
"properties": {
"availabilitySet": {
"$ref": "#/definitions/availabilitySet",
"title": "Availability Set",
"description": "The availability set that contains the virtual machine",
"order": 1
},
"diagnosticsProfile": {
"$ref": "#/definitions/diagnosticsProfile",
"title": "Diagnostics Profile",
"description": "Specifies the boot diagnostic settings state",
"order": 2
},
"hardwareProfile": {
"$ref": "#/definitions/hardwareProfile",
"title": "Hardware Profile",
"description": "Specifies the hardware settings for the virtual machine",
"order": 3
},
"networkProfile": {
"$ref": "#/definitions/networkProfile",
"title": "Network Profile",
"description": "Specifies the network interfaces of the virtual machine",
"order": 4
},
"osProfile": {
"$ref": "#/definitions/osProfile",
"title": "OS Profile",
"description": "Specifies the operating system settings for the virtual machine",
"order": 5
},
"provisioningState": {
"type": "string",
"title": "Provisioning State",
"description": "Specifies the provisioned state of the virtual machine",
"order": 6
},
"storageProfile": {
"$ref": "#/definitions/storageProfile",
"title": "Storage Profile",
"description": "Specifies the storage settings for the virtual machine disks",
"order": 7
},
"vmId": {
"type": "string",
"title": "Virtual Machine ID",
"description": "The vm unique id",
"order": 8
}
},
"definitions": {
"additionalUnattendContent": {
"type": "object",
"title": "additionalUnattendContent",
"properties": {
"component": {
"type": "string",
"title": "Component",
"description": "Specifies the name of the component to configure with the added content",
"order": 1
},
"content": {
"type": "string",
"title": "Content",
"description": "Specifies the xml formatted content that is added to the unattend.xml file for the specified path and component",
"order": 2
},
"pass": {
"type": "string",
"title": "Pass",
"description": "Specifies the name of the pass that the content applies to, the only allowable value is oobeSystem",
"order": 3
},
"settingName": {
"type": "string",
"title": "Setting Name",
"description": "Specifies the name of the setting to which the content applies, possible values are: firstlogoncommands and autologon",
"order": 4
}
}
},
"availabilitySet": {
"type": "object",
"title": "availabilitySet",
"properties": {
"id": {
"type": "string",
"title": "ID",
"description": "Specifies the resource ID",
"order": 1
}
}
},
"bootDiagnostics": {
"type": "object",
"title": "bootDiagnostics",
"properties": {
"enabled": {
"type": "boolean",
"title": "Enabled",
"description": "Specifies if the boot diagnostics is enabled",
"order": 1
},
"storageUri": {
"type": "string",
"title": "Storage Uri",
"description": "Uri of the storage account to use for placing the console output and screenshot",
"order": 2
}
}
},
"diagnosticsProfile": {
"type": "object",
"title": "diagnosticsProfile",
"properties": {
"bootDiagnostics": {
"$ref": "#/definitions/bootDiagnostics",
"title": "Boot Diagnostics",
"description": "Boot diagnostics is a debugging feature which allows you to view console Output and screenshot to diagnose vm status",
"order": 1
}
},
"definitions": {
"bootDiagnostics": {
"type": "object",
"title": "bootDiagnostics",
"properties": {
"enabled": {
"type": "boolean",
"title": "Enabled",
"description": "Specifies if the boot diagnostics is enabled",
"order": 1
},
"storageUri": {
"type": "string",
"title": "Storage Uri",
"description": "Uri of the storage account to use for placing the console output and screenshot",
"order": 2
}
}
}
}
},
"hardwareProfile": {
"type": "object",
"title": "hardwareProfile",
"properties": {
"vmSize": {
"type": "string",
"title": "VM Size",
"description": "Specifies the size of the virtual machine",
"order": 1
}
}
},
"imageReference": {
"type": "object",
"title": "imageReference",
"properties": {
"id": {
"type": "string",
"title": "Image Reference",
"description": "Specifies the resource identifier of a virtual machine image in your subscription",
"order": 1
},
"offer": {
"type": "string",
"title": "Offer",
"description": "Specifies the offer of the platform image or marketplace image used to create the virtual machine",
"order": 2
},
"publisher": {
"type": "string",
"title": "Publisher",
"description": "Specifies the publisher of the platform image or marketplace image used to create the virtual machine",
"order": 3
},
"sku": {
"type": "string",
"title": "SKU",
"description": "Specifies the sku of the platform image or marketplace image used to create the virtual machine",
"order": 4
},
"version": {
"type": "string",
"title": "Version",
"description": "Specifies the version of the platform image or marketplace image used to create the virtual machine",
"order": 5
}
}
},
"linuxConfiguration": {
"type": "object",
"title": "linuxConfiguration",
"properties": {
"disablePasswordAuthentication": {
"type": "boolean",
"title": "Disable Password Authentication",
"description": "Specifies whether password authentication should be disabled",
"order": 1
},
"ssh": {
"$ref": "#/definitions/ssh",
"title": "SSH",
"description": "Specifies a collection of keys to be placed on the virtual machine",
"order": 2
}
},
"definitions": {
"publicKeys": {
"type": "object",
"title": "publicKeys",
"properties": {
"keyData": {
"type": "string",
"title": "Key Data",
"description": "SSH public key certificate used to authenticate with the vm through ssh",
"order": 1
},
"path": {
"type": "string",
"title": "Path",
"description": "Specifies the full path on the created VM where ssh public key is stored",
"order": 2
}
}
},
"ssh": {
"type": "object",
"title": "ssh",
"properties": {
"publicKeys": {
"type": "array",
"title": "Public Keys",
"description": "Specifies a collection of keys to be placed on the virtual machine",
"items": {
"$ref": "#/definitions/publicKeys"
},
"order": 1
}
},
"definitions": {
"publicKeys": {
"type": "object",
"title": "publicKeys",
"properties": {
"keyData": {
"type": "string",
"title": "Key Data",
"description": "SSH public key certificate used to authenticate with the vm through ssh",
"order": 1
},
"path": {
"type": "string",
"title": "Path",
"description": "Specifies the full path on the created VM where ssh public key is stored",
"order": 2
}
}
}
}
}
}
},
"listeners": {
"type": "object",
"title": "listeners",
"properties": {
"certificateUrl": {
"type": "string",
"title": "Certificate Url",
"description": "Specifies url of the certificate with which new virtual machines is provisioned",
"order": 1
},
"protocol": {
"type": "string",
"title": "Protocol",
"description": "Specifies the protocol of listener",
"order": 2
}
}
},
"managedDisk": {
"type": "object",
"title": "managedDisk",
"properties": {
"Id": {
"type": "string",
"title": "ID",
"description": "Specifies the resource identifier of the managed disk",
"order": 1
},
"storageAccountType": {
"type": "string",
"title": "Storage Account Type",
"description": "Specifies the storage account type for the managed disk",
"order": 2
}
}
},
"networkProfile": {
"type": "object",
"title": "networkProfile",
"properties": {
"networkInterfaces": {
"type": "array",
"title": "Network Interfaces",
"description": "Specifies the list of resource ids for the network interfaces associated with the virtual machine",
"items": {
"$ref": "#/definitions/availabilitySet"
},
"order": 1
}
},
"definitions": {
"availabilitySet": {
"type": "object",
"title": "availabilitySet",
"properties": {
"id": {
"type": "string",
"title": "ID",
"description": "Specifies the resource ID",
"order": 1
}
}
}
}
},
"osDisk": {
"type": "object",
"title": "osDisk",
"properties": {
"caching": {
"type": "string",
"title": "Caching",
"description": "Specifies the caching requirements",
"order": 1
},
"createOption": {
"type": "string",
"title": "Create Option",
"description": "Specifies how the virtual machine should be created",
"order": 2
},
"managedDisk": {
"$ref": "#/definitions/managedDisk",
"title": "Managed Disk",
"description": "Specified the identifier and optional storage account type for the disk",
"order": 3
},
"name": {
"type": "string",
"title": "Name",
"description": "Specifies the disk name",
"order": 4
},
"osType": {
"type": "string",
"title": "OS Type",
"description": "This property allows you to specify the type of the os that is included in the disk if creating a vm from user-image or a specialized vhd",
"order": 5
},
"vhd": {
"$ref": "#/definitions/vhd",
"title": "VHD",
"description": "Specifies the uri of the location in storage where the vhd for the virtual machine should be placed",
"order": 6
}
},
"definitions": {
"managedDisk": {
"type": "object",
"title": "managedDisk",
"properties": {
"Id": {
"type": "string",
"title": "ID",
"description": "Specifies the resource identifier of the managed disk",
"order": 1
},
"storageAccountType": {
"type": "string",
"title": "Storage Account Type",
"description": "Specifies the storage account type for the managed disk",
"order": 2
}
}
},
"vhd": {
"type": "object",
"title": "vhd",
"properties": {
"uri": {
"type": "string",
"title": "VHD",
"description": "Specifies the vhd uri",
"order": 1
}
}
}
}
},
"osProfile": {
"type": "object",
"title": "osProfile",
"properties": {
"adminPassword": {
"type": "string",
"title": "Admin Password",
"description": "Specifies the password of the administrator account",
"order": 1
},
"adminUsername": {
"type": "string",
"title": "Admin UserName",
"description": "Specifies the name of the administrator account",
"order": 2
},
"computerName": {
"type": "string",
"title": "Computer Name",
"description": "Specifies the host os name of the virtual machine",
"order": 3
},
"customData": {
"type": "string",
"title": "Custom Data",
"description": "Specifies a base-64 encoded string of custom data",
"order": 4
},
"linuxConfiguration": {
"$ref": "#/definitions/linuxConfiguration",
"title": "Linux Configuration",
"description": "Specifies the linux operating system settings on the virtual machine",
"order": 7
},
"secrets": {
"type": "array",
"title": "Secrets",
"description": "Specifies set of certificates that should be installed onto the virtual machine",
"items": {
"type": "object"
},
"order": 5
},
"windowsConfiguration": {
"$ref": "#/definitions/windowsConfiguration",
"title": "Windows Configuration",
"description": "Specifies windows operating system settings on the virtual machine",
"order": 6
}
},
"definitions": {
"additionalUnattendContent": {
"type": "object",
"title": "additionalUnattendContent",
"properties": {
"component": {
"type": "string",
"title": "Component",
"description": "Specifies the name of the component to configure with the added content",
"order": 1
},
"content": {
"type": "string",
"title": "Content",
"description": "Specifies the xml formatted content that is added to the unattend.xml file for the specified path and component",
"order": 2
},
"pass": {
"type": "string",
"title": "Pass",
"description": "Specifies the name of the pass that the content applies to, the only allowable value is oobeSystem",
"order": 3
},
"settingName": {
"type": "string",
"title": "Setting Name",
"description": "Specifies the name of the setting to which the content applies, possible values are: firstlogoncommands and autologon",
"order": 4
}
}
},
"linuxConfiguration": {
"type": "object",
"title": "linuxConfiguration",
"properties": {
"disablePasswordAuthentication": {
"type": "boolean",
"title": "Disable Password Authentication",
"description": "Specifies whether password authentication should be disabled",
"order": 1
},
"ssh": {
"$ref": "#/definitions/ssh",
"title": "SSH",
"description": "Specifies a collection of keys to be placed on the virtual machine",
"order": 2
}
},
"definitions": {
"publicKeys": {
"type": "object",
"title": "publicKeys",
"properties": {
"keyData": {
"type": "string",
"title": "Key Data",
"description": "SSH public key certificate used to authenticate with the vm through ssh",
"order": 1
},
"path": {
"type": "string",
"title": "Path",
"description": "Specifies the full path on the created VM where ssh public key is stored",
"order": 2
}
}
},
"ssh": {
"type": "object",
"title": "ssh",
"properties": {
"publicKeys": {
"type": "array",
"title": "Public Keys",
"description": "Specifies a collection of keys to be placed on the virtual machine",
"items": {
"$ref": "#/definitions/publicKeys"
},
"order": 1
}
},
"definitions": {
"publicKeys": {
"type": "object",
"title": "publicKeys",
"properties": {
"keyData": {
"type": "string",
"title": "Key Data",
"description": "SSH public key certificate used to authenticate with the vm through ssh",
"order": 1
},
"path": {
"type": "string",
"title": "Path",
"description": "Specifies the full path on the created VM where ssh public key is stored",
"order": 2
}
}
}
}
}
}
},
"listeners": {
"type": "object",
"title": "listeners",
"properties": {
"certificateUrl": {
"type": "string",
"title": "Certificate Url",
"description": "Specifies url of the certificate with which new virtual machines is provisioned",
"order": 1
},
"protocol": {
"type": "string",
"title": "Protocol",
"description": "Specifies the protocol of listener",
"order": 2
}
}
},
"publicKeys": {
"type": "object",
"title": "publicKeys",
"properties": {
"keyData": {
"type": "string",
"title": "Key Data",
"description": "SSH public key certificate used to authenticate with the vm through ssh",
"order": 1
},
"path": {
"type": "string",
"title": "Path",
"description": "Specifies the full path on the created VM where ssh public key is stored",
"order": 2
}
}
},
"ssh": {
"type": "object",
"title": "ssh",
"properties": {
"publicKeys": {
"type": "array",
"title": "Public Keys",
"description": "Specifies a collection of keys to be placed on the virtual machine",
"items": {
"$ref": "#/definitions/publicKeys"
},
"order": 1
}
},
"definitions": {
"publicKeys": {
"type": "object",
"title": "publicKeys",
"properties": {
"keyData": {
"type": "string",
"title": "Key Data",
"description": "SSH public key certificate used to authenticate with the vm through ssh",
"order": 1
},
"path": {
"type": "string",
"title": "Path",
"description": "Specifies the full path on the created VM where ssh public key is stored",
"order": 2
}
}
}
}
},
"winRM": {
"type": "object",
"title": "winRM",
"properties": {
"listeners": {
"type": "array",
"title": "Listeners",
"items": {
"$ref": "#/definitions/listeners"
},
"order": 1
}
},
"definitions": {
"listeners": {
"type": "object",
"title": "listeners",
"properties": {
"certificateUrl": {
"type": "string",
"title": "Certificate Url",
"description": "Specifies url of the certificate with which new virtual machines is provisioned",
"order": 1
},
"protocol": {
"type": "string",
"title": "Protocol",
"description": "Specifies the protocol of listener",
"order": 2
}
}
}
}
},
"windowsConfiguration": {
"type": "object",
"title": "windowsConfiguration",
"properties": {
"additionalUnattendContent": {
"$ref": "#/definitions/additionalUnattendContent",
"title": "Additional Unattend Content",
"description": "Specifies additional xml formatted information that can be included in the unattend.xml file, which is used by windows setup",
"order": 1
},
"enableAutomaticUpdates": {
"type": "boolean",
"title": "Enable Automatic Updates",
"description": "Indicates whether virtual machine is enabled for automatic updates",
"order": 2
},
"provisionVMAgent": {
"type": "boolean",
"title": "Provision VM Agent",
"description": "Indicates whether virtual machine agent should be provisioned on the virtual machine",
"order": 3
},
"winRM": {
"$ref": "#/definitions/winRM",
"title": "Win RM",
"description": "Specifies the windows remote management listeners, this enables remote windows powershell",
"order": 4
},
"winrRMListener": {
"$ref": "#/definitions/listeners",
"title": "WinrRM Listener",
"description": "Contains configuration settings for the windows remote management service on the virtual machine",
"order": 5
}
},
"definitions": {
"additionalUnattendContent": {
"type": "object",
"title": "additionalUnattendContent",
"properties": {
"component": {
"type": "string",
"title": "Component",
"description": "Specifies the name of the component to configure with the added content",
"order": 1
},
"content": {
"type": "string",
"title": "Content",
"description": "Specifies the xml formatted content that is added to the unattend.xml file for the specified path and component",
"order": 2
},
"pass": {
"type": "string",
"title": "Pass",
"description": "Specifies the name of the pass that the content applies to, the only allowable value is oobeSystem",
"order": 3
},
"settingName": {
"type": "string",
"title": "Setting Name",
"description": "Specifies the name of the setting to which the content applies, possible values are: firstlogoncommands and autologon",
"order": 4
}
}
},
"listeners": {
"type": "object",
"title": "listeners",
"properties": {
"certificateUrl": {
"type": "string",
"title": "Certificate Url",
"description": "Specifies url of the certificate with which new virtual machines is provisioned",
"order": 1
},
"protocol": {
"type": "string",
"title": "Protocol",
"description": "Specifies the protocol of listener",
"order": 2
}
}
},
"winRM": {
"type": "object",
"title": "winRM",
"properties": {
"listeners": {
"type": "array",
"title": "Listeners",
"items": {
"$ref": "#/definitions/listeners"
},
"order": 1
}
},
"definitions": {
"listeners": {
"type": "object",
"title": "listeners",
"properties": {
"certificateUrl": {
"type": "string",
"title": "Certificate Url",
"description": "Specifies url of the certificate with which new virtual machines is provisioned",
"order": 1
},
"protocol": {
"type": "string",
"title": "Protocol",
"description": "Specifies the protocol of listener",
"order": 2
}
}
}
}
}
}
}
}
},
"publicKeys": {
"type": "object",
"title": "publicKeys",
"properties": {
"keyData": {
"type": "string",
"title": "Key Data",
"description": "SSH public key certificate used to authenticate with the vm through ssh",
"order": 1
},
"path": {
"type": "string",
"title": "Path",
"description": "Specifies the full path on the created VM where ssh public key is stored",
"order": 2
}
}
},
"ssh": {
"type": "object",
"title": "ssh",
"properties": {
"publicKeys": {
"type": "array",
"title": "Public Keys",
"description": "Specifies a collection of keys to be placed on the virtual machine",
"items": {
"$ref": "#/definitions/publicKeys"
},
"order": 1
}
},
"definitions": {
"publicKeys": {
"type": "object",
"title": "publicKeys",
"properties": {
"keyData": {
"type": "string",
"title": "Key Data",
"description": "SSH public key certificate used to authenticate with the vm through ssh",
"order": 1
},
"path": {
"type": "string",
"title": "Path",
"description": "Specifies the full path on the created VM where ssh public key is stored",
"order": 2
}
}
}
}
},
"storageProfile": {
"type": "object",
"title": "storageProfile",
"properties": {
"dataDisks": {
"type": "array",
"title": "Data Disks",
"description": "Specifies the parameters that are used to add a data disk to a virtual machine",
"items": {
"type": "object"
},
"order": 1
},
"imageReference": {
"$ref": "#/definitions/imageReference",
"title": "Image Reference",
"description": "Specifies information about the image to use",
"order": 2
},
"osDisk": {
"$ref": "#/definitions/osDisk",
"title": "OS Disk",
"description": "Specifies information about the operating system disk used by the virtual machine",
"order": 3
}
},
"definitions": {
"imageReference": {
"type": "object",
"title": "imageReference",
"properties": {
"id": {
"type": "string",
"title": "Image Reference",
"description": "Specifies the resource identifier of a virtual machine image in your subscription",
"order": 1
},
"offer": {
"type": "string",
"title": "Offer",
"description": "Specifies the offer of the platform image or marketplace image used to create the virtual machine",
"order": 2
},
"publisher": {
"type": "string",
"title": "Publisher",
"description": "Specifies the publisher of the platform image or marketplace image used to create the virtual machine",
"order": 3
},
"sku": {
"type": "string",
"title": "SKU",
"description": "Specifies the sku of the platform image or marketplace image used to create the virtual machine",
"order": 4
},
"version": {
"type": "string",
"title": "Version",
"description": "Specifies the version of the platform image or marketplace image used to create the virtual machine",
"order": 5
}
}
},
"managedDisk": {
"type": "object",
"title": "managedDisk",
"properties": {
"Id": {
"type": "string",
"title": "ID",
"description": "Specifies the resource identifier of the managed disk",
"order": 1
},
"storageAccountType": {
"type": "string",
"title": "Storage Account Type",
"description": "Specifies the storage account type for the managed disk",
"order": 2
}
}
},
"osDisk": {
"type": "object",
"title": "osDisk",
"properties": {
"caching": {
"type": "string",
"title": "Caching",
"description": "Specifies the caching requirements",
"order": 1
},
"createOption": {
"type": "string",
"title": "Create Option",
"description": "Specifies how the virtual machine should be created",
"order": 2
},
"managedDisk": {
"$ref": "#/definitions/managedDisk",
"title": "Managed Disk",
"description": "Specified the identifier and optional storage account type for the disk",
"order": 3
},
"name": {
"type": "string",
"title": "Name",
"description": "Specifies the disk name",
"order": 4
},
"osType": {
"type": "string",
"title": "OS Type",
"description": "This property allows you to specify the type of the os that is included in the disk if creating a vm from user-image or a specialized vhd",
"order": 5
},
"vhd": {
"$ref": "#/definitions/vhd",
"title": "VHD",
"description": "Specifies the uri of the location in storage where the vhd for the virtual machine should be placed",
"order": 6
}
},
"definitions": {
"managedDisk": {
"type": "object",
"title": "managedDisk",
"properties": {
"Id": {
"type": "string",
"title": "ID",
"description": "Specifies the resource identifier of the managed disk",
"order": 1
},
"storageAccountType": {
"type": "string",
"title": "Storage Account Type",
"description": "Specifies the storage account type for the managed disk",
"order": 2
}
}
},
"vhd": {
"type": "object",
"title": "vhd",
"properties": {
"uri": {
"type": "string",
"title": "VHD",
"description": "Specifies the vhd uri",
"order": 1
}
}
}
}
},
"vhd": {
"type": "object",
"title": "vhd",
"properties": {
"uri": {
"type": "string",
"title": "VHD",
"description": "Specifies the vhd uri",
"order": 1
}
}
}
}
},
"vhd": {
"type": "object",
"title": "vhd",
"properties": {
"uri": {
"type": "string",
"title": "VHD",
"description": "Specifies the vhd uri",
"order": 1
}
}
},
"winRM": {
"type": "object",
"title": "winRM",
"properties": {
"listeners": {
"type": "array",
"title": "Listeners",
"items": {
"$ref": "#/definitions/listeners"
},
"order": 1
}
},
"definitions": {
"listeners": {
"type": "object",
"title": "listeners",
"properties": {
"certificateUrl": {
"type": "string",
"title": "Certificate Url",
"description": "Specifies url of the certificate with which new virtual machines is provisioned",
"order": 1
},
"protocol": {
"type": "string",
"title": "Protocol",
"description": "Specifies the protocol of listener",
"order": 2
}
}
}
}
},
"windowsConfiguration": {
"type": "object",
"title": "windowsConfiguration",
"properties": {
"additionalUnattendContent": {
"$ref": "#/definitions/additionalUnattendContent",
"title": "Additional Unattend Content",
"description": "Specifies additional xml formatted information that can be included in the unattend.xml file, which is used by windows setup",
"order": 1
},
"enableAutomaticUpdates": {
"type": "boolean",
"title": "Enable Automatic Updates",
"description": "Indicates whether virtual machine is enabled for automatic updates",
"order": 2
},
"provisionVMAgent": {
"type": "boolean",
"title": "Provision VM Agent",
"description": "Indicates whether virtual machine agent should be provisioned on the virtual machine",
"order": 3
},
"winRM": {
"$ref": "#/definitions/winRM",
"title": "Win RM",
"description": "Specifies the windows remote management listeners, this enables remote windows powershell",
"order": 4
},
"winrRMListener": {
"$ref": "#/definitions/listeners",
"title": "WinrRM Listener",
"description": "Contains configuration settings for the windows remote management service on the virtual machine",
"order": 5
}
},
"definitions": {
"additionalUnattendContent": {
"type": "object",
"title": "additionalUnattendContent",
"properties": {
"component": {
"type": "string",
"title": "Component",
"description": "Specifies the name of the component to configure with the added content",
"order": 1
},
"content": {
"type": "string",
"title": "Content",
"description": "Specifies the xml formatted content that is added to the unattend.xml file for the specified path and component",
"order": 2
},
"pass": {
"type": "string",
"title": "Pass",
"description": "Specifies the name of the pass that the content applies to, the only allowable value is oobeSystem",
"order": 3
},
"settingName": {
"type": "string",
"title": "Setting Name",
"description": "Specifies the name of the setting to which the content applies, possible values are: firstlogoncommands and autologon",
"order": 4
}
}
},
"listeners": {
"type": "object",
"title": "listeners",
"properties": {
"certificateUrl": {
"type": "string",
"title": "Certificate Url",
"description": "Specifies url of the certificate with which new virtual machines is provisioned",
"order": 1
},
"protocol": {
"type": "string",
"title": "Protocol",
"description": "Specifies the protocol of listener",
"order": 2
}
}
},
"winRM": {
"type": "object",
"title": "winRM",
"properties": {
"listeners": {
"type": "array",
"title": "Listeners",
"items": {
"$ref": "#/definitions/listeners"
},
"order": 1
}
},
"definitions": {
"listeners": {
"type": "object",
"title": "listeners",
"properties": {
"certificateUrl": {
"type": "string",
"title": "Certificate Url",
"description": "Specifies url of the certificate with which new virtual machines is provisioned",
"order": 1
},
"protocol": {
"type": "string",
"title": "Protocol",
"description": "Specifies the protocol of listener",
"order": 2
}
}
}
}
}
}
}
}
},
"publicKeys": {
"type": "object",
"title": "publicKeys",
"properties": {
"keyData": {
"type": "string",
"title": "Key Data",
"description": "SSH public key certificate used to authenticate with the vm through ssh",
"order": 1
},
"path": {
"type": "string",
"title": "Path",
"description": "Specifies the full path on the created VM where ssh public key is stored",
"order": 2
}
}
},
"ssh": {
"type": "object",
"title": "ssh",
"properties": {
"publicKeys": {
"type": "array",
"title": "Public Keys",
"description": "Specifies a collection of keys to be placed on the virtual machine",
"items": {
"$ref": "#/definitions/publicKeys"
},
"order": 1
}
},
"definitions": {
"publicKeys": {
"type": "object",
"title": "publicKeys",
"properties": {
"keyData": {
"type": "string",
"title": "Key Data",
"description": "SSH public key certificate used to authenticate with the vm through ssh",
"order": 1
},
"path": {
"type": "string",
"title": "Path",
"description": "Specifies the full path on the created VM where ssh public key is stored",
"order": 2
}
}
}
}
},
"storageProfile": {
"type": "object",
"title": "storageProfile",
"properties": {
"dataDisks": {
"type": "array",
"title": "Data Disks",
"description": "Specifies the parameters that are used to add a data disk to a virtual machine",
"items": {
"type": "object"
},
"order": 1
},
"imageReference": {
"$ref": "#/definitions/imageReference",
"title": "Image Reference",
"description": "Specifies information about the image to use",
"order": 2
},
"osDisk": {
"$ref": "#/definitions/osDisk",
"title": "OS Disk",
"description": "Specifies information about the operating system disk used by the virtual machine",
"order": 3
}
},
"definitions": {
"imageReference": {
"type": "object",
"title": "imageReference",
"properties": {
"id": {
"type": "string",
"title": "Image Reference",
"description": "Specifies the resource identifier of a virtual machine image in your subscription",
"order": 1
},
"offer": {
"type": "string",
"title": "Offer",
"description": "Specifies the offer of the platform image or marketplace image used to create the virtual machine",
"order": 2
},
"publisher": {
"type": "string",
"title": "Publisher",
"description": "Specifies the publisher of the platform image or marketplace image used to create the virtual machine",
"order": 3
},
"sku": {
"type": "string",
"title": "SKU",
"description": "Specifies the sku of the platform image or marketplace image used to create the virtual machine",
"order": 4
},
"version": {
"type": "string",
"title": "Version",
"description": "Specifies the version of the platform image or marketplace image used to create the virtual machine",
"order": 5
}
}
},
"managedDisk": {
"type": "object",
"title": "managedDisk",
"properties": {
"Id": {
"type": "string",
"title": "ID",
"description": "Specifies the resource identifier of the managed disk",
"order": 1
},
"storageAccountType": {
"type": "string",
"title": "Storage Account Type",
"description": "Specifies the storage account type for the managed disk",
"order": 2
}
}
},
"osDisk": {
"type": "object",
"title": "osDisk",
"properties": {
"caching": {
"type": "string",
"title": "Caching",
"description": "Specifies the caching requirements",
"order": 1
},
"createOption": {
"type": "string",
"title": "Create Option",
"description": "Specifies how the virtual machine should be created",
"order": 2
},
"managedDisk": {
"$ref": "#/definitions/managedDisk",
"title": "Managed Disk",
"description": "Specified the identifier and optional storage account type for the disk",
"order": 3
},
"name": {
"type": "string",
"title": "Name",
"description": "Specifies the disk name",
"order": 4
},
"osType": {
"type": "string",
"title": "OS Type",
"description": "This property allows you to specify the type of the os that is included in the disk if creating a vm from user-image or a specialized vhd",
"order": 5
},
"vhd": {
"$ref": "#/definitions/vhd",
"title": "VHD",
"description": "Specifies the uri of the location in storage where the vhd for the virtual machine should be placed",
"order": 6
}
},
"definitions": {
"managedDisk": {
"type": "object",
"title": "managedDisk",
"properties": {
"Id": {
"type": "string",
"title": "ID",
"description": "Specifies the resource identifier of the managed disk",
"order": 1
},
"storageAccountType": {
"type": "string",
"title": "Storage Account Type",
"description": "Specifies the storage account type for the managed disk",
"order": 2
}
}
},
"vhd": {
"type": "object",
"title": "vhd",
"properties": {
"uri": {
"type": "string",
"title": "VHD",
"description": "Specifies the vhd uri",
"order": 1
}
}
}
}
},
"vhd": {
"type": "object",
"title": "vhd",
"properties": {
"uri": {
"type": "string",
"title": "VHD",
"description": "Specifies the vhd uri",
"order": 1
}
}
}
}
},
"tags": {
"type": "object",
"title": "tags",
"properties": {
"tags": {
"type": "object",
"title": "Tags",
"description": "Tags",
"order": 1
}
}
},
"value_vm": {
"type": "object",
"title": "value_vm",
"properties": {
"id": {
"type": "string",
"title": "ID",
"description": "Specifies the identifying url of the virtual machine",
"order": 1
},
"location": {
"type": "string",
"title": "Location",
"description": "Specifies the supported Azure location where the virtual machine should be created",
"order": 2
},
"name": {
"type": "string",
"title": "Name Virtual Machine",
"description": "The name of the virtual machine",
"order": 3
},
"properties": {
"$ref": "#/definitions/properties",
"title": "Properties",
"description": "Specifies the properties of the virtual machine",
"order": 4
},
"tags": {
"$ref": "#/definitions/tags",
"title": "Tags",
"description": "Specifies the tags that are assigned to the virtual machine",
"order": 6
},
"type": {
"type": "string",
"title": "Type",
"description": "Specifies the type of compute resource",
"order": 5
}
},
"definitions": {
"additionalUnattendContent": {
"type": "object",
"title": "additionalUnattendContent",
"properties": {
"component": {
"type": "string",
"title": "Component",
"description": "Specifies the name of the component to configure with the added content",
"order": 1
},
"content": {
"type": "string",
"title": "Content",
"description": "Specifies the xml formatted content that is added to the unattend.xml file for the specified path and component",
"order": 2
},
"pass": {
"type": "string",
"title": "Pass",
"description": "Specifies the name of the pass that the content applies to, the only allowable value is oobeSystem",
"order": 3
},
"settingName": {
"type": "string",
"title": "Setting Name",
"description": "Specifies the name of the setting to which the content applies, possible values are: firstlogoncommands and autologon",
"order": 4
}
}
},
"availabilitySet": {
"type": "object",
"title": "availabilitySet",
"properties": {
"id": {
"type": "string",
"title": "ID",
"description": "Specifies the resource ID",
"order": 1
}
}
},
"bootDiagnostics": {
"type": "object",
"title": "bootDiagnostics",
"properties": {
"enabled": {
"type": "boolean",
"title": "Enabled",
"description": "Specifies if the boot diagnostics is enabled",
"order": 1
},
"storageUri": {
"type": "string",
"title": "Storage Uri",
"description": "Uri of the storage account to use for placing the console output and screenshot",
"order": 2
}
}
},
"diagnosticsProfile": {
"type": "object",
"title": "diagnosticsProfile",
"properties": {
"bootDiagnostics": {
"$ref": "#/definitions/bootDiagnostics",
"title": "Boot Diagnostics",
"description": "Boot diagnostics is a debugging feature which allows you to view console Output and screenshot to diagnose vm status",
"order": 1
}
},
"definitions": {
"bootDiagnostics": {
"type": "object",
"title": "bootDiagnostics",
"properties": {
"enabled": {
"type": "boolean",
"title": "Enabled",
"description": "Specifies if the boot diagnostics is enabled",
"order": 1
},
"storageUri": {
"type": "string",
"title": "Storage Uri",
"description": "Uri of the storage account to use for placing the console output and screenshot",
"order": 2
}
}
}
}
},
"hardwareProfile": {
"type": "object",
"title": "hardwareProfile",
"properties": {
"vmSize": {
"type": "string",
"title": "VM Size",
"description": "Specifies the size of the virtual machine",
"order": 1
}
}
},
"imageReference": {
"type": "object",
"title": "imageReference",
"properties": {
"id": {
"type": "string",
"title": "Image Reference",
"description": "Specifies the resource identifier of a virtual machine image in your subscription",
"order": 1
},
"offer": {
"type": "string",
"title": "Offer",
"description": "Specifies the offer of the platform image or marketplace image used to create the virtual machine",
"order": 2
},
"publisher": {
"type": "string",
"title": "Publisher",
"description": "Specifies the publisher of the platform image or marketplace image used to create the virtual machine",
"order": 3
},
"sku": {
"type": "string",
"title": "SKU",
"description": "Specifies the sku of the platform image or marketplace image used to create the virtual machine",
"order": 4
},
"version": {
"type": "string",
"title": "Version",
"description": "Specifies the version of the platform image or marketplace image used to create the virtual machine",
"order": 5
}
}
},
"linuxConfiguration": {
"type": "object",
"title": "linuxConfiguration",
"properties": {
"disablePasswordAuthentication": {
"type": "boolean",
"title": "Disable Password Authentication",
"description": "Specifies whether password authentication should be disabled",
"order": 1
},
"ssh": {
"$ref": "#/definitions/ssh",
"title": "SSH",
"description": "Specifies a collection of keys to be placed on the virtual machine",
"order": 2
}
},
"definitions": {
"publicKeys": {
"type": "object",
"title": "publicKeys",
"properties": {
"keyData": {
"type": "string",
"title": "Key Data",
"description": "SSH public key certificate used to authenticate with the vm through ssh",
"order": 1
},
"path": {
"type": "string",
"title": "Path",
"description": "Specifies the full path on the created VM where ssh public key is stored",
"order": 2
}
}
},
"ssh": {
"type": "object",
"title": "ssh",
"properties": {
"publicKeys": {
"type": "array",
"title": "Public Keys",
"description": "Specifies a collection of keys to be placed on the virtual machine",
"items": {
"$ref": "#/definitions/publicKeys"
},
"order": 1
}
},
"definitions": {
"publicKeys": {
"type": "object",
"title": "publicKeys",
"properties": {
"keyData": {
"type": "string",
"title": "Key Data",
"description": "SSH public key certificate used to authenticate with the vm through ssh",
"order": 1
},
"path": {
"type": "string",
"title": "Path",
"description": "Specifies the full path on the created VM where ssh public key is stored",
"order": 2
}
}
}
}
}
}
},
"listeners": {
"type": "object",
"title": "listeners",
"properties": {
"certificateUrl": {
"type": "string",
"title": "Certificate Url",
"description": "Specifies url of the certificate with which new virtual machines is provisioned",
"order": 1
},
"protocol": {
"type": "string",
"title": "Protocol",
"description": "Specifies the protocol of listener",
"order": 2
}
}
},
"managedDisk": {
"type": "object",
"title": "managedDisk",
"properties": {
"Id": {
"type": "string",
"title": "ID",
"description": "Specifies the resource identifier of the managed disk",
"order": 1
},
"storageAccountType": {
"type": "string",
"title": "Storage Account Type",
"description": "Specifies the storage account type for the managed disk",
"order": 2
}
}
},
"networkProfile": {
"type": "object",
"title": "networkProfile",
"properties": {
"networkInterfaces": {
"type": "array",
"title": "Network Interfaces",
"description": "Specifies the list of resource ids for the network interfaces associated with the virtual machine",
"items": {
"$ref": "#/definitions/availabilitySet"
},
"order": 1
}
},
"definitions": {
"availabilitySet": {
"type": "object",
"title": "availabilitySet",
"properties": {
"id": {
"type": "string",
"title": "ID",
"description": "Specifies the resource ID",
"order": 1
}
}
}
}
},
"osDisk": {
"type": "object",
"title": "osDisk",
"properties": {
"caching": {
"type": "string",
"title": "Caching",
"description": "Specifies the caching requirements",
"order": 1
},
"createOption": {
"type": "string",
"title": "Create Option",
"description": "Specifies how the virtual machine should be created",
"order": 2
},
"managedDisk": {
"$ref": "#/definitions/managedDisk",
"title": "Managed Disk",
"description": "Specified the identifier and optional storage account type for the disk",
"order": 3
},
"name": {
"type": "string",
"title": "Name",
"description": "Specifies the disk name",
"order": 4
},
"osType": {
"type": "string",
"title": "OS Type",
"description": "This property allows you to specify the type of the os that is included in the disk if creating a vm from user-image or a specialized vhd",
"order": 5
},
"vhd": {
"$ref": "#/definitions/vhd",
"title": "VHD",
"description": "Specifies the uri of the location in storage where the vhd for the virtual machine should be placed",
"order": 6
}
},
"definitions": {
"managedDisk": {
"type": "object",
"title": "managedDisk",
"properties": {
"Id": {
"type": "string",
"title": "ID",
"description": "Specifies the resource identifier of the managed disk",
"order": 1
},
"storageAccountType": {
"type": "string",
"title": "Storage Account Type",
"description": "Specifies the storage account type for the managed disk",
"order": 2
}
}
},
"vhd": {
"type": "object",
"title": "vhd",
"properties": {
"uri": {
"type": "string",
"title": "VHD",
"description": "Specifies the vhd uri",
"order": 1
}
}
}
}
},
"osProfile": {
"type": "object",
"title": "osProfile",
"properties": {
"adminPassword": {
"type": "string",
"title": "Admin Password",
"description": "Specifies the password of the administrator account",
"order": 1
},
"adminUsername": {
"type": "string",
"title": "Admin UserName",
"description": "Specifies the name of the administrator account",
"order": 2
},
"computerName": {
"type": "string",
"title": "Computer Name",
"description": "Specifies the host os name of the virtual machine",
"order": 3
},
"customData": {
"type": "string",
"title": "Custom Data",
"description": "Specifies a base-64 encoded string of custom data",
"order": 4
},
"linuxConfiguration": {
"$ref": "#/definitions/linuxConfiguration",
"title": "Linux Configuration",
"description": "Specifies the linux operating system settings on the virtual machine",
"order": 7
},
"secrets": {
"type": "array",
"title": "Secrets",
"description": "Specifies set of certificates that should be installed onto the virtual machine",
"items": {
"type": "object"
},
"order": 5
},
"windowsConfiguration": {
"$ref": "#/definitions/windowsConfiguration",
"title": "Windows Configuration",
"description": "Specifies windows operating system settings on the virtual machine",
"order": 6
}
},
"definitions": {
"additionalUnattendContent": {
"type": "object",
"title": "additionalUnattendContent",
"properties": {
"component": {
"type": "string",
"title": "Component",
"description": "Specifies the name of the component to configure with the added content",
"order": 1
},
"content": {
"type": "string",
"title": "Content",
"description": "Specifies the xml formatted content that is added to the unattend.xml file for the specified path and component",
"order": 2
},
"pass": {
"type": "string",
"title": "Pass",
"description": "Specifies the name of the pass that the content applies to, the only allowable value is oobeSystem",
"order": 3
},
"settingName": {
"type": "string",
"title": "Setting Name",
"description": "Specifies the name of the setting to which the content applies, possible values are: firstlogoncommands and autologon",
"order": 4
}
}
},
"linuxConfiguration": {
"type": "object",
"title": "linuxConfiguration",
"properties": {
"disablePasswordAuthentication": {
"type": "boolean",
"title": "Disable Password Authentication",
"description": "Specifies whether password authentication should be disabled",
"order": 1
},
"ssh": {
"$ref": "#/definitions/ssh",
"title": "SSH",
"description": "Specifies a collection of keys to be placed on the virtual machine",
"order": 2
}
},
"definitions": {
"publicKeys": {
"type": "object",
"title": "publicKeys",
"properties": {
"keyData": {
"type": "string",
"title": "Key Data",
"description": "SSH public key certificate used to authenticate with the vm through ssh",
"order": 1
},
"path": {
"type": "string",
"title": "Path",
"description": "Specifies the full path on the created VM where ssh public key is stored",
"order": 2
}
}
},
"ssh": {
"type": "object",
"title": "ssh",
"properties": {
"publicKeys": {
"type": "array",
"title": "Public Keys",
"description": "Specifies a collection of keys to be placed on the virtual machine",
"items": {
"$ref": "#/definitions/publicKeys"
},
"order": 1
}
},
"definitions": {
"publicKeys": {
"type": "object",
"title": "publicKeys",
"properties": {
"keyData": {
"type": "string",
"title": "Key Data",
"description": "SSH public key certificate used to authenticate with the vm through ssh",
"order": 1
},
"path": {
"type": "string",
"title": "Path",
"description": "Specifies the full path on the created VM where ssh public key is stored",
"order": 2
}
}
}
}
}
}
},
"listeners": {
"type": "object",
"title": "listeners",
"properties": {
"certificateUrl": {
"type": "string",
"title": "Certificate Url",
"description": "Specifies url of the certificate with which new virtual machines is provisioned",
"order": 1
},
"protocol": {
"type": "string",
"title": "Protocol",
"description": "Specifies the protocol of listener",
"order": 2
}
}
},
"publicKeys": {
"type": "object",
"title": "publicKeys",
"properties": {
"keyData": {
"type": "string",
"title": "Key Data",
"description": "SSH public key certificate used to authenticate with the vm through ssh",
"order": 1
},
"path": {
"type": "string",
"title": "Path",
"description": "Specifies the full path on the created VM where ssh public key is stored",
"order": 2
}
}
},
"ssh": {
"type": "object",
"title": "ssh",
"properties": {
"publicKeys": {
"type": "array",
"title": "Public Keys",
"description": "Specifies a collection of keys to be placed on the virtual machine",
"items": {
"$ref": "#/definitions/publicKeys"
},
"order": 1
}
},
"definitions": {
"publicKeys": {
"type": "object",
"title": "publicKeys",
"properties": {
"keyData": {
"type": "string",
"title": "Key Data",
"description": "SSH public key certificate used to authenticate with the vm through ssh",
"order": 1
},
"path": {
"type": "string",
"title": "Path",
"description": "Specifies the full path on the created VM where ssh public key is stored",
"order": 2
}
}
}
}
},
"winRM": {
"type": "object",
"title": "winRM",
"properties": {
"listeners": {
"type": "array",
"title": "Listeners",
"items": {
"$ref": "#/definitions/listeners"
},
"order": 1
}
},
"definitions": {
"listeners": {
"type": "object",
"title": "listeners",
"properties": {
"certificateUrl": {
"type": "string",
"title": "Certificate Url",
"description": "Specifies url of the certificate with which new virtual machines is provisioned",
"order": 1
},
"protocol": {
"type": "string",
"title": "Protocol",
"description": "Specifies the protocol of listener",
"order": 2
}
}
}
}
},
"windowsConfiguration": {
"type": "object",
"title": "windowsConfiguration",
"properties": {
"additionalUnattendContent": {
"$ref": "#/definitions/additionalUnattendContent",
"title": "Additional Unattend Content",
"description": "Specifies additional xml formatted information that can be included in the unattend.xml file, which is used by windows setup",
"order": 1
},
"enableAutomaticUpdates": {
"type": "boolean",
"title": "Enable Automatic Updates",
"description": "Indicates whether virtual machine is enabled for automatic updates",
"order": 2
},
"provisionVMAgent": {
"type": "boolean",
"title": "Provision VM Agent",
"description": "Indicates whether virtual machine agent should be provisioned on the virtual machine",
"order": 3
},
"winRM": {
"$ref": "#/definitions/winRM",
"title": "Win RM",
"description": "Specifies the windows remote management listeners, this enables remote windows powershell",
"order": 4
},
"winrRMListener": {
"$ref": "#/definitions/listeners",
"title": "WinrRM Listener",
"description": "Contains configuration settings for the windows remote management service on the virtual machine",
"order": 5
}
},
"definitions": {
"additionalUnattendContent": {
"type": "object",
"title": "additionalUnattendContent",
"properties": {
"component": {
"type": "string",
"title": "Component",
"description": "Specifies the name of the component to configure with the added content",
"order": 1
},
"content": {
"type": "string",
"title": "Content",
"description": "Specifies the xml formatted content that is added to the unattend.xml file for the specified path and component",
"order": 2
},
"pass": {
"type": "string",
"title": "Pass",
"description": "Specifies the name of the pass that the content applies to, the only allowable value is oobeSystem",
"order": 3
},
"settingName": {
"type": "string",
"title": "Setting Name",
"description": "Specifies the name of the setting to which the content applies, possible values are: firstlogoncommands and autologon",
"order": 4
}
}
},
"listeners": {
"type": "object",
"title": "listeners",
"properties": {
"certificateUrl": {
"type": "string",
"title": "Certificate Url",
"description": "Specifies url of the certificate with which new virtual machines is provisioned",
"order": 1
},
"protocol": {
"type": "string",
"title": "Protocol",
"description": "Specifies the protocol of listener",
"order": 2
}
}
},
"winRM": {
"type": "object",
"title": "winRM",
"properties": {
"listeners": {
"type": "array",
"title": "Listeners",
"items": {
"$ref": "#/definitions/listeners"
},
"order": 1
}
},
"definitions": {
"listeners": {
"type": "object",
"title": "listeners",
"properties": {
"certificateUrl": {
"type": "string",
"title": "Certificate Url",
"description": "Specifies url of the certificate with which new virtual machines is provisioned",
"order": 1
},
"protocol": {
"type": "string",
"title": "Protocol",
"description": "Specifies the protocol of listener",
"order": 2
}
}
}
}
}
}
}
}
},
"properties": {
"type": "object",
"title": "properties",
"properties": {
"availabilitySet": {
"$ref": "#/definitions/availabilitySet",
"title": "Availability Set",
"description": "The availability set that contains the virtual machine",
"order": 1
},
"diagnosticsProfile": {
"$ref": "#/definitions/diagnosticsProfile",
"title": "Diagnostics Profile",
"description": "Specifies the boot diagnostic settings state",
"order": 2
},
"hardwareProfile": {
"$ref": "#/definitions/hardwareProfile",
"title": "Hardware Profile",
"description": "Specifies the hardware settings for the virtual machine",
"order": 3
},
"networkProfile": {
"$ref": "#/definitions/networkProfile",
"title": "Network Profile",
"description": "Specifies the network interfaces of the virtual machine",
"order": 4
},
"osProfile": {
"$ref": "#/definitions/osProfile",
"title": "OS Profile",
"description": "Specifies the operating system settings for the virtual machine",
"order": 5
},
"provisioningState": {
"type": "string",
"title": "Provisioning State",
"description": "Specifies the provisioned state of the virtual machine",
"order": 6
},
"storageProfile": {
"$ref": "#/definitions/storageProfile",
"title": "Storage Profile",
"description": "Specifies the storage settings for the virtual machine disks",
"order": 7
},
"vmId": {
"type": "string",
"title": "Virtual Machine ID",
"description": "The vm unique id",
"order": 8
}
},
"definitions": {
"additionalUnattendContent": {
"type": "object",
"title": "additionalUnattendContent",
"properties": {
"component": {
"type": "string",
"title": "Component",
"description": "Specifies the name of the component to configure with the added content",
"order": 1
},
"content": {
"type": "string",
"title": "Content",
"description": "Specifies the xml formatted content that is added to the unattend.xml file for the specified path and component",
"order": 2
},
"pass": {
"type": "string",
"title": "Pass",
"description": "Specifies the name of the pass that the content applies to, the only allowable value is oobeSystem",
"order": 3
},
"settingName": {
"type": "string",
"title": "Setting Name",
"description": "Specifies the name of the setting to which the content applies, possible values are: firstlogoncommands and autologon",
"order": 4
}
}
},
"availabilitySet": {
"type": "object",
"title": "availabilitySet",
"properties": {
"id": {
"type": "string",
"title": "ID",
"description": "Specifies the resource ID",
"order": 1
}
}
},
"bootDiagnostics": {
"type": "object",
"title": "bootDiagnostics",
"properties": {
"enabled": {
"type": "boolean",
"title": "Enabled",
"description": "Specifies if the boot diagnostics is enabled",
"order": 1
},
"storageUri": {
"type": "string",
"title": "Storage Uri",
"description": "Uri of the storage account to use for placing the console output and screenshot",
"order": 2
}
}
},
"diagnosticsProfile": {
"type": "object",
"title": "diagnosticsProfile",
"properties": {
"bootDiagnostics": {
"$ref": "#/definitions/bootDiagnostics",
"title": "Boot Diagnostics",
"description": "Boot diagnostics is a debugging feature which allows you to view console Output and screenshot to diagnose vm status",
"order": 1
}
},
"definitions": {
"bootDiagnostics": {
"type": "object",
"title": "bootDiagnostics",
"properties": {
"enabled": {
"type": "boolean",
"title": "Enabled",
"description": "Specifies if the boot diagnostics is enabled",
"order": 1
},
"storageUri": {
"type": "string",
"title": "Storage Uri",
"description": "Uri of the storage account to use for placing the console output and screenshot",
"order": 2
}
}
}
}
},
"hardwareProfile": {
"type": "object",
"title": "hardwareProfile",
"properties": {
"vmSize": {
"type": "string",
"title": "VM Size",
"description": "Specifies the size of the virtual machine",
"order": 1
}
}
},
"imageReference": {
"type": "object",
"title": "imageReference",
"properties": {
"id": {
"type": "string",
"title": "Image Reference",
"description": "Specifies the resource identifier of a virtual machine image in your subscription",
"order": 1
},
"offer": {
"type": "string",
"title": "Offer",
"description": "Specifies the offer of the platform image or marketplace image used to create the virtual machine",
"order": 2
},
"publisher": {
"type": "string",
"title": "Publisher",
"description": "Specifies the publisher of the platform image or marketplace image used to create the virtual machine",
"order": 3
},
"sku": {
"type": "string",
"title": "SKU",
"description": "Specifies the sku of the platform image or marketplace image used to create the virtual machine",
"order": 4
},
"version": {
"type": "string",
"title": "Version",
"description": "Specifies the version of the platform image or marketplace image used to create the virtual machine",
"order": 5
}
}
},
"linuxConfiguration": {
"type": "object",
"title": "linuxConfiguration",
"properties": {
"disablePasswordAuthentication": {
"type": "boolean",
"title": "Disable Password Authentication",
"description": "Specifies whether password authentication should be disabled",
"order": 1
},
"ssh": {
"$ref": "#/definitions/ssh",
"title": "SSH",
"description": "Specifies a collection of keys to be placed on the virtual machine",
"order": 2
}
},
"definitions": {
"publicKeys": {
"type": "object",
"title": "publicKeys",
"properties": {
"keyData": {
"type": "string",
"title": "Key Data",
"description": "SSH public key certificate used to authenticate with the vm through ssh",
"order": 1
},
"path": {
"type": "string",
"title": "Path",
"description": "Specifies the full path on the created VM where ssh public key is stored",
"order": 2
}
}
},
"ssh": {
"type": "object",
"title": "ssh",
"properties": {
"publicKeys": {
"type": "array",
"title": "Public Keys",
"description": "Specifies a collection of keys to be placed on the virtual machine",
"items": {
"$ref": "#/definitions/publicKeys"
},
"order": 1
}
},
"definitions": {
"publicKeys": {
"type": "object",
"title": "publicKeys",
"properties": {
"keyData": {
"type": "string",
"title": "Key Data",
"description": "SSH public key certificate used to authenticate with the vm through ssh",
"order": 1
},
"path": {
"type": "string",
"title": "Path",
"description": "Specifies the full path on the created VM where ssh public key is stored",
"order": 2
}
}
}
}
}
}
},
"listeners": {
"type": "object",
"title": "listeners",
"properties": {
"certificateUrl": {
"type": "string",
"title": "Certificate Url",
"description": "Specifies url of the certificate with which new virtual machines is provisioned",
"order": 1
},
"protocol": {
"type": "string",
"title": "Protocol",
"description": "Specifies the protocol of listener",
"order": 2
}
}
},
"managedDisk": {
"type": "object",
"title": "managedDisk",
"properties": {
"Id": {
"type": "string",
"title": "ID",
"description": "Specifies the resource identifier of the managed disk",
"order": 1
},
"storageAccountType": {
"type": "string",
"title": "Storage Account Type",
"description": "Specifies the storage account type for the managed disk",
"order": 2
}
}
},
"networkProfile": {
"type": "object",
"title": "networkProfile",
"properties": {
"networkInterfaces": {
"type": "array",
"title": "Network Interfaces",
"description": "Specifies the list of resource ids for the network interfaces associated with the virtual machine",
"items": {
"$ref": "#/definitions/availabilitySet"
},
"order": 1
}
},
"definitions": {
"availabilitySet": {
"type": "object",
"title": "availabilitySet",
"properties": {
"id": {
"type": "string",
"title": "ID",
"description": "Specifies the resource ID",
"order": 1
}
}
}
}
},
"osDisk": {
"type": "object",
"title": "osDisk",
"properties": {
"caching": {
"type": "string",
"title": "Caching",
"description": "Specifies the caching requirements",
"order": 1
},
"createOption": {
"type": "string",
"title": "Create Option",
"description": "Specifies how the virtual machine should be created",
"order": 2
},
"managedDisk": {
"$ref": "#/definitions/managedDisk",
"title": "Managed Disk",
"description": "Specified the identifier and optional storage account type for the disk",
"order": 3
},
"name": {
"type": "string",
"title": "Name",
"description": "Specifies the disk name",
"order": 4
},
"osType": {
"type": "string",
"title": "OS Type",
"description": "This property allows you to specify the type of the os that is included in the disk if creating a vm from user-image or a specialized vhd",
"order": 5
},
"vhd": {
"$ref": "#/definitions/vhd",
"title": "VHD",
"description": "Specifies the uri of the location in storage where the vhd for the virtual machine should be placed",
"order": 6
}
},
"definitions": {
"managedDisk": {
"type": "object",
"title": "managedDisk",
"properties": {
"Id": {
"type": "string",
"title": "ID",
"description": "Specifies the resource identifier of the managed disk",
"order": 1
},
"storageAccountType": {
"type": "string",
"title": "Storage Account Type",
"description": "Specifies the storage account type for the managed disk",
"order": 2
}
}
},
"vhd": {
"type": "object",
"title": "vhd",
"properties": {
"uri": {
"type": "string",
"title": "VHD",
"description": "Specifies the vhd uri",
"order": 1
}
}
}
}
},
"osProfile": {
"type": "object",
"title": "osProfile",
"properties": {
"adminPassword": {
"type": "string",
"title": "Admin Password",
"description": "Specifies the password of the administrator account",
"order": 1
},
"adminUsername": {
"type": "string",
"title": "Admin UserName",
"description": "Specifies the name of the administrator account",
"order": 2
},
"computerName": {
"type": "string",
"title": "Computer Name",
"description": "Specifies the host os name of the virtual machine",
"order": 3
},
"customData": {
"type": "string",
"title": "Custom Data",
"description": "Specifies a base-64 encoded string of custom data",
"order": 4
},
"linuxConfiguration": {
"$ref": "#/definitions/linuxConfiguration",
"title": "Linux Configuration",
"description": "Specifies the linux operating system settings on the virtual machine",
"order": 7
},
"secrets": {
"type": "array",
"title": "Secrets",
"description": "Specifies set of certificates that should be installed onto the virtual machine",
"items": {
"type": "object"
},
"order": 5
},
"windowsConfiguration": {
"$ref": "#/definitions/windowsConfiguration",
"title": "Windows Configuration",
"description": "Specifies windows operating system settings on the virtual machine",
"order": 6
}
},
"definitions": {
"additionalUnattendContent": {
"type": "object",
"title": "additionalUnattendContent",
"properties": {
"component": {
"type": "string",
"title": "Component",
"description": "Specifies the name of the component to configure with the added content",
"order": 1
},
"content": {
"type": "string",
"title": "Content",
"description": "Specifies the xml formatted content that is added to the unattend.xml file for the specified path and component",
"order": 2
},
"pass": {
"type": "string",
"title": "Pass",
"description": "Specifies the name of the pass that the content applies to, the only allowable value is oobeSystem",
"order": 3
},
"settingName": {
"type": "string",
"title": "Setting Name",
"description": "Specifies the name of the setting to which the content applies, possible values are: firstlogoncommands and autologon",
"order": 4
}
}
},
"linuxConfiguration": {
"type": "object",
"title": "linuxConfiguration",
"properties": {
"disablePasswordAuthentication": {
"type": "boolean",
"title": "Disable Password Authentication",
"description": "Specifies whether password authentication should be disabled",
"order": 1
},
"ssh": {
"$ref": "#/definitions/ssh",
"title": "SSH",
"description": "Specifies a collection of keys to be placed on the virtual machine",
"order": 2
}
},
"definitions": {
"publicKeys": {
"type": "object",
"title": "publicKeys",
"properties": {
"keyData": {
"type": "string",
"title": "Key Data",
"description": "SSH public key certificate used to authenticate with the vm through ssh",
"order": 1
},
"path": {
"type": "string",
"title": "Path",
"description": "Specifies the full path on the created VM where ssh public key is stored",
"order": 2
}
}
},
"ssh": {
"type": "object",
"title": "ssh",
"properties": {
"publicKeys": {
"type": "array",
"title": "Public Keys",
"description": "Specifies a collection of keys to be placed on the virtual machine",
"items": {
"$ref": "#/definitions/publicKeys"
},
"order": 1
}
},
"definitions": {
"publicKeys": {
"type": "object",
"title": "publicKeys",
"properties": {
"keyData": {
"type": "string",
"title": "Key Data",
"description": "SSH public key certificate used to authenticate with the vm through ssh",
"order": 1
},
"path": {
"type": "string",
"title": "Path",
"description": "Specifies the full path on the created VM where ssh public key is stored",
"order": 2
}
}
}
}
}
}
},
"listeners": {
"type": "object",
"title": "listeners",
"properties": {
"certificateUrl": {
"type": "string",
"title": "Certificate Url",
"description": "Specifies url of the certificate with which new virtual machines is provisioned",
"order": 1
},
"protocol": {
"type": "string",
"title": "Protocol",
"description": "Specifies the protocol of listener",
"order": 2
}
}
},
"publicKeys": {
"type": "object",
"title": "publicKeys",
"properties": {
"keyData": {
"type": "string",
"title": "Key Data",
"description": "SSH public key certificate used to authenticate with the vm through ssh",
"order": 1
},
"path": {
"type": "string",
"title": "Path",
"description": "Specifies the full path on the created VM where ssh public key is stored",
"order": 2
}
}
},
"ssh": {
"type": "object",
"title": "ssh",
"properties": {
"publicKeys": {
"type": "array",
"title": "Public Keys",
"description": "Specifies a collection of keys to be placed on the virtual machine",
"items": {
"$ref": "#/definitions/publicKeys"
},
"order": 1
}
},
"definitions": {
"publicKeys": {
"type": "object",
"title": "publicKeys",
"properties": {
"keyData": {
"type": "string",
"title": "Key Data",
"description": "SSH public key certificate used to authenticate with the vm through ssh",
"order": 1
},
"path": {
"type": "string",
"title": "Path",
"description": "Specifies the full path on the created VM where ssh public key is stored",
"order": 2
}
}
}
}
},
"winRM": {
"type": "object",
"title": "winRM",
"properties": {
"listeners": {
"type": "array",
"title": "Listeners",
"items": {
"$ref": "#/definitions/listeners"
},
"order": 1
}
},
"definitions": {
"listeners": {
"type": "object",
"title": "listeners",
"properties": {
"certificateUrl": {
"type": "string",
"title": "Certificate Url",
"description": "Specifies url of the certificate with which new virtual machines is provisioned",
"order": 1
},
"protocol": {
"type": "string",
"title": "Protocol",
"description": "Specifies the protocol of listener",
"order": 2
}
}
}
}
},
"windowsConfiguration": {
"type": "object",
"title": "windowsConfiguration",
"properties": {
"additionalUnattendContent": {
"$ref": "#/definitions/additionalUnattendContent",
"title": "Additional Unattend Content",
"description": "Specifies additional xml formatted information that can be included in the unattend.xml file, which is used by windows setup",
"order": 1
},
"enableAutomaticUpdates": {
"type": "boolean",
"title": "Enable Automatic Updates",
"description": "Indicates whether virtual machine is enabled for automatic updates",
"order": 2
},
"provisionVMAgent": {
"type": "boolean",
"title": "Provision VM Agent",
"description": "Indicates whether virtual machine agent should be provisioned on the virtual machine",
"order": 3
},
"winRM": {
"$ref": "#/definitions/winRM",
"title": "Win RM",
"description": "Specifies the windows remote management listeners, this enables remote windows powershell",
"order": 4
},
"winrRMListener": {
"$ref": "#/definitions/listeners",
"title": "WinrRM Listener",
"description": "Contains configuration settings for the windows remote management service on the virtual machine",
"order": 5
}
},
"definitions": {
"additionalUnattendContent": {
"type": "object",
"title": "additionalUnattendContent",
"properties": {
"component": {
"type": "string",
"title": "Component",
"description": "Specifies the name of the component to configure with the added content",
"order": 1
},
"content": {
"type": "string",
"title": "Content",
"description": "Specifies the xml formatted content that is added to the unattend.xml file for the specified path and component",
"order": 2
},
"pass": {
"type": "string",
"title": "Pass",
"description": "Specifies the name of the pass that the content applies to, the only allowable value is oobeSystem",
"order": 3
},
"settingName": {
"type": "string",
"title": "Setting Name",
"description": "Specifies the name of the setting to which the content applies, possible values are: firstlogoncommands and autologon",
"order": 4
}
}
},
"listeners": {
"type": "object",
"title": "listeners",
"properties": {
"certificateUrl": {
"type": "string",
"title": "Certificate Url",
"description": "Specifies url of the certificate with which new virtual machines is provisioned",
"order": 1
},
"protocol": {
"type": "string",
"title": "Protocol",
"description": "Specifies the protocol of listener",
"order": 2
}
}
},
"winRM": {
"type": "object",
"title": "winRM",
"properties": {
"listeners": {
"type": "array",
"title": "Listeners",
"items": {
"$ref": "#/definitions/listeners"
},
"order": 1
}
},
"definitions": {
"listeners": {
"type": "object",
"title": "listeners",
"properties": {
"certificateUrl": {
"type": "string",
"title": "Certificate Url",
"description": "Specifies url of the certificate with which new virtual machines is provisioned",
"order": 1
},
"protocol": {
"type": "string",
"title": "Protocol",
"description": "Specifies the protocol of listener",
"order": 2
}
}
}
}
}
}
}
}
},
"publicKeys": {
"type": "object",
"title": "publicKeys",
"properties": {
"keyData": {
"type": "string",
"title": "Key Data",
"description": "SSH public key certificate used to authenticate with the vm through ssh",
"order": 1
},
"path": {
"type": "string",
"title": "Path",
"description": "Specifies the full path on the created VM where ssh public key is stored",
"order": 2
}
}
},
"ssh": {
"type": "object",
"title": "ssh",
"properties": {
"publicKeys": {
"type": "array",
"title": "Public Keys",
"description": "Specifies a collection of keys to be placed on the virtual machine",
"items": {
"$ref": "#/definitions/publicKeys"
},
"order": 1
}
},
"definitions": {
"publicKeys": {
"type": "object",
"title": "publicKeys",
"properties": {
"keyData": {
"type": "string",
"title": "Key Data",
"description": "SSH public key certificate used to authenticate with the vm through ssh",
"order": 1
},
"path": {
"type": "string",
"title": "Path",
"description": "Specifies the full path on the created VM where ssh public key is stored",
"order": 2
}
}
}
}
},
"storageProfile": {
"type": "object",
"title": "storageProfile",
"properties": {
"dataDisks": {
"type": "array",
"title": "Data Disks",
"description": "Specifies the parameters that are used to add a data disk to a virtual machine",
"items": {
"type": "object"
},
"order": 1
},
"imageReference": {
"$ref": "#/definitions/imageReference",
"title": "Image Reference",
"description": "Specifies information about the image to use",
"order": 2
},
"osDisk": {
"$ref": "#/definitions/osDisk",
"title": "OS Disk",
"description": "Specifies information about the operating system disk used by the virtual machine",
"order": 3
}
},
"definitions": {
"imageReference": {
"type": "object",
"title": "imageReference",
"properties": {
"id": {
"type": "string",
"title": "Image Reference",
"description": "Specifies the resource identifier of a virtual machine image in your subscription",
"order": 1
},
"offer": {
"type": "string",
"title": "Offer",
"description": "Specifies the offer of the platform image or marketplace image used to create the virtual machine",
"order": 2
},
"publisher": {
"type": "string",
"title": "Publisher",
"description": "Specifies the publisher of the platform image or marketplace image used to create the virtual machine",
"order": 3
},
"sku": {
"type": "string",
"title": "SKU",
"description": "Specifies the sku of the platform image or marketplace image used to create the virtual machine",
"order": 4
},
"version": {
"type": "string",
"title": "Version",
"description": "Specifies the version of the platform image or marketplace image used to create the virtual machine",
"order": 5
}
}
},
"managedDisk": {
"type": "object",
"title": "managedDisk",
"properties": {
"Id": {
"type": "string",
"title": "ID",
"description": "Specifies the resource identifier of the managed disk",
"order": 1
},
"storageAccountType": {
"type": "string",
"title": "Storage Account Type",
"description": "Specifies the storage account type for the managed disk",
"order": 2
}
}
},
"osDisk": {
"type": "object",
"title": "osDisk",
"properties": {
"caching": {
"type": "string",
"title": "Caching",
"description": "Specifies the caching requirements",
"order": 1
},
"createOption": {
"type": "string",
"title": "Create Option",
"description": "Specifies how the virtual machine should be created",
"order": 2
},
"managedDisk": {
"$ref": "#/definitions/managedDisk",
"title": "Managed Disk",
"description": "Specified the identifier and optional storage account type for the disk",
"order": 3
},
"name": {
"type": "string",
"title": "Name",
"description": "Specifies the disk name",
"order": 4
},
"osType": {
"type": "string",
"title": "OS Type",
"description": "This property allows you to specify the type of the os that is included in the disk if creating a vm from user-image or a specialized vhd",
"order": 5
},
"vhd": {
"$ref": "#/definitions/vhd",
"title": "VHD",
"description": "Specifies the uri of the location in storage where the vhd for the virtual machine should be placed",
"order": 6
}
},
"definitions": {
"managedDisk": {
"type": "object",
"title": "managedDisk",
"properties": {
"Id": {
"type": "string",
"title": "ID",
"description": "Specifies the resource identifier of the managed disk",
"order": 1
},
"storageAccountType": {
"type": "string",
"title": "Storage Account Type",
"description": "Specifies the storage account type for the managed disk",
"order": 2
}
}
},
"vhd": {
"type": "object",
"title": "vhd",
"properties": {
"uri": {
"type": "string",
"title": "VHD",
"description": "Specifies the vhd uri",
"order": 1
}
}
}
}
},
"vhd": {
"type": "object",
"title": "vhd",
"properties": {
"uri": {
"type": "string",
"title": "VHD",
"description": "Specifies the vhd uri",
"order": 1
}
}
}
}
},
"vhd": {
"type": "object",
"title": "vhd",
"properties": {
"uri": {
"type": "string",
"title": "VHD",
"description": "Specifies the vhd uri",
"order": 1
}
}
},
"winRM": {
"type": "object",
"title": "winRM",
"properties": {
"listeners": {
"type": "array",
"title": "Listeners",
"items": {
"$ref": "#/definitions/listeners"
},
"order": 1
}
},
"definitions": {
"listeners": {
"type": "object",
"title": "listeners",
"properties": {
"certificateUrl": {
"type": "string",
"title": "Certificate Url",
"description": "Specifies url of the certificate with which new virtual machines is provisioned",
"order": 1
},
"protocol": {
"type": "string",
"title": "Protocol",
"description": "Specifies the protocol of listener",
"order": 2
}
}
}
}
},
"windowsConfiguration": {
"type": "object",
"title": "windowsConfiguration",
"properties": {
"additionalUnattendContent": {
"$ref": "#/definitions/additionalUnattendContent",
"title": "Additional Unattend Content",
"description": "Specifies additional xml formatted information that can be included in the unattend.xml file, which is used by windows setup",
"order": 1
},
"enableAutomaticUpdates": {
"type": "boolean",
"title": "Enable Automatic Updates",
"description": "Indicates whether virtual machine is enabled for automatic updates",
"order": 2
},
"provisionVMAgent": {
"type": "boolean",
"title": "Provision VM Agent",
"description": "Indicates whether virtual machine agent should be provisioned on the virtual machine",
"order": 3
},
"winRM": {
"$ref": "#/definitions/winRM",
"title": "Win RM",
"description": "Specifies the windows remote management listeners, this enables remote windows powershell",
"order": 4
},
"winrRMListener": {
"$ref": "#/definitions/listeners",
"title": "WinrRM Listener",
"description": "Contains configuration settings for the windows remote management service on the virtual machine",
"order": 5
}
},
"definitions": {
"additionalUnattendContent": {
"type": "object",
"title": "additionalUnattendContent",
"properties": {
"component": {
"type": "string",
"title": "Component",
"description": "Specifies the name of the component to configure with the added content",
"order": 1
},
"content": {
"type": "string",
"title": "Content",
"description": "Specifies the xml formatted content that is added to the unattend.xml file for the specified path and component",
"order": 2
},
"pass": {
"type": "string",
"title": "Pass",
"description": "Specifies the name of the pass that the content applies to, the only allowable value is oobeSystem",
"order": 3
},
"settingName": {
"type": "string",
"title": "Setting Name",
"description": "Specifies the name of the setting to which the content applies, possible values are: firstlogoncommands and autologon",
"order": 4
}
}
},
"listeners": {
"type": "object",
"title": "listeners",
"properties": {
"certificateUrl": {
"type": "string",
"title": "Certificate Url",
"description": "Specifies url of the certificate with which new virtual machines is provisioned",
"order": 1
},
"protocol": {
"type": "string",
"title": "Protocol",
"description": "Specifies the protocol of listener",
"order": 2
}
}
},
"winRM": {
"type": "object",
"title": "winRM",
"properties": {
"listeners": {
"type": "array",
"title": "Listeners",
"items": {
"$ref": "#/definitions/listeners"
},
"order": 1
}
},
"definitions": {
"listeners": {
"type": "object",
"title": "listeners",
"properties": {
"certificateUrl": {
"type": "string",
"title": "Certificate Url",
"description": "Specifies url of the certificate with which new virtual machines is provisioned",
"order": 1
},
"protocol": {
"type": "string",
"title": "Protocol",
"description": "Specifies the protocol of listener",
"order": 2
}
}
}
}
}
}
}
}
},
"publicKeys": {
"type": "object",
"title": "publicKeys",
"properties": {
"keyData": {
"type": "string",
"title": "Key Data",
"description": "SSH public key certificate used to authenticate with the vm through ssh",
"order": 1
},
"path": {
"type": "string",
"title": "Path",
"description": "Specifies the full path on the created VM where ssh public key is stored",
"order": 2
}
}
},
"ssh": {
"type": "object",
"title": "ssh",
"properties": {
"publicKeys": {
"type": "array",
"title": "Public Keys",
"description": "Specifies a collection of keys to be placed on the virtual machine",
"items": {
"$ref": "#/definitions/publicKeys"
},
"order": 1
}
},
"definitions": {
"publicKeys": {
"type": "object",
"title": "publicKeys",
"properties": {
"keyData": {
"type": "string",
"title": "Key Data",
"description": "SSH public key certificate used to authenticate with the vm through ssh",
"order": 1
},
"path": {
"type": "string",
"title": "Path",
"description": "Specifies the full path on the created VM where ssh public key is stored",
"order": 2
}
}
}
}
},
"storageProfile": {
"type": "object",
"title": "storageProfile",
"properties": {
"dataDisks": {
"type": "array",
"title": "Data Disks",
"description": "Specifies the parameters that are used to add a data disk to a virtual machine",
"items": {
"type": "object"
},
"order": 1
},
"imageReference": {
"$ref": "#/definitions/imageReference",
"title": "Image Reference",
"description": "Specifies information about the image to use",
"order": 2
},
"osDisk": {
"$ref": "#/definitions/osDisk",
"title": "OS Disk",
"description": "Specifies information about the operating system disk used by the virtual machine",
"order": 3
}
},
"definitions": {
"imageReference": {
"type": "object",
"title": "imageReference",
"properties": {
"id": {
"type": "string",
"title": "Image Reference",
"description": "Specifies the resource identifier of a virtual machine image in your subscription",
"order": 1
},
"offer": {
"type": "string",
"title": "Offer",
"description": "Specifies the offer of the platform image or marketplace image used to create the virtual machine",
"order": 2
},
"publisher": {
"type": "string",
"title": "Publisher",
"description": "Specifies the publisher of the platform image or marketplace image used to create the virtual machine",
"order": 3
},
"sku": {
"type": "string",
"title": "SKU",
"description": "Specifies the sku of the platform image or marketplace image used to create the virtual machine",
"order": 4
},
"version": {
"type": "string",
"title": "Version",
"description": "Specifies the version of the platform image or marketplace image used to create the virtual machine",
"order": 5
}
}
},
"managedDisk": {
"type": "object",
"title": "managedDisk",
"properties": {
"Id": {
"type": "string",
"title": "ID",
"description": "Specifies the resource identifier of the managed disk",
"order": 1
},
"storageAccountType": {
"type": "string",
"title": "Storage Account Type",
"description": "Specifies the storage account type for the managed disk",
"order": 2
}
}
},
"osDisk": {
"type": "object",
"title": "osDisk",
"properties": {
"caching": {
"type": "string",
"title": "Caching",
"description": "Specifies the caching requirements",
"order": 1
},
"createOption": {
"type": "string",
"title": "Create Option",
"description": "Specifies how the virtual machine should be created",
"order": 2
},
"managedDisk": {
"$ref": "#/definitions/managedDisk",
"title": "Managed Disk",
"description": "Specified the identifier and optional storage account type for the disk",
"order": 3
},
"name": {
"type": "string",
"title": "Name",
"description": "Specifies the disk name",
"order": 4
},
"osType": {
"type": "string",
"title": "OS Type",
"description": "This property allows you to specify the type of the os that is included in the disk if creating a vm from user-image or a specialized vhd",
"order": 5
},
"vhd": {
"$ref": "#/definitions/vhd",
"title": "VHD",
"description": "Specifies the uri of the location in storage where the vhd for the virtual machine should be placed",
"order": 6
}
},
"definitions": {
"managedDisk": {
"type": "object",
"title": "managedDisk",
"properties": {
"Id": {
"type": "string",
"title": "ID",
"description": "Specifies the resource identifier of the managed disk",
"order": 1
},
"storageAccountType": {
"type": "string",
"title": "Storage Account Type",
"description": "Specifies the storage account type for the managed disk",
"order": 2
}
}
},
"vhd": {
"type": "object",
"title": "vhd",
"properties": {
"uri": {
"type": "string",
"title": "VHD",
"description": "Specifies the vhd uri",
"order": 1
}
}
}
}
},
"vhd": {
"type": "object",
"title": "vhd",
"properties": {
"uri": {
"type": "string",
"title": "VHD",
"description": "Specifies the vhd uri",
"order": 1
}
}
}
}
},
"tags": {
"type": "object",
"title": "tags",
"properties": {
"tags": {
"type": "object",
"title": "Tags",
"description": "Tags",
"order": 1
}
}
},
"vhd": {
"type": "object",
"title": "vhd",
"properties": {
"uri": {
"type": "string",
"title": "VHD",
"description": "Specifies the vhd uri",
"order": 1
}
}
},
"winRM": {
"type": "object",
"title": "winRM",
"properties": {
"listeners": {
"type": "array",
"title": "Listeners",
"items": {
"$ref": "#/definitions/listeners"
},
"order": 1
}
},
"definitions": {
"listeners": {
"type": "object",
"title": "listeners",
"properties": {
"certificateUrl": {
"type": "string",
"title": "Certificate Url",
"description": "Specifies url of the certificate with which new virtual machines is provisioned",
"order": 1
},
"protocol": {
"type": "string",
"title": "Protocol",
"description": "Specifies the protocol of listener",
"order": 2
}
}
}
}
},
"windowsConfiguration": {
"type": "object",
"title": "windowsConfiguration",
"properties": {
"additionalUnattendContent": {
"$ref": "#/definitions/additionalUnattendContent",
"title": "Additional Unattend Content",
"description": "Specifies additional xml formatted information that can be included in the unattend.xml file, which is used by windows setup",
"order": 1
},
"enableAutomaticUpdates": {
"type": "boolean",
"title": "Enable Automatic Updates",
"description": "Indicates whether virtual machine is enabled for automatic updates",
"order": 2
},
"provisionVMAgent": {
"type": "boolean",
"title": "Provision VM Agent",
"description": "Indicates whether virtual machine agent should be provisioned on the virtual machine",
"order": 3
},
"winRM": {
"$ref": "#/definitions/winRM",
"title": "Win RM",
"description": "Specifies the windows remote management listeners, this enables remote windows powershell",
"order": 4
},
"winrRMListener": {
"$ref": "#/definitions/listeners",
"title": "WinrRM Listener",
"description": "Contains configuration settings for the windows remote management service on the virtual machine",
"order": 5
}
},
"definitions": {
"additionalUnattendContent": {
"type": "object",
"title": "additionalUnattendContent",
"properties": {
"component": {
"type": "string",
"title": "Component",
"description": "Specifies the name of the component to configure with the added content",
"order": 1
},
"content": {
"type": "string",
"title": "Content",
"description": "Specifies the xml formatted content that is added to the unattend.xml file for the specified path and component",
"order": 2
},
"pass": {
"type": "string",
"title": "Pass",
"description": "Specifies the name of the pass that the content applies to, the only allowable value is oobeSystem",
"order": 3
},
"settingName": {
"type": "string",
"title": "Setting Name",
"description": "Specifies the name of the setting to which the content applies, possible values are: firstlogoncommands and autologon",
"order": 4
}
}
},
"listeners": {
"type": "object",
"title": "listeners",
"properties": {
"certificateUrl": {
"type": "string",
"title": "Certificate Url",
"description": "Specifies url of the certificate with which new virtual machines is provisioned",
"order": 1
},
"protocol": {
"type": "string",
"title": "Protocol",
"description": "Specifies the protocol of listener",
"order": 2
}
}
},
"winRM": {
"type": "object",
"title": "winRM",
"properties": {
"listeners": {
"type": "array",
"title": "Listeners",
"items": {
"$ref": "#/definitions/listeners"
},
"order": 1
}
},
"definitions": {
"listeners": {
"type": "object",
"title": "listeners",
"properties": {
"certificateUrl": {
"type": "string",
"title": "Certificate Url",
"description": "Specifies url of the certificate with which new virtual machines is provisioned",
"order": 1
},
"protocol": {
"type": "string",
"title": "Protocol",
"description": "Specifies the protocol of listener",
"order": 2
}
}
}
}
}
}
}
}
},
"vhd": {
"type": "object",
"title": "vhd",
"properties": {
"uri": {
"type": "string",
"title": "VHD",
"description": "Specifies the vhd uri",
"order": 1
}
}
},
"winRM": {
"type": "object",
"title": "winRM",
"properties": {
"listeners": {
"type": "array",
"title": "Listeners",
"items": {
"$ref": "#/definitions/listeners"
},
"order": 1
}
},
"definitions": {
"listeners": {
"type": "object",
"title": "listeners",
"properties": {
"certificateUrl": {
"type": "string",
"title": "Certificate Url",
"description": "Specifies url of the certificate with which new virtual machines is provisioned",
"order": 1
},
"protocol": {
"type": "string",
"title": "Protocol",
"description": "Specifies the protocol of listener",
"order": 2
}
}
}
}
},
"windowsConfiguration": {
"type": "object",
"title": "windowsConfiguration",
"properties": {
"additionalUnattendContent": {
"$ref": "#/definitions/additionalUnattendContent",
"title": "Additional Unattend Content",
"description": "Specifies additional xml formatted information that can be included in the unattend.xml file, which is used by windows setup",
"order": 1
},
"enableAutomaticUpdates": {
"type": "boolean",
"title": "Enable Automatic Updates",
"description": "Indicates whether virtual machine is enabled for automatic updates",
"order": 2
},
"provisionVMAgent": {
"type": "boolean",
"title": "Provision VM Agent",
"description": "Indicates whether virtual machine agent should be provisioned on the virtual machine",
"order": 3
},
"winRM": {
"$ref": "#/definitions/winRM",
"title": "Win RM",
"description": "Specifies the windows remote management listeners, this enables remote windows powershell",
"order": 4
},
"winrRMListener": {
"$ref": "#/definitions/listeners",
"title": "WinrRM Listener",
"description": "Contains configuration settings for the windows remote management service on the virtual machine",
"order": 5
}
},
"definitions": {
"additionalUnattendContent": {
"type": "object",
"title": "additionalUnattendContent",
"properties": {
"component": {
"type": "string",
"title": "Component",
"description": "Specifies the name of the component to configure with the added content",
"order": 1
},
"content": {
"type": "string",
"title": "Content",
"description": "Specifies the xml formatted content that is added to the unattend.xml file for the specified path and component",
"order": 2
},
"pass": {
"type": "string",
"title": "Pass",
"description": "Specifies the name of the pass that the content applies to, the only allowable value is oobeSystem",
"order": 3
},
"settingName": {
"type": "string",
"title": "Setting Name",
"description": "Specifies the name of the setting to which the content applies, possible values are: firstlogoncommands and autologon",
"order": 4
}
}
},
"listeners": {
"type": "object",
"title": "listeners",
"properties": {
"certificateUrl": {
"type": "string",
"title": "Certificate Url",
"description": "Specifies url of the certificate with which new virtual machines is provisioned",
"order": 1
},
"protocol": {
"type": "string",
"title": "Protocol",
"description": "Specifies the protocol of listener",
"order": 2
}
}
},
"winRM": {
"type": "object",
"title": "winRM",
"properties": {
"listeners": {
"type": "array",
"title": "Listeners",
"items": {
"$ref": "#/definitions/listeners"
},
"order": 1
}
},
"definitions": {
"listeners": {
"type": "object",
"title": "listeners",
"properties": {
"certificateUrl": {
"type": "string",
"title": "Certificate Url",
"description": "Specifies url of the certificate with which new virtual machines is provisioned",
"order": 1
},
"protocol": {
"type": "string",
"title": "Protocol",
"description": "Specifies the protocol of listener",
"order": 2
}
}
}
}
}
}
}
}
}
""")
def __init__(self):
super(self.__class__, self).__init__(self.schema)
| 37.224641 | 177 | 0.39752 | [
"MIT"
] | TonyHamil/insightconnect-plugins | azure_compute/komand_azure_compute/actions/list_vm/schema.py | 176,147 | Python |
#!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
"""Run administrative tasks."""
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'potato.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
| 28.782609 | 73 | 0.678248 | [
"MIT"
] | aidswidjaja/PotatoBoard | manage.py | 662 | Python |
# coding: utf-8
import logging
import os
import shutil
import sys
import tempfile
import unittest
import pytest
import fiona
logging.basicConfig(stream=sys.stderr, level=logging.INFO)
class UnicodePathTest(unittest.TestCase):
def setUp(self):
tempdir = tempfile.mkdtemp()
self.dir = os.path.join(tempdir, 'français')
shutil.copytree('tests/data/', self.dir)
def tearDown(self):
shutil.rmtree(os.path.dirname(self.dir))
def test_unicode_path(self):
path = self.dir + '/coutwildrnp.shp'
if sys.version_info < (3,):
path = path.decode('utf-8')
with fiona.open(path) as c:
assert len(c) == 67
def test_unicode_path_layer(self):
path = self.dir
layer = 'coutwildrnp'
if sys.version_info < (3,):
path = path.decode('utf-8')
layer = layer.decode('utf-8')
with fiona.open(path, layer=layer) as c:
assert len(c) == 67
def test_utf8_path(self):
path = self.dir + '/coutwildrnp.shp'
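        # The file is declared utf-8, so on Python 2 the 'français' in self.dir is a
        # UTF-8 encoded byte string and this test exercises byte-string paths; on
        # Python 3 the block below is skipped entirely.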
if sys.version_info < (3,):
with fiona.open(path) as c:
assert len(c) == 67
class UnicodeStringFieldTest(unittest.TestCase):
def setUp(self):
self.tempdir = tempfile.mkdtemp()
def tearDown(self):
shutil.rmtree(self.tempdir)
@pytest.mark.xfail(reason="OGR silently fails to convert strings")
def test_write_mismatch(self):
"""TOFIX: OGR silently fails to convert strings"""
# Details:
#
# If we tell OGR that we want a latin-1 encoded output file and
# give it a feature with a unicode property that can't be converted
# to latin-1, no error is raised and OGR just writes the utf-8
# encoded bytes to the output file.
#
# This might be shapefile specific.
#
# Consequences: no error on write, but there will be an error
# on reading the data and expecting latin-1.
schema = {
'geometry': 'Point',
'properties': {'label': 'str', 'num': 'int'}}
with fiona.open(os.path.join(self.tempdir, "test-write-fail.shp"),
'w', driver="ESRI Shapefile", schema=schema,
encoding='latin1') as c:
c.writerecords([{
'type': 'Feature',
'geometry': {'type': 'Point', 'coordinates': [0, 0]},
'properties': {
'label': u'徐汇区',
'num': 0}}])
with fiona.open(os.path.join(self.tempdir), encoding='latin1') as c:
f = next(iter(c))
# Next assert fails.
self.assertEqual(f['properties']['label'], u'徐汇区')
def test_write_utf8(self):
schema = {
'geometry': 'Point',
'properties': {'label': 'str', u'verit\xe9': 'int'}}
with fiona.open(os.path.join(self.tempdir, "test-write.shp"),
"w", "ESRI Shapefile", schema=schema,
encoding='utf-8') as c:
c.writerecords([{
'type': 'Feature',
'geometry': {'type': 'Point', 'coordinates': [0, 0]},
'properties': {
'label': u'Ba\u2019kelalan', u'verit\xe9': 0}}])
with fiona.open(os.path.join(self.tempdir), encoding='utf-8') as c:
f = next(iter(c))
self.assertEqual(f['properties']['label'], u'Ba\u2019kelalan')
self.assertEqual(f['properties'][u'verit\xe9'], 0)
def test_write_gb18030(self):
"""Can write a simplified Chinese shapefile"""
schema = {
'geometry': 'Point',
'properties': {'label': 'str', 'num': 'int'}}
with fiona.open(os.path.join(self.tempdir, "test-write-gb18030.shp"),
'w', driver="ESRI Shapefile", schema=schema,
encoding='gb18030') as c:
c.writerecords([{
'type': 'Feature',
'geometry': {'type': 'Point', 'coordinates': [0, 0]},
'properties': {'label': u'徐汇区', 'num': 0}}])
with fiona.open(os.path.join(self.tempdir), encoding='gb18030') as c:
f = next(iter(c))
self.assertEqual(f['properties']['label'], u'徐汇区')
self.assertEqual(f['properties']['num'], 0)
| 34.936 | 77 | 0.53973 | [
"BSD-3-Clause"
] | Juanlu001/Fiona | tests/test_unicode.py | 4,392 | Python |
from setuptools import setup, find_packages
__author__ = 'Giulio Rossetti'
__license__ = "BSD 2 Clause"
__email__ = "[email protected]"
# Get the long description from the README file
# with open(path.join(here, 'README.md'), encoding='utf-8') as f:
# long_description = f.read()
setup(name='demon',
version='2.0.4',
license='BSD-2-Clause',
description='Community Discovery algorithm',
url='https://github.com/GiulioRossetti/DEMON',
author='Giulio Rossetti',
author_email='[email protected]',
use_2to3=True,
classifiers=[
# How mature is this project? Common values are
# 3 - Alpha
# 4 - Beta
# 5 - Production/Stable
'Development Status :: 5 - Production/Stable',
# Indicate who your project is intended for
'Intended Audience :: Developers',
'Topic :: Software Development :: Build Tools',
# Pick your license as you wish (should match "license" above)
'License :: OSI Approved :: BSD License',
"Operating System :: OS Independent",
# Specify the Python versions you support here. In particular, ensure
# that you indicate whether you support Python 2, Python 3 or both.
'Programming Language :: Python',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3'
],
keywords=['complex-networks', 'community discovery'],
      install_requires=['networkx', 'future'],
packages=find_packages(exclude=["*.test", "*.test.*", "test.*", "test", "demon.test", "demon.test.*"]),
)
| 36.977778 | 109 | 0.615385 | [
"BSD-2-Clause"
] | KDDComplexNetworkAnalysis/DEMON | setup.py | 1,664 | Python |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
import logging
import unittest
from ml.rl.models.example_sequence_model import ExampleSequenceModel
from ml.rl.test.models.test_utils import check_save_load
logger = logging.getLogger(__name__)
class TestExampleSequenceModel(unittest.TestCase):
def test_basic(self):
state_dim = 8
model = ExampleSequenceModel(state_dim)
input = model.input_prototype()
output = model(input)
self.assertEqual((1, 1), output.value.shape)
def test_save_load(self):
state_dim = 8
model = ExampleSequenceModel(state_dim)
# ONNX sure exports a lot of parameters...
expected_num_params, expected_num_inputs, expected_num_outputs = 133, 3, 1
check_save_load(
self, model, expected_num_params, expected_num_inputs, expected_num_outputs
)
| 30.766667 | 87 | 0.713976 | [
"BSD-3-Clause"
] | MisterTea/BlueWhale | ml/rl/test/models/test_sequence_model.py | 923 | Python |
from rest_framework import serializers
from apps.currency.models import Currency
class CurrencyWalletSerializer(serializers.ModelSerializer):
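    # SerializerMethodField resolves the method name passed to it, so
    # "actual_nonce" is populated from get_nonce() below.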
actual_nonce = serializers.SerializerMethodField("get_nonce")
def get_nonce(self, wallet):
return wallet.nonce
class Meta:
from apps.wallet.models import Wallet
model = Wallet
fields = ["wallet_id", "public_key", "actual_nonce", "category", "state"]
class CurrencySerializer(serializers.ModelSerializer):
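    # Note the field/source crossover below: the serialized "owner_wallet" field
    # reads from the model's cashout_wallet, while "owner_wallet_new" exposes the
    # model's actual owner_wallet.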
owner_wallet = CurrencyWalletSerializer(source="cashout_wallet")
owner_wallet_new = CurrencyWalletSerializer(source="owner_wallet")
cashout_wallet = CurrencyWalletSerializer()
class Meta:
model = Currency
fields = [
"uuid",
"name",
"symbol",
"token_id",
"decimals",
"campaign_end",
"claim_deadline",
"allow_minting",
"owner_wallet_new",
"owner_wallet",
"cashout_wallet",
"starting_capital",
"is_public",
"needs_sms_verification",
]
| 27.142857 | 81 | 0.620175 | [
"MIT"
] | ecoo-app/ecoo-backend | apps/currency/serializers.py | 1,140 | Python |
# --------------------------------------------------------
# Deformable Convolutional Networks
# Copyright (c) 2017 Microsoft
# Copyright (c) 2019 IBM Corp
# Licensed under The Apache-2.0 License [see LICENSE for details]
# Written by Haozhi Qi
# --------------------------------------------------------
import cPickle
import mxnet as mx
from utils.symbol import Symbol
from operator_py.pyramid_proposal import *
from operator_py.proposal_target import *
from operator_py.fpn_roi_pooling import *
from operator_py.box_annotator_ohem import *
class resnet_v1_101_fpn_dcn_rcnn(Symbol):
def __init__(self):
"""
        Use __init__ to define the parameters the network needs
"""
self.shared_param_list = ['offset_p2', 'offset_p3', 'offset_p4', 'offset_p5',
'rpn_conv', 'rpn_cls_score', 'rpn_bbox_pred']
self.shared_param_dict = {}
for name in self.shared_param_list:
self.shared_param_dict[name + '_weight'] = mx.sym.Variable(name + '_weight')
self.shared_param_dict[name + '_bias'] = mx.sym.Variable(name + '_bias')
def get_resnet_backbone(self, data, with_dilated=False, with_dconv=False, with_dpyramid=False, eps=1e-5):
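        # ResNet-101 body written out layer by layer: a 7x7 stem (conv1 + pool1)
        # followed by bottleneck stages (res2: 3 blocks, res3: 4, res4: 23, ...).
        # Every block is the standard 1x1 -> 3x3 -> 1x1 bottleneck with frozen
        # BatchNorm (use_global_stats=True). When with_dpyramid is set, the 3x3
        # conv of the last block of a stage is replaced by a deformable
        # convolution (see res3b3_branch2b and res4b22_branch2b below).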
conv1 = mx.symbol.Convolution(name='conv1', data=data, num_filter=64, pad=(3, 3), kernel=(7, 7), stride=(2, 2), no_bias=True)
bn_conv1 = mx.symbol.BatchNorm(name='bn_conv1', data=conv1, use_global_stats=True, fix_gamma=False, eps=eps)
scale_conv1 = bn_conv1
conv1_relu = mx.symbol.Activation(name='conv1_relu', data=scale_conv1, act_type='relu')
pool1 = mx.symbol.Pooling(name='pool1', data=conv1_relu, pooling_convention='full', pad=(0, 0), kernel=(3, 3), stride=(2, 2), pool_type='max')
res2a_branch1 = mx.symbol.Convolution(name='res2a_branch1', data=pool1, num_filter=256, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn2a_branch1 = mx.symbol.BatchNorm(name='bn2a_branch1', data=res2a_branch1, use_global_stats=True, fix_gamma=False, eps=eps)
scale2a_branch1 = bn2a_branch1
res2a_branch2a = mx.symbol.Convolution(name='res2a_branch2a', data=pool1, num_filter=64, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn2a_branch2a = mx.symbol.BatchNorm(name='bn2a_branch2a', data=res2a_branch2a, use_global_stats=True, fix_gamma=False, eps=eps)
scale2a_branch2a = bn2a_branch2a
res2a_branch2a_relu = mx.symbol.Activation(name='res2a_branch2a_relu', data=scale2a_branch2a, act_type='relu')
res2a_branch2b = mx.symbol.Convolution(name='res2a_branch2b', data=res2a_branch2a_relu, num_filter=64, pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn2a_branch2b = mx.symbol.BatchNorm(name='bn2a_branch2b', data=res2a_branch2b, use_global_stats=True, fix_gamma=False, eps=eps)
scale2a_branch2b = bn2a_branch2b
res2a_branch2b_relu = mx.symbol.Activation(name='res2a_branch2b_relu', data=scale2a_branch2b, act_type='relu')
res2a_branch2c = mx.symbol.Convolution(name='res2a_branch2c', data=res2a_branch2b_relu, num_filter=256,
pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn2a_branch2c = mx.symbol.BatchNorm(name='bn2a_branch2c', data=res2a_branch2c, use_global_stats=True,
fix_gamma=False, eps=eps)
scale2a_branch2c = bn2a_branch2c
res2a = mx.symbol.broadcast_add(name='res2a', *[scale2a_branch1, scale2a_branch2c])
res2a_relu = mx.symbol.Activation(name='res2a_relu', data=res2a, act_type='relu')
res2b_branch2a = mx.symbol.Convolution(name='res2b_branch2a', data=res2a_relu, num_filter=64, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn2b_branch2a = mx.symbol.BatchNorm(name='bn2b_branch2a', data=res2b_branch2a, use_global_stats=True,
fix_gamma=False, eps=eps)
scale2b_branch2a = bn2b_branch2a
res2b_branch2a_relu = mx.symbol.Activation(name='res2b_branch2a_relu', data=scale2b_branch2a, act_type='relu')
res2b_branch2b = mx.symbol.Convolution(name='res2b_branch2b', data=res2b_branch2a_relu, num_filter=64,
pad=(1, 1),
kernel=(3, 3), stride=(1, 1), no_bias=True)
bn2b_branch2b = mx.symbol.BatchNorm(name='bn2b_branch2b', data=res2b_branch2b, use_global_stats=True,
fix_gamma=False, eps=eps)
scale2b_branch2b = bn2b_branch2b
res2b_branch2b_relu = mx.symbol.Activation(name='res2b_branch2b_relu', data=scale2b_branch2b, act_type='relu')
res2b_branch2c = mx.symbol.Convolution(name='res2b_branch2c', data=res2b_branch2b_relu, num_filter=256,
pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn2b_branch2c = mx.symbol.BatchNorm(name='bn2b_branch2c', data=res2b_branch2c, use_global_stats=True,
fix_gamma=False, eps=eps)
scale2b_branch2c = bn2b_branch2c
res2b = mx.symbol.broadcast_add(name='res2b', *[res2a_relu, scale2b_branch2c])
res2b_relu = mx.symbol.Activation(name='res2b_relu', data=res2b, act_type='relu')
res2c_branch2a = mx.symbol.Convolution(name='res2c_branch2a', data=res2b_relu, num_filter=64, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn2c_branch2a = mx.symbol.BatchNorm(name='bn2c_branch2a', data=res2c_branch2a, use_global_stats=True,
fix_gamma=False, eps=eps)
scale2c_branch2a = bn2c_branch2a
res2c_branch2a_relu = mx.symbol.Activation(name='res2c_branch2a_relu', data=scale2c_branch2a, act_type='relu')
res2c_branch2b = mx.symbol.Convolution(name='res2c_branch2b', data=res2c_branch2a_relu, num_filter=64,
pad=(1, 1),
kernel=(3, 3), stride=(1, 1), no_bias=True)
bn2c_branch2b = mx.symbol.BatchNorm(name='bn2c_branch2b', data=res2c_branch2b, use_global_stats=True,
fix_gamma=False, eps=eps)
scale2c_branch2b = bn2c_branch2b
res2c_branch2b_relu = mx.symbol.Activation(name='res2c_branch2b_relu', data=scale2c_branch2b, act_type='relu')
res2c_branch2c = mx.symbol.Convolution(name='res2c_branch2c', data=res2c_branch2b_relu, num_filter=256,
pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn2c_branch2c = mx.symbol.BatchNorm(name='bn2c_branch2c', data=res2c_branch2c, use_global_stats=True,
fix_gamma=False, eps=eps)
scale2c_branch2c = bn2c_branch2c
res2c = mx.symbol.broadcast_add(name='res2c', *[res2b_relu, scale2c_branch2c])
res2c_relu = mx.symbol.Activation(name='res2c_relu', data=res2c, act_type='relu')
res3a_branch1 = mx.symbol.Convolution(name='res3a_branch1', data=res2c_relu, num_filter=512, pad=(0, 0),
kernel=(1, 1), stride=(2, 2), no_bias=True)
bn3a_branch1 = mx.symbol.BatchNorm(name='bn3a_branch1', data=res3a_branch1, use_global_stats=True,
fix_gamma=False, eps=eps)
scale3a_branch1 = bn3a_branch1
res3a_branch2a = mx.symbol.Convolution(name='res3a_branch2a', data=res2c_relu, num_filter=128, pad=(0, 0),
kernel=(1, 1), stride=(2, 2), no_bias=True)
bn3a_branch2a = mx.symbol.BatchNorm(name='bn3a_branch2a', data=res3a_branch2a, use_global_stats=True,
fix_gamma=False, eps=eps)
scale3a_branch2a = bn3a_branch2a
res3a_branch2a_relu = mx.symbol.Activation(name='res3a_branch2a_relu', data=scale3a_branch2a, act_type='relu')
res3a_branch2b = mx.symbol.Convolution(name='res3a_branch2b', data=res3a_branch2a_relu, num_filter=128,
pad=(1, 1),
kernel=(3, 3), stride=(1, 1), no_bias=True)
bn3a_branch2b = mx.symbol.BatchNorm(name='bn3a_branch2b', data=res3a_branch2b, use_global_stats=True,
fix_gamma=False, eps=eps)
scale3a_branch2b = bn3a_branch2b
res3a_branch2b_relu = mx.symbol.Activation(name='res3a_branch2b_relu', data=scale3a_branch2b, act_type='relu')
res3a_branch2c = mx.symbol.Convolution(name='res3a_branch2c', data=res3a_branch2b_relu, num_filter=512,
pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn3a_branch2c = mx.symbol.BatchNorm(name='bn3a_branch2c', data=res3a_branch2c, use_global_stats=True,
fix_gamma=False, eps=eps)
scale3a_branch2c = bn3a_branch2c
res3a = mx.symbol.broadcast_add(name='res3a', *[scale3a_branch1, scale3a_branch2c])
res3a_relu = mx.symbol.Activation(name='res3a_relu', data=res3a, act_type='relu')
res3b1_branch2a = mx.symbol.Convolution(name='res3b1_branch2a', data=res3a_relu, num_filter=128, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn3b1_branch2a = mx.symbol.BatchNorm(name='bn3b1_branch2a', data=res3b1_branch2a, use_global_stats=True,
fix_gamma=False, eps=eps)
scale3b1_branch2a = bn3b1_branch2a
res3b1_branch2a_relu = mx.symbol.Activation(name='res3b1_branch2a_relu', data=scale3b1_branch2a,
act_type='relu')
res3b1_branch2b = mx.symbol.Convolution(name='res3b1_branch2b', data=res3b1_branch2a_relu, num_filter=128,
pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn3b1_branch2b = mx.symbol.BatchNorm(name='bn3b1_branch2b', data=res3b1_branch2b, use_global_stats=True,
fix_gamma=False, eps=eps)
scale3b1_branch2b = bn3b1_branch2b
res3b1_branch2b_relu = mx.symbol.Activation(name='res3b1_branch2b_relu', data=scale3b1_branch2b,
act_type='relu')
res3b1_branch2c = mx.symbol.Convolution(name='res3b1_branch2c', data=res3b1_branch2b_relu, num_filter=512,
pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn3b1_branch2c = mx.symbol.BatchNorm(name='bn3b1_branch2c', data=res3b1_branch2c, use_global_stats=True,
fix_gamma=False, eps=eps)
scale3b1_branch2c = bn3b1_branch2c
res3b1 = mx.symbol.broadcast_add(name='res3b1', *[res3a_relu, scale3b1_branch2c])
res3b1_relu = mx.symbol.Activation(name='res3b1_relu', data=res3b1, act_type='relu')
res3b2_branch2a = mx.symbol.Convolution(name='res3b2_branch2a', data=res3b1_relu, num_filter=128, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn3b2_branch2a = mx.symbol.BatchNorm(name='bn3b2_branch2a', data=res3b2_branch2a, use_global_stats=True,
fix_gamma=False, eps=eps)
scale3b2_branch2a = bn3b2_branch2a
res3b2_branch2a_relu = mx.symbol.Activation(name='res3b2_branch2a_relu', data=scale3b2_branch2a,
act_type='relu')
res3b2_branch2b = mx.symbol.Convolution(name='res3b2_branch2b', data=res3b2_branch2a_relu, num_filter=128,
pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn3b2_branch2b = mx.symbol.BatchNorm(name='bn3b2_branch2b', data=res3b2_branch2b, use_global_stats=True,
fix_gamma=False, eps=eps)
scale3b2_branch2b = bn3b2_branch2b
res3b2_branch2b_relu = mx.symbol.Activation(name='res3b2_branch2b_relu', data=scale3b2_branch2b,
act_type='relu')
res3b2_branch2c = mx.symbol.Convolution(name='res3b2_branch2c', data=res3b2_branch2b_relu, num_filter=512,
pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn3b2_branch2c = mx.symbol.BatchNorm(name='bn3b2_branch2c', data=res3b2_branch2c, use_global_stats=True,
fix_gamma=False, eps=eps)
scale3b2_branch2c = bn3b2_branch2c
res3b2 = mx.symbol.broadcast_add(name='res3b2', *[res3b1_relu, scale3b2_branch2c])
res3b2_relu = mx.symbol.Activation(name='res3b2_relu', data=res3b2, act_type='relu')
res3b3_branch2a = mx.symbol.Convolution(name='res3b3_branch2a', data=res3b2_relu, num_filter=128, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn3b3_branch2a = mx.symbol.BatchNorm(name='bn3b3_branch2a', data=res3b3_branch2a, use_global_stats=True,
fix_gamma=False, eps=eps)
scale3b3_branch2a = bn3b3_branch2a
res3b3_branch2a_relu = mx.symbol.Activation(name='res3b3_branch2a_relu', data=scale3b3_branch2a,
act_type='relu')
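        # Deformable variant: a plain 3x3 conv predicts 72 offset channels
        # (4 deformable groups x 2 coordinates x 3x3 kernel) that shift the
        # sampling locations of the deformable convolution at every position.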
if with_dpyramid:
res3b3_branch2b_offset = mx.symbol.Convolution(name='res3b3_branch2b_offset', data=res3b3_branch2a_relu,
num_filter=72, pad=(1, 1), kernel=(3, 3), stride=(1, 1))
res3b3_branch2b = mx.contrib.symbol.DeformableConvolution(name='res3b3_branch2b', data=res3b3_branch2a_relu,
offset=res3b3_branch2b_offset,
num_filter=128, pad=(1, 1), kernel=(3, 3),
num_deformable_group=4,
stride=(1, 1), no_bias=True)
else:
res3b3_branch2b = mx.symbol.Convolution(name='res3b3_branch2b', data=res3b3_branch2a_relu, num_filter=128,
pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn3b3_branch2b = mx.symbol.BatchNorm(name='bn3b3_branch2b', data=res3b3_branch2b, use_global_stats=True,
fix_gamma=False, eps=eps)
scale3b3_branch2b = bn3b3_branch2b
res3b3_branch2b_relu = mx.symbol.Activation(name='res3b3_branch2b_relu', data=scale3b3_branch2b,
act_type='relu')
res3b3_branch2c = mx.symbol.Convolution(name='res3b3_branch2c', data=res3b3_branch2b_relu, num_filter=512,
pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn3b3_branch2c = mx.symbol.BatchNorm(name='bn3b3_branch2c', data=res3b3_branch2c, use_global_stats=True,
fix_gamma=False, eps=eps)
scale3b3_branch2c = bn3b3_branch2c
res3b3 = mx.symbol.broadcast_add(name='res3b3', *[res3b2_relu, scale3b3_branch2c])
res3b3_relu = mx.symbol.Activation(name='res3b3_relu', data=res3b3, act_type='relu')
res4a_branch1 = mx.symbol.Convolution(name='res4a_branch1', data=res3b3_relu, num_filter=1024, pad=(0, 0),
kernel=(1, 1), stride=(2, 2), no_bias=True)
bn4a_branch1 = mx.symbol.BatchNorm(name='bn4a_branch1', data=res4a_branch1, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4a_branch1 = bn4a_branch1
res4a_branch2a = mx.symbol.Convolution(name='res4a_branch2a', data=res3b3_relu, num_filter=256, pad=(0, 0),
kernel=(1, 1), stride=(2, 2), no_bias=True)
bn4a_branch2a = mx.symbol.BatchNorm(name='bn4a_branch2a', data=res4a_branch2a, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4a_branch2a = bn4a_branch2a
res4a_branch2a_relu = mx.symbol.Activation(name='res4a_branch2a_relu', data=scale4a_branch2a, act_type='relu')
res4a_branch2b = mx.symbol.Convolution(name='res4a_branch2b', data=res4a_branch2a_relu, num_filter=256,
pad=(1, 1),
kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4a_branch2b = mx.symbol.BatchNorm(name='bn4a_branch2b', data=res4a_branch2b, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4a_branch2b = bn4a_branch2b
res4a_branch2b_relu = mx.symbol.Activation(name='res4a_branch2b_relu', data=scale4a_branch2b, act_type='relu')
res4a_branch2c = mx.symbol.Convolution(name='res4a_branch2c', data=res4a_branch2b_relu, num_filter=1024,
pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4a_branch2c = mx.symbol.BatchNorm(name='bn4a_branch2c', data=res4a_branch2c, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4a_branch2c = bn4a_branch2c
res4a = mx.symbol.broadcast_add(name='res4a', *[scale4a_branch1, scale4a_branch2c])
res4a_relu = mx.symbol.Activation(name='res4a_relu', data=res4a, act_type='relu')
res4b1_branch2a = mx.symbol.Convolution(name='res4b1_branch2a', data=res4a_relu, num_filter=256, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b1_branch2a = mx.symbol.BatchNorm(name='bn4b1_branch2a', data=res4b1_branch2a, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b1_branch2a = bn4b1_branch2a
res4b1_branch2a_relu = mx.symbol.Activation(name='res4b1_branch2a_relu', data=scale4b1_branch2a,
act_type='relu')
res4b1_branch2b = mx.symbol.Convolution(name='res4b1_branch2b', data=res4b1_branch2a_relu, num_filter=256,
pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b1_branch2b = mx.symbol.BatchNorm(name='bn4b1_branch2b', data=res4b1_branch2b, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b1_branch2b = bn4b1_branch2b
res4b1_branch2b_relu = mx.symbol.Activation(name='res4b1_branch2b_relu', data=scale4b1_branch2b,
act_type='relu')
res4b1_branch2c = mx.symbol.Convolution(name='res4b1_branch2c', data=res4b1_branch2b_relu, num_filter=1024,
pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b1_branch2c = mx.symbol.BatchNorm(name='bn4b1_branch2c', data=res4b1_branch2c, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b1_branch2c = bn4b1_branch2c
res4b1 = mx.symbol.broadcast_add(name='res4b1', *[res4a_relu, scale4b1_branch2c])
res4b1_relu = mx.symbol.Activation(name='res4b1_relu', data=res4b1, act_type='relu')
res4b2_branch2a = mx.symbol.Convolution(name='res4b2_branch2a', data=res4b1_relu, num_filter=256, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b2_branch2a = mx.symbol.BatchNorm(name='bn4b2_branch2a', data=res4b2_branch2a, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b2_branch2a = bn4b2_branch2a
res4b2_branch2a_relu = mx.symbol.Activation(name='res4b2_branch2a_relu', data=scale4b2_branch2a,
act_type='relu')
res4b2_branch2b = mx.symbol.Convolution(name='res4b2_branch2b', data=res4b2_branch2a_relu, num_filter=256,
pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b2_branch2b = mx.symbol.BatchNorm(name='bn4b2_branch2b', data=res4b2_branch2b, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b2_branch2b = bn4b2_branch2b
res4b2_branch2b_relu = mx.symbol.Activation(name='res4b2_branch2b_relu', data=scale4b2_branch2b,
act_type='relu')
res4b2_branch2c = mx.symbol.Convolution(name='res4b2_branch2c', data=res4b2_branch2b_relu, num_filter=1024,
pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b2_branch2c = mx.symbol.BatchNorm(name='bn4b2_branch2c', data=res4b2_branch2c, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b2_branch2c = bn4b2_branch2c
res4b2 = mx.symbol.broadcast_add(name='res4b2', *[res4b1_relu, scale4b2_branch2c])
res4b2_relu = mx.symbol.Activation(name='res4b2_relu', data=res4b2, act_type='relu')
res4b3_branch2a = mx.symbol.Convolution(name='res4b3_branch2a', data=res4b2_relu, num_filter=256, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b3_branch2a = mx.symbol.BatchNorm(name='bn4b3_branch2a', data=res4b3_branch2a, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b3_branch2a = bn4b3_branch2a
res4b3_branch2a_relu = mx.symbol.Activation(name='res4b3_branch2a_relu', data=scale4b3_branch2a,
act_type='relu')
res4b3_branch2b = mx.symbol.Convolution(name='res4b3_branch2b', data=res4b3_branch2a_relu, num_filter=256,
pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b3_branch2b = mx.symbol.BatchNorm(name='bn4b3_branch2b', data=res4b3_branch2b, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b3_branch2b = bn4b3_branch2b
res4b3_branch2b_relu = mx.symbol.Activation(name='res4b3_branch2b_relu', data=scale4b3_branch2b,
act_type='relu')
res4b3_branch2c = mx.symbol.Convolution(name='res4b3_branch2c', data=res4b3_branch2b_relu, num_filter=1024,
pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b3_branch2c = mx.symbol.BatchNorm(name='bn4b3_branch2c', data=res4b3_branch2c, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b3_branch2c = bn4b3_branch2c
res4b3 = mx.symbol.broadcast_add(name='res4b3', *[res4b2_relu, scale4b3_branch2c])
res4b3_relu = mx.symbol.Activation(name='res4b3_relu', data=res4b3, act_type='relu')
res4b4_branch2a = mx.symbol.Convolution(name='res4b4_branch2a', data=res4b3_relu, num_filter=256, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b4_branch2a = mx.symbol.BatchNorm(name='bn4b4_branch2a', data=res4b4_branch2a, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b4_branch2a = bn4b4_branch2a
res4b4_branch2a_relu = mx.symbol.Activation(name='res4b4_branch2a_relu', data=scale4b4_branch2a,
act_type='relu')
res4b4_branch2b = mx.symbol.Convolution(name='res4b4_branch2b', data=res4b4_branch2a_relu, num_filter=256,
pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b4_branch2b = mx.symbol.BatchNorm(name='bn4b4_branch2b', data=res4b4_branch2b, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b4_branch2b = bn4b4_branch2b
res4b4_branch2b_relu = mx.symbol.Activation(name='res4b4_branch2b_relu', data=scale4b4_branch2b,
act_type='relu')
res4b4_branch2c = mx.symbol.Convolution(name='res4b4_branch2c', data=res4b4_branch2b_relu, num_filter=1024,
pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b4_branch2c = mx.symbol.BatchNorm(name='bn4b4_branch2c', data=res4b4_branch2c, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b4_branch2c = bn4b4_branch2c
res4b4 = mx.symbol.broadcast_add(name='res4b4', *[res4b3_relu, scale4b4_branch2c])
res4b4_relu = mx.symbol.Activation(name='res4b4_relu', data=res4b4, act_type='relu')
res4b5_branch2a = mx.symbol.Convolution(name='res4b5_branch2a', data=res4b4_relu, num_filter=256, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b5_branch2a = mx.symbol.BatchNorm(name='bn4b5_branch2a', data=res4b5_branch2a, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b5_branch2a = bn4b5_branch2a
res4b5_branch2a_relu = mx.symbol.Activation(name='res4b5_branch2a_relu', data=scale4b5_branch2a,
act_type='relu')
res4b5_branch2b = mx.symbol.Convolution(name='res4b5_branch2b', data=res4b5_branch2a_relu, num_filter=256,
pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b5_branch2b = mx.symbol.BatchNorm(name='bn4b5_branch2b', data=res4b5_branch2b, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b5_branch2b = bn4b5_branch2b
res4b5_branch2b_relu = mx.symbol.Activation(name='res4b5_branch2b_relu', data=scale4b5_branch2b,
act_type='relu')
res4b5_branch2c = mx.symbol.Convolution(name='res4b5_branch2c', data=res4b5_branch2b_relu, num_filter=1024,
pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b5_branch2c = mx.symbol.BatchNorm(name='bn4b5_branch2c', data=res4b5_branch2c, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b5_branch2c = bn4b5_branch2c
res4b5 = mx.symbol.broadcast_add(name='res4b5', *[res4b4_relu, scale4b5_branch2c])
res4b5_relu = mx.symbol.Activation(name='res4b5_relu', data=res4b5, act_type='relu')
res4b6_branch2a = mx.symbol.Convolution(name='res4b6_branch2a', data=res4b5_relu, num_filter=256, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b6_branch2a = mx.symbol.BatchNorm(name='bn4b6_branch2a', data=res4b6_branch2a, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b6_branch2a = bn4b6_branch2a
res4b6_branch2a_relu = mx.symbol.Activation(name='res4b6_branch2a_relu', data=scale4b6_branch2a,
act_type='relu')
res4b6_branch2b = mx.symbol.Convolution(name='res4b6_branch2b', data=res4b6_branch2a_relu, num_filter=256,
pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b6_branch2b = mx.symbol.BatchNorm(name='bn4b6_branch2b', data=res4b6_branch2b, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b6_branch2b = bn4b6_branch2b
res4b6_branch2b_relu = mx.symbol.Activation(name='res4b6_branch2b_relu', data=scale4b6_branch2b,
act_type='relu')
res4b6_branch2c = mx.symbol.Convolution(name='res4b6_branch2c', data=res4b6_branch2b_relu, num_filter=1024,
pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b6_branch2c = mx.symbol.BatchNorm(name='bn4b6_branch2c', data=res4b6_branch2c, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b6_branch2c = bn4b6_branch2c
res4b6 = mx.symbol.broadcast_add(name='res4b6', *[res4b5_relu, scale4b6_branch2c])
res4b6_relu = mx.symbol.Activation(name='res4b6_relu', data=res4b6, act_type='relu')
res4b7_branch2a = mx.symbol.Convolution(name='res4b7_branch2a', data=res4b6_relu, num_filter=256, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b7_branch2a = mx.symbol.BatchNorm(name='bn4b7_branch2a', data=res4b7_branch2a, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b7_branch2a = bn4b7_branch2a
res4b7_branch2a_relu = mx.symbol.Activation(name='res4b7_branch2a_relu', data=scale4b7_branch2a,
act_type='relu')
res4b7_branch2b = mx.symbol.Convolution(name='res4b7_branch2b', data=res4b7_branch2a_relu, num_filter=256,
pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b7_branch2b = mx.symbol.BatchNorm(name='bn4b7_branch2b', data=res4b7_branch2b, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b7_branch2b = bn4b7_branch2b
res4b7_branch2b_relu = mx.symbol.Activation(name='res4b7_branch2b_relu', data=scale4b7_branch2b,
act_type='relu')
res4b7_branch2c = mx.symbol.Convolution(name='res4b7_branch2c', data=res4b7_branch2b_relu, num_filter=1024,
pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b7_branch2c = mx.symbol.BatchNorm(name='bn4b7_branch2c', data=res4b7_branch2c, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b7_branch2c = bn4b7_branch2c
res4b7 = mx.symbol.broadcast_add(name='res4b7', *[res4b6_relu, scale4b7_branch2c])
res4b7_relu = mx.symbol.Activation(name='res4b7_relu', data=res4b7, act_type='relu')
res4b8_branch2a = mx.symbol.Convolution(name='res4b8_branch2a', data=res4b7_relu, num_filter=256, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b8_branch2a = mx.symbol.BatchNorm(name='bn4b8_branch2a', data=res4b8_branch2a, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b8_branch2a = bn4b8_branch2a
res4b8_branch2a_relu = mx.symbol.Activation(name='res4b8_branch2a_relu', data=scale4b8_branch2a,
act_type='relu')
res4b8_branch2b = mx.symbol.Convolution(name='res4b8_branch2b', data=res4b8_branch2a_relu, num_filter=256,
pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b8_branch2b = mx.symbol.BatchNorm(name='bn4b8_branch2b', data=res4b8_branch2b, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b8_branch2b = bn4b8_branch2b
res4b8_branch2b_relu = mx.symbol.Activation(name='res4b8_branch2b_relu', data=scale4b8_branch2b,
act_type='relu')
res4b8_branch2c = mx.symbol.Convolution(name='res4b8_branch2c', data=res4b8_branch2b_relu, num_filter=1024,
pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b8_branch2c = mx.symbol.BatchNorm(name='bn4b8_branch2c', data=res4b8_branch2c, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b8_branch2c = bn4b8_branch2c
res4b8 = mx.symbol.broadcast_add(name='res4b8', *[res4b7_relu, scale4b8_branch2c])
res4b8_relu = mx.symbol.Activation(name='res4b8_relu', data=res4b8, act_type='relu')
res4b9_branch2a = mx.symbol.Convolution(name='res4b9_branch2a', data=res4b8_relu, num_filter=256, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b9_branch2a = mx.symbol.BatchNorm(name='bn4b9_branch2a', data=res4b9_branch2a, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b9_branch2a = bn4b9_branch2a
res4b9_branch2a_relu = mx.symbol.Activation(name='res4b9_branch2a_relu', data=scale4b9_branch2a,
act_type='relu')
res4b9_branch2b = mx.symbol.Convolution(name='res4b9_branch2b', data=res4b9_branch2a_relu, num_filter=256,
pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b9_branch2b = mx.symbol.BatchNorm(name='bn4b9_branch2b', data=res4b9_branch2b, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b9_branch2b = bn4b9_branch2b
res4b9_branch2b_relu = mx.symbol.Activation(name='res4b9_branch2b_relu', data=scale4b9_branch2b,
act_type='relu')
res4b9_branch2c = mx.symbol.Convolution(name='res4b9_branch2c', data=res4b9_branch2b_relu, num_filter=1024,
pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b9_branch2c = mx.symbol.BatchNorm(name='bn4b9_branch2c', data=res4b9_branch2c, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b9_branch2c = bn4b9_branch2c
res4b9 = mx.symbol.broadcast_add(name='res4b9', *[res4b8_relu, scale4b9_branch2c])
res4b9_relu = mx.symbol.Activation(name='res4b9_relu', data=res4b9, act_type='relu')
res4b10_branch2a = mx.symbol.Convolution(name='res4b10_branch2a', data=res4b9_relu, num_filter=256, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b10_branch2a = mx.symbol.BatchNorm(name='bn4b10_branch2a', data=res4b10_branch2a, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b10_branch2a = bn4b10_branch2a
res4b10_branch2a_relu = mx.symbol.Activation(name='res4b10_branch2a_relu', data=scale4b10_branch2a,
act_type='relu')
res4b10_branch2b = mx.symbol.Convolution(name='res4b10_branch2b', data=res4b10_branch2a_relu, num_filter=256,
pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b10_branch2b = mx.symbol.BatchNorm(name='bn4b10_branch2b', data=res4b10_branch2b, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b10_branch2b = bn4b10_branch2b
res4b10_branch2b_relu = mx.symbol.Activation(name='res4b10_branch2b_relu', data=scale4b10_branch2b,
act_type='relu')
res4b10_branch2c = mx.symbol.Convolution(name='res4b10_branch2c', data=res4b10_branch2b_relu, num_filter=1024,
pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b10_branch2c = mx.symbol.BatchNorm(name='bn4b10_branch2c', data=res4b10_branch2c, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b10_branch2c = bn4b10_branch2c
res4b10 = mx.symbol.broadcast_add(name='res4b10', *[res4b9_relu, scale4b10_branch2c])
res4b10_relu = mx.symbol.Activation(name='res4b10_relu', data=res4b10, act_type='relu')
res4b11_branch2a = mx.symbol.Convolution(name='res4b11_branch2a', data=res4b10_relu, num_filter=256, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b11_branch2a = mx.symbol.BatchNorm(name='bn4b11_branch2a', data=res4b11_branch2a, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b11_branch2a = bn4b11_branch2a
res4b11_branch2a_relu = mx.symbol.Activation(name='res4b11_branch2a_relu', data=scale4b11_branch2a,
act_type='relu')
res4b11_branch2b = mx.symbol.Convolution(name='res4b11_branch2b', data=res4b11_branch2a_relu, num_filter=256,
pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b11_branch2b = mx.symbol.BatchNorm(name='bn4b11_branch2b', data=res4b11_branch2b, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b11_branch2b = bn4b11_branch2b
res4b11_branch2b_relu = mx.symbol.Activation(name='res4b11_branch2b_relu', data=scale4b11_branch2b,
act_type='relu')
res4b11_branch2c = mx.symbol.Convolution(name='res4b11_branch2c', data=res4b11_branch2b_relu, num_filter=1024,
pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b11_branch2c = mx.symbol.BatchNorm(name='bn4b11_branch2c', data=res4b11_branch2c, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b11_branch2c = bn4b11_branch2c
res4b11 = mx.symbol.broadcast_add(name='res4b11', *[res4b10_relu, scale4b11_branch2c])
res4b11_relu = mx.symbol.Activation(name='res4b11_relu', data=res4b11, act_type='relu')
res4b12_branch2a = mx.symbol.Convolution(name='res4b12_branch2a', data=res4b11_relu, num_filter=256, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b12_branch2a = mx.symbol.BatchNorm(name='bn4b12_branch2a', data=res4b12_branch2a, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b12_branch2a = bn4b12_branch2a
res4b12_branch2a_relu = mx.symbol.Activation(name='res4b12_branch2a_relu', data=scale4b12_branch2a,
act_type='relu')
res4b12_branch2b = mx.symbol.Convolution(name='res4b12_branch2b', data=res4b12_branch2a_relu, num_filter=256,
pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b12_branch2b = mx.symbol.BatchNorm(name='bn4b12_branch2b', data=res4b12_branch2b, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b12_branch2b = bn4b12_branch2b
res4b12_branch2b_relu = mx.symbol.Activation(name='res4b12_branch2b_relu', data=scale4b12_branch2b,
act_type='relu')
res4b12_branch2c = mx.symbol.Convolution(name='res4b12_branch2c', data=res4b12_branch2b_relu, num_filter=1024,
pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b12_branch2c = mx.symbol.BatchNorm(name='bn4b12_branch2c', data=res4b12_branch2c, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b12_branch2c = bn4b12_branch2c
res4b12 = mx.symbol.broadcast_add(name='res4b12', *[res4b11_relu, scale4b12_branch2c])
res4b12_relu = mx.symbol.Activation(name='res4b12_relu', data=res4b12, act_type='relu')
res4b13_branch2a = mx.symbol.Convolution(name='res4b13_branch2a', data=res4b12_relu, num_filter=256, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b13_branch2a = mx.symbol.BatchNorm(name='bn4b13_branch2a', data=res4b13_branch2a, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b13_branch2a = bn4b13_branch2a
res4b13_branch2a_relu = mx.symbol.Activation(name='res4b13_branch2a_relu', data=scale4b13_branch2a,
act_type='relu')
res4b13_branch2b = mx.symbol.Convolution(name='res4b13_branch2b', data=res4b13_branch2a_relu, num_filter=256,
pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b13_branch2b = mx.symbol.BatchNorm(name='bn4b13_branch2b', data=res4b13_branch2b, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b13_branch2b = bn4b13_branch2b
res4b13_branch2b_relu = mx.symbol.Activation(name='res4b13_branch2b_relu', data=scale4b13_branch2b,
act_type='relu')
res4b13_branch2c = mx.symbol.Convolution(name='res4b13_branch2c', data=res4b13_branch2b_relu, num_filter=1024,
pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b13_branch2c = mx.symbol.BatchNorm(name='bn4b13_branch2c', data=res4b13_branch2c, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b13_branch2c = bn4b13_branch2c
res4b13 = mx.symbol.broadcast_add(name='res4b13', *[res4b12_relu, scale4b13_branch2c])
res4b13_relu = mx.symbol.Activation(name='res4b13_relu', data=res4b13, act_type='relu')
res4b14_branch2a = mx.symbol.Convolution(name='res4b14_branch2a', data=res4b13_relu, num_filter=256, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b14_branch2a = mx.symbol.BatchNorm(name='bn4b14_branch2a', data=res4b14_branch2a, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b14_branch2a = bn4b14_branch2a
res4b14_branch2a_relu = mx.symbol.Activation(name='res4b14_branch2a_relu', data=scale4b14_branch2a,
act_type='relu')
res4b14_branch2b = mx.symbol.Convolution(name='res4b14_branch2b', data=res4b14_branch2a_relu, num_filter=256,
pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b14_branch2b = mx.symbol.BatchNorm(name='bn4b14_branch2b', data=res4b14_branch2b, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b14_branch2b = bn4b14_branch2b
res4b14_branch2b_relu = mx.symbol.Activation(name='res4b14_branch2b_relu', data=scale4b14_branch2b,
act_type='relu')
res4b14_branch2c = mx.symbol.Convolution(name='res4b14_branch2c', data=res4b14_branch2b_relu, num_filter=1024,
pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b14_branch2c = mx.symbol.BatchNorm(name='bn4b14_branch2c', data=res4b14_branch2c, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b14_branch2c = bn4b14_branch2c
res4b14 = mx.symbol.broadcast_add(name='res4b14', *[res4b13_relu, scale4b14_branch2c])
res4b14_relu = mx.symbol.Activation(name='res4b14_relu', data=res4b14, act_type='relu')
res4b15_branch2a = mx.symbol.Convolution(name='res4b15_branch2a', data=res4b14_relu, num_filter=256, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b15_branch2a = mx.symbol.BatchNorm(name='bn4b15_branch2a', data=res4b15_branch2a, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b15_branch2a = bn4b15_branch2a
res4b15_branch2a_relu = mx.symbol.Activation(name='res4b15_branch2a_relu', data=scale4b15_branch2a,
act_type='relu')
res4b15_branch2b = mx.symbol.Convolution(name='res4b15_branch2b', data=res4b15_branch2a_relu, num_filter=256,
pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b15_branch2b = mx.symbol.BatchNorm(name='bn4b15_branch2b', data=res4b15_branch2b, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b15_branch2b = bn4b15_branch2b
res4b15_branch2b_relu = mx.symbol.Activation(name='res4b15_branch2b_relu', data=scale4b15_branch2b,
act_type='relu')
res4b15_branch2c = mx.symbol.Convolution(name='res4b15_branch2c', data=res4b15_branch2b_relu, num_filter=1024,
pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b15_branch2c = mx.symbol.BatchNorm(name='bn4b15_branch2c', data=res4b15_branch2c, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b15_branch2c = bn4b15_branch2c
res4b15 = mx.symbol.broadcast_add(name='res4b15', *[res4b14_relu, scale4b15_branch2c])
res4b15_relu = mx.symbol.Activation(name='res4b15_relu', data=res4b15, act_type='relu')
res4b16_branch2a = mx.symbol.Convolution(name='res4b16_branch2a', data=res4b15_relu, num_filter=256, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b16_branch2a = mx.symbol.BatchNorm(name='bn4b16_branch2a', data=res4b16_branch2a, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b16_branch2a = bn4b16_branch2a
res4b16_branch2a_relu = mx.symbol.Activation(name='res4b16_branch2a_relu', data=scale4b16_branch2a,
act_type='relu')
res4b16_branch2b = mx.symbol.Convolution(name='res4b16_branch2b', data=res4b16_branch2a_relu, num_filter=256,
pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b16_branch2b = mx.symbol.BatchNorm(name='bn4b16_branch2b', data=res4b16_branch2b, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b16_branch2b = bn4b16_branch2b
res4b16_branch2b_relu = mx.symbol.Activation(name='res4b16_branch2b_relu', data=scale4b16_branch2b,
act_type='relu')
res4b16_branch2c = mx.symbol.Convolution(name='res4b16_branch2c', data=res4b16_branch2b_relu, num_filter=1024,
pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b16_branch2c = mx.symbol.BatchNorm(name='bn4b16_branch2c', data=res4b16_branch2c, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b16_branch2c = bn4b16_branch2c
res4b16 = mx.symbol.broadcast_add(name='res4b16', *[res4b15_relu, scale4b16_branch2c])
res4b16_relu = mx.symbol.Activation(name='res4b16_relu', data=res4b16, act_type='relu')
res4b17_branch2a = mx.symbol.Convolution(name='res4b17_branch2a', data=res4b16_relu, num_filter=256, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b17_branch2a = mx.symbol.BatchNorm(name='bn4b17_branch2a', data=res4b17_branch2a, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b17_branch2a = bn4b17_branch2a
res4b17_branch2a_relu = mx.symbol.Activation(name='res4b17_branch2a_relu', data=scale4b17_branch2a,
act_type='relu')
res4b17_branch2b = mx.symbol.Convolution(name='res4b17_branch2b', data=res4b17_branch2a_relu, num_filter=256,
pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b17_branch2b = mx.symbol.BatchNorm(name='bn4b17_branch2b', data=res4b17_branch2b, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b17_branch2b = bn4b17_branch2b
res4b17_branch2b_relu = mx.symbol.Activation(name='res4b17_branch2b_relu', data=scale4b17_branch2b,
act_type='relu')
res4b17_branch2c = mx.symbol.Convolution(name='res4b17_branch2c', data=res4b17_branch2b_relu, num_filter=1024,
pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b17_branch2c = mx.symbol.BatchNorm(name='bn4b17_branch2c', data=res4b17_branch2c, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b17_branch2c = bn4b17_branch2c
res4b17 = mx.symbol.broadcast_add(name='res4b17', *[res4b16_relu, scale4b17_branch2c])
res4b17_relu = mx.symbol.Activation(name='res4b17_relu', data=res4b17, act_type='relu')
res4b18_branch2a = mx.symbol.Convolution(name='res4b18_branch2a', data=res4b17_relu, num_filter=256, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b18_branch2a = mx.symbol.BatchNorm(name='bn4b18_branch2a', data=res4b18_branch2a, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b18_branch2a = bn4b18_branch2a
res4b18_branch2a_relu = mx.symbol.Activation(name='res4b18_branch2a_relu', data=scale4b18_branch2a,
act_type='relu')
res4b18_branch2b = mx.symbol.Convolution(name='res4b18_branch2b', data=res4b18_branch2a_relu, num_filter=256,
pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b18_branch2b = mx.symbol.BatchNorm(name='bn4b18_branch2b', data=res4b18_branch2b, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b18_branch2b = bn4b18_branch2b
res4b18_branch2b_relu = mx.symbol.Activation(name='res4b18_branch2b_relu', data=scale4b18_branch2b,
act_type='relu')
res4b18_branch2c = mx.symbol.Convolution(name='res4b18_branch2c', data=res4b18_branch2b_relu, num_filter=1024,
pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b18_branch2c = mx.symbol.BatchNorm(name='bn4b18_branch2c', data=res4b18_branch2c, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b18_branch2c = bn4b18_branch2c
res4b18 = mx.symbol.broadcast_add(name='res4b18', *[res4b17_relu, scale4b18_branch2c])
res4b18_relu = mx.symbol.Activation(name='res4b18_relu', data=res4b18, act_type='relu')
res4b19_branch2a = mx.symbol.Convolution(name='res4b19_branch2a', data=res4b18_relu, num_filter=256, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b19_branch2a = mx.symbol.BatchNorm(name='bn4b19_branch2a', data=res4b19_branch2a, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b19_branch2a = bn4b19_branch2a
res4b19_branch2a_relu = mx.symbol.Activation(name='res4b19_branch2a_relu', data=scale4b19_branch2a,
act_type='relu')
res4b19_branch2b = mx.symbol.Convolution(name='res4b19_branch2b', data=res4b19_branch2a_relu, num_filter=256,
pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b19_branch2b = mx.symbol.BatchNorm(name='bn4b19_branch2b', data=res4b19_branch2b, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b19_branch2b = bn4b19_branch2b
res4b19_branch2b_relu = mx.symbol.Activation(name='res4b19_branch2b_relu', data=scale4b19_branch2b,
act_type='relu')
res4b19_branch2c = mx.symbol.Convolution(name='res4b19_branch2c', data=res4b19_branch2b_relu, num_filter=1024,
pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b19_branch2c = mx.symbol.BatchNorm(name='bn4b19_branch2c', data=res4b19_branch2c, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b19_branch2c = bn4b19_branch2c
res4b19 = mx.symbol.broadcast_add(name='res4b19', *[res4b18_relu, scale4b19_branch2c])
res4b19_relu = mx.symbol.Activation(name='res4b19_relu', data=res4b19, act_type='relu')
res4b20_branch2a = mx.symbol.Convolution(name='res4b20_branch2a', data=res4b19_relu, num_filter=256, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b20_branch2a = mx.symbol.BatchNorm(name='bn4b20_branch2a', data=res4b20_branch2a, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b20_branch2a = bn4b20_branch2a
res4b20_branch2a_relu = mx.symbol.Activation(name='res4b20_branch2a_relu', data=scale4b20_branch2a,
act_type='relu')
res4b20_branch2b = mx.symbol.Convolution(name='res4b20_branch2b', data=res4b20_branch2a_relu, num_filter=256,
pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b20_branch2b = mx.symbol.BatchNorm(name='bn4b20_branch2b', data=res4b20_branch2b, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b20_branch2b = bn4b20_branch2b
res4b20_branch2b_relu = mx.symbol.Activation(name='res4b20_branch2b_relu', data=scale4b20_branch2b,
act_type='relu')
res4b20_branch2c = mx.symbol.Convolution(name='res4b20_branch2c', data=res4b20_branch2b_relu, num_filter=1024,
pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b20_branch2c = mx.symbol.BatchNorm(name='bn4b20_branch2c', data=res4b20_branch2c, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b20_branch2c = bn4b20_branch2c
res4b20 = mx.symbol.broadcast_add(name='res4b20', *[res4b19_relu, scale4b20_branch2c])
res4b20_relu = mx.symbol.Activation(name='res4b20_relu', data=res4b20, act_type='relu')
res4b21_branch2a = mx.symbol.Convolution(name='res4b21_branch2a', data=res4b20_relu, num_filter=256, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b21_branch2a = mx.symbol.BatchNorm(name='bn4b21_branch2a', data=res4b21_branch2a, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b21_branch2a = bn4b21_branch2a
res4b21_branch2a_relu = mx.symbol.Activation(name='res4b21_branch2a_relu', data=scale4b21_branch2a,
act_type='relu')
res4b21_branch2b = mx.symbol.Convolution(name='res4b21_branch2b', data=res4b21_branch2a_relu, num_filter=256,
pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b21_branch2b = mx.symbol.BatchNorm(name='bn4b21_branch2b', data=res4b21_branch2b, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b21_branch2b = bn4b21_branch2b
res4b21_branch2b_relu = mx.symbol.Activation(name='res4b21_branch2b_relu', data=scale4b21_branch2b,
act_type='relu')
res4b21_branch2c = mx.symbol.Convolution(name='res4b21_branch2c', data=res4b21_branch2b_relu, num_filter=1024,
pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b21_branch2c = mx.symbol.BatchNorm(name='bn4b21_branch2c', data=res4b21_branch2c, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b21_branch2c = bn4b21_branch2c
res4b21 = mx.symbol.broadcast_add(name='res4b21', *[res4b20_relu, scale4b21_branch2c])
res4b21_relu = mx.symbol.Activation(name='res4b21_relu', data=res4b21, act_type='relu')
res4b22_branch2a = mx.symbol.Convolution(name='res4b22_branch2a', data=res4b21_relu, num_filter=256, pad=(0, 0),
kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b22_branch2a = mx.symbol.BatchNorm(name='bn4b22_branch2a', data=res4b22_branch2a, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b22_branch2a = bn4b22_branch2a
res4b22_branch2a_relu = mx.symbol.Activation(name='res4b22_branch2a_relu', data=scale4b22_branch2a,
act_type='relu')
if with_dpyramid:
res4b22_branch2b_offset = mx.symbol.Convolution(name='res4b22_branch2b_offset', data=res4b22_branch2a_relu,
num_filter=72, pad=(1, 1), kernel=(3, 3), stride=(1, 1))
res4b22_branch2b = mx.contrib.symbol.DeformableConvolution(name='res4b22_branch2b', data=res4b22_branch2a_relu,
offset=res4b22_branch2b_offset,
num_filter=256, pad=(1, 1), kernel=(3, 3),
num_deformable_group=4,
stride=(1, 1), no_bias=True)
else:
res4b22_branch2b = mx.symbol.Convolution(name='res4b22_branch2b', data=res4b22_branch2a_relu, num_filter=256,
pad=(1, 1), kernel=(3, 3), stride=(1, 1), no_bias=True)
bn4b22_branch2b = mx.symbol.BatchNorm(name='bn4b22_branch2b', data=res4b22_branch2b, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b22_branch2b = bn4b22_branch2b
res4b22_branch2b_relu = mx.symbol.Activation(name='res4b22_branch2b_relu', data=scale4b22_branch2b,
act_type='relu')
res4b22_branch2c = mx.symbol.Convolution(name='res4b22_branch2c', data=res4b22_branch2b_relu, num_filter=1024,
pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn4b22_branch2c = mx.symbol.BatchNorm(name='bn4b22_branch2c', data=res4b22_branch2c, use_global_stats=True,
fix_gamma=False, eps=eps)
scale4b22_branch2c = bn4b22_branch2c
res4b22 = mx.symbol.broadcast_add(name='res4b22', *[res4b21_relu, scale4b22_branch2c])
res4b22_relu = mx.symbol.Activation(name='res4b22_relu', data=res4b22, act_type='relu')
if with_dilated:
res5_stride = (1, 1)
res5_dilate = (2, 2)
else:
res5_stride = (2, 2)
res5_dilate = (1, 1)
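        # When with_dilated is set, res5 keeps the stride-16 feature resolution and its 3x3
        # convolutions use dilation (2, 2) to preserve an equivalent receptive field; otherwise
        # res5 downsamples by 2 as in the standard ResNet-101.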
# res5a-bottleneck
res5a_branch2a = mx.symbol.Convolution(name='res5a_branch2a', data=res4b22_relu, num_filter=512, pad=(0, 0), kernel=(1, 1), stride=res5_stride, no_bias=True)
bn5a_branch2a = mx.symbol.BatchNorm(name='bn5a_branch2a', data=res5a_branch2a, use_global_stats=True, fix_gamma=False, eps=eps)
scale5a_branch2a = bn5a_branch2a
res5a_branch2a_relu = mx.symbol.Activation(name='res5a_branch2a_relu', data=scale5a_branch2a, act_type='relu')
if with_dconv:
res5a_branch2b_offset = mx.symbol.Convolution(name='res5a_branch2b_offset', data=res5a_branch2a_relu, num_filter=72, pad=res5_dilate, kernel=(3, 3), dilate=res5_dilate)
res5a_branch2b = mx.contrib.symbol.DeformableConvolution(name='res5a_branch2b', data=res5a_branch2a_relu, offset=res5a_branch2b_offset, num_filter=512,
pad=res5_dilate, kernel=(3, 3), num_deformable_group=4, stride=(1, 1), dilate=res5_dilate, no_bias=True)
else:
res5a_branch2b = mx.symbol.Convolution(name='res5a_branch2b', data=res5a_branch2a_relu, num_filter=512, pad=res5_dilate,
kernel=(3, 3), stride=(1, 1), dilate=res5_dilate, no_bias=True)
bn5a_branch2b = mx.symbol.BatchNorm(name='bn5a_branch2b', data=res5a_branch2b, use_global_stats=True, fix_gamma=False, eps=eps)
scale5a_branch2b = bn5a_branch2b
res5a_branch2b_relu = mx.symbol.Activation(name='res5a_branch2b_relu', data=scale5a_branch2b, act_type='relu')
res5a_branch2c = mx.symbol.Convolution(name='res5a_branch2c', data=res5a_branch2b_relu, num_filter=2048, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn5a_branch2c = mx.symbol.BatchNorm(name='bn5a_branch2c', data=res5a_branch2c, use_global_stats=True, fix_gamma=False, eps=eps)
scale5a_branch2c = bn5a_branch2c
# res5a-shortcut
res5a_branch1 = mx.symbol.Convolution(name='res5a_branch1', data=res4b22_relu, num_filter=2048, pad=(0, 0), kernel=(1, 1), stride=res5_stride, no_bias=True)
bn5a_branch1 = mx.symbol.BatchNorm(name='bn5a_branch1', data=res5a_branch1, use_global_stats=True, fix_gamma=False, eps=eps)
scale5a_branch1 = bn5a_branch1
res5a = mx.symbol.broadcast_add(name='res5a', *[scale5a_branch1, scale5a_branch2c])
res5a_relu = mx.symbol.Activation(name='res5a_relu', data=res5a, act_type='relu')
# res5b-bottleneck
res5b_branch2a = mx.symbol.Convolution(name='res5b_branch2a', data=res5a_relu, num_filter=512, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn5b_branch2a = mx.symbol.BatchNorm(name='bn5b_branch2a', data=res5b_branch2a, use_global_stats=True, fix_gamma=False, eps=eps)
scale5b_branch2a = bn5b_branch2a
res5b_branch2a_relu = mx.symbol.Activation(name='res5b_branch2a_relu', data=scale5b_branch2a, act_type='relu')
if with_dconv:
res5b_branch2b_offset = mx.symbol.Convolution(name='res5b_branch2b_offset', data=res5b_branch2a_relu, num_filter=72, pad=res5_dilate, kernel=(3, 3), dilate=res5_dilate)
res5b_branch2b = mx.contrib.symbol.DeformableConvolution(name='res5b_branch2b', data=res5b_branch2a_relu, offset=res5b_branch2b_offset, num_filter=512,
pad=res5_dilate, kernel=(3, 3), num_deformable_group=4, dilate=res5_dilate, no_bias=True)
else:
res5b_branch2b = mx.symbol.Convolution(name='res5b_branch2b', data=res5b_branch2a_relu, num_filter=512, pad=res5_dilate,
kernel=(3, 3), stride=(1, 1), dilate=res5_dilate, no_bias=True)
bn5b_branch2b = mx.symbol.BatchNorm(name='bn5b_branch2b', data=res5b_branch2b, use_global_stats=True, fix_gamma=False, eps=eps)
scale5b_branch2b = bn5b_branch2b
res5b_branch2b_relu = mx.symbol.Activation(name='res5b_branch2b_relu', data=scale5b_branch2b, act_type='relu')
res5b_branch2c = mx.symbol.Convolution(name='res5b_branch2c', data=res5b_branch2b_relu, num_filter=2048, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn5b_branch2c = mx.symbol.BatchNorm(name='bn5b_branch2c', data=res5b_branch2c, use_global_stats=True, fix_gamma=False, eps=eps)
scale5b_branch2c = bn5b_branch2c
# res5b-shortcut
res5b = mx.symbol.broadcast_add(name='res5b', *[res5a_relu, scale5b_branch2c])
res5b_relu = mx.symbol.Activation(name='res5b_relu', data=res5b, act_type='relu')
# res5c-bottleneck
res5c_branch2a = mx.symbol.Convolution(name='res5c_branch2a', data=res5b_relu, num_filter=512, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn5c_branch2a = mx.symbol.BatchNorm(name='bn5c_branch2a', data=res5c_branch2a, use_global_stats=True,
fix_gamma=False, eps=eps)
scale5c_branch2a = bn5c_branch2a
res5c_branch2a_relu = mx.symbol.Activation(name='res5c_branch2a_relu', data=scale5c_branch2a, act_type='relu')
if with_dconv:
res5c_branch2b_offset = mx.symbol.Convolution(name='res5c_branch2b_offset', data=res5c_branch2a_relu, num_filter=72, pad=res5_dilate, kernel=(3, 3), dilate=res5_dilate)
res5c_branch2b = mx.contrib.symbol.DeformableConvolution(name='res5c_branch2b', data=res5c_branch2a_relu, offset=res5c_branch2b_offset, num_filter=512,
pad=res5_dilate, kernel=(3, 3), num_deformable_group=4, dilate=res5_dilate, no_bias=True)
else:
res5c_branch2b = mx.symbol.Convolution(name='res5c_branch2b', data=res5c_branch2a_relu, num_filter=512, pad=res5_dilate,
kernel=(3, 3), stride=(1, 1), dilate=res5_dilate, no_bias=True)
bn5c_branch2b = mx.symbol.BatchNorm(name='bn5c_branch2b', data=res5c_branch2b, use_global_stats=True, fix_gamma=False, eps=eps)
scale5c_branch2b = bn5c_branch2b
res5c_branch2b_relu = mx.symbol.Activation(name='res5c_branch2b_relu', data=scale5c_branch2b, act_type='relu')
res5c_branch2c = mx.symbol.Convolution(name='res5c_branch2c', data=res5c_branch2b_relu, num_filter=2048, pad=(0, 0), kernel=(1, 1), stride=(1, 1), no_bias=True)
bn5c_branch2c = mx.symbol.BatchNorm(name='bn5c_branch2c', data=res5c_branch2c, use_global_stats=True, fix_gamma=False, eps=eps)
scale5c_branch2c = bn5c_branch2c
# res5c-shortcut
res5c = mx.symbol.broadcast_add(name='res5c', *[res5b_relu, scale5c_branch2c])
res5c_relu = mx.symbol.Activation(name='res5c_relu', data=res5c, act_type='relu')
return res2c_relu, res3b3_relu, res4b22_relu, res5c_relu
def get_fpn_feature(self, c2, c3, c4, c5, feature_dim=256):
# lateral connection
fpn_p5_1x1 = mx.symbol.Convolution(data=c5, kernel=(1, 1), pad=(0, 0), stride=(1, 1), num_filter=feature_dim, name='fpn_p5_1x1')
fpn_p4_1x1 = mx.symbol.Convolution(data=c4, kernel=(1, 1), pad=(0, 0), stride=(1, 1), num_filter=feature_dim, name='fpn_p4_1x1')
fpn_p3_1x1 = mx.symbol.Convolution(data=c3, kernel=(1, 1), pad=(0, 0), stride=(1, 1), num_filter=feature_dim, name='fpn_p3_1x1')
fpn_p2_1x1 = mx.symbol.Convolution(data=c2, kernel=(1, 1), pad=(0, 0), stride=(1, 1), num_filter=feature_dim, name='fpn_p2_1x1')
# top-down connection
fpn_p5_upsample = mx.symbol.UpSampling(fpn_p5_1x1, scale=2, sample_type='nearest', name='fpn_p5_upsample')
fpn_p4_plus = mx.sym.ElementWiseSum(*[fpn_p5_upsample, fpn_p4_1x1], name='fpn_p4_sum')
fpn_p4_upsample = mx.symbol.UpSampling(fpn_p4_plus, scale=2, sample_type='nearest', name='fpn_p4_upsample')
fpn_p3_plus = mx.sym.ElementWiseSum(*[fpn_p4_upsample, fpn_p3_1x1], name='fpn_p3_sum')
fpn_p3_upsample = mx.symbol.UpSampling(fpn_p3_plus, scale=2, sample_type='nearest', name='fpn_p3_upsample')
fpn_p2_plus = mx.sym.ElementWiseSum(*[fpn_p3_upsample, fpn_p2_1x1], name='fpn_p2_sum')
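        # Top-down pathway: each fused map adds the 2x nearest-neighbour upsampled coarser level
        # to the lateral 1x1 projection, propagating semantics from P5 down to P2.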
# FPN feature
fpn_p6 = mx.sym.Convolution(data=c5, kernel=(3, 3), pad=(1, 1), stride=(2, 2), num_filter=feature_dim, name='fpn_p6')
fpn_p5 = mx.symbol.Convolution(data=fpn_p5_1x1, kernel=(3, 3), pad=(1, 1), stride=(1, 1), num_filter=feature_dim, name='fpn_p5')
fpn_p4 = mx.symbol.Convolution(data=fpn_p4_plus, kernel=(3, 3), pad=(1, 1), stride=(1, 1), num_filter=feature_dim, name='fpn_p4')
fpn_p3 = mx.symbol.Convolution(data=fpn_p3_plus, kernel=(3, 3), pad=(1, 1), stride=(1, 1), num_filter=feature_dim, name='fpn_p3')
fpn_p2 = mx.symbol.Convolution(data=fpn_p2_plus, kernel=(3, 3), pad=(1, 1), stride=(1, 1), num_filter=feature_dim, name='fpn_p2')
return fpn_p2, fpn_p3, fpn_p4, fpn_p5, fpn_p6
def get_rpn_subnet(self, data, num_anchors, suffix):
rpn_conv = mx.sym.Convolution(data=data, kernel=(3, 3), pad=(1, 1), num_filter=512, name='rpn_conv_' + suffix,
weight=self.shared_param_dict['rpn_conv_weight'], bias=self.shared_param_dict['rpn_conv_bias'])
rpn_relu = mx.sym.Activation(data=rpn_conv, act_type='relu', name='rpn_relu_' + suffix)
rpn_cls_score = mx.sym.Convolution(data=rpn_relu, kernel=(1, 1), pad=(0, 0), num_filter=2 * num_anchors, name='rpn_cls_score_' + suffix,
weight=self.shared_param_dict['rpn_cls_score_weight'], bias=self.shared_param_dict['rpn_cls_score_bias'])
rpn_bbox_pred = mx.sym.Convolution(data=rpn_relu, kernel=(1, 1), pad=(0, 0), num_filter=4 * num_anchors, name='rpn_bbox_pred_' + suffix,
weight=self.shared_param_dict['rpn_bbox_pred_weight'], bias=self.shared_param_dict['rpn_bbox_pred_bias'])
# n x (2*A) x H x W => n x 2 x (A*H*W)
rpn_cls_score_t1 = mx.sym.Reshape(data=rpn_cls_score, shape=(0, 2, -1, 0), name='rpn_cls_score_t1_' + suffix)
rpn_cls_score_t2 = mx.sym.Reshape(data=rpn_cls_score_t1, shape=(0, 2, -1), name='rpn_cls_score_t2_' + suffix)
rpn_cls_prob = mx.sym.SoftmaxActivation(data=rpn_cls_score_t1, mode='channel', name='rpn_cls_prob_' + suffix)
rpn_cls_prob_t = mx.sym.Reshape(data=rpn_cls_prob, shape=(0, 2 * num_anchors, -1, 0), name='rpn_cls_prob_t_' + suffix)
rpn_bbox_pred_t = mx.sym.Reshape(data=rpn_bbox_pred, shape=(0, 0, -1), name='rpn_bbox_pred_t_' + suffix)
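        # rpn_cls_score_t2 feeds the concatenated RPN softmax loss, the flattened bbox prediction
        # feeds the smooth-L1 regression loss, and the per-level probabilities plus raw bbox
        # predictions are consumed by the pyramid proposal op in get_symbol.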
return rpn_cls_score_t2, rpn_cls_prob_t, rpn_bbox_pred_t, rpn_bbox_pred
def get_deformable_roipooling(self, name, data, rois, output_dim, spatial_scale, param_name, group_size=1, pooled_size=7,
sample_per_part=4, part_size=7):
offset = mx.contrib.sym.DeformablePSROIPooling(name='offset_' + name + '_t', data=data, rois=rois, group_size=group_size, pooled_size=pooled_size,
sample_per_part=sample_per_part, no_trans=True, part_size=part_size, output_dim=output_dim,
spatial_scale=spatial_scale)
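        # First pass (no_trans=True) only pools position-sensitive features; the FC layer below
        # turns them into per-part (x, y) offsets for the second, deformable pooling pass.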
offset = mx.sym.FullyConnected(name='offset_' + name, data=offset, num_hidden=part_size * part_size * 2, lr_mult=0.01,
weight=self.shared_param_dict['offset_' + param_name + '_weight'], bias=self.shared_param_dict['offset_' + param_name + '_bias'])
offset_reshape = mx.sym.Reshape(data=offset, shape=(-1, 2, part_size, part_size), name='offset_reshape_' + name)
output = mx.contrib.sym.DeformablePSROIPooling(name='deformable_roi_pool_' + name, data=data, rois=rois, trans=offset_reshape, group_size=group_size,
pooled_size=pooled_size, sample_per_part=sample_per_part, no_trans=False, part_size=part_size, output_dim=output_dim,
spatial_scale=spatial_scale, trans_std=0.1)
return output
def get_symbol(self, cfg, is_train=True):
        # config aliases for convenience
num_classes = cfg.dataset.NUM_CLASSES
num_reg_classes = (2 if cfg.CLASS_AGNOSTIC else num_classes)
data = mx.sym.Variable(name="data")
im_info = mx.sym.Variable(name="im_info")
# shared convolutional layers
res2, res3, res4, res5 = self.get_resnet_backbone(data, with_dpyramid=True, with_dconv=True)
fpn_p2, fpn_p3, fpn_p4, fpn_p5, fpn_p6 = self.get_fpn_feature(res2, res3, res4, res5)
rpn_cls_score_p2, rpn_prob_p2, rpn_bbox_loss_p2, rpn_bbox_pred_p2 = self.get_rpn_subnet(fpn_p2, cfg.network.NUM_ANCHORS, 'p2')
rpn_cls_score_p3, rpn_prob_p3, rpn_bbox_loss_p3, rpn_bbox_pred_p3 = self.get_rpn_subnet(fpn_p3, cfg.network.NUM_ANCHORS, 'p3')
rpn_cls_score_p4, rpn_prob_p4, rpn_bbox_loss_p4, rpn_bbox_pred_p4 = self.get_rpn_subnet(fpn_p4, cfg.network.NUM_ANCHORS, 'p4')
rpn_cls_score_p5, rpn_prob_p5, rpn_bbox_loss_p5, rpn_bbox_pred_p5 = self.get_rpn_subnet(fpn_p5, cfg.network.NUM_ANCHORS, 'p5')
rpn_cls_score_p6, rpn_prob_p6, rpn_bbox_loss_p6, rpn_bbox_pred_p6 = self.get_rpn_subnet(fpn_p6, cfg.network.NUM_ANCHORS, 'p6')
rpn_cls_prob_dict = {
'rpn_cls_prob_stride64': rpn_prob_p6,
'rpn_cls_prob_stride32': rpn_prob_p5,
'rpn_cls_prob_stride16': rpn_prob_p4,
'rpn_cls_prob_stride8': rpn_prob_p3,
'rpn_cls_prob_stride4': rpn_prob_p2,
}
rpn_bbox_pred_dict = {
'rpn_bbox_pred_stride64': rpn_bbox_pred_p6,
'rpn_bbox_pred_stride32': rpn_bbox_pred_p5,
'rpn_bbox_pred_stride16': rpn_bbox_pred_p4,
'rpn_bbox_pred_stride8': rpn_bbox_pred_p3,
'rpn_bbox_pred_stride4': rpn_bbox_pred_p2,
}
        arg_dict = dict(list(rpn_cls_prob_dict.items()) + list(rpn_bbox_pred_dict.items()))
if is_train:
rpn_label = mx.sym.Variable(name='label')
rpn_bbox_target = mx.sym.Variable(name='bbox_target')
rpn_bbox_weight = mx.sym.Variable(name='bbox_weight')
gt_boxes = mx.sym.Variable(name="gt_boxes")
rpn_cls_score = mx.sym.Concat(rpn_cls_score_p2, rpn_cls_score_p3, rpn_cls_score_p4, rpn_cls_score_p5, rpn_cls_score_p6, dim=2)
rpn_bbox_loss = mx.sym.Concat(rpn_bbox_loss_p2, rpn_bbox_loss_p3, rpn_bbox_loss_p4, rpn_bbox_loss_p5, rpn_bbox_loss_p6, dim=2)
# RPN classification loss
rpn_cls_output = mx.sym.SoftmaxOutput(data=rpn_cls_score, label=rpn_label, multi_output=True, normalization='valid',
use_ignore=True, ignore_label=-1, name='rpn_cls_prob')
# bounding box regression
rpn_bbox_loss = rpn_bbox_weight * mx.sym.smooth_l1(name='rpn_bbox_loss_l1', scalar=3.0, data=(rpn_bbox_loss - rpn_bbox_target))
rpn_bbox_loss = mx.sym.MakeLoss(name='rpn_bbox_loss', data=rpn_bbox_loss, grad_scale=1.0 / cfg.TRAIN.RPN_BATCH_SIZE)
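            # In mx.sym.smooth_l1 the `scalar` argument acts as sigma (the loss switches from
            # quadratic to linear around |x| = 1/sigma^2), and grad_scale normalizes the loss
            # by the RPN batch size.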
aux_dict = {
'op_type': 'pyramid_proposal', 'name': 'rois',
'im_info': im_info, 'feat_stride': tuple(cfg.network.RPN_FEAT_STRIDE),
'scales': tuple(cfg.network.ANCHOR_SCALES), 'ratios': tuple(cfg.network.ANCHOR_RATIOS),
'rpn_pre_nms_top_n': cfg.TRAIN.RPN_PRE_NMS_TOP_N, 'rpn_post_nms_top_n': cfg.TRAIN.RPN_POST_NMS_TOP_N,
'threshold': cfg.TRAIN.RPN_NMS_THRESH, 'rpn_min_size': cfg.TRAIN.RPN_MIN_SIZE
}
# ROI proposal
            rois = mx.sym.Custom(**dict(list(arg_dict.items()) + list(aux_dict.items())))
# ROI proposal target
gt_boxes_reshape = mx.sym.Reshape(data=gt_boxes, shape=(-1, 5), name='gt_boxes_reshape')
rois, label, bbox_target, bbox_weight \
= mx.sym.Custom(rois=rois, gt_boxes=gt_boxes_reshape, op_type='proposal_target', num_classes=num_reg_classes, batch_images=cfg.TRAIN.BATCH_IMAGES,
batch_rois=cfg.TRAIN.BATCH_ROIS, cfg=cPickle.dumps(cfg), fg_fraction=cfg.TRAIN.FG_FRACTION)
else:
aux_dict = {
'op_type': 'pyramid_proposal', 'name': 'rois',
'im_info': im_info, 'feat_stride': tuple(cfg.network.RPN_FEAT_STRIDE),
'scales': tuple(cfg.network.ANCHOR_SCALES), 'ratios': tuple(cfg.network.ANCHOR_RATIOS),
'rpn_pre_nms_top_n': cfg.TEST.RPN_PRE_NMS_TOP_N, 'rpn_post_nms_top_n': cfg.TEST.RPN_POST_NMS_TOP_N,
'threshold': cfg.TEST.RPN_NMS_THRESH, 'rpn_min_size': cfg.TEST.RPN_MIN_SIZE
}
# ROI proposal
            rois = mx.sym.Custom(**dict(list(arg_dict.items()) + list(aux_dict.items())))
offset_p2_weight = mx.sym.Variable(name='offset_p2_weight', dtype=np.float32, lr_mult=0.01)
offset_p3_weight = mx.sym.Variable(name='offset_p3_weight', dtype=np.float32, lr_mult=0.01)
offset_p4_weight = mx.sym.Variable(name='offset_p4_weight', dtype=np.float32, lr_mult=0.01)
offset_p5_weight = mx.sym.Variable(name='offset_p5_weight', dtype=np.float32, lr_mult=0.01)
offset_p2_bias = mx.sym.Variable(name='offset_p2_bias', dtype=np.float32, lr_mult=0.01)
offset_p3_bias = mx.sym.Variable(name='offset_p3_bias', dtype=np.float32, lr_mult=0.01)
offset_p4_bias = mx.sym.Variable(name='offset_p4_bias', dtype=np.float32, lr_mult=0.01)
offset_p5_bias = mx.sym.Variable(name='offset_p5_bias', dtype=np.float32, lr_mult=0.01)
roi_pool = mx.symbol.Custom(data_p2=fpn_p2, data_p3=fpn_p3, data_p4=fpn_p4, data_p5=fpn_p5,
offset_weight_p2=offset_p2_weight, offset_bias_p2=offset_p2_bias,
offset_weight_p3=offset_p3_weight, offset_bias_p3=offset_p3_bias,
offset_weight_p4=offset_p4_weight, offset_bias_p4=offset_p4_bias,
offset_weight_p5=offset_p5_weight, offset_bias_p5=offset_p5_bias,
rois=rois, op_type='fpn_roi_pooling', name='fpn_roi_pooling', with_deformable=True)
# 2 fc
fc_new_1 = mx.symbol.FullyConnected(name='fc_new_1', data=roi_pool, num_hidden=1024)
fc_new_1_relu = mx.sym.Activation(data=fc_new_1, act_type='relu', name='fc_new_1_relu')
fc_new_2 = mx.symbol.FullyConnected(name='fc_new_2', data=fc_new_1_relu, num_hidden=1024)
fc_new_2_relu = mx.sym.Activation(data=fc_new_2, act_type='relu', name='fc_new_2_relu')
# cls_score/bbox_pred
cls_score = mx.symbol.FullyConnected(name='cls_score', data=fc_new_2_relu, num_hidden=num_classes)
bbox_pred = mx.symbol.FullyConnected(name='bbox_pred', data=fc_new_2_relu, num_hidden=num_reg_classes * 4)
if is_train:
if cfg.TRAIN.ENABLE_OHEM:
labels_ohem, bbox_weights_ohem = mx.sym.Custom(op_type='BoxAnnotatorOHEM', num_classes=num_classes,
num_reg_classes=num_reg_classes, roi_per_img=cfg.TRAIN.BATCH_ROIS_OHEM,
cls_score=cls_score, bbox_pred=bbox_pred, labels=label,
bbox_targets=bbox_target, bbox_weights=bbox_weight)
cls_prob = mx.sym.SoftmaxOutput(name='cls_prob', data=cls_score, label=labels_ohem, normalization='valid', use_ignore=True, ignore_label=-1)
bbox_loss_ = bbox_weights_ohem * mx.sym.smooth_l1(name='bbox_loss_', scalar=1.0, data=(bbox_pred - bbox_target))
bbox_loss = mx.sym.MakeLoss(name='bbox_loss', data=bbox_loss_, grad_scale=1.0 / cfg.TRAIN.BATCH_ROIS_OHEM)
rcnn_label = labels_ohem
else:
cls_prob = mx.sym.SoftmaxOutput(name='cls_prob', data=cls_score, label=label, normalization='valid')
bbox_loss_ = bbox_weight * mx.sym.smooth_l1(name='bbox_loss_', scalar=1.0, data=(bbox_pred - bbox_target))
bbox_loss = mx.sym.MakeLoss(name='bbox_loss', data=bbox_loss_, grad_scale=1.0 / cfg.TRAIN.BATCH_ROIS)
rcnn_label = label
# reshape output
rcnn_label = mx.sym.Reshape(data=rcnn_label, shape=(cfg.TRAIN.BATCH_IMAGES, -1), name='label_reshape')
cls_prob = mx.sym.Reshape(data=cls_prob, shape=(cfg.TRAIN.BATCH_IMAGES, -1, num_classes), name='cls_prob_reshape')
bbox_loss = mx.sym.Reshape(data=bbox_loss, shape=(cfg.TRAIN.BATCH_IMAGES, -1, 4 * num_reg_classes), name='bbox_loss_reshape')
group = mx.sym.Group([rpn_cls_output, rpn_bbox_loss, cls_prob, bbox_loss, mx.sym.BlockGrad(rcnn_label)])
else:
cls_prob = mx.sym.SoftmaxActivation(name='cls_prob', data=cls_score)
cls_prob = mx.sym.Reshape(data=cls_prob, shape=(cfg.TEST.BATCH_IMAGES, -1, num_classes), name='cls_prob_reshape')
bbox_pred = mx.sym.Reshape(data=bbox_pred, shape=(cfg.TEST.BATCH_IMAGES, -1, 4 * num_reg_classes), name='bbox_pred_reshape')
group = mx.sym.Group([rois, cls_prob, bbox_pred])
self.sym = group
return group
def init_weight_rcnn(self, cfg, arg_params, aux_params):
arg_params['fc_new_1_weight'] = mx.random.normal(0, 0.01, shape=self.arg_shape_dict['fc_new_1_weight'])
arg_params['fc_new_1_bias'] = mx.nd.zeros(shape=self.arg_shape_dict['fc_new_1_bias'])
arg_params['fc_new_2_weight'] = mx.random.normal(0, 0.01, shape=self.arg_shape_dict['fc_new_2_weight'])
arg_params['fc_new_2_bias'] = mx.nd.zeros(shape=self.arg_shape_dict['fc_new_2_bias'])
arg_params['cls_score_weight'] = mx.random.normal(0, 0.01, shape=self.arg_shape_dict['cls_score_weight'])
arg_params['cls_score_bias'] = mx.nd.zeros(shape=self.arg_shape_dict['cls_score_bias'])
arg_params['bbox_pred_weight'] = mx.random.normal(0, 0.01, shape=self.arg_shape_dict['bbox_pred_weight'])
arg_params['bbox_pred_bias'] = mx.nd.zeros(shape=self.arg_shape_dict['bbox_pred_bias'])
def init_deformable_convnet(self, cfg, arg_params, aux_params):
arg_params['res5a_branch2b_offset_weight'] = mx.nd.zeros(shape=self.arg_shape_dict['res5a_branch2b_offset_weight'])
arg_params['res5a_branch2b_offset_bias'] = mx.nd.zeros(shape=self.arg_shape_dict['res5a_branch2b_offset_bias'])
arg_params['res5b_branch2b_offset_weight'] = mx.nd.zeros(shape=self.arg_shape_dict['res5b_branch2b_offset_weight'])
arg_params['res5b_branch2b_offset_bias'] = mx.nd.zeros(shape=self.arg_shape_dict['res5b_branch2b_offset_bias'])
arg_params['res5c_branch2b_offset_weight'] = mx.nd.zeros(shape=self.arg_shape_dict['res5c_branch2b_offset_weight'])
arg_params['res5c_branch2b_offset_bias'] = mx.nd.zeros(shape=self.arg_shape_dict['res5c_branch2b_offset_bias'])
arg_params['res3b3_branch2b_offset_weight'] = mx.nd.zeros(shape=self.arg_shape_dict['res3b3_branch2b_offset_weight'])
arg_params['res3b3_branch2b_offset_bias'] = mx.nd.zeros(shape=self.arg_shape_dict['res3b3_branch2b_offset_bias'])
arg_params['res4b22_branch2b_offset_weight'] = mx.nd.zeros(shape=self.arg_shape_dict['res4b22_branch2b_offset_weight'])
arg_params['res4b22_branch2b_offset_bias'] = mx.nd.zeros(shape=self.arg_shape_dict['res4b22_branch2b_offset_bias'])
def init_weight_fpn(self, cfg, arg_params, aux_params):
arg_params['fpn_p6_weight'] = mx.random.normal(0, 0.01, shape=self.arg_shape_dict['fpn_p6_weight'])
arg_params['fpn_p6_bias'] = mx.nd.zeros(shape=self.arg_shape_dict['fpn_p6_bias'])
arg_params['fpn_p5_weight'] = mx.random.normal(0, 0.01, shape=self.arg_shape_dict['fpn_p5_weight'])
arg_params['fpn_p5_bias'] = mx.nd.zeros(shape=self.arg_shape_dict['fpn_p5_bias'])
arg_params['fpn_p4_weight'] = mx.random.normal(0, 0.01, shape=self.arg_shape_dict['fpn_p4_weight'])
arg_params['fpn_p4_bias'] = mx.nd.zeros(shape=self.arg_shape_dict['fpn_p4_bias'])
arg_params['fpn_p3_weight'] = mx.random.normal(0, 0.01, shape=self.arg_shape_dict['fpn_p3_weight'])
arg_params['fpn_p3_bias'] = mx.nd.zeros(shape=self.arg_shape_dict['fpn_p3_bias'])
arg_params['fpn_p2_weight'] = mx.random.normal(0, 0.01, shape=self.arg_shape_dict['fpn_p2_weight'])
arg_params['fpn_p2_bias'] = mx.nd.zeros(shape=self.arg_shape_dict['fpn_p2_bias'])
arg_params['fpn_p5_1x1_weight'] = mx.random.normal(0, 0.01, shape=self.arg_shape_dict['fpn_p5_1x1_weight'])
arg_params['fpn_p5_1x1_bias'] = mx.nd.zeros(shape=self.arg_shape_dict['fpn_p5_1x1_bias'])
arg_params['fpn_p4_1x1_weight'] = mx.random.normal(0, 0.01, shape=self.arg_shape_dict['fpn_p4_1x1_weight'])
arg_params['fpn_p4_1x1_bias'] = mx.nd.zeros(shape=self.arg_shape_dict['fpn_p4_1x1_bias'])
arg_params['fpn_p3_1x1_weight'] = mx.random.normal(0, 0.01, shape=self.arg_shape_dict['fpn_p3_1x1_weight'])
arg_params['fpn_p3_1x1_bias'] = mx.nd.zeros(shape=self.arg_shape_dict['fpn_p3_1x1_bias'])
arg_params['fpn_p2_1x1_weight'] = mx.random.normal(0, 0.01, shape=self.arg_shape_dict['fpn_p2_1x1_weight'])
arg_params['fpn_p2_1x1_bias'] = mx.nd.zeros(shape=self.arg_shape_dict['fpn_p2_1x1_bias'])
def init_weight(self, cfg, arg_params, aux_params):
# for name in self.shared_param_list:
# if 'offset' in name:
# arg_params[name + '_weight'] = mx.nd.zeros(shape=self.arg_shape_dict[name + '_weight'])
# else:
# arg_params[name + '_weight'] = mx.random.normal(0, 0.01, shape=self.arg_shape_dict[name + '_weight'])
# arg_params[name + '_bias'] = mx.nd.zeros(shape=self.arg_shape_dict[name + '_bias'])
# self.init_deformable_convnet(cfg, arg_params, aux_params)
# self.init_weight_rcnn(cfg, arg_params, aux_params)
# self.init_weight_fpn(cfg, arg_params, aux_params)
arg_params2, aux_params2 = {}, {}
for name in self.shared_param_list:
if 'offset' in name:
arg_params2[name + '_weight'] = mx.nd.zeros(shape=self.arg_shape_dict[name + '_weight'])
else:
arg_params2[name + '_weight'] = mx.random.normal(0, 0.01, shape=self.arg_shape_dict[name + '_weight'])
arg_params2[name + '_bias'] = mx.nd.zeros(shape=self.arg_shape_dict[name + '_bias'])
self.init_deformable_convnet(cfg, arg_params2, aux_params2)
self.init_weight_rcnn(cfg, arg_params2, aux_params2)
self.init_weight_fpn(cfg, arg_params2, aux_params2)
for k in arg_params2:
if (k not in arg_params) or (arg_params[k].shape != arg_params2[k].shape):
arg_params[k] = arg_params2[k]
for k in aux_params2:
if k not in aux_params:
aux_params[k] = aux_params2[k] | 84.961203 | 180 | 0.616405 | [
"Apache-2.0"
] | chi3x10/RepMet | fpn/symbols/resnet_v1_101_fpn_dcn_rcnn.py | 87,595 | Python |
from . import GB, HDB
from typing import Literal
class Client:
def __init__(self, t: Literal["gb", "hbba", "dbba"]):
self.type = t
def create(self):
if self.type == "gb":
return GB()
elif self.type == "hb":
return HDB("hbba")
elif self.type == "db":
return HDB("dbba")
| 21.75 | 57 | 0.511494 | [
"Apache-2.0"
] | Lasx/gb688_downloader | standard/client.py | 348 | Python |
import sys
from CGMFtk import histories as fh
if __name__ == "__main__":
hist = fh.Histories(sys.argv[1])
print(len(hist.getFissionHistories()))
| 22 | 42 | 0.714286 | [
"MIT"
] | beykyle/omp-uq | analysis/tools/count_histories.py | 154 | Python |
# -*- coding: utf-8 -*-
# Copyright (c) 2022, bahaa and Contributors
# See license.txt
from __future__ import unicode_literals
# import frappe
import unittest
class TestAlaqoal(unittest.TestCase):
pass
| 18.636364 | 44 | 0.756098 | [
"MIT"
] | bahaaabed/AumAlqura | calender/calender/doctype/alaqoal/test_alaqoal.py | 205 | Python |
# <a href="https://colab.research.google.com/github/couyang24/general_learning-tiffany/blob/master/Titanic/analysis/colab_titanic_main.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# Need to mount Google Drive or upload kaggle.json
from google.colab import drive
drive.mount("/content/drive")
# !mkdir ~/.kaggle/
# !cp drive/My\ Drive/input/kaggle.json ~/.kaggle/
# !kaggle competitions download -c titanic
# Load Package
# import numpy as np
import pandas as pd
import seaborn as sns
import featuretools
import featuretools as ft
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.impute import SimpleImputer, MissingIndicator
from sklearn.pipeline import Pipeline, FeatureUnion
from sklearn.preprocessing import (
OneHotEncoder,
StandardScaler,
LabelEncoder,
OrdinalEncoder,
)
from sklearn.compose import ColumnTransformer
from sklearn.model_selection import cross_val_score, RandomizedSearchCV
# Load data
train_df = pd.read_csv("train.csv")
test_df = pd.read_csv("test.csv")
# Save data
target = train_df[["Survived"]]
submission = test_df[["PassengerId"]]
# Join and Clean
combine = pd.concat([train_df, test_df])
# EDA
combine.info()
combine.columns
mapping = {
"Mlle": "Miss",
"Major": "Mr",
"Col": "Mr",
"Sir": "Mr",
"Don": "Mr",
"Mme": "Miss",
"Jonkheer": "Mr",
"Lady": "Mrs",
"Capt": "Mr",
"Countess": "Mrs",
"Ms": "Miss",
"Dona": "Mrs",
}
combine["Title"] = combine.Name.apply(
lambda x: x.split(".")[0].split(",")[1].strip()
).replace(mapping)
combine.drop(["Cabin", "Ticket", "Name"], axis=1, inplace=True)
# +
# combine['Sex2'] = combine['Sex'].apply(lambda x: 0 if x=='female' else 1)
# +
# class ModifiedLabelEncoder(LabelEncoder):
# def fit_transform(self, y, *args, **kwargs):
# return super().fit_transform(y)
# def transform(self, y, *args, **kwargs):
# return super().transform(y)
# +
categorical_transformer = Pipeline(
steps=[
("imputer", SimpleImputer(strategy="most_frequent")),
("encode", OrdinalEncoder()),
]
)
numeric_transformer = Pipeline([("imputer", SimpleImputer(strategy="median")),])
# -
combine[["Sex", "Embarked", "Title"]] = categorical_transformer.fit_transform(
combine[["Sex", "Embarked", "Title"]]
)
combine[["Age", "Fare"]] = numeric_transformer.fit_transform(combine[["Age", "Fare"]])
# +
es = ft.EntitySet(id="titanic_data")
es = es.entity_from_dataframe(
entity_id="combine",
dataframe=combine.drop(["Survived"], axis=1),
variable_types={
"Embarked": ft.variable_types.Categorical,
"Sex": ft.variable_types.Boolean,
"Title": ft.variable_types.Categorical,
},
index="PassengerId",
)
es
# -
es = es.normalize_entity(
base_entity_id="combine", new_entity_id="Embarked", index="Embarked"
)
es = es.normalize_entity(base_entity_id="combine", new_entity_id="Sex", index="Sex")
es = es.normalize_entity(base_entity_id="combine", new_entity_id="Title", index="Title")
es = es.normalize_entity(
base_entity_id="combine", new_entity_id="Pclass", index="Pclass"
)
es = es.normalize_entity(base_entity_id="combine", new_entity_id="Parch", index="Parch")
es = es.normalize_entity(base_entity_id="combine", new_entity_id="SibSp", index="SibSp")
es
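# Each normalize_entity call above factors a categorical column out into its own entity, which
# lets Deep Feature Synthesis below build aggregation features (for example, per-Title or
# per-Pclass statistics).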
primitives = ft.list_primitives()
pd.options.display.max_colwidth = 100
primitives[primitives["type"] == "aggregation"].head(
primitives[primitives["type"] == "aggregation"].shape[0]
)
primitives[primitives["type"] == "transform"].head(
primitives[primitives["type"] == "transform"].shape[0]
)
features, feature_names = ft.dfs(
entityset=es,
target_entity="combine",
# trans_primitives=['subtract_numeric', 'add_numeric', 'divide_numeric', 'multiply_numeric'],
max_depth=2,
)
feature_names
len(feature_names)
features.isnull().sum()
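# ft.dfs (Deep Feature Synthesis) stacks aggregation and transform primitives across the entity
# set up to max_depth=2; any missing values it introduces are imputed by the pipeline below.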
class RemoveLowInfo(BaseEstimator, TransformerMixin):
def __init__(self, threshold):
self.threshold = threshold
def fit(self, X, y=None):
return self
def transform(self, X):
df = X.copy()
keep = [
column
for column in df.columns
if df[column].value_counts(normalize=True).reset_index(drop=True)[0]
< self.threshold
]
return df[keep]
from sklearn.preprocessing import OneHotEncoder, StandardScaler, FunctionTransformer
impute_median = FunctionTransformer(lambda x: x.fillna(x.median()), validate=False)
normalize = FunctionTransformer(lambda x: (x - x.mean()) / x.std(), validate=False)
from sklearn.decomposition import PCA
transformer = Pipeline(
[
("imputer", impute_median),
("removelowinfo", RemoveLowInfo(threshold=0.95)),
("scaler", normalize),
]
)
clean_features = transformer.fit_transform(features)
# !pip install catboost
from sklearn.linear_model import LogisticRegression, SGDClassifier
from sklearn.ensemble import (
RandomForestClassifier,
GradientBoostingClassifier,
AdaBoostClassifier,
BaggingClassifier,
VotingClassifier,
)
from sklearn.neural_network import MLPClassifier
from sklearn.svm import SVC, LinearSVC
from sklearn.naive_bayes import GaussianNB
from sklearn.neighbors import KNeighborsClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.decomposition import PCA
import xgboost as xgb
import lightgbm as lgb
import catboost as cgb
# +
methods = [
("logistic", LogisticRegression(solver="lbfgs")),
# ('sgd', SGDClassifier()),
("tree", DecisionTreeClassifier()),
("bag", BaggingClassifier()),
("xgb", xgb.XGBClassifier(max_depth=3)),
("lgb", lgb.LGBMClassifier(max_depth=3)),
# ('cgb', cgb.CatBoostClassifier(max_depth=3,silent=True)),
("ada", AdaBoostClassifier()),
("gbm", GradientBoostingClassifier()),
("rf", RandomForestClassifier(n_estimators=100)),
# ('svc', LinearSVC()),
# ('rbf', SVC()),
("nb", Pipeline([("pca", PCA()), ("gnb", GaussianNB())])),
("nn", MLPClassifier()),
("knn", KNeighborsClassifier()),
]
ensemble = VotingClassifier(
methods,
voting="soft",
# weights=[1,1,1,1,2,2,1,1],
# flatten_transform=True,
)
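# With voting="soft" the ensemble averages the predicted class probabilities of all base models;
# the commented-out `weights` argument would bias that average toward chosen models.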
clf = Pipeline(
[
# ('transformer', transformer),
("ensemble", ensemble),
]
)
clf.fit(clean_features.iloc[: train_df.shape[0], :], target)
# -
submission["Survived"] = pd.DataFrame(
clf.predict(clean_features.iloc[train_df.shape[0] :, :])
)
print(submission.dtypes)
submission.to_csv("titanic_submission.csv", index=False)
| 26.425197 | 252 | 0.674762 | [
"Apache-2.0"
] | couyang24/general_learning | Titanic/analysis/colab_titanic_main.py | 6,712 | Python |
# Generated by Django 2.1.3 on 2018-11-24 07:01
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Job',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('image', models.ImageField(upload_to='images/')),
('summary', models.CharField(max_length=200)),
],
),
]
| 24.043478 | 114 | 0.56962 | [
"MIT"
] | muhammadh-s/web-portfolio | jobs/migrations/0001_initial.py | 553 | Python |
import os
import pandas as pd
import re
def sort_human(l):
"""Sort a list of strings by numerical."""
def convert(text): return float(text) if text.isdigit() else text
def alphanum(key): return [convert(c)
for c in re.split('([-+]?[0-9]*\.?[0-9]*)', key)]
l.sort(key=alphanum)
return l
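# Illustrative example (assuming a shared prefix): sort_human(['run_10', 'run_2', 'run_1'])
# returns ['run_1', 'run_2', 'run_10'] rather than the plain lexicographic order.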
def data_merge_by_batch(parent_directory, verbose=True):
"""Merge a set of parameters.csv files into one.
This is intended for use with batch processes from Legion, with each batch
    being 1000 runs long and numbered with integer values.
Parameters
----------
parent_directory : :obj:`list` of :obj:`str`
Parent directory to a set of directories each containing model runs and
a parameters.csv file.
verbose : :obj:`boolean`, optional
Boolean indicator of whether to print extra information.
Returns
-------
    str
        Path to the concatenated CSV file written in `parent_directory`.
"""
dirs = [os.path.abspath(os.path.join(parent_directory, d))
for d in os.listdir(parent_directory)
if os.path.isdir(os.path.abspath(
os.path.join(parent_directory, d))) and d != 'archives']
dirs = sort_human(dirs)
if verbose:
print(dirs)
dfs = []
for d in dirs:
try:
dfs.append(pd.read_csv(os.path.join(d, 'parameters.csv')))
ii = len(dfs) - 1
print("Processing parameter file {}".format(ii))
            if ii != 0:
dfs[ii]['ix'] = dfs[ii].index.values + \
dfs[ii - 1]['ix'].values[-1] + 1
else:
dfs[ii]['ix'] = dfs[ii].index.values
if os.path.split(d)[1].split('_')[-1].isdigit():
print(os.path.split(d)[1].split('_')[-1])
dfs[ii]['Batch'] = int(os.path.split(d)[1].split('_')[-1])
else:
print("Batch number not found for {}".format(d))
continue
except FileNotFoundError:
print("No parameters file in {}".format(d))
continue
if verbose:
print("{} dataframes to be joined".format(len(dfs)))
# for ii in range(len(dfs)):
# if ii is not 0:
# dfs[ii]['ix'] = dfs[ii].index.values + dfs[ii - 1]['ix'].values[-1]
# else:
# dfs[ii]['ix'] = dfs[ii].index.values
# if os.path.split(dirs[ii])[1][:4].isdigit():
# print(os.path.split(dirs[ii])[1][:4])
# dfs[ii]['Start Time'] = os.path.split(dirs[ii])[1][:4]
# else:
# continue
df = pd.concat(dfs)
df.index = range(len(df))
output_file = os.path.join(parent_directory,
'all_parameters.csv')
df.to_csv(output_file, index=False)
return output_file
| 34.240964 | 81 | 0.54715 | [
"MIT"
] | multimodalspectroscopy/hypothermia-bayescmd | results_processing/ABC/csv_processing.py | 2,842 | Python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# author: Irving He
# email: [email protected]
import logging
import argparse
import os
import random
import numpy as np
from tqdm import tqdm
import datetime
from datetime import timedelta
import torch
import torch.distributed as dist
from Data_utils import get_loader
from Data_utils import CONFIGS
from Model import VITransModel
from Utils import WarmupCosineSchedule,WarmupLinearSchedule
from Utils import set_seed, AverageMeter, simple_accuracy, model_save
from tensorboardX import SummaryWriter
def count_parameters(model):
params = sum(p.numel() for p in model.parameters() if p.requires_grad)
return params/1000000
"""Config"""
class VITConfig:
log_dir = "./TB_log/"
dataset = "cifar10" # "cifar100"
model_type = "ViT-B_16"
pretrained_dir = "./Pretrained/imagenet21k_ViT-B_16.npz" # 预训练模型存放位置
save_dir = "./Model/"
record_algo = "Pretrained_VIT_Cifar10_ViTB16_"
test_cycles = datetime.datetime.now().strftime('%Y%m%d_%H%M')
decay_type = "cosine" # "cosine", "linear" 决定了学习率Scheduler类型
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
TB_log = True
img_size = 224
train_batch_size = 64 #512
eval_batch_size = 32 #64
eval_every = 100 # Run prediction on validation set every so many steps.
    learning_rate = 3e-2  # initial SGD learning rate
    weight_decay = 0
    num_steps = 10000  # total number of training steps to perform
    warmup_steps = 500  # number of warmup steps at the start of training
max_grad_norm = 1.0
local_rank = -1 # local_rank for distributed training on gpus
seed = 42
gradient_accumulation_steps = 1 # Number of updates steps to accumulate before performing a backward/update pass.
"""Model Valid Process"""
def valid(args,model,writer,test_loader,global_step):
"""
    :param args: configuration object (see VITConfig)
    :param model: model to validate
    :param writer: TensorBoard SummaryWriter
    :param test_loader: test data loader
    :param global_step: current global training step
    :return: classification accuracy on the test set
"""
# Validation
eval_losses = AverageMeter()
model.eval()
all_preds, all_label = [],[]
epoch_iterator = tqdm(test_loader,
desc="Validating... (loss=X.X)",
bar_format="{l_bar}{r_bar}",
dynamic_ncols=True)
loss_fct = torch.nn.CrossEntropyLoss()
global_eval_step = 0
for step, batch in enumerate(epoch_iterator):
global_eval_step += 1
batch = tuple(t.to(args.device) for t in batch)
x,y = batch
with torch.no_grad():
logits = model(x)[0]
eval_loss = loss_fct(logits,y)
            eval_losses.update(eval_loss.item())  # running average of the eval loss
preds = torch.argmax(logits,dim=-1)
if len(all_preds) == 0:
all_preds.append(preds.detach().cpu().numpy())
all_label.append(y.detach().cpu().numpy())
else:
            # append to the arrays accumulated so far
all_preds[0] = np.append(all_preds[0], preds.detach().cpu().numpy(), axis=0)
all_label[0] = np.append(all_label[0], y.detach().cpu().numpy(), axis=0)
epoch_iterator.set_description("Validating... (loss=%2.5f)" % eval_losses.val)
writer.add_scalar("Train/loss", scalar_value=eval_losses.val, global_step=global_eval_step)
all_preds, all_label = all_preds[0], all_label[0]
# all_preds: numpy.array; all_label: numpy.array;
accuracy = simple_accuracy(all_preds,all_label)
writer.add_scalar("test/accuracy",scalar_value=accuracy,global_step=global_step)
return accuracy
"""Model Training Process"""
def train(args=VITConfig()):
"""
:param args:
- log_dir
"""
    # Model setup: build the ViT and load the pretrained ImageNet-21k weights
pretrained_model_config = CONFIGS[args.model_type]
num_classes = 10 if args.dataset == "cifar10" else 100
model = VITransModel(pretrained_model_config, args.img_size, zero_head=True, num_classes=num_classes)
model.load_from(np.load(args.pretrained_dir))
model.to(device=args.device)
num_params = count_parameters(model)
if args.TB_log:
os.makedirs(args.log_dir, exist_ok=True)
writer = SummaryWriter(logdir=args.log_dir + args.record_algo + args.test_cycles)
args.train_batch_size = args.train_batch_size // args.gradient_accumulation_steps
    # 1. Data preparation
train_loader, test_loader = get_loader(args)
    # 2. Optimizer and learning-rate scheduler
optimizer = torch.optim.SGD(model.parameters(),
lr = args.learning_rate, # init lr
momentum=0.9,
weight_decay=args.weight_decay)
t_total = args.num_steps # Total time steps
if args.decay_type == "cosine":
scheduler = WarmupCosineSchedule(optimizer, warmup_steps=args.warmup_steps, t_total=t_total)
else:
scheduler = WarmupLinearSchedule(optimizer, warmup_steps=args.warmup_steps, t_total=t_total)
# 3. Training
model.zero_grad()
set_seed(args.seed)
losses = AverageMeter()
global_step = 0
best_acc = 0
while True:
model.train()
        # training data iterator with a progress bar
epoch_iterator = tqdm(train_loader,
desc="Training (X / X Steps) (loss=X.X)",
bar_format="{l_bar}{r_bar}",
dynamic_ncols=True)
for step, batch in enumerate(epoch_iterator):
batch = tuple(t.to(args.device) for t in batch)
            x, y = batch  # input images and labels
            loss = model(x, y)
loss.backward()
if (step+1)%args.gradient_accumulation_steps == 0:
losses.update(loss.item()*args.gradient_accumulation_steps)
                torch.nn.utils.clip_grad_norm_(model.parameters(), 1.0)
scheduler.step()
optimizer.step()
optimizer.zero_grad()
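                # With gradient_accumulation_steps > 1, gradients from several backward passes are
                # accumulated before this single update, giving an effective batch size of
                # train_batch_size * gradient_accumulation_steps.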
global_step += 1
# Print Training Info
epoch_iterator.set_description(
"Training (%d / %d Steps) (loss=%2.5f)" % (global_step, t_total, losses.val)
)
writer.add_scalar("Train/loss",scalar_value=losses.val, global_step=global_step)
writer.add_scalar("Train/lr", scalar_value=scheduler.get_lr()[0], global_step=global_step)
# Valid ...
if global_step % args.eval_every == 0:
accuracy = valid(args, model, writer, test_loader, global_step)
if best_acc < accuracy:
best_acc = accuracy
model_save(args.record_algo+args.test_cycles,model)
model.train()
if global_step % t_total == 0:
break
losses.reset()
if global_step % t_total == 0:
break
writer.close()
print("==="*30)
print("Best Accuracy: \t%f" % best_acc)
print("End Training!")
print("==="*30)
if __name__ == "__main__":
train()
# all_preds = []
# all_labels = []
#
# all_pred = torch.tensor([1,0,1,1,0,1])
# all_label = torch.tensor([1,1,1,1,1,1])
#
# all_preds.append(all_pred)
# all_labels.append(all_label)
# print(all_preds)
# all_preds[0] = np.append(all_preds[0],all_label,axis=0)
# all_labels[0] = np.append(all_labels[0],all_pred,axis=0)
# print(type(all_preds[0]))
# print(type(all_labels[0]))
# acc = simple_accuracy(all_preds[0],all_labels[0])
# print(acc) | 33.665158 | 117 | 0.620699 | [
"MIT"
] | HzcIrving/DLRL-PlayGround | VIT/Train.py | 7,578 | Python |
"""
Django settings for hiren project.
Generated by 'django-admin startproject' using Django 1.8.4.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.8/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
import json
from celery.schedules import crontab
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# load json file baby :D
try:
with open('config.json') as f:
JSON_DATA = json.load(f)
except FileNotFoundError:
with open('config.sample.json') as f:
JSON_DATA = json.load(f)
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = os.environ.get('SECRET_KEY', JSON_DATA['secret_key'])
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = os.environ.get('DEBUG', False)
ALLOWED_HOSTS = ['*']
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'debug_toolbar',
'github'
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
)
ROOT_URLCONF = 'hiren.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': ['templates'],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'hiren.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.8/ref/settings/#databases
if 'TRAVIS' in os.environ:
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': 'travisci',
'USER': 'postgres',
'PASSWORD': '',
'HOST': 'localhost',
'PORT': '',
}
}
else:
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': 'hiren_github_management',
'USER': 'hiren',
'PASSWORD': 'hiren',
'HOST': 'localhost',
'PORT': '',
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'Asia/Dhaka'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.8/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_FINDERS = (
"django.contrib.staticfiles.finders.FileSystemFinder",
"django.contrib.staticfiles.finders.AppDirectoriesFinder"
)
STATICFILES_DIRS = (
os.path.join(BASE_DIR, "static"),
)
LOGIN_URL = '/'
# CELERY STUFF
BROKER_URL = 'redis://localhost:6379'
CELERY_RESULT_BACKEND = 'redis://localhost:6379'
CELERY_ACCEPT_CONTENT = ['application/json']
CELERY_TASK_SERIALIZER = 'json'
CELERY_RESULT_SERIALIZER = 'json'
CELERYBEAT_SCHEDULE = {
'add-every-30-seconds': {
'task': 'github.tasks.get_data',
'schedule': crontab(minute=0, hour='22'), # execute every day at 10 pm
},
} | 26.16129 | 80 | 0.663132 | [
"MIT"
] | pyprism/Hiren-Git-Commit-Management | hiren/settings.py | 4,055 | Python |
__author__ = "Stefan Weißenberger and Johannes Gasteiger"
__license__ = "MIT"
import os
import numpy as np
from scipy.linalg import expm
import torch
from torch_geometric.data import Data, InMemoryDataset
from torch_geometric.datasets import Planetoid, Amazon, Coauthor
from seeds import development_seed
DATA_PATH = 'data'
def get_dataset(name: str, use_lcc: bool = True) -> InMemoryDataset:
path = os.path.join(DATA_PATH, name)
if name in ['Cora', 'Citeseer', 'Pubmed']:
dataset = Planetoid(path, name)
elif name in ['Computers', 'Photo']:
dataset = Amazon(path, name)
elif name == 'CoauthorCS':
dataset = Coauthor(path, 'CS')
else:
raise Exception('Unknown dataset.')
if use_lcc:
lcc = get_largest_connected_component(dataset)
x_new = dataset.data.x[lcc]
y_new = dataset.data.y[lcc]
row, col = dataset.data.edge_index.numpy()
edges = [[i, j] for i, j in zip(row, col) if i in lcc and j in lcc]
edges = remap_edges(edges, get_node_mapper(lcc))
data = Data(
x=x_new,
edge_index=torch.LongTensor(edges),
y=y_new,
train_mask=torch.zeros(y_new.size()[0], dtype=torch.bool),
test_mask=torch.zeros(y_new.size()[0], dtype=torch.bool),
val_mask=torch.zeros(y_new.size()[0], dtype=torch.bool)
)
dataset.data = data
return dataset
def get_component(dataset: InMemoryDataset, start: int = 0) -> set:
visited_nodes = set()
queued_nodes = set([start])
row, col = dataset.data.edge_index.numpy()
while queued_nodes:
current_node = queued_nodes.pop()
visited_nodes.update([current_node])
neighbors = col[np.where(row == current_node)[0]]
neighbors = [n for n in neighbors if n not in visited_nodes and n not in queued_nodes]
queued_nodes.update(neighbors)
return visited_nodes
def get_largest_connected_component(dataset: InMemoryDataset) -> np.ndarray:
remaining_nodes = set(range(dataset.data.x.shape[0]))
comps = []
while remaining_nodes:
start = min(remaining_nodes)
comp = get_component(dataset, start)
comps.append(comp)
remaining_nodes = remaining_nodes.difference(comp)
return np.array(list(comps[np.argmax(list(map(len, comps)))]))
def get_node_mapper(lcc: np.ndarray) -> dict:
mapper = {}
counter = 0
for node in lcc:
mapper[node] = counter
counter += 1
return mapper
def remap_edges(edges: list, mapper: dict) -> list:
row = [e[0] for e in edges]
col = [e[1] for e in edges]
row = list(map(lambda x: mapper[x], row))
col = list(map(lambda x: mapper[x], col))
return [row, col]
def get_adj_matrix(dataset: InMemoryDataset) -> np.ndarray:
num_nodes = dataset.data.x.shape[0]
adj_matrix = np.zeros(shape=(num_nodes, num_nodes))
for i, j in zip(dataset.data.edge_index[0], dataset.data.edge_index[1]):
adj_matrix[i, j] = 1.
return adj_matrix
def get_ppr_matrix(
adj_matrix: np.ndarray,
alpha: float = 0.1) -> np.ndarray:
num_nodes = adj_matrix.shape[0]
A_tilde = adj_matrix + np.eye(num_nodes)
D_tilde = np.diag(1/np.sqrt(A_tilde.sum(axis=1)))
H = D_tilde @ A_tilde @ D_tilde
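    # Closed-form personalized PageRank diffusion: S = alpha * (I - (1 - alpha) * H)^(-1),
    # where H is the symmetrically normalized adjacency matrix with self-loops.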
return alpha * np.linalg.inv(np.eye(num_nodes) - (1 - alpha) * H)
def get_heat_matrix(
adj_matrix: np.ndarray,
t: float = 5.0) -> np.ndarray:
num_nodes = adj_matrix.shape[0]
A_tilde = adj_matrix + np.eye(num_nodes)
D_tilde = np.diag(1/np.sqrt(A_tilde.sum(axis=1)))
H = D_tilde @ A_tilde @ D_tilde
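    # Heat-kernel diffusion: S = exp(-t * (I - H)), computed with a dense matrix exponential.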
return expm(-t * (np.eye(num_nodes) - H))
def get_top_k_matrix(A: np.ndarray, k: int = 128) -> np.ndarray:
num_nodes = A.shape[0]
row_idx = np.arange(num_nodes)
A[A.argsort(axis=0)[:num_nodes - k], row_idx] = 0.
norm = A.sum(axis=0)
norm[norm <= 0] = 1 # avoid dividing by zero
return A/norm
def get_clipped_matrix(A: np.ndarray, eps: float = 0.01) -> np.ndarray:
num_nodes = A.shape[0]
A[A < eps] = 0.
norm = A.sum(axis=0)
norm[norm <= 0] = 1 # avoid dividing by zero
return A/norm
def set_train_val_test_split(
seed: int,
data: Data,
num_development: int = 1500,
num_per_class: int = 20) -> Data:
rnd_state = np.random.RandomState(development_seed)
num_nodes = data.y.shape[0]
development_idx = rnd_state.choice(num_nodes, num_development, replace=False)
test_idx = [i for i in np.arange(num_nodes) if i not in development_idx]
train_idx = []
rnd_state = np.random.RandomState(seed)
for c in range(data.y.max() + 1):
class_idx = development_idx[np.where(data.y[development_idx].cpu() == c)[0]]
train_idx.extend(rnd_state.choice(class_idx, num_per_class, replace=False))
val_idx = [i for i in development_idx if i not in train_idx]
def get_mask(idx):
mask = torch.zeros(num_nodes, dtype=torch.bool)
mask[idx] = 1
return mask
data.train_mask = get_mask(train_idx)
data.val_mask = get_mask(val_idx)
data.test_mask = get_mask(test_idx)
return data
class PPRDataset(InMemoryDataset):
"""
Dataset preprocessed with GDC using PPR diffusion.
    Note that this implementation is not scalable
since we directly invert the adjacency matrix.
"""
def __init__(self,
name: str = 'Cora',
use_lcc: bool = True,
alpha: float = 0.1,
k: int = 16,
eps: float = None):
self.name = name
self.use_lcc = use_lcc
self.alpha = alpha
self.k = k
self.eps = eps
super(PPRDataset, self).__init__(DATA_PATH)
self.data, self.slices = torch.load(self.processed_paths[0])
@property
def raw_file_names(self) -> list:
return []
@property
def processed_file_names(self) -> list:
return [str(self) + '.pt']
def download(self):
pass
def process(self):
base = get_dataset(name=self.name, use_lcc=self.use_lcc)
# generate adjacency matrix from sparse representation
adj_matrix = get_adj_matrix(base)
# obtain exact PPR matrix
ppr_matrix = get_ppr_matrix(adj_matrix,
alpha=self.alpha)
if self.k:
print(f'Selecting top {self.k} edges per node.')
ppr_matrix = get_top_k_matrix(ppr_matrix, k=self.k)
elif self.eps:
print(f'Selecting edges with weight greater than {self.eps}.')
ppr_matrix = get_clipped_matrix(ppr_matrix, eps=self.eps)
else:
raise ValueError
# create PyG Data object
edges_i = []
edges_j = []
edge_attr = []
for i, row in enumerate(ppr_matrix):
for j in np.where(row > 0)[0]:
edges_i.append(i)
edges_j.append(j)
edge_attr.append(ppr_matrix[i, j])
edge_index = [edges_i, edges_j]
data = Data(
x=base.data.x,
edge_index=torch.LongTensor(edge_index),
edge_attr=torch.FloatTensor(edge_attr),
y=base.data.y,
train_mask=torch.zeros(base.data.train_mask.size()[0], dtype=torch.bool),
test_mask=torch.zeros(base.data.test_mask.size()[0], dtype=torch.bool),
val_mask=torch.zeros(base.data.val_mask.size()[0], dtype=torch.bool)
)
data, slices = self.collate([data])
torch.save((data, slices), self.processed_paths[0])
def __str__(self) -> str:
return f'{self.name}_ppr_alpha={self.alpha}_k={self.k}_eps={self.eps}_lcc={self.use_lcc}'
class HeatDataset(InMemoryDataset):
"""
Dataset preprocessed with GDC using heat kernel diffusion.
    Note that this implementation is not scalable
since we directly calculate the matrix exponential
of the adjacency matrix.
"""
def __init__(self,
name: str = 'Cora',
use_lcc: bool = True,
t: float = 5.0,
k: int = 16,
eps: float = None):
self.name = name
self.use_lcc = use_lcc
self.t = t
self.k = k
self.eps = eps
super(HeatDataset, self).__init__(DATA_PATH)
self.data, self.slices = torch.load(self.processed_paths[0])
@property
def raw_file_names(self) -> list:
return []
@property
def processed_file_names(self) -> list:
return [str(self) + '.pt']
def download(self):
pass
def process(self):
base = get_dataset(name=self.name, use_lcc=self.use_lcc)
# generate adjacency matrix from sparse representation
adj_matrix = get_adj_matrix(base)
# get heat matrix as described in Berberidis et al., 2019
heat_matrix = get_heat_matrix(adj_matrix,
t=self.t)
if self.k:
print(f'Selecting top {self.k} edges per node.')
heat_matrix = get_top_k_matrix(heat_matrix, k=self.k)
elif self.eps:
print(f'Selecting edges with weight greater than {self.eps}.')
heat_matrix = get_clipped_matrix(heat_matrix, eps=self.eps)
else:
raise ValueError
# create PyG Data object
edges_i = []
edges_j = []
edge_attr = []
for i, row in enumerate(heat_matrix):
for j in np.where(row > 0)[0]:
edges_i.append(i)
edges_j.append(j)
edge_attr.append(heat_matrix[i, j])
edge_index = [edges_i, edges_j]
data = Data(
x=base.data.x,
edge_index=torch.LongTensor(edge_index),
edge_attr=torch.FloatTensor(edge_attr),
y=base.data.y,
train_mask=torch.zeros(base.data.train_mask.size()[0], dtype=torch.bool),
test_mask=torch.zeros(base.data.test_mask.size()[0], dtype=torch.bool),
val_mask=torch.zeros(base.data.val_mask.size()[0], dtype=torch.bool)
)
data, slices = self.collate([data])
torch.save((data, slices), self.processed_paths[0])
def __str__(self) -> str:
return f'{self.name}_heat_t={self.t}_k={self.k}_eps={self.eps}_lcc={self.use_lcc}'
| 33.376947 | 98 | 0.589696 | [
"MIT"
] | gasteigerjo/gdc | data.py | 10,715 | Python |
"""This contains all of the model filters used by the Shepherd application."""
# Django & Other 3rd Party Libraries
import django_filters
from crispy_forms.bootstrap import (
Accordion,
AccordionGroup,
InlineCheckboxes,
PrependedText,
)
from crispy_forms.helper import FormHelper
from crispy_forms.layout import HTML, ButtonHolder, Column, Layout, Row, Submit
from django import forms
from django.forms.widgets import TextInput
# Ghostwriter Libraries
from .models import Domain, DomainStatus, HealthStatus, ServerStatus, StaticServer
class DomainFilter(django_filters.FilterSet):
"""
Filter :model:`shepherd.Domain` model for searching.
**Fields**
``name``
Case insensitive search of the name field contents
``all_cat``
Case insensitive search of the all_cat field
``health_status``
Checkbox choice filter using :model:`shepherd.HealthStatus`
``domain_status``
Checkbox choice filter using :model:`shepherd.DomainStatus`
``expiration_status``
Boolean field to filter expired domains
"""
name = django_filters.CharFilter(
lookup_expr="icontains",
label="Domain Name Contains",
widget=TextInput(attrs={"placeholder": "specterops.io", "autocomplete": "off"}),
)
all_cat = django_filters.CharFilter(
lookup_expr="icontains",
label="Categories Contain",
widget=TextInput(attrs={"placeholder": "Category", "autocomplete": "off"}),
)
health_status = django_filters.ModelMultipleChoiceFilter(
queryset=HealthStatus.objects.all(),
widget=forms.CheckboxSelectMultiple,
label="",
)
domain_status = django_filters.ModelMultipleChoiceFilter(
queryset=DomainStatus.objects.all(),
widget=forms.CheckboxSelectMultiple,
label="",
)
STATUS_CHOICES = (
(0, "Active"),
(1, "Expired"),
)
expiration_status = django_filters.ChoiceFilter(
field_name="expired", choices=STATUS_CHOICES, label="Expiration Status"
)
class Meta:
model = Domain
fields = ["name", "all_cat", "health_status", "domain_status"]
def __init__(self, *args, **kwargs):
super(DomainFilter, self).__init__(*args, **kwargs)
self.helper = FormHelper()
self.helper.form_method = "get"
self.helper.form_class = "newitem"
self.helper.form_show_labels = False
# Layout the form for Bootstrap
self.helper.layout = Layout(
Row(
Column(
PrependedText("name", '<i class="fas fa-filter"></i>'),
css_class="col-md-4 offset-md-2",
),
Column(
PrependedText("all_cat", '<i class="fas fa-filter"></i>'),
css_class=" col-md-4",
),
css_class="form-row",
),
Accordion(
AccordionGroup("Domain Statuses", InlineCheckboxes("domain_status")),
AccordionGroup("Health Statuses", InlineCheckboxes("health_status")),
),
ButtonHolder(
Submit("submit", "Filter", css_class="btn btn-primary col-md-2"),
HTML(
"""
<a class="btn btn-outline-secondary col-md-2" role="button" href="{% url 'shepherd:domains' %}">Reset</a>
"""
),
),
)
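# Usage sketch (illustrative, not part of the original module): a FilterSet such
# as DomainFilter is typically bound to the request's GET parameters in a list
# view, e.g.
#
#     filterset = DomainFilter(request.GET, queryset=Domain.objects.all())
#     filtered_domains = filterset.qs
#
# The crispy-forms helper built above only controls how the filter form is
# rendered; it does not affect the filtering itself.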
class ServerFilter(django_filters.FilterSet):
"""
Filter :model:`shepherd.StaticServer` model for searching.
**Fields**
    ``ip_address``
Case insensitive search of the ip_address field contents
``name``
Case insensitive search of the name field contents
``server_status``
Checkbox choice filter using :model:`shepherd.ServerStatus`
"""
ip_address = django_filters.CharFilter(
lookup_expr="icontains",
label="IP Address Contains",
widget=TextInput(attrs={"placeholder": "104.31.5.75", "autocomplete": "off"}),
)
name = django_filters.CharFilter(
lookup_expr="icontains",
label="Server Name Contains",
widget=TextInput(attrs={"placeholder": "Hostname", "autocomplete": "off"}),
)
server_status = django_filters.ModelMultipleChoiceFilter(
queryset=ServerStatus.objects.all(),
widget=forms.CheckboxSelectMultiple,
label="Server Status",
)
class Meta:
        model = StaticServer
fields = ["ip_address", "name", "server_status"]
def __init__(self, *args, **kwargs):
super(ServerFilter, self).__init__(*args, **kwargs)
self.helper = FormHelper()
self.helper.form_method = "get"
self.helper.form_class = "newitem"
self.helper.form_show_labels = False
# Layout the form for Bootstrap
self.helper.layout = Layout(
Row(
Column(
PrependedText("ip_address", '<i class="fas fa-filter"></i>'),
css_class="col-md-4 offset-md-2",
),
Column(
PrependedText("name", '<i class="fas fa-filter"></i>'),
css_class=" col-md-4",
),
css_class="form-row",
),
Accordion(
AccordionGroup("Server Status", InlineCheckboxes("server_status")),
),
ButtonHolder(
Submit("submit", "Filter", css_class="btn btn-primary col-md-2"),
HTML(
"""
<a class="btn btn-outline-secondary col-md-2" role="button" href="{% url 'shepherd:servers' %}">Reset</a>
"""
),
),
)
| 33.952941 | 126 | 0.580388 | [
"BSD-3-Clause"
] | DemanNL/Ghostwriter | ghostwriter/shepherd/filters.py | 5,772 | Python |
# Copyright 2018 Xanadu Quantum Technologies Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Unit tests for the :mod:`pennylane.interface.tf` QNode interface.
"""
import pytest
import numpy as np
try:
import tensorflow as tf
if tf.__version__[0] == "1":
import tensorflow.contrib.eager as tfe
tf.enable_eager_execution()
Variable = tfe.Variable
else:
from tensorflow import Variable
except ImportError as e:
pass
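# Note: the import shim above makes ``Variable`` resolve to ``tfe.Variable`` under
# TensorFlow 1.x (with eager execution enabled) and to ``tf.Variable`` under
# TensorFlow 2.x, so the tests below can construct variables uniformly.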
import pennylane as qml
from pennylane.qnode import _flatten, unflatten, QNode, QuantumFunctionError
from pennylane.plugins.default_qubit import CNOT, Rotx, Roty, Rotz, I, Y, Z
from pennylane._device import DeviceError
def expZ(state):
    """Return the analytic PauliZ expectation value of a single-qubit state vector."""
    return np.abs(state[0]) ** 2 - np.abs(state[1]) ** 2
@pytest.fixture(scope='module')
def tf_support():
"""Boolean fixture for TensorFlow support"""
try:
import tensorflow as tf
tf_support = True
except ImportError as e:
tf_support = False
return tf_support
@pytest.fixture()
def skip_if_no_tf_support(tf_support):
if not tf_support:
pytest.skip("Skipped, no tf support")
@pytest.mark.usefixtures("skip_if_no_tf_support")
class TestTFQNodeExceptions():
"""TFQNode basic tests."""
def test_qnode_fails_on_wrong_return_type(self, qubit_device_2_wires):
"""The qfunc must return only Expectations"""
@qml.qnode(qubit_device_2_wires, interface='tf')
def qf(x):
qml.RX(x, wires=[0])
return qml.expval(qml.PauliZ(0)), 0.3
with pytest.raises(QuantumFunctionError, match='must return either'):
qf(Variable(0.5))
def test_qnode_fails_on_expval_not_returned(self, qubit_device_2_wires):
"""All expectation values in the qfunc must be returned"""
@qml.qnode(qubit_device_2_wires, interface='tf')
def qf(x):
qml.RX(x, wires=[0])
ex = qml.expval(qml.PauliZ(1))
return qml.expval(qml.PauliZ(0))
with pytest.raises(QuantumFunctionError, match='All measured observables'):
qf(Variable(0.5))
def test_qnode_fails_on_wrong_expval_order(self, qubit_device_2_wires):
"""Expvals must be returned in the order they were created in"""
@qml.qnode(qubit_device_2_wires, interface='tf')
def qf(x):
qml.RX(x, wires=[0])
ex = qml.expval(qml.PauliZ(1))
return qml.expval(qml.PauliZ(0)), ex
with pytest.raises(QuantumFunctionError, match='All measured observables'):
qf(Variable(0.5))
def test_qnode_fails_on_gates_after_measurements(self, qubit_device_2_wires):
"""Gates have to precede measurements"""
@qml.qnode(qubit_device_2_wires, interface='tf')
def qf(x):
qml.RX(x, wires=[0])
ev = qml.expval(qml.PauliZ(1))
qml.RY(0.5, wires=[0])
return ev
with pytest.raises(QuantumFunctionError, match='gates must precede'):
qf(Variable(0.5))
def test_qnode_fails_on_multiple_measurements_of_same_wire(self, qubit_device_2_wires):
"""A wire can only be measured once"""
@qml.qnode(qubit_device_2_wires, interface='tf')
def qf(x):
qml.RX(x, wires=[0])
qml.CNOT(wires=[0, 1])
return qml.expval(qml.PauliZ(0)), qml.expval(qml.PauliZ(1)), qml.expval(qml.PauliX(0))
with pytest.raises(QuantumFunctionError, match='can only be measured once'):
qf(Variable(0.5))
def test_qnode_fails_on_qfunc_with_too_many_wires(self, qubit_device_2_wires):
"""The device must have sufficient wires for the qfunc"""
@qml.qnode(qubit_device_2_wires, interface='tf')
def qf(x):
qml.RX(x, wires=[0])
qml.CNOT(wires=[0, 2])
return qml.expval(qml.PauliZ(0))
with pytest.raises(QuantumFunctionError, match='applied to invalid wire'):
qf(Variable(0.5))
def test_qnode_fails_on_combination_of_cv_and_qbit_ops(self, qubit_device_1_wire):
"""CV and discrete operations must not be mixed"""
@qml.qnode(qubit_device_1_wire, interface='tf')
def qf(x):
qml.RX(x, wires=[0])
qml.Displacement(0.5, 0, wires=[0])
return qml.expval(qml.PauliZ(0))
with pytest.raises(QuantumFunctionError, match='Continuous and discrete'):
qf(Variable(0.5))
def test_qnode_fails_for_cv_ops_on_qubit_device(self, qubit_device_1_wire):
"""A qubit device cannot execute CV operations"""
@qml.qnode(qubit_device_1_wire, interface='tf')
def qf(x):
qml.Displacement(0.5, 0, wires=[0])
return qml.expval(qml.X(0))
with pytest.raises(DeviceError, match='Gate [a-zA-Z]+ not supported on device'):
qf(Variable(0.5))
def test_qnode_fails_for_cv_observables_on_qubit_device(self, qubit_device_1_wire):
"""A qubit device cannot measure CV observables"""
@qml.qnode(qubit_device_1_wire, interface='tf')
def qf(x):
return qml.expval(qml.X(0))
with pytest.raises(DeviceError, match='Observable [a-zA-Z]+ not supported on device'):
qf(Variable(0.5))
@pytest.mark.usefixtures("skip_if_no_tf_support")
class TestTFQNodeParameterHandling:
"""Test that the TFQNode properly handles the parameters of qfuncs"""
def test_qnode_fanout(self, qubit_device_1_wire, tol):
"""Tests that qnodes can compute the correct function when the same parameter is used in multiple gates."""
@qml.qnode(qubit_device_1_wire, interface='tf')
def circuit(reused_param, other_param):
qml.RX(reused_param, wires=[0])
qml.RZ(other_param, wires=[0])
qml.RX(reused_param, wires=[0])
return qml.expval(qml.PauliZ(0))
thetas = tf.linspace(-2*np.pi, 2*np.pi, 7)
for reused_param in thetas:
for theta in thetas:
other_param = theta ** 2 / 11
y_eval = circuit(reused_param, other_param)
Rx = Rotx(reused_param.numpy())
Rz = Rotz(other_param.numpy())
zero_state = np.array([1.,0.])
final_state = (Rx @ Rz @ Rx @ zero_state)
y_true = expZ(final_state)
assert np.allclose(y_eval, y_true, atol=tol, rtol=0)
def test_qnode_array_parameters_scalar_return(self, qubit_device_1_wire, tol):
"""Test that QNode can take arrays as input arguments, and that they interact properly with TensorFlow.
Test case for a circuit that returns a scalar."""
# The objective of this test is not to check if the results are correctly calculated,
# but to check that the interoperability of the different return types works.
@qml.qnode(qubit_device_1_wire, interface='tf')
def circuit(dummy1, array, dummy2):
qml.RY(0.5 * array[0,1], wires=0)
qml.RY(-0.5 * array[1,1], wires=0)
return qml.expval(qml.PauliX(0)) # returns a scalar
grad_target = (np.array(1.), np.array([[0.5, 0.43879, 0], [0, -0.43879, 0]]), np.array(-0.4))
cost_target = 1.03257
args = (Variable(0.46), Variable([[2., 3., 0.3], [7., 4., 2.1]]), Variable(-0.13))
def cost(x, array, y):
c = tf.cast(circuit(tf.constant(0.111), array, tf.constant(4.5)), tf.float32)
return c +0.5*array[0,0] +x -0.4*y
with tf.GradientTape() as tape:
cost_res = cost(*args)
grad_res = np.array([i.numpy() for i in tape.gradient(cost_res, [args[0], args[2]])])
assert np.allclose(cost_res.numpy(), cost_target, atol=tol, rtol=0)
assert np.allclose(grad_res, np.fromiter(grad_target[::2], dtype=np.float32), atol=tol, rtol=0)
def test_qnode_array_parameters_1_vector_return(self, qubit_device_1_wire, tol):
"""Test that QNode can take arrays as input arguments, and that they interact properly with TensorFlow
Test case for a circuit that returns a 1-vector."""
# The objective of this test is not to check if the results are correctly calculated,
# but to check that the interoperability of the different return types works.
@qml.qnode(qubit_device_1_wire, interface='tf')
def circuit(dummy1, array, dummy2):
qml.RY(0.5 * array[0,1], wires=0)
qml.RY(-0.5 * array[1,1], wires=0)
return qml.expval(qml.PauliX(0)), # note the comma, returns a 1-vector
grad_target = (np.array(1.), np.array([[0.5, 0.43879, 0], [0, -0.43879, 0]]), np.array(-0.4))
cost_target = 1.03257
args = (Variable(0.46), Variable([[2., 3., 0.3], [7., 4., 2.1]]), Variable(-0.13))
def cost(x, array, y):
c = tf.cast(circuit(tf.constant(0.111), array, tf.constant(4.5)), tf.float32)
c = c[0] # get a scalar
return c +0.5*array[0,0] +x -0.4*y
with tf.GradientTape() as tape:
cost_res = cost(*args)
grad_res = np.array([i.numpy() for i in tape.gradient(cost_res, [args[0], args[2]])])
assert np.allclose(cost_res.numpy(), cost_target, atol=tol, rtol=0)
assert np.allclose(grad_res, np.fromiter(grad_target[::2], dtype=np.float32), atol=tol, rtol=0)
def test_qnode_array_parameters_2_vector_return(self, qubit_device_2_wires, tol):
"""Test that QNode can take arrays as input arguments, and that they interact properly with TensorFlow
Test case for a circuit that returns a 2-vector."""
# The objective of this test is not to check if the results are correctly calculated,
# but to check that the interoperability of the different return types works.
@qml.qnode(qubit_device_2_wires, interface='tf')
def circuit(dummy1, array, dummy2):
qml.RY(0.5 * array[0,1], wires=0)
qml.RY(-0.5 * array[1,1], wires=0)
qml.RY(array[1,0], wires=1)
return qml.expval(qml.PauliX(0)), qml.expval(qml.PauliX(1)) # returns a 2-vector
grad_target = (np.array(1.), np.array([[0.5, 0.43879, 0], [0, -0.43879, 0]]), np.array(-0.4))
cost_target = 1.03257
args = (Variable(0.46), Variable([[2., 3., 0.3], [7., 4., 2.1]]), Variable(-0.13))
def cost(x, array, y):
c = tf.cast(circuit(tf.constant(0.111), array, tf.constant(4.5)), tf.float32)
c = c[0] # get a scalar
return c +0.5*array[0,0] +x -0.4*y
with tf.GradientTape() as tape:
cost_res = cost(*args)
grad_res = np.array([i.numpy() for i in tape.gradient(cost_res, [args[0], args[2]])])
assert np.allclose(cost_res.numpy(), cost_target, atol=tol, rtol=0)
assert np.allclose(grad_res, np.fromiter(grad_target[::2], dtype=np.float32), atol=tol, rtol=0)
def test_array_parameters_evaluate(self, qubit_device_2_wires, tol):
"""Test that array parameters gives same result as positional arguments."""
a, b, c = tf.constant(0.5), tf.constant(0.54), tf.constant(0.3)
def ansatz(x, y, z):
qml.QubitStateVector(np.array([1, 0, 1, 1])/np.sqrt(3), wires=[0, 1])
qml.Rot(x, y, z, wires=0)
qml.CNOT(wires=[0, 1])
return qml.expval(qml.PauliZ(0)), qml.expval(qml.PauliY(1))
@qml.qnode(qubit_device_2_wires, interface='tf')
def circuit1(x, y, z):
return ansatz(x, y, z)
@qml.qnode(qubit_device_2_wires, interface='tf')
def circuit2(x, array):
return ansatz(x, array[0], array[1])
@qml.qnode(qubit_device_2_wires, interface='tf')
def circuit3(array):
return ansatz(*array)
positional_res = circuit1(a, b, c)
array_res1 = circuit2(a, Variable([b, c]))
array_res2 = circuit3(Variable([a, b, c]))
assert np.allclose(positional_res.numpy(), array_res1.numpy(), atol=tol, rtol=0)
assert np.allclose(positional_res.numpy(), array_res2.numpy(), atol=tol, rtol=0)
def test_multiple_expectation_different_wires(self, qubit_device_2_wires, tol):
"""Tests that qnodes return multiple expectation values."""
a, b, c = Variable(0.5), Variable(0.54), Variable(0.3)
@qml.qnode(qubit_device_2_wires, interface='tf')
def circuit(x, y, z):
qml.RX(x, wires=[0])
qml.RZ(y, wires=[0])
qml.CNOT(wires=[0, 1])
qml.RY(y, wires=[0])
qml.RX(z, wires=[0])
return qml.expval(qml.PauliY(0)), qml.expval(qml.PauliZ(1))
res = circuit(a, b, c)
out_state = np.kron(Rotx(c.numpy()), I) @ np.kron(Roty(b.numpy()), I) @ CNOT \
@ np.kron(Rotz(b.numpy()), I) @ np.kron(Rotx(a.numpy()), I) @ np.array([1, 0, 0, 0])
ex0 = np.vdot(out_state, np.kron(Y, I) @ out_state)
ex1 = np.vdot(out_state, np.kron(I, Z) @ out_state)
ex = np.array([ex0, ex1])
assert np.allclose(ex, res.numpy(), atol=tol, rtol=0)
def test_multiple_keywordargs_used(self, qubit_device_2_wires, tol):
"""Tests that qnodes use multiple keyword arguments."""
@qml.qnode(qubit_device_2_wires, interface='tf')
def circuit(w, x=None, y=None):
qml.RX(x, wires=[0])
qml.RX(y, wires=[1])
return qml.expval(qml.PauliZ(0)), qml.expval(qml.PauliZ(1))
c = circuit(tf.constant(1.), x=np.pi, y=np.pi)
assert np.allclose(c.numpy(), [-1., -1.], atol=tol, rtol=0)
def test_multidimensional_keywordargs_used(self, qubit_device_2_wires, tol):
"""Tests that qnodes use multi-dimensional keyword arguments."""
def circuit(w, x=None):
qml.RX(x[0], wires=[0])
qml.RX(x[1], wires=[1])
return qml.expval(qml.PauliZ(0)), qml.expval(qml.PauliZ(1))
circuit = qml.QNode(circuit, qubit_device_2_wires).to_tf()
c = circuit(tf.constant(1.), x=[np.pi, np.pi])
assert np.allclose(c.numpy(), [-1., -1.], atol=tol, rtol=0)
def test_keywordargs_for_wires(self, qubit_device_2_wires, tol):
"""Tests that wires can be passed as keyword arguments."""
default_q = 0
def circuit(x, q=default_q):
qml.RY(x, wires=0)
return qml.expval(qml.PauliZ(q))
circuit = qml.QNode(circuit, qubit_device_2_wires).to_tf()
c = circuit(tf.constant(np.pi), q=1)
assert np.allclose(c, 1., atol=tol, rtol=0)
c = circuit(tf.constant(np.pi))
assert np.allclose(c.numpy(), -1., atol=tol, rtol=0)
def test_keywordargs_used(self, qubit_device_1_wire, tol):
"""Tests that qnodes use keyword arguments."""
def circuit(w, x=None):
qml.RX(x, wires=[0])
return qml.expval(qml.PauliZ(0))
circuit = qml.QNode(circuit, qubit_device_1_wire).to_tf()
c = circuit(tf.constant(1.), x=np.pi)
assert np.allclose(c.numpy(), -1., atol=tol, rtol=0)
def test_mixture_numpy_tensors(self, qubit_device_2_wires, tol):
"""Tests that qnodes work with python types and tensors."""
@qml.qnode(qubit_device_2_wires, interface='tf')
def circuit(w, x, y):
qml.RX(x, wires=[0])
qml.RX(y, wires=[1])
return qml.expval(qml.PauliZ(0)), qml.expval(qml.PauliZ(1))
c = circuit(tf.constant(1.), np.pi, np.pi).numpy()
assert np.allclose(c, [-1., -1.], atol=tol, rtol=0)
def test_keywordarg_updated_in_multiple_calls(self, qubit_device_2_wires):
"""Tests that qnodes update keyword arguments in consecutive calls."""
def circuit(w, x=None):
qml.RX(w, wires=[0])
qml.RX(x, wires=[1])
return qml.expval(qml.PauliZ(0)), qml.expval(qml.PauliZ(1))
circuit = qml.QNode(circuit, qubit_device_2_wires).to_tf()
c1 = circuit(tf.constant(0.1), x=tf.constant(0.))
c2 = circuit(tf.constant(0.1), x=np.pi)
assert c1[1] != c2[1]
def test_keywordarg_passes_through_classicalnode(self, qubit_device_2_wires, tol):
"""Tests that qnodes' keyword arguments pass through classical nodes."""
def circuit(w, x=None):
qml.RX(w, wires=[0])
qml.RX(x, wires=[1])
return qml.expval(qml.PauliZ(0)), qml.expval(qml.PauliZ(1))
circuit = qml.QNode(circuit, qubit_device_2_wires).to_tf()
def classnode(w, x=None):
return circuit(w, x=x)
c = classnode(tf.constant(0.), x=np.pi)
assert np.allclose(c.numpy(), [1., -1.], atol=tol, rtol=0)
def test_keywordarg_gradient(self, qubit_device_2_wires, tol):
"""Tests that qnodes' keyword arguments work with gradients"""
def circuit(x, y, input_state=np.array([0, 0])):
qml.BasisState(input_state, wires=[0, 1])
qml.RX(x, wires=[0])
qml.RY(y, wires=[0])
return qml.expval(qml.PauliZ(0))
circuit = qml.QNode(circuit, qubit_device_2_wires).to_tf()
x = 0.543
y = 0.45632
expected_grad = np.array([np.sin(x)*np.cos(y), np.sin(y)*np.cos(x)])
x_t = Variable(x)
y_t = Variable(y)
# test first basis state against analytic result
with tf.GradientTape() as tape:
c = circuit(x_t, y_t, input_state=np.array([0, 0]))
grads = np.array(tape.gradient(c, [x_t, y_t]))
assert np.allclose(grads, -expected_grad, atol=tol, rtol=0)
# test third basis state against analytic result
with tf.GradientTape() as tape:
c = circuit(x_t, y_t, input_state=np.array([1, 0]))
grads = np.array(tape.gradient(c, [x_t, y_t]))
assert np.allclose(grads, expected_grad, atol=tol, rtol=0)
# test first basis state via the default keyword argument against analytic result
with tf.GradientTape() as tape:
c = circuit(x_t, y_t)
grads = np.array(tape.gradient(c, [x_t, y_t]))
assert np.allclose(grads, -expected_grad, atol=tol, rtol=0)
@pytest.mark.usefixtures("skip_if_no_tf_support")
class TestIntegration:
"""Integration tests to ensure the TensorFlow QNode agrees with the NumPy QNode"""
def test_qnode_evaluation_agrees(self, qubit_device_2_wires, tol):
"""Tests that simple example is consistent."""
@qml.qnode(qubit_device_2_wires, interface='autograd')
def circuit(phi, theta):
qml.RX(phi[0], wires=0)
qml.RY(phi[1], wires=1)
qml.CNOT(wires=[0, 1])
qml.PhaseShift(theta[0], wires=0)
return qml.expval(qml.PauliZ(0))
@qml.qnode(qubit_device_2_wires, interface='tf')
def circuit_tf(phi, theta):
qml.RX(phi[0], wires=0)
qml.RY(phi[1], wires=1)
qml.CNOT(wires=[0, 1])
qml.PhaseShift(theta[0], wires=0)
return qml.expval(qml.PauliZ(0))
phi = [0.5, 0.1]
theta = [0.2]
phi_t = Variable(phi)
theta_t = Variable(theta)
autograd_eval = circuit(phi, theta)
tf_eval = circuit_tf(phi_t, theta_t)
assert np.allclose(autograd_eval, tf_eval.numpy(), atol=tol, rtol=0)
def test_qnode_gradient_agrees(self, qubit_device_2_wires, tol):
"""Tests that simple gradient example is consistent."""
@qml.qnode(qubit_device_2_wires, interface='autograd')
def circuit(phi, theta):
qml.RX(phi[0], wires=0)
qml.RY(phi[1], wires=1)
qml.CNOT(wires=[0, 1])
qml.PhaseShift(theta[0], wires=0)
return qml.expval(qml.PauliZ(0))
@qml.qnode(qubit_device_2_wires, interface='tf')
def circuit_tf(phi, theta):
qml.RX(phi[0], wires=0)
qml.RY(phi[1], wires=1)
qml.CNOT(wires=[0, 1])
qml.PhaseShift(theta[0], wires=0)
return qml.expval(qml.PauliZ(0))
phi = [0.5, 0.1]
theta = [0.2]
phi_t = Variable(phi)
theta_t = Variable(theta)
dcircuit = qml.grad(circuit, [0, 1])
autograd_grad = dcircuit(phi, theta)
with tf.GradientTape() as g:
g.watch([phi_t, theta_t])
y = circuit_tf(phi_t, theta_t)
tf_grad = g.gradient(y, [phi_t, theta_t])
assert np.allclose(autograd_grad[0], tf_grad[0], atol=tol, rtol=0)
assert np.allclose(autograd_grad[1], tf_grad[1], atol=tol, rtol=0)
gradient_test_data = [
(0.5, -0.1),
(0.0, np.pi),
(-3.6, -3.6),
(1.0, 2.5),
]
@pytest.mark.usefixtures("skip_if_no_tf_support")
class TestTFGradients:
"""Integration tests involving gradients of QNodes and hybrid computations using the tf interface"""
@pytest.fixture
def qnodes(self):
"""Two QNodes to be used for the gradient tests"""
dev = qml.device("default.qubit", wires=2)
@qml.qnode(dev, interface="tf")
def f(x):
qml.RX(x, wires=0)
return qml.expval(qml.PauliZ(0))
@qml.qnode(dev, interface="tf")
def g(y):
qml.RY(y, wires=0)
return qml.expval(qml.PauliX(0))
return f, g
@pytest.mark.parametrize("x, y", gradient_test_data)
def test_addition_qnodes_gradient(self, qnodes, x, y):
"""Test the gradient of addition of two QNode circuits"""
f, g = qnodes
def add(a, b):
return a + b
xt = Variable(x)
yt = Variable(y)
# addition
with tf.GradientTape() as tape:
tape.watch([xt, yt])
a = f(xt)
b = g(yt)
y = add(a, b)
grad = tape.gradient(y, [a, b])
assert grad[0].numpy() == 1.0
assert grad[1].numpy() == 1.0
# same tensor added to itself
with tf.GradientTape() as tape:
tape.watch([xt, yt])
a = f(xt)
y = add(a, a)
grad = tape.gradient(y, [a, a])
assert grad[0].numpy() == 2.0
assert grad[1].numpy() == 2.0
# different qnodes with same input parameter added together
with tf.GradientTape() as tape:
tape.watch([xt, yt])
a = f(xt)
b = g(xt)
y = add(a, b)
grad = tape.gradient(y, [a, b])
assert grad[0].numpy() == 1.0
assert grad[1].numpy() == 1.0
@pytest.mark.parametrize("x, y", gradient_test_data)
def test_subtraction_qnodes_gradient(self, qnodes, x, y):
"""Test the gradient of subtraction of two QNode circuits"""
f, g = qnodes
def subtract(a, b):
return a - b
xt = Variable(x)
yt = Variable(y)
# subtraction
with tf.GradientTape() as tape:
tape.watch([xt, yt])
a = f(xt)
b = g(yt)
y = subtract(a, b)
grad = tape.gradient(y, [a, b])
assert grad[0].numpy() == 1.0
assert grad[1].numpy() == -1.0
@pytest.mark.parametrize("x, y", gradient_test_data)
def test_multiplication_qnodes_gradient(self, qnodes, x, y):
"""Test the gradient of multiplication of two QNode circuits"""
f, g = qnodes
def mult(a, b):
return a * b
xt = Variable(x)
yt = Variable(y)
# multiplication
with tf.GradientTape() as tape:
tape.watch([xt, yt])
a = f(xt)
b = g(yt)
y = mult(a, b)
grad = tape.gradient(y, [a, b])
assert grad[0].numpy() == b.numpy()
assert grad[1].numpy() == a.numpy()
@pytest.mark.parametrize("x, y", gradient_test_data)
def test_division_qnodes_gradient(self, qnodes, x, y, tol):
"""Test the gradient of division of two QNode circuits"""
f, g = qnodes
def div(a, b):
return a / b
xt = Variable(x)
yt = Variable(y)
# division
with tf.GradientTape() as tape:
tape.watch([xt, yt])
a = f(xt)
b = g(yt)
y = div(a, b)
grad = tape.gradient(y, [a, b])
assert grad[0].numpy() == 1 / b.numpy()
assert np.allclose(grad[1].numpy(), -a.numpy() / b.numpy() ** 2, atol=tol, rtol=0)
@pytest.mark.parametrize("x, y", gradient_test_data)
def test_composition_qnodes_gradient(self, qnodes, x, y):
"""Test the gradient of composition of two QNode circuits"""
f, g = qnodes
xt = Variable(x)
yt = Variable(y)
# compose function with xt as input
with tf.GradientTape() as tape:
tape.watch([xt])
y = f(xt)
grad1 = tape.gradient(y, xt)
with tf.GradientTape() as tape:
tape.watch([xt])
y = f(xt)
grad2 = tape.gradient(y, xt)
assert tf.equal(grad1, grad2)
# compose function with a as input
with tf.GradientTape() as tape:
tape.watch([xt])
a = f(xt)
y = f(a)
grad1 = tape.gradient(y, a)
with tf.GradientTape() as tape:
tape.watch([xt])
a = f(xt)
y = f(a)
grad2 = tape.gradient(y, a)
assert tf.equal(grad1, grad2)
# compose function with b as input
with tf.GradientTape() as tape:
tape.watch([xt])
b = g(xt)
y = g(b)
grad1 = tape.gradient(y, b)
with tf.GradientTape() as tape:
tape.watch([xt])
b = g(xt)
y = g(b)
grad2 = tape.gradient(y, b)
assert tf.equal(grad1, grad2)
| 35.577808 | 115 | 0.588924 | [
"MIT"
] | MattePalte/Bugs-Quantum-Computing-Platforms | artifacts/old_dataset_versions/minimal_commits/pennylane/pennylane#385/after/test_tf.py | 26,292 | Python |
from django import forms
from utilities.forms import BootstrapMixin, ExpandableIPAddressField
__all__ = (
'IPAddressBulkCreateForm',
)
class IPAddressBulkCreateForm(BootstrapMixin, forms.Form):
pattern = ExpandableIPAddressField(
label='Address pattern'
)
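# Usage sketch (illustrative): the pattern field accepts an expandable address
# pattern, e.g. something like "192.0.2.[1-254]/24", which NetBox's bulk-create
# view expands into individual IPAddress forms; the exact pattern syntax is
# defined by ExpandableIPAddressField, not here.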
| 20 | 68 | 0.764286 | [
"Apache-2.0"
] | AS207960/netbox | netbox/ipam/forms/bulk_create.py | 280 | Python |
# Version of the library that will be used to upload to pypi
__version__ = "0.28.0.dev0"
# Git tag that will be checked to determine whether to trigger upload to pypi
__release_tag__ = None
| 31.833333 | 77 | 0.769634 | [
"Apache-2.0"
] | AKIRA-natsu/ml-agents | gym-unity/gym_unity/__init__.py | 191 | Python |
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for L{twisted.web.distrib}.
"""
from os.path import abspath
from xml.dom.minidom import parseString
try:
import pwd
except ImportError:
pwd = None
from zope.interface.verify import verifyObject
from twisted.python import filepath, failure
from twisted.internet import reactor, defer
from twisted.trial import unittest
from twisted.spread import pb
from twisted.spread.banana import SIZE_LIMIT
from twisted.web import distrib, client, resource, static, server
from twisted.web.test.test_web import DummyRequest, DummyChannel
from twisted.web.test._util import _render
from twisted.test import proto_helpers
from twisted.web.http_headers import Headers
from twisted.logger import globalLogPublisher
class MySite(server.Site):
pass
class PBServerFactory(pb.PBServerFactory):
"""
A PB server factory which keeps track of the most recent protocol it
created.
@ivar proto: L{None} or the L{Broker} instance most recently returned
from C{buildProtocol}.
"""
proto = None
def buildProtocol(self, addr):
self.proto = pb.PBServerFactory.buildProtocol(self, addr)
return self.proto
class ArbitraryError(Exception):
"""
An exception for this test.
"""
class DistribTests(unittest.TestCase):
port1 = None
port2 = None
sub = None
f1 = None
def tearDown(self):
"""
Clean up all the event sources left behind by either directly by
test methods or indirectly via some distrib API.
"""
dl = [defer.Deferred(), defer.Deferred()]
if self.f1 is not None and self.f1.proto is not None:
self.f1.proto.notifyOnDisconnect(lambda: dl[0].callback(None))
else:
dl[0].callback(None)
if self.sub is not None and self.sub.publisher is not None:
self.sub.publisher.broker.notifyOnDisconnect(
lambda: dl[1].callback(None))
self.sub.publisher.broker.transport.loseConnection()
else:
dl[1].callback(None)
if self.port1 is not None:
dl.append(self.port1.stopListening())
if self.port2 is not None:
dl.append(self.port2.stopListening())
return defer.gatherResults(dl)
def testDistrib(self):
# site1 is the publisher
r1 = resource.Resource()
r1.putChild(b"there", static.Data(b"root", "text/plain"))
site1 = server.Site(r1)
self.f1 = PBServerFactory(distrib.ResourcePublisher(site1))
self.port1 = reactor.listenTCP(0, self.f1)
self.sub = distrib.ResourceSubscription("127.0.0.1",
self.port1.getHost().port)
r2 = resource.Resource()
r2.putChild(b"here", self.sub)
f2 = MySite(r2)
self.port2 = reactor.listenTCP(0, f2)
agent = client.Agent(reactor)
url = "http://127.0.0.1:{}/here/there".format(
self.port2.getHost().port)
url = url.encode("ascii")
d = agent.request(b"GET", url)
d.addCallback(client.readBody)
d.addCallback(self.assertEqual, b'root')
return d
def _setupDistribServer(self, child):
"""
Set up a resource on a distrib site using L{ResourcePublisher}.
@param child: The resource to publish using distrib.
@return: A tuple consisting of the host and port on which to contact
the created site.
"""
distribRoot = resource.Resource()
distribRoot.putChild(b"child", child)
distribSite = server.Site(distribRoot)
self.f1 = distribFactory = PBServerFactory(
distrib.ResourcePublisher(distribSite))
distribPort = reactor.listenTCP(
0, distribFactory, interface="127.0.0.1")
self.addCleanup(distribPort.stopListening)
addr = distribPort.getHost()
self.sub = mainRoot = distrib.ResourceSubscription(
addr.host, addr.port)
mainSite = server.Site(mainRoot)
mainPort = reactor.listenTCP(0, mainSite, interface="127.0.0.1")
self.addCleanup(mainPort.stopListening)
mainAddr = mainPort.getHost()
return mainPort, mainAddr
def _requestTest(self, child, **kwargs):
"""
Set up a resource on a distrib site using L{ResourcePublisher} and
then retrieve it from a L{ResourceSubscription} via an HTTP client.
@param child: The resource to publish using distrib.
@param **kwargs: Extra keyword arguments to pass to L{Agent.request} when
requesting the resource.
@return: A L{Deferred} which fires with the result of the request.
"""
mainPort, mainAddr = self._setupDistribServer(child)
agent = client.Agent(reactor)
url = "http://%s:%s/child" % (mainAddr.host, mainAddr.port)
url = url.encode("ascii")
d = agent.request(b"GET", url, **kwargs)
d.addCallback(client.readBody)
return d
def _requestAgentTest(self, child, **kwargs):
"""
Set up a resource on a distrib site using L{ResourcePublisher} and
then retrieve it from a L{ResourceSubscription} via an HTTP client.
@param child: The resource to publish using distrib.
@param **kwargs: Extra keyword arguments to pass to L{Agent.request} when
requesting the resource.
@return: A L{Deferred} which fires with a tuple consisting of a
L{twisted.test.proto_helpers.AccumulatingProtocol} containing the
body of the response and an L{IResponse} with the response itself.
"""
mainPort, mainAddr = self._setupDistribServer(child)
url = "http://{}:{}/child".format(mainAddr.host, mainAddr.port)
url = url.encode("ascii")
d = client.Agent(reactor).request(b"GET", url, **kwargs)
def cbCollectBody(response):
protocol = proto_helpers.AccumulatingProtocol()
response.deliverBody(protocol)
d = protocol.closedDeferred = defer.Deferred()
d.addCallback(lambda _: (protocol, response))
return d
d.addCallback(cbCollectBody)
return d
def test_requestHeaders(self):
"""
The request headers are available on the request object passed to a
distributed resource's C{render} method.
"""
requestHeaders = {}
logObserver = proto_helpers.EventLoggingObserver()
globalLogPublisher.addObserver(logObserver)
req = [None]
class ReportRequestHeaders(resource.Resource):
def render(self, request):
req[0] = request
requestHeaders.update(dict(
request.requestHeaders.getAllRawHeaders()))
return b""
def check_logs():
msgs = [e["log_format"] for e in logObserver]
self.assertIn('connected to publisher', msgs)
self.assertIn(
"could not connect to distributed web service: {msg}",
msgs
)
self.assertIn(req[0], msgs)
globalLogPublisher.removeObserver(logObserver)
request = self._requestTest(
ReportRequestHeaders(), headers=Headers({'foo': ['bar']}))
def cbRequested(result):
self.f1.proto.notifyOnDisconnect(check_logs)
self.assertEqual(requestHeaders[b'Foo'], [b'bar'])
request.addCallback(cbRequested)
return request
def test_requestResponseCode(self):
"""
The response code can be set by the request object passed to a
distributed resource's C{render} method.
"""
class SetResponseCode(resource.Resource):
def render(self, request):
request.setResponseCode(200)
return ""
request = self._requestAgentTest(SetResponseCode())
def cbRequested(result):
self.assertEqual(result[0].data, b"")
self.assertEqual(result[1].code, 200)
self.assertEqual(result[1].phrase, b"OK")
request.addCallback(cbRequested)
return request
def test_requestResponseCodeMessage(self):
"""
The response code and message can be set by the request object passed to
a distributed resource's C{render} method.
"""
class SetResponseCode(resource.Resource):
def render(self, request):
request.setResponseCode(200, b"some-message")
return ""
request = self._requestAgentTest(SetResponseCode())
def cbRequested(result):
self.assertEqual(result[0].data, b"")
self.assertEqual(result[1].code, 200)
self.assertEqual(result[1].phrase, b"some-message")
request.addCallback(cbRequested)
return request
def test_largeWrite(self):
"""
If a string longer than the Banana size limit is passed to the
L{distrib.Request} passed to the remote resource, it is broken into
smaller strings to be transported over the PB connection.
"""
class LargeWrite(resource.Resource):
def render(self, request):
request.write(b'x' * SIZE_LIMIT + b'y')
request.finish()
return server.NOT_DONE_YET
request = self._requestTest(LargeWrite())
request.addCallback(self.assertEqual, b'x' * SIZE_LIMIT + b'y')
return request
def test_largeReturn(self):
"""
Like L{test_largeWrite}, but for the case where C{render} returns a
long string rather than explicitly passing it to L{Request.write}.
"""
class LargeReturn(resource.Resource):
def render(self, request):
return b'x' * SIZE_LIMIT + b'y'
request = self._requestTest(LargeReturn())
request.addCallback(self.assertEqual, b'x' * SIZE_LIMIT + b'y')
return request
def test_connectionLost(self):
"""
If there is an error issuing the request to the remote publisher, an
error response is returned.
"""
# Using pb.Root as a publisher will cause request calls to fail with an
# error every time. Just what we want to test.
self.f1 = serverFactory = PBServerFactory(pb.Root())
self.port1 = serverPort = reactor.listenTCP(0, serverFactory)
self.sub = subscription = distrib.ResourceSubscription(
"127.0.0.1", serverPort.getHost().port)
request = DummyRequest([b''])
d = _render(subscription, request)
def cbRendered(ignored):
self.assertEqual(request.responseCode, 500)
# This is the error we caused the request to fail with. It should
# have been logged.
errors = self.flushLoggedErrors(pb.NoSuchMethod)
self.assertEqual(len(errors), 1)
# The error page is rendered as HTML.
expected = [
b'',
b'<html>',
b' <head><title>500 - Server Connection Lost</title></head>',
b' <body>',
b' <h1>Server Connection Lost</h1>',
b' <p>Connection to distributed server lost:'
b'<pre>'
b'[Failure instance: Traceback from remote host -- '
b'twisted.spread.flavors.NoSuchMethod: '
b'No such method: remote_request',
b']</pre></p>',
b' </body>',
b'</html>',
b''
]
self.assertEqual([b'\n'.join(expected)], request.written)
d.addCallback(cbRendered)
return d
def test_logFailed(self):
"""
When a request fails, the string form of the failure is logged.
"""
logObserver = proto_helpers.EventLoggingObserver.createWithCleanup(
self,
globalLogPublisher
)
f = failure.Failure(ArbitraryError())
request = DummyRequest([b''])
issue = distrib.Issue(request)
issue.failed(f)
self.assertEquals(1, len(logObserver))
self.assertIn(
"Failure instance",
logObserver[0]["log_format"]
)
def test_requestFail(self):
"""
When L{twisted.web.distrib.Request}'s fail is called, the failure
is logged.
"""
logObserver = proto_helpers.EventLoggingObserver.createWithCleanup(
self,
globalLogPublisher
)
err = ArbitraryError()
f = failure.Failure(err)
req = distrib.Request(DummyChannel())
req.fail(f)
self.flushLoggedErrors(ArbitraryError)
self.assertEquals(1, len(logObserver))
self.assertIs(logObserver[0]["log_failure"], f)
class _PasswordDatabase:
    """
    A minimal stand-in for the L{pwd} module interface, backed by a fixed list
    of passwd-style user tuples.
    """
def __init__(self, users):
self._users = users
def getpwall(self):
return iter(self._users)
def getpwnam(self, username):
for user in self._users:
if user[0] == username:
return user
raise KeyError()
class UserDirectoryTests(unittest.TestCase):
"""
Tests for L{UserDirectory}, a resource for listing all user resources
available on a system.
"""
def setUp(self):
self.alice = ('alice', 'x', 123, 456, 'Alice,,,', self.mktemp(), '/bin/sh')
self.bob = ('bob', 'x', 234, 567, 'Bob,,,', self.mktemp(), '/bin/sh')
self.database = _PasswordDatabase([self.alice, self.bob])
self.directory = distrib.UserDirectory(self.database)
def test_interface(self):
"""
L{UserDirectory} instances provide L{resource.IResource}.
"""
self.assertTrue(verifyObject(resource.IResource, self.directory))
def _404Test(self, name):
"""
Verify that requesting the C{name} child of C{self.directory} results
in a 404 response.
"""
request = DummyRequest([name])
result = self.directory.getChild(name, request)
d = _render(result, request)
def cbRendered(ignored):
self.assertEqual(request.responseCode, 404)
d.addCallback(cbRendered)
return d
def test_getInvalidUser(self):
"""
L{UserDirectory.getChild} returns a resource which renders a 404
response when passed a string which does not correspond to any known
user.
"""
return self._404Test('carol')
def test_getUserWithoutResource(self):
"""
L{UserDirectory.getChild} returns a resource which renders a 404
response when passed a string which corresponds to a known user who has
neither a user directory nor a user distrib socket.
"""
return self._404Test('alice')
def test_getPublicHTMLChild(self):
"""
L{UserDirectory.getChild} returns a L{static.File} instance when passed
the name of a user with a home directory containing a I{public_html}
directory.
"""
home = filepath.FilePath(self.bob[-2])
public_html = home.child('public_html')
public_html.makedirs()
request = DummyRequest(['bob'])
result = self.directory.getChild('bob', request)
self.assertIsInstance(result, static.File)
self.assertEqual(result.path, public_html.path)
def test_getDistribChild(self):
"""
L{UserDirectory.getChild} returns a L{ResourceSubscription} instance
when passed the name of a user suffixed with C{".twistd"} who has a
home directory containing a I{.twistd-web-pb} socket.
"""
home = filepath.FilePath(self.bob[-2])
home.makedirs()
web = home.child('.twistd-web-pb')
request = DummyRequest(['bob'])
result = self.directory.getChild('bob.twistd', request)
self.assertIsInstance(result, distrib.ResourceSubscription)
self.assertEqual(result.host, 'unix')
self.assertEqual(abspath(result.port), web.path)
def test_invalidMethod(self):
"""
L{UserDirectory.render} raises L{UnsupportedMethod} in response to a
non-I{GET} request.
"""
request = DummyRequest([''])
request.method = 'POST'
self.assertRaises(
server.UnsupportedMethod, self.directory.render, request)
def test_render(self):
"""
L{UserDirectory} renders a list of links to available user content
in response to a I{GET} request.
"""
public_html = filepath.FilePath(self.alice[-2]).child('public_html')
public_html.makedirs()
web = filepath.FilePath(self.bob[-2])
web.makedirs()
# This really only works if it's a unix socket, but the implementation
# doesn't currently check for that. It probably should someday, and
# then skip users with non-sockets.
web.child('.twistd-web-pb').setContent(b"")
request = DummyRequest([''])
result = _render(self.directory, request)
def cbRendered(ignored):
document = parseString(b''.join(request.written))
# Each user should have an li with a link to their page.
[alice, bob] = document.getElementsByTagName('li')
self.assertEqual(alice.firstChild.tagName, 'a')
self.assertEqual(alice.firstChild.getAttribute('href'), 'alice/')
self.assertEqual(alice.firstChild.firstChild.data, 'Alice (file)')
self.assertEqual(bob.firstChild.tagName, 'a')
self.assertEqual(bob.firstChild.getAttribute('href'), 'bob.twistd/')
self.assertEqual(bob.firstChild.firstChild.data, 'Bob (twistd)')
result.addCallback(cbRendered)
return result
def test_passwordDatabase(self):
"""
If L{UserDirectory} is instantiated with no arguments, it uses the
L{pwd} module as its password database.
"""
directory = distrib.UserDirectory()
self.assertIdentical(directory._pwd, pwd)
if pwd is None:
test_passwordDatabase.skip = "pwd module required"
| 34.636364 | 83 | 0.61308 | [
"MIT"
] | 12123ads/learn_python3_spider | stackoverflow/venv/lib/python3.6/site-packages/twisted/web/test/test_distrib.py | 18,288 | Python |
from datetime import datetime
from chess_game.daos.player_dao import PlayerDao
from chess_game.models.player import Player
def test_player_dao_init(mongo_database):
player_dao = PlayerDao(mongo_database)
assert mongo_database == player_dao._mongo_database
def test_dao_create_and_find_player(mongo_database):
start_date = datetime.now()
player = Player(name="_Obi", stats={}, games=[], start_date=start_date)
player_dao = PlayerDao(mongo_database)
player_id = player_dao.create(player)
loaded_player = player_dao.find_by_id(player_id)
assert loaded_player['_id']
assert "_Obi" == loaded_player['name']
assert {} == loaded_player['stats']
assert [] == loaded_player['games']
assert f'{start_date:%Y-%m-%d %H:%M:%S}' == loaded_player['start_date']
def test_dao_create_and_find_players(mongo_database):
player = Player()
player_dao = PlayerDao(mongo_database)
player_dao.create(player)
player_id = player_dao.create(player)
loaded_players = player_dao.find_all()
assert len(loaded_players) > 1
assert len([player for player in loaded_players if player_id == str(player['_id'])])
| 31.432432 | 88 | 0.736028 | [
"MIT"
] | jrj92280/python-eve-backend | test/integration/daos/test_player_dao.py | 1,163 | Python |
import pathlib
from setuptools import setup
here = pathlib.Path(__file__).parent.resolve()
# Get the long description from the README file
long_description = (here / "README.md").read_text(encoding="utf-8")
setup(
name="MCsniperPY",
version="0.20.6",
description="Minecraft name sniper written in Python",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/MCsniperPY/MCsniperPY",
author="Kqzz",
license="MIT",
packages=["mcsniperpy", "mcsniperpy.util", "mcsniperpy.util.classes"],
install_requires=["typer", "aiohttp", "colorama", "bs4"],
entry_points={"console_scripts": ["mcsniperpy=mcsniperpy.cli:cli"]},
python_requires=">=3.8",
classifiers=[
"Development Status :: 4 - Beta",
"License :: OSI Approved :: MIT License", # Again, pick a license
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
],
)
| 33.733333 | 75 | 0.652174 | [
"MIT"
] | tropicbliss/MCsniperPY | setup.py | 1,012 | Python |
from typing import List
class Solution:
def minPathSum(self, grid: List[List[int]]) -> int:
m = len(grid)
if m == 0:
return 0
if m == 1:
return sum(grid[0])
n = len(grid[0])
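        # dp[y][x] holds the minimum path sum from (0, 0) to column x, row y;
        # each cell extends the cheaper of its top neighbour dp[y - 1][x] or
        # left neighbour dp[y][x - 1], as computed in the loop below.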
dp = [[0] * n for _ in range(m)]
for x in range(n):
for y in range(m):
if x == y == 0:
dp[0][0] = grid[0][0]
elif x == 0:
dp[y][x] = dp[y - 1][x] + grid[y][x]
elif y == 0:
dp[y][x] = dp[y][x - 1] + grid[y][x]
else:
dp[y][x] = min(dp[y][x - 1], dp[y - 1][x]) + grid[y][x]
return dp[-1][-1]
s = Solution()
print(s.minPathSum([[1, 3, 1], [1, 5, 1], [4, 2, 1]]))
print(s.minPathSum([[1, 3, 1]]))
print(s.minPathSum([]))
print(s.minPathSum([[1, 2, 5], [3, 2, 1]]))
| 24.527778 | 75 | 0.383918 | [
"MIT"
] | Sanster/LeetCode | 064_minimum_path_sum.py | 883 | Python |
'''
This module hooks fast.ai Learners to Weights & Biases through a callback.
Requested logged data can be configured through the callback constructor.
Examples:
WandbCallback can be used when initializing the Learner::
```
from wandb.fastai import WandbCallback
[...]
learn = Learner(data, ..., callback_fns=WandbCallback)
learn.fit(epochs)
```
Custom parameters can be given using functools.partial::
```
from wandb.fastai import WandbCallback
from functools import partial
[...]
learn = Learner(data, ..., callback_fns=partial(WandbCallback, ...))
learn.fit(epochs)
```
Finally, it is possible to use WandbCallback only when starting
training. In this case it must be instantiated::
```
learn.fit(..., callbacks=WandbCallback(learn))
```
or, with custom parameters::
```
learn.fit(..., callbacks=WandbCallback(learn, ...))
```
'''
import wandb
import fastai
from fastai.callbacks import TrackerCallback
from pathlib import Path
import random
try:
import matplotlib
matplotlib.use('Agg') # non-interactive backend (avoid tkinter issues)
import matplotlib.pyplot as plt
except:
print('Warning: matplotlib required if logging sample image predictions')
class WandbCallback(TrackerCallback):
"""
Automatically saves model topology, losses & metrics.
Optionally logs weights, gradients, sample predictions and best trained model.
Args:
learn (fastai.basic_train.Learner): the fast.ai learner to hook.
log (str): "gradients", "parameters", "all", or None. Losses & metrics are always logged.
save_model (bool): save model at the end of each epoch. It will also load best model at the end of training.
monitor (str): metric to monitor for saving best model. None uses default TrackerCallback monitor value.
mode (str): "auto", "min" or "max" to compare "monitor" values and define best model.
input_type (str): "images" or None. Used to display sample predictions.
validation_data (list): data used for sample predictions if input_type is set.
predictions (int): number of predictions to make if input_type is set and validation_data is None.
seed (int): initialize random generator for sample predictions if input_type is set and validation_data is None.
"""
# Record if watch has been called previously (even in another instance)
_watch_called = False
def __init__(self,
learn,
log="gradients",
save_model=True,
monitor=None,
mode='auto',
input_type=None,
validation_data=None,
predictions=36,
seed=12345):
# Check if wandb.init has been called
if wandb.run is None:
raise ValueError(
'You must call wandb.init() before WandbCallback()')
# Adapted from fast.ai "SaveModelCallback"
if monitor is None:
# use default TrackerCallback monitor value
super().__init__(learn, mode=mode)
else:
super().__init__(learn, monitor=monitor, mode=mode)
self.save_model = save_model
self.model_path = Path(wandb.run.dir) / 'bestmodel.pth'
self.log = log
self.input_type = input_type
self.best = None
# Select items for sample predictions to see evolution along training
self.validation_data = validation_data
if input_type and not self.validation_data:
wandbRandom = random.Random(seed) # For repeatability
predictions = min(predictions, len(learn.data.valid_ds))
indices = wandbRandom.sample(range(len(learn.data.valid_ds)),
predictions)
self.validation_data = [learn.data.valid_ds[i] for i in indices]
def on_train_begin(self, **kwargs):
"Call watch method to log model topology, gradients & weights"
# Set self.best, method inherited from "TrackerCallback" by "SaveModelCallback"
super().on_train_begin()
# Ensure we don't call "watch" multiple times
if not WandbCallback._watch_called:
WandbCallback._watch_called = True
# Logs model topology and optionally gradients and weights
wandb.watch(self.learn.model, log=self.log)
def on_epoch_end(self, epoch, smooth_loss, last_metrics, **kwargs):
"Logs training loss, validation loss and custom metrics & log prediction samples & save model"
if self.save_model:
# Adapted from fast.ai "SaveModelCallback"
current = self.get_monitor_value()
if current is not None and self.operator(current, self.best):
print(
'Better model found at epoch {} with {} value: {}.'.format(
epoch, self.monitor, current))
self.best = current
# Save within wandb folder
with self.model_path.open('wb') as model_file:
self.learn.save(model_file)
# Log sample predictions if learn.predict is available
if self.validation_data:
try:
self._wandb_log_predictions()
except FastaiError as e:
wandb.termwarn(e.message)
self.validation_data = None # prevent from trying again on next loop
except Exception as e:
wandb.termwarn("Unable to log prediction samples.\n{}".format(e))
self.validation_data=None # prevent from trying again on next loop
# Log losses & metrics
# Adapted from fast.ai "CSVLogger"
logs = {
name: stat
for name, stat in list(
zip(self.learn.recorder.names, [epoch, smooth_loss] +
last_metrics))
}
wandb.log(logs)
def on_train_end(self, **kwargs):
"Load the best model."
if self.save_model:
# Adapted from fast.ai "SaveModelCallback"
if self.model_path.is_file():
with self.model_path.open('rb') as model_file:
self.learn.load(model_file, purge=False)
print('Loaded best saved model from {}'.format(
self.model_path))
def _wandb_log_predictions(self):
"Log prediction samples"
pred_log = []
for x, y in self.validation_data:
try:
pred=self.learn.predict(x)
except:
raise FastaiError('Unable to run "predict" method from Learner to log prediction samples.')
# scalar -> likely to be a category
if not pred[1].shape:
pred_log.append(
wandb.Image(
x.data,
caption='Ground Truth: {}\nPrediction: {}'.format(
y, pred[0])))
# most vision datasets have a "show" function we can use
elif hasattr(x, "show"):
# log input data
pred_log.append(
wandb.Image(x.data, caption='Input data', grouping=3))
# log label and prediction
for im, capt in ((pred[0], "Prediction"),
(y, "Ground Truth")):
# Resize plot to image resolution
# from https://stackoverflow.com/a/13714915
my_dpi = 100
fig = plt.figure(frameon=False, dpi=my_dpi)
h, w = x.size
fig.set_size_inches(w / my_dpi, h / my_dpi)
ax = plt.Axes(fig, [0., 0., 1., 1.])
ax.set_axis_off()
fig.add_axes(ax)
# Superpose label or prediction to input image
x.show(ax=ax, y=im)
pred_log.append(wandb.Image(fig, caption=capt))
plt.close(fig)
# likely to be an image
elif hasattr(y, "shape") and (
(len(y.shape) == 2) or
(len(y.shape) == 3 and y.shape[0] in [1, 3, 4])):
pred_log.extend([
wandb.Image(x.data, caption='Input data', grouping=3),
wandb.Image(pred[0].data, caption='Prediction'),
wandb.Image(y.data, caption='Ground Truth')
])
# we just log input data
else:
pred_log.append(wandb.Image(x.data, caption='Input data'))
wandb.log({"Prediction Samples": pred_log}, commit=False)
class FastaiError(wandb.Error):
pass
| 37.944206 | 120 | 0.575274 | [
"Apache-2.0"
] | MPGek/client | wandb/fastai/__init__.py | 8,841 | Python |
import os
from setuptools import setup, Extension
from setuptools.command.build_ext import build_ext
from Cython.Distutils import build_ext
import numpy as np
from os.path import join as pjoin
from setup_cuda import cuda_setup
mpi_compile_args = os.popen("mpic++ --showme:compile").read().strip().split(' ')
mpi_link_args = os.popen("mpic++ --showme:link").read().strip().split(' ')
def find_in_path(name, path):
"""Find a file in a search path"""
# Adapted fom http://code.activestate.com/recipes/52224
for dir in path.split(os.pathsep):
binpath = pjoin(dir, name)
if os.path.exists(binpath):
return os.path.abspath(binpath)
return None
try:
numpy_include = np.get_include()
except AttributeError:
numpy_include = np.get_numpy_include()
nvcc = find_in_path('nvcc', os.environ['PATH'])
if isinstance(nvcc, str):
print('CUDA')
# setup(name='PackageName',
# author='Nina Herrmann',
# version='1.0',
# description='This is a package for Muesli',
# ext_modules=cythonize(cuda_setup.get_module()),
# cmdclass={'build_ext': cuda_setup.custom_build_ext()}
# )
else:
module = Extension('_da', sources=['da.cxx', 'da_wrap.cxx'],
include_dirs=[np.get_include(), 'src'],
library_dirs=['/usr/include/boost/'],
language="c++",
swig_opts=['-c++'],
libraries=['/usr/include/boost/chrono'],
extra_compile_args=(["-fopenmp"] + mpi_compile_args),
extra_link_args=(["-fopenmp"] + mpi_link_args)
)
setup(name='da',
author='Nina Herrmann',
version='1.0',
description='This is a package for Muesli',
ext_modules=[module],
py_modules=["da"]
)
| 33.333333 | 80 | 0.587895 | [
"MIT"
] | NinaHerrmann/muesli2py | swig_muesli/muesli/da/setup_da.py | 1,900 | Python |
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
import numpy as np
from sklearn.decomposition import PCA
from reco_utils.dataset.download_utils import maybe_download
from IPython import embed
def length_normalize(matrix):
"""Length normalize the matrix
Args:
matrix (np.ndarray): Input matrix that needs to be normalized
Returns:
Normalized matrix
"""
norms = np.sqrt(np.sum(matrix**2, axis=1))
norms[norms == 0] = 1
return matrix / norms[:, np.newaxis]
def mean_center(matrix):
"""Performs mean centering across axis 0
Args:
        matrix (np.ndarray): Input matrix that needs to be mean centered (modified in place)
"""
avg = np.mean(matrix, axis=0)
matrix -= avg
def reduce_dims(matrix, target_dim):
"""Reduce dimensionality of the data using PCA.
Args:
        matrix (np.ndarray): Matrix of the form (n_samples, n_features)
        target_dim (uint): Dimension to which n_features should be reduced.
    Returns:
        Matrix projected onto the first target_dim principal components.
    """
model = PCA(n_components=target_dim)
model.fit(matrix)
return model.transform(matrix)
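# Minimal usage sketch (illustrative, not part of the original module): the
# array shape and target dimension below are arbitrary example values.
if __name__ == "__main__":
    features = np.random.rand(100, 50)
    mean_center(features)                  # centers each column in place
    features = length_normalize(features)  # rescales each row to unit L2 norm
    reduced = reduce_dims(features, target_dim=10)
    print(reduced.shape)                   # expected: (100, 10)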
| 24.977778 | 78 | 0.685943 | [
"MIT"
] | 154King154/recommenders | reco_utils/recommender/geoimc/geoimc_utils.py | 1,124 | Python |
from common import IssueProcess, Common
from typing import Any, List
import os
# assignees who will be assigned to handle issues
_GO_OWNER = {'ArcturusZhang'}
# 'github assignee': 'token'
_ASSIGNEE_TOKEN_GO = {'ArcturusZhang': os.getenv('AZURESDK_BOT_TOKEN')}
class IssueProcessGo(IssueProcess):
pass
class Go(Common):
def __init__(self, issues, assignee_token, language_owner):
super(Go, self).__init__(issues, assignee_token, language_owner)
self.file_out_name = 'release_go_status.md'
def go_process(issues: List[Any]):
instance = Go(issues, _ASSIGNEE_TOKEN_GO, _GO_OWNER)
instance.run()
| 25.48 | 72 | 0.744113 | [
"MIT"
] | AFengKK/azure-sdk-for-python | scripts/release_helper/go.py | 637 | Python |
from airflow.hooks.base_hook import BaseHook
class AzureBlobStorageCredentials(BaseHook):
def __init__(self, conn_id="azure_blob_storage_default"):
self.conn_id = conn_id
def get_credentials(self):
connection_object = self.get_connection(self.conn_id)
extras = connection_object.extra_dejson
credentials = dict()
if connection_object.login:
credentials["account_name"] = connection_object.login
if connection_object.password:
credentials["account_key"] = connection_object.password
credentials.update(extras)
return credentials
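# Usage sketch (illustrative): resolve the credentials stored on an Airflow
# connection and hand them to an Azure storage client. The connection id must
# exist in the Airflow metadata database; the client constructor below is an
# assumption for the example, not part of this module.
#
#     credentials = AzureBlobStorageCredentials("azure_blob_storage_default").get_credentials()
#     # e.g. BlockBlobService(**credentials)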
| 33.052632 | 67 | 0.710191 | [
"Apache-2.0"
] | FHoffmannCode/dbnd | modules/dbnd-airflow/src/dbnd_airflow_contrib/credentials_helper_azure.py | 628 | Python |
# Generated by Django 3.2.3 on 2021-05-19 08:50
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('api', '0002_auto_20210519_0849'),
]
operations = [
migrations.AlterField(
model_name='profile',
name='year',
field=models.CharField(blank=True, choices=[('FR', 'Freshman'), ('SO', 'Sophomore'), ('JR', 'Junior'), ('SR', 'Senior')], default='FR', max_length=2, verbose_name='year'),
),
]
| 27.052632 | 183 | 0.589494 | [
"MIT"
] | EricLiclair/testapi | api/migrations/0003_alter_profile_year.py | 514 | Python |
# -*- coding: utf-8 -*-
import io
import sys
import textwrap
from itertools import chain
from pprint import pprint
import pytest
import canmatrix.canmatrix
import canmatrix.formats.sym
def test_colliding_mux_values():
f = io.BytesIO(
textwrap.dedent(
'''\
FormatVersion=5.0 // Do not edit this line!
Title="a file"
{SEND}
[MuxedId]
ID=0h
Mux=TheMux 0,1 0h
Var=Signal unsigned 1,1
[MuxedId]
Mux=FirstMux 0,1 1h
Var=Signal unsigned 1,1
[MuxedId]
Mux=SecondMux 0,1 1h
Var=Signal unsigned 1,1
''',
).encode('utf-8'),
)
matrix = canmatrix.formats.sym.load(f)
error, = matrix.load_errors
line_number = 16
assert len(matrix.load_errors) == 1
assert isinstance(error, canmatrix.formats.sym.DuplicateMuxIdError)
assert error.line_number == line_number
error_string = str(error)
assert error_string.startswith(
'line {line_number}: '.format(line_number=line_number),
)
assert 'FirstMux' in error_string
assert 'SecondMux' in error_string
def test_parse_longname_with_colon():
f = io.BytesIO(
textwrap.dedent(
'''\
FormatVersion=5.0 // Do not edit this line!
Title="a file"
{SEND}
[pass]
DLC=8
Var=Password unsigned 16,16 /ln:"Access Level : Password"
''',
).encode('utf-8'),
)
matrix = canmatrix.formats.sym.load(f)
frame = matrix.frames[0]
signal = frame.signals[0]
assert signal.attributes['LongName'] == 'Access Level : Password'
@pytest.mark.parametrize(
'is_float, value, expected',
(
(False, '37', '37'),
(True, '37.1', '37.1'),
),
)
def test_export_default_decimal_places(is_float, value, expected):
matrix = canmatrix.canmatrix.CanMatrix()
frame = canmatrix.canmatrix.Frame()
matrix.add_frame(frame)
signal = canmatrix.canmatrix.Signal(
size=32,
is_float=is_float,
is_signed=False,
initial_value=value,
)
frame.add_signal(signal)
s = canmatrix.formats.sym.create_signal(db=matrix, signal=signal)
start = '/d:'
d, = (
segment
for segment in s.split()
if segment.startswith(start)
)
d = d[len(start):]
assert d == expected
@pytest.mark.parametrize(
'variable_type, bit_length',
(
('float', 32),
('double', 64),
)
)
def tests_parse_float(variable_type, bit_length):
f = io.BytesIO(
textwrap.dedent(
'''\
FormatVersion=5.0 // Do not edit this line!
Title="Untitled"
{{SENDRECEIVE}}
[Symbol1]
ID=000h
DLC=8
Var=a_signal {variable_type} 0,{bit_length}
'''.format(
variable_type=variable_type,
bit_length=bit_length,
),
).encode('utf-8'),
)
matrix = canmatrix.formats.sym.load(f)
assert matrix.load_errors == []
frame = matrix.frames[0]
signal = frame.signals[0]
assert signal.is_float
def test_unterminated_enum():
f = io.BytesIO(
textwrap.dedent(
'''\
FormatVersion=5.0 // Do not edit this line!
Title="Untitled
{ENUMS}
enum Categories(0="Animal", 1="Vegetable", 3="Mineral"
{SENDRECEIVE}
[Symbol1]
ID=000h
DLC=8
Var=Signal unsigned 0,16
'''
).encode('utf-8'),
)
# Missing ')' at the end of enum used to cause infinite loop
matrix = canmatrix.formats.sym.load(f)
assert len(matrix.load_errors) == 1
if sys.version_info > (3, 0):
assert isinstance(matrix.load_errors[0], EOFError)
else:
assert isinstance(matrix.load_errors[0], StopIteration)
def test_title_read_and_write():
f = io.BytesIO(
textwrap.dedent(
'''\
FormatVersion=5.0 // Do not edit this line!
Title="An Example Title"
'''
).encode('utf-8'),
)
matrix = canmatrix.formats.sym.load(f)
assert matrix.attribute("Title") == "An Example Title"
f_out = io.BytesIO()
canmatrix.formats.sym.dump(matrix, f_out)
assert f_out.getvalue().decode('utf-8').splitlines()[1] == 'Title="An Example Title"'
@pytest.mark.parametrize(
'enum_str, enum_dict, enum_label',
(
('enum Animal(0="Dog", 1="Cat", 2="Fox")', {"Animal": {0: "Dog", 1: "Cat", 2: "Fox"}}, "Simple enum"),
('''\
enum Animal(0="Dog", //A Comment
1="Cat",
2="Fox")''',
{"Animal": {0: "Dog", 1: "Cat", 2: "Fox"}}, "Multiline enum"),
('enum Animal(0="Dog",1="Cat",2="Fox")', {"Animal": {0: "Dog", 1: "Cat", 2: "Fox"}}, "No Space in Separator"),
)
)
def test_enums_read(enum_str, enum_dict, enum_label):
f = io.BytesIO('''\
FormatVersion=5.0 // Do not edit this line!
Title="An Example Title"
{{ENUMS}}
{}
'''.format(enum_str).encode('utf-8'),
)
matrix = canmatrix.formats.sym.load(f)
assert matrix.load_errors == [], "Failed to load canmatrix, when testing enum case : '{}'".format(enum_label)
assert matrix.value_tables == enum_dict, "Enum not parsed correctly : '{}'".format(enum_label)
def test_enums_export():
f = io.BytesIO('''\
FormatVersion=5.0 // Do not edit this line!
Title="An Example Title"
{ENUMS}
enum Animal(0="Dog",1="Cat",2="Fox")
{SENDRECEIVE}
[Frame1]
ID=000h
DLC=8
Var=Signal1 unsigned 0,16
'''.encode('utf-8'),
)
matrix = canmatrix.formats.sym.load(f)
assert matrix.load_errors == [], "Failed to load canmatrix"
# Add an enum to Signal1
matrix.frame_by_name("Frame1").signal_by_name("Signal1").enumeration = "Plants"
matrix.frame_by_name("Frame1").signal_by_name("Signal1").values = {0: "Grass", 1: "Flower", 2: "Tree"}
# Export and reimport
f_out = io.BytesIO()
canmatrix.formats.sym.dump(matrix, f_out)
f_in = io.BytesIO(f_out.getvalue())
new_matrix = canmatrix.formats.sym.load(f_in)
# Check that Enums from Enums table exported and reimported correctly
assert new_matrix.value_tables["Animal"] == {0: "Dog", 1: "Cat", 2: "Fox"}
# Check that Enums from a Signal.Values property exported and reimported correctly
assert new_matrix.value_tables["Plants"] == {0: "Grass", 1: "Flower", 2: "Tree"}
def test_types_read():
f = io.BytesIO('''\
FormatVersion=5.0 // Do not edit this line!
Title="Types Test"
{ENUMS}
enum EnumAnimals(0="Cat", // An enum value for cats
1="Dog", // An enum value for dogs
2="Horse", 3="Monkey",
4="Lion")// An enum with a comment for the final value
{SENDRECEIVE}
[SymbolLengths]
ID=000h
DLC=8
Var="1Bit" unsigned 0,1
Var="3Bits" unsigned 1,3
Var="4Bits" unsigned 4,4
Var="21Bits" unsigned 8,21
Var="6Bits" unsigned 29,6
Var="29Bits" unsigned 35,29
[SymbolTypes]
ID=001h
DLC=8
Var=Bit bit 0,1
Var=Char char 1,8
Var=String string 16,16
Var=Signed signed 32,4
Var=Unsigned unsigned 36,4
Var=Enum EnumAnimals 40,4
Var=Raw raw 48,16
[SymbolDouble]
ID=002h
DLC=8
Var=Double double 0,64 // Must be 8 Bytes according to PCAN Symbol Editor V5
[SymbolFloat]
ID=003h
DLC=4
Var=Float float 0,32 // Must be 4 Bytes according to PCAN Symbol Editor V5
'''.encode('utf-8'),
)
matrix = canmatrix.formats.sym.load(f)
# Check no errors loading the matrix
assert matrix.load_errors == []
f_out = io.BytesIO()
canmatrix.formats.sym.dump(matrix, f_out)
f_out_bytes = f_out.getvalue()
f_out_string = f_out_bytes.decode("utf-8")
# Check that types are preserved when saving back to .SYM format
assert "Var=Bit bit" in f_out_string
assert "Var=Char char" in f_out_string
assert "Var=String string" in f_out_string
assert "Var=Signed signed" in f_out_string
assert 'Var="21Bits" unsigned' in f_out_string
assert 'Var=Float float' in f_out_string
assert 'Var=Double double' in f_out_string
# Read matrix back in to check all symbols/frames preserved
f_in = io.BytesIO(f_out_bytes)
new_matrix = canmatrix.formats.sym.load(f_in)
# Check no errors loading the matrix
assert new_matrix.load_errors == []
# Check that both matrices have the same Frames
frames = [f.name for f in matrix.frames]
new_frames = [f.name for f in new_matrix.frames]
assert sorted(frames) == sorted(new_frames)
# Check that both matrices have the same signals, and that all the expected signals are present
signals = chain(*[[s.name for s in frame.signals] for frame in matrix.frames])
new_signals = chain(*[[s.name for s in frame.signals] for frame in new_matrix.frames])
assert sorted(signals) == sorted(new_signals) == sorted([
"1Bit",
"3Bits",
"4Bits",
"21Bits",
"6Bits",
"29Bits",
"Bit",
"Char",
"String",
"Signed",
"Unsigned",
"Enum",
"Raw",
"Double",
"Float", ])
@pytest.mark.parametrize(
'var_name,data,raw_value',
(
('VarMux1', bytearray([1, 12, 0, 0, 0, 0, 0, 0]), 12),
('VarMux2', bytearray([2, 0, 0, 0, 23, 0, 0, 0]), 23),
('VarMux200', bytearray([200, 0, 0, 0, 0, 0, 34, 0]), 34),
)
)
def test_mux_decode(var_name,data,raw_value):
f = io.BytesIO('''\
FormatVersion=5.0 // Do not edit this line!
Title="Types Test"
FormatVersion=5.0 // Do not edit this line!
Title="Test Symbols File"
{SENDRECEIVE}
[MuxTestFrame]
ID=002h
DLC=8
Mux=Mux1 0,8 1
Var=VarMux1 unsigned 8,8
[MuxTestFrame]
DLC=8
Mux=Mux2 0,8 2
Var=VarMux2 unsigned 32,8
[MuxTestFrame]
DLC=8
Mux=Mux200 0,8 C8h
Var=VarMux200 unsigned 48,8
'''.encode('utf-8'),
)
matrix = canmatrix.formats.sym.load(f)
# Check no errors loading the matrix
assert matrix.load_errors == []
frame = matrix.frame_by_name("MuxTestFrame")
r = frame.decode(data)
    assert var_name in r.keys(), "Signal {}, not decoded. Only : {}".format(var_name, ','.join(r.keys()))
assert r[var_name].raw_value == raw_value | 27.8175 | 119 | 0.560528 | [
"BSD-2-Clause"
] | AjinkyaPasalkar/canmatrix | src/canmatrix/tests/test_sym.py | 11,127 | Python |
#!/usr/bin/env python
import rospy
from apriltag_ros.msg import AprilTagDetectionArray
from apriltag_ros.msg import AprilTagDetection
from find_object_2d.msg import ObjectsStamped
import tf
import geometry_msgs.msg
objFramePrefix_ = "object"
distanceMax_ = 0.0
def callback(data):
global objFramePrefix_
global distanceMax_
if len(data.objects.data) > 0:
output = AprilTagDetectionArray()
output.header = data.header
for i in range(0,len(data.objects.data),12):
try:
objId = data.objects.data[i]
(trans,quat) = listener.lookupTransform(data.header.frame_id, objFramePrefix_+'_'+str(int(objId)), data.header.stamp)
tag = AprilTagDetection()
tag.id.append(objId)
tag.pose.pose.pose.position.x = trans[0]
tag.pose.pose.pose.position.y = trans[1]
tag.pose.pose.pose.position.z = trans[2]
tag.pose.pose.pose.orientation.x = quat[0]
tag.pose.pose.pose.orientation.y = quat[1]
tag.pose.pose.pose.orientation.z = quat[2]
tag.pose.pose.pose.orientation.w = quat[3]
tag.pose.header = output.header
if distanceMax_ <= 0.0 or trans[2] < distanceMax_:
output.detections.append(tag)
except (tf.LookupException, tf.ConnectivityException, tf.ExtrapolationException):
continue
if len(output.detections) > 0:
pub.publish(output)
if __name__ == '__main__':
pub = rospy.Publisher('tag_detections', AprilTagDetectionArray, queue_size=10)
rospy.init_node('objects_to_tags', anonymous=True)
rospy.Subscriber("objectsStamped", ObjectsStamped, callback)
objFramePrefix_ = rospy.get_param('~object_prefix', objFramePrefix_)
distanceMax_ = rospy.get_param('~distance_max', distanceMax_)
listener = tf.TransformListener()
rospy.spin()
| 41.744681 | 133 | 0.646789 | [
"BSD-3-Clause"
] | BrainGardenAI/rtabmap_ros | scripts/objects_to_tags.py | 1,962 | Python |
from django.forms.utils import flatatt
from django.utils.html import format_html, format_html_join
from django.utils.translation import gettext as _
from wagtail.core import blocks
from wagtail.core.blocks import PageChooserBlock
from wagtail.images.blocks import ImageChooserBlock
from wagtailmarkdown.utils import render_markdown
from wagtailmedia.blocks import AbstractMediaChooserBlock
class MediaBlock(AbstractMediaChooserBlock):
def render_basic(self, value, context=None):
if not value:
return ''
video_not_supported_text = _("Your browser does not support video playback.")
audio_not_supported_text = _("Your browser does not support audio playback.")
        # Translators: This message appears below embedded video and audio on the site. Many feature phones won't be able to play embedded video/audio, so the site offers an opportunity to download the file. Part of this message (between %(start_link)s and %(end_link)s ) is a clickable download link.
download_video_text = _('If you cannot view the above video, you can'
' instead %(start_link)sdownload it%(end_link)s.') % {
'start_link': '<a href={2} download>',
'end_link': '</a>'
}
        # Translators: This message appears below embedded video and audio on the site. Many feature phones won't be able to play embedded video/audio, so the site offers an opportunity to download the file. Part of this message (between %(start_link)s and %(end_link)s ) is a clickable download link.
download_audio_text = _('If you cannot listen to the above audio, you can'
' instead %(start_link)sdownload it%(end_link)s.') % {
'start_link': '<a href={2} download>',
'end_link': '</a>'
}
if value.type == 'video':
player_code = '''
<div>
<video width="320" height="240" {1} controls>
{0}
''' + video_not_supported_text + '''
</video>
</div>
<p class='article__content--video'>''' + download_video_text + '''</p>
'''
else:
player_code = '''
<div>
<audio controls>
{0}
''' + audio_not_supported_text + '''
</audio>
</div>
<p class='article__content--audio'>''' + download_audio_text + '''</p>
'''
thumbnail = f'poster={value.thumbnail.url}' if value.thumbnail else ''
return format_html(player_code, format_html_join(
'\n', "<source{0}>",
[[flatatt(s)] for s in value.sources]
), thumbnail, value.url)
class SocialMediaLinkBlock(blocks.StructBlock):
title = blocks.CharBlock(max_length=255)
link = blocks.URLBlock()
image = ImageChooserBlock(template='blocks/image.html')
class Meta:
icon = 'site'
class SocialMediaShareButtonBlock(blocks.StructBlock):
platform = blocks.CharBlock(max_length=255)
is_active = blocks.BooleanBlock(required=False)
image = ImageChooserBlock(template='blocks/image.html', required=False)
class Meta:
icon = 'site'
class EmbeddedQuestionnaireChooserBlock(blocks.PageChooserBlock):
class Meta:
icon = 'form'
class EmbeddedQuestionnaireBlock(blocks.StructBlock):
direct_display = blocks.BooleanBlock(required=False)
class EmbeddedPollBlock(EmbeddedQuestionnaireBlock):
poll = EmbeddedQuestionnaireChooserBlock(target_model='questionnaires.Poll')
def get_context(self, value, parent_context=None):
context = super().get_context(value, parent_context)
poll = value.get('poll')
if poll and poll.live:
context.update({
'direct_display': value['direct_display'],
'questionnaire': poll.specific,
})
return context
class Meta:
template = 'questionnaires/tags/questionnaire_wrapper.html'
class EmbeddedSurveyBlock(EmbeddedQuestionnaireBlock):
survey = EmbeddedQuestionnaireChooserBlock(target_model='questionnaires.Survey')
def get_context(self, value, parent_context=None):
context = super().get_context(value, parent_context)
survey = value.get('survey')
if survey and survey.live:
context.update({
'direct_display': value['direct_display'],
'questionnaire': survey.specific,
})
return context
class Meta:
template = 'questionnaires/tags/questionnaire_wrapper.html'
class EmbeddedQuizBlock(EmbeddedQuestionnaireBlock):
quiz = EmbeddedQuestionnaireChooserBlock(target_model='questionnaires.Quiz')
def get_context(self, value, parent_context=None):
context = super().get_context(value, parent_context)
quiz = value.get('quiz')
if quiz and quiz.live:
context.update({
'direct_display': value['direct_display'],
'questionnaire': quiz.specific,
})
return context
class Meta:
template = 'questionnaires/tags/questionnaire_wrapper.html'
class PageButtonBlock(blocks.StructBlock):
page = blocks.PageChooserBlock()
text = blocks.CharBlock(required=False, max_length=255)
def get_context(self, value, parent_context=None):
context = super().get_context(value, parent_context)
button_page = value.get('page')
if button_page and button_page.live:
context.update({
'button_page': button_page.specific,
'text': value.get('text') or button_page.title
})
return context
class Meta:
template = 'blocks/page_button.html'
class ArticleBlock(blocks.StructBlock):
display_section_title = blocks.BooleanBlock(required=False)
article = PageChooserBlock(target_model='home.Article')
def get_context(self, value, parent_context=None):
context = super().get_context(value, parent_context)
article = value.get('article')
if article and article.live:
context.update({
'display_section_title': value['display_section_title'],
'article': article.specific,
})
return context
class Meta:
template = 'blocks/article.html'
class NumberedListBlock(blocks.ListBlock):
def render_basic(self, value, context=None):
children = format_html_join(
'\n', '<li>{0}</li>',
[
(self.child_block.render(child_value, context=context),)
for child_value in value
]
)
return format_html("<ol>{0}</ol>", children)
class RawHTMLBlock(blocks.RawHTMLBlock):
def render_basic(self, value, context=None):
result = super(RawHTMLBlock, self).render_basic(value, context)
return render_markdown(result)
class OfflineAppButtonBlock(blocks.StructBlock):
smartphone_text = blocks.CharBlock(
help_text=_('This text appears when it is possible for the user to install the app on their phone.'))
feature_phone_text = blocks.CharBlock(required=False,
help_text=_('This text appears when the user is using a feature phone and thus cannot install the app '
'(the button will be disabled in this case). [Currently not implemented]'))
offline_text = blocks.CharBlock(required=False,
help_text=_('This text appears when the user is navigating the site via the offline app and '
'thus it doesn\'t make sense to install the offline app again '
'(the button will be disabled in this case). [Currently not implemented]'))
class Meta:
template = 'blocks/offline_app_button.html'
| 38.086538 | 314 | 0.639611 | [
"BSD-2-Clause"
] | edmoss345/iogt | home/blocks.py | 7,922 | Python |
class Solution:
def longestCommonSubsequence(self, text1: str, text2: str) -> int:
'''
        # Longest common contiguous substring (i.e. the classic longest common substring)
l1=len(text1)
l2=len(text2)
if l1==0 or l2==0:
return 0
dp = [[0 for i in range(l2)] for i in range(l1)]
res = 0
if text1[0]==text2[0]:
dp[0][0]=1
res=1
for i in range(1,l2):
if text2[i]==text1[0]:
dp[0][i]=1
res=1
for i in range(1,l1):
if text1[i]==text2[0]:
dp[i][0]=1
res=1
for i in range(1,l1):
for j in range(1,l2):
if text1[i]==text2[j]:
dp[i][j]=dp[i-1][j-1]+1
res=max(res,dp[i][j])
return res
'''
'''
        # Longest common subsequence (need not be contiguous): in effect it asks how many matching letters text1[:i+1] and text2[:j+1] share
l1 = len(text1)
l2 = len(text2)
if l1 == 0 or l2 == 0:
return 0
dp = [[0 for i in range(l2)] for i in range(l1)]
if text1[0] == text2[0]:
dp[0][0] = 1
for i in range(1, l2):
if text2[i] == text1[0] or dp[0][0]==1 or dp[0][i-1]==1:
dp[0][i] = 1
for i in range(1, l1):
if text1[i] == text2[0] or dp[0][0]==1 or dp[i-1][0]==1:
dp[i][0] = 1
for i in range(1, l1):
for j in range(1, l2):
if text1[i] == text2[j]:
dp[i][j] = dp[i - 1][j - 1] + 1
else:
dp[i][j]=max(dp[i][j-1],dp[i-1][j])
return dp[-1][-1]
'''
#recursion
#exit case
if len(text1)==0 or len(text2)==0:
return 0
if text1[-1]==text2[-1]:
return 1+self.longestCommonSubsequence(text1[:-1],text2[:-1])
else:
return max(self.longestCommonSubsequence(text1[:-1],text2),self.longestCommonSubsequence(text1,text2[:-1]))
if __name__ == '__main__':
sol=Solution()
text1 ="ylqpejqbalahwr"
text2 ="yrkzavgdmdgtqpg"
# "hofubmnylkra"
# "pqhgxgdofcvmr"
print(sol.longestCommonSubsequence(text1,text2))
| 27.225 | 119 | 0.435721 | [
"Apache-2.0"
] | Rylie-W/LeetRecord | DP/Leetcode1143.py | 2,248 | Python |
from __future__ import print_function
import torch
from torch import nn
import numpy as np
import torch.nn.functional as F
from torch.autograd import Variable
from constant import *
from torch.nn.utils.rnn import pack_padded_sequence
class EncoderGRU(nn.Module):
def __init__(self,
vocab_size,emb_dim,emb,
hidden_dim,
nlayers,
pad_token,
bidir=False):
#emb---np wordVec vocab_size=len(emb)
super(EncoderGRU,self).__init__()
        self.word_emb=nn.Embedding(vocab_size,emb_dim,pad_token)
        self.word_emb.weight.data.copy_(torch.from_numpy(emb))
        self.pos1_emb=nn.Embedding(MaxPos,dimWPE)
        self.pos2_emb=nn.Embedding(MaxPos,dimWPE)
self.hidden_dim=hidden_dim
self.emb_dim=emb_dim+dimWPE*2
self.nlayers=nlayers
self.bidir=bidir
#using gru
self.gru=nn.GRU(
self.emb_dim//2 if bidir else self.emb_dim,
self.hidden_dim,
self.nlayers,
bidirectional=bidir,
batch_first=True
)
def forward(self,input_,pos1,pos2):
embd=self.word_emb(input_)
pos1=self.pos1_emb(pos1)
pos2=self.pos2_emb(pos2)
embd=torch.cat((embd,pos1,pos2),2)
#using gru
        _,h_t_=self.gru(embd)
h_t=torch.cat((h_t_[-1],h_t_[-2]),1)if self.bidir else h_t_[-1]
return h_t
class EncoderCNN(nn.Module):
def __init__(self,
vocab_size,emb,emb_dim=dimWE,
hidden_dim=dimC,lang=0):
#emb---np wordVec vocab_size=len(emb)
super(EncoderCNN,self).__init__()
self.lang=lang
self.word_emb=nn.Embedding(vocab_size,emb_dim)
self.word_emb.weight.data.copy_(torch.from_numpy(emb))
self.pos1_emb=nn.Embedding(MaxPos,dimWPE)
self.pos2_emb=nn.Embedding(MaxPos,dimWPE)
self.maxPooling=nn.MaxPool1d(SenLen[self.lang]-2)
self.emb_dim=emb_dim+dimWPE*2
self.hidden_dim=hidden_dim
#using CNN
self.tanh=nn.Tanh()
self.conv=nn.Conv1d(self.emb_dim,hidden_dim,filter_size)
self.dropout=nn.Dropout(p=CNNDropout)
def forward(self,inp,pos1,pos2):
Len=inp.size(0)
embd=self.word_emb(inp)
pos1=self.pos1_emb(pos1)
pos2=self.pos2_emb(pos2)
embd=torch.cat((embd,pos1,pos2),2).transpose(1,2)
conved=self.conv(embd)
pooled=self.maxPooling(conved).view(Len,dimC)
out=self.tanh(pooled)
return self.dropout(out)
class CNNEncoder(nn.Module):
def __init__(self,vocab_en,emb_en,vocab_zh,emb_zh):
super(CNNEncoder,self).__init__()
self.encoder_en=EncoderCNN(vocab_en,emb_en,dimWE,dimC,0)
self.encoder_zh=EncoderCNN(vocab_zh,emb_zh,dimWE,dimC,1)
def forward(self,wordsEn,pos1En,pos2En,wordsZh,pos1Zh,pos2Zh):
return self.encoder_en(wordsEn,pos1En,pos2En),self.encoder_zh(wordsZh,pos1Zh,pos2Zh)
class Discriminator(nn.Module):
def __init__(self,
dis_input_dim=Encodered_dim,
nlayers=dis_layers,
hidden_dim=dis_hidden_dim,
input_dropout=dis_input_dropout,
dropout=dis_dropout):
super(Discriminator,self).__init__()
self.dis_input=dis_input_dim
layers=[nn.Dropout(input_dropout)]
for i in range(0,nlayers+1):
input_dim=self.dis_input if i==0 else hidden_dim
output_dim=1 if i==nlayers else hidden_dim
layers.append(nn.Linear(input_dim,output_dim))
if i<nlayers:
layers.append(nn.LeakyReLU(0.2))
layers.append(nn.Dropout(dropout))
layers.append(nn.Sigmoid())
self.layers=nn.Sequential(*layers)
def forward(self,inp):
assert inp.dim()==2 and inp.size(1)==self.dis_input
return self.layers(inp).view(-1)
class MultiRE(nn.Module):
def __init__(self):
super(MultiRE,self).__init__()
self.relation_emb=nn.Embedding(dimR,Encodered_dim)
self.dropout=nn.Dropout(p=Att_dropout)
#self.softmax=nn.Softmax()
#self.logsoftmax=nn.LogSoftmax()
self.M=nn.Linear(Encodered_dim,dimR)
def forward(self,inp_en,r_en,l_en,inp_zh,r_zh,l_zh,re_mask):
NumRe=r_en.size(0)
NumIn=l_zh.size(0)
relation_en=self.relation_emb(r_en)
relation_zh=self.relation_emb(r_zh)
attn_en=torch.sum(relation_en*inp_en,2)
attn_zh=torch.sum(relation_zh*inp_zh,2)
p=Variable(torch.cuda.FloatTensor(NumIn,NumRe).fill_(0.0))
L_en=0
L_zh=0
R_vec=Variable(torch.cuda.FloatTensor(NumIn,NumRe,Encodered_dim).fill_(0.0))
S=Variable(torch.cuda.FloatTensor(NumIn,NumRe,Encodered_dim).fill_(0.0))
for i in range(0,NumIn):
R_en=L_en+l_en[i].data[0]
R_zh=L_zh+l_zh[i].data[0]
if R_en>L_en and R_zh>L_zh:
Att=F.softmax(torch.cat((attn_en[:,L_en:R_en],attn_zh[:,L_zh:R_zh]),1),1)
S[i]=self.dropout(torch.matmul(Att,torch.cat((inp_en[L_en:R_en],inp_zh[L_zh:R_zh]),0)))
R_vec[i]=relation_en[:,L_en,:]
elif R_en>L_en:
Att=F.softmax(attn_en[:,L_en:R_en],1)
S[i]=self.dropout(torch.matmul(Att,inp_en[L_en:R_en]))
R_vec[i]=relation_en[:,L_en,:]
elif R_zh>L_zh:
Att=F.softmax(attn_zh[:,L_zh:R_zh],1)
S[i]=self.dropout(torch.matmul(Att,inp_zh[L_zh:R_zh]))
R_vec[i]=relation_zh[:,L_zh,:]
else:
print("ERR NO sentences")
exit()
L_en=R_en
L_zh=R_zh
p_n=F.log_softmax(self.M(S)+torch.sum(R_vec*S,2).view(NumIn,NumRe,1),2).view(NumIn,NumRe,dimR)
return p_n[re_mask].view(NumIn,NumRe)
class MonoRE(nn.Module):
def __init__(self):
super(MonoRE,self).__init__()
self.relation_emb=nn.Embedding(dimR,Encodered_dim)
self.dropout=nn.Dropout(p=Att_dropout)
#self.softmax=nn.Softmax()
#self.logsoftmax=nn.LogSoftmax()
self.M=nn.Linear(Encodered_dim,dimR)
def forward(self,inp,r,l,re_mask):
NumRe=r.size(0)
NumIn=l.size(0)
relation=self.relation_emb(r)
attn=torch.sum(relation*inp,2)
p=Variable(torch.cuda.FloatTensor(NumIn,NumRe).fill_(0.0))
L=0
R_vec=Variable(torch.cuda.FloatTensor(NumIn,NumRe,Encodered_dim).fill_(0.0))
S=Variable(torch.cuda.FloatTensor(NumIn,NumRe,Encodered_dim).fill_(0.0))
for i in range(0,NumIn):
R=L+l[i].data[0]
if R>L:
Att=F.softmax(attn[:,L:R],1)
S[i]=self.dropout(torch.matmul(Att,inp[L:R]))
R_vec[i]=relation[:,L,:]
L=R
p_n=F.log_softmax((self.M(S)+torch.sum(R_vec*S,2).view(NumIn,NumRe,1)),2).view(NumIn,NumRe,dimR)
return p_n[re_mask].view(NumIn,NumRe)
class AMRE(nn.Module):
def __init__(self,emb_en,emb_zh):
super(AMRE,self).__init__()
self.encoder=CNNEncoder(len(emb_en),emb_en,len(emb_zh),emb_zh).cuda()
self.enRE=MonoRE().cuda()
self.zhRE=MonoRE().cuda()
def forward(self,wordsEn,pos1En,pos2En,rEn,lEn,wordsZh,pos1Zh,pos2Zh,rZh,lZh,re_mask):
inp_en,inp_zh=self.encoder(wordsEn,pos1En,pos2En,wordsZh,pos1Zh,pos2Zh)
return self.enRE(inp_en,rEn,lEn,re_mask)+self.zhRE(inp_zh,rZh,lZh,re_mask)
class MARE(nn.Module):
def __init__(self,emb_en,emb_zh):
super(MARE,self).__init__()
self.D=Discriminator().cuda()
self.share_encoder=CNNEncoder(len(emb_en),emb_en,len(emb_zh),emb_zh).cuda()
self.multiRE=MultiRE().cuda()
self.monoRE=AMRE(emb_en,emb_zh)
def Orth_con(self,wordsEn,pos1En,pos2En,wordsZh,pos1Zh,pos2Zh):
share_en,share_zh=self.share_encoder(wordsEn,pos1En,pos2En,wordsZh,pos1Zh,pos2Zh)
mono_en,mono_zh=self.monoRE.encoder(wordsEn,pos1En,pos2En,wordsZh,pos1Zh,pos2Zh)
share=torch.cat((share_en,share_zh),0)
mono=torch.cat((mono_en,mono_zh),0)
share-=torch.mean(share,0)
mono-=torch.mean(mono,0)
share=F.normalize(share,2,1)
mono=F.normalize(mono,2,1)
correlation_mat=torch.matmul(share.transpose(0,1),mono)
cost=torch.mean(correlation_mat*correlation_mat)
return cost
def forward(self,wordsEn,pos1En,pos2En,rEn,lEn,wordsZh,pos1Zh,pos2Zh,rZh,lZh,re_mask):
share_en,share_zh=self.share_encoder(wordsEn,pos1En,pos2En,wordsZh,pos1Zh,pos2Zh)
return self.monoRE(wordsEn,pos1En,pos2En,rEn,lEn,wordsZh,pos1Zh,pos2Zh,rZh,lZh,re_mask)+self.multiRE(share_en,rEn,lEn,share_zh,rZh,lZh,re_mask)
| 43.422886 | 151 | 0.635655 | [
"MIT"
] | thunlp/AMNRE | CNN/src/models.py | 8,728 | Python |
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
'''
@File : utils_node.py
@Time : 2022/03/08 14:35:13
@Author : Jianwen Chen
@Version : 1.0
@Contact : [email protected]
@License : (C)Copyright 2021-2022, SAIL-Lab
'''
######################################## import area ########################################
# common library
import os
import random
import torch
import torch.nn as nn
import numpy as np
from tqdm import tqdm
from sklearn import metrics
from torch.optim.lr_scheduler import _LRScheduler
######################################## function area ########################################
def seed_everything(seed=2021):
os.environ['PYTHONHASHSEED'] = str(seed)
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
torch.backends.cudnn.benchmark = False
torch.backends.cudnn.deterministic = True
def initialize_weights(model):
"""
Initializes the weights of a model in place.
:param model: An nn.Module.
"""
for param in model.parameters():
if param.dim() > 1:
nn.init.xavier_normal_(param)
def loop(data_loader, model, optimizer, scheduler, device):
batch_size = data_loader.batch_size
data_loader = tqdm(data_loader) if optimizer is not None else data_loader
loss_sum, y_true, y_pred = 0.0, list(), list()
for batch in data_loader:
smiles, mols, batch_node_features, batch_edge_features, batch_distance_matrix, labels = batch
# add mask
batch_masks = torch.sum(torch.abs(batch_node_features), dim=-1) != 0
# (batch, max_length, node_dim)
batch_node_features = batch_node_features.to(device)
# (batch, max_length, max_length, edge_dim)
batch_edge_features = batch_edge_features.to(device)
# (batch, max_length, max_length)
batch_distance_matrix = batch_distance_matrix.to(device)
# (batch, max_length)
batch_masks = batch_masks.to(device)
# (batch, max_length, 1)
labels = labels.to(device)
# (batch, max_length, 1)
outputs = model(batch_node_features, batch_edge_features, batch_distance_matrix, batch_masks, device)
# loss calculation
loss = cal_loss(y_true=labels, y_pred=outputs, device=device)
loss_sum += loss.item()
if optimizer is not None:
# clear gradients for this training step
optimizer.zero_grad()
# back propagation, compute gradients
loss.backward()
# apply gradients
optimizer.step()
# NormLR need step every batch
if scheduler is not None:
scheduler.step()
# collect result
labels = labels.detach().cpu().numpy()
outputs = outputs.detach().cpu().numpy()
y_true.append([])
y_pred.append([])
for label, output in zip(labels, outputs):
label, output = label.flatten(), output.flatten()
for l, o in zip(label, output):
if l != 0.0:
y_true[-1].append(l)
y_pred[-1].append(o)
# clear cuda cache
torch.cuda.empty_cache()
# metric calculation
results = cal_metric(y_true=y_true, y_pred=y_pred)
results['loss'] = loss_sum / (len(data_loader) * batch_size)
return results
def cal_loss(y_true, y_pred, device):
y_true, y_pred = y_true.flatten(), y_pred.flatten()
y_mask = torch.where(y_true != 0.0, torch.full_like(y_true, 1), torch.full_like(y_true, 0))
loss = torch.sum(torch.abs(y_true - y_pred) * y_mask) / torch.sum(y_mask)
return loss
def cal_metric(y_true, y_pred):
concatenate_true, concatenate_pred = np.concatenate(y_true, axis=-1), np.concatenate(y_pred, axis=-1)
mae = metrics.mean_absolute_error(concatenate_true, concatenate_pred)
r2 = metrics.r2_score(concatenate_true, concatenate_pred)
return {'mae':mae, 'r2':r2}
class NoamLR(_LRScheduler):
"""
Noam learning rate scheduler with piecewise linear increase and exponential decay.
The learning rate increases linearly from init_lr to max_lr over the course of
the first warmup_steps (where warmup_steps = warmup_epochs * steps_per_epoch).
Then the learning rate decreases exponentially from max_lr to final_lr over the
course of the remaining total_steps - warmup_steps (where total_steps =
total_epochs * steps_per_epoch). This is roughly based on the learning rate
schedule from Attention is All You Need, section 5.3 (https://arxiv.org/abs/1706.03762).
"""
def __init__(self, optimizer, warmup_epochs, total_epochs, steps_per_epoch, init_lr, max_lr, final_lr):
"""
Initializes the learning rate scheduler.
:param optimizer: A PyTorch optimizer.
:param warmup_epochs: The number of epochs during which to linearly increase the learning rate.
:param total_epochs: The total number of epochs.
:param steps_per_epoch: The number of steps (batches) per epoch.
:param init_lr: The initial learning rate.
:param max_lr: The maximum learning rate (achieved after warmup_epochs).
:param final_lr: The final learning rate (achieved after total_epochs).
"""
assert len(optimizer.param_groups) == len(warmup_epochs) == len(total_epochs) == len(init_lr) == len(max_lr) == len(final_lr)
self.num_lrs = len(optimizer.param_groups)
self.optimizer = optimizer
self.warmup_epochs = np.array(warmup_epochs)
self.total_epochs = np.array(total_epochs)
self.steps_per_epoch = steps_per_epoch
self.init_lr = np.array(init_lr)
self.max_lr = np.array(max_lr)
self.final_lr = np.array(final_lr)
self.current_step = 0
self.lr = init_lr
self.warmup_steps = (self.warmup_epochs * self.steps_per_epoch).astype(int)
self.total_steps = self.total_epochs * self.steps_per_epoch
self.linear_increment = (self.max_lr - self.init_lr) / self.warmup_steps
self.exponential_gamma = (self.final_lr / self.max_lr) ** (1 / (self.total_steps - self.warmup_steps))
super(NoamLR, self).__init__(optimizer)
def get_lr(self):
"""Gets a list of the current learning rates."""
return list(self.lr)
def step(self, current_step: int = None):
"""
Updates the learning rate by taking a step.
:param current_step: Optionally specify what step to set the learning rate to.
If None, current_step = self.current_step + 1.
"""
if current_step is not None:
self.current_step = current_step
else:
self.current_step += 1
for i in range(self.num_lrs):
if self.current_step <= self.warmup_steps[i]:
self.lr[i] = self.init_lr[i] + self.current_step * self.linear_increment[i]
elif self.current_step <= self.total_steps[i]:
self.lr[i] = self.max_lr[i] * (self.exponential_gamma[i] ** (self.current_step - self.warmup_steps[i]))
else: # theoretically this case should never be reached since training should stop at total_steps
self.lr[i] = self.final_lr[i]
self.optimizer.param_groups[i]['lr'] = self.lr[i]
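# --- Illustrative usage (not part of the original module): a minimal sketch of
# how NoamLR might be driven, stepping once per batch. The model, optimizer and
# every hyperparameter value below are made-up placeholders for demonstration.
if __name__ == '__main__':
    _model = nn.Linear(8, 1)
    _optimizer = torch.optim.Adam(_model.parameters(), lr=1e-4)
    _scheduler = NoamLR(
        optimizer=_optimizer,
        warmup_epochs=[2], total_epochs=[10], steps_per_epoch=100,
        init_lr=[1e-4], max_lr=[1e-3], final_lr=[1e-4])
    for _ in range(5):
        _optimizer.step()   # dummy optimization step
        _scheduler.step()   # NoamLR is stepped every batch, not every epoch
    print(_scheduler.get_lr())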
| 37.331658 | 133 | 0.63481 | [
"MIT"
] | jcchan23/SAIL | Repeat/CoMPT/utils_node.py | 7,429 | Python |
from djangocms_style.cms_plugins import StylePlugin
from cms.plugin_pool import plugin_pool
from django.utils.translation import gettext_lazy as _
from .models import TaccsiteSection
# Plugins
@plugin_pool.register_plugin
class TaccsiteSectionPlugin(StylePlugin):
"""
Patterns > "Section" Plugin
https://confluence.tacc.utexas.edu/x/c5TtDg
"""
module = 'TACC Site'
model = TaccsiteSection
name = _('Section')
# Copied from djangocms_style sans 'Inline style settings'
# FAQ: If user wants to override spacing, they may:
# - use Style plugin (if they have permission)
# - request Design & Dev standardize use case
# https://github.com/django-cms/djangocms-style/blob/3.0.0/djangocms_style/cms_plugins.py#L15-L40
fieldsets = (
(None, {
'fields': (
'label',
('class_name', 'tag_type'),
)
}),
(_('Advanced settings'), {
'classes': ('collapse',),
'fields': (
'additional_classes',
'id_name',
'template',
'attributes',
),
}),
)
| 28.119048 | 101 | 0.58171 | [
"BSD-2-Clause"
] | tacc-wbomar/Core-CMS-Plugin-Section | djangocms_tacc_section/cms_plugins.py | 1,181 | Python |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.contrib import admin
from .models import Location,Category,Image
# Register your models here.
admin.site.register(Location)
admin.site.register(Category)
class ImageAdmin(admin.ModelAdmin):
    search_fields = ('image_category',)
admin.site.register(Image, ImageAdmin)
"MIT"
] | leigh90/TheLumiere | shots/admin.py | 330 | Python |
# -*- coding: utf-8 -*-
"""
Authors: Tim Hessels
Module: Collect/SRTM
Description:
This module downloads DEM data from http://earlywarning.usgs.gov/hydrodata/.
Use the DEM functions to download and create DEM images in Gtiff format.
Examples:
from pyWAPOR.Collect import SRTM
SRTM.DEM(Dir='C:/TempDEM4/', latlim=[29, 32], lonlim=[-113, -109])
"""
from .DEM import main as DEM
__all__ = ['DEM']
__version__ = '0.1'
| 20.95 | 76 | 0.711217 | [
"Apache-2.0"
] | DHI-GRAS/wapor-et-look | pyWAPOR/Collect/SRTM/__init__.py | 419 | Python |
"""
Selects a matplotlib backend so you can run without a GUI/tkinter. Supports:
- PyQt5
- PySide2
- WX
- Tkinter
"""
from pyNastran.gui import IS_DEV
if IS_DEV:
# there is no interactive backend when testing on TravisCI
matplotlib_backend = 'Agg'
else:
# fails if using the terminal and PyQt/PySide & qtpy are installed
# how do I check if there is a terminal vs just running in command line?
#
try:
from pyNastran.gui.qt_version import qt_int
matplotlib_backend = 'Qt%iAgg' % qt_int
except ImportError:
try:
# hasn't been tested on a machine without a backend...
# default matplotlib backend
import tkinter
matplotlib_backend = 'tkAgg'
except ImportError:
# no-gui backend
matplotlib_backend = 'Agg'
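# --- Illustrative usage (assumption, not part of the original module): the
# selected backend name is typically applied before pyplot is imported.
if __name__ == "__main__":  # pragma: no cover
    import matplotlib
    matplotlib.use(matplotlib_backend)  # must happen before importing pyplot
    print("matplotlib backend:", matplotlib.get_backend())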
| 28.965517 | 77 | 0.642857 | [
"BSD-3-Clause"
] | 214929177/pyNastran | pyNastran/gui/matplotlib_backend.py | 840 | Python |
# The new config inherits a base config to highlight the necessary modification
_base_ = '../retinanet_r50_fpn_1x_coco.py'
# We also need to change the num_classes in head to match the dataset's annotation
model = dict(
    pretrained=None,
    bbox_head=dict(num_classes=7),  # match the 7 traffic-sign classes listed below
)
# Modify dataset related settings
dataset_type = 'COCODataset'
classes = ('Cấm ngược chiều', 'Cấm dừng và đỗ', 'Cấm rẽ', 'Giới hạn tốc độ', 'Cấm còn lại', 'Nguy hiểm', 'Hiệu lệnh')
img_norm_cfg = dict(mean=[123.675, 116.28, 103.53],
std=[58.395, 57.12, 57.375],
to_rgb=True)
data = dict(
samples_per_gpu=2, # Batch size of a single GPU
workers_per_gpu=2, # Worker to pre-fetch data for each single GPU
train=dict(
classes=classes,
img_prefix='/data2/zalo-ai-2020/za_traffic_2020/data/traffic_train/images/',
ann_file='/data2/zalo-ai-2020/za_traffic_2020/data/traffic_train/train.json',
pipeline= [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='Resize',
img_scale=(1622, 622),
multiscale_mode='value',
keep_ratio=True),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
),
val=dict(
classes=classes,
img_prefix='/data2/zalo-ai-2020/za_traffic_2020/data/traffic_train/images/',
ann_file='/data2/zalo-ai-2020/za_traffic_2020/data/traffic_train/val.json',
pipeline= [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(1622, 622),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
])
]
),
test=dict(
classes=classes,
img_prefix='/data2/zalo-ai-2020/za_traffic_2020/data/traffic_public_test/images/',
ann_file='/data2/zalo-ai-2020/za_traffic_2020/data/traffic_public_test/test.json',
pipeline= [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(1622, 622),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
])
]
),
)
| 37.493976 | 117 | 0.54563 | [
"Apache-2.0"
] | tuanphan09/mmdetection | configs/retinanet/traffic_sign/retinanet_r50_fpn_1x_traffic_sign.py | 3,151 | Python |
#!/usr/bin/env python
# Copyright 1996-2019 Cyberbotics Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test header version."""
import unittest
import os
import fnmatch
ignoredProtos = [
'projects/robots/mobsya/thymio/controllers/thymio2_aseba/aseba/clients/studio/plugins/ThymioVPL/UsageProfile.proto',
'projects/samples/tutorials/protos/FourWheelsRobot.proto'
]
skippedDirectories = [
'dependencies',
'distribution',
'.git'
]
class TestHeaderVersion(unittest.TestCase):
"""Unit test of the PROTO and world headers."""
def setUp(self):
"""Get all the PROTO files to be tested."""
# 1. Get Webots version (without revision)
self.version = None
with open(os.environ['WEBOTS_HOME'] + os.sep + 'resources' + os.sep + 'version.txt') as file:
content = file.read()
self.version = content.splitlines()[0].strip().split()[0]
# 2. Get all the PROTO files
self.files = []
for rootPath, dirNames, fileNames in os.walk(os.environ['WEBOTS_HOME']):
dirNames[:] = [d for d in dirNames if d not in skippedDirectories]
for fileName in fnmatch.filter(fileNames, '*.proto'):
proto = os.path.join(rootPath, fileName)
shouldIgnore = False
for ignoredProto in ignoredProtos:
path = os.environ['WEBOTS_HOME'] + os.sep + ignoredProto.replace('/', os.sep)
if proto == path:
shouldIgnore = True
break
if not shouldIgnore:
self.files.append((proto, '#VRML_SIM %s utf8' % self.version))
# 3. Get all the world files
for rootPath, dirNames, fileNames in os.walk(os.environ['WEBOTS_HOME']):
dirNames[:] = [d for d in dirNames if d not in skippedDirectories]
for fileName in fnmatch.filter(fileNames, '*.wbt'):
world = os.path.join(rootPath, fileName)
self.files.append((world, '#VRML_SIM %s utf8' % self.version))
# 4. Get all the .wbproj files
for rootPath, dirNames, fileNames in os.walk(os.environ['WEBOTS_HOME']):
dirNames[:] = [d for d in dirNames if d not in skippedDirectories]
for fileName in fnmatch.filter(fileNames, '*.wbproj'):
projFile = os.path.join(rootPath, fileName)
self.files.append((projFile, 'Webots Project File version %s' % self.version))
def test_header_version(self):
"""Test that the PROTO and world files have the correct header."""
for currentFile in self.files:
fileToTest = currentFile[0]
with open(fileToTest) as file:
content = file.read()
if content == '':
continue
line = content.splitlines()[0].strip()
self.assertTrue(
line.startswith(currentFile[1]),
msg='Wrong header in file: "%s"' % fileToTest
)
if __name__ == '__main__':
unittest.main()
| 40.943182 | 120 | 0.611157 | [
"Apache-2.0"
] | junjihashimoto/webots | tests/sources/test_header_version.py | 3,603 | Python |
import sys, imp, atexit, os
sys.path.append("/home/courses/cs3214/software/pexpect-dpty/");
import pexpect, shellio, signal, time, os, re, proc_check
# Determine the path this file is in
thisdir = os.path.dirname(os.path.realpath(__file__))
#Ensure the shell process is terminated
def force_shell_termination(shell_process):
c.close(force=True)
# pulling in the regular expression and other definitions
# this should be the eshoutput.py file of the hosting shell, see usage above
definitions_scriptname = sys.argv[1]
def_module = imp.load_source('', definitions_scriptname)
# you can define logfile=open("log.txt", "w") in your eshoutput.py if you want logging!
logfile = None
if hasattr(def_module, 'logfile'):
logfile = def_module.logfile
#spawn an instance of the shell, note the -p flags
c = pexpect.spawn(def_module.shell, drainpty=True, logfile=logfile, args=['-p', thisdir])
atexit.register(force_shell_termination, shell_process=c)
# set timeout for all following 'expect*' calls to 5 seconds
c.timeout = 5
#############################################################################
#
# Actual Test
assert c.expect(def_module.prompt) == 0, "Shell did not print expected prompt (1)"
c.sendline("systemInfo")
assert c.expect('------------------------------------------------\r\n') == 0, "Shell did not print out expected values";
assert c.expect(def_module.prompt) == 0, "Shell did not print expected prompt (2)"
shellio.success()
| 34.714286 | 120 | 0.687243 | [
"MIT"
] | mikefeneley/school | Systems/esh-spring-2015.git/src/plugins/systemInfo_test.py | 1,458 | Python |
import os
import numpy as np
import pandas as pd
from sklearn.datasets.samples_generator import make_swiss_roll
import torch
import torchvision
from torchvision import transforms
import glob
import random
import config as cfg
import utils.metadata as meta
from . import csv_loader
from . import img_loader
# Datasets
# pytorch.org/docs/master/torchvision/datasets.html
# https://github.com/bfortuner/pytorch-cheatsheet/blob/master/pytorch-cheatsheet.ipynb
def get_iris_data():
fpath = "../data/iris.csv"
url = "https://raw.githubusercontent.com/pydata/pandas/master/pandas/tests/data/iris.csv"
df = csv_loader.load_or_download_df(fpath, url)
return df
def get_sin_data():
rng = np.random.RandomState(1)
X = np.sort(5 * rng.rand(80, 1), axis=0)
y = np.sin(X).ravel()
y[::5] += 3 * (0.5 - rng.rand(16))
return X,y
def get_housing_data():
# https://www.cs.toronto.edu/~delve/data/boston/bostonDetail.html
fpath = "../data/housing.csv"
url = "https://raw.githubusercontent.com/ggallo/boston-housing/master/housing.csv"
df = csv_loader.load_or_download_df(fpath, url)
return df
def get_advertising_data():
fpath = "../data/advertising.csv"
url = "http://www-bcf.usc.edu/~gareth/ISL/Advertising.csv"
df = csv_loader.load_or_download_df(fpath, url)
df = df.drop(df.columns[0], axis=1)
return df
def get_swiss_roll_data(n_samples=1000):
noise = 0.2
X, _ = make_swiss_roll(n_samples, noise)
X = X.astype('float32')[:, [0, 2]]
return X, _
def get_swiss_roll_loader(n_samples=1000):
X, _ = get_swiss_roll_data(n_samples)
dataset = torch.utils.data.dataset.TensorDataset(
torch.FloatTensor(X), torch.FloatTensor(_))
loader = torch.utils.data.dataloader.DataLoader(
dataset, batch_size=100, shuffle=True)
return loader
def get_mnist_loader():
MNIST_MEAN = np.array([0.1307,])
MNIST_STD = np.array([0.3081,])
normTransform = transforms.Normalize(MNIST_MEAN, MNIST_STD)
trainTransform = transforms.Compose([
transforms.ToTensor(),
normTransform
])
testTransform = transforms.Compose([
transforms.ToTensor(),
normTransform
])
trainset = torchvision.datasets.MNIST(root='../data', train=True,
download=True, transform=trainTransform)
trainloader = torch.utils.data.DataLoader(trainset, batch_size=128,
shuffle=True, num_workers=2)
testset = torchvision.datasets.MNIST(root='../data', train=False,
download=True, transform=testTransform)
testloader = torch.utils.data.DataLoader(testset, batch_size=128,
shuffle=False, num_workers=2)
return trainloader, testloader
def get_cifar_loader():
# https://github.com/pytorch/tutorials/blob/master/beginner_source/blitz/cifar10_tutorial.py
CIFAR_MEAN = np.array([0.49139968, 0.48215827, 0.44653124])
CIFAR_STD = np.array([0.24703233, 0.24348505, 0.26158768])
normTransform = transforms.Normalize(CIFAR_MEAN, CIFAR_STD)
trainTransform = transforms.Compose([
transforms.RandomCrop(32),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
normTransform
])
testTransform = transforms.Compose([
transforms.ToTensor(),
normTransform
])
trainset = torchvision.datasets.CIFAR10(root='../data', train=True,
download=True, transform=trainTransform)
trainloader = torch.utils.data.DataLoader(trainset, batch_size=64,
shuffle=True, num_workers=2)
testset = torchvision.datasets.CIFAR10(root='../data', train=False,
download=True, transform=testTransform)
testloader = torch.utils.data.DataLoader(testset, batch_size=64,
shuffle=False, num_workers=2)
classes = ('plane', 'car', 'bird', 'cat',
'deer', 'dog', 'frog', 'horse', 'ship', 'truck')
return trainloader, testloader, classes
def get_catsdogs_loader(imgs_dir):
# Need to download Kaggle cats/dogs competition
# And move ALL images into single directory
classes = ['cat','dog']
class_to_idx, idx_to_class = meta.get_key_int_maps(classes)
def get_targs_from_fpaths(fpaths):
targs = []
for fpath in fpaths:
classname = fpath.split('/')[-1].split('.')[0]
# For one-hot sigmoid
#targ = meta.onehot_encode_class(
# class_to_idx, classname)
targs.append(class_to_idx[classname])
return targs
normalize = transforms.Normalize(
mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
trainTransform = transforms.Compose([
transforms.RandomSizedCrop(224),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
normalize
])
testTransform = transforms.Compose([
transforms.Scale(256),
transforms.CenterCrop(224),
transforms.ToTensor(),
normalize
])
fpaths = glob.glob(imgs_dir + '*.jpg')
random.shuffle(fpaths)
trn_fpaths = fpaths[:20000]
val_fpaths = fpaths[20000:]
trn_targs = get_targs_from_fpaths(trn_fpaths)
val_targs = get_targs_from_fpaths(val_fpaths)
img_reader = 'pil'
trn_dataset = FileDataset(
trn_fpaths, img_reader, trn_targs, trainTransform)
val_dataset = FileDataset(
val_fpaths, img_reader, val_targs, testTransform)
trn_loader = torch.utils.data.DataLoader(
trn_dataset, batch_size=64,
shuffle=True, num_workers=4)
val_loader = torch.utils.data.DataLoader(
val_dataset, batch_size=64,
shuffle=False, num_workers=2)
return trn_loader, val_loader, classes
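# --- Illustrative usage (sketch, not part of the original module): assuming the
# Kaggle cats/dogs images were all moved into one flat directory (the path below
# is a placeholder), the loader could be consumed like this:
#
#   trn_loader, val_loader, classes = get_catsdogs_loader('../data/catsdogs/')
#   inputs, targets, paths = next(iter(trn_loader))  # FileDataset yields (img, target, path)
#   print(inputs.shape, targets[:5], classes)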
loaders = {
'pil': img_loader.pil_loader,
'tns': img_loader.tensor_loader,
'npy': img_loader.numpy_loader,
'io': img_loader.io_loader
}
class FileDataset(torch.utils.data.Dataset):
def __init__(self, fpaths,
img_loader='pil',
targets=None,
transform=None,
target_transform=None):
self.fpaths = fpaths
self.loader = self._get_loader(img_loader)
self.targets = targets
self.transform = transform
self.target_transform = target_transform
def _get_loader(self, loader_type):
return loaders[loader_type]
def _get_target(self, index):
if self.targets is None:
return 1
target = self.targets[index]
if self.target_transform is not None:
return self.target_transform(target)
return int(target)
def _get_input(self, index):
img_path = self.fpaths[index]
img = self.loader(img_path)
if self.transform is not None:
img = self.transform(img)
return img
def __getitem__(self, index):
input_ = self._get_input(index)
target = self._get_target(index)
img_path = self.fpaths[index]
return input_, target, img_path
def __len__(self):
return len(self.fpaths)
| 31.571429 | 96 | 0.639106 | [
"MIT"
] | bfortuner/machine-learning | utils/datasets.py | 7,293 | Python |
#!/usr/bin/env python
import sys
import subprocess
try:
import gtk
except:
print >> sys.stderr, "You need to install the python gtk bindings"
sys.exit(1)
# import vte
try:
import vte
except:
error = gtk.MessageDialog (None, gtk.DIALOG_MODAL, gtk.MESSAGE_ERROR, gtk.BUTTONS_OK,
'You need to install python bindings for libvte')
error.run()
sys.exit (1)
def on_key_press_event(widget, event):
keyname = gtk.gdk.keyval_name(event.keyval)
'''print "Key %s (%d) was pressed" % (keyname, event.keyval)
v.feed_child(keyname, len(keyname))
v2.feed_child(keyname, len(keyname))'''
for i in terms:
i.emit("key-press-event", event)
if (event.keyval == 65293):
text.set_text("")
nbterm = 3
terms = []
if __name__ == '__main__':
w = gtk.Window()
hbox = gtk.HBox()
x = 0
y = 0
for i in range(0, len(sys.argv)):
v = vte.Terminal ()
v.connect ("child-exited", lambda term: gtk.main_quit())
v.fork_command()
window = gtk.Window()
if (i > 0):
print sys.argv[i]
r=subprocess.Popen(["/bin/bash", "-i", "-c", sys.argv[i]], shell=True, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
#v.feed_child(sys.argv[i], len(sys.argv[i]))
#line=r.stdout.readline()
#print line
v.feed_child(sys.argv[i], len(sys.argv[i]))
e = gtk.gdk.Event(gtk.gdk.KEY_PRESS)
e.keyval = 65293
e.send_event = True
window.set_title("Window %s" % (sys.argv[i]))
else:
window.set_title("Window %d" % (i+1))
terms.append(v)
window.add(v)
window.connect('delete-event', lambda window, event: gtk.main_quit())
window.move(x, y)
window.set_default_size(200, 100)
#window.set_title("Window %d" % (i+1))
window.show_all()
if (i > 0):
e.window = window.get_window()
v.emit("key-press-event", e)
x += 780
        if ((i - 1) % 3 == 0):
y += 450
x = 0
text = gtk.Entry()
text.connect("key_press_event", on_key_press_event)
w.set_default_size(200, 15)
w.move(0, 0)
hbox.pack_start(text, True, True, 0)
w.add(hbox)
w.connect('delete-event', lambda window, event: gtk.main_quit())
w.show_all()
text.set_can_focus(True)
text.grab_focus()
gtk.main()
| 27.325843 | 153 | 0.576891 | [
"MIT"
] | sylvainmouquet/multipleterm | multipleterm.py | 2,432 | Python |
import torch
import numpy as np
import torch.utils.data
from net import SurnameLSTM
from data import SurnameDataset
if __name__ == '__main__':
net = SurnameLSTM()
state_dict = torch.load('model.pth')
net.load_state_dict(state_dict)
dataset = SurnameDataset(subset='val')
data_loader = torch.utils.data.DataLoader(dataset, batch_size=64, shuffle=True)
sample = iter(data_loader).__next__()
pred = np.argmax(net(sample['values']).detach().numpy(), axis=1)
gt = np.array(sample['raw_label'])
accuracy = np.average(np.where(pred == gt, 1, 0))
print('Accuracy on the validation data: {:.1f} %'.format(accuracy * 100))
print('Please enter a surname to val:')
input_name = input()
name = input_name.lower()
name_ascii = np.array([ord(c) for c in name])
name_ascii = np.pad(name_ascii, ((0, 12 - name_ascii.__len__())), mode='constant', constant_values=0).astype(
np.float32)
name_ascii = torch.tensor([name_ascii])
pred = np.argmax(net(name_ascii).detach().numpy(), axis=1)
print('Mr / Ms. {}, I guess you are {}!'.format(input_name, ['English', 'Chinese', 'Japanese'][pred[0]]))
| 36.21875 | 113 | 0.670406 | [
"MIT"
] | linkinpark213/pytorch-lstm-toy | test.py | 1,159 | Python |
__all__ = ['read_cif','cif_site_labels']
from ase.io import read
from ase.spacegroup import spacegroup
import sys
import os
import logging
from math import *
import numpy as np
import pkg_resources
import warnings
warnings.filterwarnings("ignore")
path = '.temp_files/'
filepath = pkg_resources.resource_filename(__name__,path)
'''
NOTE ABOUT CIF FILE FORMATS:
CIFs must include '_symmetry_Int_Tables_number' to be read by ASE.
If this is not included please edit your CIF file to include this information.
'''
def get_atom_lines(alllines):
order = []
for i,line in enumerate(alllines):
if '_atom' in line:
order.append(line)
start = i+1
end = None
for i,line in enumerate(alllines[start:]):
if len(line.split()) == 0:
end = start+i-1
break
if not end:
end = len(alllines)-1
new_order = []
for i,o in enumerate(order):
if 'site_label' in o:
new_order.append(i)
if 'site_type_symbol' in o:
new_order.append(i)
if 'fract_x' in o:
new_order.append(i)
if 'fract_y' in o:
new_order.append(i)
if 'fract_z' in o:
new_order.append(i)
return start,end,new_order
def fix_cif(cif):
f = open(cif,"r")
alllines = f.readlines()
f.close()
for i, line in enumerate(alllines):
if 'IT_coordinate_system_code' in line:
fields = line.split()
alllines[i] = '_symmetry_space_group_setting {0} \n'.format(fields[-1])
if '_atom_site_type_symbol' in line and '_atom_site_label' in alllines[i+1]:
alllines[i],alllines[i+1] = alllines[i+1],alllines[i]
file_name = cif.rstrip('.cif')
temp_file = '{0}/{1}_temp.cif'.format(filepath,file_name.split('/')[-1])
f = open(temp_file,"w")
f.writelines(alllines)
f.close()
atoms = read(temp_file);
os.remove(temp_file)
return atoms, alllines
def get_tsites(cif):
from ase.geometry import get_distances
tsites = []
tpos = []
z,alllines = fix_cif(cif)
si = [atom.index for atom in z if atom.symbol!='O']
start,end,order = get_atom_lines(alllines)
for line in alllines[start:end+1]:
if 'Si' in line or 'T' in line:
line = line.split()
temp_label = line[order[0]]
if not any(str.isdigit(c) for c in temp_label):
temp_label = line[order[1]]
if 'Si' in temp_label:
temp_label = temp_label.replace('Si','T')
tsites.append(temp_label)
pos = [float(line[order[2]]),float(line[order[3]]),float(line[order[4]])]
tpos.append([round(num,2) for num in pos])
tpos = np.array(tpos)
pos = z[si].get_scaled_positions()
tinds = []
tmults = []
t_class = []
for tp in tpos:
for i,p in enumerate(pos):
p = [round(num,2) for num in p]
diff = abs(tp-p)
if sum(diff) <= 0.03:
tinds.append(si[i])
for i in range(1,len(tsites)):
tmults.append(tinds[i]-tinds[i-1])
tmults.append(si[-1]-tinds[-1]+1)
#
# si = [atom.index for atom in z if atom.symbol=='Si']
# o = [atom.index for atom in z if atom.symbol=='O']
# si_pos = z[si].positions
# cell = z.cell
# distances = get_distances(si_pos,si_pos,cell=cell,pbc=[1,1,1])[1]
#
# for i in tinds:
# orig_ind = si.index(i)
# dists = sorted(distances[orig_ind])
# t_class.append([round(num,2) for num in dists])
#
#
# for i,d in enumerate(t_class):
# for j,t in enumerate(distances):
# dist = [round(num,2) for num in sorted(t)]
# if np.array_equal(dist,d):
# dist = [round(num,2) for num in sorted(t)]
# d = np.array(d)
# dist = np.array(dist)
# diff = abs(d - dist)
# if sum(diff) <= 0.1:
# tmults[i]+=1
n = len(si)
sn = sum(tmults)
if n != sn:
print('Something Went Wrong With T Sites')
return tsites, tmults, tinds
def get_osites(cif):
from ase.geometry import get_distances
osites = []
opos = []
z,alllines = fix_cif(cif)
start,end,order = get_atom_lines(alllines)
for line in alllines[start:end+1]:
if 'O' in line:
line = line.split()
temp_label = line[order[0]]
if not any(str.isdigit(c) for c in temp_label):
temp_label = line[order[1]]
osites.append(temp_label)
pos = [float(line[order[2]]),float(line[order[3]]),float(line[order[4]])]
opos.append([round(num,2) for num in pos])
opos = np.array(opos)
pos = z.get_scaled_positions()
oinds = []
omults = []
o_class = []
si = [atom.index for atom in z if atom.symbol=='Si']
o = [atom.index for atom in z if atom.symbol=='O']
o_pos = z[o].get_scaled_positions()
for op in opos:
for i,p in enumerate(o_pos):
p = np.array([round(num,2) for num in p])
diff = abs(op-p)
if sum(diff) <= 0.02:
oinds.append(o[i])
for i in range(1,len(osites)):
omults.append(oinds[i]-oinds[i-1])
omults.append(o[-1]-oinds[-1]+1)
# all_pos = z.positions
# o_pos = z[o].positions
# si_pos = z[si].positions
# cell = z.cell
# distances = get_distances(o_pos,all_pos,cell=cell,pbc=[1,1,1])[1]
#
# for i in oinds:
# orig_ind = o.index(i)
# dists = sorted(distances[orig_ind])
# o_class.append([round(num,2) for num in dists])
#
# for i,d in enumerate(o_class):
# for j,t in enumerate(distances):
# dist = [round(num,2) for num in sorted(t)]
# d = np.array(d)
# dist = np.array(dist)
# diff = abs(d - dist)
# if sum(diff) <= 0.05:
# omults[i]+=1
n = len(o)
sn = sum(omults)
if n != sn:
print('Something Went Wrong With O Sites')
return osites, omults, oinds
def read_cif(cif):
atoms, alllines = fix_cif(cif)
ts,tm,tinds = get_tsites(cif)
os,om,oinds = get_osites(cif)
return atoms,ts,tm,tinds,os,om,oinds
def cif_site_labels(cif):
atoms,ts,tm,tinds,os,om,oinds = read_cif(cif)
labels = {}
for i,t in enumerate(ts):
for j in range(tm[i]):
labels[tinds[i]+j] = t
for i,o in enumerate(os):
for j in range(om[i]):
labels[oinds[i]+j] = o
return labels
''' DEPRECATED FUNCTIONS'''
def float_with_error(x):
"""
some value in cif accompanies error like "1.234(5)
"""
if "?" in x:
return 0
pos = x.find("(")
if pos >= 0:
x = x[:pos]
return float(x)
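# Example (illustrative): float_with_error('1.234(5)') returns 1.234,
# and float_with_error('?') returns 0.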
def get_mults(cif):
# read the cif file
F = open(cif,"r")
alllines = F.readlines()
F.close()
# Parse out data from the cif file
for i,line in enumerate(alllines):
if '_cell_length_a' in line:
fields = line.split()
field = fields[-1]
field = float_with_error(field)
La = field
if '_cell_length_b' in line:
fields = line.split()
field = fields[-1]
field = float_with_error(field)
Lb = field
if '_cell_length_c' in line:
fields = line.split()
field = fields[-1]
field = float_with_error(field)
Lc = field
if '_cell_angle_alpha' in line:
fields = line.split()
field = fields[-1]
field = float_with_error(field)
alpha = field
if '_cell_angle_beta' in line:
fields = line.split()
field = fields[-1]
field = float_with_error(field)
beta = field
if '_cell_angle_gamma' in line:
fields = line.split()
field = fields[-1]
field = float_with_error(field)
gamma = field
if '_space_group_symop' in line or '_symmetry_equiv_pos' in line or '_space_group' in line:
n = i
lastline = len(alllines)
loops = []
for i,line in enumerate(alllines):
if 'loop' in line:
loops.append(i)
ops = []
for i in range(n+1,loops[1]):
n+=1
line = alllines[i]
if 'x' in line or 'X' in line:
ops.append(line.replace("'",''))
for i in range(len(ops)):
ops[i] = ops[i].replace("0/", "0./") # also for e.g. 10/9
ops[i] = ops[i].replace("1/", "1./")
ops[i] = ops[i].replace("2/", "2./")
ops[i] = ops[i].replace("3/", "3./")
ops[i] = ops[i].replace("4/", "4./")
ops[i] = ops[i].replace("5/", "5./")
ops[i] = ops[i].replace("6/", "6./")
ops[i] = ops[i].replace("7/", "7./")
ops[i] = ops[i].replace("8/", "8./")
ops[i] = ops[i].replace("9/", "9./")
osites = []
tsites = []
atoms = []
for j in range(n,lastline):
line = alllines[j]
if '_' not in line:
fields = line.split()
if len(fields) >3:
tmp = (fields[0],float(fields[2]),float(fields[3]),float(fields[4]))
if 'O' in fields[0]:
osites.append(fields[0])
if 'T' in fields[0]:
tsites.append(fields[0])
atoms.append(tmp)
for i in range(len(atoms)):
(name,xn,yn,zn) = atoms[i]
xn = (xn + 10.0) % 1.0
yn = (yn + 10.0) % 1.0
zn = (zn + 10.0) % 1.0
atoms[i] = (name,xn,yn,zn)
    # perform symmetry operations
label_list = []
symbols = []
positions = []
for i in atoms:
label_list.append(i[0])
eps = 0.01
imax = len(atoms)
i=0
while (i<imax):
label,x,y,z=atoms[i]
for op in ops:
op = op.replace("'",'')
op = op.lower()
xn,yn,zn = eval(op)
xn = (xn + 10.0) % 1.0
yn = (yn + 10.0) % 1.0
zn = (zn + 10.0) % 1.0
new_atom = True
for at in atoms:
if (abs(at[1]-xn) < eps and abs(at[2]-yn) < eps and abs(at[3]-zn) < eps):
new_atom = False
if new_atom:
p1 = np.array([at[1],at[2],at[3]])
p2 = np.array([xn,yn,zn])
diff = abs(p1-p2)
diff = np.round(diff,2)
count = np.count_nonzero(diff)
if count ==1 and 1 in diff:
new_atom = False
if new_atom:
atoms.append( (label,xn,yn,zn) )
label_list.append(label)
i += 1
imax =len(atoms)
#atoms2 = Atoms(symbols,scaled_positions=positions,cell = [La,Lb,Lc,alpha,beta,gamma])
    # count up the O- and T-site multiplicities
label_list = sorted(label_list)
omults = []
for o in osites:
count = label_list.count(o)
omults.append(count)
tmults = []
for t in tsites:
count = label_list.count(t)
tmults.append(count)
return tsites, tmults, osites, omults
def get_indices(cif):
'''
This is a tool that will read a CIF file and return the unique T-sites,
their multiplicities, and an example atom index.
It also does the same for the unique O-sites in the framework.
This tool only works on CIFs that are formatted the same way as the IZA
Structure Database CIFs.
'''
tsites, tmults, osites, omults = get_mults(cif)
f = open(cif,"r")
    alllines = f.readlines()
f.close()
for i, line in enumerate(alllines):
if 'IT_coordinate_system_code' in line:
fields = line.split()
alllines[i] = '_symmetry_space_group_setting {0}'.format(fields[-1])
atoms = read(cif)
oinds = [atom.index for atom in atoms if atom.symbol=='O']
index = 0
first_os = []
for i,m in enumerate(omults):
first_os.append(oinds[index])
index+=m
tinds = [atom.index for atom in atoms if atom.symbol !='O']
index = 0
first_ts = []
    for i,m in enumerate(tmults):
first_ts.append(tinds[index])
index+=m
return tsites,tmults,first_ts, osites, omults, first_os
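# Illustrative usage of the deprecated helpers above (the file name is a placeholder):
#   tsites, tmults, first_ts, osites, omults, first_os = get_indices('MFI.cif')
#   # tsites/osites hold the unique labels, tmults/omults their multiplicities,
#   # and first_ts/first_os the index of one representative atom per site.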
| 29.632212 | 99 | 0.532814 | ["MIT"] | cwaitt/zse | cif_tools.py | 12,327 | Python |
# -*- coding: utf-8 -*-
#
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Accesses the google.cloud.texttospeech.v1beta1 TextToSpeech API."""
import pkg_resources
import warnings
from google.oauth2 import service_account
import google.api_core.gapic_v1.client_info
import google.api_core.gapic_v1.config
import google.api_core.gapic_v1.method
import google.api_core.grpc_helpers
import grpc
from google.cloud.texttospeech_v1beta1.gapic import enums
from google.cloud.texttospeech_v1beta1.gapic import text_to_speech_client_config
from google.cloud.texttospeech_v1beta1.gapic.transports import (
text_to_speech_grpc_transport,
)
from google.cloud.texttospeech_v1beta1.proto import cloud_tts_pb2
from google.cloud.texttospeech_v1beta1.proto import cloud_tts_pb2_grpc
_GAPIC_LIBRARY_VERSION = pkg_resources.get_distribution(
"google-cloud-texttospeech"
).version
class TextToSpeechClient(object):
"""Service that implements Google Cloud Text-to-Speech API."""
SERVICE_ADDRESS = "texttospeech.googleapis.com:443"
"""The default address of the service."""
# The name of the interface for this client. This is the key used to
# find the method configuration in the client_config dictionary.
_INTERFACE_NAME = "google.cloud.texttospeech.v1beta1.TextToSpeech"
@classmethod
def from_service_account_file(cls, filename, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
file.
Args:
filename (str): The path to the service account private key json
file.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
TextToSpeechClient: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_file(filename)
kwargs["credentials"] = credentials
return cls(*args, **kwargs)
from_service_account_json = from_service_account_file
def __init__(
self,
transport=None,
channel=None,
credentials=None,
client_config=None,
client_info=None,
):
"""Constructor.
Args:
transport (Union[~.TextToSpeechGrpcTransport,
Callable[[~.Credentials, type], ~.TextToSpeechGrpcTransport]): A transport
instance, responsible for actually making the API calls.
The default transport uses the gRPC protocol.
This argument may also be a callable which returns a
transport instance. Callables will be sent the credentials
as the first argument and the default transport class as
the second argument.
channel (grpc.Channel): DEPRECATED. A ``Channel`` instance
through which to make calls. This argument is mutually exclusive
with ``credentials``; providing both will raise an exception.
credentials (google.auth.credentials.Credentials): The
authorization credentials to attach to requests. These
credentials identify this application to the service. If none
are specified, the client will attempt to ascertain the
credentials from the environment.
This argument is mutually exclusive with providing a
transport instance to ``transport``; doing so will raise
an exception.
client_config (dict): DEPRECATED. A dictionary of call options for
each method. If not specified, the default configuration is used.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
"""
# Raise deprecation warnings for things we want to go away.
if client_config is not None:
warnings.warn(
"The `client_config` argument is deprecated.",
PendingDeprecationWarning,
stacklevel=2,
)
else:
client_config = text_to_speech_client_config.config
if channel:
warnings.warn(
"The `channel` argument is deprecated; use " "`transport` instead.",
PendingDeprecationWarning,
stacklevel=2,
)
# Instantiate the transport.
# The transport is responsible for handling serialization and
# deserialization and actually sending data to the service.
if transport:
if callable(transport):
self.transport = transport(
credentials=credentials,
default_class=text_to_speech_grpc_transport.TextToSpeechGrpcTransport,
)
else:
if credentials:
raise ValueError(
"Received both a transport instance and "
"credentials; these are mutually exclusive."
)
self.transport = transport
else:
self.transport = text_to_speech_grpc_transport.TextToSpeechGrpcTransport(
address=self.SERVICE_ADDRESS, channel=channel, credentials=credentials
)
if client_info is None:
client_info = google.api_core.gapic_v1.client_info.ClientInfo(
gapic_version=_GAPIC_LIBRARY_VERSION
)
else:
client_info.gapic_version = _GAPIC_LIBRARY_VERSION
self._client_info = client_info
# Parse out the default settings for retry and timeout for each RPC
# from the client configuration.
# (Ordinarily, these are the defaults specified in the `*_config.py`
# file next to this one.)
self._method_configs = google.api_core.gapic_v1.config.parse_method_configs(
client_config["interfaces"][self._INTERFACE_NAME]
)
# Save a dictionary of cached API call functions.
# These are the actual callables which invoke the proper
# transport methods, wrapped with `wrap_method` to add retry,
# timeout, and the like.
self._inner_api_calls = {}
# Service calls
def list_voices(
self,
language_code=None,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None,
):
"""
Returns a list of ``Voice`` supported for synthesis.
Example:
>>> from google.cloud import texttospeech_v1beta1
>>>
>>> client = texttospeech_v1beta1.TextToSpeechClient()
>>>
>>> response = client.list_voices()
Args:
language_code (str): Optional (but recommended)
`BCP-47 <https://www.rfc-editor.org/rfc/bcp/bcp47.txt>`__ language tag.
If specified, the ListVoices call will only return voices that can be
used to synthesize this language\_code. E.g. when specifying "en-NZ",
you will get supported "en-*" voices; when specifying "no", you will get
supported "no-*" (Norwegian) and "nb-*" (Norwegian Bokmal) voices;
specifying "zh" will also get supported "cmn-*" voices; specifying
"zh-hk" will also get supported "yue-\*" voices.
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.texttospeech_v1beta1.types.ListVoicesResponse` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if "list_voices" not in self._inner_api_calls:
self._inner_api_calls[
"list_voices"
] = google.api_core.gapic_v1.method.wrap_method(
self.transport.list_voices,
default_retry=self._method_configs["ListVoices"].retry,
default_timeout=self._method_configs["ListVoices"].timeout,
client_info=self._client_info,
)
request = cloud_tts_pb2.ListVoicesRequest(language_code=language_code)
return self._inner_api_calls["list_voices"](
request, retry=retry, timeout=timeout, metadata=metadata
)
def synthesize_speech(
self,
input_,
voice,
audio_config,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None,
):
"""
Synthesizes speech synchronously: receive results after all text input
has been processed.
Example:
>>> from google.cloud import texttospeech_v1beta1
>>>
>>> client = texttospeech_v1beta1.TextToSpeechClient()
>>>
>>> # TODO: Initialize `input_`:
>>> input_ = {}
>>>
>>> # TODO: Initialize `voice`:
>>> voice = {}
>>>
>>> # TODO: Initialize `audio_config`:
>>> audio_config = {}
>>>
>>> response = client.synthesize_speech(input_, voice, audio_config)
Args:
input_ (Union[dict, ~google.cloud.texttospeech_v1beta1.types.SynthesisInput]): Required. The Synthesizer requires either plain text or SSML as input.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.texttospeech_v1beta1.types.SynthesisInput`
voice (Union[dict, ~google.cloud.texttospeech_v1beta1.types.VoiceSelectionParams]): Required. The desired voice of the synthesized audio.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.texttospeech_v1beta1.types.VoiceSelectionParams`
audio_config (Union[dict, ~google.cloud.texttospeech_v1beta1.types.AudioConfig]): Required. The configuration of the synthesized audio.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.texttospeech_v1beta1.types.AudioConfig`
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.texttospeech_v1beta1.types.SynthesizeSpeechResponse` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if "synthesize_speech" not in self._inner_api_calls:
self._inner_api_calls[
"synthesize_speech"
] = google.api_core.gapic_v1.method.wrap_method(
self.transport.synthesize_speech,
default_retry=self._method_configs["SynthesizeSpeech"].retry,
default_timeout=self._method_configs["SynthesizeSpeech"].timeout,
client_info=self._client_info,
)
request = cloud_tts_pb2.SynthesizeSpeechRequest(
input=input_, voice=voice, audio_config=audio_config
)
return self._inner_api_calls["synthesize_speech"](
request, retry=retry, timeout=timeout, metadata=metadata
)
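# A minimal end-to-end sketch of using this client (not part of the generated code;
# the voice and encoding choices below are illustrative and assume the package's
# `types` and `enums` modules):
#
#   from google.cloud import texttospeech_v1beta1
#   client = texttospeech_v1beta1.TextToSpeechClient()
#   input_ = texttospeech_v1beta1.types.SynthesisInput(text="Hello, world")
#   voice = texttospeech_v1beta1.types.VoiceSelectionParams(
#       language_code="en-US",
#       ssml_gender=texttospeech_v1beta1.enums.SsmlVoiceGender.NEUTRAL)
#   audio_config = texttospeech_v1beta1.types.AudioConfig(
#       audio_encoding=texttospeech_v1beta1.enums.AudioEncoding.MP3)
#   response = client.synthesize_speech(input_, voice, audio_config)
#   audio_bytes = response.audio_content  # raw MP3 bytes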
| 43.980583 | 161 | 0.634216 | ["Apache-2.0"] | Abd-Elrazek/google-cloud-python | texttospeech/google/cloud/texttospeech_v1beta1/gapic/text_to_speech_client.py | 13,590 | Python |
# -*- coding: utf-8 -*-
# Generated by Django 1.10.8 on 2019-03-18 04:04
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('wildlifecompliance', '0146_auto_20190308_1626'),
]
operations = [
migrations.AddField(
model_name='returntype',
name='return_type',
field=models.CharField(choices=[('sheet', 'Sheet'), ('question', 'Question'), ('data', 'Data')], default='sheet', max_length=30, verbose_name='Type'),
),
]
| 27.666667 | 162 | 0.628227 | ["Apache-2.0"] | Djandwich/wildlifecompliance | wildlifecompliance/migrations/0147_returntype_return_type.py | 581 | Python |
from python_dwd.additionals.functions import check_parameters, retrieve_time_resolution_from_filename,\
retrieve_parameter_from_filename, retrieve_period_type_from_filename, determine_parameters
from python_dwd.enumerations.period_type_enumeration import PeriodType
from python_dwd.enumerations.time_resolution_enumeration import TimeResolution
from python_dwd.enumerations.parameter_enumeration import Parameter
def test_check_parameters():
assert check_parameters(Parameter.PRECIPITATION, TimeResolution.MINUTE_10, PeriodType.HISTORICAL)
def test_retrieve_time_resolution_from_filename():
assert retrieve_time_resolution_from_filename('10minutenwerte_2019.csv') == TimeResolution.MINUTE_10
assert retrieve_time_resolution_from_filename('1minutenwerte_2019.csv') == TimeResolution.MINUTE_1
assert retrieve_time_resolution_from_filename('tageswerte__2019.csv') == TimeResolution.DAILY
    assert retrieve_time_resolution_from_filename('tageswerte2019.csv') is None
def test_retrieve_parameter_from_filename():
assert retrieve_parameter_from_filename('bidb_!!_st_.xml', TimeResolution.HOURLY) == Parameter.SOLAR
assert retrieve_parameter_from_filename('10000_historical_nieder_.txt', TimeResolution.MINUTE_1) \
== Parameter.PRECIPITATION
assert retrieve_parameter_from_filename('klima_climate_kl_.csv', TimeResolution.DAILY) == Parameter.CLIMATE_SUMMARY
assert retrieve_parameter_from_filename('klima_climate_kl_.csv', TimeResolution.MINUTE_1) is None
def test_retrieve_period_type_from_filename():
assert retrieve_period_type_from_filename('_hist.xml') == PeriodType.HISTORICAL
assert retrieve_period_type_from_filename('no_period_type') is None
def test_determine_parameters():
assert determine_parameters('10minutenwerte_hist_nieder_') == (Parameter.PRECIPITATION,
TimeResolution.MINUTE_10,
PeriodType.HISTORICAL)
| 53.289474 | 119 | 0.788148 | ["MIT"] | ikamensh/python_dwd | tests/additionals/test_functions.py | 2,025 | Python |
# Copyright 2012-2019 The Meson development team
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from . import mparser
from . import environment
from . import coredata
from . import dependencies
from . import mlog
from . import build
from . import optinterpreter
from . import compilers
from .wrap import wrap, WrapMode
from . import mesonlib
from .mesonlib import FileMode, MachineChoice, Popen_safe, listify, extract_as_list, has_path_sep, unholder
from .dependencies import ExternalProgram
from .dependencies import InternalDependency, Dependency, NotFoundDependency, DependencyException
from .depfile import DepFile
from .interpreterbase import InterpreterBase
from .interpreterbase import check_stringlist, flatten, noPosargs, noKwargs, stringArgs, permittedKwargs, noArgsFlattening
from .interpreterbase import InterpreterException, InvalidArguments, InvalidCode, SubdirDoneRequest
from .interpreterbase import InterpreterObject, MutableInterpreterObject, Disabler, disablerIfNotFound
from .interpreterbase import FeatureNew, FeatureDeprecated, FeatureNewKwargs
from .interpreterbase import ObjectHolder
from .modules import ModuleReturnValue
from .cmake import CMakeInterpreter
from .backend.backends import TestProtocol
from pathlib import Path, PurePath
import os
import shutil
import uuid
import re
import shlex
import subprocess
import collections
import functools
import typing as T
import importlib
permitted_method_kwargs = {
'partial_dependency': {'compile_args', 'link_args', 'links', 'includes',
'sources'},
}
def stringifyUserArguments(args):
if isinstance(args, list):
return '[%s]' % ', '.join([stringifyUserArguments(x) for x in args])
elif isinstance(args, dict):
return '{%s}' % ', '.join(['%s : %s' % (stringifyUserArguments(k), stringifyUserArguments(v)) for k, v in args.items()])
elif isinstance(args, int):
return str(args)
elif isinstance(args, str):
return "'%s'" % args
raise InvalidArguments('Function accepts only strings, integers, lists and lists thereof.')
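# For example, stringifyUserArguments(['a', 1, {'k': 'v'}]) returns "['a', 1, {'k' : 'v'}]".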
class OverrideProgram(dependencies.ExternalProgram):
pass
class FeatureOptionHolder(InterpreterObject, ObjectHolder):
def __init__(self, env, name, option):
InterpreterObject.__init__(self)
ObjectHolder.__init__(self, option)
if option.is_auto():
self.held_object = env.coredata.builtins['auto_features']
self.name = name
self.methods.update({'enabled': self.enabled_method,
'disabled': self.disabled_method,
'auto': self.auto_method,
})
@noPosargs
@permittedKwargs({})
def enabled_method(self, args, kwargs):
return self.held_object.is_enabled()
@noPosargs
@permittedKwargs({})
def disabled_method(self, args, kwargs):
return self.held_object.is_disabled()
@noPosargs
@permittedKwargs({})
def auto_method(self, args, kwargs):
return self.held_object.is_auto()
def extract_required_kwarg(kwargs, subproject, feature_check=None, default=True):
val = kwargs.get('required', default)
disabled = False
required = False
feature = None
if isinstance(val, FeatureOptionHolder):
if not feature_check:
feature_check = FeatureNew('User option "feature"', '0.47.0')
feature_check.use(subproject)
option = val.held_object
feature = val.name
if option.is_disabled():
disabled = True
elif option.is_enabled():
required = True
elif isinstance(val, bool):
required = val
else:
raise InterpreterException('required keyword argument must be boolean or a feature option')
# Keep boolean value in kwargs to simplify other places where this kwarg is
# checked.
kwargs['required'] = required
return disabled, required, feature
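# For example: required=False yields (False, False, None); required=True yields
# (False, True, None); a feature option named 'docs' yields (False, True, 'docs')
# when enabled and (True, False, 'docs') when disabled.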
def extract_search_dirs(kwargs):
search_dirs = mesonlib.stringlistify(kwargs.get('dirs', []))
search_dirs = [Path(d).expanduser() for d in search_dirs]
for d in search_dirs:
if mesonlib.is_windows() and d.root.startswith('\\'):
# a Unix-path starting with `/` that is not absolute on Windows.
# discard without failing for end-user ease of cross-platform directory arrays
continue
if not d.is_absolute():
raise InvalidCode('Search directory {} is not an absolute path.'.format(d))
return list(map(str, search_dirs))
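# For example, extract_search_dirs({'dirs': ['~/opt/lib', '/usr/local/lib']}) expands
# the '~' and returns absolute path strings; a relative entry raises InvalidCode.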
class TryRunResultHolder(InterpreterObject):
def __init__(self, res):
super().__init__()
self.res = res
self.methods.update({'returncode': self.returncode_method,
'compiled': self.compiled_method,
'stdout': self.stdout_method,
'stderr': self.stderr_method,
})
@noPosargs
@permittedKwargs({})
def returncode_method(self, args, kwargs):
return self.res.returncode
@noPosargs
@permittedKwargs({})
def compiled_method(self, args, kwargs):
return self.res.compiled
@noPosargs
@permittedKwargs({})
def stdout_method(self, args, kwargs):
return self.res.stdout
@noPosargs
@permittedKwargs({})
def stderr_method(self, args, kwargs):
return self.res.stderr
class RunProcess(InterpreterObject):
def __init__(self, cmd, args, env, source_dir, build_dir, subdir, mesonintrospect, in_builddir=False, check=False, capture=True):
super().__init__()
if not isinstance(cmd, ExternalProgram):
raise AssertionError('BUG: RunProcess must be passed an ExternalProgram')
self.capture = capture
pc, self.stdout, self.stderr = self.run_command(cmd, args, env, source_dir, build_dir, subdir, mesonintrospect, in_builddir, check)
self.returncode = pc.returncode
self.methods.update({'returncode': self.returncode_method,
'stdout': self.stdout_method,
'stderr': self.stderr_method,
})
def run_command(self, cmd, args, env, source_dir, build_dir, subdir, mesonintrospect, in_builddir, check=False):
command_array = cmd.get_command() + args
menv = {'MESON_SOURCE_ROOT': source_dir,
'MESON_BUILD_ROOT': build_dir,
'MESON_SUBDIR': subdir,
'MESONINTROSPECT': ' '.join([shlex.quote(x) for x in mesonintrospect]),
}
if in_builddir:
cwd = os.path.join(build_dir, subdir)
else:
cwd = os.path.join(source_dir, subdir)
child_env = os.environ.copy()
child_env.update(menv)
child_env = env.get_env(child_env)
stdout = subprocess.PIPE if self.capture else subprocess.DEVNULL
mlog.debug('Running command:', ' '.join(command_array))
try:
p, o, e = Popen_safe(command_array, stdout=stdout, env=child_env, cwd=cwd)
if self.capture:
mlog.debug('--- stdout ---')
mlog.debug(o)
else:
o = ''
mlog.debug('--- stdout disabled ---')
mlog.debug('--- stderr ---')
mlog.debug(e)
mlog.debug('')
if check and p.returncode != 0:
raise InterpreterException('Command "{}" failed with status {}.'.format(' '.join(command_array), p.returncode))
return p, o, e
except FileNotFoundError:
raise InterpreterException('Could not execute command "%s".' % ' '.join(command_array))
@noPosargs
@permittedKwargs({})
def returncode_method(self, args, kwargs):
return self.returncode
@noPosargs
@permittedKwargs({})
def stdout_method(self, args, kwargs):
return self.stdout
@noPosargs
@permittedKwargs({})
def stderr_method(self, args, kwargs):
return self.stderr
class ConfigureFileHolder(InterpreterObject, ObjectHolder):
def __init__(self, subdir, sourcename, targetname, configuration_data):
InterpreterObject.__init__(self)
obj = build.ConfigureFile(subdir, sourcename, targetname, configuration_data)
ObjectHolder.__init__(self, obj)
class EnvironmentVariablesHolder(MutableInterpreterObject, ObjectHolder):
def __init__(self, initial_values=None):
MutableInterpreterObject.__init__(self)
ObjectHolder.__init__(self, build.EnvironmentVariables())
self.methods.update({'set': self.set_method,
'append': self.append_method,
'prepend': self.prepend_method,
})
if isinstance(initial_values, dict):
for k, v in initial_values.items():
self.set_method([k, v], {})
elif isinstance(initial_values, list):
for e in initial_values:
if '=' not in e:
raise InterpreterException('Env var definition must be of type key=val.')
(k, val) = e.split('=', 1)
k = k.strip()
val = val.strip()
if ' ' in k:
raise InterpreterException('Env var key must not have spaces in it.')
self.set_method([k, val], {})
elif initial_values:
raise AssertionError('Unsupported EnvironmentVariablesHolder initial_values')
def __repr__(self):
repr_str = "<{0}: {1}>"
return repr_str.format(self.__class__.__name__, self.held_object.envvars)
def add_var(self, method, args, kwargs):
if not isinstance(kwargs.get("separator", ""), str):
raise InterpreterException("EnvironmentVariablesHolder methods 'separator'"
" argument needs to be a string.")
if len(args) < 2:
raise InterpreterException("EnvironmentVariablesHolder methods require at least"
"2 arguments, first is the name of the variable and"
" following one are values")
# Warn when someone tries to use append() or prepend() on an env var
# which already has an operation set on it. People seem to think that
# multiple append/prepend operations stack, but they don't.
if method != self.held_object.set and self.held_object.has_name(args[0]):
mlog.warning('Overriding previous value of environment variable {!r} with a new one'
.format(args[0]), location=self.current_node)
self.held_object.add_var(method, args[0], args[1:], kwargs)
@stringArgs
@permittedKwargs({'separator'})
def set_method(self, args, kwargs):
self.add_var(self.held_object.set, args, kwargs)
@stringArgs
@permittedKwargs({'separator'})
def append_method(self, args, kwargs):
self.add_var(self.held_object.append, args, kwargs)
@stringArgs
@permittedKwargs({'separator'})
def prepend_method(self, args, kwargs):
self.add_var(self.held_object.prepend, args, kwargs)
class ConfigurationDataHolder(MutableInterpreterObject, ObjectHolder):
def __init__(self, pv, initial_values=None):
MutableInterpreterObject.__init__(self)
self.used = False # These objects become immutable after use in configure_file.
ObjectHolder.__init__(self, build.ConfigurationData(), pv)
self.methods.update({'set': self.set_method,
'set10': self.set10_method,
'set_quoted': self.set_quoted_method,
'has': self.has_method,
'get': self.get_method,
'get_unquoted': self.get_unquoted_method,
'merge_from': self.merge_from_method,
})
if isinstance(initial_values, dict):
for k, v in initial_values.items():
self.set_method([k, v], {})
elif initial_values:
raise AssertionError('Unsupported ConfigurationDataHolder initial_values')
def is_used(self):
return self.used
def mark_used(self):
self.used = True
def validate_args(self, args, kwargs):
if len(args) == 1 and isinstance(args[0], list) and len(args[0]) == 2:
mlog.deprecation('Passing a list as the single argument to '
'configuration_data.set is deprecated. This will '
'become a hard error in the future.',
location=self.current_node)
args = args[0]
if len(args) != 2:
raise InterpreterException("Configuration set requires 2 arguments.")
if self.used:
raise InterpreterException("Can not set values on configuration object that has been used.")
name, val = args
if not isinstance(val, (int, str)):
msg = 'Setting a configuration data value to {!r} is invalid, ' \
'and will fail at configure_file(). If you are using it ' \
'just to store some values, please use a dict instead.'
mlog.deprecation(msg.format(val), location=self.current_node)
desc = kwargs.get('description', None)
if not isinstance(name, str):
raise InterpreterException("First argument to set must be a string.")
if desc is not None and not isinstance(desc, str):
raise InterpreterException('Description must be a string.')
return name, val, desc
@noArgsFlattening
def set_method(self, args, kwargs):
(name, val, desc) = self.validate_args(args, kwargs)
self.held_object.values[name] = (val, desc)
def set_quoted_method(self, args, kwargs):
(name, val, desc) = self.validate_args(args, kwargs)
if not isinstance(val, str):
raise InterpreterException("Second argument to set_quoted must be a string.")
escaped_val = '\\"'.join(val.split('"'))
self.held_object.values[name] = ('"' + escaped_val + '"', desc)
def set10_method(self, args, kwargs):
(name, val, desc) = self.validate_args(args, kwargs)
if val:
self.held_object.values[name] = (1, desc)
else:
self.held_object.values[name] = (0, desc)
def has_method(self, args, kwargs):
return args[0] in self.held_object.values
@FeatureNew('configuration_data.get()', '0.38.0')
@noArgsFlattening
def get_method(self, args, kwargs):
if len(args) < 1 or len(args) > 2:
raise InterpreterException('Get method takes one or two arguments.')
name = args[0]
if name in self.held_object:
return self.held_object.get(name)[0]
if len(args) > 1:
return args[1]
raise InterpreterException('Entry %s not in configuration data.' % name)
@FeatureNew('configuration_data.get_unquoted()', '0.44.0')
def get_unquoted_method(self, args, kwargs):
if len(args) < 1 or len(args) > 2:
raise InterpreterException('Get method takes one or two arguments.')
name = args[0]
if name in self.held_object:
val = self.held_object.get(name)[0]
elif len(args) > 1:
val = args[1]
else:
raise InterpreterException('Entry %s not in configuration data.' % name)
if val[0] == '"' and val[-1] == '"':
return val[1:-1]
return val
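    # For example, with cfg being a configuration_data object, after
    # cfg.set_quoted('NAME', 'meson') the stored value is '"meson"', and
    # cfg.get_unquoted('NAME') returns 'meson' again.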
def get(self, name):
return self.held_object.values[name] # (val, desc)
def keys(self):
return self.held_object.values.keys()
def merge_from_method(self, args, kwargs):
if len(args) != 1:
raise InterpreterException('Merge_from takes one positional argument.')
from_object = args[0]
if not isinstance(from_object, ConfigurationDataHolder):
raise InterpreterException('Merge_from argument must be a configuration data object.')
from_object = from_object.held_object
for k, v in from_object.values.items():
self.held_object.values[k] = v
# Interpreter objects can not be pickled so we must have
# these wrappers.
class DependencyHolder(InterpreterObject, ObjectHolder):
def __init__(self, dep, pv):
InterpreterObject.__init__(self)
ObjectHolder.__init__(self, dep, pv)
self.methods.update({'found': self.found_method,
'type_name': self.type_name_method,
'version': self.version_method,
'name': self.name_method,
'get_pkgconfig_variable': self.pkgconfig_method,
'get_configtool_variable': self.configtool_method,
'get_variable': self.variable_method,
'partial_dependency': self.partial_dependency_method,
'include_type': self.include_type_method,
'as_system': self.as_system_method,
})
def found(self):
return self.found_method([], {})
@noPosargs
@permittedKwargs({})
def type_name_method(self, args, kwargs):
return self.held_object.type_name
@noPosargs
@permittedKwargs({})
def found_method(self, args, kwargs):
if self.held_object.type_name == 'internal':
return True
return self.held_object.found()
@noPosargs
@permittedKwargs({})
def version_method(self, args, kwargs):
return self.held_object.get_version()
@noPosargs
@permittedKwargs({})
def name_method(self, args, kwargs):
return self.held_object.get_name()
@permittedKwargs({'define_variable', 'default'})
def pkgconfig_method(self, args, kwargs):
args = listify(args)
if len(args) != 1:
raise InterpreterException('get_pkgconfig_variable takes exactly one argument.')
varname = args[0]
if not isinstance(varname, str):
raise InterpreterException('Variable name must be a string.')
return self.held_object.get_pkgconfig_variable(varname, kwargs)
@FeatureNew('dep.get_configtool_variable', '0.44.0')
@permittedKwargs({})
def configtool_method(self, args, kwargs):
args = listify(args)
if len(args) != 1:
raise InterpreterException('get_configtool_variable takes exactly one argument.')
varname = args[0]
if not isinstance(varname, str):
raise InterpreterException('Variable name must be a string.')
return self.held_object.get_configtool_variable(varname)
@FeatureNew('dep.partial_dependency', '0.46.0')
@noPosargs
@permittedKwargs(permitted_method_kwargs['partial_dependency'])
def partial_dependency_method(self, args, kwargs):
pdep = self.held_object.get_partial_dependency(**kwargs)
return DependencyHolder(pdep, self.subproject)
@FeatureNew('dep.get_variable', '0.51.0')
@noPosargs
@permittedKwargs({'cmake', 'pkgconfig', 'configtool', 'internal', 'default_value', 'pkgconfig_define'})
@FeatureNewKwargs('dep.get_variable', '0.54.0', ['internal'])
def variable_method(self, args, kwargs):
return self.held_object.get_variable(**kwargs)
@FeatureNew('dep.include_type', '0.52.0')
@noPosargs
@permittedKwargs({})
def include_type_method(self, args, kwargs):
return self.held_object.get_include_type()
@FeatureNew('dep.as_system', '0.52.0')
@permittedKwargs({})
def as_system_method(self, args, kwargs):
args = listify(args)
new_is_system = 'system'
if len(args) > 1:
raise InterpreterException('as_system takes only one optional value')
if len(args) == 1:
new_is_system = args[0]
new_dep = self.held_object.generate_system_dependency(new_is_system)
return DependencyHolder(new_dep, self.subproject)
class ExternalProgramHolder(InterpreterObject, ObjectHolder):
def __init__(self, ep, subproject, backend=None):
InterpreterObject.__init__(self)
ObjectHolder.__init__(self, ep)
self.subproject = subproject
self.backend = backend
self.methods.update({'found': self.found_method,
'path': self.path_method,
'full_path': self.full_path_method})
self.cached_version = None
@noPosargs
@permittedKwargs({})
def found_method(self, args, kwargs):
return self.found()
@noPosargs
@permittedKwargs({})
def path_method(self, args, kwargs):
mlog.deprecation('path() method is deprecated and replaced by full_path()')
return self._full_path()
@noPosargs
@permittedKwargs({})
@FeatureNew('ExternalProgram.full_path', '0.55.0')
def full_path_method(self, args, kwargs):
return self._full_path()
def _full_path(self):
exe = self.held_object
if isinstance(exe, build.Executable):
return self.backend.get_target_filename_abs(exe)
return exe.get_path()
def found(self):
return isinstance(self.held_object, build.Executable) or self.held_object.found()
def get_command(self):
return self.held_object.get_command()
def get_name(self):
exe = self.held_object
if isinstance(exe, build.Executable):
return exe.name
return exe.get_name()
def get_version(self, interpreter):
if isinstance(self.held_object, build.Executable):
return self.held_object.project_version
if not self.cached_version:
raw_cmd = self.get_command() + ['--version']
cmd = [self, '--version']
res = interpreter.run_command_impl(interpreter.current_node, cmd, {}, True)
if res.returncode != 0:
m = 'Running {!r} failed'
raise InterpreterException(m.format(raw_cmd))
output = res.stdout.strip()
if not output:
output = res.stderr.strip()
match = re.search(r'([0-9][0-9\.]+)', output)
if not match:
m = 'Could not find a version number in output of {!r}'
raise InterpreterException(m.format(raw_cmd))
self.cached_version = match.group(1)
return self.cached_version
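# Note on ExternalProgramHolder.get_version() above: the version is scraped from the
# program's `--version` output with the regex r'([0-9][0-9\.]+)'; e.g. an output of
# 'git version 2.39.2' yields '2.39.2'.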
class ExternalLibraryHolder(InterpreterObject, ObjectHolder):
def __init__(self, el, pv):
InterpreterObject.__init__(self)
ObjectHolder.__init__(self, el, pv)
self.methods.update({'found': self.found_method,
'type_name': self.type_name_method,
'partial_dependency': self.partial_dependency_method,
})
def found(self):
return self.held_object.found()
@noPosargs
@permittedKwargs({})
def type_name_method(self, args, kwargs):
return self.held_object.type_name
@noPosargs
@permittedKwargs({})
def found_method(self, args, kwargs):
return self.found()
def get_name(self):
return self.held_object.name
def get_compile_args(self):
return self.held_object.get_compile_args()
def get_link_args(self):
return self.held_object.get_link_args()
def get_exe_args(self):
return self.held_object.get_exe_args()
@FeatureNew('dep.partial_dependency', '0.46.0')
@noPosargs
@permittedKwargs(permitted_method_kwargs['partial_dependency'])
def partial_dependency_method(self, args, kwargs):
pdep = self.held_object.get_partial_dependency(**kwargs)
return DependencyHolder(pdep, self.subproject)
class GeneratorHolder(InterpreterObject, ObjectHolder):
@FeatureNewKwargs('generator', '0.43.0', ['capture'])
def __init__(self, interp, args, kwargs):
self.interpreter = interp
InterpreterObject.__init__(self)
ObjectHolder.__init__(self, build.Generator(args, kwargs), interp.subproject)
self.methods.update({'process': self.process_method})
@FeatureNewKwargs('generator.process', '0.45.0', ['preserve_path_from'])
@permittedKwargs({'extra_args', 'preserve_path_from'})
def process_method(self, args, kwargs):
extras = mesonlib.stringlistify(kwargs.get('extra_args', []))
if 'preserve_path_from' in kwargs:
preserve_path_from = kwargs['preserve_path_from']
if not isinstance(preserve_path_from, str):
raise InvalidArguments('Preserve_path_from must be a string.')
preserve_path_from = os.path.normpath(preserve_path_from)
if not os.path.isabs(preserve_path_from):
# This is a bit of a hack. Fix properly before merging.
raise InvalidArguments('Preserve_path_from must be an absolute path for now. Sorry.')
else:
preserve_path_from = None
gl = self.held_object.process_files('Generator', args, self.interpreter,
preserve_path_from, extra_args=extras)
return GeneratedListHolder(gl)
class GeneratedListHolder(InterpreterObject, ObjectHolder):
def __init__(self, arg1, extra_args=None):
InterpreterObject.__init__(self)
if isinstance(arg1, GeneratorHolder):
ObjectHolder.__init__(self, build.GeneratedList(arg1.held_object, extra_args if extra_args is not None else []))
else:
ObjectHolder.__init__(self, arg1)
def __repr__(self):
r = '<{}: {!r}>'
return r.format(self.__class__.__name__, self.held_object.get_outputs())
def add_file(self, a):
self.held_object.add_file(a)
# A machine that's statically known from the cross file
class MachineHolder(InterpreterObject, ObjectHolder):
def __init__(self, machine_info):
InterpreterObject.__init__(self)
ObjectHolder.__init__(self, machine_info)
self.methods.update({'system': self.system_method,
'cpu': self.cpu_method,
'cpu_family': self.cpu_family_method,
'endian': self.endian_method,
})
@noPosargs
@permittedKwargs({})
def cpu_family_method(self, args, kwargs):
return self.held_object.cpu_family
@noPosargs
@permittedKwargs({})
def cpu_method(self, args, kwargs):
return self.held_object.cpu
@noPosargs
@permittedKwargs({})
def system_method(self, args, kwargs):
return self.held_object.system
@noPosargs
@permittedKwargs({})
def endian_method(self, args, kwargs):
return self.held_object.endian
class IncludeDirsHolder(InterpreterObject, ObjectHolder):
def __init__(self, idobj):
InterpreterObject.__init__(self)
ObjectHolder.__init__(self, idobj)
class Headers(InterpreterObject):
def __init__(self, sources, kwargs):
InterpreterObject.__init__(self)
self.sources = sources
self.install_subdir = kwargs.get('subdir', '')
if os.path.isabs(self.install_subdir):
mlog.deprecation('Subdir keyword must not be an absolute path. This will be a hard error in the next release.')
self.custom_install_dir = kwargs.get('install_dir', None)
self.custom_install_mode = kwargs.get('install_mode', None)
if self.custom_install_dir is not None:
if not isinstance(self.custom_install_dir, str):
raise InterpreterException('Custom_install_dir must be a string.')
def set_install_subdir(self, subdir):
self.install_subdir = subdir
def get_install_subdir(self):
return self.install_subdir
def get_sources(self):
return self.sources
def get_custom_install_dir(self):
return self.custom_install_dir
def get_custom_install_mode(self):
return self.custom_install_mode
class DataHolder(InterpreterObject, ObjectHolder):
def __init__(self, data):
InterpreterObject.__init__(self)
ObjectHolder.__init__(self, data)
def get_source_subdir(self):
return self.held_object.source_subdir
def get_sources(self):
return self.held_object.sources
def get_install_dir(self):
return self.held_object.install_dir
class InstallDir(InterpreterObject):
def __init__(self, src_subdir, inst_subdir, install_dir, install_mode, exclude, strip_directory):
InterpreterObject.__init__(self)
self.source_subdir = src_subdir
self.installable_subdir = inst_subdir
self.install_dir = install_dir
self.install_mode = install_mode
self.exclude = exclude
self.strip_directory = strip_directory
class Man(InterpreterObject):
def __init__(self, sources, kwargs):
InterpreterObject.__init__(self)
self.sources = sources
self.validate_sources()
self.custom_install_dir = kwargs.get('install_dir', None)
self.custom_install_mode = kwargs.get('install_mode', None)
if self.custom_install_dir is not None and not isinstance(self.custom_install_dir, str):
raise InterpreterException('Custom_install_dir must be a string.')
def validate_sources(self):
for s in self.sources:
try:
num = int(s.split('.')[-1])
except (IndexError, ValueError):
num = 0
if num < 1 or num > 8:
raise InvalidArguments('Man file must have a file extension of a number between 1 and 8')
def get_custom_install_dir(self):
return self.custom_install_dir
def get_custom_install_mode(self):
return self.custom_install_mode
def get_sources(self):
return self.sources
class GeneratedObjectsHolder(InterpreterObject, ObjectHolder):
def __init__(self, held_object):
InterpreterObject.__init__(self)
ObjectHolder.__init__(self, held_object)
class TargetHolder(InterpreterObject, ObjectHolder):
def __init__(self, target, interp):
InterpreterObject.__init__(self)
ObjectHolder.__init__(self, target, interp.subproject)
self.interpreter = interp
class BuildTargetHolder(TargetHolder):
def __init__(self, target, interp):
super().__init__(target, interp)
self.methods.update({'extract_objects': self.extract_objects_method,
'extract_all_objects': self.extract_all_objects_method,
'name': self.name_method,
'get_id': self.get_id_method,
'outdir': self.outdir_method,
'full_path': self.full_path_method,
'private_dir_include': self.private_dir_include_method,
})
def __repr__(self):
r = '<{} {}: {}>'
h = self.held_object
return r.format(self.__class__.__name__, h.get_id(), h.filename)
def is_cross(self):
return not self.held_object.environment.machines.matches_build_machine(self.held_object.for_machine)
@noPosargs
@permittedKwargs({})
def private_dir_include_method(self, args, kwargs):
return IncludeDirsHolder(build.IncludeDirs('', [], False,
[self.interpreter.backend.get_target_private_dir(self.held_object)]))
@noPosargs
@permittedKwargs({})
def full_path_method(self, args, kwargs):
return self.interpreter.backend.get_target_filename_abs(self.held_object)
@noPosargs
@permittedKwargs({})
def outdir_method(self, args, kwargs):
return self.interpreter.backend.get_target_dir(self.held_object)
@permittedKwargs({})
def extract_objects_method(self, args, kwargs):
gobjs = self.held_object.extract_objects(args)
return GeneratedObjectsHolder(gobjs)
@FeatureNewKwargs('extract_all_objects', '0.46.0', ['recursive'])
@noPosargs
@permittedKwargs({'recursive'})
def extract_all_objects_method(self, args, kwargs):
recursive = kwargs.get('recursive', False)
gobjs = self.held_object.extract_all_objects(recursive)
if gobjs.objlist and 'recursive' not in kwargs:
mlog.warning('extract_all_objects called without setting recursive '
'keyword argument. Meson currently defaults to '
'non-recursive to maintain backward compatibility but '
'the default will be changed in the future.',
location=self.current_node)
return GeneratedObjectsHolder(gobjs)
@noPosargs
@permittedKwargs({})
def get_id_method(self, args, kwargs):
return self.held_object.get_id()
@FeatureNew('name', '0.54.0')
@noPosargs
@permittedKwargs({})
def name_method(self, args, kwargs):
return self.held_object.name
class ExecutableHolder(BuildTargetHolder):
def __init__(self, target, interp):
super().__init__(target, interp)
class StaticLibraryHolder(BuildTargetHolder):
def __init__(self, target, interp):
super().__init__(target, interp)
class SharedLibraryHolder(BuildTargetHolder):
def __init__(self, target, interp):
super().__init__(target, interp)
# Set to True only when called from self.func_shared_lib().
target.shared_library_only = False
class BothLibrariesHolder(BuildTargetHolder):
def __init__(self, shared_holder, static_holder, interp):
# FIXME: This build target always represents the shared library, but
# that should be configurable.
super().__init__(shared_holder.held_object, interp)
self.shared_holder = shared_holder
self.static_holder = static_holder
self.methods.update({'get_shared_lib': self.get_shared_lib_method,
'get_static_lib': self.get_static_lib_method,
})
def __repr__(self):
r = '<{} {}: {}, {}: {}>'
h1 = self.shared_holder.held_object
h2 = self.static_holder.held_object
return r.format(self.__class__.__name__, h1.get_id(), h1.filename, h2.get_id(), h2.filename)
@noPosargs
@permittedKwargs({})
def get_shared_lib_method(self, args, kwargs):
return self.shared_holder
@noPosargs
@permittedKwargs({})
def get_static_lib_method(self, args, kwargs):
return self.static_holder
class SharedModuleHolder(BuildTargetHolder):
def __init__(self, target, interp):
super().__init__(target, interp)
class JarHolder(BuildTargetHolder):
def __init__(self, target, interp):
super().__init__(target, interp)
class CustomTargetIndexHolder(TargetHolder):
def __init__(self, target, interp):
super().__init__(target, interp)
self.methods.update({'full_path': self.full_path_method,
})
@FeatureNew('custom_target[i].full_path', '0.54.0')
@noPosargs
@permittedKwargs({})
def full_path_method(self, args, kwargs):
return self.interpreter.backend.get_target_filename_abs(self.held_object)
class CustomTargetHolder(TargetHolder):
def __init__(self, target, interp):
super().__init__(target, interp)
self.methods.update({'full_path': self.full_path_method,
'to_list': self.to_list_method,
})
def __repr__(self):
r = '<{} {}: {}>'
h = self.held_object
return r.format(self.__class__.__name__, h.get_id(), h.command)
@noPosargs
@permittedKwargs({})
def full_path_method(self, args, kwargs):
return self.interpreter.backend.get_target_filename_abs(self.held_object)
@FeatureNew('custom_target.to_list', '0.54.0')
@noPosargs
@permittedKwargs({})
def to_list_method(self, args, kwargs):
result = []
for i in self.held_object:
result.append(CustomTargetIndexHolder(i, self.interpreter))
return result
def __getitem__(self, index):
return CustomTargetIndexHolder(self.held_object[index], self.interpreter)
def __setitem__(self, index, value): # lgtm[py/unexpected-raise-in-special-method]
raise InterpreterException('Cannot set a member of a CustomTarget')
def __delitem__(self, index): # lgtm[py/unexpected-raise-in-special-method]
raise InterpreterException('Cannot delete a member of a CustomTarget')
def outdir_include(self):
return IncludeDirsHolder(build.IncludeDirs('', [], False,
[os.path.join('@BUILD_ROOT@', self.interpreter.backend.get_target_dir(self.held_object))]))
class RunTargetHolder(TargetHolder):
def __init__(self, target, interp):
super().__init__(target, interp)
def __repr__(self):
r = '<{} {}: {}>'
h = self.held_object
return r.format(self.__class__.__name__, h.get_id(), h.command)
class Test(InterpreterObject):
def __init__(self, name: str, project: str, suite: T.List[str], exe: build.Executable,
depends: T.List[T.Union[build.CustomTarget, build.BuildTarget]],
is_parallel: bool, cmd_args: T.List[str], env: build.EnvironmentVariables,
should_fail: bool, timeout: int, workdir: T.Optional[str], protocol: str,
priority: int):
InterpreterObject.__init__(self)
self.name = name
self.suite = suite
self.project_name = project
self.exe = exe
self.depends = depends
self.is_parallel = is_parallel
self.cmd_args = cmd_args
self.env = env
self.should_fail = should_fail
self.timeout = timeout
self.workdir = workdir
self.protocol = TestProtocol.from_str(protocol)
self.priority = priority
def get_exe(self):
return self.exe
def get_name(self):
return self.name
class SubprojectHolder(InterpreterObject, ObjectHolder):
def __init__(self, subinterpreter, subproject_dir, name, warnings=0, disabled_feature=None,
exception=None):
InterpreterObject.__init__(self)
ObjectHolder.__init__(self, subinterpreter)
self.name = name
self.warnings = warnings
self.disabled_feature = disabled_feature
self.exception = exception
self.subproject_dir = subproject_dir
self.methods.update({'get_variable': self.get_variable_method,
'found': self.found_method,
})
@noPosargs
@permittedKwargs({})
def found_method(self, args, kwargs):
return self.found()
def found(self):
return self.held_object is not None
@permittedKwargs({})
@noArgsFlattening
def get_variable_method(self, args, kwargs):
if len(args) < 1 or len(args) > 2:
raise InterpreterException('Get_variable takes one or two arguments.')
if not self.found():
            raise InterpreterException('Subproject "%s/%s" is disabled; cannot call get_variable on it.' % (
                self.subproject_dir, self.name))
varname = args[0]
if not isinstance(varname, str):
raise InterpreterException('Get_variable first argument must be a string.')
try:
return self.held_object.variables[varname]
except KeyError:
pass
if len(args) == 2:
return args[1]
raise InvalidArguments('Requested variable "{0}" not found.'.format(varname))
header_permitted_kwargs = set([
'required',
'prefix',
'no_builtin_args',
'include_directories',
'args',
'dependencies',
])
find_library_permitted_kwargs = set([
'has_headers',
'required',
'dirs',
'static',
])
find_library_permitted_kwargs |= set(['header_' + k for k in header_permitted_kwargs])
class CompilerHolder(InterpreterObject):
def __init__(self, compiler, env, subproject):
InterpreterObject.__init__(self)
self.compiler = compiler
self.environment = env
self.subproject = subproject
self.methods.update({'compiles': self.compiles_method,
'links': self.links_method,
'get_id': self.get_id_method,
'get_linker_id': self.get_linker_id_method,
'compute_int': self.compute_int_method,
'sizeof': self.sizeof_method,
'get_define': self.get_define_method,
'check_header': self.check_header_method,
'has_header': self.has_header_method,
'has_header_symbol': self.has_header_symbol_method,
'run': self.run_method,
'has_function': self.has_function_method,
'has_member': self.has_member_method,
'has_members': self.has_members_method,
'has_type': self.has_type_method,
'alignment': self.alignment_method,
'version': self.version_method,
'cmd_array': self.cmd_array_method,
'find_library': self.find_library_method,
'has_argument': self.has_argument_method,
'has_function_attribute': self.has_func_attribute_method,
'get_supported_function_attributes': self.get_supported_function_attributes_method,
'has_multi_arguments': self.has_multi_arguments_method,
'get_supported_arguments': self.get_supported_arguments_method,
'first_supported_argument': self.first_supported_argument_method,
'has_link_argument': self.has_link_argument_method,
'has_multi_link_arguments': self.has_multi_link_arguments_method,
'get_supported_link_arguments': self.get_supported_link_arguments_method,
'first_supported_link_argument': self.first_supported_link_argument_method,
'unittest_args': self.unittest_args_method,
'symbols_have_underscore_prefix': self.symbols_have_underscore_prefix_method,
'get_argument_syntax': self.get_argument_syntax_method,
})
def _dep_msg(self, deps, endl):
msg_single = 'with dependency {}'
msg_many = 'with dependencies {}'
if not deps:
return endl
if endl is None:
endl = ''
tpl = msg_many if len(deps) > 1 else msg_single
names = []
for d in deps:
if isinstance(d, dependencies.ExternalLibrary):
name = '-l' + d.name
else:
name = d.name
names.append(name)
return tpl.format(', '.join(names)) + endl
@noPosargs
@permittedKwargs({})
def version_method(self, args, kwargs):
return self.compiler.version
@noPosargs
@permittedKwargs({})
def cmd_array_method(self, args, kwargs):
return self.compiler.exelist
def determine_args(self, kwargs, mode='link'):
nobuiltins = kwargs.get('no_builtin_args', False)
if not isinstance(nobuiltins, bool):
raise InterpreterException('Type of no_builtin_args not a boolean.')
args = []
incdirs = extract_as_list(kwargs, 'include_directories')
for i in incdirs:
if not isinstance(i, IncludeDirsHolder):
raise InterpreterException('Include directories argument must be an include_directories object.')
for idir in i.held_object.get_incdirs():
idir = os.path.join(self.environment.get_source_dir(),
i.held_object.get_curdir(), idir)
args += self.compiler.get_include_args(idir, False)
if not nobuiltins:
for_machine = Interpreter.machine_from_native_kwarg(kwargs)
opts = self.environment.coredata.compiler_options[for_machine][self.compiler.language]
args += self.compiler.get_option_compile_args(opts)
if mode == 'link':
args += self.compiler.get_option_link_args(opts)
args += mesonlib.stringlistify(kwargs.get('args', []))
return args
def determine_dependencies(self, kwargs, endl=':'):
deps = kwargs.get('dependencies', None)
if deps is not None:
deps = listify(deps)
final_deps = []
for d in deps:
try:
d = d.held_object
except Exception:
pass
if isinstance(d, InternalDependency) or not isinstance(d, Dependency):
raise InterpreterException('Dependencies must be external dependencies')
final_deps.append(d)
deps = final_deps
return deps, self._dep_msg(deps, endl)
@permittedKwargs({
'prefix',
'args',
'dependencies',
})
def alignment_method(self, args, kwargs):
if len(args) != 1:
raise InterpreterException('Alignment method takes exactly one positional argument.')
check_stringlist(args)
typename = args[0]
prefix = kwargs.get('prefix', '')
if not isinstance(prefix, str):
raise InterpreterException('Prefix argument of alignment must be a string.')
extra_args = mesonlib.stringlistify(kwargs.get('args', []))
deps, msg = self.determine_dependencies(kwargs)
result = self.compiler.alignment(typename, prefix, self.environment,
extra_args=extra_args,
dependencies=deps)
mlog.log('Checking for alignment of', mlog.bold(typename, True), msg, result)
return result
@permittedKwargs({
'name',
'no_builtin_args',
'include_directories',
'args',
'dependencies',
})
def run_method(self, args, kwargs):
if len(args) != 1:
raise InterpreterException('Run method takes exactly one positional argument.')
code = args[0]
if isinstance(code, mesonlib.File):
code = mesonlib.File.from_absolute_file(
code.rel_to_builddir(self.environment.source_dir))
elif not isinstance(code, str):
raise InvalidArguments('Argument must be string or file.')
testname = kwargs.get('name', '')
if not isinstance(testname, str):
raise InterpreterException('Testname argument must be a string.')
extra_args = functools.partial(self.determine_args, kwargs)
deps, msg = self.determine_dependencies(kwargs, endl=None)
result = self.compiler.run(code, self.environment, extra_args=extra_args,
dependencies=deps)
if len(testname) > 0:
if not result.compiled:
h = mlog.red('DID NOT COMPILE')
elif result.returncode == 0:
h = mlog.green('YES')
else:
h = mlog.red('NO (%d)' % result.returncode)
mlog.log('Checking if', mlog.bold(testname, True), msg, 'runs:', h)
return TryRunResultHolder(result)
@noPosargs
@permittedKwargs({})
def get_id_method(self, args, kwargs):
return self.compiler.get_id()
@noPosargs
@permittedKwargs({})
@FeatureNew('compiler.get_linker_id', '0.53.0')
def get_linker_id_method(self, args, kwargs):
return self.compiler.get_linker_id()
@noPosargs
@permittedKwargs({})
def symbols_have_underscore_prefix_method(self, args, kwargs):
'''
Check if the compiler prefixes _ (underscore) to global C symbols
See: https://en.wikipedia.org/wiki/Name_mangling#C
'''
return self.compiler.symbols_have_underscore_prefix(self.environment)
@noPosargs
@permittedKwargs({})
def unittest_args_method(self, args, kwargs):
'''
This function is deprecated and should not be used.
It can be removed in a future version of Meson.
'''
if not hasattr(self.compiler, 'get_feature_args'):
raise InterpreterException('This {} compiler has no feature arguments.'.format(self.compiler.get_display_language()))
build_to_src = os.path.relpath(self.environment.get_source_dir(), self.environment.get_build_dir())
return self.compiler.get_feature_args({'unittest': 'true'}, build_to_src)
@permittedKwargs({
'prefix',
'no_builtin_args',
'include_directories',
'args',
'dependencies',
})
def has_member_method(self, args, kwargs):
if len(args) != 2:
raise InterpreterException('Has_member takes exactly two arguments.')
check_stringlist(args)
typename, membername = args
prefix = kwargs.get('prefix', '')
if not isinstance(prefix, str):
raise InterpreterException('Prefix argument of has_member must be a string.')
extra_args = functools.partial(self.determine_args, kwargs)
deps, msg = self.determine_dependencies(kwargs)
had, cached = self.compiler.has_members(typename, [membername], prefix,
self.environment,
extra_args=extra_args,
dependencies=deps)
cached = mlog.blue('(cached)') if cached else ''
if had:
hadtxt = mlog.green('YES')
else:
hadtxt = mlog.red('NO')
mlog.log('Checking whether type', mlog.bold(typename, True),
'has member', mlog.bold(membername, True), msg, hadtxt, cached)
return had
@permittedKwargs({
'prefix',
'no_builtin_args',
'include_directories',
'args',
'dependencies',
})
def has_members_method(self, args, kwargs):
if len(args) < 2:
raise InterpreterException('Has_members needs at least two arguments.')
check_stringlist(args)
typename, *membernames = args
prefix = kwargs.get('prefix', '')
if not isinstance(prefix, str):
raise InterpreterException('Prefix argument of has_members must be a string.')
extra_args = functools.partial(self.determine_args, kwargs)
deps, msg = self.determine_dependencies(kwargs)
had, cached = self.compiler.has_members(typename, membernames, prefix,
self.environment,
extra_args=extra_args,
dependencies=deps)
cached = mlog.blue('(cached)') if cached else ''
if had:
hadtxt = mlog.green('YES')
else:
hadtxt = mlog.red('NO')
members = mlog.bold(', '.join(['"{}"'.format(m) for m in membernames]))
mlog.log('Checking whether type', mlog.bold(typename, True),
'has members', members, msg, hadtxt, cached)
return had
@permittedKwargs({
'prefix',
'no_builtin_args',
'include_directories',
'args',
'dependencies',
})
def has_function_method(self, args, kwargs):
if len(args) != 1:
raise InterpreterException('Has_function takes exactly one argument.')
check_stringlist(args)
funcname = args[0]
prefix = kwargs.get('prefix', '')
if not isinstance(prefix, str):
raise InterpreterException('Prefix argument of has_function must be a string.')
extra_args = self.determine_args(kwargs)
deps, msg = self.determine_dependencies(kwargs)
had, cached = self.compiler.has_function(funcname, prefix, self.environment,
extra_args=extra_args,
dependencies=deps)
cached = mlog.blue('(cached)') if cached else ''
if had:
hadtxt = mlog.green('YES')
else:
hadtxt = mlog.red('NO')
mlog.log('Checking for function', mlog.bold(funcname, True), msg, hadtxt, cached)
return had
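    # Illustrative meson.build usage of has_function() (sketch; 'conf' would be a
    # configuration_data() object defined elsewhere):
    #   if cc.has_function('clock_gettime', prefix: '#include <time.h>')
    #     conf.set('HAVE_CLOCK_GETTIME', 1)
    #   endif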
@permittedKwargs({
'prefix',
'no_builtin_args',
'include_directories',
'args',
'dependencies',
})
def has_type_method(self, args, kwargs):
if len(args) != 1:
raise InterpreterException('Has_type takes exactly one argument.')
check_stringlist(args)
typename = args[0]
prefix = kwargs.get('prefix', '')
if not isinstance(prefix, str):
raise InterpreterException('Prefix argument of has_type must be a string.')
extra_args = functools.partial(self.determine_args, kwargs)
deps, msg = self.determine_dependencies(kwargs)
had, cached = self.compiler.has_type(typename, prefix, self.environment,
extra_args=extra_args, dependencies=deps)
cached = mlog.blue('(cached)') if cached else ''
if had:
hadtxt = mlog.green('YES')
else:
hadtxt = mlog.red('NO')
mlog.log('Checking for type', mlog.bold(typename, True), msg, hadtxt, cached)
return had
@FeatureNew('compiler.compute_int', '0.40.0')
@permittedKwargs({
'prefix',
'low',
'high',
'guess',
'no_builtin_args',
'include_directories',
'args',
'dependencies',
})
def compute_int_method(self, args, kwargs):
if len(args) != 1:
raise InterpreterException('Compute_int takes exactly one argument.')
check_stringlist(args)
expression = args[0]
prefix = kwargs.get('prefix', '')
low = kwargs.get('low', None)
high = kwargs.get('high', None)
guess = kwargs.get('guess', None)
if not isinstance(prefix, str):
raise InterpreterException('Prefix argument of compute_int must be a string.')
if low is not None and not isinstance(low, int):
raise InterpreterException('Low argument of compute_int must be an int.')
if high is not None and not isinstance(high, int):
raise InterpreterException('High argument of compute_int must be an int.')
if guess is not None and not isinstance(guess, int):
raise InterpreterException('Guess argument of compute_int must be an int.')
extra_args = functools.partial(self.determine_args, kwargs)
deps, msg = self.determine_dependencies(kwargs)
res = self.compiler.compute_int(expression, low, high, guess, prefix,
self.environment, extra_args=extra_args,
dependencies=deps)
mlog.log('Computing int of', mlog.bold(expression, True), msg, res)
return res
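    # Illustrative usage sketch of compute_int(); when cross compiling the value is
    # searched for between 'low' and 'high', trying 'guess' first:
    #   int_bits = cc.compute_int('sizeof(int) * 8', low: 8, high: 128, guess: 32)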
@permittedKwargs({
'prefix',
'no_builtin_args',
'include_directories',
'args',
'dependencies',
})
def sizeof_method(self, args, kwargs):
if len(args) != 1:
raise InterpreterException('Sizeof takes exactly one argument.')
check_stringlist(args)
element = args[0]
prefix = kwargs.get('prefix', '')
if not isinstance(prefix, str):
raise InterpreterException('Prefix argument of sizeof must be a string.')
extra_args = functools.partial(self.determine_args, kwargs)
deps, msg = self.determine_dependencies(kwargs)
esize = self.compiler.sizeof(element, prefix, self.environment,
extra_args=extra_args, dependencies=deps)
mlog.log('Checking for size of', mlog.bold(element, True), msg, esize)
return esize
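    # Illustrative usage sketch of sizeof():
    #   long_size = cc.sizeof('long')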
@FeatureNew('compiler.get_define', '0.40.0')
@permittedKwargs({
'prefix',
'no_builtin_args',
'include_directories',
'args',
'dependencies',
})
def get_define_method(self, args, kwargs):
if len(args) != 1:
raise InterpreterException('get_define() takes exactly one argument.')
check_stringlist(args)
element = args[0]
prefix = kwargs.get('prefix', '')
if not isinstance(prefix, str):
raise InterpreterException('Prefix argument of get_define() must be a string.')
extra_args = functools.partial(self.determine_args, kwargs)
deps, msg = self.determine_dependencies(kwargs)
value, cached = self.compiler.get_define(element, prefix, self.environment,
extra_args=extra_args,
dependencies=deps)
cached = mlog.blue('(cached)') if cached else ''
mlog.log('Fetching value of define', mlog.bold(element, True), msg, value, cached)
return value
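    # Illustrative usage sketch of get_define(); an empty string comes back when the
    # macro is not defined:
    #   gnuc_version = cc.get_define('__GNUC__')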
@permittedKwargs({
'name',
'no_builtin_args',
'include_directories',
'args',
'dependencies',
})
def compiles_method(self, args, kwargs):
if len(args) != 1:
raise InterpreterException('compiles method takes exactly one argument.')
code = args[0]
if isinstance(code, mesonlib.File):
code = mesonlib.File.from_absolute_file(
code.rel_to_builddir(self.environment.source_dir))
elif not isinstance(code, str):
raise InvalidArguments('Argument must be string or file.')
testname = kwargs.get('name', '')
if not isinstance(testname, str):
raise InterpreterException('Testname argument must be a string.')
extra_args = functools.partial(self.determine_args, kwargs)
deps, msg = self.determine_dependencies(kwargs, endl=None)
result, cached = self.compiler.compiles(code, self.environment,
extra_args=extra_args,
dependencies=deps)
if len(testname) > 0:
if result:
h = mlog.green('YES')
else:
h = mlog.red('NO')
cached = mlog.blue('(cached)') if cached else ''
mlog.log('Checking if', mlog.bold(testname, True), msg, 'compiles:', h, cached)
return result
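    # Illustrative usage sketch of compiles():
    #   works = cc.compiles('int main() { return 0; }', name: 'trivial program', args: ['-Werror'])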
@permittedKwargs({
'name',
'no_builtin_args',
'include_directories',
'args',
'dependencies',
})
def links_method(self, args, kwargs):
if len(args) != 1:
raise InterpreterException('links method takes exactly one argument.')
code = args[0]
if isinstance(code, mesonlib.File):
code = mesonlib.File.from_absolute_file(
code.rel_to_builddir(self.environment.source_dir))
elif not isinstance(code, str):
raise InvalidArguments('Argument must be string or file.')
testname = kwargs.get('name', '')
if not isinstance(testname, str):
raise InterpreterException('Testname argument must be a string.')
extra_args = functools.partial(self.determine_args, kwargs)
deps, msg = self.determine_dependencies(kwargs, endl=None)
result, cached = self.compiler.links(code, self.environment,
extra_args=extra_args,
dependencies=deps)
cached = mlog.blue('(cached)') if cached else ''
if len(testname) > 0:
if result:
h = mlog.green('YES')
else:
h = mlog.red('NO')
mlog.log('Checking if', mlog.bold(testname, True), msg, 'links:', h, cached)
return result
@FeatureNew('compiler.check_header', '0.47.0')
@FeatureNewKwargs('compiler.check_header', '0.50.0', ['required'])
@permittedKwargs(header_permitted_kwargs)
def check_header_method(self, args, kwargs):
if len(args) != 1:
raise InterpreterException('check_header method takes exactly one argument.')
check_stringlist(args)
hname = args[0]
prefix = kwargs.get('prefix', '')
if not isinstance(prefix, str):
raise InterpreterException('Prefix argument of has_header must be a string.')
disabled, required, feature = extract_required_kwarg(kwargs, self.subproject, default=False)
if disabled:
mlog.log('Check usable header', mlog.bold(hname, True), 'skipped: feature', mlog.bold(feature), 'disabled')
return False
extra_args = functools.partial(self.determine_args, kwargs)
deps, msg = self.determine_dependencies(kwargs)
haz, cached = self.compiler.check_header(hname, prefix, self.environment,
extra_args=extra_args,
dependencies=deps)
cached = mlog.blue('(cached)') if cached else ''
if required and not haz:
raise InterpreterException('{} header {!r} not usable'.format(self.compiler.get_display_language(), hname))
elif haz:
h = mlog.green('YES')
else:
h = mlog.red('NO')
mlog.log('Check usable header', mlog.bold(hname, True), msg, h, cached)
return haz
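    # Illustrative usage sketch; check_header() verifies that the header actually
    # compiles with the given prefix, while has_header() below is only a
    # preprocessor-level check ('conf' again being a hypothetical configuration_data()
    # object):
    #   if cc.check_header('sys/epoll.h')
    #     conf.set('HAVE_EPOLL', 1)
    #   endif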
@FeatureNewKwargs('compiler.has_header', '0.50.0', ['required'])
@permittedKwargs(header_permitted_kwargs)
def has_header_method(self, args, kwargs):
if len(args) != 1:
raise InterpreterException('has_header method takes exactly one argument.')
check_stringlist(args)
hname = args[0]
prefix = kwargs.get('prefix', '')
if not isinstance(prefix, str):
raise InterpreterException('Prefix argument of has_header must be a string.')
disabled, required, feature = extract_required_kwarg(kwargs, self.subproject, default=False)
if disabled:
mlog.log('Has header', mlog.bold(hname, True), 'skipped: feature', mlog.bold(feature), 'disabled')
return False
extra_args = functools.partial(self.determine_args, kwargs)
deps, msg = self.determine_dependencies(kwargs)
haz, cached = self.compiler.has_header(hname, prefix, self.environment,
extra_args=extra_args, dependencies=deps)
cached = mlog.blue('(cached)') if cached else ''
if required and not haz:
raise InterpreterException('{} header {!r} not found'.format(self.compiler.get_display_language(), hname))
elif haz:
h = mlog.green('YES')
else:
h = mlog.red('NO')
mlog.log('Has header', mlog.bold(hname, True), msg, h, cached)
return haz
@FeatureNewKwargs('compiler.has_header_symbol', '0.50.0', ['required'])
@permittedKwargs(header_permitted_kwargs)
def has_header_symbol_method(self, args, kwargs):
if len(args) != 2:
raise InterpreterException('has_header_symbol method takes exactly two arguments.')
check_stringlist(args)
hname, symbol = args
prefix = kwargs.get('prefix', '')
if not isinstance(prefix, str):
raise InterpreterException('Prefix argument of has_header_symbol must be a string.')
disabled, required, feature = extract_required_kwarg(kwargs, self.subproject, default=False)
if disabled:
mlog.log('Header <{0}> has symbol'.format(hname), mlog.bold(symbol, True), 'skipped: feature', mlog.bold(feature), 'disabled')
return False
extra_args = functools.partial(self.determine_args, kwargs)
deps, msg = self.determine_dependencies(kwargs)
haz, cached = self.compiler.has_header_symbol(hname, symbol, prefix, self.environment,
extra_args=extra_args,
dependencies=deps)
if required and not haz:
raise InterpreterException('{} symbol {} not found in header {}'.format(self.compiler.get_display_language(), symbol, hname))
elif haz:
h = mlog.green('YES')
else:
h = mlog.red('NO')
cached = mlog.blue('(cached)') if cached else ''
mlog.log('Header <{0}> has symbol'.format(hname), mlog.bold(symbol, True), msg, h, cached)
return haz
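    # Illustrative usage sketch of has_header_symbol():
    #   have_nan = cc.has_header_symbol('math.h', 'NAN')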
def notfound_library(self, libname):
lib = dependencies.ExternalLibrary(libname, None,
self.environment,
self.compiler.language,
silent=True)
return ExternalLibraryHolder(lib, self.subproject)
@FeatureNewKwargs('compiler.find_library', '0.51.0', ['static'])
@FeatureNewKwargs('compiler.find_library', '0.50.0', ['has_headers'])
@FeatureNewKwargs('compiler.find_library', '0.49.0', ['disabler'])
@disablerIfNotFound
@permittedKwargs(find_library_permitted_kwargs)
def find_library_method(self, args, kwargs):
# TODO add dependencies support?
if len(args) != 1:
raise InterpreterException('find_library method takes one argument.')
libname = args[0]
if not isinstance(libname, str):
raise InterpreterException('Library name not a string.')
disabled, required, feature = extract_required_kwarg(kwargs, self.subproject)
if disabled:
mlog.log('Library', mlog.bold(libname), 'skipped: feature', mlog.bold(feature), 'disabled')
return self.notfound_library(libname)
has_header_kwargs = {k[7:]: v for k, v in kwargs.items() if k.startswith('header_')}
has_header_kwargs['required'] = required
headers = mesonlib.stringlistify(kwargs.get('has_headers', []))
for h in headers:
if not self.has_header_method([h], has_header_kwargs):
return self.notfound_library(libname)
search_dirs = extract_search_dirs(kwargs)
libtype = mesonlib.LibType.PREFER_SHARED
if 'static' in kwargs:
if not isinstance(kwargs['static'], bool):
raise InterpreterException('static must be a boolean')
libtype = mesonlib.LibType.STATIC if kwargs['static'] else mesonlib.LibType.SHARED
linkargs = self.compiler.find_library(libname, self.environment, search_dirs, libtype)
if required and not linkargs:
raise InterpreterException(
'{} library {!r} not found'.format(self.compiler.get_display_language(), libname))
lib = dependencies.ExternalLibrary(libname, linkargs, self.environment,
self.compiler.language)
return ExternalLibraryHolder(lib, self.subproject)
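    # Illustrative meson.build usage of find_library() (a sketch, not from this file):
    #   m_dep = cc.find_library('m', required: false)
    #   executable('prog', 'prog.c', dependencies: m_dep)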
@permittedKwargs({})
def has_argument_method(self, args: T.Sequence[str], kwargs) -> bool:
args = mesonlib.stringlistify(args)
if len(args) != 1:
raise InterpreterException('has_argument takes exactly one argument.')
return self.has_multi_arguments_method(args, kwargs)
@permittedKwargs({})
def has_multi_arguments_method(self, args: T.Sequence[str], kwargs: dict):
args = mesonlib.stringlistify(args)
result, cached = self.compiler.has_multi_arguments(args, self.environment)
if result:
h = mlog.green('YES')
else:
h = mlog.red('NO')
cached = mlog.blue('(cached)') if cached else ''
mlog.log(
'Compiler for {} supports arguments {}:'.format(
self.compiler.get_display_language(), ' '.join(args)),
h, cached)
return result
@FeatureNew('compiler.get_supported_arguments', '0.43.0')
@permittedKwargs({})
def get_supported_arguments_method(self, args, kwargs):
args = mesonlib.stringlistify(args)
supported_args = []
for arg in args:
if self.has_argument_method(arg, kwargs):
supported_args.append(arg)
return supported_args
@permittedKwargs({})
def first_supported_argument_method(self, args: T.Sequence[str], kwargs: dict) -> T.List[str]:
for arg in mesonlib.stringlistify(args):
if self.has_argument_method(arg, kwargs):
mlog.log('First supported argument:', mlog.bold(arg))
return [arg]
mlog.log('First supported argument:', mlog.red('None'))
return []
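    # Illustrative usage sketch for the argument-probing helpers above:
    #   add_project_arguments(
    #     cc.get_supported_arguments(['-Wall', '-Wno-unused-parameter']),
    #     language: 'c')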
@FeatureNew('compiler.has_link_argument', '0.46.0')
@permittedKwargs({})
def has_link_argument_method(self, args, kwargs):
args = mesonlib.stringlistify(args)
if len(args) != 1:
raise InterpreterException('has_link_argument takes exactly one argument.')
return self.has_multi_link_arguments_method(args, kwargs)
@FeatureNew('compiler.has_multi_link_argument', '0.46.0')
@permittedKwargs({})
def has_multi_link_arguments_method(self, args, kwargs):
args = mesonlib.stringlistify(args)
result, cached = self.compiler.has_multi_link_arguments(args, self.environment)
cached = mlog.blue('(cached)') if cached else ''
if result:
h = mlog.green('YES')
else:
h = mlog.red('NO')
mlog.log(
'Compiler for {} supports link arguments {}:'.format(
self.compiler.get_display_language(), ' '.join(args)),
h, cached)
return result
@FeatureNew('compiler.get_supported_link_arguments_method', '0.46.0')
@permittedKwargs({})
def get_supported_link_arguments_method(self, args, kwargs):
args = mesonlib.stringlistify(args)
supported_args = []
for arg in args:
if self.has_link_argument_method(arg, kwargs):
supported_args.append(arg)
return supported_args
@FeatureNew('compiler.first_supported_link_argument_method', '0.46.0')
@permittedKwargs({})
def first_supported_link_argument_method(self, args, kwargs):
for i in mesonlib.stringlistify(args):
if self.has_link_argument_method(i, kwargs):
mlog.log('First supported link argument:', mlog.bold(i))
return [i]
mlog.log('First supported link argument:', mlog.red('None'))
return []
@FeatureNew('compiler.has_function_attribute', '0.48.0')
@permittedKwargs({})
def has_func_attribute_method(self, args, kwargs):
args = mesonlib.stringlistify(args)
if len(args) != 1:
raise InterpreterException('has_func_attribute takes exactly one argument.')
result, cached = self.compiler.has_func_attribute(args[0], self.environment)
cached = mlog.blue('(cached)') if cached else ''
h = mlog.green('YES') if result else mlog.red('NO')
mlog.log('Compiler for {} supports function attribute {}:'.format(self.compiler.get_display_language(), args[0]), h, cached)
return result
@FeatureNew('compiler.get_supported_function_attributes', '0.48.0')
@permittedKwargs({})
def get_supported_function_attributes_method(self, args, kwargs):
args = mesonlib.stringlistify(args)
return [a for a in args if self.has_func_attribute_method(a, kwargs)]
@FeatureNew('compiler.get_argument_syntax_method', '0.49.0')
@noPosargs
@noKwargs
def get_argument_syntax_method(self, args, kwargs):
return self.compiler.get_argument_syntax()
ModuleState = collections.namedtuple('ModuleState', [
'source_root', 'build_to_src', 'subproject', 'subdir', 'current_lineno', 'environment',
'project_name', 'project_version', 'backend', 'targets',
'data', 'headers', 'man', 'global_args', 'project_args', 'build_machine',
'host_machine', 'target_machine', 'current_node'])
class ModuleHolder(InterpreterObject, ObjectHolder):
def __init__(self, modname, module, interpreter):
InterpreterObject.__init__(self)
ObjectHolder.__init__(self, module)
self.modname = modname
self.interpreter = interpreter
def method_call(self, method_name, args, kwargs):
try:
fn = getattr(self.held_object, method_name)
except AttributeError:
raise InvalidArguments('Module %s does not have method %s.' % (self.modname, method_name))
if method_name.startswith('_'):
raise InvalidArguments('Function {!r} in module {!r} is private.'.format(method_name, self.modname))
if not getattr(fn, 'no-args-flattening', False):
args = flatten(args)
# This is not 100% reliable but we can't use hash()
# because the Build object contains dicts and lists.
num_targets = len(self.interpreter.build.targets)
state = ModuleState(
            source_root=self.interpreter.environment.get_source_dir(),
build_to_src=mesonlib.relpath(self.interpreter.environment.get_source_dir(),
self.interpreter.environment.get_build_dir()),
subproject=self.interpreter.subproject,
subdir=self.interpreter.subdir,
current_lineno=self.interpreter.current_lineno,
environment=self.interpreter.environment,
project_name=self.interpreter.build.project_name,
project_version=self.interpreter.build.dep_manifest[self.interpreter.active_projectname],
# The backend object is under-used right now, but we will need it:
# https://github.com/mesonbuild/meson/issues/1419
backend=self.interpreter.backend,
targets=self.interpreter.build.targets,
data=self.interpreter.build.data,
headers=self.interpreter.build.get_headers(),
man=self.interpreter.build.get_man(),
            #global_args_for_build = self.interpreter.build.global_args.build,
            global_args=self.interpreter.build.global_args.host,
            #project_args_for_build = self.interpreter.build.projects_args.build.get(self.interpreter.subproject, {}),
            project_args=self.interpreter.build.projects_args.host.get(self.interpreter.subproject, {}),
build_machine=self.interpreter.builtin['build_machine'].held_object,
host_machine=self.interpreter.builtin['host_machine'].held_object,
target_machine=self.interpreter.builtin['target_machine'].held_object,
current_node=self.current_node
)
# Many modules do for example self.interpreter.find_program_impl(),
# so we have to ensure they use the current interpreter and not the one
# that first imported that module, otherwise it will use outdated
# overrides.
self.held_object.interpreter = self.interpreter
if self.held_object.is_snippet(method_name):
value = fn(self.interpreter, state, args, kwargs)
return self.interpreter.holderify(value)
else:
value = fn(state, args, kwargs)
if num_targets != len(self.interpreter.build.targets):
raise InterpreterException('Extension module altered internal state illegally.')
return self.interpreter.module_method_callback(value)
class Summary:
def __init__(self, project_name, project_version):
self.project_name = project_name
self.project_version = project_version
self.sections = collections.defaultdict(dict)
self.max_key_len = 0
def add_section(self, section, values, kwargs):
bool_yn = kwargs.get('bool_yn', False)
if not isinstance(bool_yn, bool):
raise InterpreterException('bool_yn keyword argument must be boolean')
list_sep = kwargs.get('list_sep')
if list_sep is not None and not isinstance(list_sep, str):
raise InterpreterException('list_sep keyword argument must be string')
for k, v in values.items():
if k in self.sections[section]:
                raise InterpreterException('Summary section {!r} already has key {!r}'.format(section, k))
formatted_values = []
for i in listify(v):
if not isinstance(i, (str, int)):
m = 'Summary value in section {!r}, key {!r}, must be string, integer or boolean'
raise InterpreterException(m.format(section, k))
if bool_yn and isinstance(i, bool):
formatted_values.append(mlog.green('YES') if i else mlog.red('NO'))
else:
formatted_values.append(i)
self.sections[section][k] = (formatted_values, list_sep)
self.max_key_len = max(self.max_key_len, len(k))
def dump(self):
mlog.log(self.project_name, mlog.normal_cyan(self.project_version))
for section, values in self.sections.items():
mlog.log('') # newline
if section:
mlog.log(' ', mlog.bold(section))
for k, v in values.items():
v, list_sep = v
indent = self.max_key_len - len(k) + 3
end = ' ' if v else ''
mlog.log(' ' * indent, k + ':', end=end)
if list_sep is None:
indent = self.max_key_len + 6
list_sep = '\n' + ' ' * indent
mlog.log(*v, sep=list_sep)
mlog.log('') # newline
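# Illustrative meson.build usage of the summary() machinery backed by the Summary
# class above (a sketch only; the option names are hypothetical):
#   summary({'Documentation': get_option('docs'), 'Tests': get_option('tests')},
#           section: 'Features', bool_yn: true)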
class MesonMain(InterpreterObject):
def __init__(self, build, interpreter):
InterpreterObject.__init__(self)
self.build = build
self.interpreter = interpreter
self._found_source_scripts = {}
self.methods.update({'get_compiler': self.get_compiler_method,
'is_cross_build': self.is_cross_build_method,
'has_exe_wrapper': self.has_exe_wrapper_method,
'is_unity': self.is_unity_method,
'is_subproject': self.is_subproject_method,
'current_source_dir': self.current_source_dir_method,
'current_build_dir': self.current_build_dir_method,
'source_root': self.source_root_method,
'build_root': self.build_root_method,
'add_install_script': self.add_install_script_method,
'add_postconf_script': self.add_postconf_script_method,
'add_dist_script': self.add_dist_script_method,
'install_dependency_manifest': self.install_dependency_manifest_method,
'override_dependency': self.override_dependency_method,
'override_find_program': self.override_find_program_method,
'project_version': self.project_version_method,
'project_license': self.project_license_method,
'version': self.version_method,
'project_name': self.project_name_method,
'get_cross_property': self.get_cross_property_method,
'get_external_property': self.get_external_property_method,
'backend': self.backend_method,
})
def _find_source_script(self, prog: T.Union[str, ExecutableHolder], args):
if isinstance(prog, ExecutableHolder):
prog_path = self.interpreter.backend.get_target_filename(prog.held_object)
return build.RunScript([prog_path], args)
elif isinstance(prog, ExternalProgramHolder):
return build.RunScript(prog.get_command(), args)
# Prefer scripts in the current source directory
search_dir = os.path.join(self.interpreter.environment.source_dir,
self.interpreter.subdir)
key = (prog, search_dir)
if key in self._found_source_scripts:
found = self._found_source_scripts[key]
else:
found = dependencies.ExternalProgram(prog, search_dir=search_dir)
if found.found():
self._found_source_scripts[key] = found
else:
m = 'Script or command {!r} not found or not executable'
raise InterpreterException(m.format(prog))
return build.RunScript(found.get_command(), args)
def _process_script_args(
self, name: str, args: T.List[T.Union[
str, mesonlib.File, CustomTargetHolder,
CustomTargetIndexHolder, ConfigureFileHolder,
ExternalProgramHolder, ExecutableHolder,
]], allow_built: bool = False) -> T.List[str]:
script_args = [] # T.List[str]
new = False
for a in args:
a = unholder(a)
if isinstance(a, str):
script_args.append(a)
elif isinstance(a, mesonlib.File):
new = True
script_args.append(a.rel_to_builddir(self.interpreter.environment.source_dir))
elif isinstance(a, (build.BuildTarget, build.CustomTarget, build.CustomTargetIndex)):
if not allow_built:
raise InterpreterException('Arguments to {} cannot be built'.format(name))
new = True
script_args.extend([os.path.join(a.get_subdir(), o) for o in a.get_outputs()])
# This feels really hacky, but I'm not sure how else to fix
# this without completely rewriting install script handling.
# This is complicated by the fact that the install target
# depends on all.
if isinstance(a, build.CustomTargetIndex):
a.target.build_by_default = True
else:
a.build_by_default = True
elif isinstance(a, build.ConfigureFile):
new = True
script_args.append(os.path.join(a.subdir, a.targetname))
elif isinstance(a, dependencies.ExternalProgram):
script_args.extend(a.command)
new = True
else:
raise InterpreterException(
'Arguments to {} must be strings, Files, CustomTargets, '
'Indexes of CustomTargets, or ConfigureFiles'.format(name))
if new:
            FeatureNew('Calling "{}" with File, CustomTarget, Index of CustomTarget, ConfigureFile, Executable, or ExternalProgram'.format(name), '0.55.0').use(
self.interpreter.subproject)
return script_args
@permittedKwargs(set())
def add_install_script_method(self, args: 'T.Tuple[T.Union[str, ExecutableHolder], T.Union[str, mesonlib.File, CustomTargetHolder, CustomTargetIndexHolder, ConfigureFileHolder], ...]', kwargs):
if len(args) < 1:
raise InterpreterException('add_install_script takes one or more arguments')
script_args = self._process_script_args('add_install_script', args[1:], allow_built=True)
script = self._find_source_script(args[0], script_args)
self.build.install_scripts.append(script)
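    # Illustrative meson.build usage of add_install_script() (sketch; the script path
    # and argument are hypothetical):
    #   meson.add_install_script('scripts/post_install.py', get_option('prefix'))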
@permittedKwargs(set())
def add_postconf_script_method(self, args, kwargs):
if len(args) < 1:
raise InterpreterException('add_postconf_script takes one or more arguments')
script_args = self._process_script_args('add_postconf_script', args[1:], allow_built=True)
script = self._find_source_script(args[0], script_args)
self.build.postconf_scripts.append(script)
@permittedKwargs(set())
def add_dist_script_method(self, args, kwargs):
if len(args) < 1:
raise InterpreterException('add_dist_script takes one or more arguments')
if len(args) > 1:
FeatureNew('Calling "add_dist_script" with multiple arguments', '0.49.0').use(self.interpreter.subproject)
if self.interpreter.subproject != '':
raise InterpreterException('add_dist_script may not be used in a subproject.')
script_args = self._process_script_args('add_dist_script', args[1:], allow_built=True)
script = self._find_source_script(args[0], script_args)
self.build.dist_scripts.append(script)
@noPosargs
@permittedKwargs({})
def current_source_dir_method(self, args, kwargs):
src = self.interpreter.environment.source_dir
sub = self.interpreter.subdir
if sub == '':
return src
return os.path.join(src, sub)
@noPosargs
@permittedKwargs({})
def current_build_dir_method(self, args, kwargs):
src = self.interpreter.environment.build_dir
sub = self.interpreter.subdir
if sub == '':
return src
return os.path.join(src, sub)
@noPosargs
@permittedKwargs({})
def backend_method(self, args, kwargs):
return self.interpreter.backend.name
@noPosargs
@permittedKwargs({})
def source_root_method(self, args, kwargs):
return self.interpreter.environment.source_dir
@noPosargs
@permittedKwargs({})
def build_root_method(self, args, kwargs):
return self.interpreter.environment.build_dir
@noPosargs
@permittedKwargs({})
def has_exe_wrapper_method(self, args, kwargs):
if self.is_cross_build_method(None, None) and \
self.build.environment.need_exe_wrapper():
if self.build.environment.exe_wrapper is None:
return False
# We return True when exe_wrap is defined, when it's not needed, and
# when we're compiling natively. The last two are semantically confusing.
# Need to revisit this.
return True
@noPosargs
@permittedKwargs({})
def is_cross_build_method(self, args, kwargs):
return self.build.environment.is_cross_build()
@permittedKwargs({'native'})
def get_compiler_method(self, args, kwargs):
if len(args) != 1:
raise InterpreterException('get_compiler_method must have one and only one argument.')
cname = args[0]
for_machine = Interpreter.machine_from_native_kwarg(kwargs)
clist = self.interpreter.coredata.compilers[for_machine]
if cname in clist:
return CompilerHolder(clist[cname], self.build.environment, self.interpreter.subproject)
raise InterpreterException('Tried to access compiler for unspecified language "%s".' % cname)
@noPosargs
@permittedKwargs({})
def is_unity_method(self, args, kwargs):
optval = self.interpreter.environment.coredata.get_builtin_option('unity')
if optval == 'on' or (optval == 'subprojects' and self.interpreter.is_subproject()):
return True
return False
@noPosargs
@permittedKwargs({})
def is_subproject_method(self, args, kwargs):
return self.interpreter.is_subproject()
@permittedKwargs({})
def install_dependency_manifest_method(self, args, kwargs):
if len(args) != 1:
raise InterpreterException('Must specify manifest install file name')
if not isinstance(args[0], str):
raise InterpreterException('Argument must be a string.')
self.build.dep_manifest_name = args[0]
@FeatureNew('meson.override_find_program', '0.46.0')
@permittedKwargs({})
def override_find_program_method(self, args, kwargs):
if len(args) != 2:
raise InterpreterException('Override needs two arguments')
name, exe = args
if not isinstance(name, str):
raise InterpreterException('First argument must be a string')
if hasattr(exe, 'held_object'):
exe = exe.held_object
if isinstance(exe, mesonlib.File):
abspath = exe.absolute_path(self.interpreter.environment.source_dir,
self.interpreter.environment.build_dir)
if not os.path.exists(abspath):
raise InterpreterException('Tried to override %s with a file that does not exist.' % name)
exe = OverrideProgram(abspath)
if not isinstance(exe, (dependencies.ExternalProgram, build.Executable)):
raise InterpreterException('Second argument must be an external program or executable.')
self.interpreter.add_find_program_override(name, exe)
@FeatureNew('meson.override_dependency', '0.54.0')
@permittedKwargs({'native'})
def override_dependency_method(self, args, kwargs):
if len(args) != 2:
raise InterpreterException('Override needs two arguments')
name = args[0]
dep = args[1]
if not isinstance(name, str) or not name:
raise InterpreterException('First argument must be a string and cannot be empty')
if hasattr(dep, 'held_object'):
dep = dep.held_object
if not isinstance(dep, dependencies.Dependency):
raise InterpreterException('Second argument must be a dependency object')
identifier = dependencies.get_dep_identifier(name, kwargs)
for_machine = self.interpreter.machine_from_native_kwarg(kwargs)
override = self.build.dependency_overrides[for_machine].get(identifier)
if override:
m = 'Tried to override dependency {!r} which has already been resolved or overridden at {}'
location = mlog.get_error_location_string(override.node.filename, override.node.lineno)
raise InterpreterException(m.format(name, location))
self.build.dependency_overrides[for_machine][identifier] = \
build.DependencyOverride(dep, self.interpreter.current_node)
@noPosargs
@permittedKwargs({})
def project_version_method(self, args, kwargs):
return self.build.dep_manifest[self.interpreter.active_projectname]['version']
@FeatureNew('meson.project_license()', '0.45.0')
@noPosargs
@permittedKwargs({})
def project_license_method(self, args, kwargs):
return self.build.dep_manifest[self.interpreter.active_projectname]['license']
@noPosargs
@permittedKwargs({})
def version_method(self, args, kwargs):
return coredata.version
@noPosargs
@permittedKwargs({})
def project_name_method(self, args, kwargs):
return self.interpreter.active_projectname
@noArgsFlattening
@permittedKwargs({})
def get_cross_property_method(self, args, kwargs) -> str:
if len(args) < 1 or len(args) > 2:
raise InterpreterException('Must have one or two arguments.')
propname = args[0]
if not isinstance(propname, str):
raise InterpreterException('Property name must be string.')
try:
props = self.interpreter.environment.properties.host
return props[propname]
except Exception:
if len(args) == 2:
return args[1]
raise InterpreterException('Unknown cross property: %s.' % propname)
@noArgsFlattening
@permittedKwargs({'native'})
@FeatureNew('meson.get_external_property', '0.54.0')
def get_external_property_method(self, args: T.Sequence[str], kwargs: dict) -> str:
if len(args) < 1 or len(args) > 2:
raise InterpreterException('Must have one or two positional arguments.')
propname = args[0]
if not isinstance(propname, str):
raise InterpreterException('Property name must be string.')
def _get_native() -> str:
try:
props = self.interpreter.environment.properties.build
return props[propname]
except Exception:
if len(args) == 2:
return args[1]
raise InterpreterException('Unknown native property: %s.' % propname)
if 'native' in kwargs:
if kwargs['native']:
return _get_native()
else:
return self.get_cross_property_method(args, {})
else: # native: not specified
if self.build.environment.is_cross_build():
return self.get_cross_property_method(args, kwargs)
else:
return _get_native()
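    # Illustrative usage sketch of get_external_property(); 'some_prop' is a
    # hypothetical entry from a machine file's [properties] section:
    #   value = meson.get_external_property('some_prop', 'fallback-value')
    #   build_value = meson.get_external_property('some_prop', 'fallback-value', native: true)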
known_library_kwargs = (
build.known_shlib_kwargs |
build.known_stlib_kwargs
)
known_build_target_kwargs = (
known_library_kwargs |
build.known_exe_kwargs |
build.known_jar_kwargs |
{'target_type'}
)
_base_test_args = {'args', 'depends', 'env', 'should_fail', 'timeout', 'workdir', 'suite', 'priority', 'protocol'}
permitted_kwargs = {'add_global_arguments': {'language', 'native'},
'add_global_link_arguments': {'language', 'native'},
'add_languages': {'required', 'native'},
'add_project_link_arguments': {'language', 'native'},
'add_project_arguments': {'language', 'native'},
'add_test_setup': {'exe_wrapper', 'gdb', 'timeout_multiplier', 'env', 'is_default'},
'benchmark': _base_test_args,
'build_target': known_build_target_kwargs,
'configure_file': {'input',
'output',
'configuration',
'command',
'copy',
'depfile',
'install_dir',
'install_mode',
'capture',
'install',
'format',
'output_format',
'encoding'},
'custom_target': {'input',
'output',
'command',
'install',
'install_dir',
'install_mode',
'build_always',
'capture',
'depends',
'depend_files',
'depfile',
'build_by_default',
'build_always_stale',
'console'},
'dependency': {'default_options',
'embed',
'fallback',
'language',
'main',
'method',
'modules',
'components',
'cmake_module_path',
'optional_modules',
'native',
'not_found_message',
'required',
'static',
'version',
'private_headers',
'cmake_args',
'include_type',
},
'declare_dependency': {'include_directories',
'link_with',
'sources',
'dependencies',
'compile_args',
'link_args',
'link_whole',
'version',
'variables',
},
'executable': build.known_exe_kwargs,
'find_program': {'required', 'native', 'version', 'dirs'},
'generator': {'arguments',
'output',
'depends',
'depfile',
'capture',
'preserve_path_from'},
'include_directories': {'is_system'},
'install_data': {'install_dir', 'install_mode', 'rename', 'sources'},
'install_headers': {'install_dir', 'install_mode', 'subdir'},
'install_man': {'install_dir', 'install_mode'},
'install_subdir': {'exclude_files', 'exclude_directories', 'install_dir', 'install_mode', 'strip_directory'},
'jar': build.known_jar_kwargs,
'project': {'version', 'meson_version', 'default_options', 'license', 'subproject_dir'},
'run_command': {'check', 'capture', 'env'},
'run_target': {'command', 'depends'},
'shared_library': build.known_shlib_kwargs,
'shared_module': build.known_shmod_kwargs,
'static_library': build.known_stlib_kwargs,
'both_libraries': known_library_kwargs,
'library': known_library_kwargs,
'subdir': {'if_found'},
'subproject': {'version', 'default_options', 'required'},
'test': set.union(_base_test_args, {'is_parallel'}),
'vcs_tag': {'input', 'output', 'fallback', 'command', 'replace_string'},
}
class Interpreter(InterpreterBase):
def __init__(self, build, backend=None, subproject='', subdir='', subproject_dir='subprojects',
                 modules=None, default_project_options=None, mock=False, ast=None):
super().__init__(build.environment.get_source_dir(), subdir, subproject)
self.an_unpicklable_object = mesonlib.an_unpicklable_object
self.build = build
self.environment = build.environment
self.coredata = self.environment.get_coredata()
self.backend = backend
self.summary = {}
if modules is None:
self.modules = {}
else:
self.modules = modules
# Subproject directory is usually the name of the subproject, but can
# be different for dependencies provided by wrap files.
self.subproject_directory_name = subdir.split(os.path.sep)[-1]
self.subproject_dir = subproject_dir
self.option_file = os.path.join(self.source_root, self.subdir, 'meson_options.txt')
if not mock and ast is None:
self.load_root_meson_file()
self.sanity_check_ast()
elif ast is not None:
self.ast = ast
self.sanity_check_ast()
self.builtin.update({'meson': MesonMain(build, self)})
self.generators = []
self.visited_subdirs = {}
self.project_args_frozen = False
self.global_args_frozen = False # implies self.project_args_frozen
self.subprojects = {}
self.subproject_stack = []
self.configure_file_outputs = {}
# Passed from the outside, only used in subprojects.
if default_project_options:
self.default_project_options = default_project_options.copy()
else:
self.default_project_options = {}
self.project_default_options = {}
self.build_func_dict()
# build_def_files needs to be defined before parse_project is called
self.build_def_files = [os.path.join(self.subdir, environment.build_filename)]
if not mock:
self.parse_project()
self._redetect_machines()
def _redetect_machines(self):
# Re-initialize machine descriptions. We can do a better job now because we
# have the compilers needed to gain more knowledge, so wipe out old
# inference and start over.
machines = self.build.environment.machines.miss_defaulting()
machines.build = environment.detect_machine_info(self.coredata.compilers.build)
self.build.environment.machines = machines.default_missing()
assert self.build.environment.machines.build.cpu is not None
assert self.build.environment.machines.host.cpu is not None
assert self.build.environment.machines.target.cpu is not None
self.builtin['build_machine'] = \
MachineHolder(self.build.environment.machines.build)
self.builtin['host_machine'] = \
MachineHolder(self.build.environment.machines.host)
self.builtin['target_machine'] = \
MachineHolder(self.build.environment.machines.target)
def get_non_matching_default_options(self):
env = self.environment
for def_opt_name, def_opt_value in self.project_default_options.items():
for opts in env.coredata.get_all_options():
cur_opt_value = opts.get(def_opt_name)
if cur_opt_value is not None:
def_opt_value = env.coredata.validate_option_value(def_opt_name, def_opt_value)
if def_opt_value != cur_opt_value.value:
yield (def_opt_name, def_opt_value, cur_opt_value)
def build_func_dict(self):
self.funcs.update({'add_global_arguments': self.func_add_global_arguments,
'add_project_arguments': self.func_add_project_arguments,
'add_global_link_arguments': self.func_add_global_link_arguments,
'add_project_link_arguments': self.func_add_project_link_arguments,
'add_test_setup': self.func_add_test_setup,
'add_languages': self.func_add_languages,
'alias_target': self.func_alias_target,
'assert': self.func_assert,
'benchmark': self.func_benchmark,
'build_target': self.func_build_target,
'configuration_data': self.func_configuration_data,
'configure_file': self.func_configure_file,
'custom_target': self.func_custom_target,
'declare_dependency': self.func_declare_dependency,
'dependency': self.func_dependency,
'disabler': self.func_disabler,
'environment': self.func_environment,
'error': self.func_error,
'executable': self.func_executable,
'generator': self.func_generator,
'gettext': self.func_gettext,
'get_option': self.func_get_option,
'get_variable': self.func_get_variable,
'files': self.func_files,
'find_library': self.func_find_library,
'find_program': self.func_find_program,
'include_directories': self.func_include_directories,
'import': self.func_import,
'install_data': self.func_install_data,
'install_headers': self.func_install_headers,
'install_man': self.func_install_man,
'install_subdir': self.func_install_subdir,
'is_disabler': self.func_is_disabler,
'is_variable': self.func_is_variable,
'jar': self.func_jar,
'join_paths': self.func_join_paths,
'library': self.func_library,
'message': self.func_message,
'warning': self.func_warning,
'option': self.func_option,
'project': self.func_project,
'run_target': self.func_run_target,
'run_command': self.func_run_command,
'set_variable': self.func_set_variable,
'subdir': self.func_subdir,
'subdir_done': self.func_subdir_done,
'subproject': self.func_subproject,
'summary': self.func_summary,
'shared_library': self.func_shared_lib,
'shared_module': self.func_shared_module,
'static_library': self.func_static_lib,
'both_libraries': self.func_both_lib,
'test': self.func_test,
'vcs_tag': self.func_vcs_tag
})
if 'MESON_UNIT_TEST' in os.environ:
self.funcs.update({'exception': self.func_exception})
def holderify(self, item):
if isinstance(item, list):
return [self.holderify(x) for x in item]
if isinstance(item, dict):
return {k: self.holderify(v) for k, v in item.items()}
if isinstance(item, build.CustomTarget):
return CustomTargetHolder(item, self)
elif isinstance(item, (int, str, bool, Disabler)) or item is None:
return item
elif isinstance(item, build.Executable):
return ExecutableHolder(item, self)
elif isinstance(item, build.GeneratedList):
return GeneratedListHolder(item)
elif isinstance(item, build.RunTarget):
raise RuntimeError('This is not a pipe.')
elif isinstance(item, build.RunScript):
raise RuntimeError('Do not do this.')
elif isinstance(item, build.Data):
return DataHolder(item)
elif isinstance(item, dependencies.Dependency):
return DependencyHolder(item, self.subproject)
elif isinstance(item, dependencies.ExternalProgram):
return ExternalProgramHolder(item, self.subproject)
elif hasattr(item, 'held_object'):
return item
else:
raise InterpreterException('Module returned a value of unknown type.')
def process_new_values(self, invalues):
invalues = listify(invalues)
for v in invalues:
if isinstance(v, (RunTargetHolder, CustomTargetHolder, BuildTargetHolder)):
v = v.held_object
if isinstance(v, (build.BuildTarget, build.CustomTarget, build.RunTarget)):
self.add_target(v.name, v)
elif isinstance(v, list):
self.module_method_callback(v)
elif isinstance(v, build.GeneratedList):
pass
elif isinstance(v, build.RunScript):
self.build.install_scripts.append(v)
elif isinstance(v, build.Data):
self.build.data.append(v)
elif isinstance(v, dependencies.ExternalProgram):
return ExternalProgramHolder(v, self.subproject)
elif isinstance(v, dependencies.InternalDependency):
# FIXME: This is special cased and not ideal:
# The first source is our new VapiTarget, the rest are deps
self.process_new_values(v.sources[0])
elif hasattr(v, 'held_object'):
pass
elif isinstance(v, (int, str, bool, Disabler)):
pass
else:
raise InterpreterException('Module returned a value of unknown type.')
def module_method_callback(self, return_object):
if not isinstance(return_object, ModuleReturnValue):
            raise InterpreterException('Bug in module: it returned an invalid object')
invalues = return_object.new_objects
self.process_new_values(invalues)
return self.holderify(return_object.return_value)
def get_build_def_files(self):
return self.build_def_files
def add_build_def_file(self, f):
        # Use a relative path for files within the source directory, and an absolute
        # path for system files. Skip files within the build directory. Also skip
        # files that are not regular files (e.g. /dev/stdout). Normalize the path to
        # avoid duplicates; this is especially important to convert '/' to '\' on Windows.
if isinstance(f, mesonlib.File):
if f.is_built:
return
f = os.path.normpath(f.relative_name())
elif os.path.isfile(f) and not f.startswith('/dev'):
srcdir = Path(self.environment.get_source_dir())
builddir = Path(self.environment.get_build_dir())
f = Path(f).resolve()
if builddir in f.parents:
return
if srcdir in f.parents:
f = f.relative_to(srcdir)
f = str(f)
else:
return
if f not in self.build_def_files:
self.build_def_files.append(f)
def get_variables(self):
return self.variables
def check_stdlibs(self):
for for_machine in MachineChoice:
props = self.build.environment.properties[for_machine]
for l in self.coredata.compilers[for_machine].keys():
try:
di = mesonlib.stringlistify(props.get_stdlib(l))
if len(di) != 2:
raise InterpreterException('Stdlib definition for %s should have exactly two elements.'
% l)
projname, depname = di
subproj = self.do_subproject(projname, 'meson', {})
self.build.stdlibs.host[l] = subproj.get_variable_method([depname], {})
except KeyError:
pass
except InvalidArguments:
pass
@stringArgs
@noKwargs
def func_import(self, node, args, kwargs):
if len(args) != 1:
raise InvalidCode('Import takes one argument.')
modname = args[0]
if modname.startswith('unstable-'):
plainname = modname.split('-', 1)[1]
mlog.warning('Module %s has no backwards or forwards compatibility and might not exist in future releases.' % modname, location=node)
modname = 'unstable_' + plainname
if modname not in self.modules:
try:
module = importlib.import_module('mesonbuild.modules.' + modname)
except ImportError:
raise InvalidArguments('Module "%s" does not exist' % (modname, ))
self.modules[modname] = module.initialize(self)
return ModuleHolder(modname, self.modules[modname], self)
@stringArgs
@noKwargs
def func_files(self, node, args, kwargs):
return [mesonlib.File.from_source_file(self.environment.source_dir, self.subdir, fname) for fname in args]
@FeatureNewKwargs('declare_dependency', '0.46.0', ['link_whole'])
@FeatureNewKwargs('declare_dependency', '0.54.0', ['variables'])
@permittedKwargs(permitted_kwargs['declare_dependency'])
@noPosargs
def func_declare_dependency(self, node, args, kwargs):
version = kwargs.get('version', self.project_version)
if not isinstance(version, str):
raise InterpreterException('Version must be a string.')
incs = self.extract_incdirs(kwargs)
libs = unholder(extract_as_list(kwargs, 'link_with'))
libs_whole = unholder(extract_as_list(kwargs, 'link_whole'))
sources = extract_as_list(kwargs, 'sources')
sources = unholder(listify(self.source_strings_to_files(sources)))
deps = unholder(extract_as_list(kwargs, 'dependencies'))
compile_args = mesonlib.stringlistify(kwargs.get('compile_args', []))
link_args = mesonlib.stringlistify(kwargs.get('link_args', []))
variables = kwargs.get('variables', {})
if not isinstance(variables, dict):
raise InterpreterException('variables must be a dict.')
if not all(isinstance(v, str) for v in variables.values()):
# Because that is how they will come from pkg-config and cmake
            raise InterpreterException('variables values must be strings.')
final_deps = []
for d in deps:
try:
d = d.held_object
except Exception:
pass
if not isinstance(d, (dependencies.Dependency, dependencies.ExternalLibrary, dependencies.InternalDependency)):
raise InterpreterException('Dependencies must be external deps')
final_deps.append(d)
for l in libs:
if isinstance(l, dependencies.Dependency):
raise InterpreterException('''Entries in "link_with" may only be self-built targets,
external dependencies (including libraries) must go to "dependencies".''')
dep = dependencies.InternalDependency(version, incs, compile_args,
link_args, libs, libs_whole, sources, final_deps,
variables)
return DependencyHolder(dep, self.subproject)
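    # Illustrative meson.build usage of declare_dependency() (sketch; the target and
    # directory names are hypothetical):
    #   foo_dep = declare_dependency(include_directories: include_directories('include'),
    #                                link_with: foo_lib,
    #                                version: '1.0')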
@noKwargs
def func_assert(self, node, args, kwargs):
if len(args) == 1:
FeatureNew('assert function without message argument', '0.53.0').use(self.subproject)
value = args[0]
message = None
elif len(args) == 2:
value, message = args
if not isinstance(message, str):
raise InterpreterException('Assert message not a string.')
else:
raise InterpreterException('Assert takes between one and two arguments')
if not isinstance(value, bool):
raise InterpreterException('Assert value not bool.')
if not value:
if message is None:
from .ast import AstPrinter
printer = AstPrinter()
node.args.arguments[0].accept(printer)
message = printer.result
raise InterpreterException('Assert failed: ' + message)
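    # Illustrative usage sketch of assert(); the message-less form needs 0.53.0 per
    # the FeatureNew check above:
    #   assert(get_option('level') >= 0, 'level option must be non-negative')
    #   assert(meson.version().version_compare('>=0.53.0'))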
def validate_arguments(self, args, argcount, arg_types):
if argcount is not None:
if argcount != len(args):
raise InvalidArguments('Expected %d arguments, got %d.' %
(argcount, len(args)))
for actual, wanted in zip(args, arg_types):
if wanted is not None:
if not isinstance(actual, wanted):
raise InvalidArguments('Incorrect argument type.')
@FeatureNewKwargs('run_command', '0.50.0', ['env'])
@FeatureNewKwargs('run_command', '0.47.0', ['check', 'capture'])
@permittedKwargs(permitted_kwargs['run_command'])
def func_run_command(self, node, args, kwargs):
return self.run_command_impl(node, args, kwargs)
def run_command_impl(self, node, args, kwargs, in_builddir=False):
if len(args) < 1:
raise InterpreterException('Not enough arguments')
cmd, *cargs = args
capture = kwargs.get('capture', True)
srcdir = self.environment.get_source_dir()
builddir = self.environment.get_build_dir()
check = kwargs.get('check', False)
if not isinstance(check, bool):
raise InterpreterException('Check must be boolean.')
env = self.unpack_env_kwarg(kwargs)
m = 'must be a string, or the output of find_program(), files() '\
'or configure_file(), or a compiler object; not {!r}'
expanded_args = []
if isinstance(cmd, ExternalProgramHolder):
cmd = cmd.held_object
if isinstance(cmd, build.Executable):
progname = node.args.arguments[0].value
msg = 'Program {!r} was overridden with the compiled executable {!r}'\
' and therefore cannot be used during configuration'
raise InterpreterException(msg.format(progname, cmd.description()))
if not cmd.found():
raise InterpreterException('command {!r} not found or not executable'.format(cmd.get_name()))
elif isinstance(cmd, CompilerHolder):
exelist = cmd.compiler.get_exelist()
cmd = exelist[0]
prog = ExternalProgram(cmd, silent=True)
if not prog.found():
raise InterpreterException('Program {!r} not found '
'or not executable'.format(cmd))
cmd = prog
expanded_args = exelist[1:]
else:
if isinstance(cmd, mesonlib.File):
cmd = cmd.absolute_path(srcdir, builddir)
elif not isinstance(cmd, str):
raise InterpreterException('First argument ' + m.format(cmd))
# Prefer scripts in the current source directory
search_dir = os.path.join(srcdir, self.subdir)
prog = ExternalProgram(cmd, silent=True, search_dir=search_dir)
if not prog.found():
raise InterpreterException('Program or command {!r} not found '
'or not executable'.format(cmd))
cmd = prog
for a in listify(cargs):
if isinstance(a, str):
expanded_args.append(a)
elif isinstance(a, mesonlib.File):
expanded_args.append(a.absolute_path(srcdir, builddir))
elif isinstance(a, ExternalProgramHolder):
expanded_args.append(a.held_object.get_path())
else:
raise InterpreterException('Arguments ' + m.format(a))
# If any file that was used as an argument to the command
# changes, we must re-run the configuration step.
self.add_build_def_file(cmd.get_path())
for a in expanded_args:
if not os.path.isabs(a):
a = os.path.join(builddir if in_builddir else srcdir, self.subdir, a)
self.add_build_def_file(a)
return RunProcess(cmd, expanded_args, env, srcdir, builddir, self.subdir,
self.environment.get_build_command() + ['introspect'],
in_builddir=in_builddir, check=check, capture=capture)
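    # Illustrative meson.build usage of run_command() (a sketch, not from this file):
    #   git_hash = run_command('git', 'rev-parse', '--short', 'HEAD', check: false).stdout().strip()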
@stringArgs
def func_gettext(self, nodes, args, kwargs):
raise InterpreterException('Gettext() function has been moved to module i18n. Import it and use i18n.gettext() instead')
def func_option(self, nodes, args, kwargs):
raise InterpreterException('Tried to call option() in build description file. All options must be in the option file.')
@FeatureNewKwargs('subproject', '0.38.0', ['default_options'])
@permittedKwargs(permitted_kwargs['subproject'])
@stringArgs
def func_subproject(self, nodes, args, kwargs):
if len(args) != 1:
raise InterpreterException('Subproject takes exactly one argument')
dirname = args[0]
return self.do_subproject(dirname, 'meson', kwargs)
def disabled_subproject(self, dirname, disabled_feature=None, exception=None):
sub = SubprojectHolder(None, self.subproject_dir, dirname,
disabled_feature=disabled_feature, exception=exception)
self.subprojects[dirname] = sub
return sub
def do_subproject(self, dirname: str, method: str, kwargs):
disabled, required, feature = extract_required_kwarg(kwargs, self.subproject)
if disabled:
mlog.log('Subproject', mlog.bold(dirname), ':', 'skipped: feature', mlog.bold(feature), 'disabled')
return self.disabled_subproject(dirname, disabled_feature=feature)
default_options = mesonlib.stringlistify(kwargs.get('default_options', []))
default_options = coredata.create_options_dict(default_options)
if dirname == '':
raise InterpreterException('Subproject dir name must not be empty.')
if dirname[0] == '.':
raise InterpreterException('Subproject dir name must not start with a period.')
if '..' in dirname:
raise InterpreterException('Subproject name must not contain a ".." path segment.')
if os.path.isabs(dirname):
raise InterpreterException('Subproject name must not be an absolute path.')
if has_path_sep(dirname):
mlog.warning('Subproject name has a path separator. This may cause unexpected behaviour.',
location=self.current_node)
if dirname in self.subproject_stack:
fullstack = self.subproject_stack + [dirname]
incpath = ' => '.join(fullstack)
raise InvalidCode('Recursive include of subprojects: %s.' % incpath)
if dirname in self.subprojects:
subproject = self.subprojects[dirname]
if required and not subproject.found():
raise InterpreterException('Subproject "%s/%s" required but not found.' % (
self.subproject_dir, dirname))
return subproject
subproject_dir_abs = os.path.join(self.environment.get_source_dir(), self.subproject_dir)
r = wrap.Resolver(subproject_dir_abs, self.coredata.get_builtin_option('wrap_mode'))
try:
resolved = r.resolve(dirname, method)
except wrap.WrapException as e:
subprojdir = os.path.join(self.subproject_dir, r.directory)
if isinstance(e, wrap.WrapNotFoundException):
# if the reason subproject execution failed was because
# the directory doesn't exist, try to give some helpful
# advice if it's a nested subproject that needs
# promotion...
self.print_nested_info(dirname)
if not required:
mlog.log(e)
mlog.log('Subproject ', mlog.bold(subprojdir), 'is buildable:', mlog.red('NO'), '(disabling)')
return self.disabled_subproject(dirname, exception=e)
raise e
subdir = os.path.join(self.subproject_dir, resolved)
subdir_abs = os.path.join(subproject_dir_abs, resolved)
os.makedirs(os.path.join(self.build.environment.get_build_dir(), subdir), exist_ok=True)
self.global_args_frozen = True
mlog.log()
with mlog.nested():
mlog.log('Executing subproject', mlog.bold(dirname), 'method', mlog.bold(method), '\n')
try:
if method == 'meson':
return self._do_subproject_meson(dirname, subdir, default_options, kwargs)
elif method == 'cmake':
return self._do_subproject_cmake(dirname, subdir, subdir_abs, default_options, kwargs)
else:
raise InterpreterException('The method {} is invalid for the subproject {}'.format(method, dirname))
# Invalid code is always an error
except InvalidCode:
raise
except Exception as e:
if not required:
with mlog.nested():
# Suppress the 'ERROR:' prefix because this exception is not
                        # fatal and VS CI treats any logs with "ERROR:" as fatal.
mlog.exception(e, prefix=mlog.yellow('Exception:'))
mlog.log('\nSubproject', mlog.bold(dirname), 'is buildable:', mlog.red('NO'), '(disabling)')
return self.disabled_subproject(dirname, exception=e)
raise e
def _do_subproject_meson(self, dirname, subdir, default_options, kwargs, ast=None, build_def_files=None):
with mlog.nested():
new_build = self.build.copy()
subi = Interpreter(new_build, self.backend, dirname, subdir, self.subproject_dir,
self.modules, default_options, ast=ast)
subi.subprojects = self.subprojects
subi.subproject_stack = self.subproject_stack + [dirname]
current_active = self.active_projectname
current_warnings_counter = mlog.log_warnings_counter
mlog.log_warnings_counter = 0
subi.run()
subi_warnings = mlog.log_warnings_counter
mlog.log_warnings_counter = current_warnings_counter
mlog.log('Subproject', mlog.bold(dirname), 'finished.')
mlog.log()
if 'version' in kwargs:
pv = subi.project_version
wanted = kwargs['version']
if pv == 'undefined' or not mesonlib.version_compare_many(pv, wanted)[0]:
raise InterpreterException('Subproject %s version is %s but %s required.' % (dirname, pv, wanted))
self.active_projectname = current_active
self.subprojects.update(subi.subprojects)
self.subprojects[dirname] = SubprojectHolder(subi, self.subproject_dir, dirname,
warnings=subi_warnings)
        # Duplicates are possible when a subproject uses files from the project root.
if build_def_files:
self.build_def_files = list(set(self.build_def_files + build_def_files))
else:
self.build_def_files = list(set(self.build_def_files + subi.build_def_files))
self.build.merge(subi.build)
self.build.subprojects[dirname] = subi.project_version
self.summary.update(subi.summary)
return self.subprojects[dirname]
def _do_subproject_cmake(self, dirname, subdir, subdir_abs, default_options, kwargs):
with mlog.nested():
new_build = self.build.copy()
prefix = self.coredata.builtins['prefix'].value
cmake_options = mesonlib.stringlistify(kwargs.get('cmake_options', []))
cm_int = CMakeInterpreter(new_build, subdir, subdir_abs, prefix, new_build.environment, self.backend)
cm_int.initialise(cmake_options)
cm_int.analyse()
# Generate a meson ast and execute it with the normal do_subproject_meson
ast = cm_int.pretend_to_be_meson()
mlog.log()
with mlog.nested():
mlog.log('Processing generated meson AST')
# Debug print the generated meson file
from .ast import AstIndentationGenerator, AstPrinter
printer = AstPrinter()
ast.accept(AstIndentationGenerator())
ast.accept(printer)
printer.post_process()
meson_filename = os.path.join(self.build.environment.get_build_dir(), subdir, 'meson.build')
with open(meson_filename, "w") as f:
f.write(printer.result)
mlog.log('Build file:', meson_filename)
mlog.cmd_ci_include(meson_filename)
mlog.log()
result = self._do_subproject_meson(dirname, subdir, default_options, kwargs, ast, cm_int.bs_files)
result.cm_interpreter = cm_int
mlog.log()
return result
def get_option_internal(self, optname):
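        '''Look up an option value. Inside a subproject the prefixed name
        ('subproject:option') is tried first; builtin, backend and compiler
        options may fall back to the top-level value, and a yielding user
        option inherits the parent project's value when both options have the
        same type.'''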
raw_optname = optname
if self.is_subproject():
optname = self.subproject + ':' + optname
for opts in [
self.coredata.base_options, compilers.base_options, self.coredata.builtins,
dict(self.coredata.get_prefixed_options_per_machine(self.coredata.builtins_per_machine)),
dict(self.coredata.flatten_lang_iterator(
self.coredata.get_prefixed_options_per_machine(self.coredata.compiler_options))),
]:
v = opts.get(optname)
if v is None or v.yielding:
v = opts.get(raw_optname)
if v is not None:
return v
try:
opt = self.coredata.user_options[optname]
if opt.yielding and ':' in optname and raw_optname in self.coredata.user_options:
popt = self.coredata.user_options[raw_optname]
if type(opt) is type(popt):
opt = popt
else:
# Get class name, then option type as a string
opt_type = opt.__class__.__name__[4:][:-6].lower()
popt_type = popt.__class__.__name__[4:][:-6].lower()
                    # This is not a hard error to avoid dependency hell; the workaround
                    # when this happens is to simply set the subproject's option directly.
mlog.warning('Option {0!r} of type {1!r} in subproject {2!r} cannot yield '
'to parent option of type {3!r}, ignoring parent value. '
'Use -D{2}:{0}=value to set the value for this option manually'
'.'.format(raw_optname, opt_type, self.subproject, popt_type),
location=self.current_node)
return opt
except KeyError:
pass
raise InterpreterException('Tried to access unknown option "%s".' % optname)
@stringArgs
@noKwargs
def func_get_option(self, nodes, args, kwargs):
if len(args) != 1:
raise InterpreterException('Argument required for get_option.')
optname = args[0]
if ':' in optname:
raise InterpreterException('Having a colon in option name is forbidden, '
'projects are not allowed to directly access '
'options of other subprojects.')
opt = self.get_option_internal(optname)
if isinstance(opt, coredata.UserFeatureOption):
return FeatureOptionHolder(self.environment, optname, opt)
elif isinstance(opt, coredata.UserOption):
return opt.value
return opt
@noKwargs
def func_configuration_data(self, node, args, kwargs):
if len(args) > 1:
            raise InterpreterException('configuration_data takes only one optional positional argument')
elif len(args) == 1:
FeatureNew('configuration_data dictionary', '0.49.0').use(self.subproject)
initial_values = args[0]
if not isinstance(initial_values, dict):
raise InterpreterException('configuration_data first argument must be a dictionary')
else:
initial_values = {}
return ConfigurationDataHolder(self.subproject, initial_values)
def set_backend(self):
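        '''Instantiate the backend named by the 'backend' builtin option. An
        auto-detected Visual Studio backend updates the builtin option, and
        backend options are initialised only on the first invocation so that
        values given on the command line are not overridden on reconfigure.'''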
# The backend is already set when parsing subprojects
if self.backend is not None:
return
backend = self.coredata.get_builtin_option('backend')
from .backend import backends
self.backend = backends.get_backend_from_name(backend, self.build, self)
if self.backend is None:
raise InterpreterException('Unknown backend "%s".' % backend)
if backend != self.backend.name:
if self.backend.name.startswith('vs'):
mlog.log('Auto detected Visual Studio backend:', mlog.bold(self.backend.name))
self.coredata.set_builtin_option('backend', self.backend.name)
# Only init backend options on first invocation otherwise it would
# override values previously set from command line.
if self.environment.first_invocation:
self.coredata.init_backend_options(backend)
options = {k: v for k, v in self.environment.cmd_line_options.items() if k.startswith('backend_')}
self.coredata.set_options(options)
@stringArgs
@permittedKwargs(permitted_kwargs['project'])
def func_project(self, node, args, kwargs):
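        '''Handle project(): process the project options file, apply
        default_options on the first invocation only, record the project
        name, version and license, honour the 'subproject_dir' keyword, add
        the listed languages for both the build and host machines and pick a
        backend.'''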
if len(args) < 1:
raise InvalidArguments('Not enough arguments to project(). Needs at least the project name.')
proj_name, *proj_langs = args
if ':' in proj_name:
raise InvalidArguments("Project name {!r} must not contain ':'".format(proj_name))
if 'meson_version' in kwargs:
cv = coredata.version
pv = kwargs['meson_version']
if not mesonlib.version_compare(cv, pv):
raise InterpreterException('Meson version is %s but project requires %s' % (cv, pv))
if os.path.exists(self.option_file):
oi = optinterpreter.OptionInterpreter(self.subproject)
oi.process(self.option_file)
self.coredata.merge_user_options(oi.options)
self.add_build_def_file(self.option_file)
# Do not set default_options on reconfigure otherwise it would override
# values previously set from command line. That means that changing
# default_options in a project will trigger a reconfigure but won't
# have any effect.
self.project_default_options = mesonlib.stringlistify(kwargs.get('default_options', []))
self.project_default_options = coredata.create_options_dict(self.project_default_options)
if self.environment.first_invocation:
default_options = self.project_default_options
default_options.update(self.default_project_options)
self.coredata.init_builtins(self.subproject)
else:
default_options = {}
self.coredata.set_default_options(default_options, self.subproject, self.environment)
if not self.is_subproject():
self.build.project_name = proj_name
self.active_projectname = proj_name
self.project_version = kwargs.get('version', 'undefined')
if self.build.project_version is None:
self.build.project_version = self.project_version
proj_license = mesonlib.stringlistify(kwargs.get('license', 'unknown'))
self.build.dep_manifest[proj_name] = {'version': self.project_version,
'license': proj_license}
if self.subproject in self.build.projects:
raise InvalidCode('Second call to project().')
if not self.is_subproject() and 'subproject_dir' in kwargs:
spdirname = kwargs['subproject_dir']
if not isinstance(spdirname, str):
raise InterpreterException('Subproject_dir must be a string')
if os.path.isabs(spdirname):
raise InterpreterException('Subproject_dir must not be an absolute path.')
if spdirname.startswith('.'):
raise InterpreterException('Subproject_dir must not begin with a period.')
if '..' in spdirname:
raise InterpreterException('Subproject_dir must not contain a ".." segment.')
self.subproject_dir = spdirname
self.build.subproject_dir = self.subproject_dir
mesonlib.project_meson_versions[self.subproject] = ''
if 'meson_version' in kwargs:
mesonlib.project_meson_versions[self.subproject] = kwargs['meson_version']
self.build.projects[self.subproject] = proj_name
mlog.log('Project name:', mlog.bold(proj_name))
mlog.log('Project version:', mlog.bold(self.project_version))
self.add_languages(proj_langs, True, MachineChoice.BUILD)
self.add_languages(proj_langs, True, MachineChoice.HOST)
self.set_backend()
if not self.is_subproject():
self.check_stdlibs()
@FeatureNewKwargs('add_languages', '0.54.0', ['native'])
@permittedKwargs(permitted_kwargs['add_languages'])
@stringArgs
def func_add_languages(self, node, args, kwargs):
disabled, required, feature = extract_required_kwarg(kwargs, self.subproject)
if disabled:
for lang in sorted(args, key=compilers.sort_clink):
mlog.log('Compiler for language', mlog.bold(lang), 'skipped: feature', mlog.bold(feature), 'disabled')
return False
if 'native' in kwargs:
return self.add_languages(args, required, self.machine_from_native_kwarg(kwargs))
else:
# absent 'native' means 'both' for backwards compatibility
mlog.warning('add_languages is missing native:, assuming languages are wanted for both host and build.',
location=self.current_node)
success = self.add_languages(args, False, MachineChoice.BUILD)
success &= self.add_languages(args, required, MachineChoice.HOST)
return success
def get_message_string_arg(self, arg):
if isinstance(arg, list):
argstr = stringifyUserArguments(arg)
elif isinstance(arg, dict):
argstr = stringifyUserArguments(arg)
elif isinstance(arg, str):
argstr = arg
elif isinstance(arg, int):
argstr = str(arg)
else:
raise InvalidArguments('Function accepts only strings, integers, lists and lists thereof.')
return argstr
@noArgsFlattening
@noKwargs
def func_message(self, node, args, kwargs):
if len(args) > 1:
FeatureNew('message with more than one argument', '0.54.0').use(self.subproject)
args_str = [self.get_message_string_arg(i) for i in args]
self.message_impl(args_str)
def message_impl(self, args):
mlog.log(mlog.bold('Message:'), *args)
@noArgsFlattening
@FeatureNewKwargs('summary', '0.54.0', ['list_sep'])
@permittedKwargs({'section', 'bool_yn', 'list_sep'})
@FeatureNew('summary', '0.53.0')
def func_summary(self, node, args, kwargs):
if len(args) == 1:
if not isinstance(args[0], dict):
raise InterpreterException('Summary first argument must be dictionary.')
values = args[0]
elif len(args) == 2:
if not isinstance(args[0], str):
raise InterpreterException('Summary first argument must be string.')
values = {args[0]: args[1]}
else:
raise InterpreterException('Summary accepts at most 2 arguments.')
section = kwargs.get('section', '')
if not isinstance(section, str):
raise InterpreterException('Summary\'s section keyword argument must be string.')
self.summary_impl(section, values, kwargs)
def summary_impl(self, section, values, kwargs):
if self.subproject not in self.summary:
self.summary[self.subproject] = Summary(self.active_projectname, self.project_version)
self.summary[self.subproject].add_section(section, values, kwargs)
def _print_summary(self):
        # Add automatic 'Subprojects' section in main project.
all_subprojects = collections.OrderedDict()
for name, subp in sorted(self.subprojects.items()):
value = subp.found()
if subp.disabled_feature:
value = [value, 'Feature {!r} disabled'.format(subp.disabled_feature)]
elif subp.exception:
value = [value, str(subp.exception)]
elif subp.warnings > 0:
value = [value, '{} warnings'.format(subp.warnings)]
all_subprojects[name] = value
if all_subprojects:
self.summary_impl('Subprojects', all_subprojects,
{'bool_yn': True,
'list_sep': ' ',
})
# Print all summaries, main project last.
mlog.log('') # newline
main_summary = self.summary.pop('', None)
for _, summary in sorted(self.summary.items()):
summary.dump()
if main_summary:
main_summary.dump()
@noArgsFlattening
@FeatureNew('warning', '0.44.0')
@noKwargs
def func_warning(self, node, args, kwargs):
if len(args) > 1:
FeatureNew('warning with more than one argument', '0.54.0').use(self.subproject)
args_str = [self.get_message_string_arg(i) for i in args]
mlog.warning(*args_str, location=node)
@noKwargs
def func_error(self, node, args, kwargs):
self.validate_arguments(args, 1, [str])
raise InterpreterException('Problem encountered: ' + args[0])
@noKwargs
def func_exception(self, node, args, kwargs):
self.validate_arguments(args, 0, [])
raise Exception()
def add_languages(self, args: T.Sequence[str], required: bool, for_machine: MachineChoice) -> bool:
success = self.add_languages_for(args, required, for_machine)
if not self.coredata.is_cross_build():
self.coredata.copy_build_options_from_regular_ones()
self._redetect_machines()
return success
def should_skip_sanity_check(self, for_machine: MachineChoice) -> bool:
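        '''Compiler sanity checks may only be skipped for the host machine of
        a cross build, controlled by the 'skip_sanity_check' property of the
        host machine (typically set in the cross file).'''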
if for_machine != MachineChoice.HOST:
return False
if not self.environment.is_cross_build():
return False
should = self.environment.properties.host.get('skip_sanity_check', False)
if not isinstance(should, bool):
raise InterpreterException('Option skip_sanity_check must be a boolean.')
return should
def add_languages_for(self, args, required, for_machine: MachineChoice):
langs = set(self.coredata.compilers[for_machine].keys())
langs.update(args)
if 'vala' in langs:
if 'c' not in langs:
raise InterpreterException('Compiling Vala requires C. Add C to your project languages and rerun Meson.')
success = True
for lang in sorted(args, key=compilers.sort_clink):
lang = lang.lower()
clist = self.coredata.compilers[for_machine]
machine_name = for_machine.get_lower_case_name()
if lang in clist:
comp = clist[lang]
else:
try:
comp = self.environment.detect_compiler_for(lang, for_machine)
if comp is None:
raise InvalidArguments('Tried to use unknown language "%s".' % lang)
if self.should_skip_sanity_check(for_machine):
mlog.log_once('Cross compiler sanity tests disabled via the cross file.')
else:
comp.sanity_check(self.environment.get_scratch_dir(), self.environment)
except Exception:
if not required:
mlog.log('Compiler for language',
mlog.bold(lang), 'for the', machine_name,
'machine not found.')
success = False
continue
else:
raise
if for_machine == MachineChoice.HOST or self.environment.is_cross_build():
logger_fun = mlog.log
else:
logger_fun = mlog.debug
logger_fun(comp.get_display_language(), 'compiler for the', machine_name, 'machine:',
mlog.bold(' '.join(comp.get_exelist())), comp.get_version_string())
if comp.linker is not None:
logger_fun(comp.get_display_language(), 'linker for the', machine_name, 'machine:',
mlog.bold(' '.join(comp.linker.get_exelist())), comp.linker.id, comp.linker.version)
self.build.ensure_static_linker(comp)
return success
def program_from_file_for(self, for_machine, prognames, silent):
for p in unholder(prognames):
if isinstance(p, mesonlib.File):
continue # Always points to a local (i.e. self generated) file.
if not isinstance(p, str):
raise InterpreterException('Executable name must be a string')
prog = ExternalProgram.from_bin_list(self.environment, for_machine, p)
if prog.found():
return ExternalProgramHolder(prog, self.subproject)
return None
def program_from_system(self, args, search_dirs, silent=False):
# Search for scripts relative to current subdir.
# Do not cache found programs because find_program('foobar')
# might give different results when run from different source dirs.
source_dir = os.path.join(self.environment.get_source_dir(), self.subdir)
for exename in args:
if isinstance(exename, mesonlib.File):
if exename.is_built:
search_dir = os.path.join(self.environment.get_build_dir(),
exename.subdir)
else:
search_dir = os.path.join(self.environment.get_source_dir(),
exename.subdir)
exename = exename.fname
extra_search_dirs = []
elif isinstance(exename, str):
search_dir = source_dir
extra_search_dirs = search_dirs
else:
raise InvalidArguments('find_program only accepts strings and '
'files, not {!r}'.format(exename))
extprog = dependencies.ExternalProgram(exename, search_dir=search_dir,
extra_search_dirs=extra_search_dirs,
silent=silent)
progobj = ExternalProgramHolder(extprog, self.subproject)
if progobj.found():
return progobj
def program_from_overrides(self, command_names, silent=False):
for name in command_names:
if not isinstance(name, str):
continue
if name in self.build.find_overrides:
exe = self.build.find_overrides[name]
if not silent:
mlog.log('Program', mlog.bold(name), 'found:', mlog.green('YES'),
'(overridden: %s)' % exe.description())
return ExternalProgramHolder(exe, self.subproject, self.backend)
return None
def store_name_lookups(self, command_names):
for name in command_names:
if isinstance(name, str):
self.build.searched_programs.add(name)
def add_find_program_override(self, name, exe):
if name in self.build.searched_programs:
raise InterpreterException('Tried to override finding of executable "%s" which has already been found.'
% name)
if name in self.build.find_overrides:
raise InterpreterException('Tried to override executable "%s" which has already been overridden.'
% name)
self.build.find_overrides[name] = exe
    # TODO update modules to always pass `for_machine`. It is bad form to assume
# the host machine.
def find_program_impl(self, args, for_machine: MachineChoice = MachineChoice.HOST,
required=True, silent=True, wanted='', search_dirs=None):
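        '''Search order for find_program(): overrides registered with
        meson.override_find_program(), then entries from the machine file
        [binaries] section, then the system / source tree search. A 'python3'
        lookup that fails falls back to the Python interpreter running Meson.
        Raises InvalidArguments when required and nothing suitable is found.'''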
if not isinstance(args, list):
args = [args]
progobj = self.program_from_overrides(args, silent=silent)
if progobj is None:
progobj = self.program_from_file_for(for_machine, args, silent=silent)
if progobj is None:
progobj = self.program_from_system(args, search_dirs, silent=silent)
if progobj is None and args[0].endswith('python3'):
prog = dependencies.ExternalProgram('python3', mesonlib.python_command, silent=True)
progobj = ExternalProgramHolder(prog, self.subproject)
if required and (progobj is None or not progobj.found()):
raise InvalidArguments('Program(s) {!r} not found or not executable'.format(args))
if progobj is None:
return ExternalProgramHolder(dependencies.NonExistingExternalProgram(' '.join(args)), self.subproject)
# Only store successful lookups
self.store_name_lookups(args)
if wanted:
version = progobj.get_version(self)
is_found, not_found, found = mesonlib.version_compare_many(version, wanted)
if not is_found:
mlog.log('Program', mlog.bold(progobj.get_name()), 'found:', mlog.red('NO'),
'found {!r} but need:'.format(version),
', '.join(["'{}'".format(e) for e in not_found]))
if required:
m = 'Invalid version of program, need {!r} {!r} found {!r}.'
raise InvalidArguments(m.format(progobj.get_name(), not_found, version))
return ExternalProgramHolder(dependencies.NonExistingExternalProgram(' '.join(args)), self.subproject)
return progobj
@FeatureNewKwargs('find_program', '0.53.0', ['dirs'])
@FeatureNewKwargs('find_program', '0.52.0', ['version'])
@FeatureNewKwargs('find_program', '0.49.0', ['disabler'])
@disablerIfNotFound
@permittedKwargs(permitted_kwargs['find_program'])
def func_find_program(self, node, args, kwargs):
if not args:
raise InterpreterException('No program name specified.')
disabled, required, feature = extract_required_kwarg(kwargs, self.subproject)
if disabled:
mlog.log('Program', mlog.bold(' '.join(args)), 'skipped: feature', mlog.bold(feature), 'disabled')
return ExternalProgramHolder(dependencies.NonExistingExternalProgram(' '.join(args)), self.subproject)
search_dirs = extract_search_dirs(kwargs)
wanted = mesonlib.stringlistify(kwargs.get('version', []))
for_machine = self.machine_from_native_kwarg(kwargs)
return self.find_program_impl(args, for_machine, required=required,
silent=False, wanted=wanted,
search_dirs=search_dirs)
def func_find_library(self, node, args, kwargs):
raise InvalidCode('find_library() is removed, use meson.get_compiler(\'name\').find_library() instead.\n'
'Look here for documentation: http://mesonbuild.com/Reference-manual.html#compiler-object\n'
'Look here for example: http://mesonbuild.com/howtox.html#add-math-library-lm-portably\n'
)
def _find_cached_dep(self, name, display_name, kwargs):
        # Check if we want this as a build-time / build machine or run-time /
# host machine dep.
for_machine = self.machine_from_native_kwarg(kwargs)
identifier = dependencies.get_dep_identifier(name, kwargs)
wanted_vers = mesonlib.stringlistify(kwargs.get('version', []))
override = self.build.dependency_overrides[for_machine].get(identifier)
if override:
info = [mlog.blue('(overridden)' if override.explicit else '(cached)')]
cached_dep = override.dep
            # We don't implicitly override not-found dependencies, but the user could
# have explicitly called meson.override_dependency() with a not-found
# dep.
if not cached_dep.found():
mlog.log('Dependency', mlog.bold(display_name),
'found:', mlog.red('NO'), *info)
return identifier, cached_dep
found_vers = cached_dep.get_version()
if not self.check_version(wanted_vers, found_vers):
mlog.log('Dependency', mlog.bold(name),
'found:', mlog.red('NO'),
'found', mlog.normal_cyan(found_vers), 'but need:',
mlog.bold(', '.join(["'{}'".format(e) for e in wanted_vers])),
*info)
return identifier, NotFoundDependency(self.environment)
else:
info = [mlog.blue('(cached)')]
cached_dep = self.coredata.deps[for_machine].get(identifier)
if cached_dep:
found_vers = cached_dep.get_version()
if not self.check_version(wanted_vers, found_vers):
return identifier, None
if cached_dep:
if found_vers:
info = [mlog.normal_cyan(found_vers), *info]
mlog.log('Dependency', mlog.bold(display_name),
'found:', mlog.green('YES'), *info)
return identifier, cached_dep
return identifier, None
@staticmethod
def check_version(wanted, found):
if not wanted:
return True
if found == 'undefined' or not mesonlib.version_compare_many(found, wanted)[0]:
return False
return True
def notfound_dependency(self):
return DependencyHolder(NotFoundDependency(self.environment), self.subproject)
def verify_fallback_consistency(self, dirname, varname, cached_dep):
subi = self.subprojects.get(dirname)
if not cached_dep or not varname or not subi or not cached_dep.found():
return
dep = subi.get_variable_method([varname], {})
if dep.held_object != cached_dep:
            m = 'Inconsistency: Subproject has overridden the dependency with a variable other than {!r}'
raise DependencyException(m.format(varname))
def get_subproject_dep(self, name, display_name, dirname, varname, kwargs):
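        '''Fetch a dependency from an already configured subproject, either
        via the variable name given in the 'fallback' keyword argument or,
        when no variable name was given, via a dependency override registered
        by the subproject. Version requirements are verified; failures either
        raise (when required) or return a not-found dependency.'''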
required = kwargs.get('required', True)
wanted = mesonlib.stringlistify(kwargs.get('version', []))
subproj_path = os.path.join(self.subproject_dir, dirname)
dep = self.notfound_dependency()
try:
subproject = self.subprojects[dirname]
_, cached_dep = self._find_cached_dep(name, display_name, kwargs)
if varname is None:
                # Assuming the subproject has overridden the dependency we want
if cached_dep:
if required and not cached_dep.found():
m = 'Dependency {!r} is not satisfied'
raise DependencyException(m.format(display_name))
return DependencyHolder(cached_dep, self.subproject)
else:
m = 'Subproject {} did not override dependency {}'
raise DependencyException(m.format(subproj_path, display_name))
if subproject.found():
self.verify_fallback_consistency(dirname, varname, cached_dep)
dep = self.subprojects[dirname].get_variable_method([varname], {})
except InvalidArguments:
pass
if not isinstance(dep, DependencyHolder):
raise InvalidCode('Fetched variable {!r} in the subproject {!r} is '
'not a dependency object.'.format(varname, dirname))
if not dep.found():
if required:
raise DependencyException('Could not find dependency {} in subproject {}'
''.format(varname, dirname))
# If the dependency is not required, don't raise an exception
mlog.log('Dependency', mlog.bold(display_name), 'from subproject',
mlog.bold(subproj_path), 'found:', mlog.red('NO'))
return dep
found = dep.held_object.get_version()
if not self.check_version(wanted, found):
if required:
raise DependencyException('Version {} of subproject dependency {} already '
'cached, requested incompatible version {} for '
'dep {}'.format(found, dirname, wanted, display_name))
mlog.log('Dependency', mlog.bold(display_name), 'from subproject',
mlog.bold(subproj_path), 'found:', mlog.red('NO'),
'found', mlog.normal_cyan(found), 'but need:',
mlog.bold(', '.join(["'{}'".format(e) for e in wanted])))
return self.notfound_dependency()
found = mlog.normal_cyan(found) if found else None
mlog.log('Dependency', mlog.bold(display_name), 'from subproject',
mlog.bold(subproj_path), 'found:', mlog.green('YES'), found)
return dep
def _handle_featurenew_dependencies(self, name):
'Do a feature check on dependencies used by this subproject'
if name == 'mpi':
FeatureNew('MPI Dependency', '0.42.0').use(self.subproject)
elif name == 'pcap':
FeatureNew('Pcap Dependency', '0.42.0').use(self.subproject)
elif name == 'vulkan':
FeatureNew('Vulkan Dependency', '0.42.0').use(self.subproject)
elif name == 'libwmf':
FeatureNew('LibWMF Dependency', '0.44.0').use(self.subproject)
elif name == 'openmp':
FeatureNew('OpenMP Dependency', '0.46.0').use(self.subproject)
@FeatureNewKwargs('dependency', '0.54.0', ['components'])
@FeatureNewKwargs('dependency', '0.52.0', ['include_type'])
@FeatureNewKwargs('dependency', '0.50.0', ['not_found_message', 'cmake_module_path', 'cmake_args'])
@FeatureNewKwargs('dependency', '0.49.0', ['disabler'])
@FeatureNewKwargs('dependency', '0.40.0', ['method'])
@FeatureNewKwargs('dependency', '0.38.0', ['default_options'])
@disablerIfNotFound
@permittedKwargs(permitted_kwargs['dependency'])
def func_dependency(self, node, args, kwargs):
self.validate_arguments(args, 1, [str])
name = args[0]
display_name = name if name else '(anonymous)'
mods = extract_as_list(kwargs, 'modules')
if mods:
display_name += ' (modules: {})'.format(', '.join(str(i) for i in mods))
not_found_message = kwargs.get('not_found_message', '')
if not isinstance(not_found_message, str):
raise InvalidArguments('The not_found_message must be a string.')
try:
d = self.dependency_impl(name, display_name, kwargs)
except Exception:
if not_found_message:
self.message_impl([not_found_message])
raise
        if not d.found() and not_found_message:
            self.message_impl([not_found_message])
# Override this dependency to have consistent results in subsequent
# dependency lookups.
if name and d.found():
for_machine = self.machine_from_native_kwarg(kwargs)
identifier = dependencies.get_dep_identifier(name, kwargs)
if identifier not in self.build.dependency_overrides[for_machine]:
self.build.dependency_overrides[for_machine][identifier] = \
build.DependencyOverride(d.held_object, node, explicit=False)
return d
def dependency_impl(self, name, display_name, kwargs):
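        '''Dependency lookup order: a disabled feature returns not-found
        immediately, then cached or overridden results are reused, then an
        already configured fallback subproject is queried, then an external
        dependency search is performed (skipped when a forced fallback is in
        effect), and finally the fallback subproject is configured if one was
        given.'''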
disabled, required, feature = extract_required_kwarg(kwargs, self.subproject)
if disabled:
mlog.log('Dependency', mlog.bold(display_name), 'skipped: feature', mlog.bold(feature), 'disabled')
return self.notfound_dependency()
has_fallback = 'fallback' in kwargs
if 'default_options' in kwargs and not has_fallback:
            mlog.warning('The "default_options" keyword argument does nothing without a "fallback" keyword argument.',
location=self.current_node)
# writing just "dependency('')" is an error, because it can only fail
if name == '' and required and not has_fallback:
raise InvalidArguments('Dependency is both required and not-found')
if '<' in name or '>' in name or '=' in name:
            raise InvalidArguments('Characters <, > and = are forbidden in dependency names. To specify\n'
                                   'version requirements use the \'version\' keyword argument instead.')
identifier, cached_dep = self._find_cached_dep(name, display_name, kwargs)
if cached_dep:
if has_fallback:
dirname, varname = self.get_subproject_infos(kwargs)
self.verify_fallback_consistency(dirname, varname, cached_dep)
if required and not cached_dep.found():
m = 'Dependency {!r} was already checked and was not found'
raise DependencyException(m.format(display_name))
return DependencyHolder(cached_dep, self.subproject)
# If the dependency has already been configured, possibly by
# a higher level project, try to use it first.
if has_fallback:
dirname, varname = self.get_subproject_infos(kwargs)
if dirname in self.subprojects:
return self.get_subproject_dep(name, display_name, dirname, varname, kwargs)
wrap_mode = self.coredata.get_builtin_option('wrap_mode')
forcefallback = wrap_mode == WrapMode.forcefallback and has_fallback
if name != '' and not forcefallback:
self._handle_featurenew_dependencies(name)
kwargs['required'] = required and not has_fallback
dep = dependencies.find_external_dependency(name, self.environment, kwargs)
kwargs['required'] = required
# Only store found-deps in the cache
# Never add fallback deps to self.coredata.deps since we
# cannot cache them. They must always be evaluated else
# we won't actually read all the build files.
if dep.found():
for_machine = self.machine_from_native_kwarg(kwargs)
self.coredata.deps[for_machine].put(identifier, dep)
return DependencyHolder(dep, self.subproject)
if has_fallback:
return self.dependency_fallback(name, display_name, kwargs)
return self.notfound_dependency()
@FeatureNew('disabler', '0.44.0')
@noKwargs
@noPosargs
def func_disabler(self, node, args, kwargs):
return Disabler()
def print_nested_info(self, dependency_name):
message = ['Dependency', mlog.bold(dependency_name), 'not found but it is available in a sub-subproject.\n' +
                   'To use it in the current project, promote it by going to the project source\n'
'root and issuing']
sprojs = mesonlib.detect_subprojects('subprojects', self.source_root)
if dependency_name not in sprojs:
return
found = sprojs[dependency_name]
if len(found) > 1:
message.append('one of the following commands:')
else:
message.append('the following command:')
command_templ = '\nmeson wrap promote {}'
for l in found:
message.append(mlog.bold(command_templ.format(l[len(self.source_root) + 1:])))
mlog.warning(*message, location=self.current_node)
def get_subproject_infos(self, kwargs):
fbinfo = mesonlib.stringlistify(kwargs['fallback'])
if len(fbinfo) == 1:
FeatureNew('Fallback without variable name', '0.53.0').use(self.subproject)
return fbinfo[0], None
elif len(fbinfo) != 2:
raise InterpreterException('Fallback info must have one or two items.')
return fbinfo
def dependency_fallback(self, name, display_name, kwargs):
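        '''Configure the fallback subproject given in the 'fallback' keyword
        argument (e.g. dependency('foo', fallback : ['foo', 'foo_dep'])) and
        fetch the dependency from it. With wrap_mode=nofallback the fallback
        is skipped entirely, raising if the dependency is required.'''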
required = kwargs.get('required', True)
if self.coredata.get_builtin_option('wrap_mode') == WrapMode.nofallback:
mlog.log('Not looking for a fallback subproject for the dependency',
mlog.bold(display_name), 'because:\nUse of fallback '
'dependencies is disabled.')
if required:
m = 'Dependency {!r} not found and fallback is disabled'
raise DependencyException(m.format(display_name))
return self.notfound_dependency()
elif self.coredata.get_builtin_option('wrap_mode') == WrapMode.forcefallback:
mlog.log('Looking for a fallback subproject for the dependency',
mlog.bold(display_name), 'because:\nUse of fallback dependencies is forced.')
else:
mlog.log('Looking for a fallback subproject for the dependency',
mlog.bold(display_name))
dirname, varname = self.get_subproject_infos(kwargs)
sp_kwargs = {
'default_options': kwargs.get('default_options', []),
'required': required,
}
self.do_subproject(dirname, 'meson', sp_kwargs)
return self.get_subproject_dep(name, display_name, dirname, varname, kwargs)
@FeatureNewKwargs('executable', '0.42.0', ['implib'])
@permittedKwargs(permitted_kwargs['executable'])
def func_executable(self, node, args, kwargs):
return self.build_target(node, args, kwargs, ExecutableHolder)
@permittedKwargs(permitted_kwargs['static_library'])
def func_static_lib(self, node, args, kwargs):
return self.build_target(node, args, kwargs, StaticLibraryHolder)
@permittedKwargs(permitted_kwargs['shared_library'])
def func_shared_lib(self, node, args, kwargs):
holder = self.build_target(node, args, kwargs, SharedLibraryHolder)
holder.held_object.shared_library_only = True
return holder
@permittedKwargs(permitted_kwargs['both_libraries'])
def func_both_lib(self, node, args, kwargs):
return self.build_both_libraries(node, args, kwargs)
@FeatureNew('shared_module', '0.37.0')
@permittedKwargs(permitted_kwargs['shared_module'])
def func_shared_module(self, node, args, kwargs):
return self.build_target(node, args, kwargs, SharedModuleHolder)
@permittedKwargs(permitted_kwargs['library'])
def func_library(self, node, args, kwargs):
return self.build_library(node, args, kwargs)
@permittedKwargs(permitted_kwargs['jar'])
def func_jar(self, node, args, kwargs):
return self.build_target(node, args, kwargs, JarHolder)
@FeatureNewKwargs('build_target', '0.40.0', ['link_whole', 'override_options'])
@permittedKwargs(permitted_kwargs['build_target'])
def func_build_target(self, node, args, kwargs):
if 'target_type' not in kwargs:
raise InterpreterException('Missing target_type keyword argument')
target_type = kwargs.pop('target_type')
if target_type == 'executable':
return self.build_target(node, args, kwargs, ExecutableHolder)
elif target_type == 'shared_library':
return self.build_target(node, args, kwargs, SharedLibraryHolder)
elif target_type == 'shared_module':
FeatureNew('build_target(target_type: \'shared_module\')',
'0.51.0').use(self.subproject)
return self.build_target(node, args, kwargs, SharedModuleHolder)
elif target_type == 'static_library':
return self.build_target(node, args, kwargs, StaticLibraryHolder)
elif target_type == 'both_libraries':
return self.build_both_libraries(node, args, kwargs)
elif target_type == 'library':
return self.build_library(node, args, kwargs)
elif target_type == 'jar':
return self.build_target(node, args, kwargs, JarHolder)
else:
raise InterpreterException('Unknown target_type.')
@permittedKwargs(permitted_kwargs['vcs_tag'])
def func_vcs_tag(self, node, args, kwargs):
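        '''Implement vcs_tag(): create an always-stale custom target that
        runs the internal 'vcstagger' command to replace replace_string in
        the input file with the detected VCS revision, falling back to the
        'fallback' string (default: the project version) when that fails.'''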
if 'input' not in kwargs or 'output' not in kwargs:
raise InterpreterException('Keyword arguments input and output must exist')
if 'fallback' not in kwargs:
FeatureNew('Optional fallback in vcs_tag', '0.41.0').use(self.subproject)
fallback = kwargs.pop('fallback', self.project_version)
if not isinstance(fallback, str):
raise InterpreterException('Keyword argument fallback must be a string.')
replace_string = kwargs.pop('replace_string', '@VCS_TAG@')
regex_selector = '(.*)' # default regex selector for custom command: use complete output
vcs_cmd = kwargs.get('command', None)
if vcs_cmd and not isinstance(vcs_cmd, list):
vcs_cmd = [vcs_cmd]
source_dir = os.path.normpath(os.path.join(self.environment.get_source_dir(), self.subdir))
if vcs_cmd:
# Is the command an executable in path or maybe a script in the source tree?
vcs_cmd[0] = shutil.which(vcs_cmd[0]) or os.path.join(source_dir, vcs_cmd[0])
else:
vcs = mesonlib.detect_vcs(source_dir)
if vcs:
mlog.log('Found %s repository at %s' % (vcs['name'], vcs['wc_dir']))
vcs_cmd = vcs['get_rev'].split()
regex_selector = vcs['rev_regex']
else:
                vcs_cmd = [' '] # executing this cmd will fail in vcstagger.py and force it to use the fallback string
# vcstagger.py parameters: infile, outfile, fallback, source_dir, replace_string, regex_selector, command...
kwargs['command'] = self.environment.get_build_command() + \
['--internal',
'vcstagger',
'@INPUT0@',
'@OUTPUT0@',
fallback,
source_dir,
replace_string,
regex_selector] + vcs_cmd
kwargs.setdefault('build_by_default', True)
kwargs.setdefault('build_always_stale', True)
return self._func_custom_target_impl(node, [kwargs['output']], kwargs)
@FeatureNew('subdir_done', '0.46.0')
@stringArgs
def func_subdir_done(self, node, args, kwargs):
        if len(kwargs) > 0:
            raise InterpreterException('subdir_done does not take named arguments')
        if len(args) > 0:
            raise InterpreterException('subdir_done does not take any arguments')
raise SubdirDoneRequest()
@stringArgs
@FeatureNewKwargs('custom_target', '0.48.0', ['console'])
@FeatureNewKwargs('custom_target', '0.47.0', ['install_mode', 'build_always_stale'])
@FeatureNewKwargs('custom_target', '0.40.0', ['build_by_default'])
@permittedKwargs(permitted_kwargs['custom_target'])
def func_custom_target(self, node, args, kwargs):
if len(args) != 1:
raise InterpreterException('custom_target: Only one positional argument is allowed, and it must be a string name')
if 'depfile' in kwargs and ('@BASENAME@' in kwargs['depfile'] or '@PLAINNAME@' in kwargs['depfile']):
FeatureNew('substitutions in custom_target depfile', '0.47.0').use(self.subproject)
return self._func_custom_target_impl(node, args, kwargs)
def _func_custom_target_impl(self, node, args, kwargs):
'Implementation-only, without FeatureNew checks, for internal use'
name = args[0]
kwargs['install_mode'] = self._get_kwarg_install_mode(kwargs)
if 'input' in kwargs:
try:
kwargs['input'] = self.source_strings_to_files(extract_as_list(kwargs, 'input'))
except mesonlib.MesonException:
mlog.warning('''Custom target input \'%s\' can\'t be converted to File object(s).
This will become a hard error in the future.''' % kwargs['input'], location=self.current_node)
tg = CustomTargetHolder(build.CustomTarget(name, self.subdir, self.subproject, kwargs, backend=self.backend), self)
self.add_target(name, tg.held_object)
return tg
@permittedKwargs(permitted_kwargs['run_target'])
def func_run_target(self, node, args, kwargs):
if len(args) > 1:
raise InvalidCode('Run_target takes only one positional argument: the target name.')
elif len(args) == 1:
if 'command' not in kwargs:
raise InterpreterException('Missing "command" keyword argument')
all_args = extract_as_list(kwargs, 'command')
deps = unholder(extract_as_list(kwargs, 'depends'))
else:
raise InterpreterException('Run_target needs at least one positional argument.')
cleaned_args = []
for i in unholder(listify(all_args)):
if not isinstance(i, (str, build.BuildTarget, build.CustomTarget, dependencies.ExternalProgram, mesonlib.File)):
mlog.debug('Wrong type:', str(i))
raise InterpreterException('Invalid argument to run_target.')
if isinstance(i, dependencies.ExternalProgram) and not i.found():
raise InterpreterException('Tried to use non-existing executable {!r}'.format(i.name))
cleaned_args.append(i)
name = args[0]
if not isinstance(name, str):
raise InterpreterException('First argument must be a string.')
cleaned_deps = []
for d in deps:
if not isinstance(d, (build.BuildTarget, build.CustomTarget)):
raise InterpreterException('Depends items must be build targets.')
cleaned_deps.append(d)
command, *cmd_args = cleaned_args
tg = RunTargetHolder(build.RunTarget(name, command, cmd_args, cleaned_deps, self.subdir, self.subproject), self)
self.add_target(name, tg.held_object)
full_name = (self.subproject, name)
assert(full_name not in self.build.run_target_names)
self.build.run_target_names.add(full_name)
return tg
@FeatureNew('alias_target', '0.52.0')
@noKwargs
def func_alias_target(self, node, args, kwargs):
if len(args) < 2:
raise InvalidCode('alias_target takes at least 2 arguments.')
name = args[0]
if not isinstance(name, str):
raise InterpreterException('First argument must be a string.')
deps = unholder(listify(args[1:]))
for d in deps:
if not isinstance(d, (build.BuildTarget, build.CustomTarget)):
raise InterpreterException('Depends items must be build targets.')
tg = RunTargetHolder(build.AliasTarget(name, deps, self.subdir, self.subproject), self)
self.add_target(name, tg.held_object)
return tg
@permittedKwargs(permitted_kwargs['generator'])
def func_generator(self, node, args, kwargs):
gen = GeneratorHolder(self, args, kwargs)
self.generators.append(gen)
return gen
@FeatureNewKwargs('benchmark', '0.46.0', ['depends'])
@FeatureNewKwargs('benchmark', '0.52.0', ['priority'])
@permittedKwargs(permitted_kwargs['benchmark'])
def func_benchmark(self, node, args, kwargs):
# is_parallel isn't valid here, so make sure it isn't passed
if 'is_parallel' in kwargs:
del kwargs['is_parallel']
self.add_test(node, args, kwargs, False)
@FeatureNewKwargs('test', '0.46.0', ['depends'])
@FeatureNewKwargs('test', '0.52.0', ['priority'])
@permittedKwargs(permitted_kwargs['test'])
def func_test(self, node, args, kwargs):
if kwargs.get('protocol') == 'gtest':
FeatureNew('"gtest" protocol for tests', '0.55.0').use(self.subproject)
self.add_test(node, args, kwargs, True)
def unpack_env_kwarg(self, kwargs) -> build.EnvironmentVariables:
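        '''Normalise the 'env' keyword argument, which may be an
        environment() object, a dictionary (since 0.52.0) or a list/string of
        'NAME=value' entries, into a build.EnvironmentVariables object.'''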
envlist = kwargs.get('env', EnvironmentVariablesHolder())
if isinstance(envlist, EnvironmentVariablesHolder):
env = envlist.held_object
elif isinstance(envlist, dict):
FeatureNew('environment dictionary', '0.52.0').use(self.subproject)
env = EnvironmentVariablesHolder(envlist)
env = env.held_object
else:
envlist = listify(envlist)
# Convert from array to environment object
env = EnvironmentVariablesHolder(envlist)
env = env.held_object
return env
def add_test(self, node, args, kwargs, is_base_test):
if len(args) != 2:
raise InterpreterException('test expects 2 arguments, {} given'.format(len(args)))
if not isinstance(args[0], str):
raise InterpreterException('First argument of test must be a string.')
exe = args[1]
if not isinstance(exe, (ExecutableHolder, JarHolder, ExternalProgramHolder)):
if isinstance(exe, mesonlib.File):
exe = self.func_find_program(node, args[1], {})
else:
raise InterpreterException('Second argument must be executable.')
par = kwargs.get('is_parallel', True)
if not isinstance(par, bool):
raise InterpreterException('Keyword argument is_parallel must be a boolean.')
cmd_args = unholder(extract_as_list(kwargs, 'args'))
for i in cmd_args:
if not isinstance(i, (str, mesonlib.File, build.Target)):
raise InterpreterException('Command line arguments must be strings, files or targets.')
env = self.unpack_env_kwarg(kwargs)
should_fail = kwargs.get('should_fail', False)
if not isinstance(should_fail, bool):
raise InterpreterException('Keyword argument should_fail must be a boolean.')
timeout = kwargs.get('timeout', 30)
if 'workdir' in kwargs:
workdir = kwargs['workdir']
if not isinstance(workdir, str):
raise InterpreterException('Workdir keyword argument must be a string.')
if not os.path.isabs(workdir):
raise InterpreterException('Workdir keyword argument must be an absolute path.')
else:
workdir = None
if not isinstance(timeout, int):
raise InterpreterException('Timeout must be an integer.')
protocol = kwargs.get('protocol', 'exitcode')
if protocol not in {'exitcode', 'tap', 'gtest'}:
raise InterpreterException('Protocol must be "exitcode", "tap", or "gtest".')
suite = []
prj = self.subproject if self.is_subproject() else self.build.project_name
for s in mesonlib.stringlistify(kwargs.get('suite', '')):
if len(s) > 0:
s = ':' + s
suite.append(prj.replace(' ', '_').replace(':', '_') + s)
depends = unholder(extract_as_list(kwargs, 'depends'))
for dep in depends:
if not isinstance(dep, (build.CustomTarget, build.BuildTarget)):
raise InterpreterException('Depends items must be build targets.')
priority = kwargs.get('priority', 0)
if not isinstance(priority, int):
raise InterpreterException('Keyword argument priority must be an integer.')
t = Test(args[0], prj, suite, exe.held_object, depends, par, cmd_args,
env, should_fail, timeout, workdir, protocol, priority)
if is_base_test:
self.build.tests.append(t)
mlog.debug('Adding test', mlog.bold(args[0], True))
else:
self.build.benchmarks.append(t)
mlog.debug('Adding benchmark', mlog.bold(args[0], True))
@FeatureNewKwargs('install_headers', '0.47.0', ['install_mode'])
@permittedKwargs(permitted_kwargs['install_headers'])
def func_install_headers(self, node, args, kwargs):
source_files = self.source_strings_to_files(args)
kwargs['install_mode'] = self._get_kwarg_install_mode(kwargs)
h = Headers(source_files, kwargs)
self.build.headers.append(h)
return h
@FeatureNewKwargs('install_man', '0.47.0', ['install_mode'])
@permittedKwargs(permitted_kwargs['install_man'])
def func_install_man(self, node, args, kwargs):
fargs = self.source_strings_to_files(args)
kwargs['install_mode'] = self._get_kwarg_install_mode(kwargs)
m = Man(fargs, kwargs)
self.build.man.append(m)
return m
@FeatureNewKwargs('subdir', '0.44.0', ['if_found'])
@permittedKwargs(permitted_kwargs['subdir'])
def func_subdir(self, node, args, kwargs):
self.validate_arguments(args, 1, [str])
mesonlib.check_direntry_issues(args)
if '..' in args[0]:
raise InvalidArguments('Subdir contains ..')
if self.subdir == '' and args[0] == self.subproject_dir:
raise InvalidArguments('Must not go into subprojects dir with subdir(), use subproject() instead.')
if self.subdir == '' and args[0].startswith('meson-'):
raise InvalidArguments('The "meson-" prefix is reserved and cannot be used for top-level subdir().')
for i in mesonlib.extract_as_list(kwargs, 'if_found'):
if not hasattr(i, 'found_method'):
raise InterpreterException('Object used in if_found does not have a found method.')
if not i.found_method([], {}):
return
prev_subdir = self.subdir
subdir = os.path.join(prev_subdir, args[0])
if os.path.isabs(subdir):
raise InvalidArguments('Subdir argument must be a relative path.')
absdir = os.path.join(self.environment.get_source_dir(), subdir)
symlinkless_dir = os.path.realpath(absdir)
if symlinkless_dir in self.visited_subdirs:
raise InvalidArguments('Tried to enter directory "%s", which has already been visited.'
% subdir)
self.visited_subdirs[symlinkless_dir] = True
self.subdir = subdir
os.makedirs(os.path.join(self.environment.build_dir, subdir), exist_ok=True)
buildfilename = os.path.join(self.subdir, environment.build_filename)
self.build_def_files.append(buildfilename)
absname = os.path.join(self.environment.get_source_dir(), buildfilename)
if not os.path.isfile(absname):
self.subdir = prev_subdir
raise InterpreterException("Non-existent build file '{!s}'".format(buildfilename))
with open(absname, encoding='utf8') as f:
code = f.read()
assert(isinstance(code, str))
try:
codeblock = mparser.Parser(code, absname).parse()
except mesonlib.MesonException as me:
me.file = absname
raise me
try:
self.evaluate_codeblock(codeblock)
except SubdirDoneRequest:
pass
self.subdir = prev_subdir
def _get_kwarg_install_mode(self, kwargs):
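        '''Convert the install_mode keyword argument (at most three entries:
        a permissions string, owner and group, where a literal false means
        "use the default") into a FileMode, or None when it was not given.'''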
if kwargs.get('install_mode', None) is None:
return None
install_mode = []
mode = mesonlib.typeslistify(kwargs.get('install_mode', []), (str, int))
for m in mode:
# We skip any arguments that are set to `false`
if m is False:
m = None
install_mode.append(m)
if len(install_mode) > 3:
raise InvalidArguments('Keyword argument install_mode takes at '
'most 3 arguments.')
if len(install_mode) > 0 and install_mode[0] is not None and \
not isinstance(install_mode[0], str):
raise InvalidArguments('Keyword argument install_mode requires the '
'permissions arg to be a string or false')
return FileMode(*install_mode)
@FeatureNewKwargs('install_data', '0.46.0', ['rename'])
@FeatureNewKwargs('install_data', '0.38.0', ['install_mode'])
@permittedKwargs(permitted_kwargs['install_data'])
def func_install_data(self, node, args, kwargs):
kwsource = mesonlib.stringlistify(kwargs.get('sources', []))
raw_sources = args + kwsource
sources = []
source_strings = []
for s in raw_sources:
if isinstance(s, mesonlib.File):
sources.append(s)
elif isinstance(s, str):
source_strings.append(s)
else:
raise InvalidArguments('Argument must be string or file.')
sources += self.source_strings_to_files(source_strings)
install_dir = kwargs.get('install_dir', None)
if not isinstance(install_dir, (str, type(None))):
raise InvalidArguments('Keyword argument install_dir not a string.')
install_mode = self._get_kwarg_install_mode(kwargs)
rename = kwargs.get('rename', None)
data = DataHolder(build.Data(sources, install_dir, install_mode, rename))
self.build.data.append(data.held_object)
return data
@FeatureNewKwargs('install_subdir', '0.42.0', ['exclude_files', 'exclude_directories'])
@FeatureNewKwargs('install_subdir', '0.38.0', ['install_mode'])
@permittedKwargs(permitted_kwargs['install_subdir'])
@stringArgs
def func_install_subdir(self, node, args, kwargs):
if len(args) != 1:
raise InvalidArguments('Install_subdir requires exactly one argument.')
subdir = args[0]
if 'install_dir' not in kwargs:
raise InvalidArguments('Missing keyword argument install_dir')
install_dir = kwargs['install_dir']
if not isinstance(install_dir, str):
raise InvalidArguments('Keyword argument install_dir not a string.')
if 'strip_directory' in kwargs:
if not isinstance(kwargs['strip_directory'], bool):
raise InterpreterException('"strip_directory" keyword must be a boolean.')
strip_directory = kwargs['strip_directory']
else:
strip_directory = False
if 'exclude_files' in kwargs:
exclude = extract_as_list(kwargs, 'exclude_files')
for f in exclude:
if not isinstance(f, str):
raise InvalidArguments('Exclude argument not a string.')
elif os.path.isabs(f):
raise InvalidArguments('Exclude argument cannot be absolute.')
exclude_files = set(exclude)
else:
exclude_files = set()
if 'exclude_directories' in kwargs:
exclude = extract_as_list(kwargs, 'exclude_directories')
for d in exclude:
if not isinstance(d, str):
raise InvalidArguments('Exclude argument not a string.')
elif os.path.isabs(d):
raise InvalidArguments('Exclude argument cannot be absolute.')
exclude_directories = set(exclude)
else:
exclude_directories = set()
exclude = (exclude_files, exclude_directories)
install_mode = self._get_kwarg_install_mode(kwargs)
idir = InstallDir(self.subdir, subdir, install_dir, install_mode, exclude, strip_directory)
self.build.install_dirs.append(idir)
return idir
@FeatureNewKwargs('configure_file', '0.47.0', ['copy', 'output_format', 'install_mode', 'encoding'])
@FeatureNewKwargs('configure_file', '0.46.0', ['format'])
@FeatureNewKwargs('configure_file', '0.41.0', ['capture'])
@FeatureNewKwargs('configure_file', '0.50.0', ['install'])
@FeatureNewKwargs('configure_file', '0.52.0', ['depfile'])
@permittedKwargs(permitted_kwargs['configure_file'])
def func_configure_file(self, node, args, kwargs):
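        '''configure_file() has three mutually exclusive modes selected by
        keyword argument: 'configuration' (substitute variables in the input,
        or dump the configuration data when there is no input), 'command'
        (run a command, optionally capturing its output) and 'copy' (copy the
        input into the build directory). The result may also be installed.'''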
if len(args) > 0:
raise InterpreterException("configure_file takes only keyword arguments.")
if 'output' not in kwargs:
raise InterpreterException('Required keyword argument "output" not defined.')
actions = set(['configuration', 'command', 'copy']).intersection(kwargs.keys())
if len(actions) == 0:
raise InterpreterException('Must specify an action with one of these '
'keyword arguments: \'configuration\', '
'\'command\', or \'copy\'.')
elif len(actions) == 2:
raise InterpreterException('Must not specify both {!r} and {!r} '
'keyword arguments since they are '
'mutually exclusive.'.format(*actions))
elif len(actions) == 3:
            raise InterpreterException('Must specify only one of {!r}, {!r}, and '
'{!r} keyword arguments since they are '
'mutually exclusive.'.format(*actions))
if 'capture' in kwargs:
if not isinstance(kwargs['capture'], bool):
raise InterpreterException('"capture" keyword must be a boolean.')
if 'command' not in kwargs:
raise InterpreterException('"capture" keyword requires "command" keyword.')
if 'format' in kwargs:
fmt = kwargs['format']
if not isinstance(fmt, str):
raise InterpreterException('"format" keyword must be a string.')
else:
fmt = 'meson'
if fmt not in ('meson', 'cmake', 'cmake@'):
raise InterpreterException('"format" possible values are "meson", "cmake" or "cmake@".')
if 'output_format' in kwargs:
output_format = kwargs['output_format']
if not isinstance(output_format, str):
raise InterpreterException('"output_format" keyword must be a string.')
else:
output_format = 'c'
if output_format not in ('c', 'nasm'):
            raise InterpreterException('"output_format" possible values are "c" or "nasm".')
if 'depfile' in kwargs:
depfile = kwargs['depfile']
if not isinstance(depfile, str):
raise InterpreterException('depfile file name must be a string')
else:
depfile = None
# Validate input
inputs = self.source_strings_to_files(extract_as_list(kwargs, 'input'))
inputs_abs = []
for f in inputs:
if isinstance(f, mesonlib.File):
inputs_abs.append(f.absolute_path(self.environment.source_dir,
self.environment.build_dir))
self.add_build_def_file(f)
else:
raise InterpreterException('Inputs can only be strings or file objects')
# Validate output
output = kwargs['output']
if not isinstance(output, str):
raise InterpreterException('Output file name must be a string')
if inputs_abs:
values = mesonlib.get_filenames_templates_dict(inputs_abs, None)
outputs = mesonlib.substitute_values([output], values)
output = outputs[0]
if depfile:
depfile = mesonlib.substitute_values([depfile], values)[0]
ofile_rpath = os.path.join(self.subdir, output)
if ofile_rpath in self.configure_file_outputs:
mesonbuildfile = os.path.join(self.subdir, 'meson.build')
current_call = "{}:{}".format(mesonbuildfile, self.current_lineno)
first_call = "{}:{}".format(mesonbuildfile, self.configure_file_outputs[ofile_rpath])
mlog.warning('Output file', mlog.bold(ofile_rpath, True), 'for configure_file() at', current_call, 'overwrites configure_file() output at', first_call)
else:
self.configure_file_outputs[ofile_rpath] = self.current_lineno
if os.path.dirname(output) != '':
raise InterpreterException('Output file name must not contain a subdirectory.')
(ofile_path, ofile_fname) = os.path.split(os.path.join(self.subdir, output))
ofile_abs = os.path.join(self.environment.build_dir, ofile_path, ofile_fname)
# Perform the appropriate action
if 'configuration' in kwargs:
conf = kwargs['configuration']
if isinstance(conf, dict):
FeatureNew('configure_file.configuration dictionary', '0.49.0').use(self.subproject)
conf = ConfigurationDataHolder(self.subproject, conf)
elif not isinstance(conf, ConfigurationDataHolder):
raise InterpreterException('Argument "configuration" is not of type configuration_data')
mlog.log('Configuring', mlog.bold(output), 'using configuration')
if len(inputs) > 1:
                raise InterpreterException('At most one input file can be given in configuration mode')
if inputs:
os.makedirs(os.path.join(self.environment.build_dir, self.subdir), exist_ok=True)
file_encoding = kwargs.setdefault('encoding', 'utf-8')
missing_variables, confdata_useless = \
mesonlib.do_conf_file(inputs_abs[0], ofile_abs, conf.held_object,
fmt, file_encoding)
if missing_variables:
var_list = ", ".join(map(repr, sorted(missing_variables)))
mlog.warning(
"The variable(s) %s in the input file '%s' are not "
"present in the given configuration data." % (
var_list, inputs[0]), location=node)
if confdata_useless:
ifbase = os.path.basename(inputs_abs[0])
mlog.warning('Got an empty configuration_data() object and found no '
'substitutions in the input file {!r}. If you want to '
'copy a file to the build dir, use the \'copy:\' keyword '
'argument added in 0.47.0'.format(ifbase), location=node)
else:
mesonlib.dump_conf_header(ofile_abs, conf.held_object, output_format)
conf.mark_used()
elif 'command' in kwargs:
if len(inputs) > 1:
FeatureNew('multiple inputs in configure_file()', '0.52.0').use(self.subproject)
# We use absolute paths for input and output here because the cwd
# that the command is run from is 'unspecified', so it could change.
# Currently it's builddir/subdir for in_builddir else srcdir/subdir.
values = mesonlib.get_filenames_templates_dict(inputs_abs, [ofile_abs])
if depfile:
depfile = os.path.join(self.environment.get_scratch_dir(), depfile)
values['@DEPFILE@'] = depfile
# Substitute @INPUT@, @OUTPUT@, etc here.
cmd = mesonlib.substitute_values(kwargs['command'], values)
mlog.log('Configuring', mlog.bold(output), 'with command')
res = self.run_command_impl(node, cmd, {}, True)
if res.returncode != 0:
raise InterpreterException('Running configure command failed.\n%s\n%s' %
(res.stdout, res.stderr))
if 'capture' in kwargs and kwargs['capture']:
dst_tmp = ofile_abs + '~'
file_encoding = kwargs.setdefault('encoding', 'utf-8')
with open(dst_tmp, 'w', encoding=file_encoding) as f:
f.writelines(res.stdout)
if inputs_abs:
shutil.copymode(inputs_abs[0], dst_tmp)
mesonlib.replace_if_different(ofile_abs, dst_tmp)
if depfile:
mlog.log('Reading depfile:', mlog.bold(depfile))
with open(depfile, 'r') as f:
df = DepFile(f.readlines())
deps = df.get_all_dependencies(ofile_fname)
for dep in deps:
self.add_build_def_file(dep)
elif 'copy' in kwargs:
if len(inputs_abs) != 1:
raise InterpreterException('Exactly one input file must be given in copy mode')
os.makedirs(os.path.join(self.environment.build_dir, self.subdir), exist_ok=True)
shutil.copyfile(inputs_abs[0], ofile_abs)
shutil.copystat(inputs_abs[0], ofile_abs)
else:
# Not reachable
raise AssertionError
# Install file if requested, we check for the empty string
# for backwards compatibility. That was the behaviour before
# 0.45.0 so preserve it.
idir = kwargs.get('install_dir', '')
if idir is False:
idir = ''
mlog.deprecation('Please use the new `install:` kwarg instead of passing '
'`false` to `install_dir:`', location=node)
if not isinstance(idir, str):
if isinstance(idir, list) and len(idir) == 0:
mlog.deprecation('install_dir: kwarg must be a string and not an empty array. '
'Please use the install: kwarg to enable or disable installation. '
'This will be a hard error in the next release.')
else:
raise InterpreterException('"install_dir" must be a string')
install = kwargs.get('install', idir != '')
if not isinstance(install, bool):
raise InterpreterException('"install" must be a boolean')
if install:
if not idir:
raise InterpreterException('"install_dir" must be specified '
'when "install" in a configure_file '
'is true')
cfile = mesonlib.File.from_built_file(ofile_path, ofile_fname)
install_mode = self._get_kwarg_install_mode(kwargs)
self.build.data.append(build.Data([cfile], idir, install_mode))
return mesonlib.File.from_built_file(self.subdir, output)
def extract_incdirs(self, kwargs):
prospectives = unholder(extract_as_list(kwargs, 'include_directories'))
result = []
for p in prospectives:
if isinstance(p, build.IncludeDirs):
result.append(p)
elif isinstance(p, str):
result.append(self.build_incdir_object([p]).held_object)
else:
raise InterpreterException('Include directory objects can only be created from strings or include directories.')
return result
@permittedKwargs(permitted_kwargs['include_directories'])
@stringArgs
def func_include_directories(self, node, args, kwargs):
return self.build_incdir_object(args, kwargs.get('is_system', False))
def build_incdir_object(self, incdir_strings, is_system=False):
if not isinstance(is_system, bool):
raise InvalidArguments('Is_system must be boolean.')
src_root = self.environment.get_source_dir()
build_root = self.environment.get_build_dir()
absbase_src = os.path.join(src_root, self.subdir)
absbase_build = os.path.join(build_root, self.subdir)
for a in incdir_strings:
if a.startswith(src_root):
raise InvalidArguments('Tried to form an absolute path to a source dir. '
'You should not do that but use relative paths instead.'
'''
To get include path to any directory relative to the current dir do
incdir = include_directories(dirname)
After this incdir will contain both the current source dir as well as the
corresponding build dir. It can then be used in any subdirectory and
Meson will take care of all the busywork to make paths work.
Dirname can even be '.' to mark the current directory. Though you should
remember that the current source and build directories are always
put in the include directories by default so you only need to do
include_directories('.') if you intend to use the result in a
different subdirectory.
''')
absdir_src = os.path.join(absbase_src, a)
absdir_build = os.path.join(absbase_build, a)
if not os.path.isdir(absdir_src) and not os.path.isdir(absdir_build):
raise InvalidArguments('Include dir %s does not exist.' % a)
i = IncludeDirsHolder(build.IncludeDirs(self.subdir, incdir_strings, is_system))
return i
@permittedKwargs(permitted_kwargs['add_test_setup'])
@stringArgs
def func_add_test_setup(self, node, args, kwargs):
if len(args) != 1:
raise InterpreterException('Add_test_setup needs one argument for the setup name.')
setup_name = args[0]
if re.fullmatch('([_a-zA-Z][_0-9a-zA-Z]*:)?[_a-zA-Z][_0-9a-zA-Z]*', setup_name) is None:
raise InterpreterException('Setup name may only contain alphanumeric characters.')
if ":" not in setup_name:
setup_name = (self.subproject if self.subproject else self.build.project_name) + ":" + setup_name
try:
inp = unholder(extract_as_list(kwargs, 'exe_wrapper'))
exe_wrapper = []
for i in inp:
if isinstance(i, str):
exe_wrapper.append(i)
elif isinstance(i, dependencies.ExternalProgram):
if not i.found():
raise InterpreterException('Tried to use non-found executable.')
exe_wrapper += i.get_command()
else:
raise InterpreterException('Exe wrapper can only contain strings or external binaries.')
except KeyError:
exe_wrapper = None
gdb = kwargs.get('gdb', False)
if not isinstance(gdb, bool):
raise InterpreterException('Gdb option must be a boolean')
timeout_multiplier = kwargs.get('timeout_multiplier', 1)
if not isinstance(timeout_multiplier, int):
raise InterpreterException('Timeout multiplier must be a number.')
is_default = kwargs.get('is_default', False)
if not isinstance(is_default, bool):
raise InterpreterException('is_default option must be a boolean')
if is_default:
if self.build.test_setup_default_name is not None:
raise InterpreterException('\'%s\' is already set as default. '
'is_default can be set to true only once' % self.build.test_setup_default_name)
self.build.test_setup_default_name = setup_name
env = self.unpack_env_kwarg(kwargs)
self.build.test_setups[setup_name] = build.TestSetup(exe_wrapper, gdb, timeout_multiplier, env)
@permittedKwargs(permitted_kwargs['add_global_arguments'])
@stringArgs
def func_add_global_arguments(self, node, args, kwargs):
for_machine = self.machine_from_native_kwarg(kwargs)
self.add_global_arguments(node, self.build.global_args[for_machine], args, kwargs)
@permittedKwargs(permitted_kwargs['add_global_link_arguments'])
@stringArgs
def func_add_global_link_arguments(self, node, args, kwargs):
for_machine = self.machine_from_native_kwarg(kwargs)
self.add_global_arguments(node, self.build.global_link_args[for_machine], args, kwargs)
@permittedKwargs(permitted_kwargs['add_project_arguments'])
@stringArgs
def func_add_project_arguments(self, node, args, kwargs):
for_machine = self.machine_from_native_kwarg(kwargs)
self.add_project_arguments(node, self.build.projects_args[for_machine], args, kwargs)
@permittedKwargs(permitted_kwargs['add_project_link_arguments'])
@stringArgs
def func_add_project_link_arguments(self, node, args, kwargs):
for_machine = self.machine_from_native_kwarg(kwargs)
self.add_project_arguments(node, self.build.projects_link_args[for_machine], args, kwargs)
def warn_about_builtin_args(self, args):
warnargs = ('/W1', '/W2', '/W3', '/W4', '/Wall', '-Wall', '-Wextra', '-Wpedantic')
optargs = ('-O0', '-O2', '-O3', '-Os', '/O1', '/O2', '/Os')
for arg in args:
if arg in warnargs:
mlog.warning('Consider using the built-in warning_level option instead of using "{}".'.format(arg),
location=self.current_node)
elif arg in optargs:
mlog.warning('Consider using the built-in optimization level instead of using "{}".'.format(arg),
location=self.current_node)
elif arg == '-g':
mlog.warning('Consider using the built-in debug option instead of using "{}".'.format(arg),
location=self.current_node)
elif arg == '-pipe':
mlog.warning("You don't need to add -pipe, Meson will use it automatically when it is available.",
location=self.current_node)
elif arg.startswith('-fsanitize'):
mlog.warning('Consider using the built-in option for sanitizers instead of using "{}".'.format(arg),
location=self.current_node)
elif arg.startswith('-std=') or arg.startswith('/std:'):
mlog.warning('Consider using the built-in option for language standard version instead of using "{}".'.format(arg),
location=self.current_node)
def add_global_arguments(self, node, argsdict, args, kwargs):
if self.is_subproject():
msg = 'Function \'{}\' cannot be used in subprojects because ' \
'there is no way to make that reliable.\nPlease only call ' \
'this if is_subproject() returns false. Alternatively, ' \
'define a variable that\ncontains your language-specific ' \
'arguments and add it to the appropriate *_args kwarg ' \
'in each target.'.format(node.func_name)
raise InvalidCode(msg)
frozen = self.project_args_frozen or self.global_args_frozen
self.add_arguments(node, argsdict, frozen, args, kwargs)
def add_project_arguments(self, node, argsdict, args, kwargs):
if self.subproject not in argsdict:
argsdict[self.subproject] = {}
self.add_arguments(node, argsdict[self.subproject],
self.project_args_frozen, args, kwargs)
def add_arguments(self, node, argsdict, args_frozen, args, kwargs):
if args_frozen:
msg = 'Tried to use \'{}\' after a build target has been declared.\n' \
'This is not permitted. Please declare all ' \
'arguments before your targets.'.format(node.func_name)
raise InvalidCode(msg)
if 'language' not in kwargs:
raise InvalidCode('Missing language definition in {}'.format(node.func_name))
self.warn_about_builtin_args(args)
for lang in mesonlib.stringlistify(kwargs['language']):
lang = lang.lower()
argsdict[lang] = argsdict.get(lang, []) + args
@noKwargs
@noArgsFlattening
def func_environment(self, node, args, kwargs):
if len(args) > 1:
            raise InterpreterException('environment takes only one optional positional argument')
elif len(args) == 1:
FeatureNew('environment positional arguments', '0.52.0').use(self.subproject)
initial_values = args[0]
if not isinstance(initial_values, dict) and not isinstance(initial_values, list):
raise InterpreterException('environment first argument must be a dictionary or a list')
else:
initial_values = {}
return EnvironmentVariablesHolder(initial_values)
@stringArgs
@noKwargs
def func_join_paths(self, node, args, kwargs):
return self.join_path_strings(args)
def run(self):
super().run()
mlog.log('Build targets in project:', mlog.bold(str(len(self.build.targets))))
FeatureNew.report(self.subproject)
FeatureDeprecated.report(self.subproject)
if not self.is_subproject():
self.print_extra_warnings()
if self.subproject == '':
self._print_summary()
def print_extra_warnings(self):
# TODO cross compilation
for c in self.coredata.compilers.host.values():
if c.get_id() == 'clang':
self.check_clang_asan_lundef()
break
def check_clang_asan_lundef(self):
if 'b_lundef' not in self.coredata.base_options:
return
if 'b_sanitize' not in self.coredata.base_options:
return
if (self.coredata.base_options['b_lundef'].value and
self.coredata.base_options['b_sanitize'].value != 'none'):
mlog.warning('''Trying to use {} sanitizer on Clang with b_lundef.
This will probably not work.
Try setting b_lundef to false instead.'''.format(self.coredata.base_options['b_sanitize'].value),
location=self.current_node)
def evaluate_subproject_info(self, path_from_source_root, subproject_dirname):
depth = 0
subproj_name = ''
segs = PurePath(path_from_source_root).parts
segs_spd = PurePath(subproject_dirname).parts
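        # Walk the path segments: each time the full subproject-dir prefix is consumed,
        # the next segment is a subproject name and we are nested one level deeper.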
while segs and segs[0] == segs_spd[0]:
if len(segs_spd) == 1:
subproj_name = segs[1]
segs = segs[2:]
depth += 1
else:
segs_spd = segs_spd[1:]
segs = segs[1:]
return (depth, subproj_name)
# Check that the indicated file is within the same subproject
# as we currently are. This is to stop people doing
# nasty things like:
#
# f = files('../../master_src/file.c')
#
# Note that this is validated only when the file
# object is generated. The result can be used in a different
# subproject than it is defined in (due to e.g. a
# declare_dependency).
def validate_within_subproject(self, subdir, fname):
norm = os.path.normpath(os.path.join(subdir, fname))
if os.path.isabs(norm):
if not norm.startswith(self.environment.source_dir):
# Grabbing files outside the source tree is ok.
# This is for vendor stuff like:
#
# /opt/vendorsdk/src/file_with_license_restrictions.c
return
norm = os.path.relpath(norm, self.environment.source_dir)
        assert not os.path.isabs(norm)
(num_sps, sproj_name) = self.evaluate_subproject_info(norm, self.subproject_dir)
plain_filename = os.path.basename(norm)
if num_sps == 0:
if not self.is_subproject():
return
raise InterpreterException('Sandbox violation: Tried to grab file %s from a different subproject.' % plain_filename)
if num_sps > 1:
raise InterpreterException('Sandbox violation: Tried to grab file %s from a nested subproject.' % plain_filename)
if sproj_name != self.subproject_directory_name:
raise InterpreterException('Sandbox violation: Tried to grab file %s from a different subproject.' % plain_filename)
def source_strings_to_files(self, sources):
results = []
mesonlib.check_direntry_issues(sources)
if not isinstance(sources, list):
sources = [sources]
for s in sources:
if isinstance(s, (mesonlib.File, GeneratedListHolder,
TargetHolder, CustomTargetIndexHolder,
GeneratedObjectsHolder)):
pass
elif isinstance(s, str):
self.validate_within_subproject(self.subdir, s)
s = mesonlib.File.from_source_file(self.environment.source_dir, self.subdir, s)
else:
raise InterpreterException('Source item is {!r} instead of '
'string or File-type object'.format(s))
results.append(s)
return results
def add_target(self, name, tobj):
if name == '':
raise InterpreterException('Target name must not be empty.')
if name.strip() == '':
raise InterpreterException('Target name must not consist only of whitespace.')
if name.startswith('meson-'):
raise InvalidArguments("Target names starting with 'meson-' are reserved "
"for Meson's internal use. Please rename.")
if name in coredata.forbidden_target_names:
raise InvalidArguments("Target name '%s' is reserved for Meson's "
"internal use. Please rename." % name)
# To permit an executable and a shared library to have the
# same name, such as "foo.exe" and "libfoo.a".
idname = tobj.get_id()
if idname in self.build.targets:
raise InvalidCode('Tried to create target "%s", but a target of that name already exists.' % name)
self.build.targets[idname] = tobj
if idname not in self.coredata.target_guids:
self.coredata.target_guids[idname] = str(uuid.uuid4()).upper()
@FeatureNew('both_libraries', '0.46.0')
def build_both_libraries(self, node, args, kwargs):
shared_holder = self.build_target(node, args, kwargs, SharedLibraryHolder)
# Check if user forces non-PIC static library.
pic = True
if 'pic' in kwargs:
pic = kwargs['pic']
elif 'b_staticpic' in self.environment.coredata.base_options:
pic = self.environment.coredata.base_options['b_staticpic'].value
if pic:
# Exclude sources from args and kwargs to avoid building them twice
static_args = [args[0]]
static_kwargs = kwargs.copy()
static_kwargs['sources'] = []
static_kwargs['objects'] = shared_holder.held_object.extract_all_objects()
else:
static_args = args
static_kwargs = kwargs
static_holder = self.build_target(node, static_args, static_kwargs, StaticLibraryHolder)
return BothLibrariesHolder(shared_holder, static_holder, self)
def build_library(self, node, args, kwargs):
default_library = self.coredata.get_builtin_option('default_library', self.subproject)
if default_library == 'shared':
return self.build_target(node, args, kwargs, SharedLibraryHolder)
elif default_library == 'static':
return self.build_target(node, args, kwargs, StaticLibraryHolder)
elif default_library == 'both':
return self.build_both_libraries(node, args, kwargs)
else:
            raise InterpreterException('Unknown default_library value: %s.' % default_library)
def build_target(self, node, args, kwargs, targetholder):
@FeatureNewKwargs('build target', '0.42.0', ['rust_crate_type', 'build_rpath', 'implicit_include_directories'])
@FeatureNewKwargs('build target', '0.41.0', ['rust_args'])
@FeatureNewKwargs('build target', '0.40.0', ['build_by_default'])
@FeatureNewKwargs('build target', '0.48.0', ['gnu_symbol_visibility'])
def build_target_decorator_caller(self, node, args, kwargs):
return True
build_target_decorator_caller(self, node, args, kwargs)
if not args:
raise InterpreterException('Target does not have a name.')
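        # The first positional argument is the target name; any remaining ones are sources.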
name, *sources = args
for_machine = self.machine_from_native_kwarg(kwargs)
if 'sources' in kwargs:
sources += listify(kwargs['sources'])
sources = self.source_strings_to_files(sources)
objs = extract_as_list(kwargs, 'objects')
kwargs['dependencies'] = extract_as_list(kwargs, 'dependencies')
kwargs['install_mode'] = self._get_kwarg_install_mode(kwargs)
if 'extra_files' in kwargs:
ef = extract_as_list(kwargs, 'extra_files')
kwargs['extra_files'] = self.source_strings_to_files(ef)
self.check_sources_exist(os.path.join(self.source_root, self.subdir), sources)
if targetholder == ExecutableHolder:
targetclass = build.Executable
elif targetholder == SharedLibraryHolder:
targetclass = build.SharedLibrary
elif targetholder == SharedModuleHolder:
targetclass = build.SharedModule
elif targetholder == StaticLibraryHolder:
targetclass = build.StaticLibrary
elif targetholder == JarHolder:
targetclass = build.Jar
else:
mlog.debug('Unknown target type:', str(targetholder))
raise RuntimeError('Unreachable code')
self.kwarg_strings_to_includedirs(kwargs)
# Filter out kwargs from other target types. For example 'soversion'
# passed to library() when default_library == 'static'.
kwargs = {k: v for k, v in kwargs.items() if k in targetclass.known_kwargs}
kwargs['include_directories'] = self.extract_incdirs(kwargs)
target = targetclass(name, self.subdir, self.subproject, for_machine, sources, objs, self.environment, kwargs)
target.project_version = self.project_version
if not self.environment.machines.matches_build_machine(for_machine):
self.add_cross_stdlib_info(target)
l = targetholder(target, self)
self.add_target(name, l.held_object)
self.project_args_frozen = True
return l
def kwarg_strings_to_includedirs(self, kwargs):
if 'd_import_dirs' in kwargs:
items = mesonlib.extract_as_list(kwargs, 'd_import_dirs')
cleaned_items = []
for i in items:
if isinstance(i, str):
# BW compatibility. This was permitted so we must support it
# for a few releases so people can transition to "correct"
# path declarations.
if os.path.normpath(i).startswith(self.environment.get_source_dir()):
mlog.warning('''Building a path to the source dir is not supported. Use a relative path instead.
This will become a hard error in the future.''', location=self.current_node)
i = os.path.relpath(i, os.path.join(self.environment.get_source_dir(), self.subdir))
i = self.build_incdir_object([i])
cleaned_items.append(i)
kwargs['d_import_dirs'] = cleaned_items
def get_used_languages(self, target):
result = {}
for i in target.sources:
# TODO other platforms
for lang, c in self.coredata.compilers.host.items():
if c.can_compile(i):
result[lang] = True
break
return result
def add_cross_stdlib_info(self, target):
if target.for_machine != MachineChoice.HOST:
return
for l in self.get_used_languages(target):
props = self.environment.properties.host
if props.has_stdlib(l) \
and self.subproject != props.get_stdlib(l)[0]:
target.add_deps(self.build.stdlibs.host[l])
def check_sources_exist(self, subdir, sources):
for s in sources:
if not isinstance(s, str):
continue # This means a generated source and they always exist.
fname = os.path.join(subdir, s)
if not os.path.isfile(fname):
raise InterpreterException('Tried to add non-existing source file %s.' % s)
# Only permit object extraction from the same subproject
def validate_extraction(self, buildtarget: InterpreterObject) -> None:
if not self.subdir.startswith(self.subproject_dir):
if buildtarget.subdir.startswith(self.subproject_dir):
raise InterpreterException('Tried to extract objects from a subproject target.')
else:
if not buildtarget.subdir.startswith(self.subproject_dir):
raise InterpreterException('Tried to extract objects from the main project from a subproject.')
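            # Both targets live under the subproject dir; the second path component
            # (the subproject name) must be the same for both.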
if self.subdir.split('/')[1] != buildtarget.subdir.split('/')[1]:
raise InterpreterException('Tried to extract objects from a different subproject.')
def is_subproject(self):
return self.subproject != ''
@noKwargs
@noArgsFlattening
def func_set_variable(self, node, args, kwargs):
if len(args) != 2:
raise InvalidCode('Set_variable takes two arguments.')
varname, value = args
self.set_variable(varname, value)
@noKwargs
@noArgsFlattening
def func_get_variable(self, node, args, kwargs):
if len(args) < 1 or len(args) > 2:
raise InvalidCode('Get_variable takes one or two arguments.')
varname = args[0]
if isinstance(varname, Disabler):
return varname
if not isinstance(varname, str):
raise InterpreterException('First argument must be a string.')
try:
return self.variables[varname]
except KeyError:
pass
if len(args) == 2:
return args[1]
raise InterpreterException('Tried to get unknown variable "%s".' % varname)
@stringArgs
@noKwargs
def func_is_variable(self, node, args, kwargs):
if len(args) != 1:
            raise InvalidCode('Is_variable takes one argument.')
varname = args[0]
return varname in self.variables
@staticmethod
def machine_from_native_kwarg(kwargs: T.Dict[str, T.Any]) -> MachineChoice:
native = kwargs.get('native', False)
if not isinstance(native, bool):
raise InvalidArguments('Argument to "native" must be a boolean.')
return MachineChoice.BUILD if native else MachineChoice.HOST
@FeatureNew('is_disabler', '0.52.0')
@noKwargs
def func_is_disabler(self, node, args, kwargs):
if len(args) != 1:
raise InvalidCode('Is_disabler takes one argument.')
varname = args[0]
return isinstance(varname, Disabler)
| 46.847044 | 197 | 0.609497 | [
"Apache-2.0"
] | tolnaisz/meson | mesonbuild/interpreter.py | 222,664 | Python |
# BSD 3-Clause License; see https://github.com/scikit-hep/awkward-1.0/blob/main/LICENSE
import pytest # noqa: F401
import numpy as np # noqa: F401
import cupy as cp # noqa: F401
import awkward as ak # noqa: F401
def test_num_1():
content = ak.Array(
["one", "two", "three", "four", "five", "six", "seven", "eight", "nine"]
).layout
bitmask = ak.layout.IndexU8(np.array([40, 34], dtype=np.uint8))
array = ak.Array(ak.layout.BitMaskedArray(bitmask, content, False, 9, False))
cuda_array = ak.to_kernels(array, "cuda")
assert ak.num(cuda_array, 0) == ak.num(array, 0)
assert ak.num(cuda_array, 1).tolist() == ak.num(array, 1).tolist()
def test_num_2():
content = ak.Array(
["one", "two", "three", "four", "five", "six", "seven", "eight", "nine"]
).layout
    bytemask = ak.layout.Index8(np.array([False, True, False], dtype=np.bool_))
array = ak.Array(ak.layout.ByteMaskedArray(bytemask, content, True))
cuda_array = ak.to_kernels(array, "cuda")
assert ak.num(cuda_array, 0) == ak.num(array, 0)
assert ak.num(cuda_array, 1).tolist() == ak.num(array, 1).tolist()
def test_num_3():
array = ak.Array(ak.layout.NumpyArray(np.array([0.0, 1.1, 2.2, 3.3, 4.4, 5.5])))
cuda_array = ak.to_kernels(array, "cuda")
assert ak.num(cuda_array, 0) == ak.num(array, 0)
def test_num_4():
array = ak.Array(
ak.layout.NumpyArray(np.array([[0.0, 1.1], [2.2, 3.3], [4.4, 5.5]]))
)
cuda_array = ak.to_kernels(array, "cuda")
assert ak.num(cuda_array, 0) == ak.num(array, 0)
assert ak.num(cuda_array, 1).tolist() == ak.num(array, 1).tolist()
def test_num_5():
array = ak.Array(ak.layout.EmptyArray())
cuda_array = ak.to_kernels(array, "cuda")
assert ak.num(cuda_array, 0) == ak.num(array, 0)
def test_num_6():
content = ak.layout.NumpyArray(
np.array([1.1, 2.2, 3.3, 4.4, 5.5, 6.6, 7.7, 8.8, 9.9])
)
offsets = ak.layout.Index64(np.array([0, 3, 3, 5, 6, 9]))
array = ak.Array(ak.layout.ListOffsetArray64(offsets, content))
cuda_array = ak.to_kernels(array, "cuda")
assert ak.num(cuda_array, 0) == ak.num(array, 0)
assert ak.num(cuda_array, 1).tolist() == ak.num(array, 1).tolist()
def test_num_7():
content = ak.layout.NumpyArray(
np.array([1.1, 2.2, 3.3, 4.4, 5.5, 6.6, 7.7, 8.8, 9.9])
)
offsets = ak.layout.IndexU32(np.array([0, 3, 3, 5, 6, 9]))
array = ak.Array(ak.layout.ListOffsetArrayU32(offsets, content))
cuda_array = ak.to_kernels(array, "cuda")
assert ak.num(cuda_array, 0) == ak.num(array, 0)
assert ak.num(cuda_array, 1).tolist() == ak.num(array, 1).tolist()
def test_num_8():
content = ak.layout.NumpyArray(
np.array([0.0, 1.1, 2.2, 3.3, 4.4, 5.5, 6.6, 7.7, 8.8, 9.9, 10.10])
)
offsets = ak.layout.Index64(np.array([0, 3, 3, 5, 6, 10, 10]))
listoffsetarray = ak.layout.ListOffsetArray64(offsets, content)
regulararray = ak.layout.RegularArray(listoffsetarray, 2)
starts = ak.layout.Index64(np.array([0, 1]))
stops = ak.layout.Index64(np.array([2, 3]))
listarray = ak.layout.ListArray64(starts, stops, regulararray)
cuda_listoffsetarray = ak.to_kernels(listoffsetarray, "cuda")
assert ak.num(cuda_listoffsetarray, 0) == ak.num(ak.Array(listoffsetarray), 0)
assert (
ak.num(cuda_listoffsetarray, 1).tolist()
== ak.num(ak.Array(listoffsetarray), 1).tolist()
)
cuda_regulararray = ak.to_kernels(regulararray, "cuda")
assert ak.num(cuda_regulararray, 0) == ak.num(ak.Array(regulararray), 0)
assert (
ak.num(cuda_regulararray, 1).tolist()
== ak.num(ak.Array(regulararray), 1).tolist()
)
cuda_listarray = ak.to_kernels(listarray, "cuda")
assert ak.num(cuda_listarray, 0) == ak.num(ak.Array(listarray), 0)
assert ak.num(cuda_listarray, 1).tolist() == ak.num(ak.Array(listarray), 1).tolist()
content1 = ak.layout.NumpyArray(np.array([1, 2, 3, 4, 5]))
content2 = ak.layout.NumpyArray(
np.array([1.1, 2.2, 3.3, 4.4, 5.5, 6.6, 7.7, 8.8, 9.9])
)
offsets = ak.layout.Index32(np.array([0, 3, 3, 5, 6, 9]))
recordarray = ak.Array(
ak.layout.RecordArray(
[content1, listoffsetarray, content2, content1],
keys=["one", "two", "2", "wonky"],
)
)
cuda_recordarray = ak.to_kernels(recordarray, "cuda")
assert ak.num(cuda_recordarray, 0).tolist() == ak.num(recordarray, 0).tolist()
content0 = ak.Array([[1.1, 2.2, 3.3], [], [4.4, 5.5]]).layout
content = ak.Array(
["one", "two", "three", "four", "five", "six", "seven", "eight", "nine"]
).layout
tags = ak.layout.Index8(np.array([1, 1, 0, 0, 1, 0, 1, 1], dtype=np.int8))
index = ak.layout.Index32(np.array([0, 1, 0, 1, 2, 2, 4, 3], dtype=np.int32))
unionarray = ak.Array(ak.layout.UnionArray8_32(tags, index, [content0, content1]))
cuda_unionarray = ak.to_kernels(unionarray, "cuda")
assert ak.num(cuda_unionarray, 0) == ak.num(unionarray, 0)
def test_num_9():
content = ak.layout.NumpyArray(
np.array([0.0, 1.1, 2.2, 3.3, 4.4, 5.5, 6.6, 7.7, 8.8, 9.9])
)
index = ak.layout.Index32(np.array([0, 2, 4, 6, 8, 9, 7, 5], dtype=np.int64))
indexedarray = ak.Array(ak.layout.IndexedArray32(index, content))
cuda_indexedarray = ak.to_kernels(indexedarray, "cuda")
assert ak.num(cuda_indexedarray, 0) == ak.num(indexedarray, 0)
ioa = ak.Array(
ak.layout.IndexedOptionArray32(
ak.layout.Index32([-30, 19, 6, 7, -3, 21, 13, 22, 17, 9, -12, 16]),
ak.layout.NumpyArray(
np.array(
[
5.2,
1.7,
6.7,
-0.4,
4.0,
7.8,
3.8,
6.8,
4.2,
0.3,
4.6,
6.2,
6.9,
-0.7,
3.9,
1.6,
8.7,
-0.7,
3.2,
4.3,
4.0,
5.8,
4.2,
7.0,
5.6,
3.8,
]
)
),
)
)
cuda_ioa = ak.to_kernels(ioa, "cuda")
ak.to_kernels(cuda_ioa, "cpu")
assert ak.num(cuda_ioa, 0) == ak.num(ioa, 0)
| 36.258242 | 88 | 0.54387 | [
"BSD-3-Clause"
] | colesbury/awkward-1.0 | tests-cuda/test_0345-cuda-num.py | 6,599 | Python |
from kfp.components import create_component_from_func, InputPath, OutputPath
def keras_convert_hdf5_model_to_tf_saved_model(
model_path: InputPath('KerasModelHdf5'),
converted_model_path: OutputPath('TensorflowSavedModel'),
):
'''Converts Keras HDF5 model to Tensorflow SavedModel format.
Args:
model_path: Keras model in HDF5 format.
converted_model_path: Keras model in Tensorflow SavedModel format.
Annotations:
author: Alexey Volkov <[email protected]>
'''
from pathlib import Path
from tensorflow import keras
model = keras.models.load_model(filepath=model_path)
keras.models.save_model(model=model, filepath=converted_model_path, save_format='tf')
if __name__ == '__main__':
keras_convert_hdf5_model_to_tf_saved_model_op = create_component_from_func(
keras_convert_hdf5_model_to_tf_saved_model,
base_image='tensorflow/tensorflow:2.3.0',
packages_to_install=['h5py==2.10.0'],
output_component_file='component.yaml',
annotations={
"author": "Alexey Volkov <[email protected]>",
"canonical_location": "https://raw.githubusercontent.com/Ark-kun/pipeline_components/master/components/_converters/KerasModelHdf5/to_TensorflowSavedModel/component.yaml",
},
)
| 39.058824 | 182 | 0.734187 | [
"Apache-2.0"
] | 9rince/kfp | components/_converters/KerasModelHdf5/to_TensorflowSavedModel/component.py | 1,328 | Python |
import logging
from croncierge import cmd_services
def log_cmd(cmd_response):
logging.info(f"Command response:\n{cmd_response}")
def test_cmd_stdout():
cmd = "python3 /home/maxim/projects/celecron/tests/test_croncierge/debug_cmd.py"
log_cmd(cmd_services.run_cmd(cmd))
def test_cmd_stderr():
cmd = "python3 tests/test_croncierge/debug_cmd_error.py"
log_cmd(cmd_services.run_cmd(cmd))
if __name__ == '__main__':
logging.basicConfig(
format="%(asctime)s [%(levelname)s] %(message)s",
level=logging.DEBUG)
... | 25.272727 | 84 | 0.717626 | [
"Apache-2.0"
] | mburdeev/croncierge | tests/test_croncierge/test_cmd_services.py | 556 | Python |
from datetime import timedelta
import pytest
from django.utils import timezone
from electeez_auth.models import User
@pytest.mark.django_db
def test_otp(client):
user = User.objects.create(email='[email protected]')
token = user.otp_new(redirect='valid')
response = client.post(token.path)
assert response['Location'] == 'valid'
# can't use the link twice
response = client.post(token.path)
assert response['Location'] != 'valid'
# try expired link
token = user.otp_new()
token.otp_expiry = timezone.now() - timedelta(minutes=1)
token.save()
response = client.post(token.path)
assert response['Location'] != 'valid'
| 25.846154 | 60 | 0.696429 | [
"MIT"
] | Joneswn/Baloti | electeez_auth/test_otp.py | 672 | Python |
import sys
from query_common import filter_records, ProjectMixins
from redcap import Project # note this is from PyCap.redcap
from typing import List
"""
This class of functions are responsible of retrieving relevant data structures from the CNFUN tables
"""
class CNFUN_project(ProjectMixins):
"""
One baby can have many admissions CaseIDs.
One hospital record can have many CaseIDs.
One baby has only one hospital record number.
"""
def __init__(
self, Token, URL, get_all_field=False,
):
"""
Create a project using PyCap
:param Token:
:param URL:
:return:
"""
# Several key properties we'll use throughout
self.project = Project(URL, Token)
# These are very important ID fields from the
fields_keyid = ["patientID", "cf_p_cnnpatientui"]
# For now, make sure to onyl get the data related to these key ids to reduce load time
self.data = self.get_fields(fields_keyid)
# if specified, get all the records.
if get_all_field:
self.data = self.project.export_records()
def filter_with_CNNPatientUI(self, CNNPatientUI: str or List[str]):
"""
        Filter self.data so that only the records whose cf_p_cnnpatientui matches
        the given value(s) are retained.
        :param CNNPatientUI: a single CNNPatientUI string or a list of them.
        :return: the filtered list of records.
"""
list_filtered = None
filtered_field = "cf_p_cnnpatientui"
        # Handle the case where CNNPatientUI is a single string instead of a list (allows batch use).
if type(CNNPatientUI) is str:
CNNPatientUI = [CNNPatientUI]
list_filtered = filter_records(self.data, filtered_field, CNNPatientUI)
return list_filtered
def get_PatientID_with_CNNPatientUI(self, CNNPatientUI: str or List[str]):
"""
PatientID has 1:1 correspondence with CNNPatientUI which is the same as PatientUI from CNN Baby table.
:return:
"""
# Listify the CNNPatientUI
if type(CNNPatientUI) is str:
CNNPatientUI = [CNNPatientUI]
# Filter with the information
list_filtered_dict = self.filter_with_CNNPatientUI(CNNPatientUI)
# Aggregate the list_PatientID
list_PatientID = []
for case in list_filtered_dict:
list_PatientID.append(case["patientid"])
return list_PatientID
def get_records_CNFUN(self, PatientID: str or List[str]):
"""
        Retrieve the CNFUN records for the given PatientID(s); PatientID is the record index in this project.
        :param PatientID: a single PatientID string or a list of them.
        :return: the exported records.
"""
if type(PatientID) is str:
PatientID = [PatientID]
cases_data = self.project.export_records(records=PatientID)
return cases_data
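# Illustrative usage (token/URL values below are placeholders, not real credentials):
#   project = CNFUN_project(Token="<redcap-token>", URL="https://redcap.example.org/api/")
#   patient_ids = project.get_PatientID_with_CNNPatientUI(["12345"])
#   records = project.get_records_CNFUN(patient_ids)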
| 32.44186 | 110 | 0.650538 | [
"MIT"
] | CNBP/RCAPI | query_CNFUN.py | 2,790 | Python |
from user import User
__all__ = [
"User"
] | 9.2 | 21 | 0.630435 | [
"MIT"
] | WuraLab/wuralab.github.io | pilot/__init__.py | 46 | Python |
# coding: utf-8
"""
ESP Documentation
The Evident Security Platform API (version 2.0) is designed to allow users granular control over their Amazon Web Service security experience by allowing them to review alerts, monitor signatures, and create custom signatures.
OpenAPI spec version: v2_sdk
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
from ..extensions.base_object import BaseObject
import re
class Role(BaseObject):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self, id=None, name=None, created_at=None, updated_at=None):
"""
Role - a model defined in Swagger
:param dict swaggerTypes: The key is attribute name
and the value is attribute type.
:param dict attributeMap: The key is attribute name
and the value is json key in definition.
"""
self.swagger_types = {
'id': 'int',
'name': 'str',
'created_at': 'datetime',
'updated_at': 'datetime'
}
self.attribute_map = {
'id': 'id',
'name': 'name',
'created_at': 'created_at',
'updated_at': 'updated_at'
}
self._id = id
self._name = name
self._created_at = created_at
self._updated_at = updated_at
@property
def id(self):
"""
Gets the id of this Role.
Unique ID
:return: The id of this Role.
:rtype: int
"""
return self._id
@id.setter
def id(self, id):
"""
Sets the id of this Role.
Unique ID
:param id: The id of this Role.
:type: int
"""
self._id = id
@property
def name(self):
"""
Gets the name of this Role.
The name of the role
:return: The name of this Role.
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""
Sets the name of this Role.
The name of the role
:param name: The name of this Role.
:type: str
"""
self._name = name
@property
def created_at(self):
"""
Gets the created_at of this Role.
ISO 8601 timestamp when the resource was created
:return: The created_at of this Role.
:rtype: datetime
"""
return self._created_at
@created_at.setter
def created_at(self, created_at):
"""
Sets the created_at of this Role.
ISO 8601 timestamp when the resource was created
:param created_at: The created_at of this Role.
:type: datetime
"""
self._created_at = created_at
@property
def updated_at(self):
"""
Gets the updated_at of this Role.
ISO 8601 timestamp when the resource was updated
:return: The updated_at of this Role.
:rtype: datetime
"""
return self._updated_at
@updated_at.setter
def updated_at(self, updated_at):
"""
Sets the updated_at of this Role.
ISO 8601 timestamp when the resource was updated
:param updated_at: The updated_at of this Role.
:type: datetime
"""
self._updated_at = updated_at
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
if not isinstance(other, Role):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
| 25.091371 | 230 | 0.539146 | [
"MIT"
] | EvidentSecurity/esp-sdk-python | esp_sdk/models/role.py | 4,943 | Python |
import pandas as pd
import numpy as np
import wave
from scipy.io import wavfile
import os
import librosa
import pydub
import ffmpeg
from librosa.feature import melspectrogram
import warnings
from sklearn.utils import shuffle
from sklearn.utils import class_weight
from PIL import Image
import sklearn
import tensorflow as tf
from tensorflow.keras.models import Sequential
from tensorflow.keras import layers
from tensorflow.keras import Input
from tensorflow.keras.models import Model
from tensorflow.keras.layers import (Dense, Flatten, Dropout, Activation, LSTM, SimpleRNN, Conv1D,
                                     Input, BatchNormalization, GlobalAveragePooling2D)
from tensorflow.keras.callbacks import ModelCheckpoint, ReduceLROnPlateau, EarlyStopping
from tensorflow.keras.utils import to_categorical
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.applications import EfficientNetB0
from keras.models import load_model
import boto3
import botocore
def model_input():
# Load the trained model
model = load_model("best_model.h5")
#Access S3 Bucket and Download the audio file
BUCKET_NAME = 'thunderstruck-duck' # replace with your bucket name
KEY = "sample_mp3.mp3" # replace with your object key
s3 = boto3.client('s3',
aws_access_key_id='AKIAISITTOGCJRNF46HQ',
aws_secret_access_key= 'bq/VRAme7BxDMqf3hgEMLZdrJNVvrtdQ4VmoGAdB',
)
BUCKET_NAME = "thunderstruck-duck"
try:
s3.download_file(BUCKET_NAME, KEY, "sample_mp3.mp3")
except botocore.exceptions.ClientError as e:
if e.response['Error']['Code'] == "404":
print("The object does not exist.")
# else:
# raise
#Load the audio data using librosa
wave_data, wave_rate = librosa.load("sample_mp3.mp3")
wave_data, _ = librosa.effects.trim(wave_data)
#only take 5s samples and add them to the dataframe
song_sample = []
sample_length = 5*wave_rate
#The variable below is chosen mainly to create a 216x216 image
N_mels=216
for idx in range(0,len(wave_data),sample_length):
song_sample = wave_data[idx:idx+sample_length]
if len(song_sample)>=sample_length:
mel = melspectrogram(song_sample, n_mels=N_mels)
db = librosa.power_to_db(mel)
normalised_db = sklearn.preprocessing.minmax_scale(db)
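            # Save the normalised spectrogram as an 8-bit RGB image that the CNN expects.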
filename = "sample_mel.tif"
db_array = (np.asarray(normalised_db)*255).astype(np.uint8)
db_image = Image.fromarray(np.array([db_array, db_array, db_array]).T)
db_image.save("{}{}".format("upload_mel/",filename))
    # Create a DataFrame pointing at the melspectrogram image we just saved
data_df = pd.DataFrame([{'bird': "sample bird", 'song_sample': f"/app/upload_mel/{filename}"}])
# Users/HyunsooKim/Desktop/Boot_Camp/Homework/BIRD_CALL/upload_mel/{filename}"}])
#Compile the model
callbacks = [ReduceLROnPlateau(monitor='val_loss', patience=2, verbose=1, factor=0.7),
EarlyStopping(monitor='val_loss', patience=5),
ModelCheckpoint(filepath='best_model.h5', monitor='val_loss', save_best_only=True)]
model.compile(loss="categorical_crossentropy", optimizer='adam')
    # Since only one melspectrogram is passed to the model, set the batch size to 1 and give the image size so the generator can load the file.
validation_batch_size_full = 1
target_size = (216,216)
train_datagen_full = ImageDataGenerator(
rescale=1. / 255
)
#Pass the columns into the model
validation_datagen_full = ImageDataGenerator(rescale=1. / 255)
validation_generator_full = validation_datagen_full.flow_from_dataframe(
dataframe = data_df,
x_col='song_sample',
y_col='bird',
directory='/',
target_size=target_size,
shuffle=False,
batch_size=validation_batch_size_full,
class_mode='categorical')
#Run the model
preds = model.predict_generator(validation_generator_full)
#We want to find the "INDEX" of maximum value within the pred, a numpy array. Use np.argmax and index into 0th element.
result = np.argmax(preds[0])
#load in the index dataframe, so we can find the name of the bird that matches the index of our result
index_df = pd.read_csv('xeno-canto_ca-nv_index.csv')
    # Rename english_cname to "birds" for easier access and clarity
bird_list = pd.DataFrame(index_df.english_cname.unique())
bird_list.columns = ["birds"]
#We are almost done. Save the percentage and the name of the bird into a variable and print it out!
percentage = preds[0][result]
Name_of_bird = bird_list['birds'][result]
print(f"This bird is {percentage} likely {Name_of_bird}")
final_data = {"likelihood": percentage, "name_of_bird": Name_of_bird}
return final_data
if __name__ == "__main__":
print(model_input()) | 39.045802 | 148 | 0.714761 | [
"Apache-2.0"
] | kimhyuns91/bird_call | import_and_model.py | 5,115 | Python |
# coding: utf-8
"""
Wavefront REST API
<p>The Wavefront REST API enables you to interact with Wavefront servers using standard REST API tools. You can use the REST API to automate commonly executed operations such as automatically tagging sources.</p><p>When you make REST API calls outside the Wavefront REST API documentation you must add the header \"Authorization: Bearer <<API-TOKEN>>\" to your HTTP requests.</p> # noqa: E501
OpenAPI spec version: v2
Contact: [email protected]
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import wavefront_api_client
from wavefront_api_client.models.dashboard import Dashboard # noqa: E501
from wavefront_api_client.rest import ApiException
class TestDashboard(unittest.TestCase):
"""Dashboard unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testDashboard(self):
"""Test Dashboard"""
# FIXME: construct object with mandatory attributes with example values
# model = wavefront_api_client.models.dashboard.Dashboard() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
| 30.243902 | 409 | 0.726613 | [
"Apache-2.0"
] | PowerOlive/python-client | test/test_dashboard.py | 1,240 | Python |
# coding: utf-8
"""Webmail tests."""
from __future__ import unicode_literals
import os
import shutil
import tempfile
try:
import mock
except ImportError:
from unittest import mock
from six import BytesIO
from django.core import mail
from django.urls import reverse
from modoboa.admin import factories as admin_factories
from modoboa.core import models as core_models
from modoboa.lib.tests import ModoTestCase
from . import data as tests_data
BODYSTRUCTURE_SAMPLE_WITH_FLAGS = [
(b'19 (UID 19 FLAGS (\\Seen) RFC822.SIZE 100000 BODYSTRUCTURE (("text" "plain" ("charset" "ISO-8859-1" "format" "flowed") NIL NIL "7bit" 2 1 NIL NIL NIL NIL)("message" "rfc822" ("name*" "ISO-8859-1\'\'%5B%49%4E%53%43%52%49%50%54%49%4F%4E%5D%20%52%E9%63%E9%70%74%69%6F%6E%20%64%65%20%76%6F%74%72%65%20%64%6F%73%73%69%65%72%20%64%27%69%6E%73%63%72%69%70%74%69%6F%6E%20%46%72%65%65%20%48%61%75%74%20%44%E9%62%69%74") NIL NIL "8bit" 3632 ("Wed, 13 Dec 2006 20:30:02 +0100" {70}', # noqa
b"[INSCRIPTION] R\xe9c\xe9ption de votre dossier d'inscription Free Haut D\xe9bit"), # noqa
(b' (("Free Haut Debit" NIL "inscription" "freetelecom.fr")) (("Free Haut Debit" NIL "inscription" "freetelecom.fr")) ((NIL NIL "hautdebit" "freetelecom.fr")) ((NIL NIL "nguyen.antoine" "wanadoo.fr")) NIL NIL NIL "<[email protected]>") ("text" "plain" ("charset" "iso-8859-1") NIL NIL "8bit" 1428 38 NIL ("inline" NIL) NIL NIL) 76 NIL ("inline" ("filename*" "ISO-8859-1\'\'%5B%49%4E%53%43%52%49%50%54%49%4F%4E%5D%20%52%E9%63%E9%70%74%69%6F%6E%20%64%65%20%76%6F%74%72%65%20%64%6F%73%73%69%65%72%20%64%27%69%6E%73%63%72%69%70%74%69%6F%6E%20%46%72%65%65%20%48%61%75%74%20%44%E9%62%69%74")) NIL NIL) "mixed" ("boundary" "------------040706080908000209030901") NIL NIL NIL) BODY[HEADER.FIELDS (DATE FROM TO CC SUBJECT)] {266}', # noqa
b'Date: Tue, 19 Dec 2006 19:50:13 +0100\r\nFrom: Antoine Nguyen <[email protected]>\r\nTo: Antoine Nguyen <[email protected]>\r\nSubject: [Fwd: [INSCRIPTION] =?ISO-8859-1?Q?R=E9c=E9ption_de_votre_?=\r\n =?ISO-8859-1?Q?dossier_d=27inscription_Free_Haut_D=E9bit=5D?=\r\n\r\n'
),
b')'
]
def get_gif():
"""Return gif."""
gif = BytesIO(
b"GIF87a\x01\x00\x01\x00\x80\x01\x00\x00\x00\x00ccc,\x00"
b"\x00\x00\x00\x01\x00\x01\x00\x00\x02\x02D\x01\x00;")
gif.name = "image.gif"
return gif
class IMAP4Mock(object):
"""Fake IMAP4 client."""
def __init__(self, *args, **kwargs):
self.untagged_responses = {}
def _quote(self, data):
return data
def _simple_command(self, name, *args, **kwargs):
if name == "CAPABILITY":
self.untagged_responses["CAPABILITY"] = [b""]
elif name == "LIST":
self.untagged_responses["LIST"] = [b"() \".\" \"INBOX\""]
elif name == "NAMESPACE":
self.untagged_responses["NAMESPACE"] = [b'(("" "/")) NIL NIL']
return "OK", None
def append(self, *args, **kwargs):
pass
def create(self, name):
return "OK", None
def delete(self, name):
return "OK", None
def list(self):
return "OK", [b"() \".\" \"INBOX\""]
def rename(self, oldname, newname):
return "OK", None
def uid(self, command, *args):
if command == "SORT":
return "OK", [b"19"]
elif command == "FETCH":
uid = int(args[0])
data = BODYSTRUCTURE_SAMPLE_WITH_FLAGS
if uid == 46931:
if args[1] == "(BODYSTRUCTURE)":
data = tests_data.BODYSTRUCTURE_ONLY_4
elif "HEADER.FIELDS" in args[1]:
data = tests_data.BODYSTRUCTURE_SAMPLE_4
else:
data = tests_data.BODY_PLAIN_4
elif uid == 46932:
if args[1] == "(BODYSTRUCTURE)":
data = tests_data.BODYSTRUCTURE_ONLY_5
elif "HEADER.FIELDS" in args[1]:
data = tests_data.BODYSTRUCTURE_SAMPLE_9
else:
data = tests_data.BODYSTRUCTURE_SAMPLE_10
elif uid == 33:
if args[1] == "(BODYSTRUCTURE)":
data = tests_data.BODYSTRUCTURE_EMPTY_MAIL
else:
data = tests_data.EMPTY_BODY
elif uid == 133872:
data = tests_data.COMPLETE_MAIL
return "OK", data
elif command == "STORE":
return "OK", []
class WebmailTestCase(ModoTestCase):
"""Check webmail backend."""
@classmethod
def setUpTestData(cls): # noqa
"""Create some users."""
super(WebmailTestCase, cls).setUpTestData()
admin_factories.populate_database()
cls.user = core_models.User.objects.get(username="[email protected]")
def setUp(self):
"""Connect with a simpler user."""
patcher = mock.patch("imaplib.IMAP4")
self.mock_imap4 = patcher.start()
self.mock_imap4.return_value = IMAP4Mock()
self.addCleanup(patcher.stop)
self.set_global_parameter("imap_port", 1435)
self.workdir = tempfile.mkdtemp()
os.mkdir("{}/webmail".format(self.workdir))
self.set_global_parameter("update_scheme", False, app="core")
url = reverse("core:login")
data = {
"username": self.user.username, "password": "toto"
}
self.client.post(url, data)
def tearDown(self):
"""Cleanup."""
shutil.rmtree(self.workdir)
def test_listmailbox(self):
"""Check listmailbox action."""
url = reverse("modoboa_webmail:index")
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
response = self.client.get(
"{}?action=listmailbox".format(url),
HTTP_X_REQUESTED_WITH="XMLHttpRequest"
)
self.assertEqual(response.status_code, 200)
self.assertIn(
"[email protected]", response.json()["listing"])
response = self.client.get(
"{}?action=listmailbox&pattern=Réception&criteria=Subject"
.format(url),
HTTP_X_REQUESTED_WITH="XMLHttpRequest"
)
self.assertEqual(response.status_code, 200)
self.assertIn(
"[email protected]", response.json()["listing"])
def test_attachments(self):
"""Check attachments."""
url = reverse("modoboa_webmail:index")
response = self.client.get("{}?action=compose".format(url))
self.assertEqual(response.status_code, 200)
self.assertIn("compose_mail", self.client.session)
url = reverse("modoboa_webmail:attachment_list")
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
self.set_global_parameters({"max_attachment_size": "10"})
with self.settings(MEDIA_ROOT=self.workdir):
response = self.client.post(url, {"attachment": get_gif()})
self.assertContains(response, "Attachment is too big")
self.set_global_parameters({"max_attachment_size": "10K"})
with self.settings(MEDIA_ROOT=self.workdir):
response = self.client.post(url, {"attachment": get_gif()})
self.assertContains(response, "upload_success")
self.assertEqual(
len(self.client.session["compose_mail"]["attachments"]), 1)
name = self.client.session["compose_mail"]["attachments"][0]["tmpname"]
path = "{}/webmail/{}".format(self.workdir, name)
self.assertTrue(os.path.exists(path))
url = reverse("modoboa_webmail:attachment_delete")
with self.settings(MEDIA_ROOT=self.workdir):
self.ajax_get("{}?name={}".format(url, name))
self.assertFalse(os.path.exists(path))
def test_delattachment_errors(self):
"""Check error cases."""
url = reverse("modoboa_webmail:index")
response = self.client.get("{}?action=compose".format(url))
self.assertEqual(response.status_code, 200)
self.assertIn("compose_mail", self.client.session)
url = reverse("modoboa_webmail:attachment_delete")
with self.settings(MEDIA_ROOT=self.workdir):
response = self.ajax_get("{}?name=".format(url))
self.assertEqual(response["status"], "ko")
self.assertEqual(response["respmsg"], "Bad query")
with self.settings(MEDIA_ROOT=self.workdir):
response = self.ajax_get("{}?name=test".format(url))
self.assertEqual(response["status"], "ko")
self.assertEqual(response["respmsg"], "Unknown attachment")
def test_send_mail(self):
"""Check compose form."""
url = "{}?action=compose".format(reverse("modoboa_webmail:index"))
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
response = self.client.post(
url, {
"from_": self.user.email, "to": "[email protected]",
"subject": "test", "body": "Test"
}
)
self.assertEqual(len(mail.outbox), 1)
self.assertEqual(
mail.outbox[0].from_email, "[email protected]")
# Try to send an email using HTML format
self.user.first_name = "Antoine"
self.user.last_name = "Nguyen"
self.user.parameters.set_value("editor", "html")
self.user.save()
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
mail.outbox = []
response = self.client.post(
url, {
"from_": self.user.email,
"to": "[email protected]", "subject": "test",
"body": "<p>Test</p>"
}
)
self.assertEqual(len(mail.outbox), 1)
self.assertEqual(
mail.outbox[0].from_email, '"Antoine Nguyen" <[email protected]>')
def test_signature(self):
"""Check signature in different formats."""
signature = "Antoine Nguyen"
self.user.parameters.set_value("signature", signature)
self.user.save()
response = self.client.get(reverse("modoboa_webmail:index"))
self.assertEqual(response.status_code, 200)
url = "{}?action=compose".format(reverse("modoboa_webmail:index"))
response = self.ajax_get(url)
self.assertIn(signature, response["listing"])
def test_custom_js_in_preferences(self):
"""Check that custom js is included."""
url = reverse("core:user_index")
response = self.client.get(url)
self.assertContains(response, "function toggleSignatureEditor()")
def test_send_mail_errors(self):
"""Check error cases."""
url = "{}?action=compose".format(reverse("modoboa_webmail:index"))
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
response = self.ajax_post(
url, {"to": "", "subject": "test", "body": "Test"}, 400
)
self.assertEqual(len(mail.outbox), 0)
def test_new_folder(self):
"""Test folder creation."""
url = reverse("modoboa_webmail:folder_add")
response = self.client.get(url)
self.assertContains(response, "Create a new folder")
response = self.ajax_post(url, {"name": "Test"})
self.assertIn("newmb", response)
def test_edit_folder(self):
"""Test folder edition."""
url = reverse("modoboa_webmail:folder_change")
response = self.client.get(url)
self.assertContains(response, "Invalid request")
url = "{}?name=Test".format(url)
response = self.client.get(url)
self.assertContains(response, "Edit folder")
session = self.client.session
session["webmail_navparams"] = {"inbox": "Test"}
session.save()
response = self.ajax_post(url, {"oldname": "Test", "name": "Toto"})
self.assertEqual(response["respmsg"], "Folder updated")
def test_delete_folder(self):
"""Test folder removal."""
url = reverse("modoboa_webmail:folder_delete")
self.ajax_get(url, status=400)
url = "{}?name=Test".format(url)
session = self.client.session
session["webmail_navparams"] = {"inbox": "Test"}
session.save()
self.ajax_get(url)
def test_reply_to_email(self):
"""Test reply form."""
url = "{}?action=reply&mbox=INBOX&mailid=46931".format(
reverse("modoboa_webmail:index"))
session = self.client.session
session["lastaction"] = "compose"
session.save()
response = self.ajax_get(url)
self.assertIn('id="id_origmsgid"', response["listing"])
response = self.client.post(
url, {
"from_": self.user.email, "to": "[email protected]",
"subject": "test", "body": "Test",
"origmsgid": "<id@localhost>"
}
)
self.assertEqual(len(mail.outbox), 1)
self.assertEqual(
mail.outbox[0].from_email, "[email protected]")
self.assertIn("References", mail.outbox[0].extra_headers)
def test_forward_email(self):
"""Test forward form."""
url = "{}?action=forward&mbox=INBOX&mailid=46932".format(
reverse("modoboa_webmail:index"))
session = self.client.session
session["lastaction"] = "compose"
session.save()
with self.settings(MEDIA_ROOT=self.workdir):
response = self.client.get(
url, HTTP_X_REQUESTED_WITH="XMLHttpRequest")
response = response.json()
self.assertIn('id="id_origmsgid"', response["listing"])
self.assertEqual(
len(self.client.session["compose_mail"]["attachments"]), 1)
response = self.client.post(
url, {
"from_": self.user.email, "to": "[email protected]",
"subject": "test", "body": "Test",
"origmsgid": "<id@localhost>"
}
)
self.assertEqual(len(mail.outbox), 1)
def test_getmailcontent_empty_mail(self):
"""Try to display an empty email."""
url = "{}?action=reply&mbox=INBOX&mailid=33".format(
reverse("modoboa_webmail:mailcontent_get"))
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
def test_getmailsource(self):
"""Try to display a message's source."""
url = "{}?mbox=INBOX&mailid=133872".format(
reverse("modoboa_webmail:mailsource_get"))
response = self.client.get(url)
self.assertContains(response, "Message-ID")
| 39.392473 | 762 | 0.600792 | [
"MIT"
] | modoboa/modoboa-webmail | modoboa_webmail/tests/test_views.py | 14,655 | Python |
# -*- coding: utf-8 -*-
"""
General description
-------------------
This example illustrates the effect of activity_costs.
There are the following components:
- demand_heat: heat demand (constant, for the sake of simplicity)
- fireplace: wood firing, burns "for free" if somebody is around
- boiler: gas firing, consumes (paid) gas
Notice that activity_costs is an attribute of NonConvex.
This is because it relies on the activity status of a component
which is only available for nonconvex flows.
Installation requirements
-------------------------
This example requires version 0.3 of oemof. Install by:
pip install 'oemof.solph>=0.4,<0.5'
"""
import numpy as np
import pandas as pd
from oemof import solph
try:
import matplotlib.pyplot as plt
except ImportError:
plt = None
##########################################################################
# Calculate parameters and initialize the energy system and
##########################################################################
periods = 24
time = pd.date_range('1/1/2018', periods=periods, freq='H')
demand_heat = np.full(periods, 5)
demand_heat[:4] = 0
demand_heat[4:18] = 4
activity_costs = np.full(periods, 5)
activity_costs[18:] = 0
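# Heat demand is 0 for hours 0-3, 4 units for hours 4-17 and 5 afterwards; the
# fireplace's activity cost is 5 until 18:00 (nobody is around) and 0 afterwards.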
es = solph.EnergySystem(timeindex=time)
b_heat = solph.Bus(label='b_heat')
es.add(b_heat)
sink_heat = solph.Sink(
label='demand',
inputs={b_heat: solph.Flow(fix=demand_heat, nominal_value=1)})
fireplace = solph.Source(
label='fireplace',
outputs={b_heat: solph.Flow(nominal_value=3,
variable_costs=0,
nonconvex=solph.NonConvex(
activity_costs=activity_costs))})
boiler = solph.Source(
label='boiler',
outputs={b_heat: solph.Flow(nominal_value=10,
variable_costs=1)})
es.add(sink_heat, fireplace, boiler)
##########################################################################
# Optimise the energy system
##########################################################################
# create an optimization problem and solve it
om = solph.Model(es)
# solve model
om.solve(solver='cbc', solve_kwargs={'tee': True})
##########################################################################
# Check and plot the results
##########################################################################
results = solph.processing.results(om)
# plot data
if plt is not None:
data = solph.views.node(results, 'b_heat')['sequences']
ax = data.plot(kind='line', drawstyle='steps-post', grid=True, rot=0)
ax.set_xlabel('Time')
ax.set_ylabel('Heat (arb. units)')
plt.show()
| 27.387755 | 74 | 0.554024 | [
"MIT"
] | Bachibouzouk/oemof-examples | oemof_examples/oemof.solph/v0.4.x/activity_costs/activity_costs.py | 2,684 | Python |
# Copyright (c) 2019 Graphcore Ltd. All rights reserved.
import numpy as np
import popart
import torch
import pytest
from op_tester import op_tester
def test_and(op_tester):
d1 = (np.random.randn(2) > 0).astype(np.bool_)
d2 = (np.random.randn(2) > 0).astype(np.bool_)
def init_builder(builder):
i1 = builder.addInputTensor(d1)
i2 = builder.addInputTensor(d2)
o = builder.aiOnnx.logical_and([i1, i2])
builder.addOutputTensor(o)
return [o]
def reference(ref_data):
t1 = torch.tensor(d1, dtype=torch.bool)
t2 = torch.tensor(d2, dtype=torch.bool)
out = t1 & t2
return [out]
op_tester.run(init_builder, reference, step_type='infer')
def test_broadcast_and(op_tester):
d1 = (np.random.randn(2, 2) > 0).astype(np.bool_)
d2 = (np.random.randn(2) > 0).astype(np.bool_)
def init_builder(builder):
i1 = builder.addInputTensor(d1)
i2 = builder.addInputTensor(d2)
o = builder.aiOnnx.logical_and([i1, i2])
builder.addOutputTensor(o)
return [o]
def reference(ref_data):
t1 = torch.tensor(d1, dtype=torch.bool)
t2 = torch.tensor(d2, dtype=torch.bool)
out = t1 & t2
return [out]
op_tester.run(init_builder, reference, step_type='infer')
def test_or(op_tester):
d1 = (np.random.randn(2) > 0).astype(np.bool_)
d2 = (np.random.randn(2) > 0).astype(np.bool_)
def init_builder(builder):
i1 = builder.addInputTensor(d1)
i2 = builder.addInputTensor(d2)
o = builder.aiOnnx.logical_or([i1, i2])
builder.addOutputTensor(o)
return [o]
def reference(ref_data):
t1 = torch.tensor(d1, dtype=torch.bool)
t2 = torch.tensor(d2, dtype=torch.bool)
out = t1 | t2
return [out]
op_tester.run(init_builder, reference, step_type='infer')
def test_broadcast_or(op_tester):
d1 = (np.random.randn(2, 2) > 0).astype(np.bool_)
d2 = (np.random.randn(2) > 0).astype(np.bool_)
def init_builder(builder):
i1 = builder.addInputTensor(d1)
i2 = builder.addInputTensor(d2)
o = builder.aiOnnx.logical_or([i1, i2])
print(o)
builder.addOutputTensor(o)
return [o]
def reference(ref_data):
t1 = torch.tensor(d1, dtype=torch.bool)
t2 = torch.tensor(d2, dtype=torch.bool)
out = t1 | t2
return [out]
op_tester.run(init_builder, reference, step_type='infer')
def test_not(op_tester):
d1 = (np.random.randn(2) > 0).astype(np.bool_)
print(d1)
def init_builder(builder):
i1 = builder.addInputTensor(d1)
o = builder.aiOnnx.logical_not([i1])
builder.addOutputTensor(o)
return [o]
def reference(ref_data):
return [np.logical_not(d1)]
op_tester.run(init_builder, reference, step_type='infer')
def test_equal(op_tester):
d1 = (np.random.randn(2)).astype(np.float32)
d2 = (np.random.randn(2)).astype(np.float32)
d2[0] = d1[0]
def init_builder(builder):
i1 = builder.addInputTensor(d1)
i2 = builder.addInputTensor(d2)
o = builder.aiOnnx.equal([i1, i2])
builder.addOutputTensor(o)
return [o]
def reference(ref_data):
t1 = torch.tensor(d1)
t2 = torch.tensor(d2)
out = torch.eq(t1, t2)
return [out]
op_tester.run(init_builder, reference, step_type='infer')
def test_broadcast_equal(op_tester):
d1 = (np.random.randn(2, 2)).astype(np.float32)
d2 = (np.random.randn(2)).astype(np.float32)
# d2[0][0] = d1[0]
def init_builder(builder):
i1 = builder.addInputTensor(d1)
i2 = builder.addInputTensor(d2)
o = builder.aiOnnx.equal([i1, i2])
builder.addOutputTensor(o)
return [o]
def reference(ref_data):
t1 = torch.tensor(d1)
t2 = torch.tensor(d2)
out = torch.eq(t1, t2)
return [out]
op_tester.run(init_builder, reference, step_type='infer')
| 27 | 61 | 0.61745 | [
"MIT"
] | gglin001/popart | tests/integration/operators_test/boolean_test.py | 4,023 | Python |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
""" Module to display weather info on polybar """
import argparse
import datetime
import logging
import os
import time
import requests
import importlib.util
# pylint: disable=redefined-builtin
from requests import ConnectionError
from requests.exceptions import HTTPError, Timeout
from util import color_polybar, color_bash as cb
class MyInternetIsShitty(Exception):
""" Custom exception """
pass
def get_args():
""" Get script argument """
parser = argparse.ArgumentParser(description='Show current weather on polybar')
parser.add_argument('log', nargs='?', help='Logging for debugging or not')
parser.add_argument('-u', '--unit', default='metric', nargs='?',
help='unit: metric or imperial. Default: metric')
return parser.parse_args()
def set_up_logging():
""" Set some logging parameter """
if importlib.util.find_spec('requests'):
# Shut up the request module logger
logging.getLogger("requests").setLevel(logging.WARNING)
logging.getLogger("urllib3").setLevel(logging.WARNING)
logging.basicConfig(format='[%(levelname)s] %(message)s', level=logging.DEBUG)
def get_day_or_night():
""" return 'day' or 'night' based on current hour """
hour = int(datetime.datetime.now().strftime('%H'))
if hour >= 18 or hour <= 5:
return 'night'
return 'day'
def get_weather_icon(weather_id):
""" Get weather icon based on weather condition """
day_night_status = get_day_or_night()
weather = {
'thunderstorm': 200 <= weather_id <= 232,
'rain': 300 <= weather_id <= 531,
'snow': 600 <= weather_id <= 622,
'atmosphere': 701 <= weather_id <= 781,
'squall': weather_id == 771,
'tornado': weather_id == 781 or weather_id == 900,
'clear_day': weather_id == 800 and day_night_status == 'day',
'clear_night': weather_id == 800 and day_night_status == 'night',
'tropical storm': weather_id == 901,
'hurricane': weather_id == 902,
'cold': weather_id == 903,
'hot': weather_id == 904,
'windy': weather_id == 905,
'cloudy': 801 <= weather_id <= 804,
'hail': weather_id == 906
}
if weather['thunderstorm']:
return ''
elif weather['rain']:
return ''
elif weather['snow'] or weather['cold']:
return ''
elif weather['atmosphere'] or weather['windy']:
return ''
elif (weather['squall'] or
weather['tornado'] or
weather['tropical storm'] or
weather['hurricane']):
return ''
elif weather['clear_day'] or weather['hot']:
return ''
elif weather['clear_night']:
return ''
elif weather['cloudy']:
return ''
elif weather['hail']:
return ''
def get_thermo_icon(temp_value, temp_unit):
""" Get thermometer icon based on temperature """
if temp_unit == 'F':
        temp_value = convert_temp_unit(temp_value, 'C')
if temp_value <= -15:
return ''
elif -15 < temp_value <= 0:
return ''
elif 0 < temp_value <= 15:
return ''
elif 15 < temp_value <= 30:
return ''
elif temp_value > 30:
return ''
def convert_temp_unit(temp_value, temp_unit):
""" Convert current temp_value to temp_unit """
if temp_unit == 'C':
return round((temp_value - 32) / 1.8)
elif temp_unit == 'F':
return round(temp_value * 1.8 + 32)
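# Round-trip examples (illustrative): convert_temp_unit(25, 'F') -> 77, convert_temp_unit(77, 'C') -> 25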
def get_api_key():
""" Get secret api key from a file on filesystem """
paren_dir = os.path.dirname(os.path.realpath(__file__))
api_path = os.path.join(paren_dir, 'weather_api.txt')
with open(api_path, 'r') as file:
api_key = file.read().replace('\n', '')
return api_key
def get_city_id():
""" Workaround to get city id based on my schedule """
region_code = {
'TPHCM': 1580578,
'TPHCM2': 1566083,
'Hai Duong': 1581326,
'Tan An': 1567069
}
hour = int(datetime.datetime.now().strftime('%H'))
weekday = datetime.datetime.now().strftime('%a')
# 5pm Fri to 5pm Sun: Tan An, else Hai Duong
if (hour >= 17 and weekday == 'Fri') or weekday == 'Sat' or (hour < 17 and weekday == 'Sun'):
return region_code['Tan An']
return region_code['Hai Duong']
def update_weather(city_id, units, api_key):
""" Update weather by using openweather api """
url = 'http://api.openweathermap.org/data/2.5/weather?id={}&appid={}&units={}'
    temp_unit = 'C' if units == 'metric' else 'F'
error_icon = color_polybar('', 'red')
try:
req = requests.get(url.format(city_id, api_key, units))
try:
description = req.json()['weather'][0]['description'].capitalize()
except ValueError:
print(error_icon, flush=True)
raise MyInternetIsShitty
temp_value = round(req.json()['main']['temp'])
temp = str(temp_value) + '°' + temp_unit
        thermo_icon = color_polybar(get_thermo_icon(temp_value, temp_unit), 'main')
weather_id = req.json()['weather'][0]['id']
weather_icon = color_polybar(get_weather_icon(weather_id), 'main')
print('{} {} {} {}'.format(weather_icon, description, thermo_icon, temp), flush=True)
except (HTTPError, Timeout, ConnectionError):
print(error_icon, flush=True)
raise MyInternetIsShitty
def main():
""" main function """
arg = get_args()
if arg.log == 'debug':
set_up_logging()
units = arg.unit
api_key = get_api_key()
city_id = get_city_id()
while True:
try:
update_weather(city_id, units, api_key)
except MyInternetIsShitty:
logging.info(cb('update failed: ', 'red'))
time.sleep(3)
else:
logging.info(cb('update success', 'green'))
time.sleep(700)
if __name__ == '__main__':
main()
# vim: nofoldenable
| 27.059406 | 94 | 0.662642 | [
"BSD-3-Clause"
] | NearHuscarl/dotfiles | .config/polybar/weather/weather.py | 5,497 | Python |
#!/usr/bin/env python3
import sys
import argparse
import time
import socket
from socket import socket as Socket
def main():
# Command line arguments. Use a server_port > 1024 by default so that we can run
# server without sudo.
parser = argparse.ArgumentParser()
parser.add_argument('--server-port', '-p', default=2081, type=int,
help='Server_Port to use')
parser.add_argument('--run-server', '-s', action='store_true',
help='Run a ping server')
parser.add_argument('server_address', default='localhost',
help='Server to ping, no effect if running as a server.')
args = parser.parse_args()
if args.run_server:
return run_server(args.server_port)
else:
return run_client(args.server_address, args.server_port,)
def run_server(server_port):
"""Run the UDP pinger server
"""
# Create the server socket (to handle UDP requests using ipv4), make sure
# it is always closed by using with statement.
with Socket(socket.AF_INET, socket.SOCK_DGRAM) as server_socket:
# The socket stays connected even after this script ends. So in order
# to allow the immediate reuse of the socket (so that we can kill and
# re-run the server while debugging) we set the following option. This
# is potentially dangerous in real code: in rare cases you may get junk
# data arriving at the socket.
server_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
# Set the server port
server_socket.bind(('', server_port))
# Start accepting ping requests
print("Ping server ready on port", server_port)
while True:
# Receive message and send one back
_, client_address = server_socket.recvfrom(1024)
server_socket.sendto("".encode(), client_address)
return 0
def run_client(server_address, server_port):
"""Ping a UDP pinger server running at the given address
"""
# Fill in the client side code here.
raise NotImplementedError
return 0
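# One possible shape of the client loop (sketch only; the exercise above is deliberately
# left unimplemented, and the payload/output format here is an assumption):
#
#     with Socket(socket.AF_INET, socket.SOCK_DGRAM) as client_socket:
#         client_socket.settimeout(1.0)
#         for sequence_number in range(10):
#             send_time = time.time()
#             client_socket.sendto("".encode(), (server_address, server_port))
#             try:
#                 client_socket.recvfrom(1024)
#                 print("Reply %d: RTT %.3f s" % (sequence_number, time.time() - send_time))
#             except socket.timeout:
#                 print("Request %d timed out" % sequence_number)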
if __name__ == "__main__":
sys.exit(main())
| 28.881579 | 84 | 0.653759 | [
"CC0-1.0"
] | akshayrb22/Kurose-and-Ross-socket-programming-exercises | ping/ping.py | 2,195 | Python |
matrix_a = [[1,2,3], [4,5,6]]
result = [[element for element in t] for t in zip(*matrix_a)]
print(result) | 35.666667 | 63 | 0.635514 | [
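# Expected output: [[1, 4], [2, 5], [3, 6]]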
"MIT"
] | caru1613/introduction_to_python_TEAMLAB_MOOC | lab_assignment/lab_bla/linux_mac/sample/matrix_transpose.py | 107 | Python |
from unittest import TestCase
import os.path as osp
from datumaro.components.format_detection import (
FormatDetectionConfidence, FormatRequirementsUnmet, apply_format_detector,
)
from datumaro.util.test_utils import TestDir
from tests.requirements import Requirements, mark_requirement
class FormatDetectionTest(TestCase):
def setUp(self) -> None:
test_dir_context = TestDir()
self._dataset_root = test_dir_context.__enter__()
self.addCleanup(test_dir_context.__exit__, None, None, None)
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_empty_detector(self):
result = apply_format_detector(self._dataset_root, lambda c: None)
self.assertEqual(result, FormatDetectionConfidence.MEDIUM)
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_custom_confidence(self):
result = apply_format_detector(self._dataset_root,
lambda c: FormatDetectionConfidence.LOW)
self.assertEqual(result, FormatDetectionConfidence.LOW)
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_root_path(self):
provided_root = None
def detect(context):
nonlocal provided_root
provided_root = context.root_path
apply_format_detector(self._dataset_root, detect)
self.assertEqual(provided_root, self._dataset_root)
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_fail(self):
def detect(context):
context.fail('abcde')
with self.assertRaises(FormatRequirementsUnmet) as result:
apply_format_detector(self._dataset_root, detect)
self.assertEqual(result.exception.failed_alternatives, ('abcde',))
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_require_file_success(self):
with open(osp.join(self._dataset_root, 'foobar.txt'), 'w'):
pass
selected_file = None
def detect(context):
nonlocal selected_file
selected_file = context.require_file('**/[fg]oo*.t?t')
result = apply_format_detector(self._dataset_root, detect)
self.assertEqual(result, FormatDetectionConfidence.MEDIUM)
self.assertEqual(selected_file, 'foobar.txt')
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_require_file_failure(self):
with open(osp.join(self._dataset_root, 'foobar.txt'), 'w'):
pass
def detect(context):
context.require_file('*/*')
with self.assertRaises(FormatRequirementsUnmet) as result:
apply_format_detector(self._dataset_root, detect)
self.assertEqual(len(result.exception.failed_alternatives), 1)
self.assertIn('*/*', result.exception.failed_alternatives[0])
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_require_file_exclude_fname_one(self):
with open(osp.join(self._dataset_root, 'foobar.txt'), 'w'):
pass
def detect(context):
context.require_file('foobar.*', exclude_fnames='*.txt')
with self.assertRaises(FormatRequirementsUnmet) as result:
apply_format_detector(self._dataset_root, detect)
self.assertEqual(len(result.exception.failed_alternatives), 1)
self.assertIn('foobar.*', result.exception.failed_alternatives[0])
self.assertIn('*.txt', result.exception.failed_alternatives[0])
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_require_file_exclude_fname_many(self):
for ext in ('txt', 'lst'):
with open(osp.join(self._dataset_root, f'foobar.{ext}'), 'w'):
pass
def detect(context):
context.require_file('foobar.*', exclude_fnames=('*.txt', '*.lst'))
with self.assertRaises(FormatRequirementsUnmet) as result:
apply_format_detector(self._dataset_root, detect)
self.assertEqual(len(result.exception.failed_alternatives), 1)
self.assertIn('foobar.*', result.exception.failed_alternatives[0])
self.assertIn('*.txt', result.exception.failed_alternatives[0])
self.assertIn('*.lst', result.exception.failed_alternatives[0])
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_probe_text_file_success(self):
with open(osp.join(self._dataset_root, 'foobar.txt'), 'w') as f:
print('123', file=f)
def detect(context):
with context.probe_text_file('foobar.txt', 'abcde') as f:
if next(f) != '123\n':
raise Exception
result = apply_format_detector(self._dataset_root, detect)
self.assertEqual(result, FormatDetectionConfidence.MEDIUM)
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_probe_text_file_failure_bad_file(self):
def detect(context):
with context.probe_text_file('foobar.txt', 'abcde'):
pass
with self.assertRaises(FormatRequirementsUnmet) as result:
apply_format_detector(self._dataset_root, detect)
self.assertEqual(result.exception.failed_alternatives,
('foobar.txt: abcde',))
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_probe_text_file_failure_exception(self):
with open(osp.join(self._dataset_root, 'foobar.txt'), 'w'):
pass
def detect(context):
with context.probe_text_file('foobar.txt', 'abcde'):
raise Exception
with self.assertRaises(FormatRequirementsUnmet) as result:
apply_format_detector(self._dataset_root, detect)
self.assertEqual(result.exception.failed_alternatives,
('foobar.txt: abcde',))
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_probe_text_file_nested_req(self):
with open(osp.join(self._dataset_root, 'foobar.txt'), 'w'):
pass
def detect(context):
with context.probe_text_file('foobar.txt', 'abcde'):
context.fail('abcde')
with self.assertRaises(FormatRequirementsUnmet) as result:
apply_format_detector(self._dataset_root, detect)
self.assertEqual(result.exception.failed_alternatives,
('abcde',))
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_require_any_success(self):
alternatives_executed = set()
def detect(context):
nonlocal alternatives_executed
with context.require_any():
with context.alternative():
alternatives_executed.add(1)
context.fail('bad alternative 1')
with context.alternative():
alternatives_executed.add(2)
# good alternative 2
with context.alternative():
alternatives_executed.add(3)
context.fail('bad alternative 3')
result = apply_format_detector(self._dataset_root, detect)
self.assertEqual(result, FormatDetectionConfidence.MEDIUM)
self.assertEqual(alternatives_executed, {1, 2, 3})
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_require_any_failure(self):
def detect(context):
with context.require_any():
with context.alternative():
context.fail('bad alternative 1')
with context.alternative():
context.fail('bad alternative 2')
with self.assertRaises(FormatRequirementsUnmet) as result:
apply_format_detector(self._dataset_root, detect)
self.assertEqual(result.exception.failed_alternatives,
('bad alternative 1', 'bad alternative 2'))
| 38.207921 | 79 | 0.671029 | [
"MIT"
] | TOsmanov/datumaro | tests/test_format_detection.py | 7,718 | Python |
#!/usr/bin/env python
import re
import time
from tools.multiclass_shared import prepare_data
# run with toy data
[traindat, label_traindat, testdat, label_testdat] = prepare_data()
# run with opt-digits if available
#[traindat, label_traindat, testdat, label_testdat] = prepare_data(False)
parameter_list = [[traindat,testdat,label_traindat,label_testdat,2.1,1,1e-5]]
def classifier_multiclass_ecoc (fm_train_real=traindat,fm_test_real=testdat,label_train_multiclass=label_traindat,label_test_multiclass=label_testdat,lawidth=2.1,C=1,epsilon=1e-5):
import shogun
from shogun import ECOCStrategy, LinearMulticlassMachine
from shogun import MulticlassAccuracy
from shogun import MulticlassLabels
import shogun as sg
def nonabstract_class(name):
try:
getattr(shogun, name)()
except TypeError:
return False
return True
encoders = [x for x in dir(shogun)
if re.match(r'ECOC.+Encoder', x) and nonabstract_class(x)]
decoders = [x for x in dir(shogun)
if re.match(r'ECOC.+Decoder', x) and nonabstract_class(x)]
fea_train = sg.features(fm_train_real)
fea_test = sg.features(fm_test_real)
gnd_train = MulticlassLabels(label_train_multiclass)
if label_test_multiclass is None:
gnd_test = None
else:
gnd_test = MulticlassLabels(label_test_multiclass)
base_classifier = sg.machine("LibLinear",
liblinear_solver_type="L2R_L2LOSS_SVC",
use_bias=True)
#print('Testing with %d encoders and %d decoders' % (len(encoders), len(decoders)))
#print('-' * 70)
#format_str = '%%15s + %%-10s %%-10%s %%-10%s %%-10%s'
#print((format_str % ('s', 's', 's')) % ('encoder', 'decoder', 'codelen', 'time', 'accuracy'))
def run_ecoc(ier, idr):
encoder = getattr(shogun, encoders[ier])()
decoder = getattr(shogun, decoders[idr])()
# whether encoder is data dependent
if hasattr(encoder, 'set_labels'):
encoder.set_labels(gnd_train)
encoder.set_features(fea_train)
strategy = ECOCStrategy(encoder, decoder)
classifier = LinearMulticlassMachine(strategy, fea_train, base_classifier, gnd_train)
classifier.train()
label_pred = classifier.apply(fea_test)
if gnd_test is not None:
evaluator = MulticlassAccuracy()
acc = evaluator.evaluate(label_pred, gnd_test)
else:
acc = None
return (classifier.get_num_machines(), acc)
for ier in range(len(encoders)):
for idr in range(len(decoders)):
t_begin = time.clock()
(codelen, acc) = run_ecoc(ier, idr)
if acc is None:
acc_fmt = 's'
acc = 'N/A'
else:
acc_fmt = '.4f'
t_elapse = time.clock() - t_begin
#print((format_str % ('d', '.3f', acc_fmt)) %
# (encoders[ier][4:-7], decoders[idr][4:-7], codelen, t_elapse, acc))
if __name__=='__main__':
print('MulticlassECOC')
classifier_multiclass_ecoc(*parameter_list[0])
| 31.655556 | 180 | 0.703054 | [
"BSD-3-Clause"
] | neroangelo296/shogun | examples/undocumented/python/classifier_multiclass_ecoc.py | 2,849 | Python |
# Import dependencies
# Math/Torch
import numpy as np
import torch.nn as nn
# Typing
from typing import List
# Define metric class
class MRR(nn.Module):
"""Compute MRR metric (Mean reciprocal rank)"""
def __init__(self, max_rank = 10):
super(MRR, self).__init__()
# Set max mrr rank
self.max_rank = max_rank
def _calculate_reciprocal_rank(self, hypothesis_ids: np.ndarray, reference_id: int) -> float:
"""Calculate the reciprocal rank for a given hypothesis and reference
Params:
hypothesis_ids: Iterator of hypothesis ids (as numpy array) ordered by its relevance
reference_id: Reference id (as a integer) of the correct id of response
Returns:
reciprocal rank
"""
# Assure hypothesis_ids is a numpy array
hypothesis_ids = np.asarray(hypothesis_ids)
# Calculate rank
try:
rank = np.where(hypothesis_ids == reference_id)[0][0] + 1
except IndexError:
rank = self.max_rank + 1
# Rank grater then max_rank is set to zero
if rank > self.max_rank:
reciprocal_rank = 0.0
else:
# Calculate reciprocal rank
reciprocal_rank = 1. / rank
return reciprocal_rank
def forward(self, batch_hypothesis_ids: List[np.ndarray], batch_reference_id: List[int]) -> float:
"""Score the mean reciprocal rank for the batch
Example from http://en.wikipedia.org/wiki/Mean_reciprocal_rank
>>> batch_hypothesis_ids = [[1, 0, 2], [0, 2, 1], [1, 0, 2]]
>>> batch_reference_id = [2, 2, 1]
>>> mrr = MRR()
>>> mrr(batch_hypothesis_ids, batch_reference_id)
0.61111111111111105
Args:
batch_hypothesis_ids: Batch of hypothesis ids (as numpy array) ordered by its relevance
reference_id: Batch of reference id (as a integer) of the correct id of response
Returns:
Mean reciprocal rank (MRR)
"""
# Assure batches have same length
assert len(batch_hypothesis_ids) == len(batch_reference_id), "Hypothesis batch and reference batch must have same length."
# Size of batch
batch_size = len(batch_hypothesis_ids)
# MRR to be calculated
mrr = 0
for hypothesis_ids, reference_id in zip(batch_hypothesis_ids, batch_reference_id):
# Calculate reciprocal rank
reciprocal_rank = self._calculate_reciprocal_rank(hypothesis_ids, reference_id)
# Add to MRR
mrr += reciprocal_rank/batch_size
return mrr
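if __name__ == '__main__':
    # Minimal usage sketch reproducing the docstring example above.
    mrr = MRR()
    print(mrr([[1, 0, 2], [0, 2, 1], [1, 0, 2]], [2, 2, 1]))  # mean of 1/3, 1/2 and 1 ~= 0.611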
| 32.447059 | 130 | 0.603698 | [
"MIT",
"Apache-2.0",
"BSD-3-Clause"
] | platiagro/tasks | tasks/retriever/mrr.py | 2,758 | Python |
#
# 1573. Number of Ways to Split a String
#
# Q: https://leetcode.com/problems/number-of-ways-to-split-a-string/
# A: https://leetcode.com/problems/number-of-ways-to-split-a-string/discuss/830433/Javascript-Python3-C%2B%2B-solutions
#
class Solution:
def numWays(self, S: str, MOD = int(1e9 + 7)) -> int:
N = len(S)
cnt = len([c for c in S if c == '1'])
# case 1: all zeros, return the sum of the series for the cardinality of S minus 1
if not cnt:
return (N - 2) * (N - 1) // 2 % MOD
# case 2: cannot evenly divide the ones into 3 equal paritions
if cnt % 3:
return 0
# case 3: return the product of the first and second accumulated "gaps of zeros" between each parition of equal ones
K = cnt // 3
first = 0
second = 0
ones = 0
for i in range(N):
if S[i] == '1':
ones += 1
if ones == 1 * K and S[i] == '0': first +=1
if ones == 2 * K and S[i] == '0': second += 1
return (first + 1) * (second + 1) % MOD # ⭐️ +1 for "gaps of zeros" from i..j inclusive
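# Worked example (illustrative): S = "10101" has three '1's, so K = 1; one '0' gap follows
# the first '1' (first = 1) and one '0' gap follows the second '1' (second = 1),
# giving (1 + 1) * (1 + 1) = 4 valid splits.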
| 39.275862 | 124 | 0.543459 | [
"MIT"
] | claytonjwong/leetcode-py | 1573_number_ways_to_split_string.py | 1,143 | Python |
"""
WSGI config for kongoauth project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "kongoauth.settings")
application = get_wsgi_application()
| 23.294118 | 78 | 0.787879 | [
"MIT"
] | AppointmentGuru/AuthenticationGuru | kongoauth/wsgi.py | 396 | Python |
# -*- coding: utf-8 -*-
import base64
import hmac
import json
import sys
import time
import urllib
import uuid
from hashlib import sha1
import requests
from flask import current_app
from werkzeug.local import LocalProxy
DEFAULT_URL = 'https://sms.aliyuncs.com'
SMS = LocalProxy(lambda: current_app.extensions['kits_sms'])
class SMSSender(object):
def __init__(self, app_key, secret_key, url=DEFAULT_URL):
self.app_key = app_key
self.secret_key = secret_key
self.url = url
@staticmethod
def percent_encode(content):
# content = str(content)
res = urllib.quote(content, '')
res = res.replace('+', '%20')
res = res.replace('*', '%2A')
res = res.replace('%7E', '~')
return res
def sign(self, access_key_secret, params):
params = sorted(params.items(), key=lambda param: param[0])
canonical_querystring = ''
for (k, v) in params:
canonical_querystring += '&' + self.percent_encode(k) + '=' + self.percent_encode(v)
string_to_sign = 'GET&%2F&' + self.percent_encode(canonical_querystring[1:]) # 使用get请求方法
h = hmac.new(access_key_secret + "&", string_to_sign, sha1)
signature = base64.encodestring(h.digest()).strip()
return signature
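    # Illustrative trace of the signing scheme above (parameter values are made up):
    #   params = {'Action': 'SingleSendSms', 'RecNum': '13800000000'}
    #   canonical query   -> 'Action=SingleSendSms&RecNum=13800000000'
    #   string_to_sign    -> 'GET&%2F&Action%3DSingleSendSms%26RecNum%3D13800000000'
    #   signature         -> base64(HMAC-SHA1(secret_key + '&', string_to_sign))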
def make_url(self, params):
timestamp = time.strftime("%Y-%m-%dT%H:%M:%SZ", time.gmtime())
parameters = {
'Format': 'JSON',
'Version': '2016-09-27',
'AccessKeyId': self.app_key,
'SignatureVersion': '1.0',
'SignatureMethod': 'HMAC-SHA1',
'SignatureNonce': str(uuid.uuid1()),
'Timestamp': timestamp,
}
for key in params.keys():
parameters[key] = params[key]
signature = self.sign(self.secret_key, parameters)
parameters['Signature'] = signature
url = self.url + "/?" + urllib.urlencode(parameters)
return url
def do_request(self, params):
url = self.make_url(params)
response = requests.get(url)
print response.ok, response.content
def send(self, template_code, sign_name, receive_num, param):
params = {
'Action': 'SingleSendSms',
'SignName': sign_name,
'TemplateCode': template_code,
'RecNum': receive_num,
'ParamString': json.dumps(param)
}
url = self.make_url(params)
response = requests.get(url)
if not response.ok:
current_app.logger.error(response.content)
return response.ok
def init_extension(kits, app):
url = kits.get_parameter('SMS_URL', default=DEFAULT_URL)
app_key = kits.get_parameter("SMS_APP_KEY")
secret_key = kits.get_parameter('SMS_SECRET_KEY')
app.extensions['kits_sms'] = SMSSender(app_key, secret_key, url)
if __name__ == '__main__':
sender = SMSSender('LTAIWLcy7iT5v7mr', 'gRL1rtYnyfKMDVZs7b4fhbosX0MAAo ')
print sender.send("SMS_49485493", u"testing", "18708140165", param={'code': "123456", 'product': "benjamin"})
| 33.0625 | 114 | 0.600504 | [
"MIT"
] | by46/flask-kits | flask_kits/sms/__init__.py | 3,186 | Python |
import matplotlib
matplotlib.use('Agg')
import os
from os.path import join
import argparse
import torch
import numpy as np
import pickle
import sys
import datetime
sys.path.append('./utils')
from torch import optim
from torch import nn
from torch import multiprocessing
from torch.optim import lr_scheduler
from torch.autograd import Variable
from torch.utils.data import DataLoader, ConcatDataset
from utils.builders import SingleViewDepthTripletBuilder, MultiViewDepthTripletBuilder, MultiViewTripletBuilder, SingleViewTripletBuilder
from utils.builder_utils import distance, Logger, ensure_folder, collate_fn, time_stamped
from utils.vocabulary import Vocabulary
from ipdb import set_trace
from sklearn.preprocessing import OneHotEncoder
from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence
from torchvision import transforms
import torchvision.utils as vutils
import torchvision.models as models
from torchvision import datasets
from tensorboardX import SummaryWriter
import matplotlib.pyplot as plt
from shutil import copy2
import importlib
from pyquaternion import Quaternion
from models.pose_predictor_euler_crop import define_model
from utils.plot_utils import plot_mean
from utils.rot_utils_old import create_rot_from_vector, rotationMatrixToEulerAngles, \
isRotationMatrix, eulerAnglesToRotationMatrix, \
norm_sincos, sincos2rotm
from utils.network_utils import loss_rotation, loss_euler_reparametrize, loss_axisangle, batch_size, apply,\
loss_quat, loss_quat_single, euler_XYZ_to_reparam, loss_quat_huber
from utils.plot_utils import plot_mean
os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID" # see issue #152
os.environ["CUDA_VISIBLE_DEVICES"]= "1,2,3"
IMAGE_SIZE = (299, 299)
NUM_VIEWS = 1
SAMPLE_SIZE = 40
VAL_SEQS =5
TRAIN_SEQS_PER_EPOCH = 80
LOSS_FN = loss_euler_reparametrize
EXP_ROOT_DIR = '/media/hdd/msieb/data/tcn_data/experiments'
sys.path.append(EXP_ROOT_DIR)
class Trainer(object):
def __init__(self, use_cuda, load_model, model_folder, train_directory, validation_directory, builder, loss_fn, args, multi_gpu=True):
self.use_cuda = use_cuda
self.load_model = load_model
self.model_folder = model_folder
self.validation_directory = validation_directory
self.train_directory = train_directory
self.args = args
self.builder = builder
self.loss_fn = loss_fn
self.logdir = join(model_folder, 'logs')
self.writer = SummaryWriter(self.logdir)
self.logger = Logger(self.args.log_file)
self.itr = 0
# Create Model
self.model = self.create_model()
if multi_gpu:
self.model = torch.nn.DataParallel(self.model, device_ids=range(torch.cuda.device_count()))
# Build validation set
validation_builder = builder(self.args.n_views, validation_directory, IMAGE_SIZE, self.args, toRot=True, sample_size=SAMPLE_SIZE)
validation_set = [validation_builder.build_set() for i in range(VAL_SEQS)]
validation_set = ConcatDataset(validation_set)
self.len_validation_set = len(validation_set)
del validation_builder
self.validation_loader = DataLoader(
validation_set,
batch_size=8,
shuffle=False,
pin_memory=self.use_cuda,
)
self.validation_calls = 0
# Build Training Set
self.triplet_builder = builder(self.args.n_views, \
train_directory, IMAGE_SIZE, self.args, toRot=True, sample_size=SAMPLE_SIZE)
self.training_queue = multiprocessing.Queue(1)
dataset_builder_process = multiprocessing.Process(target=self.build_set, args=(self.training_queue, self.triplet_builder, self.logger), daemon=True)
dataset_builder_process.start()
# Get Logger
# Model specific setup
# self.optimizer = optim.SGD(self.model.parameters(), lr=self.args.lr_start, momentum=0.9)
self.optimizer = optim.Adam(self.model.parameters(), lr=0.001, betas=(0.9, 0.999), eps=1e-08)
# This will diminish the learning rate at the milestones ///// 0.1, 0.01, 0.001 if not using automized scheduler
self.learning_rate_scheduler = lr_scheduler.ReduceLROnPlateau(self.optimizer, 'min')
# self.criterion = nn.CrossEntropyLoss()
def train(self):
trn_losses_ = []
val_losses_= []
val_acc_ = []
trn_acc_ = []
for epoch in range(self.args.start_epoch, self.args.start_epoch + self.args.epochs):
print("=" * 20)
self.logger.info("Starting epoch: {0} ".format(epoch))
dataset = self.training_queue.get()
data_loader = DataLoader(
dataset=dataset,
batch_size=self.args.minibatch_size, # batch_size(epoch, self.args.max_minibatch_size),
shuffle=True,
pin_memory=self.use_cuda,
)
train_embedding_features_buffer = []
train_images_buffer = []
train_labels = []
correct = 0
for _ in range(0, 1):
losses = []
for minibatch in data_loader:
if self.use_cuda:
anchor_frames = minibatch[0].cuda()
#anchor_euler_reparam = minibatch[1].cuda() # load as 3x3 rotation matrix
anchor_quats = minibatch[1].cuda() # load as 3x3 rotation matrix
# frames = Variable(minibatch)
loss, a_pred = self.loss_fn(self.model, anchor_frames, anchor_quats)
losses.append(loss.data.cpu().numpy())
correct += (torch.norm(a_pred - anchor_quats, 2) < 1).data.cpu().numpy().sum() # print(gradcheck(loss_fn, (tcn, minibatch,)))
self.optimizer.zero_grad()
loss.backward()
self.optimizer.step()
# Add embeddings
train_labels.append(anchor_quats)
train_embedding_features_buffer.append(anchor_quats)
train_images_buffer.append(anchor_frames)
print("logging to {}".format(self.logdir))
self.writer.add_scalar('data/train_loss', np.mean(losses), self.itr)
self.writer.add_scalar('data/train_correct', correct / len(data_loader), self.itr)
self.itr += 1
trn_losses_.append(np.mean(losses))
self.logger.info('train loss: ', np.mean(losses))
self.logger.info("Training score correct {correct}/{total}".format(
correct=correct,
total=len(data_loader)
))
trn_acc_.append(correct)
self.writer.add_image('frame_1', minibatch[0][0], self.itr)
# self.writer.add_image('pose1', str(minibatch[1][0].data.detach().cpu().numpy()), self.itr)
self.writer.add_image('frame_2', minibatch[0][1], self.itr)
# self.writer.add_image('pose_2', str(minibatch[1][1].data.detach().cpu().numpy()), self.itr)
self.writer.add_image('frame_3', minibatch[0][2], self.itr)
# self.writer.add_image('pose_3', str(minibatch[1][2].data.detach().cpu().numpy()), self.itr)
self.writer.add_image('frame_4', minibatch[0][3], self.itr)
# self.writer.add_image('pose_4', str(minibatch[1][3].data.detach().cpu().numpy()), self.itr)
# Get embeddings
features = torch.cat(train_embedding_features_buffer[:30]).squeeze_()
labels = torch.cat(train_labels[:30]).squeeze_()
# features = train_embedding_features_buffer.view(train_embedding_features_buffer.shape[0]*train_embedding_features_buffer.shape[1], -1)
# label = torch.Tensor(np.asarray(label_buffer))
images = torch.cat(train_images_buffer[:30]).squeeze_()#/255.0, [0, 3, 1, 2]
self.writer.add_embedding(features, metadata=labels, label_img=images, global_step=epoch)
if epoch % 1 == 0:
loss, correct = self.validate()
self.learning_rate_scheduler.step(loss)
val_losses_.append(loss)
val_acc_.append(correct)
if epoch % self.args.save_every == 0 and epoch != 0:
self.logger.info('Saving model.')
self.save_model(self.model, self.model_filename(self.args.model_name, epoch), join(self.model_folder, 'weight_files'))
print("logging to {}".format(self.logdir))
plot_mean(trn_losses_, self.model_folder, 'train_loss')
plot_mean(val_losses_, self.model_folder, 'validation_loss')
plot_mean(trn_acc_, self.model_folder, 'train_acc')
plot_mean(val_acc_, self.model_folder, 'validation_accuracy')
# plot_mean(val_acc_no_margin_, self.model_folder, 'validation_accuracy_no_margin')
def validate(self):
# Run model on validation data and log results
correct = 0
losses = []
for minibatch in self.validation_loader:
if self.use_cuda:
anchor_frames = minibatch[0].cuda()
#anchor_euler_reparam = minibatch[1].cuda() # load as 3x3 rotation matrix
anchor_quats = minibatch[1].cuda() # load as 3x3 rotation matrix
loss, a_pred = self.loss_fn(self.model, anchor_frames, anchor_quats)
losses.append(loss.data.cpu().numpy())
correct += (torch.norm(a_pred - anchor_quats, 2) < 0.1).data.cpu().numpy().sum()
self.writer.add_scalar('data/valid_loss', np.mean(losses), self.validation_calls)
self.writer.add_scalar('data/validation_correct', correct / self.len_validation_set, self.validation_calls)
self.validation_calls += 1
loss = np.mean(losses)
self.logger.info("Validation score correct {correct}/{total}".format(
correct=correct,
total=self.len_validation_set
))
self.logger.info('val loss: ',loss)
return loss, correct
def model_filename(self, model_name, epoch):
return "{model_name}-epoch-{epoch}.pk".format(model_name=model_name, epoch=epoch)
def save_model(self, model, filename, model_folder):
ensure_folder(model_folder)
model_path = os.path.join(model_folder, filename)
torch.save(model.state_dict(), model_path)
def build_set(self, queue, triplet_builder, log):
while 1:
datasets = []
for i in range(TRAIN_SEQS_PER_EPOCH):
dataset = triplet_builder.build_set()
datasets.append(dataset)
dataset = ConcatDataset(datasets)
# log.info('Created {0} triplets'.format(len(dataset)))
queue.put(dataset)
def create_model(self):
model = define_model(pretrained=True)
# model = PosNet()
if self.load_model:
model_path = os.path.join(
self.model_folder,
self.load_model
)
# map_location allows us to load models trained on cuda to cpu.
model.load_state_dict(torch.load(model_path, map_location=lambda storage, loc: storage))
if self.use_cuda:
model = model.cuda()
return model
def batch_size(self, epoch, max_size):
exponent = epoch // 100
return min(max(2 ** (exponent), 2), max_size)
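        # Illustrative growth (assuming max_size=32): 2 for epochs 0-199, then 4, 8, 16 and 32
        # as the epoch count passes 200, 300, 400 and 500, capped at max_size afterwards.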
def main(args):
# module = importlib.import_module(args.exp_name + '.config')
# conf = getattr(module, 'Config_Isaac_Server')()
# EXP_DIR = conf.EXP_DIR
# MODEL_FOLDER = conf.MODEL_FOLDER
# GPU Configuration
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
use_cuda = torch.cuda.is_available()
# Load model
model_folder = join(EXP_ROOT_DIR, args.exp_name, 'trained_models', args.run_name, time_stamped())
if not os.path.exists(model_folder):
os.makedirs(model_folder)
# Get data loader builder and loss function
builder = getattr(importlib.import_module('utils.builders'), args.builder)
loss_fn = LOSS_FN
# Define train and validation directories
train_directory = join(EXP_ROOT_DIR, args.exp_name, 'videos/train/')
validation_directory = join(EXP_ROOT_DIR, args.exp_name, 'videos/valid/')
# Copies of executed config
if not os.path.exists('/'.join(os.path.realpath(__file__).split('/')[:-1]) + '/experiments'):
os.makedirs('/'.join(os.path.realpath(__file__).split('/')[:-1]) + '/experiments')
copy2('/'.join(os.path.realpath(__file__).split('/')[:-1]) + '/train_tcn_no_captions.py', model_folder)
copy2('/'.join(os.path.realpath(__file__).split('/')[:-2]) + '/gps-lfd' + '/config.py', model_folder)
# Build training class
trainer = Trainer(use_cuda, args.load_model, model_folder, train_directory, validation_directory, builder, loss_fn, args)
trainer.train()
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--start-epoch', type=int, default=0)
parser.add_argument('--epochs', type=int, default=1000)
parser.add_argument('--save-every', type=int, default=10)
parser.add_argument('--load-model', type=str, required=False)
parser.add_argument('--minibatch-size', type=int, default=8)
parser.add_argument('--model-name', type=str, default='tcn')
parser.add_argument('--log-file', type=str, default='./out.log')
parser.add_argument('--lr-start', type=float, default=0.001)
parser.add_argument('--n-views', type=int, default=NUM_VIEWS)
parser.add_argument('--alpha', type=float, default=0.01, help='weighing factor of language loss to triplet loss')
# Model parameters
# Path parameters
parser.add_argument('--exp-name', type=str, required=True)
parser.add_argument('--run-name', type=str, required=True)
parser.add_argument('--builder', type=str, required=True)
args = parser.parse_args()
print(args)
main(args)
| 43.650307 | 169 | 0.646381 | [
"MIT"
] | msieb1/LTCN | train_pose_euler_crop.py | 14,230 | Python |
#!/usr/bin/env python3
#-------------------------------------------------------------
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
#-------------------------------------------------------------
from __future__ import print_function
import os
import sys
import platform
try:
exec(open('systemds/project_info.py').read())
except IOError:
print("Could not read project_info.py.", file=sys.stderr)
sys.exit()
ARTIFACT_NAME = __project_artifact_id__
ARTIFACT_VERSION = __project_version__
ARTIFACT_VERSION_SHORT = ARTIFACT_VERSION.split("-")[0]
root_dir = os.path.dirname(os.path.dirname(os.path.dirname(os.getcwd())))
src_path_prefix = os.path.join(root_dir, 'src', 'main', 'python', 'dist', ARTIFACT_NAME + '-' + ARTIFACT_VERSION_SHORT)
src_path = src_path_prefix + '.zip' if platform.system() == "Windows" and os.path.exists(
src_path_prefix + '.zip') else src_path_prefix + '.tar.gz'
os.rename(
src_path,
os.path.join(root_dir, 'target', ARTIFACT_NAME + '-' + ARTIFACT_VERSION + '-python.tar.gz'))
wheel_name = '-'.join([ARTIFACT_NAME, ARTIFACT_VERSION_SHORT, 'py3', 'none', 'any.whl'])
wheel = os.path.join(root_dir, 'src', 'main', 'python', 'dist', wheel_name)
os.rename(wheel, os.path.join(root_dir, 'target', wheel_name))
| 42.468085 | 119 | 0.695892 | [
"Apache-2.0"
] | AlexanderErtl/systemds | src/main/python/post_setup.py | 1,996 | Python |
import logging
import numpy as np
import math
import psutil
import time
from autogluon.common.features.types import R_BOOL, R_CATEGORY, R_OBJECT, S_BOOL, S_TEXT_NGRAM, S_TEXT_SPECIAL, S_DATETIME_AS_INT
from autogluon.core.constants import REGRESSION
from autogluon.core.utils.exceptions import NotEnoughMemoryError
from autogluon.core.models.abstract.model_trial import skip_hpo
from autogluon.core.models import AbstractModel
from autogluon.core.utils.utils import normalize_pred_probas
logger = logging.getLogger(__name__)
# TODO: Normalize data!
class KNNModel(AbstractModel):
"""
KNearestNeighbors model (scikit-learn): https://scikit-learn.org/stable/modules/generated/sklearn.neighbors.KNeighborsClassifier.html
"""
def __init__(self, **kwargs):
super().__init__(**kwargs)
self._X_unused_index = None # Keeps track of unused training data indices, necessary for LOO OOF generation
def _get_model_type(self):
if self.params_aux.get('use_daal', True):
try:
# TODO: Add more granular switch, currently this affects all future KNN models even if they had `use_daal=False`
from sklearnex import patch_sklearn
patch_sklearn("knn_classifier")
patch_sklearn("knn_regressor")
# daal backend for KNN seems to be 20-40x+ faster than native sklearn with no downsides.
logger.log(15, '\tUsing daal4py KNN backend...')
except:
pass
try:
from ._knn_loo_variants import KNeighborsClassifier, KNeighborsRegressor
except:
from sklearn.neighbors import KNeighborsClassifier, KNeighborsRegressor
logger.warning('WARNING: Leave-one-out variants of KNN failed to import. Falling back to standard KNN implementations.')
if self.problem_type == REGRESSION:
return KNeighborsRegressor
else:
return KNeighborsClassifier
def _preprocess(self, X, **kwargs):
X = super()._preprocess(X, **kwargs)
X = X.fillna(0).to_numpy(dtype=np.float32)
return X
def _set_default_params(self):
default_params = {
'weights': 'uniform',
'n_jobs': -1,
}
for param, val in default_params.items():
self._set_default_param_value(param, val)
def _get_default_auxiliary_params(self) -> dict:
default_auxiliary_params = super()._get_default_auxiliary_params()
extra_auxiliary_params = dict(
ignored_type_group_raw=[R_BOOL, R_CATEGORY, R_OBJECT], # TODO: Eventually use category features
ignored_type_group_special=[S_BOOL, S_TEXT_NGRAM, S_TEXT_SPECIAL, S_DATETIME_AS_INT],
)
default_auxiliary_params.update(extra_auxiliary_params)
return default_auxiliary_params
@classmethod
def _get_default_ag_args(cls) -> dict:
default_ag_args = super()._get_default_ag_args()
extra_ag_args = {'valid_stacker': False}
default_ag_args.update(extra_ag_args)
return default_ag_args
@classmethod
def _get_default_ag_args_ensemble(cls, **kwargs) -> dict:
default_ag_args_ensemble = super()._get_default_ag_args_ensemble(**kwargs)
extra_ag_args_ensemble = {'use_child_oof': True}
default_ag_args_ensemble.update(extra_ag_args_ensemble)
return default_ag_args_ensemble
# TODO: Enable HPO for KNN
def _get_default_searchspace(self):
spaces = {}
return spaces
def _fit(self,
X,
y,
time_limit=None,
sample_weight=None,
**kwargs):
time_start = time.time()
X = self.preprocess(X)
self._validate_fit_memory_usage(X=X) # TODO: Can incorporate this into samples, can fit on portion of data to satisfy memory instead of raising exception immediately
if sample_weight is not None: # TODO: support
logger.log(15, "sample_weight not yet supported for KNNModel, this model will ignore them in training.")
num_rows_max = len(X)
# FIXME: v0.1 Must store final num rows for refit_full or else will use everything! Worst case refit_full could train far longer than the original model.
if time_limit is None or num_rows_max <= 10000:
self.model = self._get_model_type()(**self._get_model_params()).fit(X, y)
else:
self.model = self._fit_with_samples(X=X, y=y, time_limit=time_limit - (time.time() - time_start))
def _validate_fit_memory_usage(self, X):
max_memory_usage_ratio = self.params_aux['max_memory_usage_ratio']
model_size_bytes = 4 * X.shape[0] * X.shape[1] # Assuming float32 types
expected_final_model_size_bytes = model_size_bytes * 3.6 # Roughly what can be expected of the final KNN model in memory size
if expected_final_model_size_bytes > 10000000: # Only worth checking if expected model size is >10MB
available_mem = psutil.virtual_memory().available
model_memory_ratio = expected_final_model_size_bytes / available_mem
if model_memory_ratio > (0.15 * max_memory_usage_ratio):
logger.warning(f'\tWarning: Model is expected to require {round(model_memory_ratio * 100, 2)}% of available memory...')
if model_memory_ratio > (0.20 * max_memory_usage_ratio):
raise NotEnoughMemoryError # don't train full model to avoid OOM error
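        # Rough scale of the estimate above: 1,000,000 rows x 100 float32 features is ~400 MB
        # of raw data, so the final model is expected to need ~1.44 GB (400 MB * 3.6), which is
        # what gets compared against the available-memory thresholds.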
# TODO: Won't work for RAPIDS without modification
# TODO: Technically isn't OOF, but can be used inplace of OOF. Perhaps rename to something more accurate?
def get_oof_pred_proba(self, X, normalize=None, **kwargs):
"""X should be the same X passed to `.fit`"""
y_oof_pred_proba = self._get_oof_pred_proba(X=X, **kwargs)
if normalize is None:
normalize = self.normalize_pred_probas
if normalize:
y_oof_pred_proba = normalize_pred_probas(y_oof_pred_proba, self.problem_type)
y_oof_pred_proba = y_oof_pred_proba.astype(np.float32)
return y_oof_pred_proba
def _get_oof_pred_proba(self, X, **kwargs):
if callable(getattr(self.model, "predict_proba_loo", None)):
y_oof_pred_proba = self.model.predict_proba_loo()
elif callable(getattr(self.model, "predict_loo", None)):
y_oof_pred_proba = self.model.predict_loo()
else:
raise AssertionError(f'Model class {type(self.model)} does not support out-of-fold prediction generation.')
y_oof_pred_proba = self._convert_proba_to_unified_form(y_oof_pred_proba)
if X is not None and self._X_unused_index:
X_unused = X.iloc[self._X_unused_index]
y_pred_proba_new = self.predict_proba(X_unused)
X_unused_index = set(self._X_unused_index)
num_rows = len(X)
X_used_index = [i for i in range(num_rows) if i not in X_unused_index]
oof_pred_shape = y_oof_pred_proba.shape
if len(oof_pred_shape) == 1:
y_oof_tmp = np.zeros(num_rows, dtype=np.float32)
y_oof_tmp[X_used_index] = y_oof_pred_proba
y_oof_tmp[self._X_unused_index] = y_pred_proba_new
else:
y_oof_tmp = np.zeros((num_rows, oof_pred_shape[1]), dtype=np.float32)
y_oof_tmp[X_used_index, :] = y_oof_pred_proba
y_oof_tmp[self._X_unused_index, :] = y_pred_proba_new
y_oof_pred_proba = y_oof_tmp
return y_oof_pred_proba
# TODO: Consider making this fully generic and available to all models
def _fit_with_samples(self,
X,
y,
time_limit,
start_samples=10000,
max_samples=None,
sample_growth_factor=2,
sample_time_growth_factor=8):
"""
Fit model with samples of the data repeatedly, gradually increasing the amount of data until time_limit is reached or all data is used.
X and y must already be preprocessed.
Parameters
----------
X : np.ndarray
The training data features (preprocessed).
y : Series
The training data ground truth labels.
time_limit : float, default = None
Time limit in seconds to adhere to when fitting model.
start_samples : int, default = 10000
Number of samples to start with. This will be multiplied by sample_growth_factor after each model fit to determine the next number of samples.
For example, if start_samples=10000, sample_growth_factor=2, then the number of samples per model fit would be [10000, 20000, 40000, 80000, ...]
max_samples : int, default = None
The maximum number of samples to use.
If None or greater than the number of rows in X, then it is set equal to the number of rows in X.
sample_growth_factor : float, default = 2
The rate of growth in sample size between each model fit. If 2, then the sample size doubles after each fit.
sample_time_growth_factor : float, default = 8
The multiplier to the expected fit time of the next model. If `sample_time_growth_factor=8` and a model took 10 seconds to train, the next model fit will be expected to take 80 seconds.
If an expected time is greater than the remaining time in `time_limit`, the model will not be trained and the method will return early.
"""
time_start = time.time()
num_rows_samples = []
if max_samples is None:
num_rows_max = len(X)
else:
num_rows_max = min(len(X), max_samples)
num_rows_cur = start_samples
while True:
num_rows_cur = min(num_rows_cur, num_rows_max)
num_rows_samples.append(num_rows_cur)
if num_rows_cur == num_rows_max:
break
num_rows_cur *= sample_growth_factor
num_rows_cur = math.ceil(num_rows_cur)
if num_rows_cur * 1.5 >= num_rows_max:
num_rows_cur = num_rows_max
def sample_func(chunk, frac):
# Guarantee at least 1 sample (otherwise log_loss would crash or model would return different column counts in pred_proba)
n = max(math.ceil(len(chunk) * frac), 1)
return chunk.sample(n=n, replace=False, random_state=0)
if self.problem_type != REGRESSION:
y_df = y.to_frame(name='label').reset_index(drop=True)
else:
y_df = None
time_start_sample_loop = time.time()
time_limit_left = time_limit - (time_start_sample_loop - time_start)
model_type = self._get_model_type()
idx = None
for i, samples in enumerate(num_rows_samples):
if samples != num_rows_max:
if self.problem_type == REGRESSION:
idx = np.random.choice(num_rows_max, size=samples, replace=False)
else:
idx = y_df.groupby('label', group_keys=False).apply(sample_func, frac=samples/num_rows_max).index
X_samp = X[idx, :]
y_samp = y.iloc[idx]
else:
X_samp = X
y_samp = y
idx = None
self.model = model_type(**self._get_model_params()).fit(X_samp, y_samp)
time_limit_left_prior = time_limit_left
time_fit_end_sample = time.time()
time_limit_left = time_limit - (time_fit_end_sample - time_start)
time_fit_sample = time_limit_left_prior - time_limit_left
time_required_for_next = time_fit_sample * sample_time_growth_factor
logger.log(15, f'\t{round(time_fit_sample, 2)}s \t= Train Time (Using {samples}/{num_rows_max} rows) ({round(time_limit_left, 2)}s remaining time)')
if time_required_for_next > time_limit_left and i != len(num_rows_samples) - 1:
logger.log(20, f'\tNot enough time to train KNN model on all training rows. Fit {samples}/{num_rows_max} rows. (Training KNN model on {num_rows_samples[i+1]} rows is expected to take {round(time_required_for_next, 2)}s)')
break
if idx is not None:
idx = set(idx)
self._X_unused_index = [i for i in range(num_rows_max) if i not in idx]
return self.model
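        # Illustrative schedule (defaults: start_samples=10000, growth factor 2): with 50,000
        # training rows the sample sizes tried are [10000, 20000, 50000]; the 40,000 step is
        # skipped because 40,000 * 1.5 already reaches the maximum, per the loop above.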
# TODO: Add HPO
def _hyperparameter_tune(self, **kwargs):
return skip_hpo(self, **kwargs)
def _more_tags(self):
return {'valid_oof': True}
class FAISSModel(KNNModel):
def _get_model_type(self):
from .knn_utils import FAISSNeighborsClassifier, FAISSNeighborsRegressor
if self.problem_type == REGRESSION:
return FAISSNeighborsRegressor
else:
return FAISSNeighborsClassifier
def _set_default_params(self):
default_params = {
'index_factory_string': 'Flat',
}
for param, val in default_params.items():
self._set_default_param_value(param, val)
super()._set_default_params()
@classmethod
def _get_default_ag_args_ensemble(cls, **kwargs) -> dict:
default_ag_args_ensemble = super()._get_default_ag_args_ensemble(**kwargs)
extra_ag_args_ensemble = {'use_child_oof': False}
default_ag_args_ensemble.update(extra_ag_args_ensemble)
return default_ag_args_ensemble
def _more_tags(self):
return {'valid_oof': False} | 48.106007 | 237 | 0.653518 | [
"Apache-2.0"
] | taesup-aws/autogluon | tabular/src/autogluon/tabular/models/knn/knn_model.py | 13,614 | Python |
from utils import *
row_units = [cross(r, cols) for r in rows]
column_units = [cross(rows, c) for c in cols]
square_units = [cross(rs, cs) for rs in ('ABC','DEF','GHI') for cs in ('123','456','789')]
unitlist = row_units + column_units + square_units
# TODO: Update the unit list to add the new diagonal units
diagonal1 = [['A1', 'B2', 'C3', 'D4', 'E5', 'F6', 'G7', 'H8', 'I9']]
diagonal2 = [['A9', 'B8', 'C7', 'D6', 'E5', 'F4', 'G3', 'H2', 'I1']]
unitlist = unitlist + diagonal1 + diagonal2
# Must be called after all units (including diagonals) are added to the unitlist
units = extract_units(unitlist, boxes)
peers = extract_peers(units, boxes)
def naked_twins(values):
"""Eliminate values using the naked twins strategy.
The naked twins strategy says that if you have two or more unallocated boxes
in a unit and there are only two digits that can go in those two boxes, then
those two digits can be eliminated from the possible assignments of all other
boxes in the same unit.
Parameters
----------
values(dict)
a dictionary of the form {'box_name': '123456789', ...}
Returns
-------
dict
The values dictionary with the naked twins eliminated from peers
Notes
-----
Your solution can either process all pairs of naked twins from the input once,
or it can continue processing pairs of naked twins until there are no such
pairs remaining -- the project assistant test suite will accept either
convention. However, it will not accept code that does not process all pairs
of naked twins from the original input. (For example, if you start processing
pairs of twins and eliminate another pair of twins before the second pair
is processed then your code will fail the PA test suite.)
The first convention is preferred for consistency with the other strategies,
and because it is simpler (since the reduce_puzzle function already calls this
strategy repeatedly).
See Also
--------
Pseudocode for this algorithm on github:
https://github.com/udacity/artificial-intelligence/blob/master/Projects/1_Sudoku/pseudocode.md
"""
"""
out = values.copy()
len_2_boxes = [box for box in values if len(values[box]) == 2]
for boxA in len_2_boxes:
boxAPeers = peers[boxA]
for boxB in boxAPeers:
if values[boxA] == values[boxB]:
intersect = [val for val in boxAPeers if val in peers[boxB]]
for peer in intersect:
out[peer] = out[peer].replace(values[boxA], '')
return out
"""
out = values.copy()
for boxA in values:
boxAPeers = peers[boxA]
for boxB in boxAPeers:
if values[boxA] == values[boxB] and len(values[boxA]) == 2:
intersect = [val for val in boxAPeers if val in peers[boxB]]
for peer in intersect:
for digit in values[boxA]:
out[peer] = out[peer].replace(digit, '')
return out
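# Worked example (illustrative): if two boxes of a unit are both reduced to '23', the digits
# 2 and 3 must occupy exactly those two boxes, so a peer in the same unit holding '234'
# is pruned to '4' by naked_twins().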
def eliminate(values):
"""Apply the eliminate strategy to a Sudoku puzzle
The eliminate strategy says that if a box has a value assigned, then none
of the peers of that box can have the same value.
Parameters
----------
values(dict)
a dictionary of the form {'box_name': '123456789', ...}
Returns
-------
dict
The values dictionary with the assigned values eliminated from peers
"""
solved_values = [box for box in values.keys() if len(values[box]) == 1]
for box in solved_values:
digit = values[box]
for peer in peers[box]:
values[peer] = values[peer].replace(digit,'')
return values
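# Worked example (illustrative): once A1 is solved as '5', eliminate() strips '5' from the
# candidates of every peer of A1 -- its row, its column, its 3x3 square and, in this
# diagonal variant, the main diagonal through A1.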
def only_choice(values):
"""Apply the only choice strategy to a Sudoku puzzle
The only choice strategy says that if only one box in a unit allows a certain
digit, then that box must be assigned that digit.
Parameters
----------
values(dict)
a dictionary of the form {'box_name': '123456789', ...}
Returns
-------
dict
The values dictionary with all single-valued boxes assigned
Notes
-----
You should be able to complete this function by copying your code from the classroom
"""
for unit in unitlist:
for digit in '123456789':
dplaces = [box for box in unit if digit in values[box]]
if len(dplaces) == 1:
values[dplaces[0]] = digit
return values
def reduce_puzzle(values):
"""Reduce a Sudoku puzzle by repeatedly applying all constraint strategies
Parameters
----------
values(dict)
a dictionary of the form {'box_name': '123456789', ...}
Returns
-------
dict or False
The values dictionary after continued application of the constraint strategies
no longer produces any changes, or False if the puzzle is unsolvable
"""
solved_values = [box for box in values.keys() if len(values[box]) == 1]
stalled = False
while not stalled:
solved_values_before = len([box for box in values.keys() if len(values[box]) == 1])
values = eliminate(values)
values = only_choice(values)
values = naked_twins(values)
solved_values_after = len([box for box in values.keys() if len(values[box]) == 1])
stalled = solved_values_before == solved_values_after
if len([box for box in values.keys() if len(values[box]) == 0]):
return False
return values
def search(values):
"""Apply depth first search to solve Sudoku puzzles in order to solve puzzles
that cannot be solved by repeated reduction alone.
Parameters
----------
values(dict)
a dictionary of the form {'box_name': '123456789', ...}
Returns
-------
dict or False
The values dictionary with all boxes assigned or False
Notes
-----
You should be able to complete this function by copying your code from the classroom
and extending it to call the naked twins strategy.
"""
"Using depth-first search and propagation, try all possible values."
# First, reduce the puzzle using the previous function
values = reduce_puzzle(values)
if values is False:
return False ## Failed earlier
if all(len(values[s]) == 1 for s in boxes):
return values ## Solved!
# Choose one of the unfilled squares with the fewest possibilities
n,s = min((len(values[s]), s) for s in boxes if len(values[s]) > 1)
# Now use recurrence to solve each one of the resulting sudokus, and
for value in values[s]:
new_sudoku = values.copy()
new_sudoku[s] = value
attempt = search(new_sudoku)
if attempt:
return attempt
def solve(grid):
"""Find the solution to a Sudoku puzzle using search and constraint propagation
Parameters
----------
grid(string)
a string representing a sudoku grid.
Ex. '2.............62....1....7...6..8...3...9...7...6..4...4....8....52.............3'
Returns
-------
dict or False
The dictionary representation of the final sudoku grid or False if no solution exists.
"""
values = grid2values(grid)
values = search(values)
return values
if __name__ == "__main__":
diag_sudoku_grid = '2.............62....1....7...6..8...3...9...7...6..4...4....8....52.............3'
display(grid2values(diag_sudoku_grid))
result = solve(diag_sudoku_grid)
display(result)
try:
import PySudoku
PySudoku.play(grid2values(diag_sudoku_grid), result, history)
except SystemExit:
pass
except:
print('We could not visualize your board due to a pygame issue. Not a problem! It is not a requirement.')
| 33.309013 | 113 | 0.633552 | [
"MIT"
] | justinlnx/artificial-intelligence | Projects/1_Sudoku/solution.py | 7,761 | Python |
"""
Module: 'uheapq' on micropython-v1.16-esp32
"""
# MCU: {'ver': 'v1.16', 'port': 'esp32', 'arch': 'xtensawin', 'sysname': 'esp32', 'release': '1.16.0', 'name': 'micropython', 'mpy': 10757, 'version': '1.16.0', 'machine': 'ESP32 module (spiram) with ESP32', 'build': '', 'nodename': 'esp32', 'platform': 'esp32', 'family': 'micropython'}
# Stubber: 1.5.4
from typing import Any
def heapify(*args, **kwargs) -> Any:
...
def heappop(*args, **kwargs) -> Any:
...
def heappush(*args, **kwargs) -> Any:
...
| 27.473684 | 287 | 0.58046 | [
"MIT"
] | mattytrentini/micropython-stubs | stubs/micropython-v1_16-esp32/uheapq.py | 522 | Python |
#!/usr/bin/env python2.5
#
# Copyright 2009 the Melange authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module that contains base class for Melange Expando models.
"""
__authors__ = [
'"Lennard de Rijk" <[email protected]>',
]
from google.appengine.ext import db
from soc.logic import dicts
class ExpandoBase(db.Expando):
"""Expando Base model.
This might later on contain general functionalities like the
ModelWithFieldAttributes model.
"""
toDict = dicts.toDict
| 26.236842 | 74 | 0.748245 | [
"Apache-2.0"
] | MatthewWilkes/mw4068-packaging | src/melange/src/soc/models/expando_base.py | 997 | Python |
from sqlalchemy.orm.exc import NoResultFound
from zeeguu_core.model import User, Language, UserWord, Text, Bookmark
def own_or_crowdsourced_translation(user, word: str, from_lang_code: str, context: str):
own_past_translation = get_own_past_translation(user, word, from_lang_code, context)
if own_past_translation:
translations = [{'translation': own_past_translation,
'service_name': 'Own Last Translation',
'quality': 100}]
return translations
others_past_translation = get_others_past_translation(word, from_lang_code, context)
if others_past_translation:
translations = [{'translation': others_past_translation,
'service_name': 'Contributed Translation',
'quality': 100}]
return translations
return None
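# Illustrative return shape (a sketch; the example word is made up): a one-element
# list such as
#   [{'translation': 'hund', 'service_name': 'Own Last Translation', 'quality': 100}]
# or None when neither the user nor the crowd has translated this word in this context.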
def get_others_past_translation(word: str, from_lang_code: str, context: str):
return _get_past_translation(word, from_lang_code, context)
def get_own_past_translation(user, word: str, from_lang_code: str, context: str):
return _get_past_translation(word, from_lang_code, context, user)
def _get_past_translation(word: str, from_lang_code: str, context: str, user: User = None):
try:
from_language = Language.find(from_lang_code)
origin_word = UserWord.find(word, from_language)
text = Text.query.filter_by(content=context).one()
query = Bookmark.query.filter_by(origin_id=origin_word.id, text_id=text.id)
if user:
query = query.filter_by(user_id=user.id)
        # prioritize bookmarks from older users (lower user ids)
        query = query.order_by(Bookmark.user_id.asc())
return query.first().translation.word
except Exception as e:
print(e)
return None
| 31.982143 | 91 | 0.685092 | [
"MIT"
] | C0DK/Zeeguu-Core | zeeguu_core/crowd_translations/__init__.py | 1,791 | Python |
import _plotly_utils.basevalidators
class MarkerValidator(_plotly_utils.basevalidators.CompoundValidator):
def __init__(self, plotly_name="marker", parent_name="splom", **kwargs):
super(MarkerValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
data_class_str=kwargs.pop("data_class_str", "Marker"),
data_docs=kwargs.pop(
"data_docs",
"""
autocolorscale
Determines whether the colorscale is a default
palette (`autocolorscale: true`) or the palette
determined by `marker.colorscale`. Has an
                effect only if `marker.color` is set to a
numerical array. In case `colorscale` is
unspecified or `autocolorscale` is true, the
default palette will be chosen according to
whether numbers in the `color` array are all
positive, all negative or mixed.
cauto
Determines whether or not the color domain is
computed with respect to the input data (here
in `marker.color`) or the bounds set in
                `marker.cmin` and `marker.cmax`. Has an effect
                only if `marker.color` is set to a numerical
array. Defaults to `false` when `marker.cmin`
and `marker.cmax` are set by the user.
cmax
Sets the upper bound of the color domain. Has
                an effect only if `marker.color` is set to a
numerical array. Value should have the same
units as in `marker.color` and if set,
`marker.cmin` must be set as well.
cmid
Sets the mid-point of the color domain by
scaling `marker.cmin` and/or `marker.cmax` to
be equidistant to this point. Has an effect
                only if `marker.color` is set to a numerical
array. Value should have the same units as in
`marker.color`. Has no effect when
`marker.cauto` is `false`.
cmin
Sets the lower bound of the color domain. Has
                an effect only if `marker.color` is set to a
numerical array. Value should have the same
units as in `marker.color` and if set,
`marker.cmax` must be set as well.
color
                Sets the marker color. It accepts either a
specific color or an array of numbers that are
mapped to the colorscale relative to the max
and min values of the array or relative to
`marker.cmin` and `marker.cmax` if set.
coloraxis
Sets a reference to a shared color axis.
References to these shared color axes are
"coloraxis", "coloraxis2", "coloraxis3", etc.
Settings for these shared color axes are set in
the layout, under `layout.coloraxis`,
`layout.coloraxis2`, etc. Note that multiple
color scales can be linked to the same color
axis.
colorbar
:class:`plotly.graph_objects.splom.marker.Color
Bar` instance or dict with compatible
properties
colorscale
                Sets the colorscale. Has an effect only if
                `marker.color` is set to a numerical array. The
colorscale must be an array containing arrays
mapping a normalized value to an rgb, rgba,
hex, hsl, hsv, or named color string. At
minimum, a mapping for the lowest (0) and
highest (1) values are required. For example,
`[[0, 'rgb(0,0,255)'], [1, 'rgb(255,0,0)']]`.
To control the bounds of the colorscale in
color space, use`marker.cmin` and
`marker.cmax`. Alternatively, `colorscale` may
be a palette name string of the following list:
Blackbody,Bluered,Blues,Cividis,Earth,Electric,
Greens,Greys,Hot,Jet,Picnic,Portland,Rainbow,Rd
Bu,Reds,Viridis,YlGnBu,YlOrRd.
colorsrc
Sets the source reference on Chart Studio Cloud
for `color`.
line
:class:`plotly.graph_objects.splom.marker.Line`
instance or dict with compatible properties
opacity
Sets the marker opacity.
opacitysrc
Sets the source reference on Chart Studio Cloud
for `opacity`.
reversescale
Reverses the color mapping if true. Has an
                effect only if `marker.color` is set to a
numerical array. If true, `marker.cmin` will
correspond to the last color in the array and
`marker.cmax` will correspond to the first
color.
showscale
Determines whether or not a colorbar is
                displayed for this trace. Has an effect only if
                `marker.color` is set to a numerical array.
size
Sets the marker size (in px).
sizemin
Has an effect only if `marker.size` is set to a
numerical array. Sets the minimum size (in px)
of the rendered marker points.
sizemode
Has an effect only if `marker.size` is set to a
numerical array. Sets the rule for which the
data in `size` is converted to pixels.
sizeref
Has an effect only if `marker.size` is set to a
numerical array. Sets the scale factor used to
determine the rendered size of marker points.
Use with `sizemin` and `sizemode`.
sizesrc
Sets the source reference on Chart Studio Cloud
for `size`.
symbol
Sets the marker symbol type. Adding 100 is
equivalent to appending "-open" to a symbol
name. Adding 200 is equivalent to appending
"-dot" to a symbol name. Adding 300 is
equivalent to appending "-open-dot" or "dot-
open" to a symbol name.
symbolsrc
Sets the source reference on Chart Studio Cloud
for `symbol`.
""",
),
**kwargs,
)
| 47.792857 | 76 | 0.54521 | [
"MIT"
] | mastermind88/plotly.py | packages/python/plotly/plotly/validators/splom/_marker.py | 6,691 | Python |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import argparse
import logging
import os
import time
import detectron2.utils.comm as comm
import torch
from d2go.config import (
CfgNode as CN,
auto_scale_world_size,
reroute_config_path,
temp_defrost,
)
from d2go.distributed import get_local_rank, get_num_processes_per_machine
from d2go.runner import GeneralizedRCNNRunner, create_runner
from d2go.utils.launch_environment import get_launch_environment
from detectron2.utils.collect_env import collect_env_info
from detectron2.utils.logger import setup_logger
from detectron2.utils.serialize import PicklableWrapper
from d2go.utils.helper import run_once
from detectron2.utils.file_io import PathManager
from mobile_cv.common.misc.py import FolderLock, MultiprocessingPdb, post_mortem_if_fail
logger = logging.getLogger(__name__)
def basic_argument_parser(
distributed=True,
requires_config_file=True,
requires_output_dir=True,
):
""" Basic cli tool parser for Detectron2Go binaries """
parser = argparse.ArgumentParser(description="PyTorch Object Detection Training")
parser.add_argument(
"--runner",
type=str,
default="d2go.runner.GeneralizedRCNNRunner",
help="Full class name, i.e. (package.)module.class",
)
parser.add_argument(
"--config-file",
help="path to config file",
default="",
required=requires_config_file,
metavar="FILE",
)
parser.add_argument(
"--output-dir",
help="When given, this will override the OUTPUT_DIR in the config-file",
required=requires_output_dir,
default=None,
type=str,
)
parser.add_argument(
"opts",
help="Modify config options using the command-line",
default=None,
nargs=argparse.REMAINDER,
)
if distributed:
parser.add_argument(
"--num-processes", type=int, default=1, help="number of gpus per machine"
)
parser.add_argument("--num-machines", type=int, default=1)
parser.add_argument(
"--machine-rank",
type=int,
default=0,
help="the rank of this machine (unique per machine)",
)
parser.add_argument(
"--dist-url", default="file:///tmp/d2go_dist_file_{}".format(time.time())
)
parser.add_argument("--dist-backend", type=str, default="NCCL")
if not requires_config_file:
        # NOTE: if no yaml config file is passed, the user should explicitly set
        # the following args, and use `opts` for less common use cases.
parser.add_argument(
"--datasets",
type=str,
nargs="+",
required=True,
help="cfg.DATASETS.TEST",
)
parser.add_argument(
"--min_size",
type=int,
required=True,
help="cfg.INPUT.MIN_SIZE_TEST",
)
parser.add_argument(
"--max_size",
type=int,
required=True,
help="cfg.INPUT.MAX_SIZE_TEST",
)
return parser
return parser
def create_cfg_from_cli_args(args, default_cfg):
"""
    Instead of loading from defaults.py, this binary only includes the necessary
    configs, building them from scratch and overriding them from args. There are
    two levels of config:
        _C: the config system used by this binary, which is a sub-set of the
            training config, overridden by configurable_cfg. It can also be
            overridden by args.opts for convenience.
        configurable_cfg: common configs that the user should explicitly specify
            in the args.
"""
_C = CN()
_C.INPUT = default_cfg.INPUT
_C.DATASETS = default_cfg.DATASETS
_C.DATALOADER = default_cfg.DATALOADER
_C.TEST = default_cfg.TEST
if hasattr(default_cfg, "D2GO_DATA"):
_C.D2GO_DATA = default_cfg.D2GO_DATA
if hasattr(default_cfg, "TENSORBOARD"):
_C.TENSORBOARD = default_cfg.TENSORBOARD
# NOTE configs below might not be necessary, but must add to make code work
_C.MODEL = CN()
_C.MODEL.META_ARCHITECTURE = default_cfg.MODEL.META_ARCHITECTURE
_C.MODEL.MASK_ON = default_cfg.MODEL.MASK_ON
_C.MODEL.KEYPOINT_ON = default_cfg.MODEL.KEYPOINT_ON
_C.MODEL.LOAD_PROPOSALS = default_cfg.MODEL.LOAD_PROPOSALS
    assert _C.MODEL.LOAD_PROPOSALS is False, "caffe2 models don't support LOAD_PROPOSALS"
_C.OUTPUT_DIR = args.output_dir
configurable_cfg = [
"DATASETS.TEST",
args.datasets,
"INPUT.MIN_SIZE_TEST",
args.min_size,
"INPUT.MAX_SIZE_TEST",
args.max_size,
]
cfg = _C.clone()
cfg.merge_from_list(configurable_cfg)
cfg.merge_from_list(args.opts)
return cfg
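# Illustrative invocation (a sketch; the tool name, dataset id and sizes below are
# made up): a binary built on this parser, run without a yaml config, could look like
#
#   python my_eval_tool.py --datasets my_coco_test --min_size 320 --max_size 640 \
#       --output-dir ./out MODEL.MASK_ON False
#
# where the trailing "opts" pairs override individual keys of the resulting cfg.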
def prepare_for_launch(args):
"""
Load config, figure out working directory, create runner.
- when args.config_file is empty, returned cfg will be the default one
    - returned output_dir will always be non-empty; args.output_dir has higher
        priority than cfg.OUTPUT_DIR.
"""
print(args)
runner = create_runner(args.runner)
cfg = runner.get_default_cfg()
if args.config_file:
with PathManager.open(reroute_config_path(args.config_file), "r") as f:
print("Loaded config file {}:\n{}".format(args.config_file, f.read()))
cfg.merge_from_file(args.config_file)
cfg.merge_from_list(args.opts)
else:
cfg = create_cfg_from_cli_args(args, default_cfg=cfg)
cfg.freeze()
assert args.output_dir or args.config_file
output_dir = args.output_dir or cfg.OUTPUT_DIR
return cfg, output_dir, runner
def setup_after_launch(cfg, output_dir, runner):
"""
Set things up after entering DDP, including
- creating working directory
- setting up logger
- logging environment
- initializing runner
"""
create_dir_on_global_main_process(output_dir)
comm.synchronize()
setup_loggers(output_dir)
cfg.freeze()
if cfg.OUTPUT_DIR != output_dir:
with temp_defrost(cfg):
logger.warning(
"Override cfg.OUTPUT_DIR ({}) to be the same as output_dir {}".format(
cfg.OUTPUT_DIR, output_dir
)
)
cfg.OUTPUT_DIR = output_dir
logger.info("Initializing runner ...")
runner = initialize_runner(runner, cfg)
log_info(cfg, runner)
dump_cfg(cfg, os.path.join(output_dir, "config.yaml"))
auto_scale_world_size(cfg, new_world_size=comm.get_world_size())
@run_once()
def setup_loggers(output_dir, color=None):
if not color:
color = get_launch_environment() == "local"
d2_logger = setup_logger(
output_dir,
distributed_rank=comm.get_rank(),
color=color,
name="detectron2",
abbrev_name="d2",
)
fvcore_logger = setup_logger(
output_dir,
distributed_rank=comm.get_rank(),
color=color,
name="fvcore",
)
d2go_logger = setup_logger(
output_dir,
distributed_rank=comm.get_rank(),
color=color,
name="d2go",
abbrev_name="d2go",
)
mobile_cv_logger = setup_logger(
output_dir,
distributed_rank=comm.get_rank(),
color=color,
name="mobile_cv",
abbrev_name="mobile_cv",
)
# NOTE: all above loggers have FileHandler pointing to the same file as d2_logger.
# Those files are opened upon creation, but it seems fine in 'a' mode.
    # NOTE: the root logger might have been configured by other applications;
    # since these loggers are already below the top level, just don't propagate to root.
d2_logger.propagate = False
fvcore_logger.propagate = False
d2go_logger.propagate = False
mobile_cv_logger.propagate = False
def log_info(cfg, runner):
num_processes = get_num_processes_per_machine()
logger.info(
"Using {} processes per machine. Rank of current process: {}".format(
num_processes, comm.get_rank()
)
)
logger.info("Environment info:\n" + collect_env_info())
logger.info("Running with full config:\n{}".format(cfg))
logger.info("Running with runner: {}".format(runner))
def dump_cfg(cfg, path):
if comm.is_main_process():
with PathManager.open(path, "w") as f:
f.write(cfg.dump())
logger.info("Full config saved to {}".format(path))
def create_dir_on_local_main_process(dir):
if get_local_rank() == 0 and dir:
PathManager.mkdirs(dir)
def create_dir_on_global_main_process(dir):
if comm.get_rank() == 0 and dir:
PathManager.mkdirs(dir)
def initialize_runner(runner, cfg):
runner = runner or GeneralizedRCNNRunner()
runner._initialize(cfg)
return runner
def caffe2_global_init(logging_print_net_summary=0, num_threads=None):
if num_threads is None:
if get_num_processes_per_machine() > 1:
# by default use single thread when DDP with multiple processes
num_threads = 1
else:
# GlobalInit will clean PyTorch's num_threads and set it to 1,
# thus keep PyTorch's default value to make it truly default.
num_threads = torch.get_num_threads()
if not get_local_rank() == 0:
logging_print_net_summary = 0 # only enable for local main process
from caffe2.python import workspace
workspace.GlobalInit(
[
"caffe2",
"--caffe2_log_level=2",
"--caffe2_logging_print_net_summary={}".format(logging_print_net_summary),
"--caffe2_omp_num_threads={}".format(num_threads),
"--caffe2_mkl_num_threads={}".format(num_threads),
]
)
logger.info("Using {} threads after GlobalInit".format(torch.get_num_threads()))
def post_mortem_if_fail_for_main(main_func):
def new_main_func(cfg, output_dir, *args, **kwargs):
pdb_ = (
MultiprocessingPdb(FolderLock(output_dir))
if comm.get_world_size() > 1
else None # fallback to use normal pdb for single process
)
return post_mortem_if_fail(pdb_)(main_func)(cfg, output_dir, *args, **kwargs)
return PicklableWrapper(new_main_func)
| 31.544343 | 88 | 0.654678 | [
"Apache-2.0"
] | Dinesh101041/d2go | d2go/setup.py | 10,315 | Python |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import numpy as np
import torch
from ax.exceptions.model import ModelError
from ax.models.torch.utils import (
_generate_sobol_points,
is_noiseless,
normalize_indices,
subset_model,
tensor_callable_to_array_callable,
)
from ax.utils.common.testutils import TestCase
from botorch.models import HeteroskedasticSingleTaskGP, ModelListGP, SingleTaskGP
from torch import Tensor
class TorchUtilsTest(TestCase):
def test_is_noiseless(self):
x = torch.zeros(1, 1)
y = torch.zeros(1, 1)
se = torch.zeros(1, 1)
model = SingleTaskGP(x, y)
self.assertTrue(is_noiseless(model))
model = HeteroskedasticSingleTaskGP(x, y, se)
self.assertFalse(is_noiseless(model))
with self.assertRaises(ModelError):
is_noiseless(ModelListGP())
def testNormalizeIndices(self):
indices = [0, 2]
nlzd_indices = normalize_indices(indices, 3)
self.assertEqual(nlzd_indices, indices)
nlzd_indices = normalize_indices(indices, 4)
self.assertEqual(nlzd_indices, indices)
indices = [0, -1]
nlzd_indices = normalize_indices(indices, 3)
self.assertEqual(nlzd_indices, [0, 2])
with self.assertRaises(ValueError):
nlzd_indices = normalize_indices([3], 3)
with self.assertRaises(ValueError):
nlzd_indices = normalize_indices([-4], 3)
def testSubsetModel(self):
x = torch.zeros(1, 1)
y = torch.rand(1, 2)
obj_t = torch.rand(2)
model = SingleTaskGP(x, y)
self.assertEqual(model.num_outputs, 2)
# basic test, can subset
obj_weights = torch.tensor([1.0, 0.0])
subset_model_results = subset_model(model, obj_weights)
model_sub = subset_model_results.model
obj_weights_sub = subset_model_results.objective_weights
ocs_sub = subset_model_results.outcome_constraints
obj_t_sub = subset_model_results.objective_thresholds
self.assertIsNone(ocs_sub)
self.assertIsNone(obj_t_sub)
self.assertEqual(model_sub.num_outputs, 1)
self.assertTrue(torch.equal(obj_weights_sub, torch.tensor([1.0])))
# basic test, cannot subset
obj_weights = torch.tensor([1.0, 2.0])
subset_model_results = subset_model(model, obj_weights)
model_sub = subset_model_results.model
obj_weights_sub = subset_model_results.objective_weights
ocs_sub = subset_model_results.outcome_constraints
obj_t_sub = subset_model_results.objective_thresholds
self.assertIsNone(ocs_sub)
self.assertIsNone(obj_t_sub)
self.assertIs(model_sub, model) # check identity
self.assertIs(obj_weights_sub, obj_weights) # check identity
self.assertTrue(torch.equal(subset_model_results.indices, torch.tensor([0, 1])))
# test w/ outcome constraints, can subset
obj_weights = torch.tensor([1.0, 0.0])
ocs = (torch.tensor([[1.0, 0.0]]), torch.tensor([1.0]))
subset_model_results = subset_model(model, obj_weights, ocs)
model_sub = subset_model_results.model
obj_weights_sub = subset_model_results.objective_weights
ocs_sub = subset_model_results.outcome_constraints
obj_t_sub = subset_model_results.objective_thresholds
self.assertEqual(model_sub.num_outputs, 1)
self.assertIsNone(obj_t_sub)
self.assertTrue(torch.equal(obj_weights_sub, torch.tensor([1.0])))
self.assertTrue(torch.equal(ocs_sub[0], torch.tensor([[1.0]])))
self.assertTrue(torch.equal(ocs_sub[1], torch.tensor([1.0])))
self.assertTrue(torch.equal(subset_model_results.indices, torch.tensor([0])))
# test w/ outcome constraints, cannot subset
obj_weights = torch.tensor([1.0, 0.0])
ocs = (torch.tensor([[0.0, 1.0]]), torch.tensor([1.0]))
subset_model_results = subset_model(model, obj_weights, ocs)
model_sub = subset_model_results.model
obj_weights_sub = subset_model_results.objective_weights
ocs_sub = subset_model_results.outcome_constraints
obj_t_sub = subset_model_results.objective_thresholds
self.assertIs(model_sub, model) # check identity
self.assertIsNone(obj_t_sub)
self.assertIs(obj_weights_sub, obj_weights) # check identity
self.assertIs(ocs_sub, ocs) # check identity
self.assertTrue(torch.equal(subset_model_results.indices, torch.tensor([0, 1])))
# test w/ objective thresholds, cannot subset
obj_weights = torch.tensor([1.0, 0.0])
ocs = (torch.tensor([[0.0, 1.0]]), torch.tensor([1.0]))
subset_model_results = subset_model(model, obj_weights, ocs, obj_t)
model_sub = subset_model_results.model
obj_weights_sub = subset_model_results.objective_weights
ocs_sub = subset_model_results.outcome_constraints
obj_t_sub = subset_model_results.objective_thresholds
self.assertIs(model_sub, model) # check identity
self.assertIs(obj_t, obj_t_sub)
self.assertIs(obj_weights_sub, obj_weights) # check identity
self.assertTrue(torch.equal(subset_model_results.indices, torch.tensor([0, 1])))
self.assertIs(ocs_sub, ocs) # check identity
# test w/ objective thresholds, can subset
obj_weights = torch.tensor([1.0, 0.0])
ocs = (torch.tensor([[1.0, 0.0]]), torch.tensor([1.0]))
subset_model_results = subset_model(model, obj_weights, ocs, obj_t)
model_sub = subset_model_results.model
obj_weights_sub = subset_model_results.objective_weights
ocs_sub = subset_model_results.outcome_constraints
obj_t_sub = subset_model_results.objective_thresholds
self.assertTrue(torch.equal(subset_model_results.indices, torch.tensor([0])))
self.assertEqual(model_sub.num_outputs, 1)
self.assertTrue(torch.equal(obj_weights_sub, torch.tensor([1.0])))
self.assertTrue(torch.equal(obj_t_sub, obj_t[:1]))
self.assertTrue(torch.equal(ocs_sub[0], torch.tensor([[1.0]])))
self.assertTrue(torch.equal(ocs_sub[1], torch.tensor([1.0])))
# test unsupported
yvar = torch.ones(1, 2)
model = HeteroskedasticSingleTaskGP(x, y, yvar)
subset_model_results = subset_model(model, obj_weights)
model_sub = subset_model_results.model
obj_weights_sub = subset_model_results.objective_weights
ocs_sub = subset_model_results.outcome_constraints
obj_t_sub = subset_model_results.objective_thresholds
self.assertIsNone(ocs_sub)
self.assertIs(model_sub, model) # check identity
self.assertIs(obj_weights_sub, obj_weights) # check identity
self.assertTrue(torch.equal(subset_model_results.indices, torch.tensor([0, 1])))
# test error on size inconsistency
obj_weights = torch.ones(3)
with self.assertRaises(RuntimeError):
subset_model(model, obj_weights)
def testGenerateSobolPoints(self):
bounds = [(0.0, 1.0) for _ in range(3)]
linear_constraints = (
torch.tensor([[1, -1, 0]], dtype=torch.double),
torch.tensor([[0]], dtype=torch.double),
)
def test_rounding_func(x: Tensor) -> Tensor:
return x
gen_sobol = _generate_sobol_points(
n_sobol=100,
bounds=bounds,
device=torch.device("cpu"),
linear_constraints=linear_constraints,
rounding_func=test_rounding_func,
)
self.assertEqual(len(gen_sobol), 100)
self.assertIsInstance(gen_sobol, Tensor)
def testTensorCallableToArrayCallable(self):
def tensor_func(x: Tensor) -> Tensor:
return np.exp(x)
new_func = tensor_callable_to_array_callable(
tensor_func=tensor_func, device=torch.device("cpu")
)
self.assertTrue(callable(new_func))
self.assertIsInstance(new_func(np.array([1.0, 2.0])), np.ndarray)
| 46.367232 | 88 | 0.680151 | [
"MIT"
] | Balandat/Ax | ax/models/tests/test_torch_model_utils.py | 8,207 | Python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
.. py:currentmodule:: trim.montecarlo.source
.. moduleauthor:: Hendrix Demers <[email protected]>
"""
# Copyright 2019 Hendrix Demers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Standard library modules.
# Third party modules.
# Local modules.
# Project modules.
from trim.montecarlo.math import Point
# Globals and constants variables.
GROUP_SOURCE = "source"
GROUP_POSITIONS = "position (nm)"
GROUP_DIRECTION = "direction"
ATTRIBUTE_KINETIC_ENERGY = "kinetic energy (keV)"
ATTRIBUTE_MASS = "mass (amu)"
ATTRIBUTE_ATOMIC_NUMBER = "atomic number"
class Source:
def __init__(self):
# Default to Ar at 6 keV
self.position_nm = Point(0.0, 0.0, 0.0)
self.direction = Point(0.0, 0.0, -1.0)
self.kinetic_energy_keV = 6.0
self.mass_amu = 39.962
self.atomic_number = 18
def write(self, parent):
group = parent.require_group(GROUP_SOURCE)
position_group = group.require_group(GROUP_POSITIONS)
self.position_nm.write(position_group)
direction_group = group.require_group(GROUP_DIRECTION)
self.direction.write(direction_group)
group.attrs[ATTRIBUTE_KINETIC_ENERGY] = self.kinetic_energy_keV
group.attrs[ATTRIBUTE_MASS] = self.mass_amu
group.attrs[ATTRIBUTE_ATOMIC_NUMBER] = self.atomic_number
def read(self, parent):
group = parent.require_group(GROUP_SOURCE)
position_group = group.require_group(GROUP_POSITIONS)
self.position_nm.read(position_group)
direction_group = group.require_group(GROUP_DIRECTION)
self.direction.read(direction_group)
self.kinetic_energy_keV = group.attrs[ATTRIBUTE_KINETIC_ENERGY]
self.mass_amu = group.attrs[ATTRIBUTE_MASS]
self.atomic_number = group.attrs[ATTRIBUTE_ATOMIC_NUMBER]
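# Illustrative usage (a sketch; assumes an h5py-style file or group object, which
# provides the require_group() and attrs interfaces used above):
#
#   import h5py
#   source = Source()
#   with h5py.File("options.h5", "w") as f:
#       source.write(f)
#   with h5py.File("options.h5", "r") as f:
#       source.read(f)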
| 30.423077 | 74 | 0.719343 | [
"Apache-2.0"
] | drix00/pytrim-montecarlo | trim/montecarlo/options/source.py | 2,373 | Python |
#!/usr/bin/env python3
import utils
utils.check_version((3,7)) # make sure we are running at least Python 3.7
utils.clear() # clear the screen
print('Greetings!')
color = ''
while (color != 'red'):
    color = input("What is my favorite color? ")
    color = color.lower().strip()
    if (color == 'red'):
        print('Correct!')
    elif (color == 'pink'):
        print('Close!')
    else:
        print('Sorry, try again.')
"MIT"
] | BraffordHunter/E01a-Control-Structues | main8.py | 482 | Python |
from qtpy.QtWidgets import QDialog, QLineEdit, QPushButton, QLabel, QVBoxLayout
from brainrender_gui.style import style, update_css
class AddRegionsWindow(QDialog):
left = 250
top = 250
width = 400
height = 300
label_msg = (
"Write the acronyms of brainregions "
+ "you wish to add.\n[as 'space' separated strings (e.g.: STN TH)]"
)
def __init__(self, main_window, palette):
"""
        Creates a new window for the user to input
        which regions to add to the scene.
Arguments:
----------
main_window: reference to the App's main window
palette: main_window's palette, used to style widgets
"""
super().__init__()
self.setWindowTitle("Add brain regions")
self.ui()
self.main_window = main_window
self.setStyleSheet(update_css(style, palette))
def ui(self):
"""
Define UI's elements
"""
self.setGeometry(self.left, self.top, self.width, self.height)
layout = QVBoxLayout()
# Regions
label = QLabel(self)
label.setObjectName("PopupLabel")
label.setText(self.label_msg)
self.textbox = QLineEdit(self)
# Alpha
alpha_label = QLabel(self)
alpha_label.setObjectName("PopupLabel")
alpha_label.setText("Alpha")
self.alpha_textbox = QLineEdit(self)
self.alpha_textbox.setText(str(1.0))
# Color
color_label = QLabel(self)
color_label.setObjectName("PopupLabel")
color_label.setText("Color")
self.color_textbox = QLineEdit(self)
self.color_textbox.setText("atlas")
# Create a button in the window
self.button = QPushButton("Add regions", self)
self.button.clicked.connect(self.on_click)
self.button.setObjectName("RegionsButton")
layout.addWidget(label)
layout.addWidget(self.textbox)
layout.addWidget(alpha_label)
layout.addWidget(self.alpha_textbox)
layout.addWidget(color_label)
layout.addWidget(self.color_textbox)
layout.addWidget(self.button)
self.setLayout(layout)
self.show()
def on_click(self):
"""
On click or 'Enter' get the regions
from the input and call the add_regions
method of the main window
"""
regions = self.textbox.text().split(" ")
self.main_window.add_regions(
regions, self.alpha_textbox.text(), self.color_textbox.text()
)
self.close()
| 27.526316 | 79 | 0.602677 | [
"BSD-3-Clause"
] | brainglobe/bg-brainrender-gui | brainrender_gui/widgets/add_regions.py | 2,615 | Python |
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import distro
import logging
import platform
from pathlib import Path
from typing import (
Union,
)
from mozphab import environment
from .bmo import BMOAPIError
from .config import config
from .environment import MOZPHAB_VERSION
from .helpers import prompt
from .logger import logger
from .user import user_data
class Telemetry:
def __init__(self):
"""Initiate Glean, load pings and metrics."""
import glean
logging.getLogger("glean").setLevel(logging.DEBUG)
logger.debug("Initializing Glean...")
glean.Glean.initialize(
application_id="MozPhab",
application_version=MOZPHAB_VERSION,
upload_enabled=True,
configuration=glean.Configuration(),
data_dir=Path(environment.MOZBUILD_PATH) / "telemetry-data",
)
self._pings = glean.load_pings(environment.MOZPHAB_MAIN_DIR / "pings.yaml")
self._metrics = glean.load_metrics(
environment.MOZPHAB_MAIN_DIR / "metrics.yaml"
)
@property
def environment(self):
return self._metrics.mozphab.environment
@property
def usage(self):
return self._metrics.mozphab.usage
@property
def user(self):
return self._metrics.mozphab.user
@property
def submission(self):
return self._metrics.mozphab.submission
def _set_os(self):
"""Collect human readable information about the OS version.
For Linux it is setting a distribution name and version.
"""
system, node, release, version, machine, processor = platform.uname()
if system == "Linux":
distribution_name, distribution_number, _ = distro.linux_distribution(
full_distribution_name=False
)
distribution_version = " ".join([distribution_name, distribution_number])
elif system == "Windows":
_release, distribution_version, _csd, _ptype = platform.win32_ver()
elif system == "Darwin":
distribution_version, _versioninfo, _machine = platform.mac_ver()
else:
distribution_version = release
self.environment.distribution_version.set(distribution_version)
def _set_python(self):
self.environment.python_version.set(platform.python_version())
def set_vcs(self, repo):
self.environment.vcs.name.set(repo.vcs)
self.environment.vcs.version.set(repo.vcs_version)
def submit(self):
self._pings.usage.submit()
logger.debug("Telemetry submit called.")
def set_metrics(self, args):
"""Sets metrics common to all commands."""
self.usage.command.set(args.command)
self._set_os()
self._set_python()
self.usage.override_switch.set(
getattr(args, "force_vcs", False) or getattr(args, "force", False)
)
self.usage.command_time.start()
self.user.installation.set(user_data.installation_id)
self.user.id.set(user_data.user_code)
class TelemetryDisabled:
"""Dummy class that does nothing."""
def __init__(*args, **kwargs):
pass
def __call__(self, *args, **kwargs):
return self
def __getattr__(self, *args, **kwargs):
return self
def update_user_data():
"""Update user_data to enable or disable Telemetry.
    If the employment data has changed, Telemetry might be switched on
    automatically: for employees the opt-in decision is made automatically, while
    non-employees are asked whether they want to enable data collection.
"""
is_employee_changed = user_data.set_user_data()
if not is_employee_changed:
return
# Switch on Telemetry for employee or ask to opt-in for non-employee
if user_data.is_employee:
logger.warning(
"Enabled collecting MozPhab usage data.\n"
"See https://moz-conduit.readthedocs.io/en/latest"
"/mozphab-data-collection.html"
)
config.telemetry_enabled = True
else:
# user is new or no longer employee
opt_in = (
prompt(
"Would you like to allow MozPhab to collect usage data?",
["Yes", "No"],
)
== "Yes"
)
if opt_in:
config.telemetry_enabled = True
else:
logger.info(
"MozPhab usage data collection disabled.\n"
"See https://moz-conduit.readthedocs.io/en/latest"
"/mozphab-data-collection.html"
)
config.telemetry_enabled = False
config.write()
def configure_telemetry(args):
if args.command == "install-certificate":
# Collecting data without a certificate is not possible.
_Globals.telemetry = TelemetryDisabled()
return
if args.command == "self-update":
# Avoid locking issues on Windows by not loading Glean when we're updating
_Globals.telemetry = TelemetryDisabled()
return
# `user_data` file will remain empty until user calls MozPhab with a command
# requiring existence of the Repository.
if args.needs_repo:
try:
update_user_data()
except BMOAPIError as err:
# Error in retrieving user status.
# We quietly allow to work without enabling Telemetry.
logger.debug("BMOAPIErrori: %s", err)
_Globals.telemetry = TelemetryDisabled()
return
# We can't call telemetry if user data was never collected.
if not config.telemetry_enabled or not user_data.is_data_collected:
_Globals.telemetry = TelemetryDisabled()
return
# Enable telemetry by swapping the telemetry global with a Glean backed object.
_Globals.telemetry = Telemetry()
telemetry().set_metrics(args)
def telemetry():
return _Globals.telemetry
class _Globals:
"""Container for holding globals in a way that can be easily replaced."""
telemetry: Union[Telemetry, TelemetryDisabled] = TelemetryDisabled()
| 31.489899 | 85 | 0.648597 | [
"MPL-2.0"
] | cgsheeh/review | mozphab/telemetry.py | 6,235 | Python |
import os
import numpy as np
import pickle
import pathlib
from random import shuffle, choice
def get_info_dataset(dataset_path, update=False):
    # TODO: Implement some checks to verify edits to the dataset since the last pickle.dump(data)
storing_data_path = dataset_path + "/info.txt"
if update and os.path.exists(dataset_path + "/info.txt"):
os.remove(dataset_path + "/info.txt")
if os.path.isfile(storing_data_path):
with open(storing_data_path, 'rb') as filehandle:
data = pickle.load(filehandle)
class_info = data['class_info']
ds_info = data['ds_info']
# CHECKS if the paths stored match the DB
            # TODO: This check just picks 3 elements and checks their existence; it can be improved
if not os.path.exists(choice(ds_info['train_paths'])) or not os.path.exists(choice(ds_info['val_paths'])) \
or not os.path.exists(choice(ds_info['test_paths'])):
print(f"Dataset paths seem incorrect, "
f"you should update the dataset info running '-m DATA -d {dataset_path}")
exit()
# Shuffle elements
else:
shuffle(ds_info['train_paths'])
shuffle(ds_info['val_paths'])
shuffle(ds_info['final_training_paths'])
shuffle(ds_info['test_paths'])
else:
# Create dataset filepaths
train_paths = [os.path.join(r, file) for r, d, f in os.walk(dataset_path + "/training/train")
for file in f if ".png" in file or ".jpg" in file]
val_paths = [os.path.join(r, file) for r, d, f in os.walk(dataset_path + "/training/val")
for file in f if ".png" in file or ".jpg" in file]
final_training_paths = [os.path.join(r, file) for r, d, f in os.walk(dataset_path + "/training")
for file in f if ".png" in file or ".jpg" in file]
test_paths = [os.path.join(r, file) for r, d, f in os.walk(dataset_path + "/test")
for file in f if ".png" in file or ".jpg" in file]
ds_info = {'ds_type': 'images', 'train_paths': train_paths, 'val_paths': val_paths, 'test_paths': test_paths,
'final_training_paths': final_training_paths}
temp_class_names = np.array([item.name for item in pathlib.Path(dataset_path + "/training/train").glob('*')])
        # Sort class_names to keep the same order, which influences training via one-hot encoding, across different machines
class_names = np.sort(temp_class_names, axis=-1)
nclasses = len(class_names)
class_info = {"class_names": class_names, "n_classes": nclasses}
# GENERAL STATS
size_train = len(train_paths)
size_val = len(val_paths)
size_test = len(test_paths)
class_info.update({"train_size": size_train, "val_size": size_val, "test_size": size_test, 'info': {}})
for name in class_names:
size_trainf = sum([len(files) for r, d, files in os.walk(dataset_path + "/training/train/{}".format(name))])
size_valf = sum([len(files) for r, d, files in os.walk(dataset_path + "/training/val/{}".format(name))])
size_testf = sum([len(files) for r, d, files in os.walk(dataset_path + "/test/{}".format(name))])
class_info['info']["{}".format(name)] = {}
class_info['info']["{}".format(name)]['TRAIN'] = size_trainf
class_info['info']["{}".format(name)]['VAL'] = size_valf
class_info['info']["{}".format(name)]['TEST'] = size_testf
class_info['info']["{}".format(name)]['TOT'] = size_testf + size_valf + size_trainf
with open(storing_data_path, 'wb') as filehandle:
data = {'ds_info': ds_info, 'class_info': class_info}
pickle.dump(data, filehandle)
return class_info, ds_info
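# Illustrative usage (a sketch; the dataset path below is hypothetical):
#
#   class_info, ds_info = get_info_dataset("datasets/my_dataset")
#   class_info["n_classes"]        # number of class folders under training/train
#   ds_info["train_paths"][:5]     # first few (shuffled) training image paths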
| 48.8375 | 120 | 0.604044 | [
"MIT"
] | 1Stohk1/tami | utils/preprocessing_data.py | 3,907 | Python |
import json
import logging
import sys
from typing import Any, Callable, Dict, List
from dhis2.core.http import BaseHttpRequest
from dhis2.core.inventory import HostResolved, Inventory, resolve_one
from fhir.resources.bundle import Bundle
from .models.svcm import CodeList, SVCMConfig
from .svcm_resources import build_bundle
log = logging.getLogger(__name__)
def get_source(config: SVCMConfig, inventory: Inventory) -> Callable[[Any], Any]:
host = resolve_one(config.source.id, inventory)
if "dhis2" not in host.type:
log.error("Only 'dhis2' source type is currently supported")
sys.exit(-1)
log.info(f"Creating source from '{host.key}' with base url '{host.baseUrl}'")
def fn():
filters = []
# https://docs.dhis2.org/2.35/en/developer/html/webapi_metadata_object_filter.html
if config.source.lastUpdated:
filters.append(f"lastUpdated:ge:{config.source.lastUpdated}")
option_sets_filter = list(map(lambda x: f"id:eq:{x}", config.source.filters.optionSets))
option_sets_filter.extend(filters)
option_sets = BaseHttpRequest(host).get(
"api/optionSets",
params={
"fields": "id,code,name,version,translations,options[id,code,name,translations]",
"rootJunction": "OR",
"filter": option_sets_filter,
"paging": False,
},
)
categories_filter = list(map(lambda x: f"id:eq:{x}", config.source.filters.categories))
categories_filter.extend(filters)
categories = BaseHttpRequest(host).get(
"api/categories",
params={
"fields": "id,code,name,translations,categoryOptions::rename(options)[id,code,name,translations]",
"rootJunction": "OR",
"filter": categories_filter,
"paging": False,
},
)
data = {
"optionSets": option_sets.get("optionSets", []),
"categories": categories.get("categories", []),
}
return (
host,
data,
)
return fn
def get_target(config: SVCMConfig, inventory: Inventory) -> Callable[[Any], Any]:
id = config.target.id
if "log://" == id:
log.info("Creating 'log://' target")
def target_log(data: Any):
log.info("Writing result to stdout")
print(json.dumps(data[1].as_json(), indent=2))
return target_log
elif "null://" == id:
log.info("Creating 'null://' target")
def target_null(data: Any):
log.info("Doing nothing with result")
return target_null
host = resolve_one(id, inventory)
if "dhis2" in host.type:
log.error("'dhis2' target type is not currently supported")
sys.exit(-1)
log.info(f"Creating target from '{host.key}' with base url '{host.baseUrl}'")
def target_push(data: Any):
payload: Bundle = data[1]
return BaseHttpRequest(host).post("", data=payload.as_json())
return target_push
def transform(config: SVCMConfig, data: Any):
host: HostResolved = data[0]
payload: Dict[str, Any] = data[1]
code_lists: List[CodeList] = []
option_sets = payload.get("optionSets", [])
categories = payload.get("categories", [])
for option_set in option_sets:
code_lists.append(CodeList(**option_set))
for category in categories:
code_lists.append(CodeList(**category, type="categories"))
return (
host,
build_bundle(code_lists, host.baseUrl),
)
def run(config: SVCMConfig, inventory: Inventory):
log.info(f"SVCM job '{config.id}'' starting")
source = get_source(config, inventory)
target = get_target(config, inventory)
data = source()
data = transform(config, data)
data = target(data)
if data:
log.info(f"Got response from target system {data}")
log.info(f"SVCM job '{config.id}' finished")
| 28.204225 | 114 | 0.615481 | [
"BSD-3-Clause"
] | dhis2/dhis2-python | dhis2_core/src/dhis2/code_list/svcm.py | 4,005 | Python |
#!/usr/bin/env python
"""
This is a crude script for detecting reference leaks in the C-based cbor2
implementation. It is by no means fool-proof and won't pick up all possible ref
leaks, but it is a reasonable "confidence test" that things aren't horribly
wrong. The script assumes you're in an environment with objgraph and cbor2
installed.
The script outputs a nicely formatted table of the tests run, and the number of
"extra" objects that existed after the tests (indicating a ref-leak), or "-" if
no extra objects existed. The ideal output is obviously "-" in all rows.
"""
import sys
import objgraph
import tracemalloc
from datetime import datetime, timezone, timedelta
from fractions import Fraction
from decimal import Decimal
from collections import namedtuple, OrderedDict
def import_cbor2():
# Similar hack to that used in tests/conftest to get separate C and Python
# implementations
import cbor2
import cbor2.types
import cbor2.encoder
import cbor2.decoder
class Module(object):
# Mock module class
pass
py_cbor2 = Module()
for source in (cbor2.types, cbor2.encoder, cbor2.decoder):
for name in dir(source):
setattr(py_cbor2, name, getattr(source, name))
return cbor2, py_cbor2
c_cbor2, py_cbor2 = import_cbor2()
UTC = timezone.utc
TEST_VALUES = [
# label, kwargs, value
('None', {}, None),
('10e0', {}, 1),
('10e12', {}, 1000000000000),
('10e29', {}, 100000000000000000000000000000),
('-10e0', {}, -1),
('-10e12', {}, -1000000000000),
('-10e29', {}, -100000000000000000000000000000),
('float1', {}, 1.0),
('float2', {}, 3.8),
('str', {}, 'foo'),
('bigstr', {}, 'foobarbaz ' * 1000),
('bytes', {}, b'foo'),
('bigbytes', {}, b'foobarbaz\x00' * 1000),
('datetime', {'timezone': UTC}, datetime(2019, 5, 9, 22, 4, 5, 123456)),
('decimal', {}, Decimal('1.1')),
('fraction', {}, Fraction(1, 5)),
('intlist', {}, [1, 2, 3]),
('bigintlist', {}, [1, 2, 3] * 1000),
('strlist', {}, ['foo', 'bar', 'baz']),
('bigstrlist', {}, ['foo', 'bar', 'baz'] * 1000),
('dict', {}, {'a': 1, 'b': 2, 'c': 3}),
('bigdict', {}, {'a' * i: i for i in range(1000)}),
('set', {}, {1, 2, 3}),
('bigset', {}, set(range(1000))),
('bigdictlist', {}, [{'a' * i: i for i in range(100)}] * 100),
('objectdict', {'timezone': UTC},
{'name': 'Foo', 'species': 'cat', 'dob': datetime(2013, 5, 20), 'weight': 4.1}),
('objectdictlist', {'timezone': UTC},
[{'name': 'Foo', 'species': 'cat', 'dob': datetime(2013, 5, 20), 'weight': 4.1}] * 100),
]
Leaks = namedtuple('Leaks', ('count', 'comparison'))
Tests = namedtuple('Test', ('objgraph', 'malloc'))
Result = namedtuple('Result', ('encoding', 'decoding', 'roundtrip'))
peak = {}
def growth():
return objgraph.growth(limit=None, peak_stats=peak)
def test_malloc(op):
count = 0
start = datetime.now()
# NOTE: Filter pointing to the op() line in the loop below, because we're
# only interested in memory allocated by that line. Naturally, if this file
# is edited, the lineno parameter below must be adjusted!
only_op = tracemalloc.Filter(True, __file__, lineno=102, all_frames=True)
tracemalloc.start(10)
try:
        # Perform a pre-run of op so that any one-time memory allocations
        # (module imports, etc.) don't affect the later diffs
op()
before = tracemalloc.take_snapshot().filter_traces([only_op])
while True:
count += 1
op()
if datetime.now() - start > timedelta(seconds=0.2):
break
after = tracemalloc.take_snapshot().filter_traces([only_op])
diff = after.compare_to(before, 'traceback')
diff = [entry for entry in diff if entry.size_diff > 0]
return count, diff
finally:
tracemalloc.stop()
def test_objgraph(op):
count = 0
start = datetime.now()
# See notes above
op()
growth()
while True:
count += 1
op()
if datetime.now() - start > timedelta(seconds=0.2):
break
return count, growth()
def test(op):
return Tests(Leaks(*test_objgraph(op)), Leaks(*test_malloc(op)))
def format_leaks(result):
if result.objgraph.comparison:
return '%d objs (/%d)' % (
sum(leak[-1] for leak in result.objgraph.comparison),
result.objgraph.count)
elif result.malloc.comparison and (
result.malloc.count < result.malloc.comparison[0].size_diff):
# Running the loop always results in *some* memory allocation, but as
# long as the bytes allocated are less than the number of loops it's
# unlikely to be an actual leak
return '%d bytes (/%d)' % (
result.malloc.comparison[0].size_diff, result.malloc.count)
else:
return '-'
def output_table(results):
# Build table content
head = ('Test', 'Encoding', 'Decoding', 'Round-trip')
rows = [head] + [
(
label,
format_leaks(result.encoding),
format_leaks(result.decoding),
format_leaks(result.roundtrip),
)
for label, result in results.items()
]
# Format table output
cols = zip(*rows)
col_widths = [max(len(row) for row in col) for col in cols]
sep = ''.join((
'+-',
'-+-'.join('-' * width for width in col_widths),
'-+',
))
print(sep)
print(''.join((
'| ',
' | '.join(
'{value:<{width}}'.format(value=value, width=width)
for value, width in zip(head, col_widths)
),
' |',
)))
print(sep)
for row in rows[1:]:
print(''.join((
'| ',
' | '.join(
'{value:<{width}}'.format(value=value, width=width)
for value, width in zip(row, col_widths)
),
' |',
)))
print(sep)
print()
print("""\
There *will* be false positives in the table above. Ignore leaks involving a
tiny number of objects (e.g. 1) or a small number of bytes (e.g. < 8Kb) as such
allocations are quite normal.
In the case of a ref-leak of an object that can reference others (lists, sets,
dicts, or anything with a __dict__), expect to see 100s or 1000s of "objs"
leaked. In the case of a ref-leak of a simple object (int, str, bytes, etc.),
expect to see a few hundred Kb allocated.
If leaks occur across the board, it's likely to be in something universal like
dump/load. If it's restricted to a type, check the encoding and decoding
methods for that type.
""")
def main():
results = OrderedDict()
sys.stderr.write("Testing")
sys.stderr.flush()
for name, kwargs, value in TEST_VALUES:
encoded = py_cbor2.dumps(value, **kwargs)
results[name] = Result(
encoding=test(lambda: c_cbor2.dumps(value, **kwargs)),
decoding=test(lambda: c_cbor2.loads(encoded)),
roundtrip=test(lambda: c_cbor2.loads(c_cbor2.dumps(value, **kwargs))),
)
sys.stderr.write(".")
sys.stderr.flush()
sys.stderr.write("\n")
sys.stderr.write("\n")
output_table(results)
sys.stderr.write("\n")
if __name__ == '__main__':
main()
| 33.738938 | 93 | 0.570361 | [
"MIT"
] | Chia-Network/cbor2 | scripts/ref_leak_test.py | 7,625 | Python |
#
# Copyright 2018 Analytics Zoo Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import numpy as np
import tempfile
import os
import torch
from zoo.chronos.model.forecast.lstm_forecaster import LSTMForecaster
from zoo.orca import init_orca_context, stop_orca_context
from unittest import TestCase
import pytest
def create_data():
num_train_samples = 1000
num_val_samples = 400
num_test_samples = 400
input_time_steps = 24
input_feature_dim = 2
output_time_steps = 1
output_feature_dim = 2
def get_x_y(num_samples):
x = np.random.rand(num_samples, input_time_steps, input_feature_dim).astype(np.float32)
y = x[:, -output_time_steps:, :]*2 + \
np.random.rand(num_samples, output_time_steps, output_feature_dim).astype(np.float32)
return x, y
train_data = get_x_y(num_train_samples)
val_data = get_x_y(num_val_samples)
test_data = get_x_y(num_test_samples)
return train_data, val_data, test_data
class TestChronosModelLSTMForecaster(TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def test_tcn_forecaster_fit_eva_pred(self):
train_data, val_data, test_data = create_data()
forecaster = LSTMForecaster(past_seq_len=24,
input_feature_num=2,
output_feature_num=2,
loss="mae",
lr=0.01)
train_loss = forecaster.fit(train_data, epochs=2)
test_pred = forecaster.predict(test_data[0])
assert test_pred.shape == test_data[1].shape
test_mse = forecaster.evaluate(test_data)
def test_tcn_forecaster_onnx_methods(self):
train_data, val_data, test_data = create_data()
forecaster = LSTMForecaster(past_seq_len=24,
input_feature_num=2,
output_feature_num=2,
loss="mae",
lr=0.01)
forecaster.fit(train_data, epochs=2)
try:
import onnx
import onnxruntime
pred = forecaster.predict(test_data[0])
pred_onnx = forecaster.predict_with_onnx(test_data[0])
np.testing.assert_almost_equal(pred, pred_onnx, decimal=5)
mse = forecaster.evaluate(test_data, multioutput="raw_values")
mse_onnx = forecaster.evaluate_with_onnx(test_data,
multioutput="raw_values")
np.testing.assert_almost_equal(mse, mse_onnx, decimal=5)
mse = forecaster.evaluate(test_data)
mse_onnx = forecaster.evaluate_with_onnx(test_data)
np.testing.assert_almost_equal(mse, mse_onnx, decimal=5)
except ImportError:
pass
def test_tcn_forecaster_save_load(self):
train_data, val_data, test_data = create_data()
forecaster = LSTMForecaster(past_seq_len=24,
input_feature_num=2,
output_feature_num=2,
loss="mae",
lr=0.01)
train_mse = forecaster.fit(train_data, epochs=2)
with tempfile.TemporaryDirectory() as tmp_dir_name:
ckpt_name = os.path.join(tmp_dir_name, "ckpt")
test_pred_save = forecaster.predict(test_data[0])
forecaster.save(ckpt_name)
forecaster.load(ckpt_name)
test_pred_load = forecaster.predict(test_data[0])
np.testing.assert_almost_equal(test_pred_save, test_pred_load)
def test_tcn_forecaster_runtime_error(self):
train_data, val_data, test_data = create_data()
forecaster = LSTMForecaster(past_seq_len=24,
input_feature_num=2,
output_feature_num=2,
loss="mae",
lr=0.01)
with pytest.raises(RuntimeError):
with tempfile.TemporaryDirectory() as tmp_dir_name:
ckpt_name = os.path.join(tmp_dir_name, "ckpt")
forecaster.save(ckpt_name)
with pytest.raises(RuntimeError):
forecaster.predict(test_data[0])
with pytest.raises(RuntimeError):
forecaster.evaluate(test_data)
def test_tcn_forecaster_shape_error(self):
train_data, val_data, test_data = create_data()
forecaster = LSTMForecaster(past_seq_len=24,
input_feature_num=2,
output_feature_num=1,
loss="mae",
lr=0.01)
with pytest.raises(AssertionError):
forecaster.fit(train_data, epochs=2)
def test_tcn_forecaster_xshard_input(self):
train_data, val_data, test_data = create_data()
print("original", train_data[0].dtype)
init_orca_context(cores=4, memory="2g")
from zoo.orca.data import XShards
def transform_to_dict(data):
return {'x': data[0], 'y': data[1]}
def transform_to_dict_x(data):
return {'x': data[0]}
train_data = XShards.partition(train_data).transform_shard(transform_to_dict)
val_data = XShards.partition(val_data).transform_shard(transform_to_dict)
test_data = XShards.partition(test_data).transform_shard(transform_to_dict_x)
for distributed in [True, False]:
forecaster = LSTMForecaster(past_seq_len=24,
input_feature_num=2,
output_feature_num=2,
loss="mae",
lr=0.01,
distributed=distributed)
forecaster.fit(train_data, epochs=2)
distributed_pred = forecaster.predict(test_data)
distributed_eval = forecaster.evaluate(val_data)
stop_orca_context()
def test_tcn_forecaster_distributed(self):
train_data, val_data, test_data = create_data()
init_orca_context(cores=4, memory="2g")
forecaster = LSTMForecaster(past_seq_len=24,
input_feature_num=2,
output_feature_num=2,
loss="mae",
lr=0.01,
distributed=True)
forecaster.fit(train_data, epochs=2)
distributed_pred = forecaster.predict(test_data[0])
distributed_eval = forecaster.evaluate(val_data)
model = forecaster.get_model()
assert isinstance(model, torch.nn.Module)
forecaster.to_local()
local_pred = forecaster.predict(test_data[0])
local_eval = forecaster.evaluate(val_data)
np.testing.assert_almost_equal(distributed_pred, local_pred, decimal=5)
try:
import onnx
import onnxruntime
local_pred_onnx = forecaster.predict_with_onnx(test_data[0])
local_eval_onnx = forecaster.evaluate_with_onnx(val_data)
np.testing.assert_almost_equal(distributed_pred, local_pred_onnx, decimal=5)
except ImportError:
pass
model = forecaster.get_model()
assert isinstance(model, torch.nn.Module)
stop_orca_context()
| 40.631313 | 97 | 0.595649 | [
"Apache-2.0"
] | DiegoCao/analytics-zoo | pyzoo/test/zoo/chronos/model/forecast/test_lstm_forecaster.py | 8,045 | Python |
from pypy.objspace.std.iterobject import W_SeqIterObject
from pypy.interpreter.error import OperationError
class TestW_IterObject:
def body3(self, w_iter):
w = self.space.wrap
assert self.space.eq_w(self.space.next(w_iter), w(5))
assert self.space.eq_w(self.space.next(w_iter), w(3))
assert self.space.eq_w(self.space.next(w_iter), w(99))
self.body0(w_iter)
def body0(self, w_iter):
raises(OperationError, self.space.next, w_iter)
raises(OperationError, self.space.next, w_iter)
def test_iter(self):
w = self.space.wrap
w_tuple = self.space.newtuple([w(5), w(3), w(99)])
w_iter = W_SeqIterObject(w_tuple)
self.body3(w_iter)
def test_iter_builtin(self):
w = self.space.wrap
w_tuple = self.space.newtuple([w(5), w(3), w(99)])
w_iter = self.space.iter(w_tuple)
self.body3(w_iter)
def test_emptyiter(self):
w_list = self.space.newlist([])
w_iter = W_SeqIterObject(w_list)
self.body0(w_iter)
def test_emptyiter_builtin(self):
w_list = self.space.newlist([])
w_iter = self.space.iter(w_list)
self.body0(w_iter)
class AppTestW_IterObjectApp:
def test_user_iter(self):
class C(object):
def next(self):
raise StopIteration
def __iter__(self):
return self
assert list(C()) == []
def test_iter_getitem(self):
class C(object):
def __getitem__(self, i):
return range(2)[i]
assert list(C()) == range(2)
def test_iter_fail_noseq(self):
class C(object):
pass
raises(TypeError,
iter,
C())
class AppTest_IterObject(object):
def test_no_len_on_list_iter(self):
iterable = [1,2,3,4]
raises(TypeError, len, iter(iterable))
def test_no_len_on_tuple_iter(self):
iterable = (1,2,3,4)
raises(TypeError, len, iter(iterable))
def test_no_len_on_deque_iter(self):
from _collections import deque
iterable = deque([1,2,3,4])
raises(TypeError, len, iter(iterable))
def test_no_len_on_reversed(self):
it = reversed("foobar")
raises(TypeError, len, it)
def test_no_len_on_reversed_seqiter(self):
# this one fails on CPython. See http://bugs.python.org/issue3689
it = reversed([5,6,7])
raises(TypeError, len, it)
def test_no_len_on_UserList_iter_reversed(self):
import sys, _abcoll
sys.modules['collections'] = _abcoll
from UserList import UserList
iterable = UserList([1,2,3,4])
raises(TypeError, len, iter(iterable))
raises(TypeError, len, reversed(iterable))
del sys.modules['collections']
def test_reversed_frees_empty(self):
import gc
for typ in list, unicode:
free = [False]
class U(typ):
def __del__(self):
free[0] = True
r = reversed(U())
raises(StopIteration, next, r)
gc.collect(); gc.collect(); gc.collect()
assert free[0]
def test_reversed_mutation(self):
n = 10
d = range(n)
it = reversed(d)
next(it)
next(it)
assert it.__length_hint__() == n-2
d.append(n)
assert it.__length_hint__() == n-2
d[1:] = []
assert it.__length_hint__() == 0
assert list(it) == []
d.extend(xrange(20))
assert it.__length_hint__() == 0
def test_no_len_on_set_iter(self):
iterable = set([1,2,3,4])
raises(TypeError, len, iter(iterable))
def test_no_len_on_xrange(self):
iterable = xrange(10)
raises(TypeError, len, iter(iterable))
def test_contains(self):
logger = []
class Foo(object):
def __init__(self, value, name=None):
self.value = value
self.name = name or value
def __repr__(self):
return '<Foo %s>' % self.name
def __eq__(self, other):
logger.append((self, other))
return self.value == other.value
foo1, foo2, foo3 = Foo(1), Foo(2), Foo(3)
foo42 = Foo(42)
foo_list = [foo1, foo2, foo3]
foo42 in (x for x in foo_list)
logger_copy = logger[:] # prevent re-evaluation during pytest error print
assert logger_copy == [(foo42, foo1), (foo42, foo2), (foo42, foo3)]
del logger[:]
foo2_bis = Foo(2, '2 bis')
foo2_bis in (x for x in foo_list)
logger_copy = logger[:] # prevent re-evaluation during pytest error print
assert logger_copy == [(foo2_bis, foo1), (foo2_bis, foo2)]
| 31.153846 | 82 | 0.571193 | [
"MIT"
] | igormcoelho/neo-boa | idea2/pypyjs-3/deps/pypy/pypy/objspace/std/test/test_iterobject.py | 4,860 | Python |
# -*- coding: utf-8 -*-
"""
"""
import argparse
import os
import sys
if __name__ == '__main__':
pass
| 7.857143 | 26 | 0.572727 | [
"Apache-2.0"
] | mzntaka0/audy | audy/mix/noise.py | 110 | Python |
import numpy as np
# Thinning morphological operation applied using lookup tables.
# We convert the 3x3 neighbourhood surrounding a pixel to an index
# used to lookup the output in a lookup table.
# Bit masks for each neighbour
# 1 2 4
# 8 16 32
# 64 128 256
NEIGH_MASK_EAST = 32
NEIGH_MASK_NORTH_EAST = 4
NEIGH_MASK_NORTH = 2
NEIGH_MASK_NORTH_WEST = 1
NEIGH_MASK_WEST = 8
NEIGH_MASK_SOUTH_WEST = 64
NEIGH_MASK_SOUTH = 128
NEIGH_MASK_SOUTH_EAST = 256
NEIGH_MASK_CENTRE = 16
# Masks in a list
# MASKS[0] = centre
# MASKS[1..8] = start from east, counter-clockwise
MASKS = [NEIGH_MASK_CENTRE,
NEIGH_MASK_EAST, NEIGH_MASK_NORTH_EAST, NEIGH_MASK_NORTH, NEIGH_MASK_NORTH_WEST,
NEIGH_MASK_WEST, NEIGH_MASK_SOUTH_WEST, NEIGH_MASK_SOUTH, NEIGH_MASK_SOUTH_EAST,
]
# Constant listing all indices
_LUT_INDS = np.arange(512)
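# Worked example (illustrative, derived from the masks above): for a pixel whose
# 3x3 neighbourhood is
#
#       0 1 0        row order: NW  N  NE
#       1 1 0                    W   C  E
#       0 0 1                    SW  S  SE
#
# the lookup index is N + W + CENTRE + SE = 2 + 8 + 16 + 256 = 282, so the output
# value for that pixel is lut[282] for whichever lookup table is applied.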
def binary_image_to_lut_indices(x):
"""
Convert a binary image to an index image that can be used with a lookup table
to perform morphological operations. Non-zero elements in the image are interpreted
as 1, zero elements as 0
:param x: a 2D NumPy array.
:return: a 2D NumPy array, same shape as x
"""
if x.ndim != 2:
raise ValueError('x should have 2 dimensions, not {}'.format(x.ndim))
# If the dtype of x is not bool, convert
if x.dtype != np.bool:
x = x != 0
# Pad with a 1-pixel border of zeros so every pixel has a full 3x3 neighbourhood
x = np.pad(x, [(1, 1), (1, 1)], mode='constant')
# Convert to LUT indices
lut_indices = x[:-2, :-2] * NEIGH_MASK_NORTH_WEST + \
x[:-2, 1:-1] * NEIGH_MASK_NORTH + \
x[:-2, 2:] * NEIGH_MASK_NORTH_EAST + \
x[1:-1, :-2] * NEIGH_MASK_WEST + \
x[1:-1, 1:-1] * NEIGH_MASK_CENTRE + \
x[1:-1, 2:] * NEIGH_MASK_EAST + \
x[2:, :-2] * NEIGH_MASK_SOUTH_WEST + \
x[2:, 1:-1] * NEIGH_MASK_SOUTH + \
x[2:, 2:] * NEIGH_MASK_SOUTH_EAST
return lut_indices.astype(np.int32)
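# Minimal example (illustrative): a single isolated foreground pixel has only
# the centre bit of its padded 3x3 neighbourhood set, so it maps to
# NEIGH_MASK_CENTRE == 16.
# >>> int(binary_image_to_lut_indices(np.array([[1]]))[0, 0])
# 16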
def apply_lut(x, lut):
"""
Perform a morphological operation on the binary image x using the supplied lookup table
:param x:
:param lut:
:return:
"""
if lut.ndim != 1:
raise ValueError('lut should have 1 dimension, not {}'.format(lut.ndim))
if lut.shape[0] != 512:
raise ValueError('lut should have 512 entries, not {}'.format(lut.shape[0]))
lut_indices = binary_image_to_lut_indices(x)
return lut[lut_indices]
def identity_lut():
"""
Create an identity lookup table (output equals the centre pixel of the neighbourhood)
:return: a 512-entry boolean lookup table
"""
lut = np.zeros((512,), dtype=bool)
inds = np.arange(512)
lut[(inds & NEIGH_MASK_CENTRE) != 0] = True
return lut
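# Sanity check (illustrative sketch): applying the identity LUT should
# reproduce the input image as booleans.
# >>> img = np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]])
# >>> np.array_equal(apply_lut(img, identity_lut()), img.astype(bool))
# True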
def _lut_mutate_mask(lut):
"""
Get a mask that shows which neighbourhood shapes result in changes to the image
:param lut: lookup table
:return: mask indicating which lookup indices result in changes
"""
return lut != identity_lut()
def lut_masks_zero(neigh):
"""
Create a LUT index mask for which the specified neighbour is 0
:param neigh: neighbour index; counter-clockwise from 1 starting at the eastern neighbour
:return: a LUT index mask
"""
if neigh > 8:
neigh -= 8
return (_LUT_INDS & MASKS[neigh]) == 0
def lut_masks_one(neigh):
"""
Create a LUT index mask for which the specified neighbour is 1
:param neigh: neighbour index; counter-clockwise from 1 starting at the eastern neighbour
:return: a LUT index mask
"""
if neigh > 8:
neigh -= 8
return (_LUT_INDS & MASKS[neigh]) != 0
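# Illustrative note: each neighbour mask selects exactly half of the 512
# possible neighbourhood indices, and neighbour numbers above 8 wrap around
# (9 refers to the eastern neighbour again).
# >>> int(lut_masks_one(1).sum())
# 256
# >>> bool((lut_masks_one(9) == lut_masks_one(1)).all())
# True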
def _thin_cond_g1():
"""
Thinning morphological operation; condition G1
:return: a LUT index mask
"""
b = np.zeros(512, dtype=int)
for i in range(1, 5):
b += lut_masks_zero(2 * i - 1) & (lut_masks_one(2 * i) | lut_masks_one(2 * i + 1))
return b == 1
def _thin_cond_g2():
"""
Thinning morphological operation; condition G2
:return: a LUT index mask
"""
n1 = np.zeros(512, dtype=int)
n2 = np.zeros(512, dtype=int)
for k in range(1, 5):
n1 += (lut_masks_one(2 * k - 1) | lut_masks_one(2 * k))
n2 += (lut_masks_one(2 * k) | lut_masks_one(2 * k + 1))
m = np.minimum(n1, n2)
return (m >= 2) & (m <= 3)
def _thin_cond_g3():
"""
Thinning morphological operation; condition G3
:return: a LUT index mask
"""
return ((lut_masks_one(2) | lut_masks_one(3) | lut_masks_zero(8)) & lut_masks_one(1)) == 0
def _thin_cond_g3_prime():
"""
Thinning morphological operation; condition G3'
:return: a LUT index mask
"""
return ((lut_masks_one(6) | lut_masks_one(7) | lut_masks_zero(4)) & lut_masks_one(5)) == 0
def _thin_iter_1_lut():
"""
Thinning morphological operation; lookup table for iteration 1
:return: lookup table
"""
lut = identity_lut()
cond = _thin_cond_g1() & _thin_cond_g2() & _thin_cond_g3()
lut[cond] = False
return lut
def _thin_iter_2_lut():
"""
Thinning morphological operation; lookup table for iteration 2
:return: lookup table
"""
lut = identity_lut()
cond = _thin_cond_g1() & _thin_cond_g2() & _thin_cond_g3_prime()
lut[cond] = False
return lut
def binary_thin(x, max_iter=None):
"""
Binary thinning morphological operation
:param x: a binary image, or an image that is to be converted to a binary image
:param max_iter: maximum number of iterations; the default of `None` places no limit on the
iteration count (`binary_thin` stops automatically once an iteration makes no further changes)
:return: the thinned binary image, same shape as x
"""
thin1 = _thin_iter_1_lut()
thin2 = _thin_iter_2_lut()
thin1_mut = _lut_mutate_mask(thin1)
thin2_mut = _lut_mutate_mask(thin2)
iter_count = 0
while max_iter is None or iter_count < max_iter:
# Iter 1
lut_indices = binary_image_to_lut_indices(x)
x_mut = thin1_mut[lut_indices]
if x_mut.sum() == 0:
break
x = thin1[lut_indices]
# Iter 2
lut_indices = binary_image_to_lut_indices(x)
x_mut = thin2_mut[lut_indices]
if x_mut.sum() == 0:
break
x = thin2[lut_indices]
iter_count += 1
return x
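# Usage sketch (illustrative; the exact skeleton depends on the input shape,
# so no particular output is asserted here):
# >>> img = np.zeros((20, 20), dtype=bool)
# >>> img[5:15, 4:16] = True
# >>> skeleton = binary_thin(img)
# >>> skeleton.shape == img.shape
# True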
| 27.70354 | 106 | 0.632487 | [
"MIT"
] | CipiOrhei/eecvf | Benchmarking/bsds500/bsds/thin.py | 6,261 | Python |
#!/usr/bin/env python3
"""
Usage: program | ./memcheck.py
"""
import fileinput
import pdb
with fileinput.input() as f:
    data = "".join(f)

s = {}
for l in data.splitlines():
    if "malloc:" in l:
        c = l.split(":")
        s[c[-1].strip()] = l
        # print("malloc:%s" % c[-1].strip())
    if "free:" in l:
        c = l.split(":")
        del s[c[-1].strip()]
        # print("free:%s" % c[-1].strip())

# print("size: %d" % len(s))
if s:
    print("The following allocations may not have been freed; please check:")
    for l in s:
        print(s[l])
else:
    print("Nothing to check")
| 16.882353 | 44 | 0.463415 | [
"Apache-2.0"
] | lbyoo/l_clib | py/memcheck.py | 620 | Python |
import random
class Card:
def __init__(self, suit, rank):
self.suit = suit
self.rank = rank
def __str__(self):
return f"{self.suit} {self.rank}: {BlackJack.values[self.rank]}"
class Hand:
def __init__(self):
self.cards = [] # start with empty list
self.value = 0
self.aces = 0
def adjust_for_ace(self):
while self.value > 21 and self.aces:
self.value -= 10
self.aces -= 1
def add_card(self, card):
self.cards.append(card)
self.value += BlackJack.values[card.rank]
if card.rank == 'Ace':
self.aces += 1
def __str__(self):
return f"Current Hand:{self.cards}\nCurrent Value:{self.value}\nCurrent Aces:{self.aces}\n"
class Deck:
def __init__(self, card_game):
self.game = card_game
# create deck with all 52 cards
self.cards = list()
for suit in self.game.suits:
for rank in self.game.ranks:
self.cards.append(Card(suit, rank))
def shuffle(self):
random.shuffle(self.cards)
def deal_card(self):
return self.cards.pop()
def __str__(self):
return f"{[x for x in self.cards]}"
class Chips:
def __init__(self, total=100):
self.total = total
self.bet = 0
def win_bet(self):
self.total += self.bet
self.bet = 0
def lose_bet(self):
self.total -= self.bet
self.bet = 0
def make_bet(self, bet):
if bet <= self.total:
self.bet = bet
else:
raise ValueError(f"The bet ({bet}) exceeds available chips ({self.total})")
def __str__(self):
return f"Total: {self.total}\nCurrent Bet:{self.bet}\n"
class Player:
def __init__(self, name):
self.name = name
self.wins = 0
self.lost_games = 0
self.chips = Chips()
def __str__(self):
return f"{self.name}:\n{self.wins} wins\n{self.lost_games} losses\nChips:{self.chips}\n"
class BlackJack:
suits = ('Hearts', 'Diamonds', 'Spades', 'Clubs')
ranks = ('Two', 'Three', 'Four', 'Five', 'Six', 'Seven', 'Eight', 'Nine', 'Ten', 'Jack', 'Queen', 'King', 'Ace')
values = {'Two': 2, 'Three': 3, 'Four': 4, 'Five': 5, 'Six': 6, 'Seven': 7, 'Eight': 8, 'Nine': 9, 'Ten': 10,
'Jack': 10, 'Queen': 10, 'King': 10, 'Ace': 11}
def __init__(self, player):
self.player = player
self.deck = Deck(self)
self.playing = False
def greeting(self):
print("WELCOME TO BLACKJACK!")
def take_bet(self):
while True:
try:
# Ask the Player for their bet
bet = int(input("Please put your bet: "))
# Make sure that the Player's bet does not exceed their available chips
self.player.chips.make_bet(bet)
break
except TypeError:
print("Invalid input. Please try again")
except ValueError as exc:
print(f"{exc} Please try again")
def hit(self, hand):
cd = self.deck.deal_card()
# print(f"Deal Card: {cd}")
hand.add_card(cd)
hand.adjust_for_ace()
def hit_or_stand(self, hand):
while True:
print(f"{self.player.name}: current {hand.value}")
action = input("Hit or Stand? Enter 'h' or 's': ")
if action[0].lower() == 's':
print("STAY\n")
self.playing = False
elif action[0].lower() == 'h':
print("HIT\n")
self.hit(hand)
else:
print(f"Sorry, I do not understand your choice '{action}'. Please try again")
continue
break
def player_busts(self, p_hand, d_hand):
print(f"[P={p_hand.value},D={d_hand.value}]: {self.player.name} BUSTED!")
self.player.chips.lose_bet()
self.player.lost_games += 1
def player_wins(self, p_hand, d_hand):
print(f"[P={p_hand.value},D={d_hand.value}]: {self.player.name} WINS! ")
self.player.chips.win_bet()
self.player.wins += 1
def dealer_busts(self, p_hand, d_hand):
print(f"[P={p_hand.value},D={d_hand.value}]: {self.player.name} WINS - Dealer BUSTED!")
self.player.chips.win_bet()
self.player.wins += 1
def dealer_wins(self, p_hand, d_hand):
print(f"[P={p_hand.value},D={d_hand.value}]: Dealer WINS")
self.player.chips.lose_bet()
self.player.lost_games += 1
def push(self, p_hand, d_hand):
print(f"[P={p_hand.value},D={d_hand.value}]: Dealer and {self.player.name} tie - PUSH!")
def show_some(self, p_hand, d_hand):
# Show only one of the Dealer's cards, the other remains hidden
print(f"Dealer's card (one hidden): {d_hand.cards[0]}")
# Show both of the Player's cards
print(f"{self.player.name}'s Cards:")
for card in p_hand.cards:
print(card)
print(f"total= {p_hand.value}")
def show_all_cards(self, p_hand, d_hand):
# Show both of the Player's cards
print(f"{self.player.name}'s Cards:")
for card in p_hand.cards:
print(card)
print(f"total= {p_hand.value}")
# Show both of the Player's cards
print(f"Dealer's Cards:")
for card in d_hand.cards:
print(card)
print(f"total= {d_hand.value}")
def play(self):
"""
# 1. Create a deck of 52 cards
# 2. Shuffle the deck
# 3. Ask the Player for their bet
# 4. Make sure that the Player's bet does not exceed their available chips
# 5. Deal two cards to the Dealer and two cards to the Player
# 6. Show only one of the Dealer's cards, the other remains hidden
# 7. Show both of the Player's cards
# 8. Ask the Player if they wish to Hit, and take another card
# 9. If the Player's hand doesn't Bust (go over 21), ask if they'd like to Hit again.
# 10. If a Player Stands, play the Dealer's hand.
# The dealer will always Hit until the Dealer's value meets or exceeds 17
# 11. Determine the winner and adjust the Player's chips accordingly
# 12. Ask the Player if they'd like to play again
"""
print("--NEW GAME---")
self.playing = True
self.deck.shuffle()
dealer_hand = Hand()
player_hand = Hand()
# Deal two cards to the Dealer and two cards to the Player
player_hand.add_card(self.deck.deal_card())
dealer_hand.add_card(self.deck.deal_card())
player_hand.add_card(self.deck.deal_card())
dealer_hand.add_card(self.deck.deal_card())
self.take_bet()
# show cards, but keep one dealer card hidden
self.show_some(player_hand, dealer_hand)
while self.playing:
# Ask the Player if they wish to Hit, and take another card
# If the Player's hand doesn't Bust (go over 21), ask if they'd like to Hit again.
self.hit_or_stand(player_hand)
self.show_some(player_hand, dealer_hand)
if player_hand.value > 21:
# player busts - lost his bet
self.player_busts(player_hand, dealer_hand)
break
# If Player has not busted
if player_hand.value <= 21:
# The dealer will always Hit until the Dealer's value meets or exceeds 17
while dealer_hand.value < 17:
self.hit(dealer_hand)
# Determine for the winner - show all cards
self.show_all_cards(player_hand, dealer_hand)
# Determine the winner and adjust the Player's chips accordingly
if dealer_hand.value > 21:
self.dealer_busts(player_hand, dealer_hand)
elif player_hand.value > dealer_hand.value:
self.player_wins(player_hand, dealer_hand)
elif player_hand.value < dealer_hand.value:
self.dealer_wins(player_hand, dealer_hand)
else:
self.push(player_hand, dealer_hand)
if __name__ == "__main__":
game_on = True
# Play a new game of BlackJack with Player Daniela
player = Player('Daniela')
game = BlackJack(player)
game.greeting()
while game_on:
game.play()
print(f"GAME DONE.\nGame Stats:\n\n{player}")
# Ask the Player if they'd like to play again
if input("Would you like another game? y/n: ") != 'y':
game_on = False
| 31.822878 | 116 | 0.57224 | [
"MIT"
] | tse4a/Python-Challenge | BlackJack.py | 8,624 | Python |