import math


def is_prime_power(n):
    """Return True if n has exactly one distinct prime factor (i.e. n is a prime power)."""
    factors = set()
    # Strip out every factor of 2 first
    while n % 2 == 0:
        factors.add(2)
        n = n // 2
    # n is now odd; try the remaining odd candidates up to sqrt(n)
    for i in range(3, int(math.sqrt(n)) + 1, 2):
        while n % i == 0:
            factors.add(i)
            n = n // i
    # Whatever remains (> 2) is itself a prime factor
    if n > 2:
        factors.add(n)
    return len(factors) == 1
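# Quick sanity check (illustrative, not part of the original script):
#   is_prime_power(8)  -> True   (8 = 2**3, one distinct prime factor)
#   is_prime_power(12) -> False  (12 = 2**2 * 3, two distinct prime factors)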

def main():
    n = int(input('Enter n: '))
    count = -1
    curr = 0
    while count < n:
        curr += 1
        if is_prime_power(curr):
            count += 1
            print(curr)


if __name__ == '__main__':
    main()
# Source: EJammy/Integer-Sequences / Prime Powers/prime_powers.py (Python, Unlicense)
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
""" P1 for stopped Virtual Machine life cycle
"""
#Import Local Modules
import marvin
from nose.plugins.attrib import attr
from marvin.cloudstackTestCase import *
from marvin.cloudstackAPI import *
from marvin.remoteSSHClient import remoteSSHClient
from marvin.integration.lib.utils import *
from marvin.integration.lib.base import *
from marvin.integration.lib.common import *
#Import System modules
import time
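# Note: this suite is written for the Marvin integration-test framework and assumes a
# live CloudStack deployment described by a Marvin configuration (zone, hosts, templates).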
class Services:
"""Test Stopped VM Life Cycle Services
"""
def __init__(self):
self.services = {
"account": {
"email": "[email protected]",
"firstname": "Test",
"lastname": "User",
"username": "test",
# Random characters are appended in create account to
# ensure unique username generated each time
"password": "password",
},
"virtual_machine":
{
"displayname": "testserver",
"username": "root", # VM creds for SSH
"password": "password",
"ssh_port": 22,
"hypervisor": 'XenServer',
"privateport": 22,
"publicport": 22,
"protocol": 'TCP',
},
"service_offering":
{
"name": "Tiny Instance",
"displaytext": "Tiny Instance",
"cpunumber": 1,
"cpuspeed": 100, # in MHz
"memory": 128, # In MBs
},
"disk_offering": {
"displaytext": "Tiny volume",
"name": "Tiny volume",
"disksize": 1
},
"volume": {
"diskname": "DataDisk",
"url": 'http://download.cloud.com/releases/2.0.0/UbuntuServer-10-04-64bit.vhd.bz2',
"format": 'VHD'
},
"iso": # ISO settings for Attach/Detach ISO tests
{
"displaytext": "Test ISO",
"name": "testISO",
"url": "http://people.apache.org/~tsp/dummy.iso",
# Source URL where ISO is located
"ostype": 'CentOS 5.3 (64-bit)',
"mode": 'HTTP_DOWNLOAD', # Downloading existing ISO
},
"template": {
"url": "http://download.cloud.com/releases/2.0.0/UbuntuServer-10-04-64bit.vhd.bz2",
"hypervisor": 'XenServer',
"format": 'VHD',
"isfeatured": True,
"ispublic": True,
"isextractable": True,
"displaytext": "Cent OS Template",
"name": "Cent OS Template",
"ostype": 'CentOS 5.3 (64-bit)',
"templatefilter": 'self',
"passwordenabled": True,
},
"sleep": 60,
"timeout": 10,
#Migrate VM to hostid
"ostype": 'CentOS 5.3 (64-bit)',
# CentOS 5.3 (64-bit)
}
class TestDeployVM(cloudstackTestCase):
@classmethod
def setUpClass(cls):
cls.api_client = super(
TestDeployVM,
cls
).getClsTestClient().getApiClient()
cls.services = Services().services
# Get Zone, Domain and templates
cls.domain = get_domain(cls.api_client, cls.services)
cls.zone = get_zone(cls.api_client, cls.services)
cls.template = get_template(
cls.api_client,
cls.zone.id,
cls.services["ostype"]
)
# Create service offerings, disk offerings etc
cls.service_offering = ServiceOffering.create(
cls.api_client,
cls.services["service_offering"]
)
cls.disk_offering = DiskOffering.create(
cls.api_client,
cls.services["disk_offering"]
)
# Cleanup
cls._cleanup = [
cls.service_offering,
cls.disk_offering,
]
return
@classmethod
def tearDownClass(cls):
try:
cleanup_resources(cls.api_client, cls._cleanup)
except Exception as e:
raise Exception("Warning: Exception during cleanup : %s" % e)
def setUp(self):
self.apiclient = self.testClient.getApiClient()
self.dbclient = self.testClient.getDbConnection()
self.services = Services().services
self.services["virtual_machine"]["zoneid"] = self.zone.id
self.services["iso"]["zoneid"] = self.zone.id
self.services["virtual_machine"]["template"] = self.template.id
self.account = Account.create(
self.apiclient,
self.services["account"],
domainid=self.domain.id
)
self.cleanup = [self.account]
return
def tearDown(self):
try:
self.debug("Cleaning up the resources")
cleanup_resources(self.apiclient, self.cleanup)
self.debug("Cleanup complete!")
except Exception as e:
self.debug("Warning! Exception in tearDown: %s" % e)
@attr(tags = ["advanced", "eip", "advancedns", "basic", "sg"])
def test_01_deploy_vm_no_startvm(self):
"""Test Deploy Virtual Machine with no startVM parameter
"""
# Validate the following:
# 1. deploy Vm without specifying the startvm parameter
# 2. Should be able to login to the VM.
# 3. listVM command should return the deployed VM.State of this VM
# should be "Running".
self.debug("Deploying instance in the account: %s" %
self.account.name)
self.virtual_machine = VirtualMachine.create(
self.apiclient,
self.services["virtual_machine"],
accountid=self.account.name,
domainid=self.account.domainid,
serviceofferingid=self.service_offering.id,
diskofferingid=self.disk_offering.id,
mode=self.zone.networktype
)
self.debug("Deployed instance in account: %s" %
self.account.name)
list_vm_response = list_virtual_machines(
self.apiclient,
id=self.virtual_machine.id
)
self.debug(
"Verify listVirtualMachines response for virtual machine: %s" \
% self.virtual_machine.id
)
self.assertEqual(
isinstance(list_vm_response, list),
True,
"Check list response returns a valid list"
)
vm_response = list_vm_response[0]
self.assertEqual(
vm_response.state,
"Running",
"VM should be in Running state after deployment"
)
return
@attr(tags = ["advanced", "eip", "advancedns", "basic", "sg"])
def test_02_deploy_vm_startvm_true(self):
"""Test Deploy Virtual Machine with startVM=true parameter
"""
# Validate the following:
# 1. deploy Vm with the startvm=true
# 2. Should be able to login to the VM.
# 3. listVM command should return the deployed VM.State of this VM
# should be "Running".
self.debug("Deploying instance in the account: %s" %
self.account.name)
self.virtual_machine = VirtualMachine.create(
self.apiclient,
self.services["virtual_machine"],
accountid=self.account.name,
domainid=self.account.domainid,
serviceofferingid=self.service_offering.id,
startvm=True,
diskofferingid=self.disk_offering.id,
mode=self.zone.networktype
)
self.debug("Deployed instance in account: %s" %
self.account.name)
list_vm_response = list_virtual_machines(
self.apiclient,
id=self.virtual_machine.id
)
self.debug(
"Verify listVirtualMachines response for virtual machine: %s" \
% self.virtual_machine.id
)
self.assertEqual(
isinstance(list_vm_response, list),
True,
"Check list response returns a valid list"
)
vm_response = list_vm_response[0]
self.assertEqual(
vm_response.state,
"Running",
"VM should be in Running state after deployment"
)
return
@attr(tags = ["advanced", "eip", "advancedns", "basic", "sg"])
def test_03_deploy_vm_startvm_false(self):
"""Test Deploy Virtual Machine with startVM=false parameter
"""
# Validate the following:
# 1. deploy Vm with the startvm=false
# 2. Should not be able to login to the VM.
# 3. listVM command should return the deployed VM.State of this VM
# should be "Stopped".
# 4. Check listRouters call for that account. List routers should
# return empty response
self.debug("Deploying instance in the account: %s" %
self.account.name)
self.virtual_machine = VirtualMachine.create(
self.apiclient,
self.services["virtual_machine"],
accountid=self.account.name,
domainid=self.account.domainid,
serviceofferingid=self.service_offering.id,
startvm=False,
diskofferingid=self.disk_offering.id,
)
self.debug("Deployed instance in account: %s" %
self.account.name)
list_vm_response = list_virtual_machines(
self.apiclient,
id=self.virtual_machine.id
)
self.debug(
"Verify listVirtualMachines response for virtual machine: %s" \
% self.virtual_machine.id
)
self.assertEqual(
isinstance(list_vm_response, list),
True,
"Check list response returns a valid list"
)
vm_response = list_vm_response[0]
self.assertEqual(
vm_response.state,
"Stopped",
"VM should be in Stopped state after deployment with startvm=false"
)
routers = Router.list(
self.apiclient,
account=self.account.name,
domainid=self.account.domainid,
listall=True
)
self.assertEqual(
routers,
None,
"List routers should return empty response"
)
self.debug("Destroying instance: %s" % self.virtual_machine.name)
self.virtual_machine.delete(self.apiclient)
self.debug("Instance is destroyed!")
self.debug(
"Verify listVirtualMachines response for virtual machine: %s" \
% self.virtual_machine.id
)
self.debug("Instance destroyed..waiting till expunge interval")
interval = list_configurations(
self.apiclient,
name='expunge.interval'
)
delay = list_configurations(
self.apiclient,
name='expunge.delay'
)
# Sleep to ensure that all resources are deleted
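# expunge.interval and expunge.delay are CloudStack global settings (in seconds) that
# control when a destroyed VM is actually expunged, so their sum is a safe wait time.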
time.sleep((int(interval[0].value) + int(delay[0].value)))
list_vm_response = list_virtual_machines(
self.apiclient,
id=self.virtual_machine.id
)
self.assertEqual(
list_vm_response,
None,
"List VM response should be empty once the instance is expunged"
)
return
@attr(tags = ["advanced", "eip", "advancedns", "basic", "sg"])
def test_04_deploy_startvm_false_attach_volume(self):
"""Test Deploy Virtual Machine with startVM=false and attach volume
"""
# Validate the following:
# 1. deploy Vm with the startvm=false. Attach volume to the instance
# 2. listVM command should return the deployed VM.State of this VM
# should be "Stopped".
# 3. Attach volume should be successful
self.debug("Deploying instance in the account: %s" %
self.account.name)
self.virtual_machine = VirtualMachine.create(
self.apiclient,
self.services["virtual_machine"],
accountid=self.account.name,
domainid=self.account.domainid,
serviceofferingid=self.service_offering.id,
startvm=False,
diskofferingid=self.disk_offering.id,
)
self.debug("Deployed instance in account: %s" %
self.account.name)
list_vm_response = list_virtual_machines(
self.apiclient,
id=self.virtual_machine.id
)
self.debug(
"Verify listVirtualMachines response for virtual machine: %s" \
% self.virtual_machine.id
)
self.assertEqual(
isinstance(list_vm_response, list),
True,
"Check list response returns a valid list"
)
vm_response = list_vm_response[0]
self.assertEqual(
vm_response.state,
"Stopped",
"VM should be in Stopped state after deployment with startvm=false"
)
self.debug("Creating a volume in account: %s" %
self.account.name)
volume = Volume.create(
self.apiclient,
self.services["volume"],
zoneid=self.zone.id,
account=self.account.name,
domainid=self.account.domainid,
diskofferingid=self.disk_offering.id
)
self.debug("Created volume in account: %s" % self.account.name)
self.debug("Attaching volume to instance: %s" %
self.virtual_machine.name)
try:
self.virtual_machine.attach_volume(self.apiclient, volume)
except Exception as e:
self.fail("Attach volume failed with exception: %s" % e)
return
@attr(tags = ["advanced", "eip", "advancedns", "basic", "sg"])
def test_05_deploy_startvm_false_change_so(self):
"""Test Deploy Virtual Machine with startVM=false and change service offering
"""
# Validate the following:
# 1. deploy Vm with the startvm=false. Attach volume to the instance
# 2. listVM command should return the deployed VM.State of this VM
# should be "Stopped".
# 4. Change service offering
self.debug("Deploying instance in the account: %s" %
self.account.name)
self.virtual_machine = VirtualMachine.create(
self.apiclient,
self.services["virtual_machine"],
accountid=self.account.name,
domainid=self.account.domainid,
serviceofferingid=self.service_offering.id,
startvm=False,
)
self.debug("Deployed instance in account: %s" %
self.account.name)
list_vm_response = list_virtual_machines(
self.apiclient,
id=self.virtual_machine.id
)
self.debug(
"Verify listVirtualMachines response for virtual machine: %s" \
% self.virtual_machine.id
)
self.assertEqual(
isinstance(list_vm_response, list),
True,
"Check list response returns a valid list"
)
vm_response = list_vm_response[0]
self.assertEqual(
vm_response.state,
"Stopped",
"VM should be in Stopped state after deployment with startvm=false"
)
medium_service_off = ServiceOffering.create(
self.apiclient,
self.services["service_offering"]
)
self.cleanup.append(medium_service_off)
self.debug("Changing service offering for instance: %s" %
self.virtual_machine.name)
try:
self.virtual_machine.change_service_offering(
self.apiclient,
medium_service_off.id
)
except Exception as e:
self.fail("Change service offering failed: %s" % e)
self.debug("Starting the instance: %s" % self.virtual_machine.name)
self.virtual_machine.start(self.apiclient)
self.debug("Instance: %s started" % self.virtual_machine.name)
listedvm = VirtualMachine.list(self.apiclient, id=self.virtual_machine.id)
self.assert_(isinstance(listedvm, list))
self.assert_(len(listedvm) > 0)
self.assertEqual(listedvm[0].serviceofferingid, medium_service_off.id, msg="VM did not change service offering")
return
@attr(tags = ["advanced", "eip", "advancedns", "basic", "sg"])
def test_06_deploy_startvm_attach_detach(self):
"""Test Deploy Virtual Machine with startVM=false and
attach detach volumes
"""
# Validate the following:
# 1. deploy Vm with the startvm=false. Attach volume to the instance
# 2. listVM command should return the deployed VM.State of this VM
# should be "Stopped".
# 3. Attach volume should be successful
# 4. Detach volume from instance. Detach should be successful
self.debug("Deploying instance in the account: %s" %
self.account.name)
self.virtual_machine = VirtualMachine.create(
self.apiclient,
self.services["virtual_machine"],
accountid=self.account.name,
domainid=self.account.domainid,
serviceofferingid=self.service_offering.id,
startvm=False,
diskofferingid=self.disk_offering.id,
)
self.debug("Deployed instance in account: %s" %
self.account.name)
list_vm_response = list_virtual_machines(
self.apiclient,
id=self.virtual_machine.id
)
self.debug(
"Verify listVirtualMachines response for virtual machine: %s" \
% self.virtual_machine.id
)
self.assertEqual(
isinstance(list_vm_response, list),
True,
"Check list response returns a valid list"
)
vm_response = list_vm_response[0]
self.assertEqual(
vm_response.state,
"Stopped",
"VM should be in Stopped state after deployment with startvm=false"
)
self.debug("Creating a volume in account: %s" %
self.account.name)
volume = Volume.create(
self.apiclient,
self.services["volume"],
zoneid=self.zone.id,
account=self.account.name,
domainid=self.account.domainid,
diskofferingid=self.disk_offering.id
)
self.debug("Created volume in account: %s" % self.account.name)
self.debug("Attaching volume to instance: %s" %
self.virtual_machine.name)
try:
self.virtual_machine.attach_volume(self.apiclient, volume)
except Exception as e:
self.fail("Attach volume failed with exception: %s" % e)
self.debug("Detaching the disk: %s" % volume.name)
self.virtual_machine.detach_volume(self.apiclient, volume)
self.debug("Datadisk %s detached!" % volume.name)
volumes = Volume.list(
self.apiclient,
virtualmachineid=self.virtual_machine.id,
type='DATADISK',
id=volume.id,
listall=True
)
self.assertEqual(
volumes,
None,
"List Volumes should not list any volume for instance"
)
return
@attr(tags = ["advanced", "eip", "advancedns", "basic", "sg"])
def test_07_deploy_startvm_attach_iso(self):
"""Test Deploy Virtual Machine with startVM=false and attach ISO
"""
# Validate the following:
# 1. deploy Vm with the startvm=false. Attach volume to the instance
# 2. listVM command should return the deployed VM.State of this VM
# should be "Stopped".
# 3. Attach ISO to the instance. Attach ISO should be successful
self.debug("Deploying instance in the account: %s" %
self.account.name)
self.virtual_machine = VirtualMachine.create(
self.apiclient,
self.services["virtual_machine"],
accountid=self.account.name,
domainid=self.account.domainid,
serviceofferingid=self.service_offering.id,
startvm=False,
diskofferingid=self.disk_offering.id,
)
self.debug("Deployed instance in account: %s" %
self.account.name)
list_vm_response = list_virtual_machines(
self.apiclient,
id=self.virtual_machine.id
)
self.debug(
"Verify listVirtualMachines response for virtual machine: %s" \
% self.virtual_machine.id
)
self.assertEqual(
isinstance(list_vm_response, list),
True,
"Check list response returns a valid list"
)
vm_response = list_vm_response[0]
self.assertEqual(
vm_response.state,
"Stopped",
"VM should be in Stopped state after deployment with startvm=false"
)
self.debug("Registering a ISO in account: %s" %
self.account.name)
iso = Iso.create(
self.apiclient,
self.services["iso"],
account=self.account.name,
domainid=self.account.domainid
)
self.debug("Successfully created ISO with ID: %s" % iso.id)
try:
iso.download(self.apiclient)
self.cleanup.append(iso)
except Exception as e:
self.fail("Exception while downloading ISO %s: %s"\
% (iso.id, e))
self.debug("Attach ISO with ID: %s to VM ID: %s" % (
iso.id,
self.virtual_machine.id
))
try:
self.virtual_machine.attach_iso(self.apiclient, iso)
except Exception as e:
self.fail("Attach ISO failed with exception: %s" % e)
vms = VirtualMachine.list(
self.apiclient,
id=self.virtual_machine.id,
listall=True
)
self.assertEqual(
isinstance(vms, list),
True,
"List vms should return a valid list"
)
vm = vms[0]
self.assertEqual(
vm.isoid,
iso.id,
"The ISO status should be reflected in list Vm call"
)
return
@attr(tags = ["advanced", "eip", "advancedns", "basic", "sg"])
def test_08_deploy_attached_volume(self):
"""Test Deploy Virtual Machine with startVM=false and attach volume already attached to different machine
"""
# Validate the following:
# 1. deploy Vm with the startvm=false. Attach volume to the instance
# 2. listVM command should return the deployed VM.State of this VM
# should be "Stopped".
# 3. Create an instance with datadisk attached to it. Detach DATADISK
# 4. Attach the volume to first virtual machine.
self.debug("Deploying instance in the account: %s" %
self.account.name)
self.virtual_machine_1 = VirtualMachine.create(
self.apiclient,
self.services["virtual_machine"],
accountid=self.account.name,
domainid=self.account.domainid,
serviceofferingid=self.service_offering.id,
startvm=False,
)
self.debug("Deployed instance in account: %s" %
self.account.name)
list_vm_response = list_virtual_machines(
self.apiclient,
id=self.virtual_machine_1.id
)
self.debug(
"Verify listVirtualMachines response for virtual machine: %s" \
% self.virtual_machine_1.id
)
self.assertEqual(
isinstance(list_vm_response, list),
True,
"Check list response returns a valid list"
)
vm_response = list_vm_response[0]
self.assertEqual(
vm_response.state,
"Stopped",
"VM should be in Stopped state after deployment with startvm=false"
)
self.debug("Deploying instance in the account: %s" %
self.account.name)
self.virtual_machine_2 = VirtualMachine.create(
self.apiclient,
self.services["virtual_machine"],
accountid=self.account.name,
domainid=self.account.domainid,
serviceofferingid=self.service_offering.id,
diskofferingid=self.disk_offering.id
)
self.debug("Deployed instance in account: %s" %
self.account.name)
list_vm_response = list_virtual_machines(
self.apiclient,
id=self.virtual_machine_2.id
)
self.debug(
"Verify listVirtualMachines response for virtual machine: %s" \
% self.virtual_machine_2.id
)
self.assertEqual(
isinstance(list_vm_response, list),
True,
"Check list response returns a valid list"
)
vm_response = list_vm_response[0]
self.assertEqual(
vm_response.state,
"Running",
"VM should be in Running state after deployment"
)
self.debug(
"Fetching DATADISK details for instance: %s" %
self.virtual_machine_2.name)
volumes = Volume.list(
self.apiclient,
type='DATADISK',
account=self.account.name,
domainid=self.account.domainid,
listall=True
)
self.assertEqual(
isinstance(volumes, list),
True,
"List volumes should return a valid list"
)
volume = volumes[0]
self.debug("Detaching the disk: %s" % volume.name)
try:
self.virtual_machine_2.detach_volume(self.apiclient, volume)
self.debug("Datadisk %s detached!" % volume.name)
except Exception as e:
self.fail("Detach volume failed with exception: %s" % e)
self.debug("Attaching volume to instance: %s" %
self.virtual_machine_1.name)
try:
self.virtual_machine_1.attach_volume(self.apiclient, volume)
except Exception as e:
self.fail("Attach volume failed with %s!" % e)
volumes = Volume.list(
self.apiclient,
virtualmachineid=self.virtual_machine_1.id,
type='DATADISK',
id=volume.id,
listall=True
)
self.assertNotEqual(
volumes,
None,
"List Volumes should list the volume attached to the instance"
)
return
@attr(tags = ["advanced", "eip", "advancedns", "basic", "sg"])
def test_09_stop_vm_migrate_vol(self):
"""Test Stopped Virtual Machine's ROOT volume migration
"""
# Validate the following:
# 1. deploy Vm with startvm=true
# 2. Should be able to login to the VM.
# 3. listVM command should return the deployed VM.State of this VM
# should be "Running".
# 4. Stop the vm
# 5. List primary storages in the cluster; there should be more than one
# 6. Migrate the ROOT volume to another available primary storage
clusters = Cluster.list(
self.apiclient,
zoneid = self.zone.id
)
self.assertEqual(
isinstance(clusters, list),
True,
"Check list response returns a valid list"
)
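# Find a cluster with more than one primary storage pool; the ROOT volume can only be
# migrated when an alternative pool exists in the same cluster.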
i = 0
for cluster in clusters :
storage_pools = StoragePool.list(
self.apiclient,
clusterid = cluster.id
)
if len(storage_pools) > 1 :
self.cluster_id = cluster.id
i += 1
break
if i == 0 :
self.skipTest("No cluster with more than one primary storage pool to perform migrate volume test")
hosts = Host.list(
self.apiclient,
clusterid = self.cluster_id
)
self.assertEqual(
isinstance(hosts, list),
True,
"Check list response returns a valid list"
)
host = hosts[0]
self.debug("Deploying instance on host: %s" % host.id)
self.debug("Deploying instance in the account: %s" %
self.account.name)
self.virtual_machine = VirtualMachine.create(
self.apiclient,
self.services["virtual_machine"],
accountid=self.account.name,
domainid=self.account.domainid,
serviceofferingid=self.service_offering.id,
diskofferingid=self.disk_offering.id,
hostid=host.id,
mode=self.zone.networktype
)
self.debug("Deployed instance in account: %s" %
self.account.name)
list_vm_response = list_virtual_machines(
self.apiclient,
id=self.virtual_machine.id
)
self.debug(
"Verify listVirtualMachines response for virtual machine: %s" \
% self.virtual_machine.id
)
self.assertEqual(
isinstance(list_vm_response, list),
True,
"Check list response returns a valid list"
)
vm_response = list_vm_response[0]
self.assertEqual(
vm_response.state,
"Running",
"VM should be in Running state after deployment"
)
self.debug("Stopping instance: %s" % self.virtual_machine.name)
self.virtual_machine.stop(self.apiclient)
self.debug("Instance is stopped!")
self.debug(
"Verify listVirtualMachines response for virtual machine: %s" \
% self.virtual_machine.id
)
list_vm_response = list_virtual_machines(
self.apiclient,
id=self.virtual_machine.id
)
self.assertEqual(
isinstance(list_vm_response, list),
True,
"Check list response returns a valid list"
)
vm_response = list_vm_response[0]
self.assertEqual(
vm_response.state,
"Stopped",
"VM should be in Stopped state after stopping the VM"
)
volumes = Volume.list(
self.apiclient,
virtualmachineid=self.virtual_machine.id,
type='ROOT',
listall=True
)
self.assertEqual(
isinstance(volumes, list),
True,
"Check volume list response returns a valid list"
)
vol_response = volumes[0]
#get the storage name in which volume is stored
storage_name = vol_response.storage
storage_pools = StoragePool.list(
self.apiclient,
clusterid = self.cluster_id
)
#Get storage pool to migrate volume
for spool in storage_pools:
if spool.name == storage_name:
continue
else:
self.storage_id = spool.id
self.storage_name = spool.name
break
self.debug("Migrating volume to storage pool: %s" % self.storage_name)
Volume.migrate(
self.apiclient,
storageid = self.storage_id,
volumeid = vol_response.id
)
volume = Volume.list(
self.apiclient,
virtualmachineid=self.virtual_machine.id,
type='ROOT',
listall=True
)
self.assertEqual(
volume[0].storage,
self.storage_name,
"Check volume migration response")
return
class TestDeployHaEnabledVM(cloudstackTestCase):
@classmethod
def setUpClass(cls):
cls.api_client = super(
TestDeployHaEnabledVM,
cls
).getClsTestClient().getApiClient()
cls.services = Services().services
# Get Zone, Domain and templates
cls.domain = get_domain(cls.api_client, cls.services)
cls.zone = get_zone(cls.api_client, cls.services)
cls.template = get_template(
cls.api_client,
cls.zone.id,
cls.services["ostype"]
)
# Create service, disk offerings etc
cls.service_offering = ServiceOffering.create(
cls.api_client,
cls.services["service_offering"],
offerha=True
)
cls.disk_offering = DiskOffering.create(
cls.api_client,
cls.services["disk_offering"]
)
# Cleanup
cls._cleanup = [
cls.service_offering,
cls.disk_offering,
]
return
@classmethod
def tearDownClass(cls):
try:
cleanup_resources(cls.api_client, cls._cleanup)
except Exception as e:
raise Exception("Warning: Exception during cleanup : %s" % e)
def setUp(self):
self.apiclient = self.testClient.getApiClient()
self.dbclient = self.testClient.getDbConnection()
self.services = Services().services
self.services["virtual_machine"]["zoneid"] = self.zone.id
self.services["virtual_machine"]["template"] = self.template.id
self.services["iso"]["zoneid"] = self.zone.id
self.account = Account.create(
self.apiclient,
self.services["account"],
domainid=self.domain.id
)
self.cleanup = [self.account]
return
def tearDown(self):
try:
self.debug("Cleaning up the resources")
cleanup_resources(self.apiclient, self.cleanup)
self.debug("Cleanup complete!")
except Exception as e:
self.debug("Warning! Exception in tearDown: %s" % e)
@attr(tags = ["advanced", "eip", "advancedns", "basic", "sg"])
def test_01_deploy_ha_vm_startvm_false(self):
"""Test Deploy HA enabled Virtual Machine with startvm=false
"""
# Validate the following:
# 1. deployHA enabled Vm with the startvm parameter = false
# 2. listVM command should return the deployed VM. State of this VM
# should be "Stopped".
self.debug("Deploying instance in the account: %s" %
self.account.name)
self.virtual_machine = VirtualMachine.create(
self.apiclient,
self.services["virtual_machine"],
accountid=self.account.name,
domainid=self.account.domainid,
serviceofferingid=self.service_offering.id,
diskofferingid=self.disk_offering.id,
startvm=False
)
self.debug("Deployed instance in account: %s" %
self.account.name)
list_vm_response = list_virtual_machines(
self.apiclient,
id=self.virtual_machine.id
)
self.debug(
"Verify listVirtualMachines response for virtual machine: %s" \
% self.virtual_machine.id
)
self.assertEqual(
isinstance(list_vm_response, list),
True,
"Check list response returns a valid list"
)
vm_response = list_vm_response[0]
self.assertEqual(
vm_response.state,
"Stopped",
"VM should be in Stopped state after deployment"
)
return
@attr(tags = ["advanced", "eip", "advancedns", "basic", "sg"])
def test_02_deploy_ha_vm_from_iso(self):
"""Test Deploy HA enabled Virtual Machine from ISO
"""
# Validate the following:
# 1. deployHA enabled Vm using ISO with the startvm parameter=true
# 2. listVM command should return the deployed VM. State of this VM
# should be "Running".
self.iso = Iso.create(
self.apiclient,
self.services["iso"],
account=self.account.name,
domainid=self.account.domainid
)
try:
# Download the ISO
self.iso.download(self.apiclient)
self.cleanup.append(self.iso)
except Exception as e:
raise Exception("Exception while downloading ISO %s: %s"\
% (self.iso.id, e))
self.debug("Registered ISO: %s" % self.iso.name)
self.debug("Deploying instance in the account: %s" %
self.account.name)
self.virtual_machine = VirtualMachine.create(
self.apiclient,
self.services["virtual_machine"],
accountid=self.account.name,
domainid=self.account.domainid,
templateid=self.iso.id,
serviceofferingid=self.service_offering.id,
diskofferingid=self.disk_offering.id,
startvm=True
)
self.debug("Deployed instance in account: %s" %
self.account.name)
list_vm_response = list_virtual_machines(
self.apiclient,
id=self.virtual_machine.id
)
self.debug(
"Verify listVirtualMachines response for virtual machine: %s" \
% self.virtual_machine.id
)
self.assertEqual(
isinstance(list_vm_response, list),
True,
"Check list response returns a valid list"
)
vm_response = list_vm_response[0]
self.assertEqual(
vm_response.state,
"Running",
"VM should be in Running state after deployment"
)
return
@attr(tags = ["advanced", "eip", "advancedns", "basic", "sg"])
def test_03_deploy_ha_vm_iso_startvm_false(self):
"""Test Deploy HA enabled Virtual Machine from ISO with startvm=false
"""
# Validate the following:
# 1. deployHA enabled Vm using ISO with the startvm parameter=false
# 2. listVM command should return the deployed VM. State of this VM
# should be "Stopped".
self.debug("Deploying instance in the account: %s" %
self.account.name)
self.virtual_machine = VirtualMachine.create(
self.apiclient,
self.services["virtual_machine"],
accountid=self.account.name,
domainid=self.account.domainid,
serviceofferingid=self.service_offering.id,
diskofferingid=self.disk_offering.id,
startvm=False
)
self.debug("Deployed instance in account: %s" %
self.account.name)
list_vm_response = list_virtual_machines(
self.apiclient,
id=self.virtual_machine.id
)
self.debug(
"Verify listVirtualMachines response for virtual machine: %s" \
% self.virtual_machine.id
)
self.assertEqual(
isinstance(list_vm_response, list),
True,
"Check list response returns a valid list"
)
vm_response = list_vm_response[0]
self.assertEqual(
vm_response.state,
"Stopped",
"VM should be in Stopped state after deployment"
)
return
class TestRouterStateAfterDeploy(cloudstackTestCase):
@classmethod
def setUpClass(cls):
cls.api_client = super(
TestRouterStateAfterDeploy,
cls
).getClsTestClient().getApiClient()
cls.services = Services().services
# Get Zone, Domain and templates
cls.domain = get_domain(cls.api_client, cls.services)
cls.zone = get_zone(cls.api_client, cls.services)
cls.template = get_template(
cls.api_client,
cls.zone.id,
cls.services["ostype"]
)
# Create service offerings, disk offerings etc
cls.service_offering = ServiceOffering.create(
cls.api_client,
cls.services["service_offering"]
)
cls.disk_offering = DiskOffering.create(
cls.api_client,
cls.services["disk_offering"]
)
# Cleanup
cls._cleanup = [
cls.service_offering,
cls.disk_offering,
]
return
@classmethod
def tearDownClass(cls):
try:
cleanup_resources(cls.api_client, cls._cleanup)
except Exception as e:
raise Exception("Warning: Exception during cleanup : %s" % e)
def setUp(self):
self.apiclient = self.testClient.getApiClient()
self.dbclient = self.testClient.getDbConnection()
self.services = Services().services
self.services["virtual_machine"]["zoneid"] = self.zone.id
self.services["virtual_machine"]["template"] = self.template.id
self.services["iso"]["zoneid"] = self.zone.id
self.account = Account.create(
self.apiclient,
self.services["account"],
domainid=self.domain.id
)
self.cleanup = [self.account]
return
def tearDown(self):
try:
self.debug("Cleaning up the resources")
cleanup_resources(self.apiclient, self.cleanup)
self.debug("Cleanup complete!")
except Exception as e:
self.debug("Warning! Exception in tearDown: %s" % e)
@attr(tags = ["advanced", "eip", "advancedns", "basic", "sg"])
def test_01_deploy_vm_no_startvm(self):
"""Test Deploy Virtual Machine with no startVM parameter
"""
# Validate the following:
# 1. deploy Vm without specifying the startvm parameter
# 2. Should be able to login to the VM.
# 3. listVM command should return the deployed VM.State of this VM
# should be "Running".
self.debug("Deploying instance in the account: %s" %
self.account.name)
self.virtual_machine_1 = VirtualMachine.create(
self.apiclient,
self.services["virtual_machine"],
accountid=self.account.name,
domainid=self.account.domainid,
serviceofferingid=self.service_offering.id,
diskofferingid=self.disk_offering.id,
startvm=False
)
self.debug("Deployed instance in account: %s" %
self.account.name)
list_vm_response = list_virtual_machines(
self.apiclient,
id=self.virtual_machine_1.id
)
self.debug(
"Verify listVirtualMachines response for virtual machine: %s" \
% self.virtual_machine_1.id
)
self.assertEqual(
isinstance(list_vm_response, list),
True,
"Check list response returns a valid list"
)
vm_response = list_vm_response[0]
self.assertEqual(
vm_response.state,
"Stopped",
"VM should be in stopped state after deployment"
)
self.debug("Checking the router state after VM deployment")
routers = Router.list(
self.apiclient,
account=self.account.name,
domainid=self.account.domainid,
listall=True
)
self.assertEqual(
routers,
None,
"List routers should return empty response"
)
self.debug(
"Deploying another instance (startvm=true) in the account: %s" %
self.account.name)
self.virtual_machine_2 = VirtualMachine.create(
self.apiclient,
self.services["virtual_machine"],
accountid=self.account.name,
domainid=self.account.domainid,
serviceofferingid=self.service_offering.id,
diskofferingid=self.disk_offering.id,
startvm=True
)
self.debug("Deployed instance in account: %s" %
self.account.name)
list_vm_response = list_virtual_machines(
self.apiclient,
id=self.virtual_machine_2.id
)
self.debug(
"Verify listVirtualMachines response for virtual machine: %s" \
% self.virtual_machine_2.id
)
self.assertEqual(
isinstance(list_vm_response, list),
True,
"Check list response returns a valid list"
)
vm_response = list_vm_response[0]
self.assertEqual(
vm_response.state,
"Running",
"VM should be in Running state after deployment"
)
self.debug("Checking the router state after VM deployment")
routers = Router.list(
self.apiclient,
account=self.account.name,
domainid=self.account.domainid,
listall=True
)
self.assertEqual(
isinstance(routers, list),
True,
"List routers should not return empty response"
)
for router in routers:
self.debug("Router state: %s" % router.state)
self.assertEqual(
router.state,
"Running",
"Router should be in running state when instance is running in the account"
)
self.debug("Destroying the running VM:%s" %
self.virtual_machine_2.name)
self.virtual_machine_2.delete(self.apiclient)
self.debug("Instance destroyed..waiting till expunge interval")
interval = list_configurations(
self.apiclient,
name='expunge.interval'
)
delay = list_configurations(
self.apiclient,
name='expunge.delay'
)
# Sleep to ensure that all resources are deleted
time.sleep((int(interval[0].value) + int(delay[0].value)) * 2)
self.debug("Checking the router state after VM deployment")
routers = Router.list(
self.apiclient,
account=self.account.name,
domainid=self.account.domainid,
listall=True
)
self.assertNotEqual(
routers,
None,
"List routers should still return the router for the account"
)
return
class TestDeployVMBasicZone(cloudstackTestCase):
@classmethod
def setUpClass(cls):
cls.api_client = super(
TestDeployVMBasicZone,
cls
).getClsTestClient().getApiClient()
cls.services = Services().services
# Get Zone, Domain and templates
cls.domain = get_domain(cls.api_client, cls.services)
cls.zone = get_zone(cls.api_client, cls.services)
cls.template = get_template(
cls.api_client,
cls.zone.id,
cls.services["ostype"]
)
# Create service offerings, disk offerings etc
cls.service_offering = ServiceOffering.create(
cls.api_client,
cls.services["service_offering"]
)
cls.disk_offering = DiskOffering.create(
cls.api_client,
cls.services["disk_offering"]
)
# Cleanup
cls._cleanup = [
cls.service_offering,
cls.disk_offering,
]
return
@classmethod
def tearDownClass(cls):
try:
cleanup_resources(cls.api_client, cls._cleanup)
except Exception as e:
raise Exception("Warning: Exception during cleanup : %s" % e)
def setUp(self):
self.apiclient = self.testClient.getApiClient()
self.dbclient = self.testClient.getDbConnection()
self.services = Services().services
self.services["virtual_machine"]["zoneid"] = self.zone.id
self.services["iso"]["zoneid"] = self.zone.id
self.services["virtual_machine"]["template"] = self.template.id
self.account = Account.create(
self.apiclient,
self.services["account"],
domainid=self.domain.id
)
self.cleanup = [self.account]
return
def tearDown(self):
try:
self.debug("Cleaning up the resources")
cleanup_resources(self.apiclient, self.cleanup)
self.debug("Cleanup complete!")
except Exception as e:
self.debug("Warning! Exception in tearDown: %s" % e)
class TestDeployVMFromTemplate(cloudstackTestCase):
@classmethod
def setUpClass(cls):
cls.api_client = super(
TestDeployVMFromTemplate,
cls
).getClsTestClient().getApiClient()
cls.services = Services().services
# Get Zone, Domain and templates
cls.domain = get_domain(cls.api_client, cls.services)
cls.zone = get_zone(cls.api_client, cls.services)
# Create service, disk offerings etc
cls.service_offering = ServiceOffering.create(
cls.api_client,
cls.services["service_offering"],
offerha=True
)
cls.disk_offering = DiskOffering.create(
cls.api_client,
cls.services["disk_offering"]
)
# Cleanup
cls._cleanup = [
cls.service_offering,
cls.disk_offering,
]
return
@classmethod
def tearDownClass(cls):
try:
cleanup_resources(cls.api_client, cls._cleanup)
except Exception as e:
raise Exception("Warning: Exception during cleanup : %s" % e)
def setUp(self):
self.apiclient = self.testClient.getApiClient()
self.dbclient = self.testClient.getDbConnection()
self.services = Services().services
self.services["virtual_machine"]["zoneid"] = self.zone.id
self.account = Account.create(
self.apiclient,
self.services["account"],
domainid=self.domain.id
)
self.template = Template.register(
self.apiclient,
self.services["template"],
zoneid=self.zone.id,
account=self.account.name,
domainid=self.account.domainid
)
try:
self.template.download(self.apiclient)
except Exception as e:
raise Exception("Template download failed: %s" % e)
self.cleanup = [self.account]
return
def tearDown(self):
try:
self.debug("Cleaning up the resources")
cleanup_resources(self.apiclient, self.cleanup)
self.debug("Cleanup complete!")
except Exception as e:
self.debug("Warning! Exception in tearDown: %s" % e)
@attr(tags = ["advanced", "eip", "advancedns", "basic", "sg"])
def test_deploy_vm_password_enabled(self):
"""Test Deploy Virtual Machine with startVM=false & enabledpassword in
template
"""
# Validate the following:
# 1. Create the password enabled template
# 2. Deploy Vm with this template and passing startvm=false
# 3. Start VM. Deploy VM should be successful and it should be in Up
# and running state
self.debug("Deploying instance in the account: %s" %
self.account.name)
self.virtual_machine = VirtualMachine.create(
self.apiclient,
self.services["virtual_machine"],
accountid=self.account.name,
domainid=self.account.domainid,
serviceofferingid=self.service_offering.id,
templateid=self.template.id,
startvm=False,
)
self.debug("Deployed instance in account: %s" %
self.account.name)
list_vm_response = list_virtual_machines(
self.apiclient,
id=self.virtual_machine.id
)
self.debug(
"Verify listVirtualMachines response for virtual machine: %s" \
% self.virtual_machine.id
)
self.assertEqual(
isinstance(list_vm_response, list),
True,
"Check list response returns a valid list"
)
vm_response = list_vm_response[0]
self.assertEqual(
vm_response.state,
"Stopped",
"VM should be in stopped state after deployment"
)
self.debug("Starting the instance: %s" % self.virtual_machine.name)
self.virtual_machine.start(self.apiclient)
self.debug("Started the instance: %s" % self.virtual_machine.name)
list_vm_response = list_virtual_machines(
self.apiclient,
id=self.virtual_machine.id
)
self.debug(
"Verify listVirtualMachines response for virtual machine: %s" \
% self.virtual_machine.id
)
self.assertEqual(
isinstance(list_vm_response, list),
True,
"Check list response returns a valid list"
)
vm_response = list_vm_response[0]
self.assertEqual(
vm_response.state,
"Running",
"VM should be in running state after deployment"
)
return
class TestVMAccountLimit(cloudstackTestCase):
@classmethod
def setUpClass(cls):
cls.api_client = super(
TestVMAccountLimit,
cls
).getClsTestClient().getApiClient()
cls.services = Services().services
# Get Zone, Domain and templates
cls.domain = get_domain(cls.api_client, cls.services)
cls.zone = get_zone(cls.api_client, cls.services)
cls.template = get_template(
cls.api_client,
cls.zone.id,
cls.services["ostype"]
)
cls.services["virtual_machine"]["zoneid"] = cls.zone.id
# Create Account, VMs etc
cls.account = Account.create(
cls.api_client,
cls.services["account"],
domainid=cls.domain.id
)
cls.service_offering = ServiceOffering.create(
cls.api_client,
cls.services["service_offering"]
)
cls._cleanup = [
cls.service_offering,
cls.account
]
return
@classmethod
def tearDownClass(cls):
try:
#Cleanup resources used
cleanup_resources(cls.api_client, cls._cleanup)
except Exception as e:
raise Exception("Warning: Exception during cleanup : %s" % e)
return
def setUp(self):
self.apiclient = self.testClient.getApiClient()
self.dbclient = self.testClient.getDbConnection()
self.cleanup = []
return
def tearDown(self):
try:
#Clean up, terminate the created instance, volumes and snapshots
cleanup_resources(self.apiclient, self.cleanup)
except Exception as e:
raise Exception("Warning: Exception during cleanup : %s" % e)
return
@attr(tags = ["advanced", "eip", "advancedns", "basic", "sg"])
def test_vm_per_account(self):
"""Test VM limit per account
"""
# Validate the following
# 1. Set the resource limit for VM per account.
# 2. Deploy VMs more than limit in that account.
# 3. API should error out
self.debug(
"Updating instance resource limit for account: %s" %
self.account.name)
# Set usage_vm=1 for Account 1
update_resource_limit(
self.apiclient,
0, # Instance
account=self.account.name,
domainid=self.account.domainid,
max=1
)
self.debug(
"Deploying VM instance in account: %s" %
self.account.name)
virtual_machine = VirtualMachine.create(
self.apiclient,
self.services["virtual_machine"],
templateid=self.template.id,
accountid=self.account.name,
domainid=self.account.domainid,
serviceofferingid=self.service_offering.id,
startvm=False
)
# Verify VM state
self.assertEqual(
virtual_machine.state,
'Stopped',
"VM state should be Stopped when deployed with startvm=false"
)
# Exception should be raised for second instance (account_1)
with self.assertRaises(Exception):
VirtualMachine.create(
self.apiclient,
self.services["virtual_machine"],
templateid=self.template.id,
accountid=self.account.name,
domainid=self.account.domainid,
serviceofferingid=self.service_offering.id,
startvm=False
)
return
class TestUploadAttachVolume(cloudstackTestCase):
@classmethod
def setUpClass(cls):
cls.api_client = super(
TestUploadAttachVolume,
cls
).getClsTestClient().getApiClient()
cls.services = Services().services
# Get Zone, Domain and templates
cls.domain = get_domain(cls.api_client, cls.services)
cls.zone = get_zone(cls.api_client, cls.services)
cls.template = get_template(
cls.api_client,
cls.zone.id,
cls.services["ostype"]
)
cls.services["virtual_machine"]["zoneid"] = cls.zone.id
# Create Account, VMs etc
cls.account = Account.create(
cls.api_client,
cls.services["account"],
domainid=cls.domain.id
)
cls.service_offering = ServiceOffering.create(
cls.api_client,
cls.services["service_offering"]
)
cls._cleanup = [
cls.service_offering,
cls.account
]
return
@classmethod
def tearDownClass(cls):
try:
#Cleanup resources used
cleanup_resources(cls.api_client, cls._cleanup)
except Exception as e:
raise Exception("Warning: Exception during cleanup : %s" % e)
return
def setUp(self):
self.apiclient = self.testClient.getApiClient()
self.dbclient = self.testClient.getDbConnection()
self.cleanup = []
return
def tearDown(self):
try:
#Clean up, terminate the created instance, volumes and snapshots
cleanup_resources(self.apiclient, self.cleanup)
except Exception as e:
raise Exception("Warning: Exception during cleanup : %s" % e)
return
@attr(tags = ["advanced", "eip", "advancedns", "basic", "sg"])
def test_upload_attach_volume(self):
"""Test Upload volume and attach to VM in stopped state
"""
# Validate the following
# 1. Upload the volume using uploadVolume API call
# 2. Deploy VM with startvm=false.
# 3. Attach the volume to the deployed VM in step 2
self.debug(
"Uploading the volume: %s" %
self.services["volume"]["diskname"])
try:
volume = Volume.upload(
self.apiclient,
self.services["volume"],
zoneid=self.zone.id,
account=self.account.name,
domainid=self.account.domainid
)
self.debug("Uploading the volume: %s" % volume.name)
volume.wait_for_upload(self.apiclient)
self.debug("Volume: %s uploaded successfully" % volume.name)
except Exception as e:
self.fail("Failed to upload the volume: %s" % e)
self.debug(
"Deploying VM instance in account: %s" %
self.account.name)
virtual_machine = VirtualMachine.create(
self.apiclient,
self.services["virtual_machine"],
templateid=self.template.id,
accountid=self.account.name,
domainid=self.account.domainid,
serviceofferingid=self.service_offering.id,
startvm=False
)
# Verify VM state
self.assertEqual(
virtual_machine.state,
'Stopped',
"VM state should be Stopped when deployed with startvm=false"
)
virtual_machine.attach_volume(self.apiclient, volume)
return
class TestDeployOnSpecificHost(cloudstackTestCase):
@classmethod
def setUpClass(cls):
cls.api_client = super(
TestDeployOnSpecificHost,
cls
).getClsTestClient().getApiClient()
cls.services = Services().services
# Get Zone, Domain and templates
cls.domain = get_domain(cls.api_client, cls.services)
cls.zone = get_zone(cls.api_client, cls.services)
cls.template = get_template(
cls.api_client,
cls.zone.id,
cls.services["ostype"]
)
cls.services["virtual_machine"]["zoneid"] = cls.zone.id
cls.services["virtual_machine"]["template"] = cls.template.id
cls.service_offering = ServiceOffering.create(
cls.api_client,
cls.services["service_offering"]
)
cls._cleanup = [
cls.service_offering,
]
return
@classmethod
def tearDownClass(cls):
try:
#Cleanup resources used
cleanup_resources(cls.api_client, cls._cleanup)
except Exception as e:
raise Exception("Warning: Exception during cleanup : %s" % e)
return
def setUp(self):
self.apiclient = self.testClient.getApiClient()
self.dbclient = self.testClient.getDbConnection()
self.account = Account.create(
self.apiclient,
self.services["account"],
admin=True,
domainid=self.domain.id
)
self.cleanup = []
return
def tearDown(self):
try:
self.account.delete(self.apiclient)
cleanup_resources(self.apiclient, self.cleanup)
except Exception as e:
raise Exception("Warning: Exception during cleanup : %s" % e)
return
@attr(tags=["advanced", "advancedns", "simulator",
"api", "basic", "eip", "sg"])
def test_deployVmOnGivenHost(self):
"""Test deploy VM on specific host
"""
# Steps for validation
# 1. as admin list available hosts that are Up
# 2. deployVM with hostid=above host
# 3. listVirtualMachines
# 4. destroy VM
# Validate the following
# 1. listHosts returns at least one host in Up state
# 2. VM should be in Running
# 3. VM should be on the host that it was deployed on
hosts = Host.list(
self.apiclient,
zoneid=self.zone.id,
type='Routing',
state='Up',
listall=True
)
self.assertEqual(
isinstance(hosts, list),
True,
"CloudStack should have at least one host in Up state"
)
host = hosts[0]
self.debug("Deploying VM on host: %s" % host.name)
try:
vm = VirtualMachine.create(
self.apiclient,
self.services["virtual_machine"],
templateid=self.template.id,
accountid=self.account.name,
domainid=self.account.domainid,
serviceofferingid=self.service_offering.id,
hostid=host.id
)
self.debug("Deploy VM succeeded")
except Exception as e:
self.fail("Deploy VM failed with exception: %s" % e)
self.debug("Checking the state of the deployed VM")
vms = VirtualMachine.list(
self.apiclient,
id=vm.id,
listall=True,
account=self.account.name,
domainid=self.account.domainid
)
self.assertEqual(
isinstance(vms, list),
True,
"List Vm should return a valid response"
)
vm_response = vms[0]
self.assertEqual(
vm_response.state,
"Running",
"VM should be in running state after deployment"
)
self.assertEqual(
vm_response.hostid,
host.id,
"Host id where VM is deployed should match"
)
return
# Source: ksowmya/cloudstack-1 / test/integration/component/test_stopped_vm.py (Python, Apache-2.0)
# Created by Qingzhi Ma at 2019-07-24
# All right reserved
# Department of Computer Science
# the University of Warwick
# [email protected]
from dbestclient.ml.density import DBEstDensity
from dbestclient.ml.modelwraper import SimpleModelWrapper, GroupByModelWrapper
from dbestclient.ml.regression import DBEstReg
from dbestclient.tools.dftools import convert_df_to_yx
import numpy as np
class SimpleModelTrainer:
def __init__(self, mdl, tbl, xheader, yheader, n_total_point, n_sample_point, groupby_attribute=None, groupby_value=None):
self.xheader = xheader
self.yheader = yheader
self.simpe_model_wrapper = SimpleModelWrapper(mdl, tbl, xheader, y=yheader, n_total_point=n_total_point,
n_sample_point=n_sample_point, groupby_attribute=groupby_attribute, groupby_value=groupby_value)
def fit(self, x, y):
reg = DBEstReg().fit(x, y)
density = DBEstDensity().fit(x)
self.simpe_model_wrapper.load_model(density, reg)
return self.simpe_model_wrapper
def fit_from_df(self, df):
y, x = convert_df_to_yx(df, self.xheader, self.yheader)
return self.fit(x, y)
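# Illustrative usage sketch (hypothetical table/column names, not part of the original module):
#   trainer = SimpleModelTrainer("mdl1", "sales", "price", "amount",
#                                n_total_point=1000000, n_sample_point=10000)
#   model_wrapper = trainer.fit_from_df(sample_df)  # sample_df holds the sampled rows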
class GroupByModelTrainer:
def __init__(self, mdl, tbl, xheader, yheader, groupby_attribute, n_total_point, n_sample_point,
x_min_value=-np.inf, x_max_value=np.inf):
self.groupby_model_wrapper = GroupByModelWrapper(mdl, tbl, xheader, yheader, groupby_attribute,
x_min_value=x_min_value, x_max_value=x_max_value)
self.groupby_attribute = groupby_attribute
self.mdl = mdl
self.tbl = tbl
self.xheader = xheader
self.yheader = yheader
self.n_total_point = n_total_point
self.n_sample_point = n_sample_point
self.x_min_value = x_min_value
self.x_max_value = x_max_value
def fit_from_df(self, df):
sample_grouped = df.groupby(by=self.groupby_attribute)
for name, group in sample_grouped:
print("training " + name)
simple_model_wrapper = SimpleModelTrainer(self.mdl, self.tbl, self.xheader, self.yheader,
self.n_total_point[name], self.n_sample_point[name],
groupby_attribute=self.groupby_attribute, groupby_value=name).fit_from_df(group)
self.groupby_model_wrapper.add_simple_model(simple_model_wrapper)
# print(self.groupby_model_wrapper)
return self.groupby_model_wrapper
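# Illustrative usage sketch (hypothetical names). Note that n_total_point and n_sample_point
# are expected to be dicts keyed by group-by value, since they are indexed as
# self.n_total_point[name] above:
#   gb_trainer = GroupByModelTrainer("mdl1", "sales", "price", "amount", "region",
#                                    n_total_point=totals_by_region,
#                                    n_sample_point=samples_by_region)
#   groupby_wrapper = gb_trainer.fit_from_df(sample_df)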
# Source: horeapinca/DBEstClient / dbestclient/ml/modeltrainer.py (Python, BSD-2-Clause)
from sanic import Sanic, response, Blueprint
from sanic.request import RequestParameters
from sanic_jinja2 import SanicJinja2
from sanic_session import Session, AIORedisSessionInterface
import aiosqlite
import aiofiles
import aioredis
import asyncio
import json
import html
import sys
import os
import re
from route.tool.tool import *
from route.mark.py.namumark import *
setting_data = json.loads(open('data/setting.json', encoding = 'utf8').read())
version_load = json.loads(open('data/version.json', encoding='utf-8').read())
engine_version = version_load["main"]["engine_version"]
markup_version = version_load["main"]["markup_version"]
build_count = version_load["main"]["build_count"]
renew_count = version_load["main"]["renew_count"]
print('')
print('VientoEngine')
print('engine_version : ' + engine_version)
print('markup_version : ' + markup_version)
print('build_count : ' + build_count)
print('renew_count : ' + renew_count)
print('')
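# Import every module under route/ so that its handlers and helpers become available.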
for route_file in os.listdir("route"):
py_file = re.search(r"(.+)\.py$", route_file)
if py_file:
py_file = py_file.groups()[0]
exec("from route." + py_file + " import *")
## Wiki settings
async def run():
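# One-time bootstrap: prompt for the DB name, host/port, language and password-hash scheme
# when data/setting.json is missing, then make sure the SQLite tables and columns exist.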
server_setting = {
"host" : {
"setting": "host",
"default": "0.0.0.0"
},
"port" : {
"setting": "port",
"default": "3000"
},
"lang" : {
"setting": "lang",
"default": "ko-KR",
"list" : ["ko-KR", "en-US"]
},
"encode" : {
"setting": "encode",
"default": "pbkdf2-sha512",
"list" : ["sha3", "sha256", "pbkdf2-sha512"]
}
}
try:
async with aiofiles.open('data/setting.json', encoding = 'utf8') as f:
setting_data = json.loads(await f.read())
if not all(key in setting_data for key in ('db_type', 'db_name', 'host', 'port')):
try:
os.remove('data/setting.json')
except:
print('Error : Please delete data/setting.json')
raise
else:
print('db_type : ' + setting_data['db_type'])
print('db_name : ' + setting_data['db_name'])
print('\n', end='')
print('host : ' + setting_data['host'])
print('port : ' + setting_data['port'])
except:
setting_json = ['sqlite', '', '', '']
db_type = ['sqlite']
print('db_type : sqlite')
print('db_name : ', end = '')
setting_json[1] = str(input())
if setting_json[1] == '':
setting_json[1] = 'data'
print('\n', end='')
print('host (' + server_setting['host']['default'] + ') : ', end = '')
setting_json[2] = str(input())
if setting_json[2] == '':
setting_json[2] = server_setting['host']['default']
print('port (' + server_setting['port']['default'] + ') : ', end = '')
setting_json[3] = str(input())
if setting_json[3] == '':
setting_json[3] = server_setting['port']['default']
async with aiofiles.open('data/setting.json', 'w', encoding = 'utf8') as f:
await f.write('{ "db_name" : "' + setting_json[1] + '", "db_type" : "' + setting_json[0] + '", "host" : "' + setting_json[2] + '", "port" : "' + setting_json[3] + '" }')
async with aiofiles.open('data/setting.json', encoding = 'utf8') as f:
setting_data = json.loads(await f.read())
db = await aiosqlite.connect(setting_data['db_name'] + '.db')
db_create = {}
db_create['table'] = ['doc', 'doc_cac', 'doc_his', 'rec_dis', 'rec_ban', 'rec_log', 'mbr', 'mbr_set', 'mbr_log', 'ban', 'dis', 'dis_log', 'acl', 'backlink', 'wiki_set', 'list_per', 'list_fil', 'html_fil', 'list_alarm', 'list_watch', 'list_inter']
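# Make sure every table exists: probe it with a dummy select and create the table
# (or add the dummy column) if the probe fails.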
for i in db_create['table']:
try:
await db.execute('select test from ' + i + ' limit 1')
except:
try:
await db.execute('create table ' + i + '(test longtext)')
except:
await db.execute("alter table " + i + " add test longtext default ''")
db_setup = 0
try:
db_ver = await db.execute('select data from wiki_set where name = "db_ver"')
db_ver = await db_ver.fetchall()
if not db_ver:
db_setup = 1
else:
if int(version_load['main']['renew_count']) > int(db_ver[0][0]):
db_setup = 1
except:
db_setup = 1
if db_setup != 0:
db_create['doc'] = ['title', 'data']
db_create['doc_cac'] = ['title', 'data']
db_create['doc_his'] = ['id', 'title', 'data', 'date', 'ip', 'send', 'leng', 'hide', 'type']
db_create['rec_dis'] = ['title', 'sub', 'date', 'band', 'stop', 'agree']
db_create['rec_ban'] = ['block', 'end', 'today', 'blocker', 'why', 'band']
db_create['rec_log'] = ['who', 'what', 'time']
db_create['mbr'] = ['id', 'pw', 'acl', 'date', 'email']
db_create['mbr_set'] = ['name', 'id', 'data']
db_create['mbr_log'] = ['name', 'ip', 'ua', 'today', 'sub']
db_create['ban'] = ['block', 'end', 'why', 'band', 'login']
db_create['dis'] = ['doc', 'title', 'id', 'state', 'date', 'agree']
db_create['dis_log'] = ['id', 'data', 'date', 'ip', 'block', 'top', 'code', 'doc']
db_create['acl'] = ['title', 'decu', 'dis', 'view', 'why']
db_create['backlink'] = ['title', 'link', 'type']
db_create['wiki_set'] = ['name', 'data', 'coverage']
db_create['list_per'] = ['name', 'acl']
db_create['list_fil'] = ['name', 'regex', 'sub']
db_create['html_fil'] = ['html', 'kind', 'plus']
db_create['list_alarm'] = ['name', 'data', 'date']
db_create['list_watch'] = ['user', 'title']
db_create['list_inter'] = ['title', 'link', 'icon']
for create_table in db_create['table']:
for create in db_create[create_table]:
try:
await db.execute('select ' + create + ' from ' + create_table + ' limit 1')
except:
await db.execute("alter table " + create_table + " add " + create + " longtext default ''")
try:
await db.execute('create index index_' + create_table + '_' + create + ' on ' + create_table + '(' + create + ')')
except:
pass
await db.execute('delete from wiki_set where name = "db_ver"')
await db.execute('insert into wiki_set (name, data) values (?, ?)', ["db_ver", version_load['main']['renew_count']])
await db.commit()
first_setup = await db.execute('select data from wiki_set where name = "lang"')
first_setup = await first_setup.fetchall()
if not first_setup:
lang = server_setting['lang']['list'][0] + ', ' + server_setting['lang']['list'][1]
print('lang [' + lang + '] (' + server_setting['lang']['default'] + ') : ', end = '')
setting_lang = str(input())
if setting_lang == '':
setting_lang = server_setting['lang']['default']
await db.execute('insert into wiki_set (name, data) values (?, ?)', ['lang', setting_lang])
encode = server_setting['encode']['list'][0] + ', ' + server_setting['encode']['list'][1] + ', ' + server_setting['encode']['list'][2]
print('encode [' + encode + '] (' + server_setting['encode']['default'] + ') : ', end = '')
setting_encode = str(input())
if setting_encode == '':
setting_encode = server_setting['encode']['default']
await db.execute('insert into wiki_set (name, data) values (?, ?)', ['encode', setting_encode])
await db.commit()
else:
encode_check = await db.execute('select data from wiki_set where name = "encode"')
encode_check = await encode_check.fetchall()
print('lang : ' + first_setup[0][0])
print('encode : ' + encode_check[0][0])
print("\n", end='')
loop = asyncio.get_event_loop()
loop.run_until_complete(run())
app = Sanic(__name__)
jinja = SanicJinja2(app, pkg_path='skins')
session = Session(app)
app.static('/skins', './skins')
## Route definitions
'''@app.listener('before_server_start')
async def server_init(app, loop):
app.redis = await aioredis.create_pool(
('localhost', 6379),
minsize=5,
maxsize=10,
loop=loop
)
session.init_app(app, interface=AIORedisSessionInterface(app.redis))'''
@app.route('/')
async def wiki_frontpage(request):
async with aiofiles.open('data/setting.json', encoding = 'utf8') as f:
setting_data = json.loads(await f.read())
db = await aiosqlite.connect(setting_data['db_name'] + '.db')
data_get = await db.execute("select data from wiki_set where name = ?", ['frontpage'])
data_get = await data_get.fetchall()
if data_get:
return response.redirect('/w/' + data_get[0][0])
else:
return response.redirect('/w/FrontPage')
@app.route("/w/<name:string>")
async def wiki_read(request, name):
async with aiofiles.open('data/setting.json', encoding = 'utf8') as f:
setting_data = json.loads(await f.read())
db = await aiosqlite.connect(setting_data['db_name'] + '.db')
data = await db.execute("select data from doc where title = ?", [name])
data = await data.fetchall()
if data:
return jinja.render("index.html", request, wiki_set = await wiki_set(request, name),
data = await namumark(data[0][0]),
title = name,
sub = 0,
menu = [['edit/' + name, '편집'], ['discuss/' + name, '토론'], ['backlink/' + name, '역링크'], ['history/' + name, '역사'], ['acl/' + name, 'ACL']]
)
else:
return jinja.render("index.html", request, wiki_set = await wiki_set(request, name),
data = "해당 문서를 찾을 수 없습니다.",
title = name,
sub = 0,
menu = [['edit/' + name, '편집'], ['discuss/' + name, '토론'], ['backlink/' + name, '역링크'], ['history/' + name, '역사'], ['acl/' + name, 'ACL']]
)
@app.route("/edit/<name:string>", methods=['POST', 'GET'])
async def wiki_edit(request, name):
async with aiofiles.open('data/setting.json', encoding = 'utf8') as f:
setting_data = json.loads(await f.read())
db = await aiosqlite.connect(setting_data['db_name'] + '.db')
data_get = await db.execute("select data from doc where title = ? ", [name])
data_get = await data_get.fetchall()
data = ""
olddata = ''
if data_get:
data = data_get[0][0]
olddata = data
if request.method == 'POST':
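        # On save: skip no-op edits, store newlines as <br> in the doc table and
        # record a history entry together with the size delta of the change.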
data = request.form.get('wiki_edit_textarea_1', '')
send = request.form.get('wiki_edit_textbox_1', '')
if data_get:
if data_get[0][0] == data:
return response.redirect("/w/" + name)
else:
data = re.sub('\n', '<br>', data)
await db.execute("update doc set data = ? where title = ?", [data, name])
await db.commit()
await history_add(name, data, await date_time(), await user_name(request), send, str(len(data) - len(olddata)))
return response.redirect("/w/" + name)
else:
data = re.sub('\n', '<br>', data)
await db.execute("insert into doc (title, data) values (?, ?)", [name, data])
await db.commit()
await history_add(name, data, await date_time(), await user_name(request), send, str(len(data)))
return response.redirect("/w/" + name)
return jinja.render("index.html", request, wiki_set = await wiki_set(request, name),
data = '''
<form method="post">
<textarea rows="25" class="wiki_textarea" name="wiki_edit_textarea_1">''' + html.escape(re.sub('<br>', '\n', data)) + '''</textarea>
<hr class="wiki_hr">
<input type="text" placeholder="요약" class="wiki_textbox" name="wiki_edit_textbox_1">
<hr class="wiki_hr">
<button type="submit" class="wiki_button" name="wiki_edit_button_1">저장</button>
</form>
''',
title = name,
sub = '편집',
menu = [['delete/' + name, '삭제'], ['move/' + name, '이동'], ['w/' + name, '문서']]
)
@app.route("/history/<name:string>")
async def wiki_history(request, name):
async with aiofiles.open('data/setting.json', encoding = 'utf8') as f:
setting_data = json.loads(await f.read())
db = await aiosqlite.connect(setting_data['db_name'] + '.db')
data = '''
<table class="wiki_history_table">
<tr class="wiki_history_table_top">
<td class="wiki_table_history_top">문서</td>
<td class="wiki_table_history_top">편집자</td>
<td class="wiki_table_history_top">시간</td>
</tr>
'''
data_get = await db.execute("select id, title, date, ip, send, leng from doc_his where title = ? order by id + 0 desc limit 30", [name])
data_get = await data_get.fetchall()
for history_data in data_get:
if data_get:
data += '''
<tr class="wiki_history_table_middle">
<td class="wiki_table_history"><a href="/w/''' + history_data[1] + '''">''' + history_data[1] + '''</a> (''' + history_data[5] + ''')</td>
<td class="wiki_table_history">''' + await user_link(history_data[3]) + '''</td>
<td class="wiki_table_history">''' + history_data[2] + '''
</tr>
<tr>
<td colspan="3" class="wiki_table_history">''' + history_data[4] + '''</td>
</tr>
'''
data += '</table>'
return jinja.render("index.html", request, wiki_set = await wiki_set(request, name),
data = data,
title = name,
sub = '역사',
menu = [['w/' + name, '문서']]
)
@app.route("/delete/<name:string>", methods=['POST', 'GET'])
async def wiki_delete(request, name):
async with aiofiles.open('data/setting.json', encoding = 'utf8') as f:
setting_data = json.loads(await f.read())
db = await aiosqlite.connect(setting_data['db_name'] + '.db')
data_get = await db.execute("select data from doc where title = ? ", [name])
data_get = await data_get.fetchall()
if request.method == 'POST':
send = request.form.get('wiki_delete_textbox_1', '')
await db.execute("delete from doc where title = ?", [name])
await db.commit()
await history_add(name, '', await date_time(), await user_name(request), send, '0')
return response.redirect("/w/" + name)
if data_get:
return jinja.render("index.html", request, wiki_set = await wiki_set(request, name),
data = '''
<form method="post">
<textarea class="wiki_textarea" name="wiki_dekete_textarea_1" readonly>''' + data_get[0][0] + '''</textarea>
<input type="text" placeholder="요약" class="wiki_textbox" name="wiki_delete_textbox_1">
<hr class="wiki_hr">
<button type="submit" class="wiki_button" name="wiki_delete_button_1">확인</button>
</form>
''',
title = name,
sub = '삭제',
menu = [['w/' + name, '문서']]
)
else:
return response.redirect("/error/") # 오류 페이지 구현 필요
@app.route("/move/<name:string>", methods=['POST', 'GET'])
async def wiki_move(request, name):
async with aiofiles.open('data/setting.json', encoding = 'utf8') as f:
setting_data = json.loads(await f.read())
db = await aiosqlite.connect(setting_data['db_name'] + '.db')
data_get = await db.execute("select data from doc where title = ? ", [name])
data_get = await data_get.fetchall()
if request.method == 'POST':
change_name = request.form.get('wiki_move_textbox_1', '')
send = request.form.get('wiki_move_textbox_2', '')
await db.execute("update doc set title = ? where title = ?", [change_name, name])
await db.execute("update doc_his set title = ? where title = ?", [change_name, name])
await db.commit()
await history_add(change_name, '', await date_time(), await user_name(request), send, '0')
return response.redirect("/w/" + change_name)
if data_get:
return jinja.render("index.html", request, wiki_set = await wiki_set(request, name),
data = '''
<form method="post">
<input type="text" value="''' + name + '''" class="wiki_textbox" name="wiki_move_textbox_1">
<hr class="wiki_hr">
<input type="text" placeholder="요약" class="wiki_textbox" name="wiki_move_textbox_2">
<hr class="wiki_hr">
<button type="submit" class="wiki_button" name="wiki_move_button_1">확인</button>
</form>
''',
title = name,
sub = '이동',
menu = [['w/' + name, '문서']]
)
else:
return response.redirect("/error/") # 오류 페이지 구현 필요
@app.route("/revert/<name:string>", methods=['POST', 'GET'])
async def wiki_revert(request, name):
async with aiofiles.open('data/setting.json', encoding = 'utf8') as f:
setting_data = json.loads(await f.read())
db = await aiosqlite.connect(setting_data['db_name'] + '.db')
args = RequestParameters()
num = request.args.get('num', '1')
dbdata = await db.execute("select data from doc_his order by cast(id as integer) desc limit 1")
dbdata = await dbdata.fetchall()
current = dbdata[0][0]
data_get = await db.execute("select data from doc_his where id = ?", [num])
data_get = await data_get.fetchall()
data_get = data_get[0][0]
if request.method == 'POST':
send = request.form.get('wiki_revert_textbox_2', '')
data_get = re.sub('\n', '<br>', data_get)
await db.execute("update doc set data = ? where title = ?", [data_get, name])
await db.commit()
await history_add(name, data_get, await date_time(), await user_name(request), send, str(len(current) - len(data_get)))
return response.redirect("/w/" + name)
if data_get:
return jinja.render("index.html", request, wiki_set = await wiki_set(request, name),
data = '''
<form method="post">
<textarea rows="25" class="wiki_textarea" name="wiki_revert_textarea_1" readonly>''' + data_get + '''</textarea>
<hr class="wiki_hr">
<input type="text" placeholder="요약" class="wiki_textbox" name="wiki_revert_textbox_2">
<hr class="wiki_hr">
<button type="submit" class="wiki_button" name="wiki_revert_button_1">확인</button>
</form>
''',
title = name,
sub = 'r' + num + ' 복구',
menu = [['w/' + name, '문서']]
)
else:
return response.redirect("/error/") # 오류 페이지 구현 필요
@app.route("/member/signup", methods=['POST', 'GET'])
async def wiki_signup(request):
async with aiofiles.open('data/setting.json', encoding = 'utf8') as f:
setting_data = json.loads(await f.read())
db = await aiosqlite.connect(setting_data['db_name'] + '.db')
if request.ctx.session.get('id') == 1:
return response.redirect('/')
if request.method == 'POST':
signup_id = request.form.get('wiki_signup_textbox_1', '')
signup_password_1 = request.form.get('wiki_signup_textbox_2', '')
signup_password_2 = request.form.get('wiki_signup_textbox_3', '')
if not signup_password_1 and not signup_password_2:
return response.redirect("/error/") # 오류 페이지 구현 필요
if signup_password_1 != signup_password_2:
return response.redirect("/error/") # 오류 페이지 구현 필요
if re.search("(?:[^A-Za-z0-9가-힣])", signup_id):
return response.redirect("/error/") # 오류 페이지 구현 필요
if len(signup_id) > 24 or len(signup_id) < 3:
return response.redirect("/error/") # 오류 페이지 구현 필요
id_check = await db.execute("select id from mbr where id = ?", [signup_id])
id_check = await id_check.fetchall()
if id_check:
return response.redirect("/error/")
encode_password = await password_encode(signup_password_1, signup_id)
first_check = await db.execute("select * from mbr limit 1")
first_check = await first_check.fetchall()
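        # The very first account ever created is granted the 'owner' ACL;
        # every later signup defaults to 'member'.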
if not first_check:
await db.execute("insert into mbr (id, pw, acl, date, email) values (?, ?, ?, ?, ?)", [signup_id, encode_password, 'owner', await date_time(), ''])
await db.execute("insert into mbr_log (name, ip, ua, today) values (?, ?, ?, ?)", [signup_id, '0', '0', await date_time()])
await db.commit()
return response.redirect("/member/login")
else:
await db.execute("insert into mbr (id, pw, acl, date, email) values (?, ?, ?, ?, ?)", [signup_id, encode_password, 'member', await date_time(), '']) # 추후 권한 개편 시 member가 아닌 직접 선택하도록 변경.
await db.execute("insert into mbr_log (name, ip, ua, today) values (?, ?, ?, ?)", [signup_id, '0', '0', await date_time()])
await db.commit()
return response.redirect("/member/login")
return jinja.render("index.html", request, wiki_set = await wiki_set(request, 0),
data = '''
<form method="post">
<input type="text" placeholder="아이디" class="wiki_textbox" name="wiki_signup_textbox_1">
<hr class="wiki_hr">
<input type="password" placeholder="비밀번호" class="wiki_textbox" name="wiki_signup_textbox_2">
<hr class="wiki_hr">
<input type="password" placeholder="비밀번호 확인" class="wiki_textbox" name="wiki_signup_textbox_3">
<hr class="wiki_hr">
<button type="submit" class="wiki_button" name="wiki_signup_button_1">확인</button>
</form>
''',
title = '계정 만들기',
sub = 0,
menu = 0
)
@app.route("/member/login", methods=['POST', 'GET'])
async def wiki_login(request):
async with aiofiles.open('data/setting.json', encoding = 'utf8') as f:
setting_data = json.loads(await f.read())
db = await aiosqlite.connect(setting_data['db_name'] + '.db')
if request.ctx.session.get('id') == 1:
return response.redirect('/')
if request.method == 'POST':
wiki_id = request.form.get('wiki_login_textbox_1', '')
wiki_password = request.form.get('wiki_login_textbox_2', '')
wiki_pass_check = await VerifyAuth(wiki_id, wiki_password, 0)
if wiki_pass_check == 1:
request.ctx.session['id'] = wiki_id
return response.redirect("/")
else:
            return response.redirect('/error/') # TODO: error page not implemented yet
return jinja.render("index.html", request, wiki_set = await wiki_set(request, 0),
data = '''
<form method="post">
<input type="text" placeholder="아이디" class="wiki_textbox" name="wiki_login_textbox_1">
<hr class="wiki_hr">
<input type="password" placeholder="비밀번호" class="wiki_textbox" name="wiki_login_textbox_2">
<hr class="wiki_hr">
<button type="submit" class="wiki_button" name="wiki_login_button_1">확인</button>
</form>
''',
title = '로그인',
sub = 0,
menu = 0
)
@app.route("/member/logout", methods=['POST', 'GET'])
async def wiki_logout(request):
if not request.ctx.session.get('id') or request.ctx.session.get('id') == 0:
return response.redirect('/')
request.ctx.session['id'] = 0
return response.redirect("/")
@app.route("/discuss/<name:string>", methods=['POST', 'GET'])
async def wiki_discuss(request, name):
async with aiofiles.open('data/setting.json', encoding = 'utf8') as f:
setting_data = json.loads(await f.read())
db = await aiosqlite.connect(setting_data['db_name'] + '.db')
data = ''
discuss_get = await db.execute("select title, id, state, date, agree from dis where doc = ?", [name])
discuss_get = await discuss_get.fetchall()
if discuss_get:
for discuss in discuss_get:
data += '<h2><a href="/discuss/' + name + '/' + discuss[1] + '">' + discuss[1] + '. ' + discuss[0] + '</a></h2><hr class="wiki_hr">'
if request.method == "POST":
discuss_title = request.form.get('wiki_discuss_textbox_1', '')
discuss_data = request.form.get('wiki_discuss_textarea_1', '')
if discuss_title == '' or discuss_data == '':
return response.redirect("/error/") # 오류 구현 필요
discuss_number = await db.execute("select id from dis where doc = ? order by id desc", [name])
discuss_number = await discuss_number.fetchall()
if not discuss_number:
discuss_id = '1'
else:
discuss_id = str(int(discuss_number[0][0]) + 1)
await db.execute("insert into dis (doc, title, id, state, date, agree) values (?, ?, ?, 'normal', ?, '0')", [name, discuss_title, discuss_id, await date_time()])
await db.execute("insert into dis_log (id, data, date, ip, block, code, doc) values (?, ?, ?, ?, '0', ?, ?)", ['1', discuss_data, await date_time(), await user_name(request), discuss_id, name])
await db.commit()
return response.redirect("/discuss/" + name + '/' + discuss_id)
return jinja.render("index.html", request, wiki_set = await wiki_set(request, name),
data = data + '''
<form method="post">
<input type="text" placeholder="토론 제목" class="wiki_textbox" name="wiki_discuss_textbox_1">
<hr class="wiki_hr">
<textarea placeholder="토론 내용" class="wiki_textarea" name="wiki_discuss_textarea_1"></textarea>
<hr class="wiki_hr">
<button type="submit" class="wiki_button" name="wiki_discuss_button_1">확인</button>
</form>
''',
title = name,
sub = '토론',
menu = [['w/' + name, '문서']]
)
@app.route("/discuss/<name:string>/<num:int>", methods=['POST', 'GET'])
async def wiki_discuss_thread(request, name, num):
async with aiofiles.open('data/setting.json', encoding = 'utf8') as f:
setting_data = json.loads(await f.read())
db = await aiosqlite.connect(setting_data['db_name'] + '.db')
data = ''
thread_list = await db.execute("select id, data, date, ip, block, top from dis_log where code = ? and doc = ?", [num, name])
thread_list = await thread_list.fetchall()
thread_user = await db.execute("select ip from dis_log where id = '1'")
thread_user = await thread_user.fetchall()
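    # The ip stored for comment id '1' is used as the thread opener, so the
    # opener's replies get the *_first CSS classes below and everyone else gets
    # *_other (note the lookup is not scoped to this particular thread).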
if not thread_list:
return response.redirect("/error/") # 오류 구현 필요
    for thread_data in thread_list: # inefficient structure, to be improved later
if thread_data[3] != '1' and thread_user[0][0] == thread_data[3]:
data += '''
<div class="wiki_thread_table_first">
<div class="wiki_thread_table_top">
''' + thread_data[0] + ''' ''' + thread_data[3] + ''' ''' + thread_data[4] + '''
</div>
<div class="wiki_thread_table_bottom">
''' + thread_data[1] + '''
</div>
</div>
'''
elif thread_data[3] != '1' and thread_user[0][0] != thread_data[3]:
data += '''
<div class="wiki_thread_table_other">
<div class="wiki_thread_table_top">
''' + thread_data[0] + ''' ''' + thread_data[3] + ''' ''' + thread_data[4] + '''
</div>
<div class="wiki_thread_table_bottom">
''' + thread_data[1] + '''
</div>
</div>
'''
elif thread_data[3] == '1' and thread_user[0][0] == thread_data[3]:
data += '''
<div class="wiki_thread_table_first_blind">
<div class="wiki_thread_table_top">
''' + thread_data[0] + ''' ''' + thread_data[3] + ''' ''' + thread_data[2] + '''
</div>
<div class="wiki_thread_table_bottom">
블라인드된 스레드입니다.
</div>
</div>
'''
else:
data += '''
<div class="wiki_thread_table_other_blind">
<div class="wiki_thread_table_top">
''' + thread_data[0] + ''' ''' + thread_data[3] + ''' ''' + thread_data[2] + '''
</div>
<div class="wiki_thread_table_bottom">
블라인드된 스레드입니다.
</div>
</div>
'''
if request.method == "POST":
textarea_data = request.form.get('wiki_thread_textarea_1')
if not textarea_data:
return response.redirect("/error/")
discuss_num = await db.execute("select id from dis_log where doc = ? order by id desc", [name])
discuss_num = await discuss_num.fetchall()
discuss_num = int(discuss_num[0][0]) + 1
await db.execute("insert into dis_log (id, data, date, ip, block, top, code, doc) values (?, ?, ?, ?, '0', '0', ?, ?)", [discuss_num, textarea_data, await date_time(), await user_name(request), num, name])
await db.commit()
return response.redirect("/discuss/" + name + "/" + str(num))
return jinja.render("index.html", request, wiki_set = await wiki_set(request, name),
data = data + '''
<form method="post">
<textarea class="wiki_textarea" name="wiki_thread_textarea_1"></textarea>
<hr class="wiki_hr">
<button type="submit" class="wiki_button" name="wiki_thread_button_1">확인</button>
</form>
''',
title = name,
sub = '토론',
menu = [['w/' + name, '문서']]
)
@app.route("/discuss/<name:string>/<num:int>/setting", methods=['POST', 'GET'])
async def wiki_discuss_thread_setting(request, name, num):
async with aiofiles.open('data/setting.json', encoding = 'utf8') as f:
setting_data = json.loads(await f.read())
db = await aiosqlite.connect(setting_data['db_name'] + '.db')
discuss_title = await db.execute("select title from dis where doc = ? and id = ?", [name, num])
discuss_title = await discuss_title.fetchall()
discuss_doc = await db.execute("select doc from dis where doc = ? and id = ?", [name, num])
discuss_doc = await discuss_doc.fetchall()
if request.method == 'POST':
change_title = request.form.get('wiki_thread_textbox_setting_1', '')
change_doc = request.form.get('wiki_thread_textbox_setting_2', '')
if change_title == '' or change_doc == '':
return response.redirect("/error/")
if change_title == discuss_title[0][0] and change_doc == discuss_doc[0][0]:
return response.redirect("setting")
if change_title != discuss_title[0][0]:
await db.execute("update dis set title = ? where doc = ? and id = ?", [change_title, discuss_doc[0][0], str(num)])
await db.commit()
return response.redirect("/discuss/" + discuss_doc[0][0] + "/" + str(num) + "/setting")
if change_doc != discuss_doc[0][0]:
number_check = await db.execute("select id from dis where doc = ? and id = ?", [change_doc, str(num)])
number_check = await number_check.fetchall()
if number_check:
discuss_renew_num = await db.execute("select id from dis where doc = ? order by id desc", [change_doc])
discuss_renew_num = await discuss_renew_num.fetchall()
discuss_renew_num = str(int(discuss_renew_num[0][0]) + 1)
await db.execute("update dis set doc = ?, id = ? where doc = ? and id = ?", [change_doc, discuss_renew_num, discuss_doc[0][0], str(num)])
await db.execute("update dis_log set code = ?, doc = ? where code = ? and doc = ?", [discuss_renew_num, change_doc, str(num), discuss_doc[0][0]])
await db.commit()
return response.redirect("/discuss/" + change_doc + "/" + discuss_renew_num + "/setting")
else:
await db.execute("update dis set doc = ? where doc = ?", [change_doc, discuss_doc[0][0]])
await db.execute("update dis_log set doc = ? where doc = ?", [change_doc, discuss_doc[0][0]])
await db.commit()
return response.redirect("/discuss/" + change_doc + "/" + str(num) + "/setting")
return jinja.render("index.html", request, wiki_set = await wiki_set(request, name),
data = '''
<form method="post">
<input class="wiki_textbox" name="wiki_thread_textbox_setting_1" value="''' + discuss_title[0][0] + '''">
<hr class="wiki_hr">
<input class="wiki_textbox" name="wiki_thread_textbox_setting_2" value="''' + discuss_doc[0][0] + '''">
<hr class="wiki_hr">
<button type="submit" class="wiki_button" name="wiki_thread_button_setting_1">확인</button>
</form>
''',
title = name,
sub = '토론',
menu = [['w/' + name, '문서']]
)
@app.route("/recent/changes")
async def wiki_recent_changes(request):
async with aiofiles.open('data/setting.json', encoding = 'utf8') as f:
setting_data = json.loads(await f.read())
db = await aiosqlite.connect(setting_data['db_name'] + '.db')
data = '''
<table class="wiki_changes_table">
<tr class="wiki_changes_table_top">
<td class="wiki_table_changes_top">문서</td>
<td class="wiki_table_changes_top">편집자</td>
<td class="wiki_table_changes_top">시간</td>
</tr>
'''
data_get = await db.execute("select id, title, date, ip, send, leng from doc_his order by id + 0 desc limit 30")
data_get = await data_get.fetchall()
for history_data in data_get:
if data_get:
data += '''
<tr class="wiki_changes_table_middle">
<td class="wiki_table_changes"><a href="/w/''' + history_data[1] + '''">''' + history_data[1] + '''</a> (''' + history_data[5] + ''')</td>
<td class="wiki_table_changes">''' + await user_link(history_data[3]) + '''</td>
<td class="wiki_table_changes">''' + history_data[2] + '''
</tr>
<tr>
<td colspan="3" class="wiki_table_changes">''' + history_data[4] + '''</td>
</tr>
'''
data += '</table>'
return jinja.render("index.html", request, wiki_set = await wiki_set(request, 0),
data = data,
title = '최근 변경',
sub = 0,
menu = 0
)
@app.route("/recent/discuss")
async def wiki_recent_discuss(request):
async with aiofiles.open('data/setting.json', encoding = 'utf8') as f:
setting_data = json.loads(await f.read())
db = await aiosqlite.connect(setting_data['db_name'] + '.db')
data = '''
<table class="wiki_discuss_table">
<tr class="wiki_discuss_table_top">
<td class="wiki_table_discuss_top">토론</td>
<td class="wiki_table_discuss_top">문서명</td>
<td class="wiki_table_discuss_top">시간</td>
</tr>
'''
data_get = await db.execute("select doc, title, id, date from dis where state = ? order by date desc limit 30", ['normal'])
data_get = await data_get.fetchall()
for discuss_data in data_get:
if data_get:
data += '''
<tr class="wiki_discuss_table_middle">
<td class="wiki_table_discuss"><a href="/discuss/''' + discuss_data[0] + '''/''' + discuss_data[2] + '''">''' + discuss_data[1] + '''</a></td>
<td class="wiki_table_discuss"><a href="/w/''' + discuss_data[0] + '''">''' + discuss_data[0] + '''</a></td>
<td class="wiki_table_discuss">''' + discuss_data[3] + '''</td>
</tr>
'''
data += '</table>'
return jinja.render("index.html", request, wiki_set = await wiki_set(request, 0),
data = data,
title = '최근 토론',
sub = 0,
menu = 0
)
@app.route("/raw/<name:string>")
async def wiki_raw(request, name):
async with aiofiles.open('data/setting.json', encoding = 'utf8') as f:
setting_data = json.loads(await f.read())
db = await aiosqlite.connect(setting_data['db_name'] + '.db')
args = RequestParameters()
num = request.args.get('num', '1')
raw_data = await db.execute("select data from doc_his where id = ? and title = ?", [num, name])
raw_data = await raw_data.fetchall()
if raw_data:
return jinja.render("index.html", request, wiki_set = await wiki_set(request, 0),
data = '<textarea class="wiki_textarea" id="wiki_textarea_raw_1" readonly>' + raw_data[0][0] + '</textarea>',
title = name,
sub = 'r' + num + ' RAW',
menu = [['w/' + name, '문서']]
)
else:
return response.redirect("/error/")
@app.route("/diff/<name:string>")
async def wiki_diff(request, name):
async with aiofiles.open('data/setting.json', encoding = 'utf8') as f:
setting_data = json.loads(await f.read())
db = await aiosqlite.connect(setting_data['db_name'] + '.db')
args = RequestParameters()
num1 = request.args.get('first', '1')
num2 = request.args.get('second', '2')
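    # TODO: the diff view is unfinished; the query below is a placeholder and the
    # handler does not return a response yet.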
data_get = await db.execute("")
@app.route("/manage")
async def wiki_manage(request):
async with aiofiles.open('data/setting.json', encoding = 'utf8') as f:
setting_data = json.loads(await f.read())
db = await aiosqlite.connect(setting_data['db_name'] + '.db')
@app.route("/manage/group")
async def wiki_manage_group(request):
async with aiofiles.open('data/setting.json', encoding = 'utf8') as f:
setting_data = json.loads(await f.read())
db = await aiosqlite.connect(setting_data['db_name'] + '.db')
data = ''
li = ''
permission_get = await db.execute("select name from list_per")
permission_get = await permission_get.fetchall()
if request.method == 'POST':
return 0
for first in permission_get:
li += '<li class="wiki_li" style="margin-left: 20px;"><a href="/manage/group/' + first[0] + '">' + first[0] + '</a></li>'
return jinja.render("index.html", request, wiki_set = await wiki_set(request, 0),
data = li,
title = '권한 그룹',
sub = 0,
menu = [['manage', '이전']]
)
@app.route("/manage/grant")
async def wiki_manage_grant(request):
async with aiofiles.open('data/setting.json', encoding = 'utf8') as f:
setting_data = json.loads(await f.read())
db = await aiosqlite.connect(setting_data['db_name'] + '.db')
@app.route("/manage/namespace")
async def wiki_manage_namespace(request):
async with aiofiles.open('data/setting.json', encoding = 'utf8') as f:
setting_data = json.loads(await f.read())
db = await aiosqlite.connect(setting_data['db_name'] + '.db')
@app.route("/manage/restart")
async def wiki_manage_restart(request):
try:
os.execl(sys.executable, sys.executable, *sys.argv)
except:
try:
os.execl(sys.executable, '"' + sys.executable + '"', *sys.argv)
except:
return response.redirect("/error/")
@app.route("/manage/engine")
async def wiki_manage_engine(request):
async with aiofiles.open('data/setting.json', encoding = 'utf8') as f:
setting_data = json.loads(await f.read())
db = await aiosqlite.connect(setting_data['db_name'] + '.db')
@app.route("/manage/edit_filter")
async def wiki_manage_edit_filter(request):
async with aiofiles.open('data/setting.json', encoding = 'utf8') as f:
setting_data = json.loads(await f.read())
db = await aiosqlite.connect(setting_data['db_name'] + '.db')
@app.route("/manage/inter_wiki")
async def wiki_manage_inter_wiki(request):
async with aiofiles.open('data/setting.json', encoding = 'utf8') as f:
setting_data = json.loads(await f.read())
db = await aiosqlite.connect(setting_data['db_name'] + '.db')
if __name__ == "__main__":
app.run(debug=False, access_log=False, host=setting_data['host'], port=setting_data['port'])
| 44.087958 | 251 | 0.548713 | [
"BSD-3-Clause"
] | BadaWikiDev/VientoEngine | app.py | 42,730 | Python |
# -*- coding: utf-8 -*-
# Copyright (c) 2022 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Optimization service."""
from neural_compressor.ux.components.db_manager.db_operations import OptimizationAPIInterface
from neural_compressor.ux.web.service.request_data_processor import RequestDataProcessor
from neural_compressor.ux.web.service.workload import WorkloadService
class OptimizationService(WorkloadService):
"""Optimization related services."""
@staticmethod
def _get_workload_data(data: dict) -> dict:
"""Return data for requested Workload."""
optimization_id = RequestDataProcessor.get_string_value(data, "id")
optimization_data = OptimizationAPIInterface.get_optimization_details(
{
"id": optimization_id,
},
)
return optimization_data
| 38.828571 | 93 | 0.740986 | [
"Apache-2.0"
] | intel/lp-opt-tool | neural_compressor/ux/web/service/optimization.py | 1,359 | Python |
# The list below contains one more inner list, the price list.
# The price list holds 3 sublists with the product prices.
# For example, the price of mamão is 10.00 - alface crespa is 2.99 and feijão is 9.0
# The price of some products will be requested. Printing must use an f-string combining the name with the price
# in the following format: "O preço do {} é R$ {}"
# print('1: print the price of abacaxi')
# print('2: print the price of rucula')
# print('3: print the price of laranja')
# print('4: print the price of repolho')
# print('5: print the price of feijão')
# print('6: print the price of feijão branco')
# print('7: print the price of vergamota')
# print('8: print the price of alface lisa')
# print('9: print the price of mamão')
# print('10: print the price of soja')
# print('11: print the price of lentilha')
# print('12: print the price of uva')
# print('13: print the price of vagem')
# print('14: print the price of almeirão')
# print('15: print the price of ervilha')
# print('16: print the price of maçã')
lista = [['frutas','verduras','legumes','preço'],
['mamão','abacaxi','laranja','uva','pera','maçã','vergamota'],
         ['alface crespa', 'alface lisa','rucula','almeirão','repolho','salsinha',],
         ['feijão', 'ervilha', 'lentilha','vagem','feijão branco','grão de bico','soja'],
[ [10.00, 2.56, 5.25, 9.5, 10.05, 15, 5.75], [2.99, 2.95, 3.5, 3.25, 5.89, 2.9, 2.5],
[9.0, 5.0, 7.5, 1.75, 10.9, 5.99, 3.55]
]
]
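# Price sublists inside lista[4]: lista[4][0] holds the prices of the 'frutas' in
# lista[1], lista[4][1] those of the 'verduras' in lista[2] and lista[4][2] those
# of the 'legumes' in lista[3], index-aligned with the name lists.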
print(f"O preço do {lista[1][1]} é R$ {lista[4][0][1]}")   # abacaxi
print(f"O preço do {lista[2][2]} é R$ {lista[4][1][2]}")   # rucula
print(f"O preço do {lista[1][2]} é R$ {lista[4][0][2]}")   # laranja
print(f"O preço do {lista[2][4]} é R$ {lista[4][1][4]}")   # repolho
print(f"O preço do {lista[3][0]} é R$ {lista[4][2][0]}")   # feijão
print(f"O preço do {lista[3][4]} é R$ {lista[4][2][4]}")   # feijão branco
print(f"O preço do {lista[1][-1]} é R$ {lista[4][0][-1]}") # vergamota
print(f"O preço do {lista[2][1]} é R$ {lista[4][1][1]}")   # alface lisa
print(f"O preço do {lista[1][0]} é R$ {lista[4][0][0]}")   # mamão
print(f"O preço do {lista[3][-1]} é R$ {lista[4][2][-1]}") # soja
print(f"O preço do {lista[3][2]} é R$ {lista[4][2][2]}")   # lentilha
print(f"O preço do {lista[1][3]} é R$ {lista[4][0][3]}")   # uva
print(f"O preço do {lista[3][3]} é R$ {lista[4][2][3]}")   # vagem
print(f"O preço do {lista[2][3]} é R$ {lista[4][1][3]}")   # almeirão
print(f"O preço do {lista[3][1]} é R$ {lista[4][2][1]}")   # ervilha
print(f"O preço do {lista[1][5]} é R$ {lista[4][0][5]}")   # maçã
"MIT"
] | marcelabbc07/TrabalhosPython | Aula18/rev3.py | 1,853 | Python |
import os
import arcpy
from arcpy import env
import time
def splitGDBTool(inputGDB,inputFrame,splitField,outputDir):
    # Get the feature classes to be clipped
env.workspace = inputGDB
inputFCs = arcpy.ListFeatureClasses()
countFCs =len(inputFCs)
cursor = arcpy.da.SearchCursor(inputFrame,["TID","SHAPE@"])
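    # Each frame polygon becomes one tile: create a File GDB named after its TID
    # and clip every feature class of the input GDB into it.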
index = 1
for row in cursor:
arcpy.CreateFileGDB_management(outputDir,row[0],"")
print index,time.strftime("%H:%M:%S "),row[0]+".gdb"
indexfc = 1
for inputFC in inputFCs:
print "\t",index,"-",indexfc, time.strftime("%H:%M:%S "), inputFC
outputFC = outputDir + os.sep + row[0] +".gdb" + os.sep + inputFC
arcpy.Clip_analysis(inputGDB+ os.sep + inputFC, row[1], outputFC)
indexfc += 1
index += 1
if __name__=="__main__":
splitGDBTool(sys.argv[1],sys.argv[2],sys.argv[3],sys.argv[4])
| 33.148148 | 77 | 0.622346 | [
"Apache-2.0"
] | AkutoSai/ArcGIS | Arcpy Script/SplitGDB/splitGDBTool.py | 895 | Python |
def test_positive_guess(patched_hangman):
decision = patched_hangman.guess("e")
assert decision is True
def test_negative_guess(patched_hangman):
decision = patched_hangman.guess("r")
assert decision is False
def test_none_guess(patched_hangman):
patched_hangman.guess("e")
decision = patched_hangman.guess("e")
assert decision is None
| 24.533333 | 41 | 0.75 | [
"MIT"
] | julia-shenshina/hangman | tests/test_hangman.py | 368 | Python |
# Copyright 2018, Erlang Solutions Ltd, and S2HC Sweden AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from pyrlang.gen_server import GenServer
from pyrlang.node import Node
from term.atom import Atom
LOG = logging.getLogger("pyrlang")
class NetKernel(GenServer):
""" A special process which registers itself as ``net_kernel`` and handles
one specific ``is_auth`` message, which is used by ``net_adm:ping``.
"""
def __init__(self, node) -> None:
""" :param node: pyrlang.node.Node
"""
GenServer.__init__(self,
node_name=node.node_name_,
accepted_calls=['is_auth'])
node.register_name(self, Atom('net_kernel'))
@staticmethod
def is_auth():
return Atom('yes')
__all__ = ['NetKernel']
| 30.837209 | 78 | 0.687029 | [
"Apache-2.0"
] | AlexKovalevych/Pyrlang | pyrlang/net_kernel.py | 1,326 | Python |
#!/bin/python
import platform
import fabric.api
from fabric.contrib.files import exists as remote_exists
from cloudify import ctx
from cloudify.exceptions import NonRecoverableError
def _get_distro_info():
distro, _, release = platform.linux_distribution(
full_distribution_name=False)
return '{0} {1}'.format(distro, release)
def retrieve(agent_packages):
ctx.logger.info('Downloading Cloudify Agents...')
if not agent_packages:
raise NonRecoverableError(
'Cannot find agent packages. At least one agent package must be '
'provided compatible with {0}.'.format(_get_distro_info()))
for agent, source_url in agent_packages.items():
dest_path = ctx.instance.runtime_properties['agent_packages_path']
agent_name = agent.replace('_', '-')
# This is a workaround for mapping Centos release names to versions
# to provide a better UX when providing agent inputs.
if agent_name == 'centos-7x-agent':
agent_name = 'centos-core-agent'
elif agent_name == 'centos-6x-agent':
agent_name = 'centos-final-agent'
elif agent_name == 'redhat-7x-agent':
agent_name = 'redhat-maipo-agent'
elif agent_name == 'redhat-6x-agent':
agent_name = 'redhat-santiago-agent'
if agent_name == 'cloudify-windows-agent':
filename = '{0}.exe'.format(agent_name)
else:
filename = '{0}.tar.gz'.format(agent_name)
dest_file = '{0}/{1}'.format(dest_path, filename)
ctx.logger.info('Downloading Agent Package {0} to {1} if it does not '
'already exist...'.format(source_url, dest_file))
if not remote_exists(dest_file):
dl_cmd = 'curl --retry 10 -f -s -S -L {0} --create-dirs -o {1}'
fabric.api.sudo(dl_cmd.format(source_url, dest_file))
| 37.156863 | 78 | 0.643272 | [
"Apache-2.0"
] | AlexAdamenko/cloudify-openstack | components/nginx/scripts/retrieve_agents.py | 1,895 | Python |
# Generated by Django 3.0.5 on 2020-04-22 02:42
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('core', '0057_sugestaoturma_horarios'),
]
operations = [
migrations.RemoveField(
model_name='sugestaoturma',
name='horarios',
),
migrations.AddField(
model_name='sugestaoturma',
name='horarios',
field=models.ManyToManyField(to='core.Horario'),
),
]
| 22.434783 | 60 | 0.585271 | [
"MIT"
] | ArthurGorgonio/suggestclasses | core/migrations/0058_auto_20200421_2342.py | 516 | Python |
"""This module defines classes that handle mesh and mesh operations.
This module defines a factory class for mesh, similar to geometry and
size function factory class. It also defines concrete mesh types.
Currently two concrete mesh types are defined for generic Eucledian
mesh and specific 2D Eucledian mesh.
"""
from functools import lru_cache
import logging
from multiprocessing import Pool, cpu_count
import os
import pathlib
from collections import defaultdict
import warnings
from typing import Union, List, Tuple, Dict, Any, Optional
try:
from typing import Literal
except ImportError:
from typing_extensions import Literal
import pandas as pd
import geopandas as gpd
from jigsawpy import jigsaw_msh_t, savemsh, loadmsh, savevtk
from matplotlib.path import Path
from matplotlib.transforms import Bbox
from matplotlib.tri import Triangulation
from matplotlib.axes import Axes
import matplotlib.pyplot as plt
import numpy as np
import numpy.typing as npt
from pyproj import CRS, Transformer
from scipy.interpolate import (
RectBivariateSpline, RegularGridInterpolator)
from shapely.geometry import (
LineString, box, Polygon, MultiPolygon)
from shapely.ops import polygonize, linemerge
from ocsmesh import utils
from ocsmesh.raster import Raster
from ocsmesh.mesh.base import BaseMesh
from ocsmesh.mesh.parsers import grd, sms2dm
_logger = logging.getLogger(__name__)
class EuclideanMesh(BaseMesh):
"""Generic Euclidean mesh class
This is the base class for 2D or 3D Euclidean mesh.
Attributes
----------
tria3 : npt.NDArray[jigsaw_msh_t.TRIA3_t]
Reference to underlying jigsaw mesh's triangle element
structure.
triangles : npt.NDArray[np.float32]
Array of node index for triangular elements.
quad4 : npt.NDArray[jigsaw_msh_t.QUAD4_t]
Reference to underlying jigsaw mesh's quadrangle element
structure.
quads : npt.NDArray[np.float32]
Array of node index for quadrangular elements.
crs : CRS
        Coordinate reference system of the mesh object
hull : Hull
Handle to hull calculation helper object
nodes : Nodes
Handle to node handler helper object
elements : Elements
Handle to element handler helper object
Methods
-------
write(path, overwrite=False, format='grd')
Export mesh object to the disk in the specified format.
"""
def __init__(self, mesh: jigsaw_msh_t) -> None:
"""Initialize Euclidean mesh object.
Parameters
----------
mesh : jigsaw_msh_t
The underlying jigsaw_msh_t object to hold onto mesh data.
Raises
------
TypeError
If input mesh is not of `jigsaw_msh_t` type.
ValueError
If input mesh's `mshID` is not equal to ``euclidean-mesh``.
If input mesh has `crs` property which is not of `CRS` type.
"""
if not isinstance(mesh, jigsaw_msh_t):
raise TypeError(f'Argument mesh must be of type {jigsaw_msh_t}, '
f'not type {type(mesh)}.')
if mesh.mshID != 'euclidean-mesh':
raise ValueError(f'Argument mesh has property mshID={mesh.mshID}, '
"but expected 'euclidean-mesh'.")
if not hasattr(mesh, 'crs'):
warnings.warn('Input mesh has no CRS information.')
mesh.crs = None
else:
if not isinstance(mesh.crs, CRS):
raise ValueError(f'crs property must be of type {CRS}, not '
f'type {type(mesh.crs)}.')
self._hull = None
self._nodes = None
self._elements = None
self._msh_t = mesh
def write(
self,
path: Union[str, os.PathLike],
overwrite: bool = False,
format : Literal['grd', '2dm', 'msh', 'vtk'] = 'grd', # pylint: disable=W0622
) -> None:
"""Export the mesh object to the disk
Parameters
----------
path : path-like
Path to which the mesh should be exported.
overwrite : bool, default=False
Whether to overwrite, if a file already exists in `path`
format : { 'grd', '2dm', 'msh', 'vtk' }
            Format of the export: GRD, SMS-2DM, jigsaw msh, or VTK.
Returns
-------
None
Raises
------
ValueError
If specified export format is **not** supported.
"""
path = pathlib.Path(path)
if path.exists() and overwrite is not True:
raise IOError(
f'File {str(path)} exists and overwrite is not True.')
if format == 'grd':
grd_dict = utils.msh_t_to_grd(self.msh_t)
if self._boundaries and self._boundaries.data:
grd_dict.update(boundaries=self._boundaries.data)
grd.write(grd_dict, path, overwrite)
elif format == '2dm':
sms2dm.writer(utils.msh_t_to_2dm(self.msh_t), path, overwrite)
elif format == 'msh':
savemsh(str(path), self.msh_t)
elif format == 'vtk':
savevtk(str(path), self.msh_t)
else:
raise ValueError(f'Unhandled format {format}.')
@property
def tria3(self):
"""Reference to underlying mesh tirangle element structure"""
return self.msh_t.tria3
@property
def triangles(self):
"""Reference to underlying mesh triangle element index array"""
return self.msh_t.tria3['index']
@property
def quad4(self):
"""Reference to underlying mesh quadrangle element structure"""
return self.msh_t.quad4
@property
def quads(self):
"""Reference to underlying mesh quadrangle element index array"""
return self.msh_t.quad4['index']
@property
def crs(self):
"""Reference to underlying mesh crs"""
return self.msh_t.crs
@property
def hull(self):
"""Reference to hull calculator helper object"""
if self._hull is None:
self._hull = Hull(self)
return self._hull
@property
def nodes(self):
"""Reference to node handler helper object"""
if self._nodes is None:
self._nodes = Nodes(self)
return self._nodes
@property
def elements(self):
"""Reference to element handler helper object"""
if self._elements is None:
self._elements = Elements(self)
return self._elements
class EuclideanMesh2D(EuclideanMesh):
"""2D Euclidean mesh definition
Attributes
----------
boundaries
vert2
value
bbox
Methods
-------
get_bbox(crs=None, output_type=None)
Gets the bounding box of the mesh elements.
tricontourf(**kwargs)
Create a contour plot from the value data on the nodes of
the mesh
interpolate(raster, method='spline', nprocs=None)
        Interpolate raster data on the nodes.
get_contour(level)
Get contour lines from node value data at specified levels.
get_multipolygon(zmin=None, zmax=None)
Get multipolygon of the mesh hull.
"""
def __init__(self, mesh: jigsaw_msh_t) -> None:
"""Initialize Euclidean 2D mesh object.
Parameters
----------
mesh : jigsaw_msh_t
The underlying jigsaw_msh_t object to hold onto mesh data.
Raises
------
ValueError
If number of mesh dimensions is not equal to ``2``.
"""
super().__init__(mesh)
self._boundaries = None
if mesh.ndims != +2:
raise ValueError(f'Argument mesh has property ndims={mesh.ndims}, '
"but expected ndims=2.")
if len(self.msh_t.value) == 0:
self.msh_t.value = np.array(
np.full((self.vert2['coord'].shape[0], 1), np.nan))
def get_bbox(
self,
crs: Union[str, CRS, None] = None,
output_type: Literal[None, 'polygon', 'bbox'] = None
) -> Union[Polygon, Bbox]:
"""Get the bounding box of mesh elements.
Parameters
----------
crs : str or CRS or None, default=None
CRS to transform the calculated bounding box into before
returning
output_type : { None, 'polygon', 'bbox'}, default=None
Output type
Returns
-------
Polygon or Bbox
Bounding box of the mesh elements.
"""
output_type = 'polygon' if output_type is None else output_type
xmin, xmax = np.min(self.coord[:, 0]), np.max(self.coord[:, 0])
ymin, ymax = np.min(self.coord[:, 1]), np.max(self.coord[:, 1])
crs = self.crs if crs is None else crs
if crs is not None:
if not self.crs.equals(crs):
transformer = Transformer.from_crs(
self.crs, crs, always_xy=True)
# pylint: disable=E0633
(xmin, xmax), (ymin, ymax) = transformer.transform(
(xmin, xmax), (ymin, ymax))
if output_type == 'polygon': # pylint: disable=R1705
return box(xmin, ymin, xmax, ymax)
elif output_type == 'bbox':
return Bbox([[xmin, ymin], [xmax, ymax]])
raise TypeError(
            'Argument output_type must be a string literal \'polygon\' or '
'\'bbox\'')
@property
def boundaries(self):
"""Handle to boundaries calculator helper object"""
if self._boundaries is None:
self._boundaries = Boundaries(self)
return self._boundaries
def tricontourf(self, **kwargs) -> Axes:
"""Generate contour for the data of triangular elements of the mesh
Parameters
----------
**kwargs : dict, optional
Passed to underlying `matplotlib` API.
Returns
-------
Axes
Axes on which the filled contour is drawn.
"""
return utils.tricontourf(self.msh_t, **kwargs)
def interpolate(
self,
raster: Union[Raster, List[Raster]],
method: Literal['spline', 'linear', 'nearest'] = 'spline',
nprocs: Optional[int] = None,
info_out_path: Union[pathlib.Path, str, None] = None,
filter_by_shape: bool = False
) -> None:
"""Interplate values from raster inputs to the mesh nodes.
Parameters
----------
raster : Raster or list of Raster
A single or a list of rasters from which values are
interpolated onto the mesh
method : {'spline', 'linear', 'nearest'}, default='spline'
Method of interpolation.
nprocs : int or None, default=None
Number of workers to use when interpolating data.
info_out_path : pathlike or str or None
Path for the output node interpolation information file
filter_by_shape : bool
Flag for node filtering based on raster bbox or shape
Returns
-------
None
"""
if isinstance(raster, Raster):
raster = [raster]
nprocs = -1 if nprocs is None else nprocs
nprocs = cpu_count() if nprocs == -1 else nprocs
# Fix an issue on Jupyter notebook where having pool execute
# interpolation even in case of nprocs == 1 would results in
# application getting stuck
if nprocs > 1:
with Pool(processes=nprocs) as pool:
res = pool.starmap(
_mesh_interpolate_worker,
[(self.vert2['coord'], self.crs,
_raster.tmpfile, _raster.chunk_size,
method, filter_by_shape)
for _raster in raster]
)
pool.join()
else:
res = [_mesh_interpolate_worker(
self.vert2['coord'], self.crs,
_raster.tmpfile, _raster.chunk_size,
method, filter_by_shape)
for _raster in raster]
values = self.msh_t.value.flatten()
interp_info_map = {}
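        # Merge the per-raster results; where masks overlap, rasters that appear
        # later in the input list overwrite values from earlier ones.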
for (mask, _values), rast in zip(res, raster):
values[mask] = _values
if info_out_path is not None:
vert_cs = None
rast_crs = rast.crs
if rast_crs.is_vertical:
if rast_crs.sub_crs_list is not None:
for sub_crs in rast_crs.sub_crs_list:
if sub_crs.is_vertical:
# TODO: What if sub CRS is compound, etc.?
vert_cs = sub_crs
elif rast_crs.source_crs is not None:
if rast_crs.source_crs.is_vertical:
# TODO: What if source CRS is compound, etc.?
vert_cs = rast_crs.source_crs
vert_cs_name = vert_cs.name
idxs = np.argwhere(mask).ravel()
interp_info_map.update({
idx: (rast.path, vert_cs_name)
for idx in idxs})
if info_out_path is not None:
coords = self.msh_t.vert2['coord'].copy()
geo_coords = coords.copy()
if not self.crs.is_geographic:
transformer = Transformer.from_crs(
self.crs, CRS.from_epsg(4326), always_xy=True)
# pylint: disable=E0633
geo_coords[:, 0], geo_coords[:, 1] = transformer.transform(
coords[:, 0], coords[:, 1])
vd_idxs=np.array(list(interp_info_map.keys()))
df_interp_info = pd.DataFrame(
index=vd_idxs,
data={
'x': coords[vd_idxs, 0],
'y': coords[vd_idxs, 1],
'lat': geo_coords[vd_idxs, 0],
'lon': geo_coords[vd_idxs, 1],
'elev': values[vd_idxs],
'crs': [i[1] for i in interp_info_map.values()],
'source': [i[0] for i in interp_info_map.values()]
}
)
df_interp_info.sort_index().to_csv(
info_out_path, header=False, index=True)
self.msh_t.value = np.array(values.reshape((values.shape[0], 1)),
dtype=jigsaw_msh_t.REALS_t)
def get_contour(self, level: float) -> LineString:
"""Extract contour lines at the specified `level` from mesh values
Parameters
----------
level : float
The level at which contour lines must be extracted.
Returns
-------
LineString
Extracted and merged contour lines.
Raises
------
ValueError
If mesh has nodes that have null value `np.nan`.
"""
# ONLY SUPPORTS TRIANGLES
for attr in ['quad4', 'hexa8']:
if len(getattr(self.msh_t, attr)) > 0:
warnings.warn(
'Mesh contour extraction only supports triangles')
coords = self.msh_t.vert2['coord']
values = self.msh_t.value
trias = self.msh_t.tria3['index']
if np.any(np.isnan(values)):
raise ValueError(
"Mesh contains invalid values. Raster values must"
"be interpolated to the mesh before generating "
"boundaries.")
x, y = coords[:, 0], coords[:, 1]
features = []
with warnings.catch_warnings():
warnings.simplefilter('ignore', UserWarning)
_logger.debug('Computing contours...')
fig, ax = plt.subplots()
ax.tricontour(
x, y, trias, values.ravel(), levels=[level])
plt.close(fig)
for path_collection in ax.collections:
for path in path_collection.get_paths():
try:
features.append(LineString(path.vertices))
except ValueError:
# LineStrings must have at least 2 coordinate tuples
pass
return linemerge(features)
def get_multipolygon(
self,
zmin: Optional[float] = None,
zmax: Optional[float] = None
) -> MultiPolygon:
"""Calculate multipolygon covering mesh elements (hull)
Parameters
----------
zmin : float or None
Minimum elevation to consider for multipolygon extraction
zmax : float or None
Maximum elevation to consider for multipolygon extraction
Returns
-------
MultiPolygon
Calculated multipolygon shape
"""
values = self.msh_t.value
mask = np.ones(values.shape)
if zmin is not None:
mask = np.logical_and(mask, values > zmin)
if zmax is not None:
mask = np.logical_and(mask, values < zmax)
# Assuming value is of shape (N, 1)
# ravel to make sure it's 1D
verts_in = np.argwhere(mask).ravel()
clipped_mesh = utils.clip_mesh_by_vertex(
self.msh_t, verts_in,
can_use_other_verts=True)
boundary_edges = utils.get_boundary_edges(clipped_mesh)
coords = clipped_mesh.vert2['coord']
coo_to_idx = {
tuple(coo): idx
for idx, coo in enumerate(coords)}
poly_gen = polygonize(coords[boundary_edges])
polys = list(poly_gen)
polys = sorted(polys, key=lambda p: p.area, reverse=True)
rings = [p.exterior for p in polys]
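        # Count, for each ring, how many of the larger rings contain it; by the
        # even-odd rule a ring nested inside an even number of rings bounds the
        # domain, so only those polygons are kept below.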
n_parents = np.zeros((len(rings),))
represent = np.array([r.coords[0] for r in rings])
for e, ring in enumerate(rings[:-1]):
path = Path(ring.coords, closed=True)
n_parents = n_parents + np.pad(
np.array([
path.contains_point(pt) for pt in represent[e+1:]]),
(e+1, 0), 'constant', constant_values=0)
# Get actual polygons based on logic described above
polys = [p for e, p in enumerate(polys) if not n_parents[e] % 2]
return MultiPolygon(polys)
@property
def vert2(self):
"""Reference to underlying mesh 2D vertices structure"""
return self.msh_t.vert2
@property
def value(self):
"""Reference to underlying mesh values"""
return self.msh_t.value
@property
def bbox(self):
"""Calculates and returns bounding box of the mesh hull.
See Also
--------
get_bbox
"""
return self.get_bbox()
MeshType = Union[EuclideanMesh2D]
class Mesh(BaseMesh):
"""Mesh object factory
Factory class that creates and returns concrete mesh object
based on the input types.
Methods
-------
open(path, crs=None)
Read mesh data from a file on disk.
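    Notes
    -----
    A minimal usage sketch (the file names here are only illustrative):
        mesh = Mesh.open('mesh.grd')
        mesh.write('mesh.2dm', format='2dm', overwrite=True)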
"""
def __new__(cls, mesh: jigsaw_msh_t) -> MeshType:
"""Construct a concrete mesh object.
Parameters
----------
mesh : jigsaw_msh_t
Input jigsaw mesh object
Returns
-------
MeshType
Mesh object created from the input
Raises
------
TypeError
Input `mesh` is not a `jigsaw_msh_t` object.
NotImplementedError
Input `mesh` object cannot be used to create a EuclideanMesh2D
"""
if not isinstance(mesh, jigsaw_msh_t):
raise TypeError(f'Argument mesh must be of type {jigsaw_msh_t}, '
f'not type {type(mesh)}.')
if mesh.mshID == 'euclidean-mesh':
if mesh.ndims == 2:
return EuclideanMesh2D(mesh)
raise NotImplementedError(
f'mshID={mesh.mshID} + mesh.ndims={mesh.ndims} not '
'handled.')
raise NotImplementedError(f'mshID={mesh.mshID} not handled.')
@staticmethod
def open(path: Union[str, Path], crs: Optional[CRS] = None) -> MeshType:
"""Read mesh from a file on disk
Parameters
----------
path : path-like
            Path to the file containing the mesh.
crs : CRS or None, default=None
CRS of the mesh in the path. Overwrites any info read
from file, no transformation is done.
Returns
-------
MeshType
Mesh object created by reading the file.
Raises
------
TypeError
If cannot determine the input mesh type.
Notes
-----
        Currently SMS-2DM, GRD and jigsaw msh formats are supported
        for reading.
"""
try:
msh_t = utils.grd_to_msh_t(grd.read(path, crs=crs))
msh_t.value = np.negative(msh_t.value)
return Mesh(msh_t)
except Exception as e: #pylint: disable=W0703
if 'not a valid grd file' in str(e):
pass
else:
raise e
try:
return Mesh(utils.sms2dm_to_msh_t(sms2dm.read(path, crs=crs)))
except ValueError:
pass
try:
msh_t = jigsaw_msh_t()
loadmsh(msh_t, path)
msh_t.crs = crs
return Mesh(msh_t)
except Exception as e: #pylint: disable=W0703
pass
raise TypeError(
f'Unable to automatically determine file type for {str(path)}.')
class Rings:
"""Helper class for handling mesh rings.
This is a helper class to manage the calculation of internal
and external rings of the mesh polygon or hull.
Attributes
----------
Methods
-------
__call__()
Returns all rings of the mesh hull
interior()
Return the interior rings of the mesh hull
exterior()
Return the exterior rings of the mesh hull
"""
def __init__(self, mesh: EuclideanMesh) -> None:
"""Initializes the ring calculator object for the input `mesh`
Parameters
----------
mesh : EuclideanMesh
Input mesh for which this object calculates rings.
"""
self.mesh = mesh
@lru_cache(maxsize=1)
def __call__(self) -> gpd.GeoDataFrame:
"""Calcluates all the polygons of the mesh and extracts its rings.
Parameters
----------
Returns
-------
gpd.GeoDataFrame
Dataframe containing all rings of the mesh hull polygon.
The rings are in the form of `shapely.geometry.LinearRing`.
Notes
-----
The result of this method is cached, so that multiple calls
to it won't result in multiple calculations. If the mesh
        is modified and the cache is not properly cleared, the calls
to this method can result in invalid return values.
"""
polys = utils.get_mesh_polygons(self.mesh.msh_t)
data = []
bnd_id = 0
for poly in polys:
data.append({
"geometry": poly.exterior,
"bnd_id": bnd_id,
"type": 'exterior'
})
for interior in poly.interiors:
data.append({
"geometry": interior,
"bnd_id": bnd_id,
"type": 'interior'
})
bnd_id = bnd_id + 1
return gpd.GeoDataFrame(data, crs=self.mesh.crs)
def exterior(self) -> gpd.GeoDataFrame:
"""Extracts the exterior ring from the results of `__call__`.
Parameters
----------
Returns
-------
gpd.GeoDataFrame
Dataframe containing exterior ring of the mesh hull polygon.
"""
return self().loc[self()['type'] == 'exterior']
def interior(self) -> gpd.GeoDataFrame:
"""Extracts the interior rings from the results of `__call__`.
Parameters
----------
Returns
-------
gpd.GeoDataFrame
Dataframe containing interior rings of the mesh hull polygon.
"""
return self().loc[self()['type'] == 'interior']
class Edges:
"""Helper class for handling mesh boundary edges.
Attributes
----------
Methods
-------
__call__()
Return all boundary edges of the mesh hull
interior()
Return the interior boundary edges of the mesh hull
exterior()
Return the exterior boundary edges of the mesh hull
"""
def __init__(self, mesh: EuclideanMesh) -> None:
"""Initializes the edge calculator object for the input `mesh`
Parameters
----------
mesh : EuclideanMesh
Input mesh for which boundary edges are calculated.
"""
self.mesh = mesh
@lru_cache(maxsize=1)
def __call__(self) -> gpd.GeoDataFrame:
"""Calculates all boundary edges for the mesh.
Parameters
----------
Returns
-------
gpd.GeoDataFrame
Dataframe containing all boundary edges of the mesh in
the form of `shapely.geometry.LineString` for each
coordinate couple.
Notes
-----
The result of this method is cached, so that multiple calls
to it won't result in multiple calculations. If the mesh
        is modified and the cache is not properly cleared, the calls
to this method can result in invalid return values.
"""
data = []
for ring in self.mesh.hull.rings().itertuples():
coords = ring.geometry.coords
for i in range(1, len(coords)):
data.append({
"geometry": LineString([coords[i-1], coords[i]]),
"bnd_id": ring.bnd_id,
"type": ring.type})
return gpd.GeoDataFrame(data, crs=self.mesh.crs)
def exterior(self) -> gpd.GeoDataFrame:
"""Retruns exterior boundary edges from the results of `__call__`
Parameters
----------
Returns
-------
gpd.GeoDataFrame
Dataframe containing exterior boundary edges of the mesh in
the form of line string couples.
"""
return self().loc[self()['type'] == 'exterior']
def interior(self) -> gpd.GeoDataFrame:
"""Retruns interior boundary edges from the results of `__call__`
Parameters
----------
Returns
-------
gpd.GeoDataFrame
Dataframe containing interior boundary edges of the mesh in
the form of line string couples.
"""
return self().loc[self()['type'] == 'interior']
class Hull:
"""Helper class for handling mesh hull calculations.
This class wraps the functionality of ring and edge classes and
adds additional methods to calculate or extract the polygon or
triangulation of the mesh
Attributes
----------
Methods
-------
__call__()
Calculates all the polys from all mesh rings
exterior()
Calculates the exterior rings of the mesh hull.
interior()
Calculates the interior rings of the mesh hull.
implode()
Calculates all the polygons (including isolated domain
islands) in the mesh and returns a table of polygons.
multipolygon()
Calculates all the polygons (including isolated domain
islands) in the mesh and returns a multipolygon.
triangulation()
Calculates a triangulation from the triangles and quads of
the mesh.
"""
def __init__(self, mesh: EuclideanMesh) -> None:
"""Initialize helper class for handling mesh hull calculations
Parameters
----------
mesh : EuclideanMesh
Input mesh for which hull calculations are done.
Notes
-----
This object holds onto the ring and edge calculator objects
as well as a reference to the input mesh.
"""
self.mesh = mesh
self.rings = Rings(mesh)
self.edges = Edges(mesh)
@lru_cache(maxsize=1)
def __call__(self) -> gpd.GeoDataFrame:
"""Calculates all polygons of the mesh including domain islands
Parameters
----------
Returns
-------
gpd.GeoDataFrame
Dataframe containing all polygons of the mesh.
See Also
--------
implode()
Dataframe with a single combined multipolygon.
multipolygon()
`shapely` multipolygon shape of combined mesh polygons.
Notes
-----
The result of this method is cached, so that multiple calls
to it won't result in multiple calculations. If the mesh
is modified and the cache is not properly cleared, the calls
to this method can result in invalid return values.
"""
data = []
for bnd_id in np.unique(self.rings()['bnd_id'].tolist()):
exterior = self.rings().loc[
(self.rings()['bnd_id'] == bnd_id) &
(self.rings()['type'] == 'exterior')]
interiors = self.rings().loc[
(self.rings()['bnd_id'] == bnd_id) &
(self.rings()['type'] == 'interior')]
data.append({
"geometry": Polygon(
exterior.iloc[0].geometry.coords,
[row.geometry.coords for _, row
in interiors.iterrows()]),
"bnd_id": bnd_id
})
return gpd.GeoDataFrame(data, crs=self.mesh.crs)
def exterior(self) -> gpd.GeoDataFrame:
"""Creates polygons from exterior rings of the mesh hull
Parameters
----------
Returns
-------
gpd.GeoDataFrame
Polygons created from exterior rings of the mesh hull
"""
data = []
for exterior in self.rings().loc[
self.rings()['type'] == 'exterior'].itertuples():
data.append({"geometry": Polygon(exterior.geometry.coords)})
return gpd.GeoDataFrame(data, crs=self.mesh.crs)
def interior(self) -> gpd.GeoDataFrame:
"""Creates polygons from interior rings of the mesh hull
Parameters
----------
Returns
-------
gpd.GeoDataFrame
Polygons created from interior rings of the mesh hull
"""
data = []
for interior in self.rings().loc[
self.rings()['type'] == 'interior'].itertuples():
data.append({"geometry": Polygon(interior.geometry.coords)})
return gpd.GeoDataFrame(data, crs=self.mesh.crs)
def implode(self) -> gpd.GeoDataFrame:
"""Creates a dataframe from mesh polygons.
Parameters
----------
Returns
------
gpd.GeoDataFrame
Dataframe containing polygons of the mesh.
See Also
--------
__call__()
Dataframe with multiple polygon and boundary ID entries
of the mesh polygons.
multipolygon()
`shapely` multipolygon shape of combined mesh polygons.
Notes
-----
The difference between the return value of this method and
`__call__` is that `implode` returns a dataframe with
a single `MultiPolygon`, whereas `__call__` returns a
dataframe with multiple `Polygon` entries with associated
`bnd_id`.
"""
return gpd.GeoDataFrame(
{"geometry": MultiPolygon([polygon.geometry for polygon
in self().itertuples()])},
crs=self.mesh.crs)
def multipolygon(self) -> MultiPolygon:
"""Returns mesh multi-polygons.
Parameters
----------
Returns
------
MultiPolygon
Combined shape of polygons of the mesh.
See Also
--------
__call__()
Dataframe with multiple polygon and boundary ID entries
of the mesh polygons.
implode()
Dataframe with a single combined multipolygon of the mesh
polygons.
Notes
-----
The difference between the return value of this method and `implode`
is that `multipolygon` returns a `MultiPolygon` object, whereas
`implode` returns a dataframe wrapping the multipolygon
object.
"""
mp = self.implode().iloc[0].geometry
if isinstance(mp, Polygon):
mp = MultiPolygon([mp])
return mp
def triangulation(self) -> Triangulation:
"""Create triangulation object from all the mesh elements.
Parameters
----------
Returns
-------
Triangulation
The `matplotlib` triangulation object created from all
the elements of the parent mesh.
Notes
-----
Currently only tria3 and quad4 elements are considered.
"""
triangles = self.mesh.msh_t.tria3['index'].tolist()
for quad in self.mesh.msh_t.quad4['index']:
triangles.extend([
[quad[0], quad[1], quad[3]],
[quad[1], quad[2], quad[3]]
])
return Triangulation(self.mesh.coord[:, 0], self.mesh.coord[:, 1], triangles)
class Nodes:
"""Helper class for handling mesh nodes.
Attributes
----------
id_to_index : dict
Mapping to convert node IDs to node indexes.
index_to_id : dict
Mapping to convert node indexes to node IDs.
Methods
-------
__call__()
Creates a mapping between node IDs (index + 1) and node
coordinates
id()
Returns list of node IDs.
index()
Return array of node indices.
coords()
Return mesh coordinates.
values()
Return values stored for mesh nodes.
get_index_by_id(node_id)
Get the node index based on node ID.
get_id_by_index(index)
Get the node ID based on the node index.
"""
def __init__(self, mesh: EuclideanMesh) -> None:
"""Initializes node handler helper object.
Parameters
----------
mesh : EuclideanMesh
Input mesh for which this object handles nodes info.
"""
self.mesh = mesh
self._id_to_index = None
self._index_to_id = None
@lru_cache(maxsize=1)
def __call__(self) -> Dict[int, int]:
"""Creates a mapping between node IDs and indexes.
Parameters
----------
Returns
-------
dict
Mapping between node IDs and node coordinates.
Notes
-----
The result of this method is cached, so that multiple calls
to it won't result in multiple calculations. If the mesh
is modified and the cache is not properly cleared, the calls
to this method can result in invalid return values.
"""
return {i+1: coord for i, coord in enumerate(self.coords())}
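# Illustrative note (added, not part of the original module): node IDs are
# 1-based while indexes are 0-based, so for an assumed EuclideanMesh `mesh`
# with three nodes a hypothetical session would look like:
#   >>> mesh.nodes.id()                  # [1, 2, 3]
#   >>> mesh.nodes.index()               # array([0, 1, 2])
#   >>> mesh.nodes.get_index_by_id(1)    # 0
#   >>> mesh.nodes.get_id_by_index(0)    # 1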
def id(self) -> List[int]:
"""Retrives a list of element IDs.
Parameters
----------
Returns
-------
list of int
List of node IDs as created by `__call__`
"""
return list(self().keys())
def index(self) -> npt.NDArray[int]:
"""Retrives an array of element indexes.
Parameters
----------
Returns
-------
array-like
Array of node indexes.
"""
return np.arange(len(self()))
def coords(self) -> npt.NDArray[np.float32]:
"""Retrieve the coordinates of mesh nodes
Parameters
----------
Returns
-------
array-like
Coordinates of the mesh nodes as returned by `BaseMesh.coord`
"""
return self.mesh.coord
def values(self):
"""Retrieve the values stored for mesh nodes
Parameters
----------
Returns
-------
array-like
Values on the mesh nodes as returned by `BaseMesh.values`
"""
return self.mesh.values
def get_index_by_id(self, node_id):
"""Converts mesh ID to mesh index.
Parameters
----------
node_id : int
ID of the node of interest
Returns
-------
int
Index of the node of interest
"""
return self.id_to_index[node_id]
def get_id_by_index(self, index: int):
"""Converts mesh index to mesh ID.
Parameters
----------
index : int
Index of the node of interest.
Returns
-------
int
ID of the node of interest
"""
return self.index_to_id[index]
@property
def id_to_index(self) -> Dict[int, int]:
"""Read-only property returning the mapping of ID to index
Notes
-----
Although the property is read-only, the return value object
is a cached mutable dictionary object. Modifying the mesh
without clearing the cache properly or mutating the
returned object could result in undefined behavior
"""
if self._id_to_index is None:
self._id_to_index = {node_id: index for index, node_id
in enumerate(self().keys())}
return self._id_to_index
@property
def index_to_id(self) -> Dict[int, int]:
"""Read-only property returning the mapping of index to ID
Notes
-----
Although the property is read-only, the return value object
is a cached mutable dictionary object. Modifying the mesh
without clearing the cache properly or mutating the
returned object could result in undefined behavior
"""
if self._index_to_id is None:
self._index_to_id = dict(enumerate(self().keys()))
return self._index_to_id
# def get_indexes_around_index(self, index):
# indexes_around_index = self.__dict__.get('indexes_around_index')
# if indexes_around_index is None:
# def append(geom):
# for simplex in geom:
# for i, j in permutations(simplex, 2):
# indexes_around_index[i].add(j)
# indexes_around_index = defaultdict(set)
# append(self.gr3.elements.triangles())
# append(self.gr3.elements.quads())
# self.__dict__['indexes_around_index'] = indexes_around_index
# return list(indexes_around_index[index])
class Elements:
"""Helper class for handling mesh elements.
Attributes
----------
Methods
--------
__call__()
Creates a mapping between element IDs and associated node IDs.
id()
Returns a list of element IDs.
index()
Returns an array of element indexes.
array()
Creates and returns a masked array of element node indices.
triangles()
Creates and returns a 2D array of triangular element node indices.
quads()
Creates and returns a 2D array of quadrangular element node indices.
triangulation()
Calculates a triangulation from the triangles and quads of
the mesh.
geodataframe()
Creates and returns a dataframe with polygon entries for
each element.
"""
def __init__(self, mesh: EuclideanMesh) -> None:
"""Initialize the element handler helper object.
Parameters
----------
mesh : EuclideanMesh
Input mesh for which this object handles elements info.
"""
self.mesh = mesh
@lru_cache(maxsize=1)
def __call__(self) -> Dict[int, npt.NDArray[int]]:
"""Creates a mapping between element IDs and associated node IDs.
Parameters
----------
Returns
-------
dict
Mapping between element IDs and associated node Ids
Notes
-----
The result of this method is cached, so that multiple calls
to it won't result in multiple calculations. If the mesh
is modified and the cache is not properly cleared, the calls
to this method can result in invalid return values.
"""
elements = {i+1: index+1 for i, index
in enumerate(self.mesh.msh_t.tria3['index'])}
elements.update({i+len(elements)+1: index+1 for i, index
in enumerate(self.mesh.msh_t.quad4['index'])})
return elements
@lru_cache(maxsize=1)
def id(self) -> List[int]:
"""Retrieves the list of element IDs as returned by `__call__`
Parameters
----------
Returns
-------
list of int
List of element IDs.
Notes
-----
The result of this method is cached, so that multiple calls
to it won't result in multiple calculations. If the mesh
is modified and the cache is not properly cleared, the calls
to this method can result in invalid return values.
"""
return list(self().keys())
@lru_cache(maxsize=1)
def index(self) -> npt.NDArray[int]:
"""Retrieves an array of element indices
Parameters
----------
Returns
-------
npt.NDArray
1D array of element indices.
Notes
-----
The result of this method is cached, so that multiple calls
to it won't result in multiple calculations. If the mesh
is modified and the cache is not properly cleared, the calls
to this method can result in invalid return values.
"""
return np.arange(len(self()))
def array(self) -> npt.NDArray[int]:
"""Retrieves a masked array of element node IDs.
The return value is ``n x m`` where ``n`` is the number of
elements and ``m`` is the maximum number of element nodes, e.g.
if there are only trias, then it is 3; for trias and quads it
is 4.
Parameters
----------
Returns
-------
npt.NDArray
Masked array where elements with fewer associated nodes
have trailing masked node columns in the array.
"""
rank = int(max(map(len, self().values())))
array = np.full((len(self()), rank), -1)
for i, elem_nd_ids in enumerate(self().values()):
row = np.array(list(map(self.mesh.nodes.get_index_by_id, elem_nd_ids)))
array[i, :len(row)] = row
return np.ma.masked_equal(array, -1)
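# Illustrative sketch (added, not in the original source): for a hypothetical
# mesh with one triangle (node IDs 1, 2, 3) and one quad (node IDs 2, 3, 4, 5),
# `rank` is 4 and the resulting masked array would look like
#   [[0, 1, 2, --],
#    [1, 2, 3, 4]]
# where `--` marks the masked trailing column of the triangular element.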
@lru_cache(maxsize=1)
def triangles(self) -> npt.NDArray[int]:
"""Retrieves an array of tria element node indices
Parameters
----------
Returns
-------
npt.NDArray
2D array of node indices for triangular elements
Notes
-----
The result of this method is cached, so that multiple calls
to it won't result in multiple calculations. If the mesh
is modified and the cache is not properly cleared, the calls
to this method can result in invalid return values.
"""
return np.array(
[list(map(self.mesh.nodes.get_index_by_id, element))
for element in self().values()
if len(element) == 3])
@lru_cache(maxsize=1)
def quads(self):
"""Retrieves an array of quad element node indices
Parameters
----------
Returns
-------
npt.NDArray
2D array of node indices for quadrangular elements
Notes
-----
The result of this method is cached, so that multiple calls
to it won't result in multiple calculations. If the mesh
is modified and the cache is not properly cleared, the calls
to this method can result in invalid return values.
"""
return np.array(
[list(map(self.mesh.nodes.get_index_by_id, element))
for element in self().values()
if len(element) == 4])
def triangulation(self) -> Triangulation:
"""Create triangulation object from all the mesh elements.
Parameters
----------
Returns
-------
Triangulation
The `matplotlib` triangulation object created from all
the elements of the parent mesh.
Notes
-----
Currently only tria3 and quad4 elements are considered.
"""
triangles = self.triangles().tolist()
for quad in self.quads():
# TODO: Not tested.
triangles.append([quad[0], quad[1], quad[3]])
triangles.append([quad[1], quad[2], quad[3]])
return Triangulation(
self.mesh.coord[:, 0],
self.mesh.coord[:, 1],
triangles)
def geodataframe(self) -> gpd.GeoDataFrame:
"""Create polygons for each element and return in dataframe
Parameters
----------
Returns
-------
gpd.GeoDataFrame
Dataframe created from entries of `Polygon` type for
each element.
"""
data = []
for elem_id, elem_nd_ids in self().items():
data.append({
'geometry': Polygon(
self.mesh.coord[list(
map(self.mesh.nodes.get_index_by_id, elem_nd_ids))]),
'id': elem_id})
return gpd.GeoDataFrame(data, crs=self.mesh.crs)
class Boundaries:
"""Helper class for mesh boundary condition calculation
Attributes
----------
data : dict
Mapping for boundary information
Methods
-------
__call__()
Retrieves a dataframe for all boundary shapes and type info.
__len__()
Gets the number of calculated boundary segments.
ocean()
Retrieves a dataframe containing shapes and type info of ocean
boundaries
land()
Retrieves a dataframe containing shapes and type info of land
boundaries
interior()
Retrieves a dataframe containing shapes and type info of island
boundaries
auto_generate(threshold=0., land_ibtype=0, interior_ibtype=1)
Automatically generate boundary information based on the
input land indicator `threshold`
"""
def __init__(self, mesh: EuclideanMesh) -> None:
"""Initialize boundary helper object
Parameters
----------
mesh : EuclideanMesh
Input mesh for which this object calculates boundaries.
"""
# TODO: Add a way to manually initialize
self.mesh = mesh
self._ocean = gpd.GeoDataFrame()
self._land = gpd.GeoDataFrame()
self._interior = gpd.GeoDataFrame()
self._data = defaultdict(defaultdict)
@lru_cache(maxsize=1)
def _init_dataframes(self) -> None:
"""Internal: Creates boundary dataframes based on boundary data
Parameters
----------
Returns
-------
None
Notes
-----
This method doesn't have any return value, but it is cached
so that on re-execution it doesn't recalculate.
"""
boundaries = self._data
ocean_boundaries = []
land_boundaries = []
interior_boundaries = []
if boundaries is not None:
for ibtype, bnds in boundaries.items():
if ibtype is None:
for bnd_id, data in bnds.items():
indexes = list(map(self.mesh.nodes.get_index_by_id,
data['indexes']))
ocean_boundaries.append({
'id': bnd_id,
"index_id": data['indexes'],
"indexes": indexes,
'geometry': LineString(self.mesh.coord[indexes])
})
elif str(ibtype).endswith('1'):
for bnd_id, data in bnds.items():
indexes = list(map(self.mesh.nodes.get_index_by_id,
data['indexes']))
interior_boundaries.append({
'id': bnd_id,
'ibtype': ibtype,
"index_id": data['indexes'],
"indexes": indexes,
'geometry': LineString(self.mesh.coord[indexes])
})
else:
for bnd_id, data in bnds.items():
_indexes = np.array(data['indexes'])
if _indexes.ndim > 1:
# ndim > 1 implies we're dealing with an ADCIRC
# mesh that includes boundary pairs, such as weir
new_indexes = []
for i, line in enumerate(_indexes.T):
if i % 2 != 0:
new_indexes.extend(np.flip(line))
else:
new_indexes.extend(line)
_indexes = np.array(new_indexes).flatten()
else:
_indexes = _indexes.flatten()
indexes = list(map(self.mesh.nodes.get_index_by_id,
_indexes))
land_boundaries.append({
'id': bnd_id,
'ibtype': ibtype,
"index_id": data['indexes'],
"indexes": indexes,
'geometry': LineString(self.mesh.coord[indexes])
})
self._ocean = gpd.GeoDataFrame(ocean_boundaries)
self._land = gpd.GeoDataFrame(land_boundaries)
self._interior = gpd.GeoDataFrame(interior_boundaries)
def ocean(self) -> gpd.GeoDataFrame:
"""Retrieve the ocean boundary information dataframe
Parameters
----------
Returns
-------
gpd.GeoDataFrame
Dataframe containing the geometry and information of
ocean open boundary.
"""
self._init_dataframes()
return self._ocean
def land(self):
"""Retrieve the land boundary information dataframe
Parameters
----------
Returns
-------
gpd.GeoDataFrame
Dataframe containing the geometry and information of
land boundary.
"""
self._init_dataframes()
return self._land
def interior(self):
"""Retrieve the island boundary information dataframe
Parameters
----------
Returns
-------
gpd.GeoDataFrame
Dataframe containing the geometry and information of
island boundary.
"""
self._init_dataframes()
return self._interior
@property
def data(self) -> Dict[Optional[int], Any]:
"""Read-only property referencing the boundary data dictionary"""
return self._data
@lru_cache(maxsize=1)
def __call__(self) -> gpd.GeoDataFrame:
"""Retrieve the dataframe for all boundaries information
Parameters
----------
Returns
-------
gpd.GeoDataFrame
Dataframe containing information for all boundaries shape
and type.
Notes
-----
The result of this method is cached, so that multiple calls
to it won't result in multiple calculations. If the mesh
is modified and the cache is not properly cleared, the calls
to this method can result in invalid return values.
"""
self._init_dataframes()
data = []
for bnd in self.ocean().itertuples():
data.append({
'id': bnd.id,
'ibtype': None,
"index_id": bnd.index_id,
"indexes": bnd.indexes,
'geometry': bnd.geometry})
for bnd in self.land().itertuples():
data.append({
'id': bnd.id,
'ibtype': bnd.ibtype,
"index_id": bnd.index_id,
"indexes": bnd.indexes,
'geometry': bnd.geometry})
for bnd in self.interior().itertuples():
data.append({
'id': bnd.id,
'ibtype': bnd.ibtype,
"index_id": bnd.index_id,
"indexes": bnd.indexes,
'geometry': bnd.geometry})
return gpd.GeoDataFrame(data, crs=self.mesh.crs)
def __len__(self) -> int:
"""Returns the number of boundary segments"""
return len(self())
def auto_generate(
self,
threshold: float = 0.,
land_ibtype: int = 0,
interior_ibtype: int = 1,
):
"""Automatically detect boundaries based on elevation data.
Parameters
----------
threshold : float, default=0
Threshold above which nodes are considered dry nodes
for ocean vs land boundary detection
land_ibtype : int, default=0
Value to assign to land boundary type
interior_ibtype : int, default=1
Value to assign to island boundary type
Returns
-------
None
Raises
------
ValueError
If any of the values assigned to a mesh node is `np.nan`.
Notes
-----
An edge is considered dry if any of its attached nodes is dry
(i.e. the node's elevation is larger than or equal to the `threshold`).
"""
values = self.mesh.value
if np.any(np.isnan(values)):
raise ValueError(
"Mesh contains invalid values. Raster values must"
"be interpolated to the mesh before generating "
"boundaries.")
coords = self.mesh.msh_t.vert2['coord']
coo_to_idx = {
tuple(coo): idx
for idx, coo in enumerate(coords)}
polys = utils.get_mesh_polygons(self.mesh.msh_t)
# TODO: Split using shapely to get bdry segments
boundaries = defaultdict(defaultdict)
bdry_type = dict
get_id = self.mesh.nodes.get_id_by_index
# generate exterior boundaries
for poly in polys:
ext_ring_coo = poly.exterior.coords
ext_ring = np.array([
(coo_to_idx[ext_ring_coo[e]],
coo_to_idx[ext_ring_coo[e + 1]])
for e, coo in enumerate(ext_ring_coo[:-1])])
# find boundary edges
edge_tag = np.full(ext_ring.shape, 0)
edge_tag[
np.where(values[ext_ring[:, 0]] < threshold)[0], 0] = -1
edge_tag[
np.where(values[ext_ring[:, 1]] < threshold)[0], 1] = -1
edge_tag[
np.where(values[ext_ring[:, 0]] >= threshold)[0], 0] = 1
edge_tag[
np.where(values[ext_ring[:, 1]] >= threshold)[0], 1] = 1
# sort boundary edges
ocean_boundary = []
land_boundary = []
for i, (e0, e1) in enumerate(edge_tag):
if np.any(np.asarray((e0, e1)) == 1):
land_boundary.append(tuple(ext_ring[i, :]))
elif np.any(np.asarray((e0, e1)) == -1):
ocean_boundary.append(tuple(ext_ring[i, :]))
# ocean_boundaries = utils.sort_edges(ocean_boundary)
# land_boundaries = utils.sort_edges(land_boundary)
ocean_boundaries = []
if len(ocean_boundary) != 0:
#pylint: disable=not-an-iterable
ocean_segs = linemerge(coords[np.array(ocean_boundary)].tolist())
ocean_segs = [ocean_segs] if isinstance(ocean_segs, LineString) else ocean_segs
ocean_boundaries = [
[(coo_to_idx[seg.coords[e]], coo_to_idx[seg.coords[e + 1]])
for e, coo in enumerate(seg.coords[:-1])]
for seg in ocean_segs]
land_boundaries = []
if len(land_boundary) != 0:
#pylint: disable=not-an-iterable
land_segs = linemerge(coords[np.array(land_boundary)].tolist())
land_segs = [land_segs] if isinstance(land_segs, LineString) else land_segs
land_boundaries = [
[(coo_to_idx[seg.coords[e]], coo_to_idx[seg.coords[e + 1]])
for e, coo in enumerate(seg.coords[:-1])]
for seg in land_segs]
_bnd_id = len(boundaries[None])
for bnd in ocean_boundaries:
e0, e1 = [list(t) for t in zip(*bnd)]
e0 = [get_id(vert) for vert in e0]
data = e0 + [get_id(e1[-1])]
boundaries[None][_bnd_id] = bdry_type(
indexes=data, properties={})
_bnd_id += 1
# add land boundaries
_bnd_id = len(boundaries[land_ibtype])
for bnd in land_boundaries:
e0, e1 = [list(t) for t in zip(*bnd)]
e0 = [get_id(vert) for vert in e0]
data = e0 + [get_id(e1[-1])]
boundaries[land_ibtype][_bnd_id] = bdry_type(
indexes=data, properties={})
_bnd_id += 1
# generate interior boundaries
_bnd_id = 0
interior_boundaries = defaultdict()
for poly in polys:
interiors = poly.interiors
for interior in interiors:
int_ring_coo = interior.coords
int_ring = [
(coo_to_idx[int_ring_coo[e]],
coo_to_idx[int_ring_coo[e + 1]])
for e, coo in enumerate(int_ring_coo[:-1])]
# TODO: Do we still need these?
e0, e1 = [list(t) for t in zip(*int_ring)]
if utils.signed_polygon_area(self.mesh.coord[e0, :]) < 0:
e0 = e0[::-1]
e1 = e1[::-1]
e0 = [get_id(vert) for vert in e0]
e0.append(e0[0])
interior_boundaries[_bnd_id] = e0
_bnd_id += 1
for bnd_id, data in interior_boundaries.items():
boundaries[interior_ibtype][bnd_id] = bdry_type(
indexes=data, properties={})
self._data = boundaries
self._init_dataframes.cache_clear()
self.__call__.cache_clear()
self._init_dataframes()
SortedRingType = Dict[int,
Dict[Literal['exterior', 'interiors'],
Union[npt.NDArray, List[npt.NDArray]]]
]
def sort_rings(
index_rings: List[List[Tuple[int, int]]],
vertices: npt.NDArray[np.float32]) -> SortedRingType:
"""Sorts a list of index-rings.
Takes a list of unsorted index rings and sorts them into
"exterior" and "interior" components. Any doubly-nested rings
are considered exterior rings.
Parameters
----------
index_rings : List[List[Tuple[int, int]]]
Unsorted list of lists of mesh edges, as specified by the end node
indexes of each edge.
vertices : npt.NDArray[np.float32]
2D ``n x 2`` array of node coordinate couples.
Returns
-------
SortedRingType
Dictionary of information about polygon boundaries extracted
based on the input
Notes
-----
The return value is a mapping of ring index to dictionary
containing exterior and interior linear ring information as
numpy array
This function is not currently used; instead, a different faster
approach is used for boundary and polygon calculation from
elements.
"""
# TODO: Refactor and optimize. Calls that use :class:matplotlib.path.Path can
# probably be optimized using shapely.
# sort index_rings into corresponding "polygons"
areas = []
for index_ring in index_rings:
e0, e1 = [list(t) for t in zip(*index_ring)]
areas.append(float(Polygon(vertices[e0, :]).area))
# maximum area must be main mesh
idx = areas.index(np.max(areas))
exterior = index_rings.pop(idx)
areas.pop(idx)
_id = 0
_index_rings = {}
_index_rings[_id] = {
'exterior': np.asarray(exterior),
'interiors': []
}
e0, e1 = [list(t) for t in zip(*exterior)]
path = Path(vertices[e0 + [e0[0]], :], closed=True)
while len(index_rings) > 0:
# find all internal rings
potential_interiors = []
for i, index_ring in enumerate(index_rings):
e0, e1 = [list(t) for t in zip(*index_ring)]
if path.contains_point(vertices[e0[0], :]):
potential_interiors.append(i)
# filter out nested rings
real_interiors = []
for i, p_interior in reversed(
list(enumerate(potential_interiors))):
_p_interior = index_rings[p_interior]
check = [index_rings[k]
for j, k in
reversed(list(enumerate(potential_interiors)))
if i != j]
has_parent = False
for _path in check:
e0, e1 = [list(t) for t in zip(*_path)]
_path = Path(vertices[e0 + [e0[0]], :], closed=True)
if _path.contains_point(vertices[_p_interior[0][0], :]):
has_parent = True
if not has_parent:
real_interiors.append(p_interior)
# pop real rings from collection
for i in reversed(sorted(real_interiors)):
_index_rings[_id]['interiors'].append(
np.asarray(index_rings.pop(i)))
areas.pop(i)
# if no internal rings found, initialize next polygon
if len(index_rings) > 0:
idx = areas.index(np.max(areas))
exterior = index_rings.pop(idx)
areas.pop(idx)
_id += 1
_index_rings[_id] = {
'exterior': np.asarray(exterior),
'interiors': []
}
e0, e1 = [list(t) for t in zip(*exterior)]
path = Path(vertices[e0 + [e0[0]], :], closed=True)
return _index_rings
def _mesh_interpolate_worker(
coords: npt.NDArray[np.float32],
coords_crs: CRS,
raster_path: Union[str, Path],
chunk_size: Optional[int],
method: Literal['spline', 'linear', 'nearest'] = "spline",
filter_by_shape: bool = False):
"""Interpolator worker function to be used in parallel calls
Parameters
----------
coords : npt.NDArray[np.float32]
Mesh node coordinates.
coords_crs : CRS
Coordinate reference system of the input mesh coordinates.
raster_path : str or Path
Path to the raster temporary working file.
chunk_size : int or None
Chunk size for windowing over the raster.
method : {'spline', 'linear', 'nearest'}, default='spline'
Method of interpolation.
filter_by_shape : bool
Flag for node filtering based on raster bbox or shape
Returns
-------
idxs : npt.NDArray[bool]
Mask of the nodes whose values are updated by current
interpolation
values : npt.NDArray[np.float32]
Interpolated values.
Raises
------
ValueError
If specified interpolation `method` is not supported.
"""
coords = np.array(coords)
raster = Raster(raster_path)
idxs = []
values = []
for window in raster.iter_windows(chunk_size=chunk_size, overlap=2):
if not raster.crs.equals(coords_crs):
transformer = Transformer.from_crs(
coords_crs, raster.crs, always_xy=True)
# pylint: disable=E0633
coords[:, 0], coords[:, 1] = transformer.transform(
coords[:, 0], coords[:, 1])
xi = raster.get_x(window)
yi = raster.get_y(window)
# Use masked array to ignore missing values from DEM
zi = raster.get_values(window=window, masked=True)
if not filter_by_shape:
_idxs = np.logical_and(
np.logical_and(
np.min(xi) <= coords[:, 0],
np.max(xi) >= coords[:, 0]),
np.logical_and(
np.min(yi) <= coords[:, 1],
np.max(yi) >= coords[:, 1]))
else:
shape = raster.get_multipolygon()
gs_pt = gpd.points_from_xy(coords[:, 0], coords[:, 1])
_idxs = gs_pt.intersects(shape)
interp_mask = None
if method == 'spline':
f = RectBivariateSpline(
xi,
np.flip(yi),
np.flipud(zi).T,
kx=3, ky=3, s=0,
# bbox=[min(x), max(x), min(y), max(y)] # ??
)
_values = f.ev(coords[_idxs, 0], coords[_idxs, 1])
elif method in ['nearest', 'linear']:
# Inspired by StackOverflow 35807321
if np.any(zi.mask):
m_interp = RegularGridInterpolator(
(xi, np.flip(yi)),
np.flipud(zi.mask).T.astype(bool),
method=method
)
# Pick nodes NOT "contaminated" by masked values
interp_mask = m_interp(coords[_idxs]) > 0
f = RegularGridInterpolator(
(xi, np.flip(yi)),
np.flipud(zi).T,
method=method
)
_values = f(coords[_idxs])
else:
raise ValueError(
f"Invalid value method specified <{method}>!")
if interp_mask is not None:
# pylint: disable=invalid-unary-operand-type
helper = np.ones_like(_values).astype(bool)
helper[interp_mask] = False
# _idxs is inverse mask
_idxs[_idxs] = helper
_values = _values[~interp_mask]
idxs.append(_idxs)
values.append(_values)
return (np.hstack(idxs), np.hstack(values))
| 31.437795 | 95 | 0.548111 | ["CC0-1.0"] | noaa-ocs-modeling/OCSMesh | ocsmesh/mesh/mesh.py | 66,711 | Python |
#
# Solution to Project Euler problem 287
# Copyright (c) Project Nayuki. All rights reserved.
#
# https://www.nayuki.io/page/project-euler-solutions
# https://github.com/nayuki/Project-Euler-solutions
#
# Let R = 2^(N-1) denote the radius of the circle (filled disk) being drawn.
#
# First, we can simplify the problem by translating (shifting) the coordinate system.
# Instead of x and y each in [0, 2^N) for the formula [x - 2^(N-1)]^2 + [y - 2^(N-1)]^2 <= R^2,
# we shall consider x and y each in [-(2^(N-1)), 2^(N-1)) for the formula x^2 + y^2 <= R^2.
#
# Suppose we are given a square 2D region with endpoints [xstart, xend) and [ystart, yend).
# If the region is entirely white or entirely black, then it takes 2 bits to encode the region.
# Otherwise the region must have both white and black pixels, so we use 1 bit
# to encode the split, recurse on the 4 sub-squares, and sum their code lengths.
#
# Within the region, what are the possible values of the left side of the formula, x^2 + y^2?
# To minimize or maximize x^2 + y^2, we can min/maximize each of x^2 and y^2 independently.
# - To minimize x^2, we minimize |x|. If 0 is in [xstart, xend),
# then the minimum |x| is 0, and thus the minimum x^2 is 0.
# Otherwise, either all possible x values are negative or all
# are positive, so the minimum |x| is min(|xstart|, |xend-1|).
# - To maximize x^2, we maximize |x|. This simply equals max(|xstart|, |xend-1|).
# - The same arguments apply to minimizing/maximizing y^2.
#
# Now evaluate minR^2 = minX^2 + minY^2, and maxR^2 = maxX^2 + maxY^2.
# - If maxR^2 <= R^2, then all points in the region satisfy
# x^2 + y^2 <= R^2, hence the entire region is black.
# - Similarly, if minR^2 > R^2, then all points in the region
# satisfy x^2 + y^2 > R^2, hence the entire region is white.
# - Otherwise, the region must contain both black and white points,
# so we split into 4 subregions and recurse.
#
# One further optimization: If the region [xstart, xend) * [ystart, yend) lies
# entirely within a quadrant, then calculating minR and maxR becomes trivial.
# In fact, only the root call to compressed_length() spans both positive
# and negative coordinates; all deeper calls are entirely within a quadrant.
# For a region with [xstart, xend) where xstart < xend <= 0, compressed_length()
# yields the same result when the range is replaced with [-xend + 1, -xstart + 1).
# Hence by symmetry, we can only consider cases where 0 <= xstart < xend,
# and not deal with negative ranges. This optimized bit length algorithm can
# no longer be adapted to encode the actual compressed bit stream, however.
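# Worked example (added for illustration): take R^2 = 16 and the region
# [xstart, xend) = [3, 5), [ystart, yend) = [0, 2). Then
# minX^2 = min(|3|, |5-1|)^2 = 9 and minY^2 = 0 (since 0 lies in the y-range),
# so minR^2 = 9 <= 16, meaning the region is not entirely white; and
# maxX^2 = max(|3|, |5-1|)^2 = 16 and maxY^2 = max(|0|, |2-1|)^2 = 1,
# so maxR^2 = 17 > 16, meaning the region is not entirely black.
# Hence this region contains both colors and must be subdivided.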
def compute():
N = 24
RADIUS_SQUARED = 2**(2 * N - 2)
# Returns the exact minimum number of bits required to encode
# the circle image's region of [xstart, end) * [ystart, yend),
# requiring 0 <= xstart < xend and 0 <= ystart < yend.
def compressed_length(xstart, xend, ystart, yend):
if xstart * xstart + ystart * ystart > RADIUS_SQUARED: # All white
return 2
elif (xend - 1) * (xend - 1) + (yend - 1) * (yend - 1) <= RADIUS_SQUARED: # All black
return 2
else: # Subdivide and recurse
xmid = (xstart + xend) >> 1
ymid = (ystart + yend) >> 1
return (1 +
compressed_length(xstart, xmid, ymid , yend) + # Top left
compressed_length(xmid , xend, ymid , yend) + # Top right
compressed_length(xstart, xmid, ystart, ymid) + # Bottom left
compressed_length(xmid , xend, ystart, ymid)) # Bottom right
temp = 2**(N - 1)
return str(1 +
compressed_length(0, temp, 0, temp) +
compressed_length(0, temp, 1, temp + 1) +
compressed_length(1, temp + 1, 0, temp) +
compressed_length(1, temp + 1, 1, temp + 1))
if __name__ == "__main__":
print(compute())
| 47.935897 | 95 | 0.682268 | ["MIT"] | xianlinfeng/project_euler_python3 | solutions/p287.py | 3,739 | Python |
from probs import Binomial
class TestBinomial:
@staticmethod
def test_binomial() -> None:
d = Binomial()
assert d.expectation() == 0
assert d.variance() == 0
# TODO: Python 3.7 implementation differs from 3.8+
# assert P(d == 0) == 1
# assert P(d == 1) == 0
# assert P(d == 2) == 0
# d = Binomial(n=6, p=0.7)
# assert P(d == 0) == 0.000729
# assert P(d == 1) == 0.010206
# assert P(d == 2) == 0.059535
# assert P(d == 3) == 0.18522
# assert P(d == 4) == 0.324135
# assert P(d == 5) == 0.302526
# assert P(d == 6) == 0.117649
# assert P(d == 7) == 0
@staticmethod
def test_sum() -> None:
d = Binomial() + Binomial()
assert d.expectation() == 0
assert d.variance() == 0
# TODO
assert d.pmf == {}
# assert P(d == 2) == 1 / 36
# assert P(d == 8) == 5 / 36
# assert P(d == 60) == 0
@staticmethod
def test_repr() -> None:
d = Binomial() + Binomial()
assert str(d) == "Binomial(pmf={}, n=0, p=1)"
| 25.704545 | 59 | 0.458002 | ["MIT"] | TylerYep/probs | tests/discrete/binomial_test.py | 1,131 | Python |
from collections import Counter
class MajorityBaselineClassifier:
@staticmethod
def train(_, labels):
c = Counter(labels)
return c.most_common()[0][0]
@staticmethod
def predict(_, majority_label):
return majority_label
| 20.153846 | 36 | 0.675573 | ["MIT"] | dompuiu/PROEA-821-005-Spring-2018 | HW2/majority_baseline_classifier.py | 262 | Python |
#! /usr/bin/env python
import tensorflow as tf
import numpy as np
import os
import time
import datetime
import data_helpers
from text_cnn import TextCNN
from tensorflow.contrib import learn
# Parameters
# ==================================================
# Data loading params (corpus file path definitions)
tf.flags.DEFINE_float("dev_sample_percentage", .1, "Percentage of the training data to use for validation")
tf.flags.DEFINE_string("positive_data_file", "./data/rt-polaritydata/rt-polarity.pos", "Data source for the positive data.")
tf.flags.DEFINE_string("negative_data_file", "./data/rt-polaritydata/rt-polarity.neg", "Data source for the negative data.")
# Model Hyperparameters (network hyperparameter definitions)
tf.flags.DEFINE_integer("embedding_dim", 128, "Dimensionality of character embedding (default: 128)")
tf.flags.DEFINE_string("filter_sizes", "3,4,5", "Comma-separated filter sizes (default: '3,4,5')")
tf.flags.DEFINE_integer("num_filters", 128, "Number of filters per filter size (default: 128)")
tf.flags.DEFINE_float("dropout_keep_prob", 0.5, "Dropout keep probability (default: 0.5)")
tf.flags.DEFINE_float("l2_reg_lambda", 0.0, "L2 regularization lambda (default: 0.0)")
# Training parameters
tf.flags.DEFINE_integer("batch_size", 64, "Batch Size (default: 64)")
tf.flags.DEFINE_integer("num_epochs", 200, "Number of training epochs (default: 200)")
tf.flags.DEFINE_integer("evaluate_every", 100, "Evaluate model on dev set after this many steps (default: 100)")
tf.flags.DEFINE_integer("checkpoint_every", 100, "Save model after this many steps (default: 100)")
tf.flags.DEFINE_integer("num_checkpoints", 5, "Number of checkpoints to store (default: 5)")
# Misc Parameters
tf.flags.DEFINE_boolean("allow_soft_placement", True, "Allow device soft device placement")
tf.flags.DEFINE_boolean("log_device_placement", False, "Log placement of ops on devices")
FLAGS = tf.flags.FLAGS
FLAGS._parse_flags()
print("\nParameters:")
for attr, value in sorted(FLAGS.__flags.items()):
print("{}={}".format(attr.upper(), value))
print("")
# Data Preparation
# ==================================================
# Load data
print("Loading data...")
x_text, y = data_helpers.load_data_and_labels(FLAGS.positive_data_file, FLAGS.negative_data_file)
# Build vocabulary
max_document_length = max([len(x.split(" ")) for x in x_text])
# Pad each word-id sequence up to max_document_length
vocab_processor = learn.preprocessing.VocabularyProcessor(max_document_length)
x = np.array(list(vocab_processor.fit_transform(x_text)))
print(x[:10])
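# Illustration (added; the token ids are hypothetical): fit_transform maps each
# sentence to a fixed-length row of vocabulary ids padded with zeros, e.g.
#   "the movie was great" -> [1, 2, 3, 4, 0, 0, ..., 0]  (length = max_document_length)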
# Randomly shuffle data
np.random.seed(10)
shuffle_indices = np.random.permutation(np.arange(len(y)))
x_shuffled = x[shuffle_indices]
y_shuffled = y[shuffle_indices]
dev_sample_index = -1 * int(FLAGS.dev_sample_percentage * float(len(y)))
x_train, x_dev = x_shuffled[:dev_sample_index], x_shuffled[dev_sample_index:]
y_train, y_dev = y_shuffled[:dev_sample_index], y_shuffled[dev_sample_index:]
del x, y, x_shuffled, y_shuffled
print("Vocabulary: ", vocab_processor.vocabulary_)
print("Vocabulary Size: {:d}".format(len(vocab_processor.vocabulary_)))
print("Train/Dev split: {:d}/{:d}".format(len(y_train), len(y_dev)))
print(x_train.shape[0], x_train.shape[1])
| 42.447368 | 125 | 0.725976 | ["Apache-2.0"] | tangku006/cnn-text-classification-tf-master | test.py | 3,276 | Python |
import pytest
import json
from collections import OrderedDict
from great_expectations.profile.base import DatasetProfiler
from great_expectations.profile.basic_dataset_profiler import BasicDatasetProfiler
from great_expectations.profile.columns_exist import ColumnsExistProfiler
from great_expectations.dataset.pandas_dataset import PandasDataset
import great_expectations as ge
from ..test_utils import assertDeepAlmostEqual
from six import PY2
# Tests to write:
# test_cli_method_works -> test_cli
# test context-based profile methods
# test class-based profile methods
# noinspection PyPep8Naming
def test_DataSetProfiler_methods():
toy_dataset = PandasDataset({"x": [1, 2, 3]})
assert DatasetProfiler.validate(1) == False
assert DatasetProfiler.validate(toy_dataset)
with pytest.raises(NotImplementedError) as e_info:
DatasetProfiler.profile(toy_dataset)
# noinspection PyPep8Naming
def test_ColumnsExistProfiler():
toy_dataset = PandasDataset({"x": [1, 2, 3]})
expectations_config, evr_config = ColumnsExistProfiler.profile(toy_dataset)
assert len(expectations_config["expectations"]) == 1
assert expectations_config["expectations"][0]["expectation_type"] == "expect_column_to_exist"
assert expectations_config["expectations"][0]["kwargs"]["column"] == "x"
# noinspection PyPep8Naming
def test_BasicDatasetProfiler():
toy_dataset = PandasDataset({"x": [1, 2, 3]}, data_asset_name="toy_dataset")
assert len(toy_dataset.get_expectation_suite(
suppress_warnings=True)["expectations"]) == 0
expectations_config, evr_config = BasicDatasetProfiler.profile(toy_dataset)
# print(json.dumps(expectations_config, indent=2))
assert len(toy_dataset.get_expectation_suite(
suppress_warnings=True)["expectations"]) > 0
assert expectations_config["data_asset_name"] == "toy_dataset"
assert "BasicDatasetProfiler" in expectations_config["meta"]
assert set(expectations_config["meta"]["BasicDatasetProfiler"].keys()) == {
"created_by", "created_at"
}
assert "notes" in expectations_config["meta"]
assert set(expectations_config["meta"]["notes"].keys()) == {"format", "content"}
assert "To add additional notes" in expectations_config["meta"]["notes"]["content"][0]
added_expectations = set()
for exp in expectations_config["expectations"]:
added_expectations.add(exp["expectation_type"])
assert "BasicDatasetProfiler" in exp["meta"]
assert "confidence" in exp["meta"]["BasicDatasetProfiler"]
expected_expectations = {
'expect_table_row_count_to_be_between',
'expect_table_columns_to_match_ordered_list',
'expect_column_values_to_be_in_set',
'expect_column_unique_value_count_to_be_between',
'expect_column_proportion_of_unique_values_to_be_between',
'expect_column_values_to_not_be_null',
'expect_column_values_to_be_in_type_list',
'expect_column_values_to_be_unique'}
assert expected_expectations.issubset(added_expectations)
def test_BasicDatasetProfiler_null_column():
"""
The profiler should determine that null columns are of null cardinality and of null type and
not to generate expectations specific to types and cardinality categories.
We verify this by running the basic profiler on a Pandas dataset with an empty column
and asserting the number of successful results for the empty columns.
"""
toy_dataset = PandasDataset({"x": [1, 2, 3], "y": [None, None, None]}, data_asset_name="toy_dataset")
assert len(toy_dataset.get_expectation_suite(
suppress_warnings=True)["expectations"]) == 0
expectations_config, evr_config = BasicDatasetProfiler.profile(toy_dataset)
# TODO: assert set - specific expectations
assert len([result for result in evr_config['results'] if
result['expectation_config']['kwargs'].get('column') == 'y' and result['success']]) == 4
assert len([result for result in evr_config['results'] if
result['expectation_config']['kwargs'].get('column') == 'y' and result['success']]) < \
len([result for result in evr_config['results'] if
result['expectation_config']['kwargs'].get('column') == 'x' and result['success']])
def test_BasicDatasetProfiler_partially_null_column(dataset):
"""
Unit test to check the expectations that BasicDatasetProfiler creates for a partially null column.
The test is executed against all the backends (Pandas, Spark, etc.), because it uses
the fixture.
"nulls" is the partially null column in the fixture dataset
"""
expectations_config, evr_config = BasicDatasetProfiler.profile(dataset)
assert set(["expect_column_to_exist", "expect_column_values_to_be_in_type_list", "expect_column_unique_value_count_to_be_between", "expect_column_proportion_of_unique_values_to_be_between", "expect_column_values_to_not_be_null", "expect_column_values_to_be_in_set", "expect_column_values_to_be_unique"]) == \
set([expectation['expectation_type'] for expectation in expectations_config["expectations"] if expectation["kwargs"].get("column") == "nulls"])
def test_BasicDatasetProfiler_non_numeric_low_cardinality(non_numeric_low_card_dataset):
"""
Unit test to check the expectations that BasicDatasetProfiler creates for a low cardinality
non numeric column.
The test is executed against all the backends (Pandas, Spark, etc.), because it uses
the fixture.
"""
expectations_config, evr_config = BasicDatasetProfiler.profile(non_numeric_low_card_dataset)
assert set(["expect_column_to_exist", "expect_column_values_to_be_in_type_list", "expect_column_unique_value_count_to_be_between", "expect_column_proportion_of_unique_values_to_be_between", "expect_column_values_to_not_be_null", "expect_column_values_to_be_in_set", "expect_column_values_to_not_match_regex"]) == \
set([expectation['expectation_type'] for expectation in expectations_config["expectations"] if expectation["kwargs"].get("column") == "lowcardnonnum"])
def test_BasicDatasetProfiler_non_numeric_high_cardinality(non_numeric_high_card_dataset):
"""
Unit test to check the expectations that BasicDatasetProfiler creates for a high cardinality
non numeric column.
The test is executed against all the backends (Pandas, Spark, etc.), because it uses
the fixture.
"""
expectations_config, evr_config = BasicDatasetProfiler.profile(non_numeric_high_card_dataset)
assert set(["expect_column_to_exist", "expect_column_values_to_be_in_type_list", "expect_column_unique_value_count_to_be_between", "expect_column_proportion_of_unique_values_to_be_between", "expect_column_values_to_not_be_null", "expect_column_values_to_be_in_set", "expect_column_values_to_not_match_regex"]) == \
set([expectation['expectation_type'] for expectation in expectations_config["expectations"] if expectation["kwargs"].get("column") == "highcardnonnum"])
def test_BasicDatasetProfiler_numeric_high_cardinality(numeric_high_card_dataset):
"""
Unit test to check the expectations that BasicDatasetProfiler creates for a high cardinality
numeric column.
The test is executed against all the backends (Pandas, Spark, etc.), because it uses
the fixture.
"""
expectations_config, evr_config = BasicDatasetProfiler.profile(numeric_high_card_dataset)
assert set(["expect_column_to_exist", "expect_table_row_count_to_be_between", "expect_table_columns_to_match_ordered_list", "expect_column_values_to_be_in_type_list", "expect_column_unique_value_count_to_be_between", "expect_column_proportion_of_unique_values_to_be_between", "expect_column_values_to_not_be_null", "expect_column_values_to_be_in_set", "expect_column_values_to_be_unique"]) == set([expectation['expectation_type'] for expectation in expectations_config["expectations"]])
# noinspection PyPep8Naming
def test_BasicDatasetProfiler_with_context(empty_data_context, filesystem_csv_2):
empty_data_context.add_datasource("my_datasource",
module_name="great_expectations.datasource",
class_name="PandasDatasource",
base_directory=str(filesystem_csv_2))
not_so_empty_data_context = empty_data_context
not_so_empty_data_context.create_expectation_suite("my_datasource/f1", "default")
batch_kwargs = not_so_empty_data_context.yield_batch_kwargs("my_datasource/f1")
batch = not_so_empty_data_context.get_batch("my_datasource/f1", "default", batch_kwargs)
expectations_config, validation_results = BasicDatasetProfiler.profile(
batch)
# print(batch.get_batch_kwargs())
# print(json.dumps(expectations_config, indent=2))
assert expectations_config["data_asset_name"] == "my_datasource/default/f1"
assert expectations_config["expectation_suite_name"] == "default"
assert "BasicDatasetProfiler" in expectations_config["meta"]
assert set(expectations_config["meta"]["BasicDatasetProfiler"].keys()) == {
"created_by", "created_at", "batch_kwargs"
}
for exp in expectations_config["expectations"]:
assert "BasicDatasetProfiler" in exp["meta"]
assert "confidence" in exp["meta"]["BasicDatasetProfiler"]
assert validation_results["meta"]["data_asset_name"] == "my_datasource/default/f1"
assert set(validation_results["meta"].keys()) == {
"great_expectations.__version__", "data_asset_name", "expectation_suite_name", "run_id", "batch_kwargs",
"batch_id"
}
# noinspection PyPep8Naming
def test_context_profiler(empty_data_context, filesystem_csv_2):
"""This just validates that it's possible to profile using the datasource hook, and have
validation results available in the DataContext"""
empty_data_context.add_datasource("my_datasource",
module_name="great_expectations.datasource",
class_name="PandasDatasource",
base_directory=str(filesystem_csv_2))
not_so_empty_data_context = empty_data_context
assert not_so_empty_data_context.list_expectation_suite_keys() == []
not_so_empty_data_context.profile_datasource("my_datasource", profiler=BasicDatasetProfiler)
assert len(not_so_empty_data_context.list_expectation_suite_keys()) == 1
profiled_expectations = not_so_empty_data_context.get_expectation_suite('f1', "BasicDatasetProfiler")
print(json.dumps(profiled_expectations, indent=2))
for exp in profiled_expectations["expectations"]:
assert "BasicDatasetProfiler" in exp["meta"]
assert "confidence" in exp["meta"]["BasicDatasetProfiler"]
assert profiled_expectations["data_asset_name"] == "my_datasource/default/f1"
assert profiled_expectations["expectation_suite_name"] == "BasicDatasetProfiler"
assert "batch_kwargs" in profiled_expectations["meta"]["BasicDatasetProfiler"]
assert len(profiled_expectations["expectations"]) > 0
# noinspection PyPep8Naming
def test_BasicDatasetProfiler_on_titanic():
"""
A snapshot test for BasicDatasetProfiler.
We are running the profiler on the Titanic dataset
and comparing the EVRs to ones retrieved from a
previously stored file.
"""
df = ge.read_csv("./tests/test_sets/Titanic.csv")
suite, evrs = df.profile(BasicDatasetProfiler)
# Check to make sure BasicDatasetProfiler is adding meta.columns with a single "description" field for each column
print(json.dumps(suite["meta"], indent=2))
assert "columns" in suite["meta"]
for k,v in suite["meta"]["columns"].items():
assert v == {"description": ""}
# Note: the above already produces an EVR; rerunning isn't strictly necessary just for EVRs
evrs = df.validate(result_format="SUMMARY") # ["results"]
# with open('tests/test_sets/expected_evrs_BasicDatasetProfiler_on_titanic.json', 'w+') as file:
# file.write(json.dumps(evrs, indent=2))
#
# with open('tests/render/fixtures/BasicDatasetProfiler_evrs.json', 'w+') as file:
# file.write(json.dumps(evrs, indent=2))
with open('tests/test_sets/expected_evrs_BasicDatasetProfiler_on_titanic.json', 'r') as file:
expected_evrs = json.load(file, object_pairs_hook=OrderedDict)
expected_evrs.pop("meta")
evrs.pop("meta")
# We know that python 2 does not guarantee the order of value_counts, which causes a different
# order for items in the partial_unexpected_value_counts list
# Remove those before test.
for result in evrs["results"]:
if "partial_unexpected_counts" in result["result"]:
result["result"].pop("partial_unexpected_counts")
for result in expected_evrs["results"]:
if "partial_unexpected_counts" in result["result"]:
result["result"].pop("partial_unexpected_counts")
# DISABLE TEST IN PY2 BECAUSE OF ORDER ISSUE AND NEAR-EOL
if not PY2:
assertDeepAlmostEqual(expected_evrs, evrs)
| 48.639405 | 490 | 0.747249 | ["Apache-2.0"] | AdamHepner/great_expectations | tests/profile/test_profile.py | 13,084 | Python |
import os
import codecs
from busSchedules import schedule1B
from busSchedules import schedule2
from busSchedules import schedule3
from busSchedules import schedule4
from busSchedules import schedule5
from busSchedules import schedule6
from busZonesTimes import busZonesTimesOne
from busZonesTimes import busZonesTimesOneB
from busZonesTimes import busZonesTimesTwo
from busZonesTimes import busZonesTimesThree
from busZonesTimes import busZonesTimesFour
from busZonesTimes import busZonesTimesFive
from busZonesTimes import busZonesTimesSix
from busZonesTimes import busZonesTimesOneSaturday
from busZonesTimes import busZonesTimesOneBSaturday
from busZonesTimes import busZonesTimesTwoSaturday
from busZonesTimes import busZonesTimesThreeSaturday
from busZonesTimes import busZonesTimesFourSaturday
from busZonesTimes import busZonesTimesFiveSaturday
from busZonesTimes import busZonesTimesSixSaturday
from busZonesTimes import busZonesTimesOneSunday
from busZonesTimes import busZonesTimesOneBSunday
from busZonesTimes import busZonesTimesTwoSaturday
from busZonesTimes import busZonesTimesThreeSunday
from busZonesTimes import busZonesTimesFourSunday
from busZonesTimes import busZonesTimesFiveSunday
from busZonesTimes import busZonesTimesSixSunday
from busRoutes import lineOne
from busRoutes import lineOneB
from busRoutes import lineTwo
from busRoutes import lineThree
from busRoutes import lineFour
from busRoutes import lineFive
from busRoutes import lineSix
from busRoutes import line242
from busStops import busStopsDict
from busStops import linesDict
from datetime import datetime
from flask_caching import Cache
from flask import Flask, send_from_directory, jsonify
from bs4 import BeautifulSoup
VERSION = "1.0"
CACHE_TIMEOUT_SECONDS = os.getenv('CACHE_TIMEOUT', 3600)
GIT_REPO_URL = 'https://github.com/NazarenoCavazzon/BlueAPI'
DOLAR_URL = 'https://www.paralelohoy.com.ar/p/cotizacion-dolar-hoy-argentina.html'
EURO_URL = 'https://www.paralelohoy.com.ar/p/cotizacion-euro-hoy-argentina.html'
REAL_URL = 'https://www.paralelohoy.com.ar/p/cotizacion-real-hoy-argentina.html'
# Create a class called BusStop that will take line, name, address, latitude and longitude.
class BusStop:
def __init__(self, line, name, address, latitude, longitude):
self.line = line
self.name = name
self.address = address
self.latitude = latitude
self.longitude = longitude
def getValues(url):
import requests
html_source = requests.get(url).text
soup = BeautifulSoup(html_source, 'lxml')
table = soup.find("table")
span = table.tbody.text
splittedSpan = span.split("\n")
splittedSpan = filter(None, splittedSpan)
list = []
for x in splittedSpan:
value = []
value = x.split(":")[1].split("$")
value.pop(0)
list.append(value)
return list
def formatResponse(value):
return {
"fecha": datetime.today().strftime('%Y-%m-%d %H:%M:%S'),
"compra" : f"{value[0]}",
"venta" : f"{value[1]}"
}
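# Illustrative example (added; the quoted prices are made up): given a scraped
# buy/sell pair such as ["183.50", "191.50"], formatResponse returns
#   {"fecha": "2021-09-01 12:00:00", "compra": "183.50", "venta": "191.50"}
# which is the JSON shape served by the /api/*/oficial and /api/*/blue endpoints.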
app = Flask(__name__)
app.config['JSON_SORT_KEYS'] = False
cache = Cache(app, config={'CACHE_TYPE': 'simple'})
@app.route("/favicon.ico")
def favicon():
return send_from_directory(os.path.join(app.root_path, 'static'),'favicon.ico')
@app.route("/")
def getRoot():
html = ""
with codecs.open('index.html', "r", "utf-8") as f:
codeHTML = f.read()
for element in codeHTML:
if element == "¡":
element = VERSION
html += element
elif element == "ñ":
element = GIT_REPO_URL
html += element
else:
html += element
return html
@app.route("/api/ping")
def ping():
return "pong"
@app.route("/api/dolar/oficial")
@cache.cached(timeout=CACHE_TIMEOUT_SECONDS)
def getDolarOficial():
dolarValues = getValues(DOLAR_URL)
dolarOficial = formatResponse(dolarValues[0])
return jsonify(dolarOficial)
@app.route("/api/dolar/blue")
@cache.cached(timeout=CACHE_TIMEOUT_SECONDS)
def getDolarBlue():
dolarValues = getValues(DOLAR_URL)
dolarBlue = formatResponse(dolarValues[1])
return jsonify(dolarBlue)
@app.route("/api/euro/oficial")
@cache.cached(timeout=CACHE_TIMEOUT_SECONDS)
def getEuroOficial():
euroValues = getValues(EURO_URL)
euroOficial = formatResponse(euroValues[0])
return jsonify(euroOficial)
@app.route("/api/euro/blue")
@cache.cached(timeout=CACHE_TIMEOUT_SECONDS)
def getEuroBlue():
euroValues = getValues(EURO_URL)
euroBlue = formatResponse(euroValues[1])
return jsonify(euroBlue)
@app.route("/api/real/oficial")
@cache.cached(timeout=CACHE_TIMEOUT_SECONDS)
def getRealOficial():
realValues = getValues(REAL_URL)
realOficial = formatResponse(realValues[0])
return jsonify(realOficial)
@app.route("/api/real/blue")
@cache.cached(timeout=CACHE_TIMEOUT_SECONDS)
def getRealBlue():
realValues = getValues(REAL_URL)
realBlue = formatResponse(realValues[1])
return jsonify(realBlue)
@app.route("/api/busstops")
def getBusStops():
return jsonify(busStopsDict)
@app.route("/api/1")
def getLine1():
return jsonify(lineOne)
@app.route("/api/1B")
def getLine1B():
return jsonify(lineOneB)
@app.route("/api/2")
def getLine2():
return jsonify(lineTwo)
@app.route("/api/3")
def getLine3():
return jsonify(lineThree)
@app.route("/api/4")
def getLine4():
return jsonify(lineFour)
@app.route("/api/5")
def getLine5():
return jsonify(lineFive)
@app.route("/api/6")
def getLine6():
return jsonify(lineSix)
@app.route("/api/linesDict")
def getLines():
return jsonify(linesDict)
# Schedules by ZONE ============================================================
@app.route("/api/busZonesTimes/1")
def getBusZonesOne():
return jsonify(busZonesTimesOne)
@app.route("/api/busZonesTimes/1B")
def getBusZonesOneB():
return jsonify(busZonesTimesOneB)
@app.route("/api/busZonesTimes/2")
def getBusZonesTwo():
return jsonify(busZonesTimesTwo)
@app.route("/api/busZonesTimes/3")
def getBusZonesThree():
return jsonify(busZonesTimesThree)
@app.route("/api/busZonesTimes/4")
def getBusZonesFour():
return jsonify(busZonesTimesFour)
@app.route("/api/busZonesTimes/5")
def getBusZonesFive():
return jsonify(busZonesTimesFive)
@app.route("/api/busZonesTimes/6")
def getBusZonesSix():
return jsonify(busZonesTimesSix)
# Schedules by ZONE (Sunday) ============================================================
@app.route("/api/busZonesTimes/1/sunday")
def getBusZonesOneSunday():
return jsonify(busZonesTimesOneSunday)
@app.route("/api/busZonesTimes/1B/sunday")
def getBusZonesOneBSunday():
return jsonify(busZonesTimesOneBSunday)
@app.route("/api/busZonesTimes/2/sunday")
def getBusZonesTwoSunday():
return jsonify(busZonesTimesTwoSunday)
@app.route("/api/busZonesTimes/3/sunday")
def getBusZonesThreeSunday():
return jsonify(busZonesTimesThreeSunday)
@app.route("/api/busZonesTimes/4/sunday")
def getBusZonesFourSunday():
return jsonify(busZonesTimesFourSunday)
@app.route("/api/busZonesTimes/5/sunday")
def getBusZonesFiveSunday():
return jsonify(busZonesTimesFiveSunday)
@app.route("/api/busZonesTimes/6/sunday")
def getBusZonesSixSunday():
return jsonify(busZonesTimesSixSunday)
# Schedules by ZONE (Saturday) ============================================================
@app.route("/api/busZonesTimes/1/saturday")
def getBusZonesOneSaturday():
return jsonify(busZonesTimesOneSaturday)
@app.route("/api/busZonesTimes/1B/saturday")
def getBusZonesOneBSaturday():
return jsonify(busZonesTimesOneBSaturday)
@app.route("/api/busZonesTimes/2/saturday")
def getBusZonesTwoSaturday():
return jsonify(busZonesTimesTwoSaturday)
@app.route("/api/busZonesTimes/3/saturday")
def getBusZonesThreeSaturday():
return jsonify(busZonesTimesThreeSaturday)
@app.route("/api/busZonesTimes/4/saturday")
def getBusZonesFourSaturday():
return jsonify(busZonesTimesFourSaturday)
@app.route("/api/busZonesTimes/5/saturday")
def getBusZonesFiveSaturday():
return jsonify(busZonesTimesFiveSaturday)
@app.route("/api/busZonesTimes/6/saturday")
def getBusZonesSixSaturday():
return jsonify(busZonesTimesSixSaturday)
# Buttons ============================================================
@app.route("/api/gmaps")
def getGMaps():
return jsonify("https://www.google.com/maps/d/u/0/viewer?mid=1d5o2MklEFr0DpG_i_mRwcUd9yjc&ll=-31.654431124663883%2C-64.43315245330842&z=15")
@app.route("/api/donacion")
def getDonationPage():
return jsonify("https://cafecito.app/paragracia")
# Weekday schedules for each line ============================================================
@app.route("/api/1B/schedule")
def get1Bchedule():
return jsonify(schedule1B)
@app.route("/api/2/schedule")
def get2chedule():
return jsonify(schedule2)
@app.route("/api/3/schedule")
def get3chedule():
return jsonify(schedule3)
@app.route("/api/4/schedule")
def get4chedule():
return jsonify(schedule4)
@app.route("/api/5/schedule")
def get5chedule():
return jsonify(schedule5)
@app.route("/api/6/schedule")
def get6chedule():
return jsonify(schedule6)
# Weekend schedules for each line (disabled below: the handler names duplicate the weekday ones and must be renamed before enabling) ============================================================
"""
@app.route("/api/1B/schedule/saturday")
def get1Bchedule():
return jsonify(schedule1B)
@app.route("/api/2/schedule/saturday")
def get2chedule():
return jsonify(schedule2)
@app.route("/api/3/schedule/saturday")
def get3chedule():
return jsonify(schedule3)
@app.route("/api/4/schedule/saturday")
def get4chedule():
return jsonify(schedule4)
@app.route("/api/5/schedule/saturday")
def get5chedule():
return jsonify(schedule5)
@app.route("/api/6/schedule/saturday")
def get6chedule():
return jsonify(schedule6)
"""
if __name__ == '__main__':
app.run(debug=False, port=os.getenv('PORT', 5000))
| 29.153409 | 145 | 0.693432 | [
"MIT"
] | NazarenoCavazzon/BlueAPI | app.py | 10,264 | Python |
"""
Concatenate the labels with the notes data and split using the saved splits
"""
import csv
from datetime import datetime
import random
from constants import DATA_DIR
from constants import MIMIC_3_DIR
import pandas as pd
DATETIME_FORMAT = "%Y-%m-%d %H-%M-%S"
def concat_data(labelsfile, notes_file):
"""
INPUTS:
labelsfile: sorted by hadm id, contains one label per line
notes_file: sorted by hadm id, contains one note per line
"""
with open(labelsfile, 'r') as lf:
print("CONCATENATING")
with open(notes_file, 'r') as notesfile:
outfilename = '%s/notes_labeled.csv' % MIMIC_3_DIR
with open(outfilename, 'w') as outfile:
w = csv.writer(outfile)
w.writerow(['SUBJECT_ID', 'HADM_ID', 'TEXT', 'LABELS'])
labels_gen = next_labels(lf)
notes_gen = next_notes(notesfile)
for i, (subj_id, text, hadm_id) in enumerate(notes_gen):
if i % 10000 == 0:
print(str(i) + " done")
cur_subj, cur_labels, cur_hadm = next(labels_gen)
if cur_hadm == hadm_id:
w.writerow([subj_id, str(hadm_id), text, ';'.join(cur_labels)])
else:
print("couldn't find matching hadm_id. data is probably not sorted correctly")
break
return outfilename
def split_data(labeledfile, base_name):
print("SPLITTING2")
#create and write headers for train, dev, test
train_name = '%s_train_split.csv' % (base_name)
dev_name = '%s_dev_split.csv' % (base_name)
test_name = '%s_test_split.csv' % (base_name)
train_file = open(train_name, 'w')
dev_file = open(dev_name, 'w')
test_file = open(test_name, 'w')
train_file.write(','.join(['SUBJECT_ID', 'HADM_ID', 'TEXT', 'LABELS']) + "\n")
dev_file.write(','.join(['SUBJECT_ID', 'HADM_ID', 'TEXT', 'LABELS']) + "\n")
test_file.write(','.join(['SUBJECT_ID', 'HADM_ID', 'TEXT', 'LABELS']) + "\n")
hadm_ids = {}
#read in train, dev, test splits
for splt in ['train', 'dev', 'test']:
hadm_ids[splt] = set()
with open('%s/%s_full_hadm_ids.csv' % (MIMIC_3_DIR, splt), 'r') as f:
for line in f:
hadm_ids[splt].add(line.rstrip())
with open(labeledfile, 'r') as lf:
reader = csv.reader(lf)
next(reader)
i = 0
cur_hadm = 0
for row in reader:
#filter text, write to file according to train/dev/test split
if i % 10000 == 0:
print(str(i) + " read")
if len(row) > 0: # windows fix
hadm_id = row[1]
if hadm_id in hadm_ids['train']:
train_file.write(','.join(row) + "\n")
elif hadm_id in hadm_ids['dev']:
dev_file.write(','.join(row) + "\n")
elif hadm_id in hadm_ids['test']:
test_file.write(','.join(row) + "\n")
i += 1
train_file.close()
dev_file.close()
test_file.close()
return train_name, dev_name, test_name
def next_labels(labelsfile):
"""
Generator for label sets from the label file
"""
labels_reader = csv.reader(labelsfile)
#header
next(labels_reader)
first_label_line = next(labels_reader)
cur_subj = int(first_label_line[0])
cur_hadm = int(first_label_line[1])
cur_labels = [first_label_line[2]]
for row in labels_reader:
subj_id = int(row[0])
hadm_id = int(row[1])
code = row[2]
#keep reading until you hit a new hadm id
if hadm_id != cur_hadm or subj_id != cur_subj:
yield cur_subj, cur_labels, cur_hadm
cur_labels = [code]
cur_subj = subj_id
cur_hadm = hadm_id
else:
#add to the labels and move on
cur_labels.append(code)
yield cur_subj, cur_labels, cur_hadm
def next_notes(notesfile):
"""
Generator for notes from the notes file
This will also concatenate discharge summaries and their addenda, which have the same subject and hadm id
"""
nr = csv.reader(notesfile)
#header
next(nr)
first_note = next(nr)
cur_subj = int(first_note[0])
cur_hadm = int(first_note[1])
cur_text = first_note[3]
for row in nr:
subj_id = int(row[0])
hadm_id = int(row[1])
text = row[3]
#keep reading until you hit a new hadm id
if hadm_id != cur_hadm or subj_id != cur_subj:
yield cur_subj, cur_text, cur_hadm
cur_text = text
cur_subj = subj_id
cur_hadm = hadm_id
else:
#concatenate to the discharge summary and move on
cur_text += " " + text
yield cur_subj, cur_text, cur_hadm
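# Hedged usage sketch (not part of the original script) showing how these helpers are
# typically chained; the label/notes filenames here are assumptions, not the project's
# actual paths.
#
#   labeled = concat_data('%s/ALL_CODES_sorted.csv' % MIMIC_3_DIR,
#                         '%s/disch_notes_sorted.csv' % MIMIC_3_DIR)
#   train_name, dev_name, test_name = split_data(labeled, base_name='%s/disch' % MIMIC_3_DIR)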
| 28.544872 | 113 | 0.628116 | [
"MIT"
] | franzbischoff/caml-mimic | dataproc/concat_and_split.py | 4,453 | Python |
import os
import json
import logging
from dataclasses import dataclass, field
from typing import Dict, List, Optional, Callable
import torch
import wandb
import numpy as np
from tqdm.auto import tqdm
from torch.utils.data.dataloader import DataLoader
from torch.utils.data.dataset import Dataset
from torch.utils.data.distributed import DistributedSampler
from torch.utils.data.sampler import RandomSampler
from transformers import (
Trainer,
TrainingArguments,
EvalPrediction,
DataCollator,
DefaultDataCollator,
)
from transformers.trainer_utils import PredictionOutput
from transformers.training_args import is_tpu_available
from src.data.task_data_processors import task_output_modes
from src.data.data_utils import compute_task_metrics
if is_tpu_available():
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
import torch_xla.distributed.parallel_loader as pl
logger = logging.getLogger(__name__)
@dataclass
class MultiTaskTrainingArguments(TrainingArguments):
use_mt_uncertainty: bool = field(
default=False,
metadata={"help": "Use MT-Uncertainty sampling method"},
)
uniform_mt_sampling: bool = field(
default=False,
metadata={"help": "Sample each task an equal amount to times per epoch."},
)
percent_of_max_data_size: float = field(
default=1.0,
metadata={
"help": "If uniform_mt_sampling=True, specify the samples per task per "
"epoch based on the maximum dataset length. If below 0.0 or above 1.0,"
"it will be set to the closest of 0.0 or 1.0."
},
)
class MultiTaskTrainer(Trainer):
def __init__(
self,
tokenizer,
data_args,
eval_datasets=None,
test_datasets=None,
*args,
**kwargs,
):
super(MultiTaskTrainer, self).__init__(*args, **kwargs)
self.tokenizer = tokenizer
self.data_args = data_args
self.eval_datasets = eval_datasets
self.test_datasets = test_datasets
# self.data_collator = DefaultDataCollator()
def get_train_dataloader(self) -> DataLoader:
if self.args.use_mt_uncertainty:
return self._create_custom_dataloader()
else:
return super().get_train_dataloader()
def _create_custom_dataloader(self):
class MtUcertaintyIterator:
"""Sample tasks using uncertainty measure."""
def __init__(self, my_loader):
self.my_loader = my_loader
self.loader_iters = [iter(loader) for loader in self.my_loader.loaders]
self.loader_iter_sizes = [len(i) for i in self.loader_iters]
self.max_count = len(self.my_loader)
self.batch_count = 0
def __iter__(self):
return self
def __next__(self):
if self.batch_count == self.max_count:
self.batch_count = 0
raise StopIteration()
test_batch = {}
for idx, loader_iter in enumerate(self.loader_iters):
try:
batch = loader_iter.__next__()
except StopIteration:
new_loader_iter = iter(self.my_loader.loaders[idx])
self.loader_iters[idx] = new_loader_iter
batch = new_loader_iter.__next__()
test_batch = self.batchify_data(batch, test_batch)
inputs = {}
for k, v in test_batch.items():
if k not in ["labels"]:
inputs[k] = v.detach().to(self.my_loader.args.device)
with torch.no_grad():
model.select_batch_mode = True
outputs = model(**inputs)
model.select_batch_mode = False
(
test_batch_entropy,
test_batch_entropy_mean,
max_mean_batch_entropy,
) = outputs[-3:]
for _, v in inputs.items():
del v # free GPU mem
del inputs
test_batch_entropy_mean = (
test_batch_entropy_mean / max_mean_batch_entropy
)
test_batch_entropy = test_batch_entropy * test_batch_entropy_mean
select_size = min(
self.my_loader.args.train_batch_size,
test_batch["input_ids"].shape[0],
) # Handled the last batch if it is lower than the batch size
top_entropy = torch.topk(test_batch_entropy, select_size)
for k, v in test_batch.items():
test_batch[k] = torch.index_select(v, 0, top_entropy.indices)
self.batch_count += 1
return test_batch
@staticmethod
def batchify_data(data, curr_batch):
for k in data.keys():
if k in curr_batch.keys():
curr_batch[k] = torch.cat((curr_batch[k], data[k]), dim=0)
else:
curr_batch[k] = data[k]
return curr_batch
class CustomLoader:
def __init__(self, loaders, datasets, loader_args):
self.loaders = loaders
self.dataset = datasets
self.args = loader_args
self.current_epoch = 0
def __iter__(self):
iterator = MtUcertaintyIterator(self)
# for determinism across runs
# https://github.com/pytorch/examples/issues/501
for l in self.loaders:
if isinstance(l.sampler, DistributedSampler):
l.sampler.set_epoch(self.current_epoch)
self.current_epoch += 1
return iterator
def __len__(self):
loader_len = [len(loader) for loader in self.loaders]
if self.args.uniform_mt_sampling:
return int(
self.args.percent_of_max_data_size
* max(loader_len)
* len(self.loaders)
/ self.args.train_batch_size
)
elif self.args.uncert_batch:
return int(
max(loader_len)
* len(self.loaders)
* self.args.percent_of_max_data_size
)
else:
return sum(loader_len)
model = self.model
tasks = self.data_args.tasks
data_loaders = []
for dataset in self.train_dataset.datasets:
train_sampler = (
RandomSampler(dataset)
if self.args.local_rank == -1
else DistributedSampler(dataset)
)
data_loader = DataLoader(
dataset,
batch_size=self.args.train_batch_size,
sampler=train_sampler,
collate_fn=self.data_collator.collate_batch,
)
data_loaders.append(data_loader)
return CustomLoader(data_loaders, self.train_dataset, self.args)
def evaluate(
self,
eval_dataset: Optional[Dataset] = None,
prediction_loss_only: Optional[bool] = None,
context: str = None,
do_test_if_needed: bool = True,
):
datasets = eval_dataset or self.eval_datasets
logger.info("*** Evaluate on dev ***")
for task_name, eval_dataset in datasets.items():
logger.info(task_name)
self.compute_metrics = self.build_compute_metrics_fn(eval_dataset)
eval_dataloader = self.get_eval_dataloader(eval_dataset)
eval_result = self._prediction_loop(
eval_dataloader, description="Evaluation", task_name=task_name,
mode=eval_dataset.mode)
self._log(eval_result.metrics)
for key, value in eval_result.metrics.items():
logger.info(" %s = %s", key, value)
if self.args.tpu_metrics_debug:
# tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
xm.master_print(met.metrics_report())
def predict(
self,
eval_dataset: Optional[Dataset] = None,
prediction_loss_only: Optional[bool] = None,
scoring_model: Optional[str] = None
):
logging.info("*** Test ***")
datasets = eval_dataset or self.test_datasets
for task_name, test_dataset in datasets.items():
logger.info(task_name)
test_dataloader = self.get_test_dataloader(test_dataset)
test_result = self._prediction_loop(
test_dataloader, description="Prediction", task_name=task_name,
mode=test_dataset.mode)
self._log(test_result.metrics)
for key, value in test_result.metrics.items():
logger.info(" %s = %s", key, value)
softmax = torch.nn.Softmax(dim=1)
probs = softmax(torch.Tensor(test_result.predictions)).numpy().astype('float64')
logits = test_result.predictions.astype('float64')
output_mode = task_output_modes[task_name]
if output_mode == "classification":
predictions = np.argmax(logits, axis=1)
self.run_name = wandb.run.name
output_test_file = os.path.join(
self.args.output_dir,
f"{task_name}_test_iter_{self.run_name}.tsv",
)
if scoring_model is None:
scoring_model = self.run_name
if self.is_world_master():
with open(output_test_file, "w") as writer:
logger.info("***** Test results {} *****".format(task_name))
logger.info("***** Writing as {} *****".format(self.run_name))
if output_mode == "regression":
writer.write("index\tprediction\n")
else:
writer.write("index\tscoring_model\tprediction\tprobability\tlogits\n")
for index, item in enumerate(predictions):
if output_mode == "regression":
writer.write("%d\t%3.3f\n" % (index, item))
else:
i_probs = probs[index,:]
i_logits = logits[index,:]
i_logits = json.dumps(dict(zip(test_dataset.get_labels(), i_logits)))
writer.write(
"%d\t%s\t%s\t%3.6f\t%s\n" % (
index, scoring_model, test_dataset.get_labels()[item],
i_probs[item], i_logits)
)
def _prediction_loop(
self, dataloader: DataLoader, description: str, task_name: str, mode: str,
prediction_loss_only: Optional[bool] = None,
) -> PredictionOutput:
"""
Prediction/evaluation loop, shared by `evaluate()` and `predict()`.
Works both with or without labels.
"""
prediction_loss_only = prediction_loss_only if prediction_loss_only is not None else self.prediction_loss_only
model = self.model
# multi-gpu eval
if self.args.n_gpu > 1:
model = torch.nn.DataParallel(model)
else:
model = self.model
# Note: in torch.distributed mode, there's no point in wrapping the model
# inside a DistributedDataParallel as we'll be under `no_grad` anyways.
batch_size = dataloader.batch_size
logger.info("***** Running %s *****", description)
logger.info(" Num examples = %d", self.num_examples(dataloader))
logger.info(" Batch size = %d", batch_size)
eval_losses: List[float] = []
preds: torch.Tensor = None
label_ids: torch.Tensor = None
model.eval()
if is_tpu_available():
dataloader = pl.ParallelLoader(dataloader,
[self.args.device]).per_device_loader(self.args.device)
for inputs in tqdm(dataloader, desc=description):
has_labels = any(
inputs.get(k) is not None for k in ["labels", "lm_labels", "masked_lm_labels"])
for k, v in inputs.items():
inputs[k] = v.to(self.args.device)
with torch.no_grad():
outputs = model(**inputs)
if has_labels:
step_eval_loss, logits = outputs[:2]
eval_losses += [step_eval_loss.mean().item()]
else:
logits = outputs[0]
if not prediction_loss_only:
if preds is None:
preds = logits.detach()
else:
preds = torch.cat((preds, logits.detach()), dim=0)
if inputs.get("labels") is not None:
if label_ids is None:
label_ids = inputs["labels"].detach()
else:
label_ids = torch.cat((label_ids, inputs["labels"].detach()), dim=0)
if self.args.local_rank != -1:
# In distributed mode, concatenate all results from all nodes:
if preds is not None:
preds = self.distributed_concat(preds,
num_total_examples=self.num_examples(dataloader))
if label_ids is not None:
label_ids = self.distributed_concat(label_ids,
num_total_examples=self.num_examples(dataloader))
elif is_tpu_available():
# tpu-comment: Get all predictions and labels from all worker shards of eval dataset
if preds is not None:
preds = xm.mesh_reduce("eval_preds", preds, torch.cat)
if label_ids is not None:
label_ids = xm.mesh_reduce("eval_label_ids", label_ids, torch.cat)
# Finally, turn the aggregated tensors into numpy arrays.
if preds is not None:
preds = preds.cpu().numpy()
if label_ids is not None:
label_ids = label_ids.cpu().numpy()
if self.compute_metrics is not None and preds is not None and label_ids is not None:
metrics = self.compute_metrics(EvalPrediction(predictions=preds, label_ids=label_ids))
else:
metrics = {}
if len(eval_losses) > 0:
metrics[f"{task_name}_{mode}_loss"] = np.mean(eval_losses)
# Prefix all keys with {task_name}_{model}_
for key in list(metrics.keys()):
if not key.startswith(f"{task_name}_{mode}_"):
metrics[f"{task_name}_{mode}_{key}"] = metrics.pop(key)
return PredictionOutput(predictions=preds, label_ids=label_ids, metrics=metrics)
@staticmethod
def build_compute_metrics_fn(
eval_dataset
) -> Callable[[EvalPrediction], Dict]:
def compute_metrics_fn(p: EvalPrediction):
return compute_task_metrics(eval_dataset.task_name, p)
return compute_metrics_fn
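# Hedged usage sketch (not part of the original module); every object below (model,
# tokenizer, data_args and the datasets) is assumed to be constructed elsewhere.
#
#   training_args = MultiTaskTrainingArguments(output_dir="out", use_mt_uncertainty=True)
#   trainer = MultiTaskTrainer(
#       tokenizer=tokenizer, data_args=data_args, model=model, args=training_args,
#       train_dataset=train_dataset, eval_datasets=eval_datasets, test_datasets=test_datasets,
#   )
#   trainer.train()
#   trainer.evaluate()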
| 38.967742 | 118 | 0.550369 | [
"MIT"
] | Daupler/CA-MTL | src/mtl_trainer.py | 15,704 | Python |
# coding: utf-8
"""
Influx API Service
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
OpenAPI spec version: 0.1.0
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from influxdb_client.api_client import ApiClient
class HealthService(object):
"""NOTE: This class is auto generated by OpenAPI Generator
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
def get_health(self, **kwargs): # noqa: E501
"""Get the health of an instance anytime during execution. Allow us to check if the instance is still healthy. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_health(async_req=True)
>>> result = thread.get()
:param async_req bool
:param str zap_trace_span: OpenTracing span context
:return: HealthCheck
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_health_with_http_info(**kwargs) # noqa: E501
else:
(data) = self.get_health_with_http_info(**kwargs) # noqa: E501
return data
def get_health_with_http_info(self, **kwargs): # noqa: E501
"""Get the health of an instance anytime during execution. Allow us to check if the instance is still healthy. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_health_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool
:param str zap_trace_span: OpenTracing span context
:return: HealthCheck
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = ['zap_trace_span'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_health" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
if 'zap_trace_span' in local_var_params:
header_params['Zap-Trace-Span'] = local_var_params['zap_trace_span'] # noqa: E501
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = [] # noqa: E501
return self.api_client.call_api(
'/health', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='HealthCheck', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
| 34.153226 | 132 | 0.629988 | [
"MIT"
] | rhajek/influxdb-client-python | influxdb_client/service/health_service.py | 4,235 | Python |
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Generated code. DO NOT EDIT!
#
# Snippet for CreateSpecialistPool
# NOTE: This snippet has been automatically generated for illustrative purposes only.
# It may require modifications to work in your environment.
# To install the latest published package dependency, execute the following:
# python3 -m pip install google-cloud-aiplatform
# [START aiplatform_v1_generated_SpecialistPoolService_CreateSpecialistPool_async]
from google.cloud import aiplatform_v1
async def sample_create_specialist_pool():
# Create a client
client = aiplatform_v1.SpecialistPoolServiceAsyncClient()
# Initialize request argument(s)
specialist_pool = aiplatform_v1.SpecialistPool()
specialist_pool.name = "name_value"
specialist_pool.display_name = "display_name_value"
request = aiplatform_v1.CreateSpecialistPoolRequest(
parent="parent_value",
specialist_pool=specialist_pool,
)
# Make the request
operation = client.create_specialist_pool(request=request)
print("Waiting for operation to complete...")
response = await operation.result()
# Handle the response
print(response)
# [END aiplatform_v1_generated_SpecialistPoolService_CreateSpecialistPool_async]
| 33.218182 | 85 | 0.767378 | [
"Apache-2.0"
] | TheMichaelHu/python-aiplatform | samples/generated_samples/aiplatform_v1_generated_specialist_pool_service_create_specialist_pool_async.py | 1,827 | Python |
#!/usr/bin/env python
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT license.
import os
import argparse
from os.path import join as pjoin
import numpy as np
import networkx as nx
from textworld.render import visualize
from textworld.generator import Game
from textworld.generator.inform7 import Inform7Game
from textworld.generator.chaining import ChainingOptions
from textworld.generator.chaining import sample_quest
from textworld.utils import save_graph_to_svg
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument("game",
help="Use initial state of the provided game.")
parser.add_argument("--output", default="./",
help="Output folder where to sample the images. Default: %(default)s")
parser.add_argument("--quest-length", type=int, default=5,
help="Minimum nb. of actions required to complete the quest. Default: %(default)s")
parser.add_argument("--quest-breadth", type=int, default=1,
help="Control how non-linear a quest can be.")
parser.add_argument("--nb-quests", type=int, default=10,
help="Number of quests to sample. Default: %(default)s")
parser.add_argument("--seed", type=int,
help="Seed for random generator. Default: always different.")
parser.add_argument("-v", "--verbose", action="store_true",
help="Print more information.")
return parser.parse_args()
def build_tree_from_chains(chains, inform7):
G = nx.DiGraph()
root = "root"
labels = {}
for chain in chains:
commands = [root] + inform7.gen_commands_from_actions(chain.actions)
G.add_nodes_from(commands)
G.add_edges_from(zip(commands[:-1], commands[1:]))
labels.update(dict(zip(commands, commands)))
return G, labels
def print_chains(chains, inform7):
for i, chain in enumerate(chains):
commands = inform7.gen_commands_from_actions(chain.actions)
print("{:2d}. {}".format(i + 1, " > ".join(commands)))
def main():
args = parse_args()
# Load game for which to sample quests for.
game = Game.load(args.game.replace(".ulx", ".json"))
options = ChainingOptions()
options.backward = False
options.max_depth = args.quest_length
options.max_breadth = args.quest_breadth
options.rules_per_depth = {}
options.create_variables = False
options.rng = np.random.RandomState(args.seed)
# Sample quests.
chains = []
for i in range(args.nb_quests):
chain = sample_quest(game.world.state, options)
chains.append(chain)
inform7 = Inform7Game(game)
print_chains(chains, inform7)
# Convert chains to networkx graph/tree
filename_world = pjoin(args.output, "sample_world.png")
filename_tree = pjoin(args.output, "sample_tree.svg")
filename_graph = pjoin(args.output, "sample_graph.svg")
G, labels = build_tree_from_chains(chains, inform7)
if len(G) > 0:
image = visualize(game)
image.save(filename_world)
tree = nx.bfs_tree(G, "root")
save_graph_to_svg(tree, labels, filename_tree)
save_graph_to_svg(G, labels, filename_graph)
else:
try:
os.remove(filename_world)
os.remove(filename_tree)
os.remove(filename_graph)
except OSError:
pass
if __name__ == "__main__":
main()
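# Hedged command-line sketch (not part of the original script); the game path is an
# assumption for illustration:
#   python sample_quests.py my_game.ulx --quest-length 5 --nb-quests 10 --output ./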
| 32.95283 | 107 | 0.658746 | [
"MIT"
] | Bhaskers-Blu-Org2/TextWorld | scripts/sample_quests.py | 3,493 | Python |
# indirect way: import the whole math module
'''import math
num = int(input('Digite um número: '))
raiz = math.sqrt(num)
print('A raiz de {} é {}'.format(num, math.ceil(raiz)))'''
# direct way: import only the names that are needed
from math import sqrt, floor
num = int(input('Digite um número:'))
raiz = sqrt(num)
print('A raiz de {} é {:.2f}'.format(num, floor(raiz)))
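# Illustrative note: sqrt(10) is about 3.16, so floor() prints 3 here, while the ceil()
# used in the commented-out version above would print 4.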
| 25.333333 | 58 | 0.651316 | [
"MIT"
] | MarcelaSamili/Desafios-do-curso-de-Python | Python/PycharmProjects/aula 8/1.py | 308 | Python |
# -------------------------------------------------------------------------
# Copyright (c) 2015-2017 AT&T Intellectual Property
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# -------------------------------------------------------------------------
#
import copy
import json
from collections import defaultdict
import itertools
from osdf.utils.programming_utils import dot_notation, list_flatten
def group_policies_gen(flat_policies, config):
"""Filter policies using the following steps:
1. Apply prioritization among the policies that are sharing the same policy type and resource type
2. Remove redundant policies that may applicable across different types of resource
3. Filter policies based on type and return
:param flat_policies: list of flat policies
:return: Filtered policies
"""
filtered_policies = defaultdict(list)
policy_name = []
policies = [x for x in flat_policies if x[list(x.keys())[0]]["type"]] # drop ones without 'type'
priority = config.get('policy_info', {}).get('prioritization_attributes', {})
aggregated_policies = dict()
for plc in policies:
attrs = [dot_notation(plc[list(plc.keys())[0]], dot_path) for key in priority.keys() for dot_path in priority[key]]
attrs_list = [x if isinstance(x, list) else [x] for x in attrs]
attributes = [list_flatten(x) if isinstance(x, list) else x for x in attrs_list]
for y in itertools.product(*attributes):
aggregated_policies.setdefault(y, [])
aggregated_policies[y].append(plc)
for key in aggregated_policies.keys():
#aggregated_policies[key].sort(key=lambda x: x['priority'], reverse=True)
prioritized_policy = aggregated_policies[key][0]
if list(prioritized_policy.keys())[0] not in policy_name:
# TODO: Check logic here... should policy appear only once across all groups?
filtered_policies[prioritized_policy[list(prioritized_policy.keys())[0]]['type']].append(prioritized_policy)
policy_name.append(list(prioritized_policy.keys())[0])
return filtered_policies
def policy_name_as_regex(policy_name):
"""Get the correct policy name as a regex
(e.g. OOF_HAS_vCPE.cloudAttributePolicy ends up in policy as OOF_HAS_vCPE.Config_MS_cloudAttributePolicy.1.xml
So, for now, we query it as OOF_HAS_vCPE..*aicAttributePolicy.*)
:param policy_name: Example: OOF_HAS_vCPE.aicAttributePolicy
:return: regexp for policy: Example: OOF_HAS_vCPE..*aicAttributePolicy.*
"""
p = policy_name.partition('.')
return p[0] + p[1] + ".*" + p[2] + ".*"
def retrieve_node(req_json, reference):
"""
Get the child node(s) from the dot-notation [reference] and parent [req_json].
For placement and other requests, there are encoded JSONs inside the request or policy,
so we need to expand it and then do a search over the parent plus expanded JSON.
"""
req_json_copy = copy.deepcopy(req_json)
info = dot_notation(req_json_copy, reference)
return list_flatten(info) if isinstance(info, list) else info
| 46.371795 | 123 | 0.685651 | [
"Apache-2.0"
] | onap/optf-osdf | osdf/adapters/policy/utils.py | 3,617 | Python |
#!/usr/bin/env python
#
# File Name : ptbtokenizer.py
#
# Description : Do the PTB Tokenization and remove punctuations.
#
# Creation Date : 29-12-2014
# Last Modified : Thu Mar 19 09:53:35 2015
# Authors : Hao Fang <[email protected]> and Tsung-Yi Lin <[email protected]>
import os
import sys
import subprocess
import tempfile
import itertools
# path to the stanford corenlp jar
STANFORD_CORENLP_3_4_1_JAR = 'stanford-corenlp-3.4.1.jar'
# punctuations to be removed from the sentences
PUNCTUATIONS = ["''", "'", "``", "`", "-LRB-", "-RRB-", "-LCB-", "-RCB-", \
".", "?", "!", ",", ":", "-", "--", "...", ";"]
class PTBTokenizer:
"""Python wrapper of Stanford PTBTokenizer"""
def tokenize(self, captions_for_image):
cmd = ['java', '-cp', STANFORD_CORENLP_3_4_1_JAR, \
'edu.stanford.nlp.process.PTBTokenizer', \
'-preserveLines', '-lowerCase']
# ======================================================
# prepare data for PTB Tokenizer
# ======================================================
final_tokenized_captions_for_image = {}
image_id = [k for k, v in captions_for_image.items() for _ in range(len(v))]
sentences = '\n'.join([c['caption'].replace('\n', ' ') for k, v in captions_for_image.items() for c in v])
# ======================================================
# save sentences to temporary file
# ======================================================
path_to_jar_dirname=os.path.dirname(os.path.abspath(__file__))
tmp_file = tempfile.NamedTemporaryFile(mode='w', delete=False, dir=path_to_jar_dirname)
tmp_file.write(sentences)
tmp_file.close()
# ======================================================
# tokenize sentence
# ======================================================
cmd.append(os.path.basename(tmp_file.name))
p_tokenizer = subprocess.Popen(cmd, cwd=path_to_jar_dirname, \
stdout=subprocess.PIPE)
token_lines = p_tokenizer.communicate(input=sentences.rstrip())[0]
lines = token_lines.decode().split('\n')
# remove temp file
os.remove(tmp_file.name)
# ======================================================
# create dictionary for tokenized captions
# ======================================================
for k, line in zip(image_id, lines):
if not k in final_tokenized_captions_for_image:
final_tokenized_captions_for_image[k] = []
tokenized_caption = ' '.join([w for w in line.rstrip().split(' ') \
if w not in PUNCTUATIONS])
final_tokenized_captions_for_image[k].append(tokenized_caption)
return final_tokenized_captions_for_image
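def _demo_tokenize():
    """Hedged usage sketch (not part of the original wrapper). The caption mapping below
    mirrors the COCO-style structure this tokenizer expects, {image_id: [{'caption': ...}]};
    running it requires Java plus the Stanford CoreNLP jar referenced above."""
    tokenizer = PTBTokenizer()
    captions = {184321: [{'caption': 'A man riding a bike.'}]}
    # Expected result (lower-cased, punctuation stripped): {184321: ['a man riding a bike']}
    return tokenizer.tokenize(captions)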
| 40.971014 | 114 | 0.520693 | [
"MIT"
] | JaywongWang/DenseVideoCaptioning | densevid_eval-master/coco-caption/pycocoevalcap/tokenizer/ptbtokenizer.py | 2,827 | Python |
""" Implementation of mooda.read_pkl(path) """
import pickle
from .. import WaterFrame
def read_pkl(path_pkl):
"""
Get a WaterFrame from a pickle file.
Parameters
----------
path_pkl: str
Location of the pickle file.
Returns
-------
wf_pkl: WaterFrame
"""
wf_pkl = WaterFrame()
    with open(path_pkl, "rb") as pkl_file:
        pickle_dataset = pickle.load(pkl_file)
wf_pkl.data = pickle_dataset.get('data')
wf_pkl.vocabulary = pickle_dataset.get('vocabulary')
wf_pkl.metadata = pickle_dataset.get('metadata')
return wf_pkl
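def _demo_read_pkl():
    """Hedged usage sketch (not part of the original module); the pickle path below is an
    assumption for illustration only."""
    wf = read_pkl('example_waterframe.pkl')
    print(wf.metadata)
    return wf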
| 22.148148 | 57 | 0.600334 | [
"MIT"
] | rbardaji/mooda | mooda/input/read_pkl.py | 598 | Python |
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: requirement_instance.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from console_gateway_sdk.model.topboard import issue_pb2 as console__gateway__sdk_dot_model_dot_topboard_dot_issue__pb2
from google.protobuf import struct_pb2 as google_dot_protobuf_dot_struct__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='requirement_instance.proto',
package='tuna_service',
syntax='proto3',
serialized_options=_b('ZFgo.easyops.local/contracts/protorepo-models/easyops/model/tuna_service'),
serialized_pb=_b('\n\x1arequirement_instance.proto\x12\x0ctuna_service\x1a.console_gateway_sdk/model/topboard/issue.proto\x1a\x1cgoogle/protobuf/struct.proto\"\x99\x02\n\x13RequirementInstance\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\x0c\n\x04name\x18\x02 \x01(\t\x12\x10\n\x08sequence\x18\x03 \x01(\t\x12\r\n\x05given\x18\x04 \x01(\t\x12\x0c\n\x04when\x18\x05 \x01(\t\x12\x0c\n\x04then\x18\x06 \x01(\t\x12\x0c\n\x04type\x18\x07 \x01(\t\x12\x17\n\x0f\x64\x61taDescription\x18\x08 \x01(\t\x12\x0c\n\x04\x64\x61ta\x18\t \x01(\t\x12\x0b\n\x03tag\x18\n \x01(\t\x12\x15\n\rinterfaceName\x18\x0b \x01(\t\x12*\n\tcontracts\x18\x0c \x03(\x0b\x32\x17.google.protobuf.Struct\x12\x1e\n\x05ISSUE\x18\r \x03(\x0b\x32\x0f.topboard.IssueBHZFgo.easyops.local/contracts/protorepo-models/easyops/model/tuna_serviceb\x06proto3')
,
dependencies=[console__gateway__sdk_dot_model_dot_topboard_dot_issue__pb2.DESCRIPTOR,google_dot_protobuf_dot_struct__pb2.DESCRIPTOR,])
_REQUIREMENTINSTANCE = _descriptor.Descriptor(
name='RequirementInstance',
full_name='tuna_service.RequirementInstance',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='instanceId', full_name='tuna_service.RequirementInstance.instanceId', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='name', full_name='tuna_service.RequirementInstance.name', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='sequence', full_name='tuna_service.RequirementInstance.sequence', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='given', full_name='tuna_service.RequirementInstance.given', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='when', full_name='tuna_service.RequirementInstance.when', index=4,
number=5, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='then', full_name='tuna_service.RequirementInstance.then', index=5,
number=6, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='type', full_name='tuna_service.RequirementInstance.type', index=6,
number=7, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='dataDescription', full_name='tuna_service.RequirementInstance.dataDescription', index=7,
number=8, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='data', full_name='tuna_service.RequirementInstance.data', index=8,
number=9, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='tag', full_name='tuna_service.RequirementInstance.tag', index=9,
number=10, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='interfaceName', full_name='tuna_service.RequirementInstance.interfaceName', index=10,
number=11, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='contracts', full_name='tuna_service.RequirementInstance.contracts', index=11,
number=12, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='ISSUE', full_name='tuna_service.RequirementInstance.ISSUE', index=12,
number=13, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=123,
serialized_end=404,
)
_REQUIREMENTINSTANCE.fields_by_name['contracts'].message_type = google_dot_protobuf_dot_struct__pb2._STRUCT
_REQUIREMENTINSTANCE.fields_by_name['ISSUE'].message_type = console__gateway__sdk_dot_model_dot_topboard_dot_issue__pb2._ISSUE
DESCRIPTOR.message_types_by_name['RequirementInstance'] = _REQUIREMENTINSTANCE
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
RequirementInstance = _reflection.GeneratedProtocolMessageType('RequirementInstance', (_message.Message,), {
'DESCRIPTOR' : _REQUIREMENTINSTANCE,
'__module__' : 'requirement_instance_pb2'
# @@protoc_insertion_point(class_scope:tuna_service.RequirementInstance)
})
_sym_db.RegisterMessage(RequirementInstance)
DESCRIPTOR._options = None
# @@protoc_insertion_point(module_scope)
| 50.360248 | 812 | 0.75555 | [
"Apache-2.0"
] | easyopsapis/easyops-api-python | console_gateway_sdk/model/tuna_service/requirement_instance_pb2.py | 8,108 | Python |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# cob documentation build configuration file, created by
# sphinx-quickstart on Sun Jan 7 18:09:10 2018.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
#from recommonmark.parser import CommonMarkParser
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinxcontrib.programoutput',
'sphinx.ext.autodoc',
'sphinx.ext.doctest',
'sphinx.ext.intersphinx',
'sphinx.ext.todo',
'sphinx.ext.coverage',
'sphinx.ext.mathjax',
'sphinx.ext.ifconfig',
'sphinx.ext.viewcode',
'sphinx.ext.githubpages',
'sphinx.ext.napoleon',
'IPython.sphinxext.ipython_console_highlighting',
'IPython.sphinxext.ipython_directive'
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_parsers = {
'.md': 'recommonmark.parser.CommonMarkParser',
}
source_suffix = ['.rst', '.md']
#source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'COB'
copyright = '2019, Joseph Jeffers, Rob Schaefer'
author = 'Joseph Jeffers, Rob Schaefer'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
import cob
version = cob.__version__
# The full version, including alpha/beta/rc tags.
release = version
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
#html_theme = 'alabaster'
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# This is required for the alabaster theme
# refs: http://alabaster.readthedocs.io/en/latest/installation.html#sidebars
html_sidebars = {
'**': [
'globaltoc.html',
'relations.html', # needs 'show_related': True theme option to display
'searchbox.html',
]
}
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'cobdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'cob.tex', 'cob Documentation',
'Joseph Jeffers, Rob Schaefer', 'manual'),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'cob', 'cob Documentation',
[author], 1)
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'cob', 'cob Documentation',
author, 'cob', 'One line description of project.',
'Miscellaneous'),
]
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'https://docs.python.org/': None}
| 30.378238 | 79 | 0.683268 | [
"MIT"
] | C-Pauli/cob | docs/conf.py | 5,863 | Python |
from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Textfont(_BaseTraceHierarchyType):
# class properties
# --------------------
_parent_path_str = "scatter3d"
_path_str = "scatter3d.textfont"
_valid_props = {"color", "colorsrc", "family", "size", "sizesrc"}
# color
# -----
@property
def color(self):
"""
The 'color' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, rebeccapurple, saddlebrown, salmon,
sandybrown, seagreen, seashell, sienna, silver,
skyblue, slateblue, slategray, slategrey, snow,
springgreen, steelblue, tan, teal, thistle, tomato,
turquoise, violet, wheat, white, whitesmoke,
yellow, yellowgreen
- A list or array of any of the above
Returns
-------
str|numpy.ndarray
"""
return self["color"]
@color.setter
def color(self, val):
self["color"] = val
# colorsrc
# --------
@property
def colorsrc(self):
"""
Sets the source reference on Chart Studio Cloud for color .
The 'colorsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["colorsrc"]
@colorsrc.setter
def colorsrc(self, val):
self["colorsrc"] = val
# family
# ------
@property
def family(self):
"""
HTML font family - the typeface that will be applied by the web
browser. The web browser will only be able to apply a font if
it is available on the system which it operates. Provide
multiple font families, separated by commas, to indicate the
preference in which to apply fonts if they aren't available on
the system. The Chart Studio Cloud (at https://chart-
studio.plotly.com or on-premise) generates images on a server,
where only a select number of fonts are installed and
supported. These include "Arial", "Balto", "Courier New",
"Droid Sans",, "Droid Serif", "Droid Sans Mono", "Gravitas
One", "Old Standard TT", "Open Sans", "Overpass", "PT Sans
Narrow", "Raleway", "Times New Roman".
The 'family' property is a string and must be specified as:
- A non-empty string
Returns
-------
str
"""
return self["family"]
@family.setter
def family(self, val):
self["family"] = val
# size
# ----
@property
def size(self):
"""
The 'size' property is a number and may be specified as:
- An int or float in the interval [1, inf]
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
int|float|numpy.ndarray
"""
return self["size"]
@size.setter
def size(self, val):
self["size"] = val
# sizesrc
# -------
@property
def sizesrc(self):
"""
Sets the source reference on Chart Studio Cloud for size .
The 'sizesrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["sizesrc"]
@sizesrc.setter
def sizesrc(self, val):
self["sizesrc"] = val
# Self properties description
# ---------------------------
@property
def _prop_descriptions(self):
return """\
color
colorsrc
Sets the source reference on Chart Studio Cloud for
color .
family
HTML font family - the typeface that will be applied by
the web browser. The web browser will only be able to
apply a font if it is available on the system which it
operates. Provide multiple font families, separated by
commas, to indicate the preference in which to apply
fonts if they aren't available on the system. The Chart
Studio Cloud (at https://chart-studio.plotly.com or on-
premise) generates images on a server, where only a
select number of fonts are installed and supported.
These include "Arial", "Balto", "Courier New", "Droid
Sans",, "Droid Serif", "Droid Sans Mono", "Gravitas
One", "Old Standard TT", "Open Sans", "Overpass", "PT
Sans Narrow", "Raleway", "Times New Roman".
size
sizesrc
Sets the source reference on Chart Studio Cloud for
size .
"""
def __init__(
self,
arg=None,
color=None,
colorsrc=None,
family=None,
size=None,
sizesrc=None,
**kwargs
):
"""
Construct a new Textfont object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.scatter3d.Textfont`
color
colorsrc
Sets the source reference on Chart Studio Cloud for
color .
family
HTML font family - the typeface that will be applied by
the web browser. The web browser will only be able to
apply a font if it is available on the system which it
operates. Provide multiple font families, separated by
commas, to indicate the preference in which to apply
fonts if they aren't available on the system. The Chart
Studio Cloud (at https://chart-studio.plotly.com or on-
premise) generates images on a server, where only a
select number of fonts are installed and supported.
These include "Arial", "Balto", "Courier New", "Droid
Sans",, "Droid Serif", "Droid Sans Mono", "Gravitas
One", "Old Standard TT", "Open Sans", "Overpass", "PT
Sans Narrow", "Raleway", "Times New Roman".
size
sizesrc
Sets the source reference on Chart Studio Cloud for
size .
Returns
-------
Textfont
"""
super(Textfont, self).__init__("textfont")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the plotly.graph_objs.scatter3d.Textfont
constructor must be a dict or
an instance of :class:`plotly.graph_objs.scatter3d.Textfont`"""
)
# Handle skip_invalid
# -------------------
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
# Populate data dict with properties
# ----------------------------------
_v = arg.pop("color", None)
_v = color if color is not None else _v
if _v is not None:
self["color"] = _v
_v = arg.pop("colorsrc", None)
_v = colorsrc if colorsrc is not None else _v
if _v is not None:
self["colorsrc"] = _v
_v = arg.pop("family", None)
_v = family if family is not None else _v
if _v is not None:
self["family"] = _v
_v = arg.pop("size", None)
_v = size if size is not None else _v
if _v is not None:
self["size"] = _v
_v = arg.pop("sizesrc", None)
_v = sizesrc if sizesrc is not None else _v
if _v is not None:
self["sizesrc"] = _v
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
# Reset skip_invalid
# ------------------
self._skip_invalid = False
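# Hedged usage sketch (not part of this generated module): how a textfont is normally
# supplied through the public plotly API.
#
#   import plotly.graph_objects as go
#   fig = go.Figure(go.Scatter3d(
#       x=[0, 1], y=[0, 1], z=[0, 1], mode="markers+text", text=["a", "b"],
#       textfont=dict(family="Arial", size=14, color="royalblue"),
#   ))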
| 34.427609 | 82 | 0.557653 | [
"MIT"
] | 1abner1/plotly.py | packages/python/plotly/plotly/graph_objs/scatter3d/_textfont.py | 10,225 | Python |
# qubit number=5
# total number=48
import pyquil
from pyquil.api import local_forest_runtime, QVMConnection
from pyquil import Program, get_qc
from pyquil.gates import *
import numpy as np
conn = QVMConnection()
def make_circuit()-> Program:
prog = Program() # circuit begin
prog += H(0) # number=3
prog += Z(2) # number=28
prog += H(1) # number=4
prog += RX(2.664070570244145,1) # number=39
prog += H(2) # number=5
prog += H(3) # number=6
prog += H(4) # number=21
prog += H(0) # number=1
prog += H(3) # number=40
prog += Y(4) # number=35
prog += H(1) # number=2
prog += H(2) # number=7
prog += H(3) # number=8
prog += H(0) # number=25
prog += CZ(1,0) # number=26
prog += H(0) # number=27
prog += H(0) # number=36
prog += CZ(1,0) # number=37
prog += H(0) # number=38
prog += CNOT(1,0) # number=41
prog += CNOT(1,0) # number=45
prog += X(0) # number=46
prog += CNOT(1,0) # number=47
prog += CNOT(1,0) # number=43
prog += CNOT(1,0) # number=34
prog += CNOT(1,0) # number=24
prog += CNOT(0,1) # number=29
prog += CNOT(2,3) # number=44
prog += X(1) # number=30
prog += CNOT(0,1) # number=31
prog += X(2) # number=11
prog += X(3) # number=12
prog += X(0) # number=13
prog += X(1) # number=14
prog += X(2) # number=15
prog += X(3) # number=16
prog += H(0) # number=17
prog += H(1) # number=18
prog += H(2) # number=19
prog += H(3) # number=20
# circuit end
return prog
def summrise_results(bitstrings) -> dict:
d = {}
for l in bitstrings:
if d.get(l) is None:
d[l] = 1
else:
d[l] = d[l] + 1
return d
if __name__ == '__main__':
prog = make_circuit()
qvm = get_qc('5q-qvm')
results = qvm.run_and_measure(prog,1024)
bitstrings = np.vstack([results[i] for i in qvm.qubits()]).T
bitstrings = [''.join(map(str, l)) for l in bitstrings]
writefile = open("../data/startPyquil1068.csv","w")
print(summrise_results(bitstrings),file=writefile)
writefile.close()
| 25.139535 | 64 | 0.546253 | [
"BSD-3-Clause"
] | UCLA-SEAL/QDiff | benchmark/startPyquil1068.py | 2,162 | Python |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Tests for the Microsoft Internet Explorer WebCache database."""
import unittest
from plaso.lib import definitions
from plaso.parsers.esedb_plugins import msie_webcache
from tests.parsers.esedb_plugins import test_lib
class MsieWebCacheESEDBPluginTest(test_lib.ESEDBPluginTestCase):
"""Tests for the MSIE WebCache ESE database plugin."""
# pylint: disable=protected-access
def testConvertHeadersValues(self):
"""Tests the _ConvertHeadersValues function."""
plugin = msie_webcache.MsieWebCacheESEDBPlugin()
binary_value = (
b'HTTP/1.1 200 OK\r\nContent-Type: image/png\r\n'
b'X-Content-Type-Options: nosniff\r\nContent-Length: 2759\r\n'
b'X-XSS-Protection: 1; mode=block\r\n'
b'Alternate-Protocol: 80:quic\r\n\r\n')
expected_headers_value = (
'[HTTP/1.1 200 OK; Content-Type: image/png; '
'X-Content-Type-Options: nosniff; Content-Length: 2759; '
'X-XSS-Protection: 1; mode=block; '
'Alternate-Protocol: 80:quic]')
headers_value = plugin._ConvertHeadersValues(binary_value)
self.assertEqual(headers_value, expected_headers_value)
def testProcessOnDatabaseWithPartitionsTable(self):
"""Tests the Process function on database with a Partitions table."""
plugin = msie_webcache.MsieWebCacheESEDBPlugin()
storage_writer = self._ParseESEDBFileWithPlugin(['WebCacheV01.dat'], plugin)
self.assertEqual(storage_writer.number_of_events, 1372)
self.assertEqual(storage_writer.number_of_extraction_warnings, 0)
self.assertEqual(storage_writer.number_of_recovery_warnings, 0)
# The order in which ESEDBPlugin._GetRecordValues() generates events is
# nondeterministic hence we sort the events.
events = list(storage_writer.GetSortedEvents())
expected_event_values = {
'container_identifier': 1,
'data_type': 'msie:webcache:containers',
'date_time': '2014-05-12 07:30:25.4861987',
'directory': (
'C:\\Users\\test\\AppData\\Local\\Microsoft\\Windows\\'
'INetCache\\IE\\'),
'name': 'Content',
'set_identifier': 0,
'timestamp_desc': definitions.TIME_DESCRIPTION_LAST_ACCESS}
self.CheckEventValues(storage_writer, events[573], expected_event_values)
def testProcessOnDatabaseWithPartitionsExTable(self):
"""Tests the Process function on database with a PartitionsEx table."""
plugin = msie_webcache.MsieWebCacheESEDBPlugin()
storage_writer = self._ParseESEDBFileWithPlugin(
['PartitionsEx-WebCacheV01.dat'], plugin)
self.assertEqual(storage_writer.number_of_events, 4200)
self.assertEqual(storage_writer.number_of_extraction_warnings, 3)
self.assertEqual(storage_writer.number_of_recovery_warnings, 0)
# The order in which ESEDBPlugin._GetRecordValues() generates events is
# nondeterministic hence we sort the events.
events = list(storage_writer.GetSortedEvents())
expected_event_values = {
'access_count': 5,
'cache_identifier': 0,
'cached_file_size': 726,
'cached_filename': 'b83d57c0[1].svg',
'container_identifier': 14,
'data_type': 'msie:webcache:container',
'date_time': '2019-03-20 17:22:14.0000000',
'entry_identifier': 63,
'sync_count': 0,
'response_headers': (
'[HTTP/1.1 200; content-length: 726; content-type: image/svg+xml; '
'x-cache: TCP_HIT; x-msedge-ref: Ref A: 3CD5FCBC8EAD4E0A80FA41A62'
'FBC8CCC Ref B: PRAEDGE0910 Ref C: 2019-12-16T20:55:28Z; date: '
'Mon, 16 Dec 2019 20:55:28 GMT]'),
'timestamp_desc': definitions.TIME_DESCRIPTION_MODIFICATION,
'url': 'https://www.bing.com/rs/3R/kD/ic/878ca0cd/b83d57c0.svg'}
self.CheckEventValues(storage_writer, events[100], expected_event_values)
if __name__ == '__main__':
unittest.main()
| 39.23 | 80 | 0.702014 | ["Apache-2.0"] | ColdSmoke627/plaso | tests/parsers/esedb_plugins/msie_webcache.py | 3,923 | Python |
""" converted from Matlab code
source: http://www.robots.ox.ac.uk/~fwood/teaching/AIMS_CDT_ML_2015/homework/HW_2_em/
"""
import numpy as np
def m_step_gaussian_mixture(data, gamma):
"""% Performs the M-step of the EM algorithm for gaussain mixture model.
%
% @param data : n x d matrix with rows as d dimensional data points
% @param gamma : n x k matrix of resposibilities
%
% @return pi : k x 1 array
% @return mu : k x d matrix of maximized cluster centers
% @return sigma : cell array of maximized
%
"""
n = np.shape(data)[0]
d = np.shape(data)[1]
k = np.shape(gamma)[1]
pi = np.zeros(k)
mu = np.zeros((k, d))
sigma = np.zeros((k, d, d))
for kk in range(k):
Nkk = np.sum(gamma[:, kk])
pi[kk] = Nkk / n
for dd in range(d):
mu[kk, dd] = np.sum(gamma[:, kk] * data[:, dd], axis=0) / Nkk
for kk in range(k):
Nkk = np.sum(gamma[:, kk])
centered_data = data - mu[kk, :]
for nn in range(n):
sigma[kk] += gamma[nn, kk] * np.dot(centered_data[nn, None].T, centered_data[nn, None])
sigma[kk] /= Nkk
return [mu, sigma, pi]
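# Minimal usage sketch (not part of the original Matlab port): one M-step on
# random data with uniform responsibilities, just to show the expected shapes.
# The sizes below (n=100, d=2, k=3) are arbitrary example values.
if __name__ == '__main__':
    rng = np.random.RandomState(0)
    data = rng.randn(100, 2)               # n x d data matrix
    gamma = np.full((100, 3), 1.0 / 3.0)   # n x k responsibilities
    mu, sigma, pi = m_step_gaussian_mixture(data, gamma)
    print(mu.shape, sigma.shape, pi.shape)  # (3, 2) (3, 2, 2) (3,)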
| 29.682927 | 99 | 0.555464 | ["MIT"] | leonardbj/AIMS | src/ML_Algorithms/ExpectationMaximization/m_step_gaussian_mixture.py | 1,217 | Python |
from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
import numpy as np
__all__ = [
"wigner3j",
"get_camb_cl",
"scale_dust",
]
def blackbody(nu, ref_freq=353.0):
"""
The ratio of the blackbody function for dust at frequency nu
over the value for reference frequency ref_freq
Arguments
---------
nu : float
Frequency in GHz.
ref_freq : float
Reference frequency in GHz.
Returns
-------
blackbody_ratio : float
B(nu, T_dust) / B(nu_ref, T_dust)
"""
k = 1.38064852e-23 # Boltzmann constant
h = 6.626070040e-34 # Planck constant
T = 19.6
nu_ref = ref_freq * 1.0e9
nu *= 1.0e9 # GHz -> Hz
x = h * nu / k / T
x_ref = h * nu_ref / k / T
return x ** 3 / x_ref ** 3 * (np.exp(x_ref) - 1) / (np.exp(x) - 1)
def rj2cmb(nu_in):
"""
Conversion from Rayleigh-Jeans units to CMB temperature units
Arguments
---------
nu_in : float
Frequency in GHz.
Returns
-------
cal_fac : float
Number by which to multiply a RJ temperature to get a CMB temp
"""
k = 1.38064852e-23 # Boltzmann constant
h = 6.626070040e-34 # Planck constant
T = 2.72548 # Cmb BB temp in K
nu = nu_in * 1.0e9 # GHz -> Hz
x = h * nu / k / T
return (np.exp(x) - 1.0) ** 2 / (x ** 2 * np.exp(x))
def scale_dust(freq0, freq1, ref_freq, beta, delta_beta=None, deriv=False):
"""
Get the factor by which you must multiply the cross spectrum from maps of
frequencies freq0 and freq1 to match the dust power at ref_freq given
    spectral index beta.
If deriv is True, return the frequency scaling at the reference beta,
and the first derivative w.r.t. beta.
Otherwise if delta_beta is given, return the scale factor adjusted
for a linearized offset delta_beta from the reference beta.
Arguments
---------
freq0 : float
Frequency of map0 in GHz.
freq1 : float
Frequency of map1 in GHz.
ref_freq : float
Reference frequency from which to compute relative scaling in GHz.
beta : float
Dust spectral index.
delta_beta : float
Difference from beta-- scaling computed as a first order Taylor
expansion from original beta-scaling.
deriv : bool
If true, return the frequency scaling at the reference beta, along with
the first derivative w.r.t. beta at the reference beta.
Returns
-------
freq_scale : float
The relative scaling factor for the dust cross spectrum-- multiply by
this number to get the dust spectrum at the reference frequency
-- or --
freq_scale, deriv : floats
The relative scaling factor and its derivative
"""
freq_scale = (
rj2cmb(freq0)
* rj2cmb(freq1)
/ rj2cmb(ref_freq) ** 2.0
* blackbody(freq0, ref_freq=ref_freq)
* blackbody(freq1, ref_freq=ref_freq)
* (freq0 * freq1 / ref_freq ** 2) ** (beta - 2.0)
)
if deriv or delta_beta is not None:
delta = np.log(freq0 * freq1 / ref_freq ** 2)
if deriv:
return (freq_scale, freq_scale * delta)
return freq_scale * (1 + delta * delta_beta)
return freq_scale
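# Illustrative usage sketch (not part of the original module; the frequencies
# and the beta value below are arbitrary assumptions, not recommended inputs):
#
#   scale = scale_dust(150.0, 220.0, ref_freq=353.0, beta=1.54)
#   scale_ref, dscale_dbeta = scale_dust(150.0, 220.0, ref_freq=353.0,
#                                        beta=1.54, deriv=True)
#   scale_lin = scale_dust(150.0, 220.0, ref_freq=353.0, beta=1.54,
#                          delta_beta=0.05)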
def wigner3j(l2, m2, l3, m3):
r"""
Wigner 3j symbols computed for all valid values of ``L``, as in:
.. math::
\begin{pmatrix}
\ell_2 & \ell_3 & L \\
m_2 & m_3 & 0 \\
\end{pmatrix}
Arguments
---------
l2, m2, l3, m3 : int
The ell and m values for which to compute the symbols.
Returns
-------
fj : array_like
Array of size ``l2 + l3 + 2``, indexed by ``L``
lmin : int
The minimum value of ``L`` for which ``fj`` is non-zero.
lmax : int
The maximum value of ``L`` for which ``fj`` is non-zero.
"""
import camb
try:
from camb.mathutils import threej
except ImportError:
from camb.bispectrum import threej
arr = threej(l2, l3, m2, m3)
lmin = np.max([np.abs(l2 - l3), np.abs(m2 + m3)])
lmax = l2 + l3
fj = np.zeros(lmax + 2, dtype=arr.dtype)
fj[lmin : lmax + 1] = arr
return fj, lmin, lmax
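# Illustrative call (requires camb for the threej computation; the quantum
# numbers below are arbitrary example values):
#
#   fj, lmin, lmax = wigner3j(l2=2, m2=0, l3=4, m3=0)
#   # fj[L] can be non-zero only for max(|l2 - l3|, |m2 + m3|) <= L <= l2 + l3,
#   # so this call returns lmin == 2 and lmax == 6.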
def get_camb_cl(r, lmax, nt=None, spec="total", lfac=True):
"""
Compute camb spectrum with tensors and lensing.
Parameter values are from arXiv:1807.06209 Table 1 Plik best fit
Arguments
---------
r : float
Tensor-to-scalar ratio
lmax : int
Maximum ell for which to compute spectra
nt : scalar, optional
Tensor spectral index. If not supplied, assumes
slow-roll consistency relation.
spec : string, optional
Spectrum component to return. Can be 'total', 'unlensed_total',
'unlensed_scalar', 'lensed_scalar', 'tensor', 'lens_potential'.
lfac: bool, optional
If True, multiply Cls by ell*(ell+1)/2/pi
Returns
-------
cls : array_like
        Array of spectra of shape (nspec, lmax + 1).
Diagonal ordering (TT, EE, BB, TE).
"""
# Set up a new set of parameters for CAMB
import camb
pars = camb.CAMBparams()
# This function sets up CosmoMC-like settings, with one massive neutrino and
# helium set using BBN consistency
pars.set_cosmology(
H0=67.32,
ombh2=0.022383,
omch2=0.12011,
mnu=0.06,
omk=0,
tau=0.0543,
)
ln1010As = 3.0448
pars.InitPower.set_params(As=np.exp(ln1010As) / 1.0e10, ns=0.96605, r=r, nt=nt)
if lmax < 2500:
# This results in unacceptable bias. Use higher lmax, then cut it down
lmax0 = 2500
else:
lmax0 = lmax
pars.set_for_lmax(lmax0, lens_potential_accuracy=2)
pars.WantTensors = True
pars.do_lensing = True
# calculate results for these parameters
results = camb.get_results(pars)
powers = results.get_cmb_power_spectra(pars, CMB_unit="muK", raw_cl=not lfac)
totCL = powers[spec][: lmax + 1, :4].T
return totCL
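if __name__ == "__main__":
    # Minimal, illustrative demo only: the r and lmax values are arbitrary
    # choices.  get_camb_cl needs the optional camb package, so fail softly
    # when it is not installed.
    try:
        cls = get_camb_cl(r=0.01, lmax=200)
        print("spectra shape:", cls.shape)  # (4, lmax + 1): TT, EE, BB, TE
    except ImportError:
        print("camb is not installed; skipping the get_camb_cl demo")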
| 27.577273 | 83 | 0.60656 | ["MIT"] | SPIDER-CMB/xfaster | xfaster/spec_tools.py | 6,067 | Python |
#!/usr/bin/env python
# coding: utf-8
#
# Project: Azimuthal integration
# https://github.com/silx-kit/pyFAI
#
# Copyright (C) 2015-2018 European Synchrotron Radiation Facility, Grenoble, France
#
# Principal author: Jérôme Kieffer ([email protected])
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from __future__ import absolute_import, division, print_function
"""Test suite for pickled objects"""
__author__ = "Jérôme Kieffer"
__contact__ = "[email protected]"
__license__ = "MIT"
__copyright__ = "European Synchrotron Radiation Facility, Grenoble, France"
__date__ = "18/10/2018"
import numpy
from pyFAI.azimuthalIntegrator import AzimuthalIntegrator
from pyFAI.detectors import detector_factory
from pickle import dumps, loads
import unittest
import logging
logger = logging.getLogger(__name__)
class TestPickle(unittest.TestCase):
@classmethod
def setUpClass(cls):
super(TestPickle, cls).setUpClass()
cls.ai = AzimuthalIntegrator(1.0, detector="Pilatus100k")
cls.ai.wavelength = 1e-10
cls.npt = 100
cls.data = numpy.random.random(cls.ai.detector.shape)
@classmethod
def tearDownClass(cls):
super(TestPickle, cls).tearDownClass()
cls.data = cls.ai = cls.npt = None
def test_Detector_pickle(self):
det = self.ai.detector # type: Detector
dets = dumps(det)
self.assert_(dets, "pickle works")
rest = loads(dets)
self.assert_(rest, "unpickle works")
self.assertEqual(rest.shape, self.ai.detector.MAX_SHAPE)
# test the binning
mar = detector_factory("RayonixMx225")
mar.guess_binning((2048, 2048))
self.assertEqual(mar.binning, (3, 3), "binning OK")
mars = dumps(mar)
marr = loads(mars)
self.assertEqual(mar.binning, marr.binning, "restored binning OK")
def test_AzimuthalIntegrator_pickle(self):
spectra = self.ai.integrate1d(self.data, self.npt) # force lut generation
ais = dumps(self.ai)
newai = loads(ais) # type: AzimuthalIntegrator
self.assertEqual(newai._cached_array.keys(), self.ai._cached_array.keys())
for key in self.ai._cached_array.keys():
if isinstance(self.ai._cached_array[key], numpy.ndarray):
self.assertEqual(abs(newai._cached_array[key] - self.ai._cached_array[key]).max(), 0,
"key %s is the same" % key)
else:
self.assertEqual(newai._cached_array[key], self.ai._cached_array[key],
"key %s is the same: %s %s" %
(key, newai._cached_array[key], self.ai._cached_array[key]))
for first, second in zip(newai.integrate1d(self.data, self.npt), spectra):
self.assertEqual(abs(first - second).max(), 0, "Spectra are the same")
def test_Calibrant(self):
from pyFAI import calibrant
calibrant = calibrant.CalibrantFactory()('AgBh')
assert dumps(calibrant)
assert loads(dumps(calibrant))
def suite():
loader = unittest.defaultTestLoader.loadTestsFromTestCase
testsuite = unittest.TestSuite()
testsuite.addTest(loader(TestPickle))
return testsuite
if __name__ == '__main__':
runner = unittest.TextTestRunner()
runner.run(suite())
| 38.646018 | 101 | 0.686512 | ["MIT"] | Jona-Engel/pyFAI | pyFAI/test/test_pickle.py | 4,371 | Python |
import string
import random
from functools import wraps
from urllib.parse import urlencode
from seafileapi.exceptions import ClientHttpError, DoesNotExist
def randstring(length=0):
if length == 0:
length = random.randint(1, 30)
    return ''.join(random.choice(string.ascii_lowercase) for _ in range(length))
def urljoin(base, *args):
url = base
if url[-1] != '/':
url += '/'
for arg in args:
arg = arg.strip('/')
url += arg + '/'
if '?' in url:
url = url[:-1]
return url
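# Behaviour sketch (illustrative values only):
#
#   urljoin('https://example.com/api2', 'repos', 'some-repo-id')
#   # -> 'https://example.com/api2/repos/some-repo-id/'
#   urljoin('https://example.com/api2', 'repos?type=mine')
#   # -> 'https://example.com/api2/repos?type=mine'  (trailing '/' dropped when '?' is present)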
def raise_does_not_exist(msg):
"""Decorator to turn a function that get a http 404 response to a
:exc:`DoesNotExist` exception."""
def decorator(func):
@wraps(func)
def wrapped(*args, **kwargs):
try:
return func(*args, **kwargs)
except ClientHttpError as e:
if e.code == 404:
raise DoesNotExist(msg)
else:
raise
return wrapped
return decorator
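# Typical use of the decorator (hypothetical method, mirroring the docstring):
#
#   @raise_does_not_exist('The requested repo does not exist')
#   def get_repo(self, repo_id):
#       ...
#
# A ClientHttpError with code 404 raised inside the wrapped function is
# re-raised as DoesNotExist(msg); any other error propagates unchanged.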
def to_utf8(obj):
if isinstance(obj, str):
return obj.encode('utf-8')
return obj
def querystr(**kwargs):
return '?' + urlencode(kwargs)
def utf8lize(obj):
if isinstance(obj, dict):
return {k: to_utf8(v) for k, v in obj.items()}
if isinstance(obj, list):
        return [to_utf8(x) for x in obj]
    if isinstance(obj, str):
return obj.encode('utf-8')
return obj
| 24.862069 | 74 | 0.575589 | ["Apache-2.0"] | AdriCueGim/python-seafile | seafileapi/utils.py | 1,442 | Python |
# -*- coding: utf8 -*-
# Copyright (c) 2017-2018 THL A29 Limited, a Tencent company. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import warnings
from tencentcloud.common.abstract_model import AbstractModel
class AssignProjectRequest(AbstractModel):
"""AssignProject请求参数结构体
"""
def __init__(self):
"""
:param InstanceIds: 实例ID列表,格式如:cmgo-p8vnipr5。与云数据库控制台页面中显示的实例ID相同
:type InstanceIds: list of str
:param ProjectId: 项目ID
:type ProjectId: int
"""
self.InstanceIds = None
self.ProjectId = None
def _deserialize(self, params):
self.InstanceIds = params.get("InstanceIds")
self.ProjectId = params.get("ProjectId")
memeber_set = set(params.keys())
for name, value in vars(self).items():
if name in memeber_set:
memeber_set.remove(name)
if len(memeber_set) > 0:
warnings.warn("%s fileds are useless." % ",".join(memeber_set), Warning)
class AssignProjectResponse(AbstractModel):
"""AssignProject返回参数结构体
"""
def __init__(self):
"""
:param FlowIds: 返回的异步任务ID列表
:type FlowIds: list of int non-negative
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.FlowIds = None
self.RequestId = None
def _deserialize(self, params):
self.FlowIds = params.get("FlowIds")
self.RequestId = params.get("RequestId")
memeber_set = set(params.keys())
for name, value in vars(self).items():
if name in memeber_set:
memeber_set.remove(name)
if len(memeber_set) > 0:
warnings.warn("%s fileds are useless." % ",".join(memeber_set), Warning)
class ClientConnection(AbstractModel):
"""客户端连接信息,包括客户端IP和连接数
"""
def __init__(self):
"""
:param IP: 连接的客户端IP
:type IP: str
:param Count: 对应客户端IP的连接数
:type Count: int
"""
self.IP = None
self.Count = None
def _deserialize(self, params):
self.IP = params.get("IP")
self.Count = params.get("Count")
memeber_set = set(params.keys())
for name, value in vars(self).items():
if name in memeber_set:
memeber_set.remove(name)
if len(memeber_set) > 0:
warnings.warn("%s fileds are useless." % ",".join(memeber_set), Warning)
class CreateDBInstanceHourRequest(AbstractModel):
"""CreateDBInstanceHour请求参数结构体
"""
def __init__(self):
"""
:param Memory: 实例内存大小,单位:GB
:type Memory: int
:param Volume: 实例硬盘大小,单位:GB
:type Volume: int
:param ReplicateSetNum: 副本集个数,1为单副本集实例,大于1为分片集群实例,最大不超过10
:type ReplicateSetNum: int
:param SecondaryNum: 每个副本集内从节点个数,目前只支持从节点数为2
:type SecondaryNum: int
:param EngineVersion: MongoDB引擎版本,值包括MONGO_3_WT 、MONGO_3_ROCKS和MONGO_36_WT
:type EngineVersion: str
:param Machine: 实例类型,GIO:高IO版;TGIO:高IO万兆
:type Machine: str
:param GoodsNum: 实例数量,默认值为1, 最小值1,最大值为10
:type GoodsNum: int
:param Zone: 可用区信息,格式如:ap-guangzhou-2
:type Zone: str
:param InstanceRole: 实例角色,支持值包括:MASTER-表示主实例,DR-表示灾备实例,RO-表示只读实例
:type InstanceRole: str
:param InstanceType: 实例类型,REPLSET-副本集,SHARD-分片集群
:type InstanceType: str
:param Encrypt: 数据是否加密,当且仅当引擎版本为MONGO_3_ROCKS,可以选择加密
:type Encrypt: int
:param VpcId: 私有网络ID,如果不传则默认选择基础网络
:type VpcId: str
:param SubnetId: 私有网络下的子网ID,如果设置了 VpcId,则 SubnetId必填
:type SubnetId: str
:param ProjectId: 项目ID,不填为默认项目
:type ProjectId: int
:param SecurityGroup: 安全组参数
:type SecurityGroup: list of str
"""
self.Memory = None
self.Volume = None
self.ReplicateSetNum = None
self.SecondaryNum = None
self.EngineVersion = None
self.Machine = None
self.GoodsNum = None
self.Zone = None
self.InstanceRole = None
self.InstanceType = None
self.Encrypt = None
self.VpcId = None
self.SubnetId = None
self.ProjectId = None
self.SecurityGroup = None
def _deserialize(self, params):
self.Memory = params.get("Memory")
self.Volume = params.get("Volume")
self.ReplicateSetNum = params.get("ReplicateSetNum")
self.SecondaryNum = params.get("SecondaryNum")
self.EngineVersion = params.get("EngineVersion")
self.Machine = params.get("Machine")
self.GoodsNum = params.get("GoodsNum")
self.Zone = params.get("Zone")
self.InstanceRole = params.get("InstanceRole")
self.InstanceType = params.get("InstanceType")
self.Encrypt = params.get("Encrypt")
self.VpcId = params.get("VpcId")
self.SubnetId = params.get("SubnetId")
self.ProjectId = params.get("ProjectId")
self.SecurityGroup = params.get("SecurityGroup")
memeber_set = set(params.keys())
for name, value in vars(self).items():
if name in memeber_set:
memeber_set.remove(name)
if len(memeber_set) > 0:
warnings.warn("%s fileds are useless." % ",".join(memeber_set), Warning)
class CreateDBInstanceHourResponse(AbstractModel):
"""CreateDBInstanceHour返回参数结构体
"""
def __init__(self):
"""
:param DealId: 订单ID
:type DealId: str
:param InstanceIds: 创建的实例ID列表
:type InstanceIds: list of str
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.DealId = None
self.InstanceIds = None
self.RequestId = None
def _deserialize(self, params):
self.DealId = params.get("DealId")
self.InstanceIds = params.get("InstanceIds")
self.RequestId = params.get("RequestId")
memeber_set = set(params.keys())
for name, value in vars(self).items():
if name in memeber_set:
memeber_set.remove(name)
if len(memeber_set) > 0:
warnings.warn("%s fileds are useless." % ",".join(memeber_set), Warning)
class CreateDBInstanceRequest(AbstractModel):
"""CreateDBInstance请求参数结构体
"""
def __init__(self):
"""
:param SecondaryNum: 每个副本集内从节点个数
:type SecondaryNum: int
:param Memory: 实例内存大小,单位:GB
:type Memory: int
:param Volume: 实例硬盘大小,单位:GB
:type Volume: int
:param MongoVersion: 版本号,当前支持 MONGO_3_WT、MONGO_3_ROCKS、MONGO_36_WT
:type MongoVersion: str
:param MachineCode: 机器类型,GIO:高IO版;TGIO:高IO万兆
:type MachineCode: str
:param GoodsNum: 实例数量,默认值为1, 最小值1,最大值为10
:type GoodsNum: int
:param Zone: 实例所属区域名称,格式如:ap-guangzhou-2
:type Zone: str
:param TimeSpan: 时长,购买月数
:type TimeSpan: int
:param Password: 实例密码
:type Password: str
:param ProjectId: 项目ID,不填为默认项目
:type ProjectId: int
:param SecurityGroup: 安全组参数
:type SecurityGroup: list of str
:param UniqVpcId: 私有网络ID,如果不传则默认选择基础网络
:type UniqVpcId: str
:param UniqSubnetId: 私有网络下的子网ID,如果设置了 VpcId,则 SubnetId必填
:type UniqSubnetId: str
"""
self.SecondaryNum = None
self.Memory = None
self.Volume = None
self.MongoVersion = None
self.MachineCode = None
self.GoodsNum = None
self.Zone = None
self.TimeSpan = None
self.Password = None
self.ProjectId = None
self.SecurityGroup = None
self.UniqVpcId = None
self.UniqSubnetId = None
def _deserialize(self, params):
self.SecondaryNum = params.get("SecondaryNum")
self.Memory = params.get("Memory")
self.Volume = params.get("Volume")
self.MongoVersion = params.get("MongoVersion")
self.MachineCode = params.get("MachineCode")
self.GoodsNum = params.get("GoodsNum")
self.Zone = params.get("Zone")
self.TimeSpan = params.get("TimeSpan")
self.Password = params.get("Password")
self.ProjectId = params.get("ProjectId")
self.SecurityGroup = params.get("SecurityGroup")
self.UniqVpcId = params.get("UniqVpcId")
self.UniqSubnetId = params.get("UniqSubnetId")
memeber_set = set(params.keys())
for name, value in vars(self).items():
if name in memeber_set:
memeber_set.remove(name)
if len(memeber_set) > 0:
warnings.warn("%s fileds are useless." % ",".join(memeber_set), Warning)
class CreateDBInstanceResponse(AbstractModel):
"""CreateDBInstance返回参数结构体
"""
def __init__(self):
"""
:param DealId: 订单ID
:type DealId: str
:param InstanceIds: 创建的实例ID列表
:type InstanceIds: list of str
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.DealId = None
self.InstanceIds = None
self.RequestId = None
def _deserialize(self, params):
self.DealId = params.get("DealId")
self.InstanceIds = params.get("InstanceIds")
self.RequestId = params.get("RequestId")
memeber_set = set(params.keys())
for name, value in vars(self).items():
if name in memeber_set:
memeber_set.remove(name)
if len(memeber_set) > 0:
warnings.warn("%s fileds are useless." % ",".join(memeber_set), Warning)
class DescribeClientConnectionsRequest(AbstractModel):
"""DescribeClientConnections请求参数结构体
"""
def __init__(self):
"""
:param InstanceId: 实例ID,格式如:cmgo-p8vnipr5。与云数据库控制台页面中显示的实例ID相同
:type InstanceId: str
"""
self.InstanceId = None
def _deserialize(self, params):
self.InstanceId = params.get("InstanceId")
memeber_set = set(params.keys())
for name, value in vars(self).items():
if name in memeber_set:
memeber_set.remove(name)
if len(memeber_set) > 0:
warnings.warn("%s fileds are useless." % ",".join(memeber_set), Warning)
class DescribeClientConnectionsResponse(AbstractModel):
"""DescribeClientConnections返回参数结构体
"""
def __init__(self):
"""
:param Clients: 客户端连接信息,包括客户端IP和对应IP的连接数量
注意:此字段可能返回 null,表示取不到有效值。
:type Clients: list of ClientConnection
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.Clients = None
self.RequestId = None
def _deserialize(self, params):
if params.get("Clients") is not None:
self.Clients = []
for item in params.get("Clients"):
obj = ClientConnection()
obj._deserialize(item)
self.Clients.append(obj)
self.RequestId = params.get("RequestId")
memeber_set = set(params.keys())
for name, value in vars(self).items():
if name in memeber_set:
memeber_set.remove(name)
if len(memeber_set) > 0:
warnings.warn("%s fileds are useless." % ",".join(memeber_set), Warning)
class DescribeDBInstancesRequest(AbstractModel):
"""DescribeDBInstances请求参数结构体
"""
def __init__(self):
"""
:param InstanceIds: 实例ID列表,格式如:cmgo-p8vnipr5。与云数据库控制台页面中显示的实例ID相同
:type InstanceIds: list of str
:param InstanceType: 实例类型,取值范围:0-所有实例,1-正式实例,2-临时实例, 3-只读实例,-1-正式实例+只读+灾备实例
:type InstanceType: int
:param ClusterType: 集群类型,取值范围:0-副本集实例,1-分片实例,-1-所有实例
:type ClusterType: int
:param Status: 实例状态,取值范围:0-待初始化,1-流程执行中,2-实例有效,-2-实例已过期
:type Status: list of int
:param VpcId: 私有网络的ID,基础网络则不传该参数
:type VpcId: str
:param SubnetId: 私有网络的子网ID,基础网络则不传该参数。入参设置该参数的同时,必须设置相应的VpcId
:type SubnetId: str
:param PayMode: 付费类型,取值范围:0-按量计费,1-包年包月,-1-按量计费+包年包月
:type PayMode: int
:param Limit: 单次请求返回的数量,最小值为1,最大值为100,默认值为20
:type Limit: int
:param Offset: 偏移量,默认值为0
:type Offset: int
:param OrderBy: 返回结果集排序的字段,目前支持:"ProjectId", "InstanceName", "CreateTime",默认为升序排序
:type OrderBy: str
:param OrderByType: 返回结果集排序方式,目前支持:"ASC"或者"DESC"
:type OrderByType: str
"""
self.InstanceIds = None
self.InstanceType = None
self.ClusterType = None
self.Status = None
self.VpcId = None
self.SubnetId = None
self.PayMode = None
self.Limit = None
self.Offset = None
self.OrderBy = None
self.OrderByType = None
def _deserialize(self, params):
self.InstanceIds = params.get("InstanceIds")
self.InstanceType = params.get("InstanceType")
self.ClusterType = params.get("ClusterType")
self.Status = params.get("Status")
self.VpcId = params.get("VpcId")
self.SubnetId = params.get("SubnetId")
self.PayMode = params.get("PayMode")
self.Limit = params.get("Limit")
self.Offset = params.get("Offset")
self.OrderBy = params.get("OrderBy")
self.OrderByType = params.get("OrderByType")
memeber_set = set(params.keys())
for name, value in vars(self).items():
if name in memeber_set:
memeber_set.remove(name)
if len(memeber_set) > 0:
warnings.warn("%s fileds are useless." % ",".join(memeber_set), Warning)
class DescribeDBInstancesResponse(AbstractModel):
"""DescribeDBInstances返回参数结构体
"""
def __init__(self):
"""
:param TotalCount: 符合查询条件的实例总数
:type TotalCount: int
:param InstanceDetails: 实例详细信息
:type InstanceDetails: list of MongoDBInstanceDetail
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.TotalCount = None
self.InstanceDetails = None
self.RequestId = None
def _deserialize(self, params):
self.TotalCount = params.get("TotalCount")
if params.get("InstanceDetails") is not None:
self.InstanceDetails = []
for item in params.get("InstanceDetails"):
obj = MongoDBInstanceDetail()
obj._deserialize(item)
self.InstanceDetails.append(obj)
self.RequestId = params.get("RequestId")
memeber_set = set(params.keys())
for name, value in vars(self).items():
if name in memeber_set:
memeber_set.remove(name)
if len(memeber_set) > 0:
warnings.warn("%s fileds are useless." % ",".join(memeber_set), Warning)
class DescribeSlowLogRequest(AbstractModel):
"""DescribeSlowLog请求参数结构体
"""
def __init__(self):
"""
:param InstanceId: 实例ID,格式如:cmgo-p8vnipr5。与云数据库控制台页面中显示的实例ID相同
:type InstanceId: str
:param StartTime: 慢日志起始时间,格式:yyyy-mm-dd hh:mm:ss,如:2019-06-01 10:00:00。查询起止时间间隔不能超过24小时,只允许查询最近7天内慢日志。
:type StartTime: str
:param EndTime: 慢日志终止时间,格式:yyyy-mm-dd hh:mm:ss,如:2019-06-02 12:00:00。查询起止时间间隔不能超过24小时,只允许查询最近7天内慢日志。
:type EndTime: str
:param SlowMS: 慢日志执行时间阈值,返回执行时间超过该阈值的慢日志,单位为毫秒(ms),最小为100毫秒。
:type SlowMS: int
:param Offset: 偏移量,最小值为0,最大值为10000,默认值为0。
:type Offset: int
:param Limit: 分页大小,最小值为1,最大值为100,默认值为20。
:type Limit: int
"""
self.InstanceId = None
self.StartTime = None
self.EndTime = None
self.SlowMS = None
self.Offset = None
self.Limit = None
def _deserialize(self, params):
self.InstanceId = params.get("InstanceId")
self.StartTime = params.get("StartTime")
self.EndTime = params.get("EndTime")
self.SlowMS = params.get("SlowMS")
self.Offset = params.get("Offset")
self.Limit = params.get("Limit")
memeber_set = set(params.keys())
for name, value in vars(self).items():
if name in memeber_set:
memeber_set.remove(name)
if len(memeber_set) > 0:
warnings.warn("%s fileds are useless." % ",".join(memeber_set), Warning)
class DescribeSlowLogResponse(AbstractModel):
"""DescribeSlowLog返回参数结构体
"""
def __init__(self):
"""
:param TotalCount: 符合查询条件的慢查询日志总数。
:type TotalCount: int
:param SlowLogList: 符合查询条件的慢查询日志详情。
:type SlowLogList: list of str
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.TotalCount = None
self.SlowLogList = None
self.RequestId = None
def _deserialize(self, params):
self.TotalCount = params.get("TotalCount")
self.SlowLogList = params.get("SlowLogList")
self.RequestId = params.get("RequestId")
memeber_set = set(params.keys())
for name, value in vars(self).items():
if name in memeber_set:
memeber_set.remove(name)
if len(memeber_set) > 0:
warnings.warn("%s fileds are useless." % ",".join(memeber_set), Warning)
class DescribeSpecInfoRequest(AbstractModel):
"""DescribeSpecInfo请求参数结构体
"""
def __init__(self):
"""
:param Zone: 可用区
:type Zone: str
"""
self.Zone = None
def _deserialize(self, params):
self.Zone = params.get("Zone")
memeber_set = set(params.keys())
for name, value in vars(self).items():
if name in memeber_set:
memeber_set.remove(name)
if len(memeber_set) > 0:
warnings.warn("%s fileds are useless." % ",".join(memeber_set), Warning)
class DescribeSpecInfoResponse(AbstractModel):
"""DescribeSpecInfo返回参数结构体
"""
def __init__(self):
"""
:param SpecInfoList: 实例售卖规格信息列表
:type SpecInfoList: list of SpecificationInfo
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.SpecInfoList = None
self.RequestId = None
def _deserialize(self, params):
if params.get("SpecInfoList") is not None:
self.SpecInfoList = []
for item in params.get("SpecInfoList"):
obj = SpecificationInfo()
obj._deserialize(item)
self.SpecInfoList.append(obj)
self.RequestId = params.get("RequestId")
memeber_set = set(params.keys())
for name, value in vars(self).items():
if name in memeber_set:
memeber_set.remove(name)
if len(memeber_set) > 0:
warnings.warn("%s fileds are useless." % ",".join(memeber_set), Warning)
class MongoDBInstance(AbstractModel):
"""实例信息
"""
def __init__(self):
"""
:param InstanceId: 实例ID
:type InstanceId: str
:param Region: 地域信息
:type Region: str
"""
self.InstanceId = None
self.Region = None
def _deserialize(self, params):
self.InstanceId = params.get("InstanceId")
self.Region = params.get("Region")
memeber_set = set(params.keys())
for name, value in vars(self).items():
if name in memeber_set:
memeber_set.remove(name)
if len(memeber_set) > 0:
warnings.warn("%s fileds are useless." % ",".join(memeber_set), Warning)
class MongoDBInstanceDetail(AbstractModel):
"""实例详情
"""
def __init__(self):
"""
:param InstanceId: 实例ID
:type InstanceId: str
:param InstanceName: 实例名称
:type InstanceName: str
:param PayMode: 付费类型,可能的返回值:1-包年包月;0-按量计费
:type PayMode: int
:param ProjectId: 项目ID
:type ProjectId: int
:param ClusterType: 集群类型,可能的返回值:0-副本集实例,1-分片实例,
:type ClusterType: int
:param Region: 地域信息
:type Region: str
:param Zone: 可用区信息
:type Zone: str
:param NetType: 网络类型,可能的返回值:0-基础网络,1-私有网络
:type NetType: int
:param VpcId: 私有网络的ID
:type VpcId: str
:param SubnetId: 私有网络的子网ID
:type SubnetId: str
:param Status: 实例状态,可能的返回值:0-待初始化,1-流程处理中,2-运行中,-2-实例已过期
:type Status: int
:param Vip: 实例IP
:type Vip: str
:param Vport: 端口号
:type Vport: int
:param CreateTime: 实例创建时间
:type CreateTime: str
:param DeadLine: 实例到期时间
:type DeadLine: str
:param MongoVersion: 实例版本信息
:type MongoVersion: str
:param Memory: 实例内存规格,单位为MB
:type Memory: int
:param Volume: 实例磁盘规格,单位为MB
:type Volume: int
:param CpuNum: 实例CPU核心数
:type CpuNum: int
:param MachineType: 实例机器类型
:type MachineType: str
:param SecondaryNum: 实例从节点数
:type SecondaryNum: int
:param ReplicationSetNum: 实例分片数
:type ReplicationSetNum: int
:param AutoRenewFlag: 实例自动续费标志,可能的返回值:0-手动续费,1-自动续费,2-确认不续费
:type AutoRenewFlag: int
:param UsedVolume: 已用容量,单位MB
:type UsedVolume: int
:param MaintenanceStart: 维护窗口起始时间
:type MaintenanceStart: str
:param MaintenanceEnd: 维护窗口结束时间
:type MaintenanceEnd: str
:param ReplicaSets: 分片信息
:type ReplicaSets: list of MongodbShardInfo
:param ReadonlyInstances: 只读实例信息
注意:此字段可能返回 null,表示取不到有效值。
:type ReadonlyInstances: list of MongoDBInstance
:param StandbyInstances: 灾备实例信息
注意:此字段可能返回 null,表示取不到有效值。
:type StandbyInstances: list of MongoDBInstance
:param CloneInstances: 临时实例信息
注意:此字段可能返回 null,表示取不到有效值。
:type CloneInstances: list of MongoDBInstance
:param RelatedInstance: 关联实例信息,对于正式实例,该字段表示它的临时实例信息;对于临时实例,则表示它的正式实例信息;如果为只读/灾备实例,则表示他的主实例信息
注意:此字段可能返回 null,表示取不到有效值。
:type RelatedInstance: :class:`tencentcloud.mongodb.v20180408.models.MongoDBInstance`
:param Tags: 实例标签信息集合
注意:此字段可能返回 null,表示取不到有效值。
:type Tags: list of TagInfo
:param InstanceVer: 实例标记
:type InstanceVer: int
:param ClusterVer: 实例标记
:type ClusterVer: int
:param Protocol: 协议信息,可能的返回值:1-mongodb,2-dynamodb
:type Protocol: int
:param InstanceType: 实例类型,可能的返回值,1-正式实例,2-临时实例,3-只读实例,4-灾备实例
:type InstanceType: int
:param InstanceStatusDesc: 实例状态描述
:type InstanceStatusDesc: str
:param RealInstanceId: 实例对应的物理实例ID,回档并替换过的实例有不同的InstanceId和RealInstanceId,从barad获取监控数据等场景下需要用物理id获取
:type RealInstanceId: str
"""
self.InstanceId = None
self.InstanceName = None
self.PayMode = None
self.ProjectId = None
self.ClusterType = None
self.Region = None
self.Zone = None
self.NetType = None
self.VpcId = None
self.SubnetId = None
self.Status = None
self.Vip = None
self.Vport = None
self.CreateTime = None
self.DeadLine = None
self.MongoVersion = None
self.Memory = None
self.Volume = None
self.CpuNum = None
self.MachineType = None
self.SecondaryNum = None
self.ReplicationSetNum = None
self.AutoRenewFlag = None
self.UsedVolume = None
self.MaintenanceStart = None
self.MaintenanceEnd = None
self.ReplicaSets = None
self.ReadonlyInstances = None
self.StandbyInstances = None
self.CloneInstances = None
self.RelatedInstance = None
self.Tags = None
self.InstanceVer = None
self.ClusterVer = None
self.Protocol = None
self.InstanceType = None
self.InstanceStatusDesc = None
self.RealInstanceId = None
def _deserialize(self, params):
self.InstanceId = params.get("InstanceId")
self.InstanceName = params.get("InstanceName")
self.PayMode = params.get("PayMode")
self.ProjectId = params.get("ProjectId")
self.ClusterType = params.get("ClusterType")
self.Region = params.get("Region")
self.Zone = params.get("Zone")
self.NetType = params.get("NetType")
self.VpcId = params.get("VpcId")
self.SubnetId = params.get("SubnetId")
self.Status = params.get("Status")
self.Vip = params.get("Vip")
self.Vport = params.get("Vport")
self.CreateTime = params.get("CreateTime")
self.DeadLine = params.get("DeadLine")
self.MongoVersion = params.get("MongoVersion")
self.Memory = params.get("Memory")
self.Volume = params.get("Volume")
self.CpuNum = params.get("CpuNum")
self.MachineType = params.get("MachineType")
self.SecondaryNum = params.get("SecondaryNum")
self.ReplicationSetNum = params.get("ReplicationSetNum")
self.AutoRenewFlag = params.get("AutoRenewFlag")
self.UsedVolume = params.get("UsedVolume")
self.MaintenanceStart = params.get("MaintenanceStart")
self.MaintenanceEnd = params.get("MaintenanceEnd")
if params.get("ReplicaSets") is not None:
self.ReplicaSets = []
for item in params.get("ReplicaSets"):
obj = MongodbShardInfo()
obj._deserialize(item)
self.ReplicaSets.append(obj)
if params.get("ReadonlyInstances") is not None:
self.ReadonlyInstances = []
for item in params.get("ReadonlyInstances"):
obj = MongoDBInstance()
obj._deserialize(item)
self.ReadonlyInstances.append(obj)
if params.get("StandbyInstances") is not None:
self.StandbyInstances = []
for item in params.get("StandbyInstances"):
obj = MongoDBInstance()
obj._deserialize(item)
self.StandbyInstances.append(obj)
if params.get("CloneInstances") is not None:
self.CloneInstances = []
for item in params.get("CloneInstances"):
obj = MongoDBInstance()
obj._deserialize(item)
self.CloneInstances.append(obj)
if params.get("RelatedInstance") is not None:
self.RelatedInstance = MongoDBInstance()
self.RelatedInstance._deserialize(params.get("RelatedInstance"))
if params.get("Tags") is not None:
self.Tags = []
for item in params.get("Tags"):
obj = TagInfo()
obj._deserialize(item)
self.Tags.append(obj)
self.InstanceVer = params.get("InstanceVer")
self.ClusterVer = params.get("ClusterVer")
self.Protocol = params.get("Protocol")
self.InstanceType = params.get("InstanceType")
self.InstanceStatusDesc = params.get("InstanceStatusDesc")
self.RealInstanceId = params.get("RealInstanceId")
memeber_set = set(params.keys())
for name, value in vars(self).items():
if name in memeber_set:
memeber_set.remove(name)
if len(memeber_set) > 0:
warnings.warn("%s fileds are useless." % ",".join(memeber_set), Warning)
class MongodbShardInfo(AbstractModel):
"""实例分片详情
"""
def __init__(self):
"""
:param UsedVolume: 分片已使用容量
:type UsedVolume: float
:param ReplicaSetId: 分片ID
:type ReplicaSetId: str
:param ReplicaSetName: 分片名
:type ReplicaSetName: str
:param Memory: 分片内存规格,单位为MB
:type Memory: int
:param Volume: 分片磁盘规格,单位为MB
:type Volume: int
:param OplogSize: 分片Oplog大小,单位为MB
:type OplogSize: int
:param SecondaryNum: 分片从节点数
:type SecondaryNum: int
:param RealReplicaSetId: 分片物理ID
:type RealReplicaSetId: str
"""
self.UsedVolume = None
self.ReplicaSetId = None
self.ReplicaSetName = None
self.Memory = None
self.Volume = None
self.OplogSize = None
self.SecondaryNum = None
self.RealReplicaSetId = None
def _deserialize(self, params):
self.UsedVolume = params.get("UsedVolume")
self.ReplicaSetId = params.get("ReplicaSetId")
self.ReplicaSetName = params.get("ReplicaSetName")
self.Memory = params.get("Memory")
self.Volume = params.get("Volume")
self.OplogSize = params.get("OplogSize")
self.SecondaryNum = params.get("SecondaryNum")
self.RealReplicaSetId = params.get("RealReplicaSetId")
memeber_set = set(params.keys())
for name, value in vars(self).items():
if name in memeber_set:
memeber_set.remove(name)
if len(memeber_set) > 0:
warnings.warn("%s fileds are useless." % ",".join(memeber_set), Warning)
class RenameInstanceRequest(AbstractModel):
"""RenameInstance请求参数结构体
"""
def __init__(self):
"""
:param InstanceId: 实例ID,格式如:cmgo-p8vnipr5。与云数据库控制台页面中显示的实例ID相同
:type InstanceId: str
:param NewName: 实例名称
:type NewName: str
"""
self.InstanceId = None
self.NewName = None
def _deserialize(self, params):
self.InstanceId = params.get("InstanceId")
self.NewName = params.get("NewName")
memeber_set = set(params.keys())
for name, value in vars(self).items():
if name in memeber_set:
memeber_set.remove(name)
if len(memeber_set) > 0:
warnings.warn("%s fileds are useless." % ",".join(memeber_set), Warning)
class RenameInstanceResponse(AbstractModel):
"""RenameInstance返回参数结构体
"""
def __init__(self):
"""
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.RequestId = None
def _deserialize(self, params):
self.RequestId = params.get("RequestId")
memeber_set = set(params.keys())
for name, value in vars(self).items():
if name in memeber_set:
memeber_set.remove(name)
if len(memeber_set) > 0:
warnings.warn("%s fileds are useless." % ",".join(memeber_set), Warning)
class SetAutoRenewRequest(AbstractModel):
"""SetAutoRenew请求参数结构体
"""
def __init__(self):
"""
:param InstanceIds: 实例ID列表,格式如:cmgo-p8vnipr5。与云数据库控制台页面中显示的实例ID相同
:type InstanceIds: list of str
:param AutoRenewFlag: 续费选项,取值范围:0-手动续费,1-自动续费,2-确认不续费
:type AutoRenewFlag: int
"""
self.InstanceIds = None
self.AutoRenewFlag = None
def _deserialize(self, params):
self.InstanceIds = params.get("InstanceIds")
self.AutoRenewFlag = params.get("AutoRenewFlag")
memeber_set = set(params.keys())
for name, value in vars(self).items():
if name in memeber_set:
memeber_set.remove(name)
if len(memeber_set) > 0:
warnings.warn("%s fileds are useless." % ",".join(memeber_set), Warning)
class SetAutoRenewResponse(AbstractModel):
"""SetAutoRenew返回参数结构体
"""
def __init__(self):
"""
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.RequestId = None
def _deserialize(self, params):
self.RequestId = params.get("RequestId")
memeber_set = set(params.keys())
for name, value in vars(self).items():
if name in memeber_set:
memeber_set.remove(name)
if len(memeber_set) > 0:
warnings.warn("%s fileds are useless." % ",".join(memeber_set), Warning)
class SetPasswordRequest(AbstractModel):
"""SetPassword请求参数结构体
"""
def __init__(self):
"""
:param InstanceId: 实例ID,格式如:cmgo-p8vnipr5。与云数据库控制台页面中显示的实例ID相同
:type InstanceId: str
:param UserName: 实例账户名称
:type UserName: str
:param Password: 实例新密码,至少包含字母、数字和字符(!@#%^*())中的两种,长度为8-16个字符
:type Password: str
"""
self.InstanceId = None
self.UserName = None
self.Password = None
def _deserialize(self, params):
self.InstanceId = params.get("InstanceId")
self.UserName = params.get("UserName")
self.Password = params.get("Password")
memeber_set = set(params.keys())
for name, value in vars(self).items():
if name in memeber_set:
memeber_set.remove(name)
if len(memeber_set) > 0:
warnings.warn("%s fileds are useless." % ",".join(memeber_set), Warning)
class SetPasswordResponse(AbstractModel):
"""SetPassword返回参数结构体
"""
def __init__(self):
"""
:param FlowId: 返回的异步任务ID
:type FlowId: int
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.FlowId = None
self.RequestId = None
def _deserialize(self, params):
self.FlowId = params.get("FlowId")
self.RequestId = params.get("RequestId")
memeber_set = set(params.keys())
for name, value in vars(self).items():
if name in memeber_set:
memeber_set.remove(name)
if len(memeber_set) > 0:
warnings.warn("%s fileds are useless." % ",".join(memeber_set), Warning)
class SpecItem(AbstractModel):
"""mongodb售卖规格
"""
def __init__(self):
"""
:param SpecCode: 规格信息标识
:type SpecCode: str
:param Status: 规格有效标志,取值:0-停止售卖,1-开放售卖
:type Status: int
:param MachineType: 机器类型,取值:0-HIO,4-HIO10G
:type MachineType: str
:param Cpu: cpu核心数
:type Cpu: int
:param Memory: 内存规格,单位为MB
:type Memory: int
:param DefaultStorage: 默认磁盘规格,单位MB
:type DefaultStorage: int
:param MaxStorage: 最大磁盘规格,单位MB
:type MaxStorage: int
:param MinStorage: 最小磁盘规格,单位MB
:type MinStorage: int
:param Qps: 可承载qps信息
:type Qps: int
:param Conns: 连接数限制
:type Conns: int
:param MongoVersionCode: 实例mongodb版本信息
:type MongoVersionCode: str
:param MongoVersionValue: 实例mongodb版本号
:type MongoVersionValue: int
:param Version: 实例mongodb版本号(短)
:type Version: str
:param EngineName: 存储引擎
:type EngineName: str
:param ClusterType: 集群类型,取值:1-分片集群,0-副本集集群
:type ClusterType: int
:param MinNodeNum: 最小副本集从节点数
:type MinNodeNum: int
:param MaxNodeNum: 最大副本集从节点数
:type MaxNodeNum: int
:param MinReplicateSetNum: 最小分片数
:type MinReplicateSetNum: int
:param MaxReplicateSetNum: 最大分片数
:type MaxReplicateSetNum: int
:param MinReplicateSetNodeNum: 最小分片从节点数
:type MinReplicateSetNodeNum: int
:param MaxReplicateSetNodeNum: 最大分片从节点数
:type MaxReplicateSetNodeNum: int
"""
self.SpecCode = None
self.Status = None
self.MachineType = None
self.Cpu = None
self.Memory = None
self.DefaultStorage = None
self.MaxStorage = None
self.MinStorage = None
self.Qps = None
self.Conns = None
self.MongoVersionCode = None
self.MongoVersionValue = None
self.Version = None
self.EngineName = None
self.ClusterType = None
self.MinNodeNum = None
self.MaxNodeNum = None
self.MinReplicateSetNum = None
self.MaxReplicateSetNum = None
self.MinReplicateSetNodeNum = None
self.MaxReplicateSetNodeNum = None
def _deserialize(self, params):
self.SpecCode = params.get("SpecCode")
self.Status = params.get("Status")
self.MachineType = params.get("MachineType")
self.Cpu = params.get("Cpu")
self.Memory = params.get("Memory")
self.DefaultStorage = params.get("DefaultStorage")
self.MaxStorage = params.get("MaxStorage")
self.MinStorage = params.get("MinStorage")
self.Qps = params.get("Qps")
self.Conns = params.get("Conns")
self.MongoVersionCode = params.get("MongoVersionCode")
self.MongoVersionValue = params.get("MongoVersionValue")
self.Version = params.get("Version")
self.EngineName = params.get("EngineName")
self.ClusterType = params.get("ClusterType")
self.MinNodeNum = params.get("MinNodeNum")
self.MaxNodeNum = params.get("MaxNodeNum")
self.MinReplicateSetNum = params.get("MinReplicateSetNum")
self.MaxReplicateSetNum = params.get("MaxReplicateSetNum")
self.MinReplicateSetNodeNum = params.get("MinReplicateSetNodeNum")
self.MaxReplicateSetNodeNum = params.get("MaxReplicateSetNodeNum")
memeber_set = set(params.keys())
for name, value in vars(self).items():
if name in memeber_set:
memeber_set.remove(name)
if len(memeber_set) > 0:
warnings.warn("%s fileds are useless." % ",".join(memeber_set), Warning)
class SpecificationInfo(AbstractModel):
"""实例规格信息
"""
def __init__(self):
"""
:param Region: 地域信息
:type Region: str
:param Zone: 可用区信息
:type Zone: str
:param SpecItems: 售卖规格信息
:type SpecItems: list of SpecItem
"""
self.Region = None
self.Zone = None
self.SpecItems = None
def _deserialize(self, params):
self.Region = params.get("Region")
self.Zone = params.get("Zone")
if params.get("SpecItems") is not None:
self.SpecItems = []
for item in params.get("SpecItems"):
obj = SpecItem()
obj._deserialize(item)
self.SpecItems.append(obj)
memeber_set = set(params.keys())
for name, value in vars(self).items():
if name in memeber_set:
memeber_set.remove(name)
if len(memeber_set) > 0:
warnings.warn("%s fileds are useless." % ",".join(memeber_set), Warning)
class TagInfo(AbstractModel):
"""实例标签信息
"""
def __init__(self):
"""
:param TagKey: 标签Key值
:type TagKey: str
:param TagValue: 标签值
:type TagValue: str
"""
self.TagKey = None
self.TagValue = None
def _deserialize(self, params):
self.TagKey = params.get("TagKey")
self.TagValue = params.get("TagValue")
memeber_set = set(params.keys())
for name, value in vars(self).items():
if name in memeber_set:
memeber_set.remove(name)
if len(memeber_set) > 0:
warnings.warn("%s fileds are useless." % ",".join(memeber_set), Warning)
class TerminateDBInstanceRequest(AbstractModel):
"""TerminateDBInstance请求参数结构体
"""
def __init__(self):
"""
:param InstanceId: 实例ID,格式如:cmgo-p8vnipr5。
:type InstanceId: str
"""
self.InstanceId = None
def _deserialize(self, params):
self.InstanceId = params.get("InstanceId")
memeber_set = set(params.keys())
for name, value in vars(self).items():
if name in memeber_set:
memeber_set.remove(name)
if len(memeber_set) > 0:
warnings.warn("%s fileds are useless." % ",".join(memeber_set), Warning)
class TerminateDBInstanceResponse(AbstractModel):
"""TerminateDBInstance返回参数结构体
"""
def __init__(self):
"""
:param AsyncRequestId: 订单ID,表示注销实例成功
:type AsyncRequestId: str
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.AsyncRequestId = None
self.RequestId = None
def _deserialize(self, params):
self.AsyncRequestId = params.get("AsyncRequestId")
self.RequestId = params.get("RequestId")
memeber_set = set(params.keys())
for name, value in vars(self).items():
if name in memeber_set:
memeber_set.remove(name)
if len(memeber_set) > 0:
warnings.warn("%s fileds are useless." % ",".join(memeber_set), Warning)
class UpgradeDBInstanceHourRequest(AbstractModel):
"""UpgradeDBInstanceHour请求参数结构体
"""
def __init__(self):
"""
:param InstanceId: 实例ID,格式如:cmgo-p8vnipr5
:type InstanceId: str
:param Memory: 升级后的内存大小,单位:GB
:type Memory: int
:param Volume: 升级后的硬盘大小,单位:GB
:type Volume: int
:param OplogSize: 升级后oplog的大小,单位:GB,默认为磁盘空间的10%,允许设置的最小值为磁盘的10%,最大值为磁盘的90%
:type OplogSize: int
"""
self.InstanceId = None
self.Memory = None
self.Volume = None
self.OplogSize = None
def _deserialize(self, params):
self.InstanceId = params.get("InstanceId")
self.Memory = params.get("Memory")
self.Volume = params.get("Volume")
self.OplogSize = params.get("OplogSize")
memeber_set = set(params.keys())
for name, value in vars(self).items():
if name in memeber_set:
memeber_set.remove(name)
if len(memeber_set) > 0:
warnings.warn("%s fileds are useless." % ",".join(memeber_set), Warning)
class UpgradeDBInstanceHourResponse(AbstractModel):
"""UpgradeDBInstanceHour返回参数结构体
"""
def __init__(self):
"""
:param DealId: 订单ID
:type DealId: str
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.DealId = None
self.RequestId = None
def _deserialize(self, params):
self.DealId = params.get("DealId")
self.RequestId = params.get("RequestId")
memeber_set = set(params.keys())
for name, value in vars(self).items():
if name in memeber_set:
memeber_set.remove(name)
if len(memeber_set) > 0:
warnings.warn("%s fileds are useless." % ",".join(memeber_set), Warning)
class UpgradeDBInstanceRequest(AbstractModel):
"""UpgradeDBInstance请求参数结构体
"""
def __init__(self):
"""
:param InstanceId: 实例ID,格式如:cmgo-p8vnipr5。与云数据库控制台页面中显示的实例ID相同
:type InstanceId: str
:param Memory: 升级后的内存大小,单位:GB
:type Memory: int
:param Volume: 升级后的硬盘大小,单位:GB
:type Volume: int
:param OplogSize: 升级后oplog的大小,单位:GB,默认为磁盘空间的10%,允许设置的最小值为磁盘的10%,最大值为磁盘的90%
:type OplogSize: int
"""
self.InstanceId = None
self.Memory = None
self.Volume = None
self.OplogSize = None
def _deserialize(self, params):
self.InstanceId = params.get("InstanceId")
self.Memory = params.get("Memory")
self.Volume = params.get("Volume")
self.OplogSize = params.get("OplogSize")
memeber_set = set(params.keys())
for name, value in vars(self).items():
if name in memeber_set:
memeber_set.remove(name)
if len(memeber_set) > 0:
warnings.warn("%s fileds are useless." % ",".join(memeber_set), Warning)
class UpgradeDBInstanceResponse(AbstractModel):
"""UpgradeDBInstance返回参数结构体
"""
def __init__(self):
"""
:param DealId: 订单ID
:type DealId: str
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.DealId = None
self.RequestId = None
def _deserialize(self, params):
self.DealId = params.get("DealId")
self.RequestId = params.get("RequestId")
memeber_set = set(params.keys())
for name, value in vars(self).items():
if name in memeber_set:
memeber_set.remove(name)
if len(memeber_set) > 0:
warnings.warn("%s fileds are useless." % ",".join(memeber_set), Warning)
| 32.031272 | 110 | 0.600071 | ["Apache-2.0"] | qin5506/tencentcloud-sdk-python | tencentcloud/mongodb/v20180408/models.py | 50,620 | Python |
import numpy as np
import pytest
import tensorflow as tf
from garage.tf.models.gru import gru
from tests.fixtures import TfGraphTestCase
from tests.helpers import recurrent_step_gru
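# These tests compare the TF GRU against a step-by-step NumPy reference
# (recurrent_step_gru from tests.helpers).  As a rough, hedged sketch, a
# classic GRU update with sigmoid gates and a tanh candidate looks like:
#
#   z = sigmoid(x @ W_xz + h @ W_hz + b_z)          # update gate
#   r = sigmoid(x @ W_xr + h @ W_hr + b_r)          # reset gate
#   h_cand = tanh(x @ W_xh + (r * h) @ W_hh + b_h)  # candidate state
#   h_new = z * h + (1 - z) * h_cand
#
# Gate placement details (e.g. where the reset gate is applied) follow the
# helper implementation, which is the authoritative reference here.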
class TestGRU(TfGraphTestCase):
def setup_method(self):
super().setup_method()
self.batch_size = 2
self.hidden_dim = 2
self.step_hidden_var = tf.compat.v1.placeholder(
shape=(self.batch_size, self.hidden_dim),
name='initial_hidden',
dtype=tf.float32)
self.gru_cell = tf.keras.layers.GRUCell(
units=self.hidden_dim,
activation=tf.nn.tanh,
kernel_initializer=tf.constant_initializer(1),
recurrent_activation=tf.nn.sigmoid,
recurrent_initializer=tf.constant_initializer(1),
name='lstm_layer')
# yapf: disable
@pytest.mark.parametrize('time_step, input_dim, output_dim, '
'hidden_init', [
(1, 1, 1, 0), # noqa: E122
(1, 1, 3, 0),
(1, 3, 1, 0),
(3, 1, 1, 0),
(3, 3, 1, 0),
(3, 3, 3, 0),
(1, 1, 1, 0.5),
(1, 1, 3, 0.5),
(1, 3, 1, 0.5),
(3, 1, 1, 0.5),
(3, 3, 1, 0.5),
(3, 3, 3, 0.5),
])
# yapf: enable
def test_output_shapes(self, time_step, input_dim, output_dim,
hidden_init):
obs_inputs = np.full((self.batch_size, time_step, input_dim), 1.)
obs_input = np.full((self.batch_size, input_dim), 1.)
input_var = tf.compat.v1.placeholder(tf.float32,
shape=(None, None, input_dim),
name='input')
step_input_var = tf.compat.v1.placeholder(tf.float32,
shape=(None, input_dim),
name='step_input')
output_nonlinearity = tf.keras.layers.Dense(
units=output_dim,
activation=None,
kernel_initializer=tf.constant_initializer(1))
with tf.compat.v1.variable_scope('GRU'):
self.gru = gru(
all_input_var=input_var,
name='gru',
gru_cell=self.gru_cell,
step_input_var=step_input_var,
step_hidden_var=self.step_hidden_var,
hidden_state_init=tf.constant_initializer(hidden_init),
output_nonlinearity_layer=output_nonlinearity)
self.sess.run(tf.compat.v1.global_variables_initializer())
# Compute output by doing t step() on the gru cell
outputs_t, output_t, h_t, hidden_init = self.gru
hidden = np.full((self.batch_size, self.hidden_dim),
hidden_init.eval())
for _ in range(time_step):
output, hidden = self.sess.run([output_t, h_t],
feed_dict={
step_input_var: obs_input,
self.step_hidden_var: hidden,
}) # noqa: E126
assert output.shape == (self.batch_size, output_dim)
assert hidden.shape == (self.batch_size, self.hidden_dim)
full_output = self.sess.run(outputs_t,
feed_dict={input_var: obs_inputs})
assert full_output.shape == (self.batch_size, time_step, output_dim)
# yapf: disable
@pytest.mark.parametrize('time_step, input_dim, output_dim, '
'hidden_init', [
(1, 1, 1, 0), # noqa: E122
(1, 1, 3, 0),
(1, 3, 1, 0),
(3, 1, 1, 0),
(3, 3, 1, 0),
(3, 3, 3, 0),
(1, 1, 1, 0.5),
(1, 1, 3, 0.5),
(1, 3, 1, 0.5),
(3, 1, 1, 0.5),
(3, 3, 1, 0.5),
(3, 3, 3, 0.5),
])
# yapf: enable
def test_output_value(self, time_step, input_dim, output_dim, hidden_init):
obs_inputs = np.full((self.batch_size, time_step, input_dim), 1.)
obs_input = np.full((self.batch_size, input_dim), 1.)
input_var = tf.compat.v1.placeholder(tf.float32,
shape=(None, None, input_dim),
name='input')
step_input_var = tf.compat.v1.placeholder(tf.float32,
shape=(None, input_dim),
name='step_input')
output_nonlinearity = tf.keras.layers.Dense(
units=output_dim,
activation=None,
kernel_initializer=tf.constant_initializer(1))
with tf.compat.v1.variable_scope('GRU'):
self.gru = gru(
all_input_var=input_var,
name='gru',
gru_cell=self.gru_cell,
step_input_var=step_input_var,
step_hidden_var=self.step_hidden_var,
hidden_state_init=tf.constant_initializer(hidden_init),
output_nonlinearity_layer=output_nonlinearity)
self.sess.run(tf.compat.v1.global_variables_initializer())
# Compute output by doing t step() on the gru cell
outputs_t, output_t, h_t, hidden_init = self.gru
hidden1 = hidden2 = np.full((self.batch_size, self.hidden_dim),
hidden_init.eval())
for i in range(time_step):
output1, hidden1 = self.sess.run([output_t, h_t],
feed_dict={
step_input_var: obs_input,
self.step_hidden_var: hidden1
}) # noqa: E126
hidden2 = recurrent_step_gru(input_val=obs_input,
num_units=self.hidden_dim,
step_hidden=hidden2,
w_x_init=1.,
w_h_init=1.,
b_init=0.,
nonlinearity=np.tanh,
gate_nonlinearity=lambda x: 1. /
(1. + np.exp(-x)))
output_nonlinearity = np.full(
(np.prod(hidden2.shape[1:]), output_dim), 1.)
output2 = np.matmul(hidden2, output_nonlinearity)
assert np.allclose(output1, output2)
assert np.allclose(hidden1, hidden2)
full_output1 = self.sess.run(outputs_t,
feed_dict={input_var: obs_inputs})
hidden2 = np.full((self.batch_size, self.hidden_dim),
hidden_init.eval())
stack_hidden = None
for i in range(time_step):
hidden2 = recurrent_step_gru(input_val=obs_inputs[:, i, :],
num_units=self.hidden_dim,
step_hidden=hidden2,
w_x_init=1.,
w_h_init=1.,
b_init=0.,
nonlinearity=np.tanh,
gate_nonlinearity=lambda x: 1. /
(1. + np.exp(-x)))
if stack_hidden is None:
stack_hidden = hidden2[:, np.newaxis, :]
else:
stack_hidden = np.concatenate(
(stack_hidden, hidden2[:, np.newaxis, :]), axis=1)
output_nonlinearity = np.full((np.prod(hidden2.shape[1:]), output_dim),
1.)
full_output2 = np.matmul(stack_hidden, output_nonlinearity)
assert np.allclose(full_output1, full_output2)
# yapf: disable
@pytest.mark.parametrize('time_step, input_dim, output_dim', [
(1, 1, 1),
(1, 1, 3),
(1, 3, 1),
(3, 1, 1),
(3, 3, 1),
(3, 3, 3),
])
# yapf: enable
def test_output_value_trainable_hidden_and_cell(self, time_step, input_dim,
output_dim):
obs_inputs = np.full((self.batch_size, time_step, input_dim), 1.)
obs_input = np.full((self.batch_size, input_dim), 1.)
input_var = tf.compat.v1.placeholder(tf.float32,
shape=(None, None, input_dim),
name='input')
step_input_var = tf.compat.v1.placeholder(tf.float32,
shape=(None, input_dim),
name='step_input')
output_nonlinearity = tf.keras.layers.Dense(
units=output_dim,
activation=None,
kernel_initializer=tf.constant_initializer(1))
with tf.compat.v1.variable_scope('GRU'):
self.gru = gru(all_input_var=input_var,
name='gru',
gru_cell=self.gru_cell,
step_input_var=step_input_var,
step_hidden_var=self.step_hidden_var,
hidden_state_init_trainable=True,
output_nonlinearity_layer=output_nonlinearity)
self.sess.run(tf.compat.v1.global_variables_initializer())
# Compute output by doing t step() on the gru cell
outputs_t, output_t, h_t, hidden_init = self.gru
hidden = np.full((self.batch_size, self.hidden_dim),
hidden_init.eval())
_, hidden = self.sess.run([output_t, h_t],
feed_dict={
step_input_var: obs_input,
self.step_hidden_var: hidden,
}) # noqa: E126
with tf.compat.v1.variable_scope('GRU/gru', reuse=True):
hidden_init_var = tf.compat.v1.get_variable(name='initial_hidden')
assert hidden_init_var in tf.compat.v1.trainable_variables()
full_output1 = self.sess.run(outputs_t,
feed_dict={input_var: obs_inputs})
hidden2 = np.full((self.batch_size, self.hidden_dim),
hidden_init.eval())
stack_hidden = None
for i in range(time_step):
hidden2 = recurrent_step_gru(input_val=obs_inputs[:, i, :],
num_units=self.hidden_dim,
step_hidden=hidden2,
w_x_init=1.,
w_h_init=1.,
b_init=0.,
nonlinearity=np.tanh,
gate_nonlinearity=lambda x: 1. /
(1. + np.exp(-x)))
if stack_hidden is None:
stack_hidden = hidden2[:, np.newaxis, :]
else:
stack_hidden = np.concatenate(
(stack_hidden, hidden2[:, np.newaxis, :]), axis=1)
output_nonlinearity = np.full((np.prod(hidden2.shape[1:]), output_dim),
1.)
full_output2 = np.matmul(stack_hidden, output_nonlinearity)
assert np.allclose(full_output1, full_output2)
def test_gradient_paths(self):
time_step = 3
input_dim = 2
output_dim = 4
obs_inputs = np.full((self.batch_size, time_step, input_dim), 1.)
obs_input = np.full((self.batch_size, input_dim), 1.)
input_var = tf.compat.v1.placeholder(tf.float32,
shape=(None, None, input_dim),
name='input')
step_input_var = tf.compat.v1.placeholder(tf.float32,
shape=(None, input_dim),
name='step_input')
output_nonlinearity = tf.keras.layers.Dense(
units=output_dim,
activation=None,
kernel_initializer=tf.constant_initializer(1))
with tf.compat.v1.variable_scope('GRU'):
self.gru = gru(all_input_var=input_var,
name='gru',
gru_cell=self.gru_cell,
step_input_var=step_input_var,
step_hidden_var=self.step_hidden_var,
output_nonlinearity_layer=output_nonlinearity)
self.sess.run(tf.compat.v1.global_variables_initializer())
# Compute output by doing t step() on the gru cell
outputs_t, output_t, h_t, hidden_init = self.gru
hidden = np.full((self.batch_size, self.hidden_dim),
hidden_init.eval())
grads_step_o_i = tf.gradients(output_t, step_input_var)
grads_step_o_h = tf.gradients(output_t, self.step_hidden_var)
grads_step_h = tf.gradients(h_t, step_input_var)
self.sess.run([grads_step_o_i, grads_step_o_h, grads_step_h],
feed_dict={
step_input_var: obs_input,
self.step_hidden_var: hidden,
}) # noqa: E126
grads_step_o_i = tf.gradients(outputs_t, step_input_var)
grads_step_o_h = tf.gradients(outputs_t, self.step_hidden_var)
grads_step_h = tf.gradients(h_t, input_var)
# No gradient flow
with pytest.raises(TypeError):
self.sess.run(grads_step_o_i,
feed_dict={
step_input_var: obs_input,
self.step_hidden_var: hidden,
})
with pytest.raises(TypeError):
self.sess.run(grads_step_o_h,
feed_dict={
step_input_var: obs_input,
self.step_hidden_var: hidden,
})
with pytest.raises(TypeError):
self.sess.run(grads_step_h, feed_dict={input_var: obs_inputs})
# yapf: disable
@pytest.mark.parametrize('time_step, input_dim, output_dim, '
'hidden_init', [
(1, 1, 1, 0), # noqa: E122
(1, 1, 3, 0),
(1, 3, 1, 0),
(3, 1, 1, 0),
(3, 3, 1, 0),
(3, 3, 3, 0),
(1, 1, 1, 0.5),
(1, 1, 3, 0.5),
(1, 3, 1, 0.5),
(3, 1, 1, 0.5),
(3, 3, 1, 0.5),
(3, 3, 3, 0.5),
])
# yapf: enable
def test_output_same_as_rnn(self, time_step, input_dim, output_dim,
hidden_init):
obs_inputs = np.full((self.batch_size, time_step, input_dim), 1.)
obs_input = np.full((self.batch_size, input_dim), 1.)
input_var = tf.compat.v1.placeholder(tf.float32,
shape=(None, None, input_dim),
name='input')
step_input_var = tf.compat.v1.placeholder(tf.float32,
shape=(None, input_dim),
name='step_input')
output_nonlinearity = tf.keras.layers.Dense(
units=output_dim,
activation=None,
kernel_initializer=tf.constant_initializer(1))
with tf.compat.v1.variable_scope('GRU'):
self.gru = gru(
all_input_var=input_var,
name='gru',
gru_cell=self.gru_cell,
step_input_var=step_input_var,
step_hidden_var=self.step_hidden_var,
hidden_state_init=tf.constant_initializer(hidden_init),
output_nonlinearity_layer=output_nonlinearity)
self.sess.run(tf.compat.v1.global_variables_initializer())
# Create a RNN and compute the entire outputs
rnn_layer = tf.keras.layers.RNN(cell=self.gru_cell,
return_sequences=True,
return_state=True)
# Set initial state to all 0s
hidden_var = tf.compat.v1.get_variable(
name='initial_hidden',
shape=(self.batch_size, self.hidden_dim),
initializer=tf.constant_initializer(hidden_init),
trainable=False,
dtype=tf.float32)
outputs, hiddens = rnn_layer(input_var, initial_state=[hidden_var])
outputs = output_nonlinearity(outputs)
self.sess.run(tf.compat.v1.global_variables_initializer())
outputs, hiddens = self.sess.run([outputs, hiddens],
feed_dict={input_var: obs_inputs})
# Compute output by doing t step() on the gru cell
hidden = np.full((self.batch_size, self.hidden_dim), hidden_init)
_, output_t, hidden_t, _ = self.gru
for i in range(time_step):
output, hidden = self.sess.run([output_t, hidden_t],
feed_dict={
step_input_var: obs_input,
self.step_hidden_var: hidden,
}) # noqa: E126
# The output from i-th timestep
assert np.array_equal(output, outputs[:, i, :])
assert np.array_equal(hidden, hiddens)
        # Also check the full output from the gru
full_outputs = self.sess.run(self.gru[0],
feed_dict={input_var: obs_inputs})
assert np.array_equal(outputs, full_outputs)
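# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the test class above): the numpy reference
# `recurrent_step_gru` that these tests call is assumed to implement a standard
# GRU update with sigmoid gates and a tanh candidate, using constant weight and
# bias initialisers (w_x_init/w_h_init/b_init). The helper below only documents
# that assumed recurrence; the real garage test helper may differ in detail.
# It relies on the module-level `numpy as np` import used throughout this file.
def _gru_step_sketch(x, h, w_x=1., w_h=1., b=0.):
    """One GRU step for inputs x of shape (N, D) and hidden state h of (N, H)."""
    def sigmoid(v):
        return 1. / (1. + np.exp(-v))
    num_units = h.shape[1]
    # With constant initialisers every weight matrix is a constant matrix.
    w_xh = np.full((x.shape[1], num_units), w_x)
    w_hh = np.full((num_units, num_units), w_h)
    z = sigmoid(x.dot(w_xh) + h.dot(w_hh) + b)          # update gate
    r = sigmoid(x.dot(w_xh) + h.dot(w_hh) + b)          # reset gate
    c = np.tanh(x.dot(w_xh) + (r * h).dot(w_hh) + b)    # candidate state
    return z * h + (1. - z) * c                         # new hidden state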
| 43.812195 | 79 | 0.490675 | [
"MIT"
] | artberryx/LSD | garaged/tests/garage/tf/models/test_gru.py | 17,963 | Python |
# -*- coding: utf-8 -*-
"""This file contains a basic Skype SQLite parser."""
import logging
from plaso.events import time_events
from plaso.parsers import sqlite
from plaso.parsers.sqlite_plugins import interface
__author__ = 'Joaquin Moreno Garijo ([email protected])'
class SkypeChatEvent(time_events.PosixTimeEvent):
"""Convenience class for a Skype event."""
DATA_TYPE = u'skype:event:chat'
def __init__(self, row, to_account):
"""Build a Skype Event from a single row.
Args:
row: A row object (instance of sqlite3.Row) that contains the
extracted data from a single row in the database.
to_account: A string containing the accounts (excluding the
author) of the conversation.
"""
# Note that pysqlite does not accept a Unicode string in row['string'] and
# will raise "IndexError: Index must be int or string".
super(SkypeChatEvent, self).__init__(
row['timestamp'], u'Chat from Skype', self.DATA_TYPE)
self.title = row['title']
self.text = row['body_xml']
self.from_account = u'{0:s} <{1:s}>'.format(
row['from_displayname'], row['author'])
self.to_account = to_account
class SkypeAccountEvent(time_events.PosixTimeEvent):
"""Convenience class for account information."""
DATA_TYPE = u'skype:event:account'
def __init__(
self, timestamp, usage, identifier, full_name, display_name, email,
country):
"""Initialize the event.
Args:
timestamp: The POSIX timestamp value.
usage: A string containing the description string of the timestamp.
identifier: The row identifier.
full_name: A string containing the full name of the Skype account holder.
display_name: A string containing the chosen display name of the account
holder.
email: A string containing the registered email address of the account
holder.
country: A string containing the chosen home country of the account
holder.
"""
super(SkypeAccountEvent, self).__init__(timestamp, usage)
self.offset = identifier
self.username = u'{0:s} <{1:s}>'.format(full_name, display_name)
self.display_name = display_name
self.email = email
self.country = country
self.data_type = self.DATA_TYPE
class SkypeSMSEvent(time_events.PosixTimeEvent):
"""Convenience EventObject for SMS."""
DATA_TYPE = u'skype:event:sms'
def __init__(self, row, dst_number):
"""Read the information related with the SMS.
Args:
row: row form the sql query.
row['time_sms']: timestamp when the sms was send.
row['dstnum_sms']: number which receives the sms.
row['msg_sms']: text send to this sms.
dst_number: phone number where the user send the sms.
"""
# Note that pysqlite does not accept a Unicode string in row['string'] and
# will raise "IndexError: Index must be int or string".
super(SkypeSMSEvent, self).__init__(
row['time_sms'], u'SMS from Skype', self.DATA_TYPE)
self.number = dst_number
self.text = row['msg_sms']
class SkypeCallEvent(time_events.PosixTimeEvent):
"""Convenience EventObject for the calls."""
DATA_TYPE = u'skype:event:call'
def __init__(self, timestamp, call_type, user_start_call,
source, destination, video_conference):
"""Contains information if the call was cancelled, accepted or finished.
Args:
timestamp: the timestamp of the event.
call_type: WAITING, STARTED, FINISHED.
user_start_call: boolean, true indicates that the owner
account started the call.
source: the account which started the call.
destination: the account which gets the call.
video_conference: boolean, if is true it was a videoconference.
"""
super(SkypeCallEvent, self).__init__(
timestamp, u'Call from Skype', self.DATA_TYPE)
self.call_type = call_type
self.user_start_call = user_start_call
self.src_call = source
self.dst_call = destination
self.video_conference = video_conference
class SkypeTransferFileEvent(time_events.PosixTimeEvent):
"""Evaluate the action of send a file."""
DATA_TYPE = u'skype:event:transferfile'
def __init__(self, row, timestamp, action_type, source, destination):
"""Actions related with sending files.
Args:
row:
filepath: path from the file.
filename: name of the file.
filesize: size of the file.
timestamp: when the action happens.
action_type: GETSOLICITUDE, SENDSOLICITUDE, ACCEPTED, FINISHED.
source: The account that sent the file.
destination: The account that received the file.
"""
# Note that pysqlite does not accept a Unicode string in row['string'] and
# will raise "IndexError: Index must be int or string".
super(SkypeTransferFileEvent, self).__init__(
timestamp, u'File transfer from Skype', self.DATA_TYPE)
self.offset = row['id']
self.action_type = action_type
self.source = source
self.destination = destination
self.transferred_filepath = row['filepath']
self.transferred_filename = row['filename']
try:
self.transferred_filesize = int(row['filesize'])
except ValueError:
logging.debug(u'Unknown filesize {0:s}'.format(
self.transferred_filename))
self.transferred_filesize = 0
class SkypePlugin(interface.SQLitePlugin):
"""SQLite plugin for Skype main.db SQlite database file."""
NAME = u'skype'
DESCRIPTION = u'Parser for Skype SQLite database files.'
# Queries for building cache.
QUERY_DEST_FROM_TRANSFER = (
u'SELECT parent_id, partner_handle AS skypeid, '
u'partner_dispname AS skypename FROM transfers')
QUERY_SOURCE_FROM_TRANSFER = (
u'SELECT pk_id, partner_handle AS skypeid, '
u'partner_dispname AS skypename FROM transfers')
# Define the needed queries.
QUERIES = [
((u'SELECT c.id, c.participants, c.friendlyname AS title, '
u'm.author AS author, m.from_dispname AS from_displayname, '
u'm.body_xml, m.timestamp, c.dialog_partner FROM Chats c, Messages m '
u'WHERE c.name = m.chatname'), u'ParseChat'),
((u'SELECT id, fullname, given_displayname, emails, '
u'country, profile_timestamp, authreq_timestamp, '
u'lastonline_timestamp, mood_timestamp, sent_authrequest_time, '
u'lastused_timestamp FROM Accounts'), u'ParseAccountInformation'),
((u'SELECT id, target_numbers AS dstnum_sms, timestamp AS time_sms, '
u'body AS msg_sms FROM SMSes'), u'ParseSMS'),
((u'SELECT id, partner_handle, partner_dispname, offer_send_list, '
u'starttime, accepttime, finishtime, filepath, filename, filesize, '
u'status, parent_id, pk_id FROM Transfers'), u'ParseFileTransfer'),
((u'SELECT c.id, cm.guid, c.is_incoming, '
u'cm.call_db_id, cm.videostatus, c.begin_timestamp AS try_call, '
u'cm.start_timestamp AS accept_call, cm.call_duration '
u'FROM Calls c, CallMembers cm '
u'WHERE c.id = cm.call_db_id;'), u'ParseCall')]
# The required tables.
REQUIRED_TABLES = frozenset([
u'Chats', u'Accounts', u'Conversations', u'Contacts', u'SMSes',
u'Transfers', u'CallMembers', u'Calls'])
def ParseAccountInformation(
self, parser_mediator, row, query=None, **unused_kwargs):
"""Parses the Accounts database.
Args:
parser_mediator: A parser mediator object (instance of ParserMediator).
row: The row resulting from the query.
query: Optional query string. The default is None.
"""
# Note that pysqlite does not accept a Unicode string in row['string'] and
# will raise "IndexError: Index must be int or string".
if row['profile_timestamp']:
event_object = SkypeAccountEvent(
row['profile_timestamp'], u'Profile Changed', row['id'],
row['fullname'], row['given_displayname'], row['emails'],
row['country'])
parser_mediator.ProduceEvent(event_object, query=query)
if row['authreq_timestamp']:
event_object = SkypeAccountEvent(
row['authreq_timestamp'], u'Authenticate Request', row['id'],
row['fullname'], row['given_displayname'], row['emails'],
row['country'])
parser_mediator.ProduceEvent(event_object, query=query)
if row['lastonline_timestamp']:
event_object = SkypeAccountEvent(
row['lastonline_timestamp'], u'Last Online', row['id'],
row['fullname'], row['given_displayname'], row['emails'],
row['country'])
parser_mediator.ProduceEvent(event_object, query=query)
if row['mood_timestamp']:
event_object = SkypeAccountEvent(
row['mood_timestamp'], u'Mood Event', row['id'],
row['fullname'], row['given_displayname'], row['emails'],
row['country'])
parser_mediator.ProduceEvent(event_object, query=query)
if row['sent_authrequest_time']:
event_object = SkypeAccountEvent(
row['sent_authrequest_time'], u'Auth Request Sent', row['id'],
row['fullname'], row['given_displayname'], row['emails'],
row['country'])
parser_mediator.ProduceEvent(event_object, query=query)
if row['lastused_timestamp']:
event_object = SkypeAccountEvent(
row['lastused_timestamp'], u'Last Used', row['id'],
row['fullname'], row['given_displayname'], row['emails'],
row['country'])
parser_mediator.ProduceEvent(event_object, query=query)
def ParseChat(self, parser_mediator, row, query=None, **unused_kwargs):
"""Parses a chat message row.
Args:
parser_mediator: A parser mediator object (instance of ParserMediator).
row: The row resulting from the query.
query: Optional query string. The default is None.
"""
# Note that pysqlite does not accept a Unicode string in row['string'] and
# will raise "IndexError: Index must be int or string".
to_account = u''
accounts = []
participants = row['participants'].split(' ')
for participant in participants:
if participant != row['author']:
accounts.append(participant)
to_account = u', '.join(accounts)
if not to_account:
if row['dialog_partner']:
to_account = row['dialog_partner']
else:
to_account = u'Unknown User'
event_object = SkypeChatEvent(row, to_account)
parser_mediator.ProduceEvent(event_object, query=query)
def ParseSMS(self, parser_mediator, row, query=None, **unused_kwargs):
"""Parse SMS.
Args:
parser_mediator: A parser mediator object (instance of ParserMediator).
row: The row resulting from the query.
query: Optional query string. The default is None.
"""
# Note that pysqlite does not accept a Unicode string in row['string'] and
# will raise "IndexError: Index must be int or string".
dst_number = row['dstnum_sms'].replace(u' ', u'')
event_object = SkypeSMSEvent(row, dst_number)
parser_mediator.ProduceEvent(event_object, query=query)
def ParseCall(self, parser_mediator, row, query=None, **unused_kwargs):
"""Parse the calls taking into accounts some rows.
Args:
parser_mediator: A parser mediator object (instance of ParserMediator).
row: The row resulting from the query.
query: Optional query string. The default is None.
"""
# Note that pysqlite does not accept a Unicode string in row['string'] and
# will raise "IndexError: Index must be int or string".
try:
aux = row['guid']
if aux:
aux_list = aux.split(u'-')
src_aux = aux_list[0]
dst_aux = aux_list[1]
else:
src_aux = u'Unknown [no GUID]'
dst_aux = u'Unknown [no GUID]'
except IndexError:
src_aux = u'Unknown [{0:s}]'.format(row['guid'])
dst_aux = u'Unknown [{0:s}]'.format(row['guid'])
if row['is_incoming'] == u'0':
user_start_call = True
source = src_aux
if row['ip_address']:
destination = u'{0:s} <{1:s}>'.format(dst_aux, row['ip_address'])
else:
destination = dst_aux
else:
user_start_call = False
source = src_aux
destination = dst_aux
if row['videostatus'] == u'3':
video_conference = True
else:
video_conference = False
event_object = SkypeCallEvent(
row['try_call'], u'WAITING', user_start_call, source, destination,
video_conference)
parser_mediator.ProduceEvent(event_object, query=query)
if row['accept_call']:
event_object = SkypeCallEvent(
row['accept_call'], u'ACCEPTED', user_start_call, source,
destination, video_conference)
parser_mediator.ProduceEvent(event_object, query=query)
if row['call_duration']:
try:
timestamp = int(row['accept_call']) + int(row['call_duration'])
event_object = SkypeCallEvent(
timestamp, u'FINISHED', user_start_call, source, destination,
video_conference)
parser_mediator.ProduceEvent(event_object, query=query)
except ValueError:
logging.debug((
u'[{0:s}] Unable to determine when the call {1:s} was '
u'finished.').format(self.NAME, row['id']))
def ParseFileTransfer(
self, parser_mediator, row, cache=None, database=None, query=None,
**unused_kwargs):
"""Parse the transfer files.
There is no direct relationship between who sends the file and
who accepts the file.
Args:
parser_mediator: A parser mediator object (instance of ParserMediator).
row: the row with all information related with the file transfers.
      cache: a cache object (instance of SQLiteCache).
      database: A database object (instance of SQLiteDatabase).
      query: Optional query string. The default is None.
"""
# Note that pysqlite does not accept a Unicode string in row['string'] and
# will raise "IndexError: Index must be int or string".
source_dict = cache.GetResults(u'source')
if not source_dict:
cursor = database.cursor
results = cursor.execute(self.QUERY_SOURCE_FROM_TRANSFER)
# Note that pysqlite does not accept a Unicode string in row['string'] and
# will raise "IndexError: Index must be int or string".
cache.CacheQueryResults(
results, 'source', 'pk_id', ('skypeid', 'skypename'))
source_dict = cache.GetResults(u'source')
dest_dict = cache.GetResults(u'destination')
if not dest_dict:
cursor = database.cursor
results = cursor.execute(self.QUERY_DEST_FROM_TRANSFER)
# Note that pysqlite does not accept a Unicode string in row['string'] and
# will raise "IndexError: Index must be int or string".
cache.CacheQueryResults(
results, 'destination', 'parent_id', ('skypeid', 'skypename'))
dest_dict = cache.GetResults(u'destination')
source = u'Unknown'
destination = u'Unknown'
if row['parent_id']:
destination = u'{0:s} <{1:s}>'.format(
row['partner_handle'], row['partner_dispname'])
skype_id, skype_name = source_dict.get(row['parent_id'], [None, None])
if skype_name:
source = u'{0:s} <{1:s}>'.format(skype_id, skype_name)
else:
source = u'{0:s} <{1:s}>'.format(
row['partner_handle'], row['partner_dispname'])
if row['pk_id']:
skype_id, skype_name = dest_dict.get(row['pk_id'], [None, None])
if skype_name:
destination = u'{0:s} <{1:s}>'.format(skype_id, skype_name)
if row['status'] == 8:
if row['starttime']:
event_object = SkypeTransferFileEvent(
row, row['starttime'], u'GETSOLICITUDE', source, destination)
parser_mediator.ProduceEvent(event_object, query=query)
if row['accepttime']:
event_object = SkypeTransferFileEvent(
row, row['accepttime'], u'ACCEPTED', source, destination)
parser_mediator.ProduceEvent(event_object, query=query)
if row['finishtime']:
event_object = SkypeTransferFileEvent(
row, row['finishtime'], u'FINISHED', source, destination)
parser_mediator.ProduceEvent(event_object, query=query)
elif row['status'] == 2 and row['starttime']:
event_object = SkypeTransferFileEvent(
row, row['starttime'], u'SENDSOLICITUDE', source, destination)
parser_mediator.ProduceEvent(event_object, query=query)
sqlite.SQLiteParser.RegisterPlugin(SkypePlugin)
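# Illustrative sketch (not part of plaso): the SQL statements in
# SkypePlugin.QUERIES can be exercised directly against a Skype main.db with
# the standard-library sqlite3 module, which is handy when checking why a row
# does or does not produce events. The database path below is hypothetical and
# this helper is not used by the parser above.
def _DebugDumpChatRows(database_path=u'/path/to/main.db'):
  """Prints the rows returned by the chat query for manual inspection."""
  import sqlite3
  connection = sqlite3.connect(database_path)
  connection.row_factory = sqlite3.Row
  chat_query = SkypePlugin.QUERIES[0][0]
  for row in connection.execute(chat_query):
    print(u'{0!s}\t{1!s}\t{2!s}'.format(
        row['timestamp'], row['author'], row['body_xml']))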
| 36.893333 | 80 | 0.665161 | [
"Apache-2.0"
] | Defense-Cyber-Crime-Center/plaso | plaso/parsers/sqlite_plugins/skype.py | 16,602 | Python |
import pycrfsuite
import sklearn
from itertools import chain
from sklearn.metrics import classification_report, confusion_matrix
from sklearn.preprocessing import LabelBinarizer
import re
import json
annotypes = ['Participants', 'Intervention', 'Outcome']
annotype = annotypes[0]
path = '/nlp/data/romap/crf/'
#path = '/Users/romapatel/Desktop/crf/'
def run():
train_sents, test_sents = get_train_test_sets()
print len(test_sents)
indwords_list = get_ind_words()
patterns_list = get_patterns()
X_train = [sent_features(train_sents[docid], indwords_list, patterns_list) for docid in train_sents.keys()]
y_train = [sent_labels(train_sents[docid]) for docid in train_sents.keys()]
X_test = [sent_features(test_sents[docid], indwords_list, patterns_list) for docid in test_sents.keys()]
y_test = [sent_labels(test_sents[docid]) for docid in test_sents.keys()]
trainer = pycrfsuite.Trainer(verbose=False)
for xseq, yseq in zip(X_train, y_train):
trainer.append(xseq, yseq)
trainer.set_params({'c1': 1.0,'c2': 1e-3, 'max_iterations': 50, 'feature.possible_transitions': True})
trainer.train('PICO.crfsuite')
tagger = pycrfsuite.Tagger()
tagger.open('PICO.crfsuite')
get_results(test_sents, tagger, indwords_list, patterns_list)
def get_results(test_sents, tagger, indwords_list, patterns_list):
f1 = open(path + 'sets/4/' + annotype + '-test_pred.json', 'w+')
f2 = open(path + 'sets/4/' + annotype + '-test_correct.json', 'w+')
pred_dict, correct_dict = {}, {}
for docid in test_sents:
pred, correct = tagger.tag(sent_features(test_sents[docid], indwords_list, patterns_list)), sent_labels(test_sents[docid])
spans, span, outside = [], [], True
for i in range(len(pred)):
if pred[i] == '0' and outside is True: continue
elif pred[i] == '0' and outside is False:
span.append(i+1)
spans.append(span)
span, outside = [], True
elif pred[i] == '1' and outside is False: continue
elif pred[i] == '1' and outside is True:
outside = False
span.append(i)
pred_dict[docid] = spans
spans, span, outside = [], [], True
for i in range(len(correct)):
if correct[i] == '0' and outside is True: continue
elif correct[i] == '0' and outside is False:
span.append(i+1)
spans.append(span)
span, outside = [], True
elif correct[i] == '1' and outside is False: continue
elif correct[i] == '1' and outside is True:
outside = False
span.append(i)
correct_dict[docid] = spans
f1.write(json.dumps(pred_dict))
f2.write(json.dumps(correct_dict))
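# Illustrative helper (not called by the code in this file): get_results above
# converts a 0/1 label sequence into start/end index pairs; the same logic is
# written out once here to document that span convention. It mirrors the
# original exactly, including closing a span at i+1 and leaving a span that
# runs to the end of the sequence unclosed.
def labels_to_spans(labels):
    spans, span, outside = [], [], True
    for i in range(len(labels)):
        if labels[i] == '1' and outside is True:
            outside = False
            span.append(i)
        elif labels[i] == '0' and outside is False:
            span.append(i+1)
            spans.append(span)
            span, outside = [], True
    return spans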
def get_ind_words():
fin_list = []
for annotype in annotypes:
list = []
#filename = annotype.lower() + '_words.txt'
filename = annotype.lower() + '_unigrams.tsv'
f = open(path + 'crf_files/' + filename, 'r')
for line in f:
#word = line[:-1]
items = line.split('\t')
word = items[1][:-1]
if word not in list:
list.append(word)
if annotype == 'Intervention':
f = open(path + 'crf_files/drug_names.txt', 'r')
for line in f:
word = line[:-1]
if word not in list:
list.append(word)
fin_list.append(list)
indwords = [fin_list[0], fin_list[1], fin_list[2]]
return indwords
#all lowercased
def get_patterns():
fin_list = []
for annotype in annotypes:
list = []
#filename = annotype.lower() + '_pattern_copy.txt'
filename = annotype.lower() + '_trigrams3.tsv'
f = open(path + 'crf_files/' + filename, 'r')
for line in f:
#word = line[:-1].lower()
word = line[:-1].split('\t')
word = word[1]
if word not in list:
list.append(word)
fin_list.append(list)
patterns = [fin_list[0], fin_list[1], fin_list[2]]
return patterns
def isindword(word, annotype, indwords_list):
if annotype == annotypes[0]: list = indwords_list[0]
elif annotype == annotypes[1]: list = indwords_list[1]
else: list = indwords_list[2]
f = open(path + 'crf_files/numbers.txt', 'r')
for line in f:
if line[:-1] in word.lower():
return True
if word.lower() in list or word.lower()[:-1] in list or word.lower()[-3:] in list: return True
else: return False
def ispattern(word, pos, annotype, pattern_list):
if annotype == annotypes[0]: list = pattern_list[0]
elif annotype == annotypes[1]: list = pattern_list[1]
else: list = pattern_list[2]
    for pattern in list:  # search only the patterns selected for this annotype
        if word.lower() in pattern or pos.lower() in pattern: return True
    return False
def word_features(sent, i, indwords_list, pattern_list):
word = sent[i][0]
postag = sent[i][2]
features = ['bias', 'word.lower=' + word.lower(),'word[-3:]=' + word[-3:],
'word[-4:]=' + word[-4:],'word.isupper=%s' % word.isupper(),
'word.istitle=%s' % word.istitle(), 'word.isdigit=%s' % word.isdigit(),
'postag=' + postag, 'isindword=%s' % isindword(word, annotype, indwords_list),
'word[0:4]=' + word[0:4], 'ispattern=%s' % ispattern(word, postag, annotype, pattern_list)]
#prev previous word
if i > 1:
word1 = sent[i-2][0]
postag1 = sent[i-2][2]
features.extend(['-1:word.lower=' + word1.lower(), '-1:word.istitle=%s' % word1.istitle(),
'-1:word.isupper=%s' % word1.isupper(), '-1:postag=' + postag1,
'isindword=%s' % isindword(word1, annotype, indwords_list), 'word[0:4]=' + word[0:4],
'word[-3:]=' + word[-3:]])
#previous word
if i > 0:
word1 = sent[i-1][0]
postag1 = sent[i-1][2]
features.extend(['-1:word.lower=' + word1.lower(), '-1:word.istitle=%s' % word1.istitle(),
'-1:word.isupper=%s' % word1.isupper(), '-1:postag=' + postag1,
'isindword=%s' % isindword(word1, annotype, indwords_list), 'word[0:4]=' + word[0:4],
'word[-3:]=' + word[-3:], 'ispattern=%s' % ispattern(word, postag, annotype, pattern_list)])
else:
features.append('BOS')
#next to next word
if i < len(sent)-2:
word1 = sent[i+2][0]
postag1 = sent[i+2][2]
features.extend(['+1:word.lower=' + word1.lower(), '+1:word.istitle=%s' % word1.istitle(),
'+1:word.isupper=%s' % word1.isupper(), '+1:postag=' + postag1,
'isindword=%s' % isindword(word1, annotype, indwords_list), 'word[0:4]=' + word[0:4],
'word[-3:]=' + word[-3:]])
#next word
if i < len(sent)-1:
word1 = sent[i+1][0]
postag1 = sent[i+1][2]
features.extend(['+1:word.lower=' + word1.lower(), '+1:word.istitle=%s' % word1.istitle(),
'+1:word.isupper=%s' % word1.isupper(), '+1:postag=' + postag1,
'isindword=%s' % isindword(word1, annotype, indwords_list), 'word[0:4]=' + word[0:4],
'word[-3:]=' + word[-3:], 'ispattern=%s' % ispattern(word, postag, annotype, pattern_list)])
else:
features.append('EOS')
return features
def sent_features(sent, indwords_list, patterns_list):
return [word_features(sent, i, indwords_list, patterns_list) for i in range(len(sent))]
def sent_labels(sent):
return [str(p_label) for token, ner, postag, p_label, i_label, o_label in sent]
def sent_tokens(sent):
return [token for token, ner, postag, p_label, i_label, o_label in sent]
def print_results(example_sent, tagger, indwords_list, docid, dict):
pred, correct = tagger.tag(sent_features(example_sent, indwords_list)), sent_labels(example_sent)
spans, span, outside = [], [], True
for i in range(len(pred)):
if pred[i] == '0' and outside is True: continue
elif pred[i] == '0' and outside is False:
span.append(i+1)
spans.append(span)
span, outside = [], True
elif pred[i] == '1' and outside is False: continue
elif pred[i] == '1' and outside is True:
outside = False
span.append(i)
f = open(path + annotype + '-test.json', 'w+')
print '\n\nPredicted: ' + str(spans)
for span in spans:
s = ' '
for i in range(span[0], span[1]):
s += example_sent[i][0] + ' '
print s
spans, span, outside = [], [], True
for i in range(len(correct)):
if correct[i] == '0' and outside is True: continue
elif correct[i] == '0' and outside is False:
span.append(i+1)
spans.append(span)
span, outside = [], True
elif correct[i] == '1' and outside is False: continue
elif correct[i] == '1' and outside is True:
outside = False
span.append(i)
print '\n\nCorrect: ' + str(spans)
for span in spans:
s = ' '
for i in range(span[0], span[1]):
s += example_sent[i][0] + ' '
print s
def get_training_data():
f = open(path + 'crf_files/difficulty_crf_mv.json', 'r')
for line in f:
dict = json.loads(line)
return dict
def get_train_test_sets():
test_docids = []
f = open(path + 'crf_files/gold_docids.txt', 'r')
for line in f:
test_docids.append(line[:-1])
doc_dict = get_training_data()
test_sents, train_sents = {}, {}
count = 0
for docid in doc_dict:
sents = doc_dict[docid]
if len(sents) == 0: continue
count += 1
#if count >= 100: break
if docid not in test_docids:
train_sents[docid] = sents
else:
test_sents[docid] = sents
f = open(path + 'difficulty_new.json', 'r')
for line in f:
doc_dict_new = json.loads(line)
count = 1
for docid in doc_dict_new:
if docid in train_sents.keys(): continue
if count < 9481:
count += 1
continue
train_sents[docid] = doc_dict_new[docid]
count += 1
return train_sents, test_sents
if __name__ == '__main__':
run()
| 38.195572 | 130 | 0.575983 | [
"Apache-2.0"
] | roma-patel/lstm-crf | crf-seq/sets/sets/4/seq_detect_1p.py | 10,351 | Python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
###################################################################
# Author: Mu yanru
# Date : 2018.5
# Email : [email protected]
###################################################################
from dayu_widgets.item_model import MSortFilterModel, MTableModel
from dayu_widgets.item_view import MTableView, MTreeView, MBigView, MListView
from dayu_widgets.line_edit import MLineEdit
from dayu_widgets.tool_button import MToolButton
from dayu_widgets.qt import QWidget, QModelIndex, Signal, QVBoxLayout, QApplication, Qt, Slot, QHBoxLayout
class MItemViewSet(QWidget):
sig_double_clicked = Signal(QModelIndex)
sig_left_clicked = Signal(QModelIndex)
TableViewType = MTableView
BigViewType = MBigView
TreeViewType = MTreeView
ListViewType = MListView
def __init__(self, view_type=None, parent=None):
super(MItemViewSet, self).__init__(parent)
self._main_lay = QVBoxLayout()
self._main_lay.setSpacing(5)
self._main_lay.setContentsMargins(0, 0, 0, 0)
self.sort_filter_model = MSortFilterModel()
self.source_model = MTableModel()
self.sort_filter_model.setSourceModel(self.source_model)
view_class = view_type or MItemViewSet.TableViewType
self.item_view = view_class()
self.item_view.doubleClicked.connect(self.sig_double_clicked)
self.item_view.pressed.connect(self.slot_left_clicked)
self.item_view.setModel(self.sort_filter_model)
self._search_line_edit = MLineEdit().search().small()
self._search_attr_button = MToolButton().icon_only().svg('down_fill.svg').small()
self._search_line_edit.set_prefix_widget(self._search_attr_button)
self._search_line_edit.textChanged.connect(self.sort_filter_model.set_search_pattern)
self._search_line_edit.setVisible(False)
_search_lay = QHBoxLayout()
_search_lay.setContentsMargins(0, 0, 0, 0)
_search_lay.addStretch()
_search_lay.addWidget(self._search_line_edit)
self._main_lay.addLayout(_search_lay)
self._main_lay.addWidget(self.item_view)
self.setLayout(self._main_lay)
@Slot(QModelIndex)
def slot_left_clicked(self, start_index):
button = QApplication.mouseButtons()
if button == Qt.LeftButton:
real_index = self.sort_filter_model.mapToSource(start_index)
self.sig_left_clicked.emit(real_index)
def set_header_list(self, header_list):
self.source_model.set_header_list(header_list)
self.sort_filter_model.set_header_list(header_list)
self.sort_filter_model.setSourceModel(self.source_model)
self.item_view.set_header_list(header_list)
@Slot()
def setup_data(self, data_list):
self.source_model.clear()
if data_list:
self.source_model.set_data_list(data_list)
def get_data(self):
return self.source_model.get_data_list()
def searchable(self):
"""Enable search line edit visible."""
self._search_line_edit.setVisible(True)
return self
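# Illustrative usage sketch (assumptions: the 'label'/'key' fields below match
# what MTableModel.set_header_list expects in this version of dayu_widgets and
# the data rows are plain dicts keyed by 'key'; adjust to the real header
# schema if it differs).
if __name__ == '__main__':
    import sys
    app = QApplication(sys.argv)
    item_view_set = MItemViewSet(view_type=MItemViewSet.TableViewType)
    item_view_set.set_header_list([
        {'label': 'Name', 'key': 'name'},
        {'label': 'Age', 'key': 'age'},
    ])
    item_view_set.searchable()
    item_view_set.setup_data([
        {'name': 'Tom', 'age': 10},
        {'name': 'Jerry', 'age': 8},
    ])
    item_view_set.show()
    sys.exit(app.exec_())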
| 39.493671 | 106 | 0.690705 | [
"MIT"
] | kanbang/dayu_widgets | dayu_widgets/item_view_set.py | 3,120 | Python |
# Generated by Django 2.2 on 2019-05-08 20:45
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
('auth', '0011_update_proxy_permissions'),
]
operations = [
migrations.CreateModel(
name='User',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('password', models.CharField(max_length=128, verbose_name='password')),
('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')),
('is_superuser', models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status')),
('email', models.EmailField(max_length=255, unique=True)),
('name', models.CharField(max_length=255)),
('is_active', models.BooleanField(default=True)),
('is_staff', models.BooleanField(default=False)),
('groups', models.ManyToManyField(blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', related_name='user_set', related_query_name='user', to='auth.Group', verbose_name='groups')),
('user_permissions', models.ManyToManyField(blank=True, help_text='Specific permissions for this user.', related_name='user_set', related_query_name='user', to='auth.Permission', verbose_name='user permissions')),
],
options={
'abstract': False,
},
),
]
| 49.970588 | 266 | 0.637434 | [
"MIT"
] | ing-ivan-31/recipe-app | app/core/migrations/0001_initial.py | 1,699 | Python |
import time
import datetime
import json
import hashlib
from .env import Env
from .server import Server
from .hardware import Hardware
class Metric(object):
def __init__(self):
# format of report data
self._version = '0.1'
self._type = 'metric'
self.run_id = None
self.mode = None
self.server = Server()
self.hardware = Hardware()
self.env = Env()
self.status = "INIT"
self.err_message = ""
self.collection = {}
self.index = {}
self.search = {}
self.run_params = {}
self.metrics = {
"type": "",
"value": None,
}
self.datetime = str(datetime.datetime.now())
def set_run_id(self):
# Get current time as run id, which uniquely identifies this test
self.run_id = int(time.time())
def set_mode(self, mode):
# Set the deployment mode of milvus
self.mode = mode
# including: metric, suite_metric
def set_case_metric_type(self):
self._type = "case"
def json_md5(self):
json_str = json.dumps(vars(self), sort_keys=True)
return hashlib.md5(json_str.encode('utf-8')).hexdigest()
def update_status(self, status):
# Set the final result of the test run: RUN_SUCC or RUN_FAILED
self.status = status
def update_result(self, result):
self.metrics["value"].update(result)
def update_message(self, err_message):
self.err_message = err_message | 27.071429 | 73 | 0.600923 | [
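# Illustrative usage sketch (shown as comments because this module is meant to
# be imported from its package so that the relative imports above resolve; the
# field values below are made up):
#
#   metric = Metric()
#   metric.set_run_id()
#   metric.set_mode("single")
#   metric.metrics = {"type": "search_performance", "value": {}}  # value must be a dict before update_result()
#   metric.update_result({"qps": 1000})
#   metric.update_status("RUN_SUCC")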
"Apache-2.0"
] | NotRyan/milvus | tests/benchmark/milvus_benchmark/metrics/models/metric.py | 1,516 | Python |
from credentials import credentials
import unittest
import pyperclip
class TestUser(unittest.TestCase):
'''
    Test class that defines test cases for the credentials class.
    Args:
        unittest.TestCase: TestCase class that helps in creating test cases.
'''
def setUp(self):
'''
Set up method to run before each test case
'''
self.new_user = credentials("Paul", "123")
def test__init__(self):
'''
test__init__ test case to test if the object is initialized properly
'''
self.assertEqual(self.new_user.user_name, "Paul")
self.assertEqual(self.new_user.password, "123")
def test__save_user(self):
'''
test to see if the user is saved
'''
        self.new_user.save_credentials()
self.assertEqual(len(credentials.user_list), 1)
if __name__ == "__main__":
unittest.main()
| 26.228571 | 84 | 0.62963 | [
"Unlicense"
] | paulmunyao/Password-Locker | credentials_test.py | 918 | Python |
import inspect
import sys
from enum import IntEnum
from pathlib import Path
from time import time
from logging import getLevelName
from typing import Tuple, Union, Any, List, Iterable, TextIO, Optional
from . import logging
from .logging import _set_log_level, _set_log_file, RootLogger
_VERBOSITY_TO_LOGLEVEL = {
'error': 'ERROR',
'warning': 'WARNING',
'info': 'INFO',
'hint': 'HINT',
'debug': 'DEBUG',
}
# Python 3.7 ensures iteration order
for v, level in enumerate(list(_VERBOSITY_TO_LOGLEVEL.values())):
_VERBOSITY_TO_LOGLEVEL[v] = level
class Verbosity(IntEnum):
error = 0
warn = 1
info = 2
hint = 3
debug = 4
@property
def level(self) -> int:
# getLevelName(str) returns the int level…
return getLevelName(_VERBOSITY_TO_LOGLEVEL[self])
def _type_check(var: Any, varname: str, types: Union[type, Tuple[type, ...]]):
if isinstance(var, types):
return
if isinstance(types, type):
possible_types_str = types.__name__
else:
type_names = [t.__name__ for t in types]
possible_types_str = "{} or {}".format(
", ".join(type_names[:-1]), type_names[-1]
)
raise TypeError(f"{varname} must be of type {possible_types_str}")
class ScanpyConfig:
"""Config manager for scanpy.
"""
def __init__(
self,
*,
verbosity: str = "warning",
plot_suffix: str = "",
file_format_data: str = "h5ad",
file_format_figs: str = "pdf",
autosave: bool = False,
autoshow: bool = True,
writedir: Union[str, Path] = "./write/",
cachedir: Union[str, Path] = "./cache/",
datasetdir: Union[str, Path] = "./data/",
figdir: Union[str, Path] = "./figures/",
max_memory=15,
n_jobs=1,
logfile: Union[str, Path, None] = None,
categories_to_ignore: Iterable[str] = ("N/A", "dontknow", "no_gate", "?"),
_frameon: bool = True,
_vector_friendly: bool = False,
_low_resolution_warning: bool = True,
):
# logging
self._root_logger = RootLogger(logging.INFO) # level will be replaced
self.logfile = logfile
self.verbosity = verbosity
# rest
self.plot_suffix = plot_suffix
self.file_format_data = file_format_data
self.file_format_figs = file_format_figs
self.autosave = autosave
self.autoshow = autoshow
self.writedir = writedir
self.cachedir = cachedir
self.datasetdir = datasetdir
self.figdir = figdir
self.max_memory = max_memory
self.n_jobs = n_jobs
self.categories_to_ignore = categories_to_ignore
self._frameon = _frameon
"""bool: See set_figure_params."""
self._vector_friendly = _vector_friendly
"""Set to true if you want to include pngs in svgs and pdfs."""
self._low_resolution_warning = _low_resolution_warning
"""Print warning when saving a figure with low resolution."""
self._start = time()
"""Time when the settings module is first imported."""
self._previous_time = self._start
"""Variable for timing program parts."""
self._previous_memory_usage = -1
"""Stores the previous memory usage."""
@property
def verbosity(self) -> Verbosity:
"""
Verbosity level (default `warning`)
Level 0: only show 'error' messages.
Level 1: also show 'warning' messages.
Level 2: also show 'info' messages.
Level 3: also show 'hint' messages.
Level 4: also show very detailed progress for 'debug'ging.
"""
return self._verbosity
@verbosity.setter
def verbosity(self, verbosity: Union[Verbosity, int, str]):
verbosity_str_options = [
v for v in _VERBOSITY_TO_LOGLEVEL
if isinstance(v, str)
]
if isinstance(verbosity, Verbosity):
self._verbosity = verbosity
elif isinstance(verbosity, int):
self._verbosity = Verbosity(verbosity)
elif isinstance(verbosity, str):
verbosity = verbosity.lower()
if verbosity not in verbosity_str_options:
raise ValueError(
f"Cannot set verbosity to {verbosity}. "
f"Accepted string values are: {verbosity_str_options}"
)
else:
self._verbosity = Verbosity(verbosity_str_options.index(verbosity))
else:
_type_check(verbosity, "verbosity", (str, int))
_set_log_level(self, _VERBOSITY_TO_LOGLEVEL[self._verbosity])
@property
def plot_suffix(self) -> str:
"""Global suffix that is appended to figure filenames.
"""
return self._plot_suffix
@plot_suffix.setter
def plot_suffix(self, plot_suffix: str):
_type_check(plot_suffix, "plot_suffix", str)
self._plot_suffix = plot_suffix
@property
def file_format_data(self) -> str:
"""File format for saving AnnData objects.
Allowed are 'txt', 'csv' (comma separated value file) for exporting and 'h5ad'
(hdf5) for lossless saving.
"""
return self._file_format_data
@file_format_data.setter
def file_format_data(self, file_format: str):
_type_check(file_format, "file_format_data", str)
file_format_options = {"txt", "csv", "h5ad"}
if file_format not in file_format_options:
raise ValueError(
f"Cannot set file_format_data to {file_format}. "
f"Must be one of {file_format_options}"
)
self._file_format_data = file_format
@property
def file_format_figs(self) -> str:
"""File format for saving figures.
For example 'png', 'pdf' or 'svg'. Many other formats work as well (see
`matplotlib.pyplot.savefig`).
"""
return self._file_format_figs
@file_format_figs.setter
def file_format_figs(self, figure_format: str):
_type_check(figure_format, "figure_format_data", str)
self._file_format_figs = figure_format
@property
def autosave(self) -> bool:
"""\
Automatically save figures in :attr:`~scanpy._settings.ScanpyConfig.figdir` (default `False`).
Do not show plots/figures interactively.
"""
return self._autosave
@autosave.setter
def autosave(self, autosave: bool):
_type_check(autosave, "autosave", bool)
self._autosave = autosave
@property
def autoshow(self) -> bool:
"""\
Automatically show figures if `autosave == False` (default `True`).
There is no need to call the matplotlib pl.show() in this case.
"""
return self._autoshow
@autoshow.setter
def autoshow(self, autoshow: bool):
_type_check(autoshow, "autoshow", bool)
self._autoshow = autoshow
@property
def writedir(self) -> Path:
"""\
Directory where the function scanpy.write writes to by default.
"""
return self._writedir
@writedir.setter
def writedir(self, writedir: Union[str, Path]):
_type_check(writedir, "writedir", (str, Path))
self._writedir = Path(writedir)
@property
def cachedir(self) -> Path:
"""\
Directory for cache files (default `'./cache/'`).
"""
return self._cachedir
@cachedir.setter
def cachedir(self, cachedir: Union[str, Path]):
_type_check(cachedir, "cachedir", (str, Path))
self._cachedir = Path(cachedir)
@property
def datasetdir(self) -> Path:
"""\
Directory for example :mod:`~scanpy.datasets` (default `'./data/'`).
"""
return self._datasetdir
@datasetdir.setter
def datasetdir(self, datasetdir: Union[str, Path]):
_type_check(datasetdir, "datasetdir", (str, Path))
self._datasetdir = Path(datasetdir).resolve()
@property
def figdir(self) -> Path:
"""\
Directory for saving figures (default `'./figures/'`).
"""
return self._figdir
@figdir.setter
def figdir(self, figdir: Union[str, Path]):
_type_check(figdir, "figdir", (str, Path))
self._figdir = Path(figdir)
@property
def max_memory(self) -> Union[int, float]:
"""\
Maximal memory usage in Gigabyte.
Is currently not well respected....
"""
return self._max_memory
@max_memory.setter
def max_memory(self, max_memory: Union[int, float]):
_type_check(max_memory, "max_memory", (int, float))
self._max_memory = max_memory
@property
def n_jobs(self) -> int:
"""\
Default number of jobs/ CPUs to use for parallel computing.
"""
return self._n_jobs
@n_jobs.setter
def n_jobs(self, n_jobs: int):
_type_check(n_jobs, "n_jobs", int)
self._n_jobs = n_jobs
@property
def logpath(self) -> Optional[Path]:
"""\
The file path `logfile` was set to.
"""
return self._logpath
@logpath.setter
def logpath(self, logpath: Union[str, Path, None]):
_type_check(logpath, "logfile", (str, Path))
# set via “file object” branch of logfile.setter
self.logfile = Path(logpath).open('a')
self._logpath = Path(logpath)
@property
def logfile(self) -> TextIO:
"""\
The open file to write logs to.
Set it to a :class:`~pathlib.Path` or :class:`str` to open a new one.
The default `None` corresponds to :obj:`sys.stdout` in jupyter notebooks
and to :obj:`sys.stderr` otherwise.
For backwards compatibility, setting it to `''` behaves like setting it to `None`.
"""
return self._logfile
@logfile.setter
def logfile(self, logfile: Union[str, Path, TextIO, None]):
if not hasattr(logfile, 'write') and logfile:
self.logpath = logfile
else: # file object
if not logfile: # None or ''
logfile = sys.stdout if self._is_run_from_ipython() else sys.stderr
self._logfile = logfile
self._logpath = None
_set_log_file(self)
@property
def categories_to_ignore(self) -> List[str]:
"""\
Categories that are omitted in plotting etc.
"""
return self._categories_to_ignore
@categories_to_ignore.setter
def categories_to_ignore(self, categories_to_ignore: Iterable[str]):
categories_to_ignore = list(categories_to_ignore)
for i, cat in enumerate(categories_to_ignore):
_type_check(cat, f"categories_to_ignore[{i}]", str)
self._categories_to_ignore = categories_to_ignore
# --------------------------------------------------------------------------------
# Functions
# --------------------------------------------------------------------------------
def set_figure_params(
self,
scanpy: bool = True,
dpi: int = 80,
dpi_save: int = 150,
frameon: bool = True,
vector_friendly: bool = True,
fontsize: int = 14,
color_map: Optional[str] = None,
format: Union[str, Iterable[str]] = "pdf",
transparent: bool = False,
ipython_format: str = "png2x",
):
"""\
Set resolution/size, styling and format of figures.
Parameters
----------
scanpy
Init default values for :obj:`matplotlib.rcParams` suited for Scanpy.
dpi
Resolution of rendered figures - this influences the size of figures in notebooks.
dpi_save
Resolution of saved figures. This should typically be higher to achieve
publication quality.
frameon
Add frames and axes labels to scatter plots.
vector_friendly
Plot scatter plots using `png` backend even when exporting as `pdf` or `svg`.
fontsize
Set the fontsize for several `rcParams` entries. Ignored if `scanpy=False`.
color_map
Convenience method for setting the default color map. Ignored if `scanpy=False`.
format: {`'png'`, `'pdf'`, `'svg'`, etc.}, optional (default: `'pdf'`)
This sets the default format for saving figures: `file_format_figs`.
transparent
Save figures with transparent back ground. Sets
`rcParams['savefig.transparent']`.
ipython_format
Only concerns the notebook/IPython environment; see
:func:`~IPython.display.set_matplotlib_formats` for details.
"""
try:
import IPython
if isinstance(ipython_format, str):
ipython_format = [ipython_format]
IPython.display.set_matplotlib_formats(*ipython_format)
except Exception:
pass
from matplotlib import rcParams
self._vector_friendly = vector_friendly
self.file_format_figs = format
if dpi is not None:
rcParams["figure.dpi"] = dpi
if dpi_save is not None:
rcParams["savefig.dpi"] = dpi_save
if transparent is not None:
rcParams["savefig.transparent"] = transparent
if scanpy:
from .plotting._rcmod import set_rcParams_scanpy
set_rcParams_scanpy(fontsize=fontsize, color_map=color_map)
self._frameon = frameon
@staticmethod
def _is_run_from_ipython():
"""Determines whether run from Ipython.
Only affects progress bars.
"""
try:
__IPYTHON__
return True
except NameError:
return False
def __str__(self) -> str:
return '\n'.join(
f'{k} = {v!r}'
for k, v in inspect.getmembers(self)
if not k.startswith("_") and not k == 'getdoc'
)
settings = ScanpyConfig()
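# Illustrative usage sketch: downstream code normally mutates this module-level
# ``settings`` object (exposed as ``scanpy.settings``) instead of instantiating
# ScanpyConfig itself, e.g.
#
#   import scanpy as sc
#   sc.settings.verbosity = 'hint'            # accepts a Verbosity, an int or a string
#   sc.settings.figdir = './my_figures/'
#   sc.settings.set_figure_params(dpi=100, frameon=False, format='png')
#
# (the values above are only examples)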
| 32.397229 | 102 | 0.597662 | [
"BSD-3-Clause"
] | gamazeps/scanpy | scanpy/_settings.py | 14,034 | Python |
VERSION = 'v0.0.1' | 18 | 18 | 0.611111 | [
"MIT"
] | shi1412/pyspark-olap-pipeline | jobs/version.py | 18 | Python |
# -*- coding: utf-8 -*-
#
# Configuration file for the Sphinx documentation builder.
#
# This file does only contain a selection of the most common options. For a
# full list see the documentation:
# http://www.sphinx-doc.org/en/master/config
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
sys.path.insert(0, os.path.abspath('../../'))
# -- Project information -----------------------------------------------------
project = 'DeepCTR'
copyright = '2017-present, Weichen Shen'
author = 'Weichen Shen'
# The short X.Y version
version = ''
# The full version, including alpha/beta/rc tags
release = '0.7.4'
# -- General configuration ---------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.mathjax',
'sphinx.ext.ifconfig',
'sphinx.ext.viewcode',
'sphinx.ext.githubpages',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
source_suffix = ['.rst', '.md']
#source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path .
exclude_patterns = []
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# The default sidebars (for documents that don't match any pattern) are
# defined by theme itself. Builtin themes are using these templates by
# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
# 'searchbox.html']``.
#
# html_sidebars = {}
# -- Options for HTMLHelp output ---------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'DeepCTRdoc'
# -- Options for LaTeX output ------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'DeepCTR.tex', 'DeepCTR Documentation',
'Weichen Shen', 'manual'),
]
# -- Options for manual page output ------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'deepctr', 'DeepCTR Documentation',
[author], 1)
]
# -- Options for Texinfo output ----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'DeepCTR', 'DeepCTR Documentation',
author, 'DeepCTR', 'One line description of project.',
'Miscellaneous'),
]
# -- Extension configuration -------------------------------------------------
todo_include_todos = False
html_theme = 'sphinx_rtd_theme'
source_parsers = {
'.md': 'recommonmark.parser.CommonMarkParser',
}
| 29.682353 | 79 | 0.649822 | [
"Apache-2.0"
] | Lu1352/DeepCTR | docs/source/conf.py | 5,046 | Python |
from typing import Optional
import torch
from torch import Tensor
@torch.jit._overload # noqa
def fps(src, batch=None, ratio=None, random_start=True): # noqa
# type: (Tensor, Optional[Tensor], Optional[float], bool) -> Tensor
pass # pragma: no cover
@torch.jit._overload # noqa
def fps(src, batch=None, ratio=None, random_start=True): # noqa
# type: (Tensor, Optional[Tensor], Optional[Tensor], bool) -> Tensor
pass # pragma: no cover
def fps(src: torch.Tensor, batch=None, ratio=None, random_start=True): # noqa
r""""A sampling algorithm from the `"PointNet++: Deep Hierarchical Feature
Learning on Point Sets in a Metric Space"
<https://arxiv.org/abs/1706.02413>`_ paper, which iteratively samples the
most distant point with regard to the rest points.
Args:
src (Tensor): Point feature matrix
:math:`\mathbf{X} \in \mathbb{R}^{N \times F}`.
batch (LongTensor, optional): Batch vector
:math:`\mathbf{b} \in {\{ 0, \ldots, B-1\}}^N`, which assigns each
node to a specific example. (default: :obj:`None`)
ratio (float or Tensor, optional): Sampling ratio.
(default: :obj:`0.5`)
random_start (bool, optional): If set to :obj:`False`, use the first
            node in :math:`\mathbf{X}` as starting node. (default: :obj:`True`)
:rtype: :class:`LongTensor`
.. code-block:: python
import torch
from torch_cluster import fps
src = torch.Tensor([[-1, -1], [-1, 1], [1, -1], [1, 1]])
batch = torch.tensor([0, 0, 0, 0])
index = fps(src, batch, ratio=0.5)
"""
r: Optional[Tensor] = None
if ratio is None:
r = torch.tensor(0.5, dtype=src.dtype, device=src.device)
elif isinstance(ratio, float):
r = torch.tensor(ratio, dtype=src.dtype, device=src.device)
else:
r = ratio
assert r is not None
if batch is not None:
assert src.size(0) == batch.numel()
batch_size = int(batch.max()) + 1
deg = src.new_zeros(batch_size, dtype=torch.long)
deg.scatter_add_(0, batch, torch.ones_like(batch))
ptr = deg.new_zeros(batch_size + 1)
torch.cumsum(deg, 0, out=ptr[1:])
else:
ptr = torch.tensor([0, src.size(0)], device=src.device)
return torch.ops.torch_cluster.fps(src, ptr, r, random_start)
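# Illustrative note (not part of the public API): when ``batch`` is given, the
# pointer vector built above is just the cumulative sum of per-example sizes,
# so example i owns src[ptr[i]:ptr[i + 1]]. For example:
#
#   batch = torch.tensor([0, 0, 0, 1, 1])   # 3 points in example 0, 2 in example 1
#   deg   = [3, 2]
#   ptr   = [0, 3, 5]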
| 33.43662 | 78 | 0.617102 | [
"MIT"
] | Hacky-DH/pytorch_cluster | torch_cluster/fps.py | 2,374 | Python |
import pytest
import click
from click.testing import CliRunner
from click._compat import PY2
# Use the most reasonable io that users would use for the python version.
if PY2:
from cStringIO import StringIO as ReasonableBytesIO
else:
from io import BytesIO as ReasonableBytesIO
def test_runner():
@click.command()
def test():
i = click.get_binary_stream('stdin')
o = click.get_binary_stream('stdout')
while 1:
chunk = i.read(4096)
if not chunk:
break
o.write(chunk)
o.flush()
runner = CliRunner()
result = runner.invoke(test, input='Hello World!\n')
assert not result.exception
assert result.output == 'Hello World!\n'
runner = CliRunner(echo_stdin=True)
result = runner.invoke(test, input='Hello World!\n')
assert not result.exception
assert result.output == 'Hello World!\nHello World!\n'
def test_runner_with_stream():
@click.command()
def test():
i = click.get_binary_stream('stdin')
o = click.get_binary_stream('stdout')
while 1:
chunk = i.read(4096)
if not chunk:
break
o.write(chunk)
o.flush()
runner = CliRunner()
result = runner.invoke(test, input=ReasonableBytesIO(b'Hello World!\n'))
assert not result.exception
assert result.output == 'Hello World!\n'
runner = CliRunner(echo_stdin=True)
result = runner.invoke(test, input=ReasonableBytesIO(b'Hello World!\n'))
assert not result.exception
assert result.output == 'Hello World!\nHello World!\n'
def test_prompts():
@click.command()
@click.option('--foo', prompt=True)
def test(foo):
click.echo('foo=%s' % foo)
runner = CliRunner()
result = runner.invoke(test, input='wau wau\n')
assert not result.exception
assert result.output == 'Foo: wau wau\nfoo=wau wau\n'
@click.command()
@click.option('--foo', prompt=True, hide_input=True)
def test(foo):
click.echo('foo=%s' % foo)
runner = CliRunner()
result = runner.invoke(test, input='wau wau\n')
assert not result.exception
assert result.output == 'Foo: \nfoo=wau wau\n'
def test_getchar():
@click.command()
def continue_it():
click.echo(click.getchar())
runner = CliRunner()
result = runner.invoke(continue_it, input='y')
assert not result.exception
assert result.output == 'y\n'
def test_catch_exceptions():
class CustomError(Exception):
pass
@click.command()
def cli():
raise CustomError(1)
runner = CliRunner()
result = runner.invoke(cli)
assert isinstance(result.exception, CustomError)
assert type(result.exc_info) is tuple
assert len(result.exc_info) == 3
with pytest.raises(CustomError):
runner.invoke(cli, catch_exceptions=False)
CustomError = SystemExit
result = runner.invoke(cli)
assert result.exit_code == 1
| 25.810345 | 76 | 0.637609 | [
"BSD-3-Clause"
] | ANKIT-KS/fjord | vendor/packages/click/tests/test_testing.py | 2,994 | Python |
''' SPEECH-TO-TEXT USING MICROSOFT SPEECH API '''
''' [email protected] '''
# Import required packages
import os
import glob
import json
import logging
import codecs
import helper as he
import azure.cognitiveservices.speech as speechsdk
import params as pa
# Load and set configuration parameters
pa.get_config()
def request_endpoint(audio, speech_config, output_directory, lexical):
"""Request the speech service endpoint
Args:
audio: Input data frame
speech_config: Choice between scoring and
output_folder: LUIS app ID
case: LUIS subscription key
lexical: Minimum confidence score for LUIS result, between 0.00 and 1.00
Returns:
df: Scoring data frame with predicted intents and scores
Raises:
ConnectionError: If file is not found
"""
audio_config = speechsdk.audio.AudioConfig(filename = audio)
speech_recognizer = speechsdk.SpeechRecognizer(speech_config = speech_config, audio_config = audio_config)
result = speech_recognizer.recognize_once()
filename = audio[audio.rindex('\\')+1:]
text = process_recognition(result, filename, output_directory, lexical)
return text, filename
def process_recognition(result, filename, output_directory, lexical):
"""Process recognition received from the speech service
Args:
result: Result object returned by STT-service
filename: Filename for output file
output_directory: Output directory for the file
lexical: Boolean to enable extended lexical version of STT-result
Returns:
text: Processed recognition as string
"""
if result.reason == speechsdk.ResultReason.RecognizedSpeech:
if lexical:
text = f"{format(result.text)}\t{json.loads(result.json)['NBest'][0]['Lexical']}"
else:
text = f"{format(result.text)}"
logging.info(f"[INFO] - Recognition successful: {filename} -> {result.text}")
elif result.reason == speechsdk.ResultReason.NoMatch:
logging.warning(filename + "\t" + f"No speech could be recognized: {result.no_match_details}")
text = ""
elif result.reason == speechsdk.ResultReason.Canceled:
cancellation_details = result.cancellation_details
logging.error(filename+"\t"+ f"Speech Recognition canceled: {cancellation_details.reason}")
if cancellation_details.reason == speechsdk.CancellationReason.Error:
logging.error(f"Error details: {cancellation_details.error_details}")
text = ""
return text
# General Function
def write_transcription(output_directory, text):
"""Write transcription to file
Args:
text: Processed recognition as string
output_directory: Output directory for the file
Returns:
Writes output to file
"""
if not os.path.exists(f'{output_directory}/transcriptions.txt'):
transfile = codecs.open(f'{output_directory}/transcriptions.txt', 'w', encoding='utf-8-sig')
transfile.close()
        logging.info(f'[INFO] - Created transcript file with utf-8 bom encoding.')
with open(f"{output_directory}/transcriptions.txt", "a", encoding='utf-8-sig') as transfile:
transfile.write(f'{text}\n')
transfile.close()
def main(speech_files, output_directory, lexical = False, enable_proxy = False, *argv):
"""Main function for STT-functionality
Args:
speech_files: Directory of audio files to be transcribed
output_directory: Output directory for the file
lexical: Boolean to enable extended lexical version of STT-result
enable_proxy: Boolean to enable proxy function in case you need it
*argv: Proxy information if enable_proxy is True -> hostname: str, port: str, username: str, password: str
Returns:
zip(filenames, results): Zipped lists of filenames and STT-results as string
"""
try:
speech_config = speechsdk.SpeechConfig(subscription = pa.config_data['stt_key'], region = pa.config_data['stt_region'])
except RuntimeError:
logging.error("[ERROR] - Could not retrieve speech config")
# If necessary, you can enable a proxy here:
# set_proxy(hostname: str, port: str, username: str, password: str)
if enable_proxy:
speech_config.set_proxy(argv[0], argv[1], argv[2], argv[3])
# Set speech service properties, requesting the detailed response format to make it compatible with lexical format, if wanted
speech_config.set_service_property(name='format', value='detailed', channel=speechsdk.ServicePropertyChannel.UriQueryParameter)
if pa.config_data['stt_endpoint'] != "":
speech_config.endpoint_id = pa.config_data['stt_endpoint']
logging.info(f'[INFO] - Starting to transcribe {len(next(os.walk(speech_files))[2])} audio files')
results = []
filenames = []
for audio in glob.iglob(f'{speech_files}*av'):
result, filename = request_endpoint(audio, speech_config, output_directory, lexical)
results.append(result)
filenames.append(filename)
# Check the result
return zip(filenames, results)
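def _example_usage():
    # Illustrative sketch only (not part of the original script); the input and
    # output paths are hypothetical. main() returns (filename, transcription)
    # pairs, which can be persisted with write_transcription().
    for filename, text in main("input/audio/", "output/test/", lexical=True):
        write_transcription("output/test/", f"{filename}\t{text}")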
if __name__ == '__main__':
main("input/audio/", "output/test/") | 45.008696 | 131 | 0.701893 | [
"MIT"
] | microsoft/SpeechServices | src/stt.py | 5,176 | Python |
# Generated by Django 3.2.8 on 2021-11-29 09:01
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('budget', '0004_auto_20211125_1330'),
]
operations = [
migrations.DeleteModel(
name='VehicleLog',
),
]
| 17.529412 | 47 | 0.607383 | [
"MIT"
] | MadeleenRoestorff/django_budget | django_budget/budget/migrations/0005_delete_vehiclelog.py | 298 | Python |
#!/usr/bin/env python3
# Copyright 2021 Xiaomi Corp. (authors: Fangjun Kuang
# Mingshuang Luo)
#
# See ../../../../LICENSE for clarification regarding multiple authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import logging
from pathlib import Path
from shutil import copyfile
from typing import Optional, Tuple
import k2
import torch
import torch.multiprocessing as mp
import torch.nn as nn
import torch.optim as optim
from asr_datamodule import LibriSpeechAsrDataModule
from lhotse.utils import fix_random_seed
from model import TdnnLstm
from torch import Tensor
from torch.nn.parallel import DistributedDataParallel as DDP
from torch.nn.utils import clip_grad_norm_
from torch.optim.lr_scheduler import StepLR
from torch.utils.tensorboard import SummaryWriter
from icefall.checkpoint import load_checkpoint
from icefall.checkpoint import save_checkpoint as save_checkpoint_impl
from icefall.dist import cleanup_dist, setup_dist
from icefall.graph_compiler import CtcTrainingGraphCompiler
from icefall.lexicon import Lexicon
from icefall.utils import (
AttributeDict,
MetricsTracker,
encode_supervisions,
get_env_info,
setup_logger,
str2bool,
)
def get_parser():
parser = argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
parser.add_argument(
"--world-size",
type=int,
default=1,
help="Number of GPUs for DDP training.",
)
parser.add_argument(
"--master-port",
type=int,
default=12354,
help="Master port to use for DDP training.",
)
parser.add_argument(
"--tensorboard",
type=str2bool,
default=True,
help="Should various information be logged in tensorboard.",
)
parser.add_argument(
"--num-epochs",
type=int,
default=20,
help="Number of epochs to train.",
)
parser.add_argument(
"--start-epoch",
type=int,
default=0,
help="""Resume training from from this epoch.
If it is positive, it will load checkpoint from
tdnn_lstm_ctc/exp/epoch-{start_epoch-1}.pt
""",
)
return parser
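# Example invocation (illustrative; the script path is an assumption, the flags
# correspond to the parser defined above):
#
#   python tdnn_lstm_ctc/train.py --world-size 1 --num-epochs 20 --start-epoch 0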
def get_params() -> AttributeDict:
"""Return a dict containing training parameters.
All training related parameters that are not passed from the commandline
    are saved in the variable `params`.
Commandline options are merged into `params` after they are parsed, so
you can also access them via `params`.
Explanation of options saved in `params`:
- exp_dir: It specifies the directory where all training related
files, e.g., checkpoints, log, etc, are saved
- lang_dir: It contains language related input files such as
"lexicon.txt"
- lr: It specifies the initial learning rate
- feature_dim: The model input dim. It has to match the one used
in computing features.
- weight_decay: The weight_decay for the optimizer.
- subsampling_factor: The subsampling factor for the model.
- best_train_loss: Best training loss so far. It is used to select
the model that has the lowest training loss. It is
updated during the training.
- best_valid_loss: Best validation loss so far. It is used to select
the model that has the lowest validation loss. It is
updated during the training.
- best_train_epoch: It is the epoch that has the best training loss.
- best_valid_epoch: It is the epoch that has the best validation loss.
- batch_idx_train: Used to writing statistics to tensorboard. It
contains number of batches trained so far across
epochs.
        - log_interval:  Print training loss if `batch_idx % log_interval` is 0
        - reset_interval: Reset statistics if `batch_idx % reset_interval` is 0
        - valid_interval:  Run validation if `batch_idx % valid_interval` is 0
- beam_size: It is used in k2.ctc_loss
- reduction: It is used in k2.ctc_loss
- use_double_scores: It is used in k2.ctc_loss
"""
params = AttributeDict(
{
"exp_dir": Path("tdnn_lstm_ctc/exp"),
"lang_dir": Path("data/lang_phone"),
"lr": 1e-3,
"feature_dim": 80,
"weight_decay": 5e-4,
"subsampling_factor": 3,
"best_train_loss": float("inf"),
"best_valid_loss": float("inf"),
"best_train_epoch": -1,
"best_valid_epoch": -1,
"batch_idx_train": 0,
"log_interval": 10,
"reset_interval": 200,
"valid_interval": 1000,
"beam_size": 10,
"reduction": "sum",
"use_double_scores": True,
"env_info": get_env_info(),
}
)
return params
def load_checkpoint_if_available(
params: AttributeDict,
model: nn.Module,
optimizer: Optional[torch.optim.Optimizer] = None,
scheduler: Optional[torch.optim.lr_scheduler._LRScheduler] = None,
) -> None:
"""Load checkpoint from file.
If params.start_epoch is positive, it will load the checkpoint from
`params.start_epoch - 1`. Otherwise, this function does nothing.
Apart from loading state dict for `model`, `optimizer` and `scheduler`,
it also updates `best_train_epoch`, `best_train_loss`, `best_valid_epoch`,
and `best_valid_loss` in `params`.
Args:
params:
The return value of :func:`get_params`.
model:
The training model.
optimizer:
The optimizer that we are using.
scheduler:
The learning rate scheduler we are using.
Returns:
Return None.
"""
if params.start_epoch <= 0:
return
filename = params.exp_dir / f"epoch-{params.start_epoch-1}.pt"
saved_params = load_checkpoint(
filename,
model=model,
optimizer=optimizer,
scheduler=scheduler,
)
keys = [
"best_train_epoch",
"best_valid_epoch",
"batch_idx_train",
"best_train_loss",
"best_valid_loss",
]
for k in keys:
params[k] = saved_params[k]
return saved_params
def save_checkpoint(
params: AttributeDict,
model: nn.Module,
optimizer: torch.optim.Optimizer,
scheduler: torch.optim.lr_scheduler._LRScheduler,
rank: int = 0,
) -> None:
"""Save model, optimizer, scheduler and training stats to file.
Args:
params:
It is returned by :func:`get_params`.
model:
The training model.
"""
if rank != 0:
return
filename = params.exp_dir / f"epoch-{params.cur_epoch}.pt"
save_checkpoint_impl(
filename=filename,
model=model,
params=params,
optimizer=optimizer,
scheduler=scheduler,
rank=rank,
)
if params.best_train_epoch == params.cur_epoch:
best_train_filename = params.exp_dir / "best-train-loss.pt"
copyfile(src=filename, dst=best_train_filename)
if params.best_valid_epoch == params.cur_epoch:
best_valid_filename = params.exp_dir / "best-valid-loss.pt"
copyfile(src=filename, dst=best_valid_filename)
def compute_loss(
params: AttributeDict,
model: nn.Module,
batch: dict,
graph_compiler: CtcTrainingGraphCompiler,
is_training: bool,
) -> Tuple[Tensor, MetricsTracker]:
"""
Compute CTC loss given the model and its inputs.
Args:
params:
Parameters for training. See :func:`get_params`.
model:
The model for training. It is an instance of TdnnLstm in our case.
batch:
A batch of data. See `lhotse.dataset.K2SpeechRecognitionDataset()`
for the content in it.
graph_compiler:
It is used to build a decoding graph from a ctc topo and training
transcript. The training transcript is contained in the given `batch`,
while the ctc topo is built when this compiler is instantiated.
is_training:
True for training. False for validation. When it is True, this
function enables autograd during computation; when it is False, it
disables autograd.
"""
device = graph_compiler.device
feature = batch["inputs"]
# at entry, feature is (N, T, C)
feature = feature.permute(0, 2, 1) # now feature is (N, C, T)
assert feature.ndim == 3
feature = feature.to(device)
with torch.set_grad_enabled(is_training):
nnet_output = model(feature)
# nnet_output is (N, T, C)
# NOTE: We need `encode_supervisions` to sort sequences with
# different duration in decreasing order, required by
# `k2.intersect_dense` called in `k2.ctc_loss`
supervisions = batch["supervisions"]
supervision_segments, texts = encode_supervisions(
supervisions, subsampling_factor=params.subsampling_factor
)
decoding_graph = graph_compiler.compile(texts)
dense_fsa_vec = k2.DenseFsaVec(
nnet_output,
supervision_segments,
allow_truncate=params.subsampling_factor - 1,
)
loss = k2.ctc_loss(
decoding_graph=decoding_graph,
dense_fsa_vec=dense_fsa_vec,
output_beam=params.beam_size,
reduction=params.reduction,
use_double_scores=params.use_double_scores,
)
assert loss.requires_grad == is_training
info = MetricsTracker()
info["frames"] = supervision_segments[:, 2].sum().item()
info["loss"] = loss.detach().cpu().item()
return loss, info
def compute_validation_loss(
params: AttributeDict,
model: nn.Module,
graph_compiler: CtcTrainingGraphCompiler,
valid_dl: torch.utils.data.DataLoader,
world_size: int = 1,
) -> MetricsTracker:
"""Run the validation process. The validation loss
is saved in `params.valid_loss`.
"""
model.eval()
tot_loss = MetricsTracker()
for batch_idx, batch in enumerate(valid_dl):
loss, loss_info = compute_loss(
params=params,
model=model,
batch=batch,
graph_compiler=graph_compiler,
is_training=False,
)
assert loss.requires_grad is False
tot_loss = tot_loss + loss_info
if world_size > 1:
tot_loss.reduce(loss.device)
loss_value = tot_loss["loss"] / tot_loss["frames"]
if loss_value < params.best_valid_loss:
params.best_valid_epoch = params.cur_epoch
params.best_valid_loss = loss_value
return tot_loss
def train_one_epoch(
params: AttributeDict,
model: nn.Module,
optimizer: torch.optim.Optimizer,
graph_compiler: CtcTrainingGraphCompiler,
train_dl: torch.utils.data.DataLoader,
valid_dl: torch.utils.data.DataLoader,
tb_writer: Optional[SummaryWriter] = None,
world_size: int = 1,
) -> None:
"""Train the model for one epoch.
The training loss from the mean of all frames is saved in
`params.train_loss`. It runs the validation process every
`params.valid_interval` batches.
Args:
params:
It is returned by :func:`get_params`.
model:
The model for training.
optimizer:
The optimizer we are using.
graph_compiler:
It is used to convert transcripts to FSAs.
train_dl:
Dataloader for the training dataset.
valid_dl:
Dataloader for the validation dataset.
tb_writer:
Writer to write log messages to tensorboard.
world_size:
Number of nodes in DDP training. If it is 1, DDP is disabled.
"""
model.train()
tot_loss = MetricsTracker()
for batch_idx, batch in enumerate(train_dl):
params.batch_idx_train += 1
batch_size = len(batch["supervisions"]["text"])
loss, loss_info = compute_loss(
params=params,
model=model,
batch=batch,
graph_compiler=graph_compiler,
is_training=True,
)
# summary stats.
tot_loss = (tot_loss * (1 - 1 / params.reset_interval)) + loss_info
optimizer.zero_grad()
loss.backward()
clip_grad_norm_(model.parameters(), 5.0, 2.0)
optimizer.step()
if batch_idx % params.log_interval == 0:
logging.info(
f"Epoch {params.cur_epoch}, "
f"batch {batch_idx}, loss[{loss_info}], "
f"tot_loss[{tot_loss}], batch size: {batch_size}"
)
if batch_idx % params.log_interval == 0:
if tb_writer is not None:
loss_info.write_summary(
tb_writer, "train/current_", params.batch_idx_train
)
tot_loss.write_summary(
tb_writer, "train/tot_", params.batch_idx_train
)
if batch_idx > 0 and batch_idx % params.valid_interval == 0:
valid_info = compute_validation_loss(
params=params,
model=model,
graph_compiler=graph_compiler,
valid_dl=valid_dl,
world_size=world_size,
)
model.train()
logging.info(f"Epoch {params.cur_epoch}, validation {valid_info}")
if tb_writer is not None:
valid_info.write_summary(
tb_writer,
"train/valid_",
params.batch_idx_train,
)
loss_value = tot_loss["loss"] / tot_loss["frames"]
params.train_loss = loss_value
if params.train_loss < params.best_train_loss:
params.best_train_epoch = params.cur_epoch
params.best_train_loss = params.train_loss
def run(rank, world_size, args):
"""
Args:
rank:
It is a value between 0 and `world_size-1`, which is
passed automatically by `mp.spawn()` in :func:`main`.
The node with rank 0 is responsible for saving checkpoint.
world_size:
Number of GPUs for DDP training.
args:
The return value of get_parser().parse_args()
"""
params = get_params()
params.update(vars(args))
fix_random_seed(42)
if world_size > 1:
setup_dist(rank, world_size, params.master_port)
setup_logger(f"{params.exp_dir}/log/log-train")
logging.info("Training started")
logging.info(params)
if args.tensorboard and rank == 0:
tb_writer = SummaryWriter(log_dir=f"{params.exp_dir}/tensorboard")
else:
tb_writer = None
lexicon = Lexicon(params.lang_dir)
max_phone_id = max(lexicon.tokens)
device = torch.device("cpu")
if torch.cuda.is_available():
device = torch.device("cuda", rank)
graph_compiler = CtcTrainingGraphCompiler(lexicon=lexicon, device=device)
model = TdnnLstm(
num_features=params.feature_dim,
num_classes=max_phone_id + 1, # +1 for the blank symbol
subsampling_factor=params.subsampling_factor,
)
checkpoints = load_checkpoint_if_available(params=params, model=model)
model.to(device)
if world_size > 1:
model = DDP(model, device_ids=[rank])
optimizer = optim.AdamW(
model.parameters(),
lr=params.lr,
weight_decay=params.weight_decay,
)
scheduler = StepLR(optimizer, step_size=8, gamma=0.1)
if checkpoints:
optimizer.load_state_dict(checkpoints["optimizer"])
scheduler.load_state_dict(checkpoints["scheduler"])
librispeech = LibriSpeechAsrDataModule(args)
train_dl = librispeech.train_dataloaders()
valid_dl = librispeech.valid_dataloaders()
for epoch in range(params.start_epoch, params.num_epochs):
train_dl.sampler.set_epoch(epoch)
if epoch > params.start_epoch:
logging.info(f"epoch {epoch}, lr: {scheduler.get_last_lr()[0]}")
if tb_writer is not None:
tb_writer.add_scalar(
"train/lr",
scheduler.get_last_lr()[0],
params.batch_idx_train,
)
tb_writer.add_scalar("train/epoch", epoch, params.batch_idx_train)
params.cur_epoch = epoch
train_one_epoch(
params=params,
model=model,
optimizer=optimizer,
graph_compiler=graph_compiler,
train_dl=train_dl,
valid_dl=valid_dl,
tb_writer=tb_writer,
world_size=world_size,
)
scheduler.step()
save_checkpoint(
params=params,
model=model,
optimizer=optimizer,
scheduler=scheduler,
rank=rank,
)
logging.info("Done!")
if world_size > 1:
torch.distributed.barrier()
cleanup_dist()
def main():
parser = get_parser()
LibriSpeechAsrDataModule.add_arguments(parser)
args = parser.parse_args()
world_size = args.world_size
assert world_size >= 1
if world_size > 1:
mp.spawn(run, args=(world_size, args), nprocs=world_size, join=True)
else:
run(rank=0, world_size=1, args=args)
if __name__ == "__main__":
main()
| 29.969799 | 79 | 0.632124 | [
"Apache-2.0"
] | aarora8/icefall | egs/librispeech/ASR/tdnn_lstm_ctc/train.py | 17,862 | Python |
from collections import OrderedDict, defaultdict
from typing import Optional, Dict, Tuple, List
import ariadne
from irrd.rpki.status import RPKIStatus
from irrd.rpsl.fields import RPSLFieldListMixin, RPSLTextField, RPSLReferenceField
from irrd.rpsl.rpsl_objects import (lookup_field_names, OBJECT_CLASS_MAPPING, RPSLAutNum,
RPSLInetRtr, RPSLPerson, RPSLRole)
from irrd.scopefilter.status import ScopeFilterStatus
from irrd.utils.text import snake_to_camel_case
class SchemaGenerator:
def __init__(self):
"""
The schema generator generates a GraphQL schema.
The purpose is to provide a schema to which resolvers are then
attached, which is then given to Ariadne, and for resolvers to
have information about expected types.
For RPSL queries and types, this is dynamically generated based on
the RPSL objects from irrd.rpsl. Other parts are fixed.
This means that the schema is always the same for a given IRRd
codebase - there are no runtime or user configurable parts.
Along with generating the schema, some metadata is saved, e.g.
self.graphql_types which allows resolvers to learn the GraphQL
type for a certain field.
This generator also creates Ariadne object types on self, which
are used to attach resolvers to them.
"""
self._set_rpsl_query_fields()
self._set_rpsl_object_interface_schema()
self._set_rpsl_contact_schema()
self._set_rpsl_object_schemas()
self._set_enums()
schema = self.enums
schema += """
scalar ASN
scalar IP
schema {
query: Query
}
type Query {
rpslObjects(""" + self.rpsl_query_fields + """): [RPSLObject!]
databaseStatus(sources: [String!]): [DatabaseStatus]
asnPrefixes(asns: [ASN!]!, ipVersion: Int, sources: [String!]): [ASNPrefixes!]
asSetPrefixes(setNames: [String!]!, ipVersion: Int, sources: [String!], excludeSets: [String!], sqlTrace: Boolean): [AsSetPrefixes!]
recursiveSetMembers(setNames: [String!]!, depth: Int, sources: [String!], excludeSets: [String!], sqlTrace: Boolean): [SetMembers!]
}
type DatabaseStatus {
source: String!
authoritative: Boolean!
objectClassFilter: [String!]
rpkiRovFilter: Boolean!
scopefilterEnabled: Boolean!
localJournalKept: Boolean!
serialOldestJournal: Int
serialNewestJournal: Int
serialLastExport: Int
serialNewestMirror: Int
lastUpdate: String
synchronisedSerials: Boolean!
}
type RPSLJournalEntry {
rpslPk: String!
source: String!
serialNrtm: Int!
operation: String!
origin: String
objectClass: String!
objectText: String!
timestamp: String!
}
type ASNPrefixes {
asn: ASN!
prefixes: [IP!]
}
type AsSetPrefixes {
rpslPk: String!
prefixes: [IP!]
}
type SetMembers {
rpslPk: String!
members: [String!]
}
"""
schema += self.rpsl_object_interface_schema
schema += self.rpsl_contact_schema
schema += ''.join(self.rpsl_object_schemas.values())
schema += 'union RPSLContactUnion = RPSLPerson | RPSLRole'
self.type_defs = ariadne.gql(schema)
self.query_type = ariadne.QueryType()
self.rpsl_object_type = ariadne.InterfaceType("RPSLObject")
self.rpsl_contact_union_type = ariadne.UnionType("RPSLContactUnion")
self.asn_scalar_type = ariadne.ScalarType("ASN")
self.ip_scalar_type = ariadne.ScalarType("IP")
self.object_types = [self.query_type, self.rpsl_object_type, self.rpsl_contact_union_type,
self.asn_scalar_type, self.ip_scalar_type]
for name in self.rpsl_object_schemas.keys():
self.object_types.append(ariadne.ObjectType(name))
self.object_types.append(ariadne.ObjectType("ASNPrefixes"))
self.object_types.append(ariadne.ObjectType("AsSetPrefixes"))
self.object_types.append(ariadne.ObjectType("SetMembers"))
self.object_types.append(ariadne.EnumType("RPKIStatus", RPKIStatus))
self.object_types.append(ariadne.EnumType("ScopeFilterStatus", ScopeFilterStatus))
def _set_rpsl_query_fields(self):
"""
Create a sub-schema for the fields that can be queried for RPSL objects.
This includes all fields from all objects, along with a few
special fields.
"""
string_list_fields = {'rpsl_pk', 'sources', 'object_class'}.union(lookup_field_names())
params = [snake_to_camel_case(p) + ': [String!]' for p in sorted(string_list_fields)]
params += [
'ipExact: IP',
'ipLessSpecific: IP',
'ipLessSpecificOneLevel: IP',
'ipMoreSpecific: IP',
'ipAny: IP',
'asn: [ASN!]',
'rpkiStatus: [RPKIStatus!]',
'scopeFilterStatus: [ScopeFilterStatus!]',
'textSearch: String',
'recordLimit: Int',
'sqlTrace: Boolean',
]
self.rpsl_query_fields = ', '.join(params)
def _set_enums(self):
"""
Create the schema for enums, current RPKI and scope filter status.
"""
self.enums = ''
for enum in [RPKIStatus, ScopeFilterStatus]:
self.enums += f'enum {enum.__name__} {{\n'
for value in enum:
self.enums += f' {value.name}\n'
self.enums += '}\n\n'
def _set_rpsl_object_interface_schema(self):
"""
Create the schema for RPSLObject, which contains only fields that
        are common to every known RPSL object, along with meta fields
"""
common_fields = None
for rpsl_object_class in OBJECT_CLASS_MAPPING.values():
if common_fields is None:
common_fields = set(rpsl_object_class.fields.keys())
else:
common_fields = common_fields.intersection(set(rpsl_object_class.fields.keys()))
common_fields = list(common_fields)
common_fields = ['rpslPk', 'objectClass', 'objectText', 'updated'] + common_fields
common_field_dict = self._dict_for_common_fields(common_fields)
common_field_dict['journal'] = '[RPSLJournalEntry]'
schema = self._generate_schema_str('RPSLObject', 'interface', common_field_dict)
self.rpsl_object_interface_schema = schema
def _set_rpsl_contact_schema(self):
"""
Create the schema for RPSLContact. This contains shared fields between
RPSLPerson and RPSLRole, as they are so similar.
"""
common_fields = set(RPSLPerson.fields.keys()).intersection(set(RPSLRole.fields.keys()))
common_fields = common_fields.union({'rpslPk', 'objectClass', 'objectText', 'updated'})
common_field_dict = self._dict_for_common_fields(list(common_fields))
schema = self._generate_schema_str('RPSLContact', 'interface', common_field_dict)
self.rpsl_contact_schema = schema
def _dict_for_common_fields(self, common_fields: List[str]):
common_field_dict = OrderedDict()
for field_name in sorted(common_fields):
try:
                # These fields are present in all relevant objects, so this is a safe check
rpsl_field = RPSLPerson.fields[field_name]
graphql_type = self._graphql_type_for_rpsl_field(rpsl_field)
                reference_name, reference_type = self._graphql_type_for_reference_field(
field_name, rpsl_field)
if reference_name and reference_type:
common_field_dict[reference_name] = reference_type
except KeyError:
graphql_type = 'String'
common_field_dict[snake_to_camel_case(field_name)] = graphql_type
return common_field_dict
def _set_rpsl_object_schemas(self):
"""
Create the schemas for each specific RPSL object class.
Each of these implements RPSLObject, and RPSLPerson/RPSLRole
implement RPSLContact as well.
"""
self.graphql_types = defaultdict(dict)
schemas = OrderedDict()
for object_class, klass in OBJECT_CLASS_MAPPING.items():
object_name = klass.__name__
graphql_fields = OrderedDict()
graphql_fields['rpslPk'] = 'String'
graphql_fields['objectClass'] = 'String'
graphql_fields['objectText'] = 'String'
graphql_fields['updated'] = 'String'
graphql_fields['journal'] = '[RPSLJournalEntry]'
for field_name, field in klass.fields.items():
graphql_type = self._graphql_type_for_rpsl_field(field)
graphql_fields[snake_to_camel_case(field_name)] = graphql_type
self.graphql_types[snake_to_camel_case(object_name)][field_name] = graphql_type
                reference_name, reference_type = self._graphql_type_for_reference_field(field_name, field)
if reference_name and reference_type:
graphql_fields[reference_name] = reference_type
self.graphql_types[object_name][reference_name] = reference_type
for field_name in klass.field_extracts:
if field_name.startswith('asn'):
graphql_type = 'ASN'
elif field_name == 'prefix':
graphql_type = 'IP'
elif field_name == 'prefix_length':
graphql_type = 'Int'
else:
graphql_type = 'String'
graphql_fields[snake_to_camel_case(field_name)] = graphql_type
if klass.rpki_relevant:
graphql_fields['rpkiStatus'] = 'RPKIStatus'
graphql_fields['rpkiMaxLength'] = 'Int'
self.graphql_types[object_name]['rpki_max_length'] = 'Int'
implements = 'RPSLContact & RPSLObject' if klass in [RPSLPerson, RPSLRole] else 'RPSLObject'
schema = self._generate_schema_str(object_name, 'type', graphql_fields, implements)
schemas[object_name] = schema
self.rpsl_object_schemas = schemas
def _graphql_type_for_rpsl_field(self, field: RPSLTextField) -> str:
"""
Return the GraphQL type for a regular RPSL field.
This is always a list of strings if the field is a list and/or
can occur multiple times.
"""
if RPSLFieldListMixin in field.__class__.__bases__ or field.multiple:
return '[String!]'
return 'String'
    def _graphql_type_for_reference_field(self, field_name: str, rpsl_field: RPSLTextField) -> Tuple[Optional[str], Optional[str]]:
"""
Return the GraphQL name and type for a reference field.
For example, for a field "admin-c" that refers to person/role,
        returns ('adminCObjs', '[RPSLContactUnion!]').
Some fields are excluded because they are syntactical references,
not real references.
"""
if isinstance(rpsl_field, RPSLReferenceField) and getattr(rpsl_field, 'referring', None):
rpsl_field.resolve_references()
graphql_name = snake_to_camel_case(field_name) + 'Objs'
            graphql_referring = set(rpsl_field.referring_object_classes)
            if RPSLAutNum in graphql_referring:
                graphql_referring.remove(RPSLAutNum)
            if RPSLInetRtr in graphql_referring:
                graphql_referring.remove(RPSLInetRtr)
            if graphql_referring == {RPSLPerson, RPSLRole}:
                graphql_type = '[RPSLContactUnion!]'
            else:
                graphql_type = '[' + graphql_referring.pop().__name__ + '!]'
return graphql_name, graphql_type
return None, None
def _generate_schema_str(self, name: str, graphql_type: str, fields: Dict[str, str], implements: Optional[str]=None) -> str:
"""
Generate a schema string for a given name, object type and dict of fields.
"""
schema = f'{graphql_type} {name} '
if implements:
schema += f'implements {implements} '
schema += '{\n'
for field, field_type in fields.items():
schema += f' {field}: {field_type}\n'
schema += '}\n\n'
return schema
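def _example_usage():
    # Illustrative sketch only; the resolver below is a hypothetical stand-in for
    # the real IRRd resolvers. Resolvers are attached to the generated Ariadne
    # bindables and combined into an executable schema.
    generator = SchemaGenerator()
    @generator.query_type.field("databaseStatus")
    def resolve_database_status(_, info, sources=None):
        return []
    return ariadne.make_executable_schema(generator.type_defs, *generator.object_types)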
| 43.822526 | 146 | 0.616745 | [
"BSD-2-Clause"
] | morrowc/irrd | irrd/server/graphql/schema_generator.py | 12,840 | Python |
"""
Argo Workflows API
Argo Workflows is an open source container-native workflow engine for orchestrating parallel jobs on Kubernetes. For more information, please see https://argoproj.github.io/argo-workflows/ # noqa: E501
The version of the OpenAPI document: VERSION
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
from argo_workflows.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
)
from ..model_utils import OpenApiModel
from argo_workflows.exceptions import ApiAttributeError
def lazy_import():
from argo_workflows.model.exec_action import ExecAction
from argo_workflows.model.http_get_action import HTTPGetAction
from argo_workflows.model.tcp_socket_action import TCPSocketAction
globals()['ExecAction'] = ExecAction
globals()['HTTPGetAction'] = HTTPGetAction
globals()['TCPSocketAction'] = TCPSocketAction
class LifecycleHandler(ModelNormal):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {
}
validations = {
}
@cached_property
def additional_properties_type():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
"""
lazy_import()
return (bool, date, datetime, dict, float, int, list, str, none_type,) # noqa: E501
_nullable = False
@cached_property
def openapi_types():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
lazy_import()
return {
'_exec': (ExecAction,), # noqa: E501
'http_get': (HTTPGetAction,), # noqa: E501
'tcp_socket': (TCPSocketAction,), # noqa: E501
}
@cached_property
def discriminator():
return None
attribute_map = {
'_exec': 'exec', # noqa: E501
'http_get': 'httpGet', # noqa: E501
'tcp_socket': 'tcpSocket', # noqa: E501
}
read_only_vars = {
}
_composed_schemas = {}
@classmethod
@convert_js_args_to_python_args
def _from_openapi_data(cls, *args, **kwargs): # noqa: E501
"""LifecycleHandler - a model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
_exec (ExecAction): [optional] # noqa: E501
http_get (HTTPGetAction): [optional] # noqa: E501
tcp_socket (TCPSocketAction): [optional] # noqa: E501
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
self = super(OpenApiModel, cls).__new__(cls)
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
return self
required_properties = set([
'_data_store',
'_check_type',
'_spec_property_naming',
'_path_to_item',
'_configuration',
'_visited_composed_classes',
])
@convert_js_args_to_python_args
def __init__(self, *args, **kwargs): # noqa: E501
"""LifecycleHandler - a model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
_exec (ExecAction): [optional] # noqa: E501
http_get (HTTPGetAction): [optional] # noqa: E501
tcp_socket (TCPSocketAction): [optional] # noqa: E501
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
if var_name in self.read_only_vars:
raise ApiAttributeError(f"`{var_name}` is a read-only attribute. Use `from_openapi_data` to instantiate "
f"class with read only attributes.")
| 44.007299 | 206 | 0.579366 | [
"Apache-2.0"
] | AnuragThePathak/argo-workflows | sdks/python/client/argo_workflows/model/lifecycle_handler.py | 12,058 | Python |
import grpc
import threading
import proto.connection_pb2
import proto.connection_pb2_grpc
from libs.core.Log import Log
from libs.core.Switch import Switch
from libs.core.Event import Event
from libs.Configuration import Configuration
class SwitchConnection:
def __init__(self, grpc_address=None):
self.channel = grpc.insecure_channel(grpc_address)
self.stub = proto.connection_pb2_grpc.LocalServerStub(self.channel)
response = self.stub.Hello(proto.connection_pb2.HelloMessage(ip="127.0.0.1", port=int(Configuration.get('listen_port'))))
self.name = response.name.encode('utf-8')
Event.trigger('new_switch_connection',
name=self.name, device=Switch(name=self.name, ip=response.ip.encode('utf-8'), mac=response.mac.encode('utf-8'), bfr_id=response.bfr_id))
def addTableEntry(self, tableEntry=None):
"""
Add a table entry to the switch
"""
response = self.stub.AddEntry(tableEntry)
if response.code == 0:
Log.error("Error for entry:", tableEntry, "on switch", self.name)
def removeTableEntry(self, tableEntry=None):
"""
Remove a table entry from the switch
"""
response = self.stub.RemoveEntry(tableEntry)
if response.code == 0:
Log.error("Error while removing entry:", tableEntry, "on switch", self.name)
| 34.974359 | 158 | 0.674487 | [
"Apache-2.0"
] | qcz994/p4-bier | Controller-Implementation/libs/core/SwitchConnection.py | 1,364 | Python |
from torch import jit
from syft.execution.placeholder import PlaceHolder
from syft.execution.translation.abstract import AbstractPlanTranslator
class PlanTranslatorTorchscript(AbstractPlanTranslator):
"""Performs translation from 'list of ops' Plan into torchscript Plan"""
def __init__(self, plan):
super().__init__(plan)
def translate(self):
translation_plan = self.plan.copy()
translation_plan.forward = None
args = translation_plan.create_dummy_args()
# jit.trace clones input args and can change their type, so we have to skip types check
# TODO see if type check can be made less strict,
# e.g. tensor/custom tensor/nn.Parameter could be considered same type
translation_plan.validate_input_types = False
# To avoid storing Plan state tensors in torchscript, they will be sent as parameters
# we trace wrapper func, which accepts state parameters as last arg
# and sets them into the Plan before executing the Plan
def wrap_stateful_plan(*args):
role = translation_plan.role
state = args[-1]
if 0 < len(role.state.state_placeholders) == len(state) and isinstance(
state, (list, tuple)
):
state_placeholders = tuple(
role.placeholders[ph.id.value] for ph in role.state.state_placeholders
)
PlaceHolder.instantiate_placeholders(role.state.state_placeholders, state)
PlaceHolder.instantiate_placeholders(state_placeholders, state)
return translation_plan(*args[:-1])
plan_params = translation_plan.parameters()
if len(plan_params) > 0:
torchscript_plan = jit.trace(wrap_stateful_plan, (*args, plan_params))
else:
torchscript_plan = jit.trace(translation_plan, args)
self.plan.torchscript = torchscript_plan
return self.plan
def remove(self):
self.plan.torchscript = None
return self.plan
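# Usage sketch (illustrative): given an already-built syft Plan, translation
# roughly amounts to
#
#   PlanTranslatorTorchscript(plan).translate()   # populates plan.torchscript
#   PlanTranslatorTorchscript(plan).remove()      # clears it again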
| 38.792453 | 95 | 0.664397 | [
"Apache-2.0"
] | NicoSerranoP/PySyft | syft/execution/translation/torchscript.py | 2,056 | Python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import tornado.gen
import bcrypt
__all__ = ["create_new_user"]
@tornado.gen.coroutine
def get_next_id(db, collection):
counter = yield db.counters.find_and_modify(
{"_id": "{}id".format(collection)},
{"$inc": {"seq": 1}},
new=True,
)
raise tornado.gen.Return(counter["seq"])
@tornado.gen.coroutine
def create_new_user(db, email, password, group):
password = bcrypt.hashpw(password.encode(), bcrypt.gensalt(8))
id = yield get_next_id(db, "user")
yield db.users.insert({
"_id": id, "email": email, "hash": password, "group": group})
| 23.703704 | 69 | 0.635938 | [
"MIT"
] | ilkerkesen/trebol | trebol/interface.py | 640 | Python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import argparse
import sys
import numpy as np
import pandas as pd
def run(args):
data = pd.read_csv(sys.stdin)
# Find maximum rank value and increase by one to use as a fill_value
# on the pivot with cluster by day
# notfound_value = grouped['rank'].max()+1
    # Create pivot table and fill non-existing entries with a high number, e.g. 200
pivot = pd.pivot_table(data,
values='rank',
index='Cluster ID',
columns=['day'],
fill_value=args.notfound_value,
aggfunc=np.sum)
# Write output
pivot.to_csv(sys.stdout)
if __name__ == '__main__':
# Parse command-line arguments.
parser = argparse.ArgumentParser(
description="Pivot table by cluster and day of the poll")
parser.add_argument('--notfound_value',
type=int,
help="value to assign to N/A values on pivot table",
required=True)
args = parser.parse_args()
run(args)
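# Example invocation (illustrative; the CSV file name is an assumption). The
# script reads the poll data from stdin and writes the pivot to stdout:
#
#   python pivot_cluster_day.py --notfound_value 200 < clusters.csv > pivot.csv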
| 29.526316 | 76 | 0.564171 | [
"MIT"
] | isabella232/allsongsconsidered-poll | scripts/pivot_cluster_day.py | 1,122 | Python |
import sys
import random
from collections import deque
def printGrid(grid, wallChar, emptyChar):
finalstr = ""
finalstr += "\n"
for i in range(len(grid[0])):
for j in range(len(grid)):
if grid[j][i]==1:
finalstr += wallChar
else:
finalstr += emptyChar
finalstr += "\n"
finalstr += "\n"
print(finalstr)
def makeGrid(width, height):
newgrid = [[0 for x in range(height)] for y in range(width)]
for i in range(len(newgrid)):
for j in range(len(newgrid[i])):
if i==0 or j==0 or i==len(newgrid)-1 or j==len(newgrid[0])-1:
newgrid[i][j]=1
return newgrid
def populateGrid(grid, chance):
for i in range(len(grid)): # reminder to test with: for index, value in enumerate(grid)
for j in range(len(grid[0])):
if(random.randint(0,100)<=chance): # test with list comprehension instead??
grid[i][j]=1
return grid
def automataIteration(grid, minCount, makePillars):
new_grid = [row[:] for row in grid]
for i in range(1, len(grid)-1):
for j in range(1, len(grid[0])-1):
count = 0
for k in range(-1,2):
for l in range(-1,2):
if grid[i+k][j+l]==1:
count+=1
if count>=minCount or (count==0 and makePillars==1):
new_grid[i][j]=1
else:
new_grid[i][j]=0
return new_grid
def floodFindEmpty(grid, tries, goal):
times_remade = 0
percentage = 0
while times_remade<tries and percentage<goal:
copy_grid = [row[:] for row in grid]
open_count = 0
times_remade+=1
unvisited = deque([])
new_grid = [[1 for x in range(len(grid[0]))] for y in range(len(grid))]
#find a random empty space, hope it's the biggest cave
randx = random.randint(0,len(grid)-1)
randy = random.randint(0,len(grid[0])-1)
while(grid[randx][randy] == 1):
randx = random.randint(0,len(grid)-1)
randy = random.randint(0,len(grid[0])-1)
unvisited.append([randx, randy])
while len(unvisited)>0:
current = unvisited.popleft()
new_grid[current[0]][current[1]] = 0
for k in range(-1,2):
for l in range(-1,2):
if current[0]+k >= 0 and current[0]+k<len(grid) and current[1]+l >= 0 and current[1]+l < len(grid[0]): #if we're not out of bounds
if copy_grid[current[0]+k][current[1]+l]==0: #if it's an empty space
copy_grid[current[0]+k][current[1]+l]=2 #mark visited
open_count += 1
unvisited.append([current[0]+k, current[1]+l])
percentage = open_count*100/(len(grid)*len(grid[0]))
print("counted {0}, {1}%...".format(open_count,percentage))
return new_grid, percentage
def main():
width = int(input("Enter the width: "))
height = int(input("Enter the height: "))
#chance = 100 - int(input("Enter the percentage chance of randomly generating a wall: "))
#count = int(input("Enter the min count of surrounding walls for the automata rules: "))
chance = 40
count = 5
iterations = int(input("Enter the number of regular iterations: "))
pillarIterations = int(input("Enter the number of pillar-generating iterations: "))
floodTries = 5
goalPercentage = 30 # above 30% seems to be a good target
grid = makeGrid(width, height)
print("\nRandomly populated grid:")
grid = populateGrid(grid, chance)
printGrid(grid, '# ', '· ')
for i in range(pillarIterations):
print("{0} iteration(s) of automata with pillars:".format(i+1))
grid = automataIteration(grid, count, 1)
printGrid(grid, '# ', '· ')
for i in range(iterations):
print("{0} iteration(s) of regular automata:".format(i+1))
grid = automataIteration(grid, count, 0)
printGrid(grid, '# ', '· ')
print("\nAfter flood algorithm to find the biggest cave:")
grid, percentage = floodFindEmpty(grid, floodTries, goalPercentage)
if percentage<goalPercentage:
print("Failed to produce a big enough cave after {0} tries...".format(floodTries))
else:
print("Percentage of open space: {0}%".format(percentage))
printGrid(grid, '# ', '· ')
# self reminder to try checking map size https://stackoverflow.com/questions/1331471/in-memory-size-of-a-python-structure
print("")
if __name__ == "__main__":
    main()
| 37.532258 | 150 | 0.572841 | [
"MIT"
] | nmmarzano/CellularCaves.py | cellularcaves.py | 4,658 | Python |
from pools import PoolTest
import eventlet
class EventletPool(PoolTest):
def init_pool(self, worker_count):
return eventlet.GreenPool(worker_count)
def map(self, work_func, inputs):
return self.pool.imap(work_func, inputs)
def init_network_resource(self):
return eventlet.import_patched('requests').Session
| 24.785714 | 58 | 0.731988 | [
"MIT"
] | JohnStarich/python-pool-performance | pools/eventlet.py | 347 | Python |
## ********Day 55 Start**********
## Advanced Python Decorator Functions
class User:
def __init__(self, name):
self.name = name
self.is_logged_in = False
def is_authenticated_decorator(function):
def wrapper(*args, **kwargs):
if args[0].is_logged_in == True:
function(args[0])
return wrapper
@is_authenticated_decorator
def create_blog_post(user):
print(f"This is {user.name}'s new blog post.")
new_user = User("Edgar")
new_user.is_logged_in = True
create_blog_post(new_user) | 24.181818 | 50 | 0.667293 | [
"MIT"
] | ecanro/100DaysOfCode_Python | Day_55/sandbox.py | 532 | Python |
import json
def get_text_from_json(file):
with open(file) as f:
json_text = f.read()
text = json.loads(json_text)
return text
content = get_text_from_json('content.json')
test = get_text_from_json('test.json') | 20.083333 | 44 | 0.672199 | [
"MIT"
] | mell-old/echo-bot | json_content.py | 241 | Python |
#!/usr/bin/env python
import io
import sys
from datetime import datetime
# To make sure all packet types are available
import scapy.all # noqa
import scapy.packet
from scapy.layers.l2 import Ether
import pcapng
from pcapng.blocks import EnhancedPacket, InterfaceDescription, SectionHeader
def col256(text, fg=None, bg=None, bold=False):
def _get_color(col):
return "8;5;{0:d}".format(_to_color(col))
def _to_color(num):
if isinstance(num, int):
return num # Assume it is already a color
if isinstance(num, str) and len(num) <= 3:
return 16 + int(num, 6)
raise ValueError("Invalid color: {0!r}".format(num))
if not isinstance(text, str):
text = repr(text)
buf = io.StringIO()
if bold:
buf.write("\x1b[1m")
if fg is not None:
buf.write("\x1b[3{0}m".format(_get_color(fg)))
if bg is not None:
buf.write("\x1b[4{0}m".format(_get_color(bg)))
buf.write(text)
buf.write("\x1b[0m")
return buf.getvalue()
def dump_information(scanner):
for block in scanner:
if isinstance(block, SectionHeader):
pprint_sectionheader(block)
elif isinstance(block, InterfaceDescription):
pprint_interfacedesc(block)
elif isinstance(block, EnhancedPacket):
pprint_enhanced_packet(block)
else:
print(" " + str(block))
def pprint_options(options):
if len(options):
yield "--"
for key, values in options.iter_all_items():
for value in values:
yield col256(key + ":", bold=True, fg="453")
yield col256(str(value), fg="340")
def pprint_sectionheader(block):
endianness_desc = {
"<": "Little endian",
">": "Big endian",
"!": "Network (Big endian)",
"=": "Native",
}
text = [
col256(" Section ", bg="400", fg="550"),
col256("version:", bold=True),
col256(".".join(str(x) for x in block.version), fg="145"),
# col256('endianness:', bold=True),
"-",
col256(endianness_desc.get(block.endianness, "Unknown endianness"), bold=True),
"-",
]
if block.length < 0:
text.append(col256("unspecified size", bold=True))
else:
text.append(col256("length:", bold=True))
text.append(col256(str(block.length), fg="145"))
text.extend(pprint_options(block.options))
print(" ".join(text))
def pprint_interfacedesc(block):
text = [
col256(" Interface #{0} ".format(block.interface_id), bg="010", fg="453"),
col256("Link type:", bold=True),
col256(str(block.link_type), fg="140"),
col256(block.link_type_description, fg="145"),
col256("Snap length:", bold=True),
col256(str(block.snaplen), fg="145"),
]
text.extend(pprint_options(block.options))
print(" ".join(text))
def pprint_enhanced_packet(block):
text = [
col256(" Packet+ ", bg="001", fg="345"),
# col256('NIC:', bold=True),
# col256(str(block.interface_id), fg='145'),
col256(str(block.interface.options["if_name"]), fg="140"),
col256(
str(
datetime.utcfromtimestamp(block.timestamp).strftime("%Y-%m-%d %H:%M:%S")
),
fg="455",
),
]
try:
text.extend(
[
col256("NIC:", bold=True),
col256(block.interface_id, fg="145"),
col256(block.interface.options["if_name"], fg="140"),
]
)
except KeyError:
pass
text.extend(
[
# col256('Size:', bold=True),
col256(str(block.packet_len) + " bytes", fg="025")
]
)
if block.captured_len != block.packet_len:
text.extend(
[
col256("Truncated to:", bold=True),
col256(str(block.captured_len) + "bytes", fg="145"),
]
)
text.extend(pprint_options(block.options))
print(" ".join(text))
if block.interface.link_type == 1:
# print(repr(block.packet_data))
# print(col256(repr(Ether(block.packet_data)), fg='255'))
_info = format_packet_information(block.packet_data)
print("\n".join(" " + line for line in _info))
else:
print(" Printing information for non-ethernet packets")
print(" is not supported yet.")
# print('\n'.join(' ' + line
# for line in format_binary_data(block.packet_data)))
def format_packet_information(packet_data):
decoded = Ether(packet_data)
return format_scapy_packet(decoded)
def format_scapy_packet(packet):
fields = []
for f in packet.fields_desc:
# if isinstance(f, ConditionalField) and not f._evalcond(self):
# continue
if f.name in packet.fields:
val = f.i2repr(packet, packet.fields[f.name])
elif f.name in packet.overloaded_fields:
val = f.i2repr(packet, packet.overloaded_fields[f.name])
else:
continue
fields.append("{0}={1}".format(col256(f.name, "542"), col256(val, "352")))
yield "{0} {1}".format(col256(packet.__class__.__name__, "501"), " ".join(fields))
if packet.payload:
if isinstance(packet.payload, scapy.packet.Raw):
raw_data = str(packet.payload)
for line in make_printable(raw_data).splitlines():
yield " " + line
# for line in format_binary_data(raw_data):
# yield ' ' + line
elif isinstance(packet.payload, scapy.packet.Packet):
for line in format_scapy_packet(packet.payload):
yield " " + line
else:
for line in repr(packet.payload).splitlines():
yield " " + line
def make_printable(data): # todo: preserve unicode
stream = io.StringIO()
for ch in data:
if ch == "\\":
stream.write("\\\\")
elif ch in "\n\r" or (32 <= ord(ch) <= 126):
stream.write(ch)
else:
stream.write("\\x{0:02x}".format(ord(ch)))
return stream.getvalue()
def format_binary_data(data):
stream = io.BytesIO(data)
row_offset = 0
row_size = 16 # bytes
while True:
data = stream.read(row_size)
if not data:
return
        hexrow = io.StringIO()
        asciirow = io.StringIO()
        for i, byte in enumerate(data):
            # iterating over bytes yields ints in Python 3
            if 32 <= byte <= 126:
                asciirow.write(chr(byte))
            else:
                asciirow.write(".")
            hexrow.write(format(byte, "02x"))
if i < 15:
if i % 2 == 1:
hexrow.write(" ")
if i % 8 == 7:
hexrow.write(" ")
row_offset += 1
yield "{0:08x}: {1:40s} {2:16s}".format(
row_offset, hexrow.getvalue(), asciirow.getvalue()
)
def main():
if (len(sys.argv) > 1) and (sys.argv[1] != "-"):
with open(sys.argv[1], "rb") as fp:
scanner = pcapng.FileScanner(fp)
dump_information(scanner)
else:
scanner = pcapng.FileScanner(sys.stdin)
dump_information(scanner)
if __name__ == "__main__":
main()
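# Example invocation (illustrative; the capture file name is an assumption).
# Pass a pcapng file as the first argument, or '-' to read from stdin:
#
#   python dump_pcapng_info_pretty.py capture.pcapng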
| 28.015209 | 88 | 0.549946 | [
"Apache-2.0"
] | dieter-exc/python-pcapng | examples/dump_pcapng_info_pretty.py | 7,368 | Python |
from tensorflow.keras.callbacks import LambdaCallback
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Activation, LSTM
from tensorflow.keras.layers import Dropout, TimeDistributed
try:
from tensorflow.python.keras.layers import CuDNNLSTM as lstm
except ImportError:
from tensorflow.keras.layers import Dense, Activation, LSTM as lstm
from tensorflow.keras.layers import Dropout
from tensorflow.keras.optimizers import RMSprop
from tensorflow.keras.models import load_model as lm
import numpy as np
import random
import sys
import io
from midi import Midi
class Model:
def create(self, size, unique_notes, optimizer=None, hidden_size=128):
self.model = Sequential()
self.model.add(lstm(hidden_size, input_shape=(
size, unique_notes), return_sequences=True))
self.model.add(lstm(hidden_size))
self.model.add(Dropout(0.2))
self.model.add(Dense(unique_notes))
self.model.add(Activation('softmax'))
self.model.compile(loss='categorical_crossentropy', optimizer=RMSprop(
lr=0.01) if optimizer == None else optimizer)
def load_from_file(self, name="model.h5"):
self.model = lm(name)
def save_to_file(self, name="model.h5"):
self.model.save(name)
def learn(self, inputs, outputs, batch_size=256, epochs=185):
self.model.fit(inputs, outputs,
batch_size=batch_size,
epochs=epochs, verbose=True)
def predict(self, arr):
return self.model.predict(arr)
| 34.733333 | 78 | 0.706974 | [
"MIT"
] | Wowol/Piano-Bot | model.py | 1,563 | Python |
# -*- coding: utf-8 -*-
# Copyright 2013-2021 CERN
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Authors:
# - Vincent Garonne <[email protected]>, 2013-2018
# - Cedric Serfon <[email protected]>, 2013-2020
# - Ralph Vigne <[email protected]>, 2013-2014
# - Martin Barisits <[email protected]>, 2013-2021
# - Mario Lassnig <[email protected]>, 2014-2021
# - David Cameron <[email protected]>, 2014
# - Thomas Beermann <[email protected]>, 2014-2021
# - Wen Guan <[email protected]>, 2014-2015
# - Hannes Hansen <[email protected]>, 2018-2019
# - Dimitrios Christidis <[email protected]>, 2019-2021
# - Robert Illingworth <[email protected]>, 2019
# - James Perry <[email protected]>, 2019
# - Jaroslav Guenther <[email protected]>, 2019
# - Andrew Lister <[email protected]>, 2019
# - Ilija Vukotic <[email protected]>, 2020-2021
# - Brandon White <[email protected]>, 2019
# - Tomas Javurek <[email protected]>, 2020
# - Luc Goossens <[email protected]>, 2020
# - Eli Chadwick <[email protected]>, 2020
# - Patrick Austin <[email protected]>, 2020
# - Eric Vaandering <[email protected]>, 2020-2021
# - Benedikt Ziemons <[email protected]>, 2020-2021
# - Radu Carpa <[email protected]>, 2021
# - Gabriele Fronzé <[email protected]>, 2021
from __future__ import print_function
import heapq
import logging
import random
from collections import defaultdict
from copy import deepcopy
from curses.ascii import isprint
from datetime import datetime, timedelta
from hashlib import sha256
from json import dumps
from re import match
from struct import unpack
from traceback import format_exc
import requests
from dogpile.cache import make_region
from dogpile.cache.api import NO_VALUE
from six import string_types
from sqlalchemy import func, and_, or_, exists, not_
from sqlalchemy.exc import DatabaseError, IntegrityError
from sqlalchemy.orm import aliased
from sqlalchemy.orm.exc import FlushError, NoResultFound
from sqlalchemy.sql import label
from sqlalchemy.sql.expression import case, select, text, false, true
import rucio.core.did
import rucio.core.lock
from rucio.common import exception
from rucio.common.types import InternalScope
from rucio.common.utils import chunks, clean_surls, str_to_date, add_url_query
from rucio.core.config import get as config_get
from rucio.core.credential import get_signed_url
from rucio.core.rse import get_rse, get_rse_name, get_rse_attribute, get_rse_vo, list_rses
from rucio.core.rse_counter import decrease, increase
from rucio.core.rse_expression_parser import parse_expression
from rucio.db.sqla import models, filter_thread_work
from rucio.db.sqla.constants import (DIDType, ReplicaState, OBSOLETE, DIDAvailability,
BadFilesStatus, RuleState, BadPFNStatus)
from rucio.db.sqla.session import (read_session, stream_session, transactional_session,
DEFAULT_SCHEMA_NAME, BASE)
from rucio.rse import rsemanager as rsemgr
REGION = make_region().configure('dogpile.cache.memory', expiration_time=60)
@read_session
def get_bad_replicas_summary(rse_expression=None, from_date=None, to_date=None, filter=None, session=None):
"""
List the bad file replicas summary. Method used by the rucio-ui.
:param rse_expression: The RSE expression.
:param from_date: The start date.
:param to_date: The end date.
:param filter: Dictionary of attributes by which the RSE results should be filtered. e.g.: {'availability_write': True}
:param session: The database session in use.
"""
result = []
incidents = {}
rse_clause = []
if rse_expression:
for rse in parse_expression(expression=rse_expression, filter=filter, session=session):
rse_clause.append(models.BadReplicas.rse_id == rse['id'])
elif filter:
# Ensure we limit results to current VO even if we don't specify an RSE expression
for rse in list_rses(filters=filter, session=session):
rse_clause.append(models.BadReplicas.rse_id == rse['id'])
if session.bind.dialect.name == 'oracle':
to_days = func.trunc(models.BadReplicas.created_at, str('DD'))
elif session.bind.dialect.name == 'mysql':
to_days = func.date(models.BadReplicas.created_at)
elif session.bind.dialect.name == 'postgresql':
to_days = func.date_trunc('day', models.BadReplicas.created_at)
else:
to_days = func.strftime(models.BadReplicas.created_at, '%Y-%m-%d')
query = session.query(func.count(), to_days, models.BadReplicas.rse_id, models.BadReplicas.state, models.BadReplicas.reason)
# To be added : HINTS
if rse_clause != []:
query = query.filter(or_(*rse_clause))
if from_date:
query = query.filter(models.BadReplicas.created_at > from_date)
if to_date:
query = query.filter(models.BadReplicas.created_at < to_date)
summary = query.group_by(to_days, models.BadReplicas.rse_id, models.BadReplicas.reason, models.BadReplicas.state).all()
for row in summary:
if (row[2], row[1], row[4]) not in incidents:
incidents[(row[2], row[1], row[4])] = {}
incidents[(row[2], row[1], row[4])][str(row[3].name)] = row[0]
for incident in incidents:
res = incidents[incident]
res['rse_id'] = incident[0]
res['rse'] = get_rse_name(rse_id=incident[0], session=session)
res['created_at'] = incident[1]
res['reason'] = incident[2]
result.append(res)
return result
@read_session
def __exists_replicas(rse_id, scope=None, name=None, path=None, session=None):
"""
Internal method to check if a replica exists at a given site.
:param rse_id: The RSE id.
:param scope: The scope of the file.
:param name: The name of the file.
:param path: The path of the replica.
:param session: The database session in use.
"""
already_declared = False
if path:
path_clause = [models.RSEFileAssociation.path == path]
if path.startswith('/'):
path_clause.append(models.RSEFileAssociation.path == path[1:])
else:
path_clause.append(models.RSEFileAssociation.path == '/%s' % path)
query = session.query(models.RSEFileAssociation.path, models.RSEFileAssociation.scope, models.RSEFileAssociation.name, models.RSEFileAssociation.rse_id, models.RSEFileAssociation.bytes).\
            with_hint(models.RSEFileAssociation, "+ index(replicas REPLICAS_PATH_IDX)", 'oracle').\
filter(models.RSEFileAssociation.rse_id == rse_id).filter(or_(*path_clause))
else:
query = session.query(models.RSEFileAssociation.path, models.RSEFileAssociation.scope, models.RSEFileAssociation.name, models.RSEFileAssociation.rse_id, models.RSEFileAssociation.bytes).\
filter_by(rse_id=rse_id, scope=scope, name=name)
if query.count():
result = query.first()
path, scope, name, rse_id, size = result
# Now we check that the replica is not already declared bad
query = session.query(models.BadReplicas.scope, models.BadReplicas.name, models.BadReplicas.rse_id, models.BadReplicas.state).\
filter_by(rse_id=rse_id, scope=scope, name=name, state=BadFilesStatus.BAD)
if query.count():
already_declared = True
return True, scope, name, already_declared, size
else:
return False, None, None, already_declared, None
@read_session
def list_bad_replicas_status(state=BadFilesStatus.BAD, rse_id=None, younger_than=None, older_than=None, limit=None, list_pfns=False, vo='def', session=None):
"""
List the bad file replicas history states. Method used by the rucio-ui.
:param state: The state of the file (SUSPICIOUS or BAD).
:param rse_id: The RSE id.
:param younger_than: datetime object to select bad replicas younger than this date.
:param older_than: datetime object to select bad replicas older than this date.
:param limit: The maximum number of replicas returned.
:param vo: The VO to find replicas from.
:param session: The database session in use.
"""
result = []
query = session.query(models.BadReplicas.scope, models.BadReplicas.name, models.BadReplicas.rse_id, models.BadReplicas.state, models.BadReplicas.created_at, models.BadReplicas.updated_at)
if state:
query = query.filter(models.BadReplicas.state == state)
if rse_id:
query = query.filter(models.BadReplicas.rse_id == rse_id)
if younger_than:
query = query.filter(models.BadReplicas.created_at >= younger_than)
if older_than:
query = query.filter(models.BadReplicas.created_at <= older_than)
if limit:
query = query.limit(limit)
for badfile in query.yield_per(1000):
if badfile.scope.vo == vo:
if list_pfns:
result.append({'scope': badfile.scope, 'name': badfile.name, 'type': DIDType.FILE})
else:
result.append({'scope': badfile.scope, 'name': badfile.name, 'rse': get_rse_name(rse_id=badfile.rse_id, session=session), 'rse_id': badfile.rse_id, 'state': badfile.state, 'created_at': badfile.created_at, 'updated_at': badfile.updated_at})
if list_pfns:
reps = []
for rep in list_replicas(result, schemes=None, unavailable=False, request_id=None, ignore_availability=True, all_states=True, session=session):
pfn = None
if rse_id in rep['rses'] and rep['rses'][rse_id]:
pfn = rep['rses'][rse_id][0]
if pfn and pfn not in reps:
reps.append(pfn)
else:
reps.extend([item for row in rep['rses'].values() for item in row])
        reps = list(set(reps))
result = reps
return result
@read_session
def list_bad_replicas_history(limit=10000, thread=None, total_threads=None, session=None):
"""
List the bad file replicas history. Method only used by necromancer
:param limit: The maximum number of replicas returned.
:param thread: The assigned thread for this necromancer.
:param total_threads: The total number of threads of all necromancers.
:param session: The database session in use.
"""
query = session.query(models.BadReplicas.scope, models.BadReplicas.name, models.BadReplicas.rse_id).\
filter(models.BadReplicas.state == BadFilesStatus.BAD)
query = filter_thread_work(session=session, query=query, total_threads=total_threads, thread_id=thread, hash_variable='name')
query = query.limit(limit)
bad_replicas = {}
for scope, name, rse_id in query.yield_per(1000):
if rse_id not in bad_replicas:
bad_replicas[rse_id] = []
bad_replicas[rse_id].append({'scope': scope, 'name': name})
return bad_replicas
@transactional_session
def update_bad_replicas_history(dids, rse_id, session=None):
"""
Update the bad file replicas history. Method only used by necromancer
:param dids: The list of DIDs.
:param rse_id: The rse_id.
:param session: The database session in use.
"""
for did in dids:
# Check if the replica is still there
try:
result = session.query(models.RSEFileAssociation.state).filter_by(rse_id=rse_id, scope=did['scope'], name=did['name']).one()
state = result.state
if state == ReplicaState.AVAILABLE:
# If yes, and replica state is AVAILABLE, update BadReplicas
query = session.query(models.BadReplicas).filter_by(state=BadFilesStatus.BAD, rse_id=rse_id, scope=did['scope'], name=did['name'])
query.update({'state': BadFilesStatus.RECOVERED, 'updated_at': datetime.utcnow()}, synchronize_session=False)
elif state != ReplicaState.BAD:
# If the replica state is not AVAILABLE check if other replicas for the same file are still there.
try:
session.query(models.RSEFileAssociation.state).filter_by(rse_id=rse_id, scope=did['scope'], name=did['name'], state=ReplicaState.AVAILABLE).one()
except NoResultFound:
# No replicas are available for this file. Reset the replica state to BAD
update_replicas_states([{'scope': did['scope'], 'name': did['name'], 'rse_id': rse_id, 'state': ReplicaState.BAD}], session=session)
session.query(models.Source).filter_by(scope=did['scope'], name=did['name'], rse_id=rse_id).delete(synchronize_session=False)
else:
                # This means that the file has not yet been processed by the necromancer. Just pass.
pass
except NoResultFound:
# We end-up here if the replica is not registered anymore on the RSE
try:
result = session.query(models.DataIdentifier.availability).filter_by(scope=did['scope'], name=did['name'], did_type=DIDType.FILE).one()
# If yes, the final state depends on DIDAvailability
state = result.availability
final_state = None
if state == DIDAvailability.LOST:
final_state = BadFilesStatus.LOST
elif state == DIDAvailability.DELETED:
final_state = BadFilesStatus.DELETED
elif state == DIDAvailability.AVAILABLE:
final_state = BadFilesStatus.DELETED
else:
                    # For completeness, it shouldn't happen.
print('Houston we have a problem.')
final_state = BadFilesStatus.DELETED
query = session.query(models.BadReplicas).filter_by(state=BadFilesStatus.BAD, rse_id=rse_id, scope=did['scope'], name=did['name'])
query.update({'state': final_state, 'updated_at': datetime.utcnow()}, synchronize_session=False)
except NoResultFound:
# If no, the replica is marked as LOST in BadFilesStatus
query = session.query(models.BadReplicas).filter_by(state=BadFilesStatus.BAD, rse_id=rse_id, scope=did['scope'], name=did['name'])
query.update({'state': BadFilesStatus.LOST, 'updated_at': datetime.utcnow()}, synchronize_session=False)
@transactional_session
def __declare_bad_file_replicas(pfns, rse_id, reason, issuer, status=BadFilesStatus.BAD, scheme='srm', session=None):
"""
Declare a list of bad replicas.
:param pfns: The list of PFNs.
:param rse_id: The RSE id.
:param reason: The reason of the loss.
:param issuer: The issuer account.
:param status: Either BAD or SUSPICIOUS.
:param scheme: The scheme of the PFNs.
:param session: The database session in use.
"""
unknown_replicas = []
declared_replicas = []
rse_info = rsemgr.get_rse_info(rse_id=rse_id, session=session)
replicas = []
proto = rsemgr.create_protocol(rse_info, 'read', scheme=scheme)
if rse_info['deterministic']:
parsed_pfn = proto.parse_pfns(pfns=pfns)
for pfn in parsed_pfn:
# WARNING : this part is ATLAS specific and must be changed
path = parsed_pfn[pfn]['path']
if path.startswith('/user') or path.startswith('/group'):
scope = '%s.%s' % (path.split('/')[1], path.split('/')[2])
name = parsed_pfn[pfn]['name']
elif path.startswith('/'):
scope = path.split('/')[1]
name = parsed_pfn[pfn]['name']
else:
scope = path.split('/')[0]
name = parsed_pfn[pfn]['name']
scope = InternalScope(scope, vo=issuer.vo)
__exists, scope, name, already_declared, size = __exists_replicas(rse_id, scope, name, path=None, session=session)
if __exists and ((status == BadFilesStatus.BAD and not already_declared) or status == BadFilesStatus.SUSPICIOUS):
replicas.append({'scope': scope, 'name': name, 'rse_id': rse_id, 'state': ReplicaState.BAD})
new_bad_replica = models.BadReplicas(scope=scope, name=name, rse_id=rse_id, reason=reason, state=status, account=issuer, bytes=size)
new_bad_replica.save(session=session, flush=False)
session.query(models.Source).filter_by(scope=scope, name=name, rse_id=rse_id).delete(synchronize_session=False)
declared_replicas.append(pfn)
else:
if already_declared:
unknown_replicas.append('%s %s' % (pfn, 'Already declared'))
else:
no_hidden_char = True
for char in str(pfn):
if not isprint(char):
unknown_replicas.append('%s %s' % (pfn, 'PFN contains hidden chars'))
no_hidden_char = False
break
if no_hidden_char:
unknown_replicas.append('%s %s' % (pfn, 'Unknown replica'))
if status == BadFilesStatus.BAD:
# For BAD file, we modify the replica state, not for suspicious
try:
# there shouldn't be any exceptions since all replicas exist
update_replicas_states(replicas, session=session)
except exception.UnsupportedOperation:
raise exception.ReplicaNotFound("One or several replicas don't exist.")
else:
path_clause = []
parsed_pfn = proto.parse_pfns(pfns=pfns)
for pfn in parsed_pfn:
path = '%s%s' % (parsed_pfn[pfn]['path'], parsed_pfn[pfn]['name'])
__exists, scope, name, already_declared, size = __exists_replicas(rse_id, scope=None, name=None, path=path, session=session)
if __exists and ((status == BadFilesStatus.BAD and not already_declared) or status == BadFilesStatus.SUSPICIOUS):
replicas.append({'scope': scope, 'name': name, 'rse_id': rse_id, 'state': ReplicaState.BAD})
new_bad_replica = models.BadReplicas(scope=scope, name=name, rse_id=rse_id, reason=reason, state=status, account=issuer, bytes=size)
new_bad_replica.save(session=session, flush=False)
session.query(models.Source).filter_by(scope=scope, name=name, rse_id=rse_id).delete(synchronize_session=False)
declared_replicas.append(pfn)
path_clause.append(models.RSEFileAssociation.path == path)
if path.startswith('/'):
path_clause.append(models.RSEFileAssociation.path == path[1:])
else:
path_clause.append(models.RSEFileAssociation.path == '/%s' % path)
else:
if already_declared:
unknown_replicas.append('%s %s' % (pfn, 'Already declared'))
else:
no_hidden_char = True
for char in str(pfn):
if not isprint(char):
unknown_replicas.append('%s %s' % (pfn, 'PFN contains hidden chars'))
no_hidden_char = False
break
if no_hidden_char:
unknown_replicas.append('%s %s' % (pfn, 'Unknown replica'))
if status == BadFilesStatus.BAD and declared_replicas != []:
# For BAD file, we modify the replica state, not for suspicious
query = session.query(models.RSEFileAssociation) \
                .with_hint(models.RSEFileAssociation, "+ index(replicas REPLICAS_PATH_IDX)", 'oracle') \
.filter(models.RSEFileAssociation.rse_id == rse_id) \
.filter(or_(*path_clause))
rowcount = query.update({'state': ReplicaState.BAD})
if rowcount != len(declared_replicas):
# there shouldn't be any exceptions since all replicas exist
print(rowcount, len(declared_replicas), declared_replicas)
raise exception.ReplicaNotFound("One or several replicas don't exist.")
try:
session.flush()
except IntegrityError as error:
raise exception.RucioException(error.args)
except DatabaseError as error:
raise exception.RucioException(error.args)
except FlushError as error:
raise exception.RucioException(error.args)
return unknown_replicas
@transactional_session
def add_bad_dids(dids, rse_id, reason, issuer, state=BadFilesStatus.BAD, session=None):
"""
Declare a list of bad replicas.
:param dids: The list of DIDs.
:param rse_id: The RSE id.
:param reason: The reason of the loss.
:param issuer: The issuer account.
:param state: BadFilesStatus.BAD
:param session: The database session in use.
"""
unknown_replicas = []
replicas_for_update = []
for did in dids:
scope = InternalScope(did['scope'], vo=issuer.vo)
name = did['name']
replica_exists, _scope, _name, already_declared, size = __exists_replicas(rse_id, scope, name, path=None,
session=session)
if replica_exists and not already_declared:
replicas_for_update.append({'scope': scope, 'name': name, 'rse_id': rse_id, 'state': ReplicaState.BAD})
new_bad_replica = models.BadReplicas(scope=scope, name=name, rse_id=rse_id, reason=reason, state=state,
account=issuer, bytes=size)
new_bad_replica.save(session=session, flush=False)
session.query(models.Source).filter_by(scope=scope, name=name,
rse_id=rse_id).delete(synchronize_session=False)
else:
if already_declared:
unknown_replicas.append('%s:%s %s' % (did['scope'], name, 'Already declared'))
else:
unknown_replicas.append('%s:%s %s' % (did['scope'], name, 'Unknown replica'))
if state == BadFilesStatus.BAD:
try:
update_replicas_states(replicas_for_update, session=session)
except exception.UnsupportedOperation:
raise exception.ReplicaNotFound("One or several replicas don't exist.")
try:
session.flush()
except (IntegrityError, DatabaseError, FlushError) as error:
raise exception.RucioException(error.args)
return unknown_replicas
@transactional_session
def declare_bad_file_replicas(pfns, reason, issuer, status=BadFilesStatus.BAD, session=None):
"""
Declare a list of bad replicas.
:param pfns: The list of PFNs.
:param reason: The reason of the loss.
:param issuer: The issuer account.
:param status: The status of the file (SUSPICIOUS or BAD).
:param session: The database session in use.
"""
scheme, files_to_declare, unknown_replicas = get_pfn_to_rse(pfns, vo=issuer.vo, session=session)
for rse_id in files_to_declare:
notdeclared = __declare_bad_file_replicas(files_to_declare[rse_id], rse_id, reason, issuer, status=status, scheme=scheme, session=session)
if notdeclared:
unknown_replicas[rse_id] = notdeclared
return unknown_replicas
@read_session
def get_pfn_to_rse(pfns, vo='def', session=None):
"""
Get the RSE associated to a list of PFNs.
:param pfns: The list of pfn.
:param vo: The VO to find RSEs at.
:param session: The database session in use.
:returns: a tuple : scheme, {rse1 : [pfn1, pfn2, ...], rse2: [pfn3, pfn4, ...]}, {'unknown': [pfn5, pfn6, ...]}.
"""
unknown_replicas = {}
storage_elements = []
se_condition = []
dict_rse = {}
surls = clean_surls(pfns)
scheme = surls[0].split(':')[0] if surls else None
for surl in surls:
if surl.split(':')[0] != scheme:
raise exception.InvalidType('The PFNs specified must have the same protocol')
split_se = surl.split('/')[2].split(':')
storage_element = split_se[0]
if storage_element not in storage_elements:
storage_elements.append(storage_element)
se_condition.append(models.RSEProtocols.hostname == storage_element)
query = session.query(models.RSEProtocols.rse_id, models.RSEProtocols.scheme, models.RSEProtocols.hostname, models.RSEProtocols.port, models.RSEProtocols.prefix).\
filter(and_(or_(*se_condition), models.RSEProtocols.scheme == scheme)).filter(models.RSE.staging_area == false())
protocols = {}
for rse_id, protocol, hostname, port, prefix in query.yield_per(10000):
protocols[rse_id] = ('%s://%s%s' % (protocol, hostname, prefix), '%s://%s:%s%s' % (protocol, hostname, port, prefix))
hint = None
for surl in surls:
if hint and (surl.find(protocols[hint][0]) > -1 or surl.find(protocols[hint][1]) > -1):
dict_rse[hint].append(surl)
else:
mult_rse_match = 0
for rse_id in protocols:
if (surl.find(protocols[rse_id][0]) > -1 or surl.find(protocols[rse_id][1]) > -1) and get_rse_vo(rse_id=rse_id, session=session) == vo:
mult_rse_match += 1
if mult_rse_match > 1:
print('ERROR, multiple matches : %s at %s' % (surl, rse_id))
raise exception.RucioException('ERROR, multiple matches : %s at %s' % (surl, get_rse_name(rse_id=rse_id, session=session)))
hint = rse_id
if hint not in dict_rse:
dict_rse[hint] = []
dict_rse[hint].append(surl)
if mult_rse_match == 0:
if 'unknown' not in unknown_replicas:
unknown_replicas['unknown'] = []
unknown_replicas['unknown'].append(surl)
return scheme, dict_rse, unknown_replicas
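# Illustrative sketch (not part of the original module): group a list of PFNs
# by the RSE they live on using get_pfn_to_rse(). The PFNs are supplied by the
# caller; anything that cannot be matched to an RSE ends up under 'unknown'.
def _example_group_pfns_by_rse(pfns, session=None):
    scheme, per_rse, unknown = get_pfn_to_rse(pfns, session=session)
    for rse_id, rse_pfns in per_rse.items():
        print('%s PFNs with scheme %s on RSE %s' % (len(rse_pfns), scheme, rse_id))
    return unknown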
@read_session
def list_bad_replicas(limit=10000, thread=None, total_threads=None, session=None):
"""
List RSE File replicas with no locks.
:param limit: The maximum number of replicas returned.
:param thread: The assigned thread for this necromancer.
:param total_threads: The total number of threads of all necromancers.
:param session: The database session in use.
:returns: a list of dictionary {'scope' scope, 'name': name, 'rse_id': rse_id, 'rse': rse}.
"""
schema_dot = '%s.' % DEFAULT_SCHEMA_NAME if DEFAULT_SCHEMA_NAME else ''
if session.bind.dialect.name == 'oracle':
        # The filter(text(...)) is needed, otherwise SQLAlchemy uses bind variables and the index is not used.
query = session.query(models.RSEFileAssociation.scope,
models.RSEFileAssociation.name,
models.RSEFileAssociation.rse_id).\
with_hint(models.RSEFileAssociation, "+ index(replicas REPLICAS_STATE_IDX)", 'oracle').\
filter(text("CASE WHEN (%sreplicas.state != 'A') THEN %sreplicas.rse_id END IS NOT NULL" % (schema_dot,
schema_dot))). \
filter(models.RSEFileAssociation.state == ReplicaState.BAD)
else:
query = session.query(models.RSEFileAssociation.scope,
models.RSEFileAssociation.name,
models.RSEFileAssociation.rse_id).\
filter(models.RSEFileAssociation.state == ReplicaState.BAD)
query = filter_thread_work(session=session, query=query, total_threads=total_threads, thread_id=thread, hash_variable='%sreplicas.name' % (schema_dot))
query = query.join(models.DataIdentifier,
and_(models.DataIdentifier.scope == models.RSEFileAssociation.scope,
models.DataIdentifier.name == models.RSEFileAssociation.name)).\
filter(models.DataIdentifier.availability != DIDAvailability.LOST)
query = query.limit(limit)
rows = []
for scope, name, rse_id in query.yield_per(1000):
rows.append({'scope': scope, 'name': name, 'rse_id': rse_id, 'rse': get_rse_name(rse_id=rse_id, session=session)})
return rows
@stream_session
def get_did_from_pfns(pfns, rse_id=None, vo='def', session=None):
"""
Get the DIDs associated to a PFN on one given RSE
:param pfns: The list of PFNs.
:param rse_id: The RSE id.
:param vo: The VO to get DIDs from.
:param session: The database session in use.
:returns: A dictionary {pfn: {'scope': scope, 'name': name}}
"""
dict_rse = {}
if not rse_id:
scheme, dict_rse, unknown_replicas = get_pfn_to_rse(pfns, vo=vo, session=session)
if unknown_replicas:
raise Exception
else:
scheme = 'srm'
dict_rse[rse_id] = pfns
for rse_id in dict_rse:
pfns = dict_rse[rse_id]
rse_info = rsemgr.get_rse_info(rse_id=rse_id, session=session)
pfndict = {}
proto = rsemgr.create_protocol(rse_info, 'read', scheme=scheme)
if rse_info['deterministic']:
parsed_pfn = proto.parse_pfns(pfns=pfns)
# WARNING : this part is ATLAS specific and must be changed
for pfn in parsed_pfn:
path = parsed_pfn[pfn]['path']
if path.startswith('/user') or path.startswith('/group'):
scope = '%s.%s' % (path.split('/')[1], path.split('/')[2])
name = parsed_pfn[pfn]['name']
elif path.startswith('/'):
scope = path.split('/')[1]
name = parsed_pfn[pfn]['name']
else:
scope = path.split('/')[0]
name = parsed_pfn[pfn]['name']
scope = InternalScope(scope, vo)
yield {pfn: {'scope': scope, 'name': name}}
else:
condition = []
parsed_pfn = proto.parse_pfns(pfns=pfns)
for pfn in parsed_pfn:
path = '%s%s' % (parsed_pfn[pfn]['path'], parsed_pfn[pfn]['name'])
pfndict[path] = pfn
condition.append(and_(models.RSEFileAssociation.path == path, models.RSEFileAssociation.rse_id == rse_id))
for scope, name, pfn in session.query(models.RSEFileAssociation.scope, models.RSEFileAssociation.name, models.RSEFileAssociation.path).filter(or_(*condition)):
yield {pfndict[pfn]: {'scope': scope, 'name': name}}
def _resolve_dids(dids, unavailable, ignore_availability, all_states, resolve_archives, session):
"""
Resolve list of DIDs into a list of conditions.
:param dids: The list of data identifiers (DIDs).
:param unavailable: (deprecated) Also include unavailable replicas in the list.
:param ignore_availability: Ignore the RSE blocklisting.
:param all_states: Return all replicas whatever state they are in. Adds an extra 'states' entry in the result dictionary.
:param resolve_archives: When set to true, find archives which contain the replicas.
:param session: The database session in use.
"""
did_clause, dataset_clause, file_clause, constituent_clause = [], [], [], []
# Accumulate all the dids which were requested explicitly (not via a container/dataset).
    # If any replicas for these dids are found later, the associated did will be removed from the list,
# leaving, at the end, only the requested dids which didn't have any replicas at all.
files_wo_replica = []
for did in [dict(tupleized) for tupleized in set(tuple(item.items()) for item in dids)]:
if 'type' in did and did['type'] in (DIDType.FILE, DIDType.FILE.value) or 'did_type' in did and did['did_type'] in (DIDType.FILE, DIDType.FILE.value): # pylint: disable=no-member
files_wo_replica.append({'scope': did['scope'], 'name': did['name']})
file_clause.append(and_(models.RSEFileAssociation.scope == did['scope'],
models.RSEFileAssociation.name == did['name']))
else:
did_clause.append(and_(models.DataIdentifier.scope == did['scope'],
models.DataIdentifier.name == did['name']))
if did_clause:
for scope, name, did_type, constituent in session.query(models.DataIdentifier.scope,
models.DataIdentifier.name,
models.DataIdentifier.did_type,
models.DataIdentifier.constituent)\
.with_hint(models.DataIdentifier, "INDEX(DIDS DIDS_PK)", 'oracle')\
.filter(or_(*did_clause)):
if resolve_archives and constituent:
constituent_clause.append(and_(models.ConstituentAssociation.child_scope == scope,
models.ConstituentAssociation.child_name == name))
if did_type == DIDType.FILE:
files_wo_replica.append({'scope': scope, 'name': name})
file_clause.append(and_(models.RSEFileAssociation.scope == scope,
models.RSEFileAssociation.name == name))
elif did_type == DIDType.DATASET:
dataset_clause.append(and_(models.DataIdentifierAssociation.scope == scope,
models.DataIdentifierAssociation.name == name))
else: # Container
content_query = session.query(models.DataIdentifierAssociation.child_scope,
models.DataIdentifierAssociation.child_name,
models.DataIdentifierAssociation.child_type)
content_query = content_query.with_hint(models.DataIdentifierAssociation, "INDEX(CONTENTS CONTENTS_PK)", 'oracle')
child_dids = [(scope, name)]
while child_dids:
s, n = child_dids.pop()
for tmp_did in content_query.filter_by(scope=s, name=n):
if tmp_did.child_type == DIDType.DATASET:
dataset_clause.append(and_(models.DataIdentifierAssociation.scope == tmp_did.child_scope,
models.DataIdentifierAssociation.name == tmp_did.child_name))
else:
child_dids.append((tmp_did.child_scope, tmp_did.child_name))
state_clause = None
if not all_states:
if not unavailable:
state_clause = and_(models.RSEFileAssociation.state == ReplicaState.AVAILABLE)
else:
state_clause = or_(models.RSEFileAssociation.state == ReplicaState.AVAILABLE,
models.RSEFileAssociation.state == ReplicaState.UNAVAILABLE,
models.RSEFileAssociation.state == ReplicaState.COPYING)
return file_clause, dataset_clause, state_clause, constituent_clause, files_wo_replica
def _pick_n_random(nrandom, generator):
"""
Select n random elements from the generator
"""
if not nrandom:
# pass-through the data unchanged
yield from generator
return
# A "reservoir sampling" algorithm:
    # Copy the first N elements from the generator. After that, each following element may be picked to substitute
    # one of the previously selected elements, with a probability which decreases as the number of encountered elements grows.
selected = []
i = 0
iterator = iter(generator)
try:
for _ in range(nrandom):
selected.append(next(iterator))
i += 1
while True:
element = next(iterator)
i += 1
index_to_substitute = random.randint(0, i)
if index_to_substitute < nrandom:
selected[index_to_substitute] = element
except StopIteration:
pass
for r in selected:
yield r
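# Standalone sketch of the reservoir-sampling behaviour of _pick_n_random()
# (not part of the original module): draw 3 elements from a stream of 1000
# without materialising the whole stream first.
def _example_pick_n_random():
    sample = list(_pick_n_random(3, iter(range(1000))))
    assert len(sample) == 3
    return sample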
def _list_replicas_for_datasets(dataset_clause, state_clause, rse_clause, ignore_availability, updated_after, session):
"""
List file replicas for a list of datasets.
:param session: The database session in use.
"""
if not dataset_clause:
return
replica_query = session.query(models.DataIdentifierAssociation.child_scope,
models.DataIdentifierAssociation.child_name,
models.DataIdentifierAssociation.bytes,
models.DataIdentifierAssociation.md5,
models.DataIdentifierAssociation.adler32,
models.RSEFileAssociation.path,
models.RSEFileAssociation.state,
models.RSE.id,
models.RSE.rse,
models.RSE.rse_type,
models.RSE.volatile).\
with_hint(models.RSEFileAssociation,
text="INDEX_RS_ASC(CONTENTS CONTENTS_PK) INDEX_RS_ASC(REPLICAS REPLICAS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)",
dialect_name='oracle').\
outerjoin(models.RSEFileAssociation,
and_(models.DataIdentifierAssociation.child_scope == models.RSEFileAssociation.scope,
models.DataIdentifierAssociation.child_name == models.RSEFileAssociation.name)).\
join(models.RSE, models.RSE.id == models.RSEFileAssociation.rse_id).\
filter(models.RSE.deleted == false()).\
filter(or_(*dataset_clause)).\
order_by(models.DataIdentifierAssociation.child_scope,
models.DataIdentifierAssociation.child_name)
if not ignore_availability:
replica_query = replica_query.filter(models.RSE.availability.in_((4, 5, 6, 7)))
if state_clause is not None:
replica_query = replica_query.filter(and_(state_clause))
if rse_clause is not None:
replica_query = replica_query.filter(or_(*rse_clause))
if updated_after:
replica_query = replica_query.filter(models.RSEFileAssociation.updated_at >= updated_after)
for scope, name, bytes, md5, adler32, path, state, rse_id, rse, rse_type, volatile in replica_query.yield_per(500):
yield scope, name, None, None, bytes, md5, adler32, path, state, rse_id, rse, rse_type, volatile
def _list_replicas_for_constituents(constituent_clause, state_clause, files_wo_replica, rse_clause, ignore_availability, updated_after, session):
"""
List file replicas for archive constituents.
"""
if not constituent_clause:
return
constituent_query = session.query(models.ConstituentAssociation.child_scope,
models.ConstituentAssociation.child_name,
models.ConstituentAssociation.scope,
models.ConstituentAssociation.name,
models.ConstituentAssociation.bytes,
models.ConstituentAssociation.md5,
models.ConstituentAssociation.adler32,
models.RSEFileAssociation.path,
models.RSEFileAssociation.state,
models.RSE.id,
models.RSE.rse,
models.RSE.rse_type,
models.RSE.volatile). \
with_hint(models.RSEFileAssociation,
text="INDEX_RS_ASC(CONTENTS CONTENTS_PK) INDEX_RS_ASC(REPLICAS REPLICAS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)",
dialect_name='oracle'). \
with_hint(models.ConstituentAssociation, "INDEX(ARCHIVE_CONTENTS ARCH_CONTENTS_PK)", 'oracle'). \
outerjoin(models.RSEFileAssociation,
and_(models.ConstituentAssociation.scope == models.RSEFileAssociation.scope,
models.ConstituentAssociation.name == models.RSEFileAssociation.name)). \
join(models.RSE, models.RSE.id == models.RSEFileAssociation.rse_id). \
filter(models.RSE.deleted == false()). \
filter(or_(*constituent_clause)). \
order_by(models.ConstituentAssociation.child_scope,
models.ConstituentAssociation.child_name)
if not ignore_availability:
constituent_query = constituent_query.filter(models.RSE.availability.in_((4, 5, 6, 7)))
if state_clause is not None:
constituent_query = constituent_query.filter(and_(state_clause))
if rse_clause is not None:
constituent_query = constituent_query.filter(or_(*rse_clause))
if updated_after:
constituent_query = constituent_query.filter(models.RSEFileAssociation.updated_at >= updated_after)
for replica in constituent_query.yield_per(500):
scope, name = replica[0], replica[1]
        if {'scope': scope, 'name': name} in files_wo_replica:
            files_wo_replica.remove({'scope': scope, 'name': name})
yield replica
def _list_replicas_for_files(file_clause, state_clause, files_wo_replica, rse_clause, ignore_availability, updated_after, session):
"""
List file replicas for a list of files.
:param session: The database session in use.
"""
if not file_clause:
return
for replica_condition in chunks(file_clause, 50):
filters = [
models.RSEFileAssociation.rse_id == models.RSE.id,
models.RSE.deleted == false(),
or_(*replica_condition),
]
if not ignore_availability:
filters.append(models.RSE.availability.in_((4, 5, 6, 7)))
if state_clause is not None:
filters.append(state_clause)
if rse_clause:
filters.append(or_(*rse_clause))
if updated_after:
filters.append(models.RSEFileAssociation.updated_at >= updated_after)
replica_query = session.query(
models.RSEFileAssociation.scope,
models.RSEFileAssociation.name,
models.RSEFileAssociation.bytes,
models.RSEFileAssociation.md5,
models.RSEFileAssociation.adler32,
models.RSEFileAssociation.path,
models.RSEFileAssociation.state,
models.RSE.id,
models.RSE.rse,
models.RSE.rse_type,
models.RSE.volatile,
) \
.filter(and_(*filters)) \
.order_by(models.RSEFileAssociation.scope, models.RSEFileAssociation.name) \
.with_hint(models.RSEFileAssociation, text="INDEX(REPLICAS REPLICAS_PK)", dialect_name='oracle')
for scope, name, bytes, md5, adler32, path, state, rse_id, rse, rse_type, volatile in replica_query.all():
            if {'scope': scope, 'name': name} in files_wo_replica:
                files_wo_replica.remove({'scope': scope, 'name': name})
yield scope, name, None, None, bytes, md5, adler32, path, state, rse_id, rse, rse_type, volatile
def _list_files_wo_replicas(files_wo_replica, session):
if files_wo_replica:
file_wo_clause = []
for file in sorted(files_wo_replica, key=lambda f: (f['scope'], f['name'])):
file_wo_clause.append(and_(models.DataIdentifier.scope == file['scope'],
models.DataIdentifier.name == file['name']))
files_wo_replicas_query = session.query(models.DataIdentifier.scope,
models.DataIdentifier.name,
models.DataIdentifier.bytes,
models.DataIdentifier.md5,
models.DataIdentifier.adler32).\
filter_by(did_type=DIDType.FILE).filter(or_(*file_wo_clause)).\
with_hint(models.DataIdentifier, text="INDEX(DIDS DIDS_PK)", dialect_name='oracle')
for scope, name, bytes, md5, adler32 in files_wo_replicas_query:
yield scope, name, bytes, md5, adler32
def get_vp_endpoint():
"""
VP endpoint is the Virtual Placement server.
Once VP is integrated in Rucio it won't be needed.
"""
vp_endpoint = config_get('virtual_placement', 'vp_endpoint', default='')
return vp_endpoint
def get_multi_cache_prefix(cache_site, filename, logger=logging.log):
"""
    For a given cache site and filename, return the address of the cache node that
    should be prefixed.
:param cache_site: Cache site
:param filename: Filename
"""
vp_endpoint = get_vp_endpoint()
if not vp_endpoint:
return ''
x_caches = REGION.get('CacheSites')
if x_caches is NO_VALUE:
try:
response = requests.get('{}/serverRanges'.format(vp_endpoint), verify=False)
if response.ok:
x_caches = response.json()
REGION.set('CacheSites', x_caches)
else:
REGION.set('CacheSites', {'could not reload': ''})
return ''
except requests.exceptions.RequestException as re:
REGION.set('CacheSites', {'could not reload': ''})
            logger(logging.WARNING, 'In get_multi_cache_prefix, could not access {}. Exception: {}'.format(vp_endpoint, re))
return ''
if cache_site not in x_caches:
return ''
xcache_site = x_caches[cache_site]
h = float(
unpack('Q', sha256(filename.encode('utf-8')).digest()[:8])[0]) / 2**64
for irange in xcache_site['ranges']:
if h < irange[1]:
return xcache_site['servers'][irange[0]][0]
return ''
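# Illustrative sketch (not part of the original module) of the hashing trick
# used by get_multi_cache_prefix(): a filename is mapped deterministically onto
# [0, 1) and matched against per-server ranges. The ranges and server names
# below are made-up placeholders.
def _example_filename_to_cache_server(filename='user.jdoe:file_1'):
    h = float(unpack('Q', sha256(filename.encode('utf-8')).digest()[:8])[0]) / 2**64
    ranges = [[0, 0.5], [1, 1.0]]              # [server index, upper bound of its slice]
    servers = [['root://cache-a'], ['root://cache-b']]
    for irange in ranges:
        if h < irange[1]:
            return servers[irange[0]][0]
    return ''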
def _list_replicas(dataset_clause, file_clause, state_clause, show_pfns,
schemes, files_wo_replica, rse_clause, client_location, domain,
sign_urls, signature_lifetime, constituent_clause, resolve_parents,
updated_after, filters, ignore_availability,
session):
    # iterator which merges multiple sorted replica sources into a combined sorted result without loading everything into memory
replicas = heapq.merge(
_list_replicas_for_datasets(dataset_clause, state_clause, rse_clause, ignore_availability, updated_after, session),
_list_replicas_for_files(file_clause, state_clause, files_wo_replica, rse_clause, ignore_availability, updated_after, session),
_list_replicas_for_constituents(constituent_clause, state_clause, files_wo_replica, rse_clause, ignore_availability, updated_after, session),
key=lambda t: (t[0], t[1]), # sort by scope, name
)
# we need to retain knowledge of the original domain selection by the user
# in case we have to loop over replicas with a potential outgoing proxy
original_domain = deepcopy(domain)
# find all RSEs local to the client's location in autoselect mode (i.e., when domain is None)
local_rses = []
if domain is None:
if client_location and 'site' in client_location and client_location['site']:
try:
local_rses = [rse['id'] for rse in parse_expression('site=%s' % client_location['site'], filter=filters, session=session)]
except Exception:
pass # do not hard fail if site cannot be resolved or is empty
file, tmp_protocols, rse_info, pfns_cache = {}, {}, {}, {}
for scope, name, archive_scope, archive_name, bytes, md5, adler32, path, state, rse_id, rse, rse_type, volatile in replicas:
pfns = []
# reset the domain selection to original user's choice (as this could get overwritten each iteration)
domain = deepcopy(original_domain)
if show_pfns and rse_id:
if rse_id not in rse_info:
rse_info[rse_id] = rsemgr.get_rse_info(rse_id=rse_id, session=session)
# assign scheme priorities, and don't forget to exclude disabled protocols
# 0 in RSE protocol definition = disabled, 1 = highest priority
rse_info[rse_id]['priority_wan'] = {p['scheme']: p['domains']['wan']['read'] for p in rse_info[rse_id]['protocols'] if p['domains']['wan']['read'] > 0}
rse_info[rse_id]['priority_lan'] = {p['scheme']: p['domains']['lan']['read'] for p in rse_info[rse_id]['protocols'] if p['domains']['lan']['read'] > 0}
# select the lan door in autoselect mode, otherwise use the wan door
if domain is None:
domain = 'wan'
if local_rses and rse_id in local_rses:
domain = 'lan'
if rse_id not in tmp_protocols:
rse_schemes = schemes or []
if not rse_schemes:
try:
if domain == 'all':
rse_schemes.append(rsemgr.select_protocol(rse_settings=rse_info[rse_id],
operation='read',
domain='wan')['scheme'])
rse_schemes.append(rsemgr.select_protocol(rse_settings=rse_info[rse_id],
operation='read',
domain='lan')['scheme'])
else:
rse_schemes.append(rsemgr.select_protocol(rse_settings=rse_info[rse_id],
operation='read',
domain=domain)['scheme'])
except exception.RSEProtocolNotSupported:
pass # no need to be verbose
except Exception:
print(format_exc())
if archive_scope and archive_name and 'root' not in rse_schemes:
rse_schemes.append('root')
protocols = []
for s in rse_schemes:
try:
if domain == 'all':
protocols.append(('lan', rsemgr.create_protocol(rse_settings=rse_info[rse_id],
operation='read',
scheme=s,
domain='lan'),
rse_info[rse_id]['priority_lan'][s]))
protocols.append(('wan', rsemgr.create_protocol(rse_settings=rse_info[rse_id],
operation='read',
scheme=s,
domain='wan'),
rse_info[rse_id]['priority_wan'][s]))
else:
protocols.append((domain, rsemgr.create_protocol(rse_settings=rse_info[rse_id],
operation='read',
scheme=s,
domain=domain),
rse_info[rse_id]['priority_%s' % domain][s]))
except exception.RSEProtocolNotSupported:
pass # no need to be verbose
except Exception:
print(format_exc())
tmp_protocols[rse_id] = protocols
# get pfns
for tmp_protocol in tmp_protocols[rse_id]:
# If the current "replica" is a constituent inside an archive, we must construct the pfn for the
# parent (archive) file and append the xrdcl.unzip query string to it.
if archive_scope and archive_name:
t_scope = archive_scope
t_name = archive_name
else:
t_scope = scope
t_name = name
protocol = tmp_protocol[1]
if 'determinism_type' in protocol.attributes: # PFN is cachable
try:
path = pfns_cache['%s:%s:%s' % (protocol.attributes['determinism_type'], t_scope.internal, t_name)]
except KeyError: # No cache entry scope:name found for this protocol
path = protocol._get_path(t_scope, t_name)
pfns_cache['%s:%s:%s' % (protocol.attributes['determinism_type'], t_scope.internal, t_name)] = path
try:
pfn = list(protocol.lfns2pfns(lfns={'scope': t_scope.external,
'name': t_name,
'path': path}).values())[0]
# do we need to sign the URLs?
if sign_urls and protocol.attributes['scheme'] == 'https':
service = get_rse_attribute('sign_url',
rse_id=rse_id,
session=session)
if service and isinstance(service, list):
pfn = get_signed_url(rse_id=rse_id, service=service[0], operation='read', url=pfn, lifetime=signature_lifetime)
# server side root proxy handling if location is set.
# supports root and http destinations
# cannot be pushed into protocols because we need to lookup rse attributes.
# ultra-conservative implementation.
if domain == 'wan' and protocol.attributes['scheme'] in ['root', 'http', 'https'] and client_location:
if 'site' in client_location and client_location['site']:
# is the RSE site-configured?
rse_site_attr = get_rse_attribute('site', rse_id, session=session)
replica_site = ['']
if isinstance(rse_site_attr, list) and rse_site_attr:
replica_site = rse_site_attr[0]
# does it match with the client? if not, it's an outgoing connection
# therefore the internal proxy must be prepended
if client_location['site'] != replica_site:
cache_site = config_get('clientcachemap', client_location['site'], default='', session=session)
if cache_site != '':
# print('client', client_location['site'], 'has cache:', cache_site)
# print('filename', name)
selected_prefix = get_multi_cache_prefix(cache_site, t_name)
if selected_prefix:
pfn = 'root://' + selected_prefix + '//' + pfn.replace('davs://', 'root://')
else:
# print('site:', client_location['site'], 'has no cache')
# print('lets check if it has defined an internal root proxy ')
root_proxy_internal = config_get('root-proxy-internal', # section
client_location['site'], # option
default='', # empty string to circumvent exception
session=session)
if root_proxy_internal:
# TODO: XCache does not seem to grab signed URLs. Doublecheck with XCache devs.
# For now -> skip prepending XCache for GCS.
if 'storage.googleapis.com' in pfn or 'atlas-google-cloud.cern.ch' in pfn or 'amazonaws.com' in pfn:
pass # ATLAS HACK
else:
# don't forget to mangle gfal-style davs URL into generic https URL
pfn = 'root://' + root_proxy_internal + '//' + pfn.replace('davs://', 'https://')
# PFNs don't have concepts, therefore quickly encapsulate in a tuple
# ('pfn', 'domain', 'priority', 'client_extract')
t_domain = tmp_protocol[0]
t_priority = tmp_protocol[2]
t_client_extract = False
if archive_scope and archive_name:
t_domain = 'zip'
pfn = add_url_query(pfn, {'xrdcl.unzip': name})
if protocol.attributes['scheme'] == 'root':
# xroot supports downloading files directly from inside an archive. Disable client_extract and prioritize xroot.
t_client_extract = False
t_priority = -1
else:
t_client_extract = True
pfns.append((pfn, t_domain, t_priority, t_client_extract))
except Exception:
# never end up here
print(format_exc())
if protocol.attributes['scheme'] == 'srm':
try:
file['space_token'] = protocol.attributes['extended_attributes']['space_token']
except KeyError:
file['space_token'] = None
if 'scope' in file and 'name' in file:
if file['scope'] == scope and file['name'] == name:
# extract properly the pfn from the tuple
file['rses'][rse_id] += list(set([tmp_pfn[0] for tmp_pfn in pfns]))
file['states'][rse_id] = str(state.name if state else state)
if resolve_parents:
file['parents'] = ['%s:%s' % (parent['scope'].internal, parent['name'])
for parent in rucio.core.did.list_all_parent_dids(scope, name, session=session)]
for tmp_pfn in pfns:
file['pfns'][tmp_pfn[0]] = {'rse_id': rse_id,
'rse': rse,
'type': str(rse_type.name),
'volatile': volatile,
'domain': tmp_pfn[1],
'priority': tmp_pfn[2],
'client_extract': tmp_pfn[3]}
else:
if resolve_parents:
file['parents'] = ['%s:%s' % (parent['scope'].internal, parent['name'])
for parent in rucio.core.did.list_all_parent_dids(file['scope'], file['name'], session=session)]
# quick exit, but don't forget to set the total order for the priority
# --> exploit that L(AN) comes before W(AN) before Z(IP) alphabetically
# and use 1-indexing to be compatible with metalink
tmp = sorted([(file['pfns'][p]['domain'], file['pfns'][p]['priority'], p) for p in file['pfns']])
for i in range(0, len(tmp)):
file['pfns'][tmp[i][2]]['priority'] = i + 1
file['rses'] = {}
rse_pfns = []
for t_rse, t_priority, t_pfn in [(file['pfns'][t_pfn]['rse_id'], file['pfns'][t_pfn]['priority'], t_pfn) for t_pfn in file['pfns']]:
rse_pfns.append((t_rse, t_priority, t_pfn))
rse_pfns = sorted(rse_pfns)
for t_rse, t_priority, t_pfn in rse_pfns:
if t_rse in file['rses']:
file['rses'][t_rse].append(t_pfn)
else:
file['rses'][t_rse] = [t_pfn]
yield file
file = {}
if not ('scope' in file and 'name' in file):
file['scope'], file['name'] = scope, name
file['bytes'], file['md5'], file['adler32'] = bytes, md5, adler32
file['pfns'], file['rses'] = {}, defaultdict(list)
file['states'] = {rse_id: str(state.name if state else state)}
if resolve_parents:
file['parents'] = ['%s:%s' % (parent['scope'].internal, parent['name'])
for parent in rucio.core.did.list_all_parent_dids(scope, name, session=session)]
if rse_id:
# extract properly the pfn from the tuple
file['rses'][rse_id] = list(set([tmp_pfn[0] for tmp_pfn in pfns]))
for tmp_pfn in pfns:
file['pfns'][tmp_pfn[0]] = {'rse_id': rse_id,
'rse': rse,
'type': str(rse_type.name),
'volatile': volatile,
'domain': tmp_pfn[1],
'priority': tmp_pfn[2],
'client_extract': tmp_pfn[3]}
# set the total order for the priority
# --> exploit that L(AN) comes before W(AN) before Z(IP) alphabetically
# and use 1-indexing to be compatible with metalink
if 'pfns' in file:
tmp = sorted([(file['pfns'][p]['domain'], file['pfns'][p]['priority'], p) for p in file['pfns']])
for i in range(0, len(tmp)):
file['pfns'][tmp[i][2]]['priority'] = i + 1
if 'scope' in file and 'name' in file:
file['rses'] = {}
# don't forget to resolve parents for the last replica
if resolve_parents:
file['parents'] = ['%s:%s' % (parent['scope'].internal, parent['name'])
for parent in rucio.core.did.list_all_parent_dids(file['scope'], file['name'], session=session)]
# also sort the pfns inside the rse structure
rse_pfns = []
for t_rse, t_priority, t_pfn in [(file['pfns'][t_pfn]['rse_id'], file['pfns'][t_pfn]['priority'], t_pfn) for t_pfn in file['pfns']]:
rse_pfns.append((t_rse, t_priority, t_pfn))
rse_pfns = sorted(rse_pfns)
for t_rse, t_priority, t_pfn in rse_pfns:
if t_rse in file['rses']:
file['rses'][t_rse].append(t_pfn)
else:
file['rses'][t_rse] = [t_pfn]
yield file
file = {}
for scope, name, bytes, md5, adler32 in _list_files_wo_replicas(files_wo_replica, session):
yield {
'scope': scope,
'name': name,
'bytes': bytes,
'md5': md5,
'adler32': adler32,
'pfns': {},
'rses': defaultdict(list)
}
@stream_session
def list_replicas(dids, schemes=None, unavailable=False, request_id=None,
ignore_availability=True, all_states=False, pfns=True,
rse_expression=None, client_location=None, domain=None,
sign_urls=False, signature_lifetime=None, resolve_archives=True,
resolve_parents=False, nrandom=None,
updated_after=None,
session=None):
"""
List file replicas for a list of data identifiers (DIDs).
:param dids: The list of data identifiers (DIDs).
:param schemes: A list of schemes to filter the replicas. (e.g. file, http, ...)
:param unavailable: (deprecated) Also include unavailable replicas in the list.
:param request_id: ID associated with the request for debugging.
:param ignore_availability: Ignore the RSE blocklisting.
:param all_states: Return all replicas whatever state they are in. Adds an extra 'states' entry in the result dictionary.
:param rse_expression: The RSE expression to restrict list_replicas on a set of RSEs.
:param client_location: Client location dictionary for PFN modification {'ip', 'fqdn', 'site', 'latitude', 'longitude'}
:param domain: The network domain for the call, either None, 'wan' or 'lan'. None is automatic mode, 'all' is both ['lan','wan']
:param sign_urls: If set, will sign the PFNs if necessary.
:param signature_lifetime: If supported, in seconds, restrict the lifetime of the signed PFN.
:param resolve_archives: When set to true, find archives which contain the replicas.
:param resolve_parents: When set to true, find all parent datasets which contain the replicas.
:param updated_after: datetime (UTC time), only return replicas updated after this time
:param session: The database session in use.
"""
if dids:
filter = {'vo': dids[0]['scope'].vo}
else:
filter = {'vo': 'def'}
file_clause, dataset_clause, state_clause, constituent_clause, files_wo_replica = _resolve_dids(
dids=dids,
unavailable=unavailable,
ignore_availability=ignore_availability,
all_states=all_states,
resolve_archives=resolve_archives,
session=session
)
rse_clause = []
if rse_expression:
for rse in parse_expression(expression=rse_expression, filter=filter, session=session):
rse_clause.append(models.RSEFileAssociation.rse_id == rse['id'])
yield from _pick_n_random(
nrandom,
_list_replicas(dataset_clause, file_clause, state_clause, pfns,
schemes, files_wo_replica, rse_clause, client_location, domain,
sign_urls, signature_lifetime, constituent_clause, resolve_parents,
updated_after, filter, ignore_availability,
session)
)
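# Illustrative sketch (not part of the original module): list the root:// and
# https:// replicas of a single file DID. The scope and name are placeholders
# supplied by the caller.
def _example_list_file_replicas(scope, name, session=None):
    dids = [{'scope': scope, 'name': name, 'type': DIDType.FILE}]
    for replica in list_replicas(dids, schemes=['root', 'https'], session=session):
        print(replica['scope'], replica['name'], sorted(replica['pfns']))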
@transactional_session
def __bulk_add_new_file_dids(files, account, dataset_meta=None, session=None):
"""
Bulk add new dids.
    :param files: the list of new files.
    :param account: The account owner.
    :param session: The database session in use.
    :returns: True if successful.
"""
for file in files:
new_did = models.DataIdentifier(scope=file['scope'], name=file['name'],
account=file.get('account') or account,
did_type=DIDType.FILE, bytes=file['bytes'],
md5=file.get('md5'), adler32=file.get('adler32'),
is_new=None)
new_did.save(session=session, flush=False)
if 'meta' in file and file['meta']:
rucio.core.did.set_metadata_bulk(scope=file['scope'], name=file['name'], meta=file['meta'], recursive=False, session=session)
if dataset_meta:
rucio.core.did.set_metadata_bulk(scope=file['scope'], name=file['name'], meta=dataset_meta, recursive=False, session=session)
try:
session.flush()
except IntegrityError as error:
if match('.*IntegrityError.*02291.*integrity constraint.*DIDS_SCOPE_FK.*violated - parent key not found.*', error.args[0]) \
or match('.*IntegrityError.*FOREIGN KEY constraint failed.*', error.args[0]) \
or match('.*IntegrityError.*1452.*Cannot add or update a child row: a foreign key constraint fails.*', error.args[0]) \
or match('.*IntegrityError.*02291.*integrity constraint.*DIDS_SCOPE_FK.*violated - parent key not found.*', error.args[0]) \
or match('.*IntegrityError.*insert or update on table.*violates foreign key constraint "DIDS_SCOPE_FK".*', error.args[0]) \
or match('.*ForeignKeyViolation.*insert or update on table.*violates foreign key constraint.*', error.args[0]) \
or match('.*IntegrityError.*foreign key constraints? failed.*', error.args[0]):
raise exception.ScopeNotFound('Scope not found!')
raise exception.RucioException(error.args)
except DatabaseError as error:
if match('.*(DatabaseError).*ORA-14400.*inserted partition key does not map to any partition.*', error.args[0]):
raise exception.ScopeNotFound('Scope not found!')
raise exception.RucioException(error.args)
except FlushError as error:
if match('New instance .* with identity key .* conflicts with persistent instance', error.args[0]):
raise exception.DataIdentifierAlreadyExists('Data Identifier already exists!')
raise exception.RucioException(error.args)
return True
@transactional_session
def __bulk_add_file_dids(files, account, dataset_meta=None, session=None):
"""
Bulk add new dids.
    :param files: the list of files.
    :param account: The account owner.
    :param session: The database session in use.
    :returns: True if successful.
"""
condition = []
for f in files:
condition.append(and_(models.DataIdentifier.scope == f['scope'], models.DataIdentifier.name == f['name'], models.DataIdentifier.did_type == DIDType.FILE))
q = session.query(models.DataIdentifier.scope,
models.DataIdentifier.name,
models.DataIdentifier.bytes,
models.DataIdentifier.adler32,
models.DataIdentifier.md5).with_hint(models.DataIdentifier, "INDEX(dids DIDS_PK)", 'oracle').filter(or_(*condition))
available_files = [dict([(column, getattr(row, column)) for column in row._fields]) for row in q]
new_files = list()
for file in files:
found = False
for available_file in available_files:
if file['scope'] == available_file['scope'] and file['name'] == available_file['name']:
found = True
break
if not found:
new_files.append(file)
__bulk_add_new_file_dids(files=new_files, account=account,
dataset_meta=dataset_meta,
session=session)
return new_files + available_files
def tombstone_from_delay(tombstone_delay):
# Tolerate None for tombstone_delay
if not tombstone_delay:
return None
if not isinstance(tombstone_delay, timedelta):
try:
tombstone_delay = timedelta(seconds=int(tombstone_delay))
except ValueError:
return None
if not tombstone_delay:
return None
if tombstone_delay < timedelta(0):
return datetime(1970, 1, 1)
return datetime.utcnow() + tombstone_delay
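# Illustrative sketch (not part of the original module) of the three behaviours
# of tombstone_from_delay(): no/unparsable delay yields no tombstone, a negative
# delay yields the epoch (immediately eligible for deletion), a positive delay
# is added to the current time.
def _example_tombstone_from_delay():
    assert tombstone_from_delay(None) is None
    assert tombstone_from_delay('not a number') is None
    assert tombstone_from_delay(-1) == datetime(1970, 1, 1)
    assert tombstone_from_delay(3600) > datetime.utcnow()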
@transactional_session
def __bulk_add_replicas(rse_id, files, account, session=None):
"""
    Bulk add new file replicas.
    :param rse_id: the RSE id.
    :param files: the list of files.
    :param account: The account owner.
    :param session: The database session in use.
    :returns: True if successful.
"""
nbfiles, bytes = 0, 0
# Check for the replicas already available
condition = []
for f in files:
condition.append(and_(models.RSEFileAssociation.scope == f['scope'], models.RSEFileAssociation.name == f['name'], models.RSEFileAssociation.rse_id == rse_id))
query = session.query(models.RSEFileAssociation.scope, models.RSEFileAssociation.name, models.RSEFileAssociation.rse_id).\
with_hint(models.RSEFileAssociation, text="INDEX(REPLICAS REPLICAS_PK)", dialect_name='oracle').\
filter(or_(*condition))
available_replicas = [dict([(column, getattr(row, column)) for column in row._fields]) for row in query]
default_tombstone_delay = next(iter(get_rse_attribute('tombstone_delay', rse_id=rse_id, session=session)), None)
default_tombstone = tombstone_from_delay(default_tombstone_delay)
new_replicas = []
for file in files:
found = False
for available_replica in available_replicas:
if file['scope'] == available_replica['scope'] and file['name'] == available_replica['name'] and rse_id == available_replica['rse_id']:
found = True
break
if not found:
nbfiles += 1
bytes += file['bytes']
new_replicas.append({'rse_id': rse_id, 'scope': file['scope'],
'name': file['name'], 'bytes': file['bytes'],
'path': file.get('path'),
'state': ReplicaState(file.get('state', 'A')),
'md5': file.get('md5'), 'adler32': file.get('adler32'),
'lock_cnt': file.get('lock_cnt', 0),
'tombstone': file.get('tombstone') or default_tombstone})
try:
new_replicas and session.bulk_insert_mappings(models.RSEFileAssociation,
new_replicas)
session.flush()
return nbfiles, bytes
except IntegrityError as error:
if match('.*IntegrityError.*ORA-00001: unique constraint .*REPLICAS_PK.*violated.*', error.args[0]) \
or match('.*IntegrityError.*1062.*Duplicate entry.*', error.args[0]) \
or match('.*IntegrityError.*columns? rse_id.*scope.*name.*not unique.*', error.args[0]) \
or match('.*IntegrityError.*duplicate key value violates unique constraint.*', error.args[0]):
raise exception.Duplicate("File replica already exists!")
raise exception.RucioException(error.args)
except DatabaseError as error:
raise exception.RucioException(error.args)
@transactional_session
def add_replicas(rse_id, files, account, ignore_availability=True,
dataset_meta=None, session=None):
"""
Bulk add file replicas.
:param rse_id: The RSE id.
:param files: The list of files.
:param account: The account owner.
:param ignore_availability: Ignore the RSE blocklisting.
:param session: The database session in use.
    :returns: True if successful.
"""
def _expected_pfns(lfns, rse_settings, scheme, operation='write', domain='wan', protocol_attr=None):
p = rsemgr.create_protocol(rse_settings=rse_settings, operation='write', scheme=scheme, domain=domain, protocol_attr=protocol_attr)
expected_pfns = p.lfns2pfns(lfns)
return clean_surls(expected_pfns.values())
replica_rse = get_rse(rse_id=rse_id, session=session)
if replica_rse.volatile is True:
raise exception.UnsupportedOperation('Cannot add replicas on volatile RSE %s ' % (replica_rse.rse))
if not (replica_rse.availability & 2) and not ignore_availability:
        raise exception.ResourceTemporaryUnavailable('%s is temporarily unavailable for writing' % replica_rse.rse)
replicas = __bulk_add_file_dids(files=files, account=account,
dataset_meta=dataset_meta,
session=session)
pfns, scheme = {}, None # {scheme: [pfns], scheme: [pfns]}
for file in files:
if 'pfn' not in file:
if not replica_rse.deterministic:
raise exception.UnsupportedOperation('PFN needed for this (non deterministic) RSE %s ' % (replica_rse.rse))
else:
scheme = file['pfn'].split(':')[0]
pfns.setdefault(scheme, []).append(file['pfn'])
if pfns:
rse_settings = rsemgr.get_rse_info(rse_id=rse_id, session=session)
for scheme in pfns.keys():
if not replica_rse.deterministic:
p = rsemgr.create_protocol(rse_settings=rse_settings, operation='write', scheme=scheme)
pfns[scheme] = p.parse_pfns(pfns=pfns[scheme])
for file in files:
if file['pfn'].startswith(scheme):
tmp = pfns[scheme][file['pfn']]
file['path'] = ''.join([tmp['path'], tmp['name']])
else:
# Check that the pfns match to the expected pfns
lfns = [{'scope': i['scope'].external, 'name': i['name']} for i in files if i['pfn'].startswith(scheme)]
pfns[scheme] = clean_surls(pfns[scheme])
# Check wan first
found_on_wan = False
available_wan_protocols = rsemgr.get_protocols_ordered(rse_settings=rse_settings, operation='write', scheme=scheme, domain='wan')
expected_pfns_wan = None
for protocol_attr in available_wan_protocols:
pfns_wan_buffer = _expected_pfns(lfns, rse_settings, scheme, operation='write', domain='wan', protocol_attr=protocol_attr)
if not expected_pfns_wan and pfns_wan_buffer:
expected_pfns_wan = pfns_wan_buffer
found_on_wan = found_on_wan or (pfns_wan_buffer == pfns[scheme])
if found_on_wan:
break
if not found_on_wan:
# Check lan
found_on_lan = False
available_lan_protocols = rsemgr.get_protocols_ordered(rse_settings=rse_settings, operation='write', scheme=scheme, domain='lan')
for protocol_attr in available_lan_protocols:
pfns_lan_buffer = _expected_pfns(lfns, rse_settings, scheme, operation='write', domain='lan', protocol_attr=protocol_attr)
found_on_lan = found_on_lan or (pfns_lan_buffer == pfns[scheme])
if found_on_lan:
break
                    if found_on_lan:
# Registration always with wan
pfns[scheme] = expected_pfns_wan
else:
raise exception.InvalidPath('One of the PFNs provided does not match the Rucio expected PFN : got %s, expected %s (%s)' % (str(pfns), str(expected_pfns_wan), str(lfns)))
nbfiles, bytes = __bulk_add_replicas(rse_id=rse_id, files=files, account=account, session=session)
increase(rse_id=rse_id, files=nbfiles, bytes=bytes, session=session)
return replicas
@transactional_session
def add_replica(rse_id, scope, name, bytes, account, adler32=None, md5=None, dsn=None, pfn=None, meta=None, rules=[], tombstone=None, session=None):
"""
Add File replica.
:param rse_id: the rse id.
:param scope: the scope name.
:param name: The data identifier name.
:param bytes: the size of the file.
:param account: The account owner.
:param md5: The md5 checksum.
:param adler32: The adler32 checksum.
:param pfn: Physical file name (for nondeterministic rse).
:param meta: Meta-data associated with the file. Represented as key/value pairs in a dictionary.
:param rules: Replication rules associated with the file. A list of dictionaries, e.g., [{'copies': 2, 'rse_expression': 'TIERS1'}, ].
:param tombstone: If True, create replica with a tombstone.
:param session: The database session in use.
    :returns: True if successful.
"""
if meta is None:
meta = {}
file = {'scope': scope, 'name': name, 'bytes': bytes, 'adler32': adler32, 'md5': md5, 'meta': meta, 'rules': rules, 'tombstone': tombstone}
if pfn:
file['pfn'] = pfn
return add_replicas(rse_id=rse_id, files=[file, ], account=account, session=session)
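# Minimal usage sketch for add_replica (illustrative only; `scope`, `account` and
# `rse_id` are assumed to be already resolved by the caller and are not defined here):
#
#   add_replica(rse_id=rse_id, scope=scope, name='file_0001', bytes=10485760,
#               account=account, adler32='0cc737eb',
#               rules=[{'copies': 2, 'rse_expression': 'TIERS1'}])
#
# The call simply wraps add_replicas() with a single-element file list.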
@transactional_session
def delete_replicas(rse_id, files, ignore_availability=True, session=None):
"""
Delete file replicas.
:param rse_id: the rse id.
:param files: the list of files to delete.
:param ignore_availability: Ignore the RSE blocklisting.
:param session: The database session in use.
"""
replica_rse = get_rse(rse_id=rse_id, session=session)
if not (replica_rse.availability & 1) and not ignore_availability:
        raise exception.ResourceTemporaryUnavailable('%s is temporarily unavailable '
                                                     'for deleting' % replica_rse.rse)
replica_condition, src_condition = [], []
for file in files:
replica_condition.append(
and_(models.RSEFileAssociation.scope == file['scope'],
models.RSEFileAssociation.name == file['name']))
src_condition.append(
and_(models.Source.scope == file['scope'],
models.Source.name == file['name'],
models.Source.rse_id == rse_id))
delta, bytes, rowcount = 0, 0, 0
# WARNING : This should not be necessary since that would mean the replica is used as a source.
for chunk in chunks(src_condition, 10):
rowcount = session.query(models.Source). \
filter(or_(*chunk)). \
delete(synchronize_session=False)
rowcount = 0
for chunk in chunks(replica_condition, 10):
for (scope, name, rid, replica_bytes) in session.query(models.RSEFileAssociation.scope, models.RSEFileAssociation.name, models.RSEFileAssociation.rse_id, models.RSEFileAssociation.bytes). \
with_hint(models.RSEFileAssociation, "INDEX(REPLICAS REPLICAS_PK)", 'oracle').filter(models.RSEFileAssociation.rse_id == rse_id).filter(or_(*chunk)):
bytes += replica_bytes
delta += 1
rowcount += session.query(models.RSEFileAssociation). \
filter(models.RSEFileAssociation.rse_id == rse_id). \
filter(or_(*chunk)). \
delete(synchronize_session=False)
if rowcount != len(files):
raise exception.ReplicaNotFound("One or several replicas don't exist.")
__cleanup_after_replica_deletion(rse_id=rse_id, files=files, session=session)
# Decrease RSE counter
decrease(rse_id=rse_id, files=delta, bytes=bytes, session=session)
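# Minimal usage sketch for delete_replicas (illustrative; `scope` is assumed to be a
# resolved InternalScope and `rse_id` a valid RSE id, neither defined in this module):
#
#   delete_replicas(rse_id=rse_id,
#                   files=[{'scope': scope, 'name': 'file_0001'},
#                          {'scope': scope, 'name': 'file_0002'}])
#
# A ReplicaNotFound exception is raised if any listed replica does not exist, and the
# RSE usage counters are decreased by the number of files and bytes actually removed.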
@transactional_session
def __cleanup_after_replica_deletion(rse_id, files, session=None):
"""
Perform update of collections/archive associations/dids after the removal of their replicas
:param rse_id: the rse id
:param files: list of files whose replica got deleted
:param session: The database session in use.
"""
parent_condition, did_condition = [], []
clt_replica_condition, dst_replica_condition = [], []
incomplete_condition, messages, clt_is_not_archive_condition, archive_contents_condition = [], [], [], []
for file in files:
# Schedule update of all collections containing this file and having a collection replica in the RSE
dst_replica_condition.append(
and_(models.DataIdentifierAssociation.child_scope == file['scope'],
models.DataIdentifierAssociation.child_name == file['name'],
exists(select([1]).prefix_with("/*+ INDEX(COLLECTION_REPLICAS COLLECTION_REPLICAS_PK) */", dialect='oracle')).where(
and_(models.CollectionReplica.scope == models.DataIdentifierAssociation.scope,
models.CollectionReplica.name == models.DataIdentifierAssociation.name,
models.CollectionReplica.rse_id == rse_id))))
# If the file doesn't have any replicas anymore, we should perform cleanups of objects
        # related to this file. However, if the file is "lost", its removal was not intentional,
# so we want to skip deleting the metadata here. Perform cleanups:
# 1) schedule removal of this file from all parent datasets
parent_condition.append(
and_(models.DataIdentifierAssociation.child_scope == file['scope'],
models.DataIdentifierAssociation.child_name == file['name'],
~exists(select([1]).prefix_with("/*+ INDEX(DIDS DIDS_PK) */", dialect='oracle')).where(
and_(models.DataIdentifier.scope == file['scope'],
models.DataIdentifier.name == file['name'],
models.DataIdentifier.availability == DIDAvailability.LOST)),
~exists(select([1]).prefix_with("/*+ INDEX(REPLICAS REPLICAS_PK) */", dialect='oracle')).where(
and_(models.RSEFileAssociation.scope == file['scope'],
models.RSEFileAssociation.name == file['name'])),
~exists(select([1]).prefix_with("/*+ INDEX(ARCHIVE_CONTENTS ARCH_CONTENTS_PK) */", dialect='oracle')).where(
and_(models.ConstituentAssociation.child_scope == file['scope'],
models.ConstituentAssociation.child_name == file['name']))))
# 2) schedule removal of this file from the DID table
did_condition.append(
and_(models.DataIdentifier.scope == file['scope'],
models.DataIdentifier.name == file['name'],
models.DataIdentifier.availability != DIDAvailability.LOST,
~exists(select([1]).prefix_with("/*+ INDEX(REPLICAS REPLICAS_PK) */", dialect='oracle')).where(
and_(models.RSEFileAssociation.scope == file['scope'],
models.RSEFileAssociation.name == file['name'])),
~exists(select([1]).prefix_with("/*+ INDEX(ARCHIVE_CONTENTS ARCH_CONTENTS_PK) */", dialect='oracle')).where(
and_(models.ConstituentAssociation.child_scope == file['scope'],
models.ConstituentAssociation.child_name == file['name']))))
# 3) if the file is an archive, schedule cleanup on the files from inside the archive
archive_contents_condition.append(
and_(models.ConstituentAssociation.scope == file['scope'],
models.ConstituentAssociation.name == file['name'],
~exists(select([1]).prefix_with("/*+ INDEX(DIDS DIDS_PK) */", dialect='oracle')).where(
and_(models.DataIdentifier.scope == file['scope'],
models.DataIdentifier.name == file['name'],
models.DataIdentifier.availability == DIDAvailability.LOST)),
~exists(select([1]).prefix_with("/*+ INDEX(REPLICAS REPLICAS_PK) */", dialect='oracle')).where(
and_(models.RSEFileAssociation.scope == file['scope'],
models.RSEFileAssociation.name == file['name']))))
# Get all collection_replicas at RSE, insert them into UpdatedCollectionReplica
if dst_replica_condition:
for chunk in chunks(dst_replica_condition, 10):
query = session.query(models.DataIdentifierAssociation.scope, models.DataIdentifierAssociation.name).\
filter(or_(*chunk)).\
distinct()
for parent_scope, parent_name in query:
models.UpdatedCollectionReplica(scope=parent_scope,
name=parent_name,
did_type=DIDType.DATASET,
rse_id=rse_id).\
save(session=session, flush=False)
# Delete did from the content for the last did
while parent_condition:
child_did_condition, tmp_parent_condition = [], []
for chunk in chunks(parent_condition, 10):
query = session.query(models.DataIdentifierAssociation.scope, models.DataIdentifierAssociation.name,
models.DataIdentifierAssociation.did_type,
models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.child_name).\
filter(or_(*chunk))
for parent_scope, parent_name, did_type, child_scope, child_name in query:
# Schedule removal of child file/dataset/container from the parent dataset/container
child_did_condition.append(
and_(models.DataIdentifierAssociation.scope == parent_scope,
models.DataIdentifierAssociation.name == parent_name,
models.DataIdentifierAssociation.child_scope == child_scope,
models.DataIdentifierAssociation.child_name == child_name))
# Schedule setting is_archive = False on parents which don't have any children with is_archive == True anymore
clt_is_not_archive_condition.append(
and_(models.DataIdentifierAssociation.scope == parent_scope,
models.DataIdentifierAssociation.name == parent_name,
exists(select([1]).prefix_with("/*+ INDEX(DIDS DIDS_PK) */", dialect='oracle')).where(
and_(models.DataIdentifier.scope == models.DataIdentifierAssociation.scope,
models.DataIdentifier.name == models.DataIdentifierAssociation.name,
models.DataIdentifier.is_archive == true())),
~exists(select([1]).prefix_with("/*+ INDEX(DIDS DIDS_PK) */", dialect='oracle')).where(
and_(models.DataIdentifier.scope == models.DataIdentifierAssociation.child_scope,
models.DataIdentifier.name == models.DataIdentifierAssociation.child_name,
models.DataIdentifier.is_archive == true()))))
# If the parent dataset/container becomes empty as a result of the child removal
                # (it was the last child), metadata cleanup has to be done:
#
# 1) Schedule to remove the replicas of this empty collection
clt_replica_condition.append(
and_(models.CollectionReplica.scope == parent_scope,
models.CollectionReplica.name == parent_name,
exists(select([1]).prefix_with("/*+ INDEX(DIDS DIDS_PK) */", dialect='oracle')).where(
and_(models.DataIdentifier.scope == parent_scope,
models.DataIdentifier.name == parent_name,
models.DataIdentifier.is_open == False)), # NOQA
~exists(select([1]).prefix_with("/*+ INDEX(CONTENTS CONTENTS_PK) */", dialect='oracle')).where(
and_(models.DataIdentifierAssociation.scope == parent_scope,
models.DataIdentifierAssociation.name == parent_name))))
# 2) Schedule removal of this empty collection from its own parent collections
tmp_parent_condition.append(
and_(models.DataIdentifierAssociation.child_scope == parent_scope,
models.DataIdentifierAssociation.child_name == parent_name,
~exists(select([1]).prefix_with("/*+ INDEX(CONTENTS CONTENTS_PK) */", dialect='oracle')).where(
and_(models.DataIdentifierAssociation.scope == parent_scope,
models.DataIdentifierAssociation.name == parent_name))))
# 3) Schedule removal of the entry from the DIDs table
did_condition.append(
and_(models.DataIdentifier.scope == parent_scope,
models.DataIdentifier.name == parent_name,
models.DataIdentifier.is_open == False, # NOQA
~exists([1]).where(
and_(models.DataIdentifierAssociation.child_scope == parent_scope,
models.DataIdentifierAssociation.child_name == parent_name)),
~exists([1]).where(
and_(models.DataIdentifierAssociation.scope == parent_scope,
models.DataIdentifierAssociation.name == parent_name))))
if child_did_condition:
# get the list of modified parent scope, name
for chunk in chunks(child_did_condition, 10):
modifieds = session.query(models.DataIdentifierAssociation.scope,
models.DataIdentifierAssociation.name,
models.DataIdentifierAssociation.did_type).\
distinct().\
with_hint(models.DataIdentifierAssociation, "INDEX(CONTENTS CONTENTS_PK)", 'oracle').\
filter(or_(*chunk)).\
filter(exists(select([1]).
prefix_with("/*+ INDEX(DIDS DIDS_PK) */", dialect='oracle')).
where(and_(models.DataIdentifierAssociation.scope == models.DataIdentifier.scope,
models.DataIdentifierAssociation.name == models.DataIdentifier.name,
                                                           or_(models.DataIdentifier.complete == true(),
                                                               models.DataIdentifier.complete.is_(None)))))
for parent_scope, parent_name, parent_did_type in modifieds:
message = {'scope': parent_scope,
'name': parent_name,
'did_type': parent_did_type,
'event_type': 'INCOMPLETE'}
if message not in messages:
messages.append(message)
incomplete_condition.append(
and_(models.DataIdentifier.scope == parent_scope,
models.DataIdentifier.name == parent_name,
models.DataIdentifier.did_type == parent_did_type))
for chunk in chunks(child_did_condition, 10):
rucio.core.did.insert_content_history(content_clause=chunk, did_created_at=None, session=session)
session.query(models.DataIdentifierAssociation).\
filter(or_(*chunk)).\
delete(synchronize_session=False)
parent_condition = tmp_parent_condition
for chunk in chunks(clt_replica_condition, 10):
session.query(models.CollectionReplica).\
filter(or_(*chunk)).\
delete(synchronize_session=False)
# Update incomplete state
for chunk in chunks(incomplete_condition, 10):
session.query(models.DataIdentifier).\
with_hint(models.DataIdentifier, "INDEX(DIDS DIDS_PK)", 'oracle').\
filter(or_(*chunk)).\
filter(models.DataIdentifier.complete != false()).\
update({'complete': False}, synchronize_session=False)
# delete empty dids
messages, deleted_dids, deleted_rules, deleted_did_meta = [], [], [], []
for chunk in chunks(did_condition, 100):
query = session.query(models.DataIdentifier.scope,
models.DataIdentifier.name,
models.DataIdentifier.did_type).\
with_hint(models.DataIdentifier, "INDEX(DIDS DIDS_PK)", 'oracle').\
filter(or_(*chunk))
for scope, name, did_type in query:
if did_type == DIDType.DATASET:
messages.append({'event_type': 'ERASE',
'payload': dumps({'scope': scope.external,
'name': name,
'account': 'root'})})
deleted_rules.append(and_(models.ReplicationRule.scope == scope,
models.ReplicationRule.name == name))
deleted_dids.append(and_(models.DataIdentifier.scope == scope,
models.DataIdentifier.name == name))
if session.bind.dialect.name == 'oracle':
oracle_version = int(session.connection().connection.version.split('.')[0])
if oracle_version >= 12:
deleted_did_meta.append(and_(models.DidMeta.scope == scope,
models.DidMeta.name == name))
else:
deleted_did_meta.append(and_(models.DidMeta.scope == scope,
models.DidMeta.name == name))
# Remove Archive Constituents
removed_constituents = []
constituents_to_delete_condition = []
for chunk in chunks(archive_contents_condition, 30):
query = session.query(models.ConstituentAssociation). \
with_hint(models.ConstituentAssociation, "INDEX(ARCHIVE_CONTENTS ARCH_CONTENTS_CHILD_IDX)", 'oracle'). \
filter(or_(*chunk))
for constituent in query:
removed_constituents.append({'scope': constituent.child_scope, 'name': constituent.child_name})
constituents_to_delete_condition.append(
and_(models.ConstituentAssociation.scope == constituent.scope,
models.ConstituentAssociation.name == constituent.name,
models.ConstituentAssociation.child_scope == constituent.child_scope,
models.ConstituentAssociation.child_name == constituent.child_name))
models.ConstituentAssociationHistory(
child_scope=constituent.child_scope,
child_name=constituent.child_name,
scope=constituent.scope,
name=constituent.name,
bytes=constituent.bytes,
adler32=constituent.adler32,
md5=constituent.md5,
guid=constituent.guid,
length=constituent.length,
updated_at=constituent.updated_at,
created_at=constituent.created_at,
).save(session=session, flush=False)
if len(constituents_to_delete_condition) > 200:
session.query(models.ConstituentAssociation).\
with_hint(models.ConstituentAssociation, "INDEX(ARCHIVE_CONTENTS ARCH_CONTENTS_PK)", 'oracle').\
filter(or_(*constituents_to_delete_condition)).\
delete(synchronize_session=False)
constituents_to_delete_condition.clear()
__cleanup_after_replica_deletion(rse_id=rse_id, files=removed_constituents, session=session)
removed_constituents.clear()
if constituents_to_delete_condition:
session.query(models.ConstituentAssociation). \
with_hint(models.ConstituentAssociation, "INDEX(ARCHIVE_CONTENTS ARCH_CONTENTS_PK)", 'oracle'). \
filter(or_(*constituents_to_delete_condition)). \
delete(synchronize_session=False)
__cleanup_after_replica_deletion(rse_id=rse_id, files=removed_constituents, session=session)
# Remove rules in Waiting for approval or Suspended
for chunk in chunks(deleted_rules, 100):
session.query(models.ReplicationRule).\
with_hint(models.ReplicationRule, "INDEX(RULES RULES_SCOPE_NAME_IDX)", 'oracle').\
filter(or_(*chunk)).\
filter(models.ReplicationRule.state.in_((RuleState.SUSPENDED,
RuleState.WAITING_APPROVAL))).\
delete(synchronize_session=False)
# Remove DID Metadata
for chunk in chunks(deleted_did_meta, 100):
session.query(models.DidMeta).\
filter(or_(*chunk)).\
delete(synchronize_session=False)
for chunk in chunks(messages, 100):
session.bulk_insert_mappings(models.Message, chunk)
for chunk in chunks(deleted_dids, 100):
session.query(models.DataIdentifier).\
with_hint(models.DataIdentifier, "INDEX(DIDS DIDS_PK)", 'oracle').\
filter(or_(*chunk)).\
delete(synchronize_session=False)
if session.bind.dialect.name != 'oracle':
rucio.core.did.insert_deleted_dids(chunk, session=session)
# Set is_archive = false on collections which don't have archive children anymore
for chunk in chunks(clt_is_not_archive_condition, 100):
clt_to_update = list(session
.query(models.DataIdentifierAssociation.scope,
models.DataIdentifierAssociation.name)
.distinct(models.DataIdentifierAssociation.scope,
models.DataIdentifierAssociation.name)
.with_hint(models.DataIdentifierAssociation, "INDEX(CONTENTS CONTENTS_PK)", 'oracle')
.filter(or_(*chunk)))
if clt_to_update:
session.query(models.DataIdentifier).\
with_hint(models.DataIdentifier, "INDEX(DIDS DIDS_PK)", 'oracle').\
filter(or_(and_(models.DataIdentifier.scope == scope,
models.DataIdentifier.name == name,
models.DataIdentifier.is_archive == true())
for scope, name in clt_to_update)).\
update({'is_archive': False}, synchronize_session=False)
@transactional_session
def get_replica(rse_id, scope, name, session=None):
"""
Get File replica.
:param rse_id: The RSE Id.
:param scope: the scope name.
:param name: The data identifier name.
:param session: The database session in use.
:returns: A dictionary with the list of replica attributes.
"""
try:
row = session.query(models.RSEFileAssociation).filter_by(rse_id=rse_id, scope=scope, name=name).one()
result = {}
for column in row.__table__.columns:
result[column.name] = getattr(row, column.name)
return result
except NoResultFound:
raise exception.ReplicaNotFound("No row found for scope: %s name: %s rse: %s" % (scope, name, get_rse_name(rse_id=rse_id, session=session)))
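# Minimal usage sketch for get_replica (illustrative; `scope` and `rse_id` are assumed
# to be resolved by the caller):
#
#   replica = get_replica(rse_id=rse_id, scope=scope, name='file_0001')
#   print(replica['state'], replica['bytes'], replica['tombstone'])
#
# The returned dictionary carries one key per column of the replicas table.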
@transactional_session
def list_and_mark_unlocked_replicas(limit, bytes=None, rse_id=None, delay_seconds=600, only_delete_obsolete=False, session=None):
"""
List RSE File replicas with no locks.
:param limit: Number of replicas returned.
:param bytes: The amount of needed bytes.
:param rse_id: The rse_id.
:param delay_seconds: The delay to query replicas in BEING_DELETED state
    :param only_delete_obsolete: If set to True, will only return the replicas with EPOCH tombstone
:param session: The database session in use.
:returns: a list of dictionary replica.
"""
none_value = None # Hack to get pep8 happy...
query = session.query(models.RSEFileAssociation.scope,
models.RSEFileAssociation.name,
models.RSEFileAssociation.path,
models.RSEFileAssociation.bytes,
models.RSEFileAssociation.tombstone,
models.RSEFileAssociation.state).\
with_hint(models.RSEFileAssociation, "INDEX_RS_ASC(replicas REPLICAS_TOMBSTONE_IDX) NO_INDEX_FFS(replicas REPLICAS_TOMBSTONE_IDX)", 'oracle').\
filter(models.RSEFileAssociation.tombstone < datetime.utcnow()).\
filter(models.RSEFileAssociation.lock_cnt == 0).\
filter(case([(models.RSEFileAssociation.tombstone != none_value, models.RSEFileAssociation.rse_id), ]) == rse_id).\
filter(or_(models.RSEFileAssociation.state.in_((ReplicaState.AVAILABLE, ReplicaState.UNAVAILABLE, ReplicaState.BAD)),
and_(models.RSEFileAssociation.state == ReplicaState.BEING_DELETED, models.RSEFileAssociation.updated_at < datetime.utcnow() - timedelta(seconds=delay_seconds)))).\
filter(~exists(select([1]).prefix_with("/*+ INDEX(SOURCES SOURCES_SC_NM_DST_IDX) */", dialect='oracle')
.where(and_(models.RSEFileAssociation.scope == models.Source.scope,
models.RSEFileAssociation.name == models.Source.name,
models.RSEFileAssociation.rse_id == models.Source.rse_id)))).\
with_for_update(skip_locked=True).\
order_by(models.RSEFileAssociation.tombstone)
needed_space = bytes
total_bytes, total_files = 0, 0
rows = []
replica_clause = []
for (scope, name, path, bytes, tombstone, state) in query.yield_per(1000):
# Check if more than one replica is available
replica_cnt = session.query(func.count(models.RSEFileAssociation.scope)).\
with_hint(models.RSEFileAssociation, "index(REPLICAS REPLICAS_PK)", 'oracle').\
filter(and_(models.RSEFileAssociation.scope == scope, models.RSEFileAssociation.name == name, models.RSEFileAssociation.rse_id != rse_id)).one()
if replica_cnt[0] > 1:
if state != ReplicaState.UNAVAILABLE:
if tombstone != OBSOLETE:
if only_delete_obsolete:
break
if needed_space is not None and total_bytes > needed_space:
break
total_bytes += bytes
total_files += 1
if total_files > limit:
break
rows.append({'scope': scope, 'name': name, 'path': path,
'bytes': bytes, 'tombstone': tombstone,
'state': state})
replica_clause.append(and_(models.RSEFileAssociation.scope == scope,
models.RSEFileAssociation.name == name,
models.RSEFileAssociation.rse_id == rse_id))
else:
# If this is the last replica, check if there are some requests
request_cnt = session.query(func.count()).\
with_hint(models.Request, "INDEX(requests REQUESTS_SCOPE_NAME_RSE_IDX)", 'oracle').\
filter(and_(models.Request.scope == scope,
models.Request.name == name)).one()
if request_cnt[0] == 0:
if tombstone != OBSOLETE:
if only_delete_obsolete:
break
if needed_space is not None and total_bytes > needed_space:
break
total_bytes += bytes
total_files += 1
if total_files > limit:
break
rows.append({'scope': scope, 'name': name, 'path': path,
'bytes': bytes, 'tombstone': tombstone,
'state': state})
replica_clause.append(and_(models.RSEFileAssociation.scope == scope,
models.RSEFileAssociation.name == name,
models.RSEFileAssociation.rse_id == rse_id))
for chunk in chunks(replica_clause, 100):
session.query(models.RSEFileAssociation).filter(or_(*chunk)).\
with_hint(models.RSEFileAssociation, text="INDEX(REPLICAS REPLICAS_PK)", dialect_name='oracle').\
update({'updated_at': datetime.utcnow(), 'state': ReplicaState.BEING_DELETED, 'tombstone': datetime(1970, 1, 1)}, synchronize_session=False)
return rows
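# Minimal usage sketch for list_and_mark_unlocked_replicas, as a reaper-like caller
# might use it (illustrative; `rse_id` is assumed to be resolved beforehand):
#
#   candidates = list_and_mark_unlocked_replicas(limit=100, bytes=10**9, rse_id=rse_id)
#   for replica in candidates:
#       # each entry carries scope, name, path, bytes, tombstone and state; the selected
#       # rows have already been switched to BEING_DELETED with an epoch tombstone
#       pass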
@transactional_session
def update_replicas_states(replicas, nowait=False, session=None):
"""
Update File replica information and state.
:param replicas: The list of replicas.
:param nowait: Nowait parameter for the for_update queries.
:param session: The database session in use.
"""
for replica in replicas:
query = session.query(models.RSEFileAssociation).filter_by(rse_id=replica['rse_id'], scope=replica['scope'], name=replica['name'])
try:
if nowait:
query.with_for_update(nowait=True).one()
except NoResultFound:
# remember scope, name and rse
raise exception.ReplicaNotFound("No row found for scope: %s name: %s rse: %s" % (replica['scope'], replica['name'], get_rse_name(replica['rse_id'], session=session)))
if isinstance(replica['state'], string_types):
replica['state'] = ReplicaState(replica['state'])
values = {'state': replica['state']}
if replica['state'] == ReplicaState.BEING_DELETED:
query = query.filter_by(lock_cnt=0)
            # Exclude replicas used as sources
stmt = exists([1]).where(and_(models.RSEFileAssociation.scope == models.Source.scope,
models.RSEFileAssociation.name == models.Source.name,
models.RSEFileAssociation.rse_id == models.Source.rse_id))
query = query.filter(not_(stmt))
values['tombstone'] = OBSOLETE
elif replica['state'] == ReplicaState.AVAILABLE:
rucio.core.lock.successful_transfer(scope=replica['scope'], name=replica['name'], rse_id=replica['rse_id'], nowait=nowait, session=session)
elif replica['state'] == ReplicaState.UNAVAILABLE:
rucio.core.lock.failed_transfer(scope=replica['scope'], name=replica['name'], rse_id=replica['rse_id'],
error_message=replica.get('error_message', None),
broken_rule_id=replica.get('broken_rule_id', None),
broken_message=replica.get('broken_message', None),
nowait=nowait, session=session)
elif replica['state'] == ReplicaState.TEMPORARY_UNAVAILABLE:
query = query.filter(or_(models.RSEFileAssociation.state == ReplicaState.AVAILABLE, models.RSEFileAssociation.state == ReplicaState.TEMPORARY_UNAVAILABLE))
if 'path' in replica and replica['path']:
values['path'] = replica['path']
if not query.update(values, synchronize_session=False):
if 'rse' not in replica:
replica['rse'] = get_rse_name(rse_id=replica['rse_id'], session=session)
raise exception.UnsupportedOperation('State %(state)s for replica %(scope)s:%(name)s on %(rse)s cannot be updated' % replica)
return True
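# Minimal usage sketch for update_replicas_states (illustrative; `scope` and `rse_id`
# are assumed to be resolved by the caller):
#
#   update_replicas_states([{'rse_id': rse_id, 'scope': scope, 'name': 'file_0001',
#                            'state': 'A'}])
#
# String states such as 'A' are converted to ReplicaState internally; an
# UnsupportedOperation exception is raised if the transition cannot be applied.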
@transactional_session
def touch_replica(replica, session=None):
"""
Update the accessed_at timestamp of the given file replica/did but don't wait if row is locked.
:param replica: a dictionary with the information of the affected replica.
:param session: The database session in use.
:returns: True, if successful, False otherwise.
"""
try:
accessed_at, none_value = replica.get('accessed_at') or datetime.utcnow(), None
session.query(models.RSEFileAssociation).\
filter_by(rse_id=replica['rse_id'], scope=replica['scope'], name=replica['name']).\
with_hint(models.RSEFileAssociation, "index(REPLICAS REPLICAS_PK)", 'oracle').\
with_for_update(nowait=True).one()
session.query(models.RSEFileAssociation).filter_by(rse_id=replica['rse_id'], scope=replica['scope'], name=replica['name']).\
with_hint(models.RSEFileAssociation, "index(REPLICAS REPLICAS_PK)", 'oracle').\
update({'accessed_at': accessed_at,
'tombstone': case([(and_(models.RSEFileAssociation.tombstone != none_value,
models.RSEFileAssociation.tombstone != OBSOLETE),
accessed_at)],
else_=models.RSEFileAssociation.tombstone)},
synchronize_session=False)
session.query(models.DataIdentifier).\
filter_by(scope=replica['scope'], name=replica['name'], did_type=DIDType.FILE).\
with_hint(models.DataIdentifier, "INDEX(DIDS DIDS_PK)", 'oracle').\
with_for_update(nowait=True).one()
session.query(models.DataIdentifier).\
filter_by(scope=replica['scope'], name=replica['name'], did_type=DIDType.FILE).\
with_hint(models.DataIdentifier, "INDEX(DIDS DIDS_PK)", 'oracle').\
update({'accessed_at': accessed_at}, synchronize_session=False)
except DatabaseError:
return False
except NoResultFound:
return True
return True
@transactional_session
def update_replica_state(rse_id, scope, name, state, session=None):
"""
Update File replica information and state.
:param rse_id: the rse id.
    :param scope: the scope name.
:param name: The data identifier name.
:param state: The state.
:param session: The database session in use.
"""
return update_replicas_states(replicas=[{'scope': scope, 'name': name, 'state': state, 'rse_id': rse_id}], session=session)
@transactional_session
def get_and_lock_file_replicas(scope, name, nowait=False, restrict_rses=None, session=None):
"""
Get file replicas for a specific scope:name.
:param scope: The scope of the did.
:param name: The name of the did.
:param nowait: Nowait parameter for the FOR UPDATE statement
:param restrict_rses: Possible RSE_ids to filter on.
:param session: The db session in use.
:returns: List of SQLAlchemy Replica Objects
"""
query = session.query(models.RSEFileAssociation).filter_by(scope=scope, name=name).filter(models.RSEFileAssociation.state != ReplicaState.BEING_DELETED)
if restrict_rses is not None:
if len(restrict_rses) < 10:
rse_clause = []
for rse_id in restrict_rses:
rse_clause.append(models.RSEFileAssociation.rse_id == rse_id)
if rse_clause:
query = query.filter(or_(*rse_clause))
return query.with_for_update(nowait=nowait).all()
@transactional_session
def get_source_replicas(scope, name, source_rses=None, session=None):
"""
    Get source replicas for a specific scope:name.
:param scope: The scope of the did.
:param name: The name of the did.
    :param source_rses: Possible RSE_ids to filter on.
:param session: The db session in use.
:returns: List of SQLAlchemy Replica Objects
"""
query = session.query(models.RSEFileAssociation.rse_id).filter_by(scope=scope, name=name).filter(models.RSEFileAssociation.state == ReplicaState.AVAILABLE)
if source_rses:
if len(source_rses) < 10:
rse_clause = []
for rse_id in source_rses:
rse_clause.append(models.RSEFileAssociation.rse_id == rse_id)
if rse_clause:
query = query.filter(or_(*rse_clause))
return [a[0] for a in query.all()]
@transactional_session
def get_and_lock_file_replicas_for_dataset(scope, name, nowait=False, restrict_rses=None,
total_threads=None, thread_id=None,
session=None):
"""
Get file replicas for all files of a dataset.
:param scope: The scope of the dataset.
:param name: The name of the dataset.
:param nowait: Nowait parameter for the FOR UPDATE statement
:param restrict_rses: Possible RSE_ids to filter on.
:param total_threads: Total threads
:param thread_id: This thread
:param session: The db session in use.
:returns: (files in dataset, replicas in dataset)
"""
files, replicas = {}, {}
if session.bind.dialect.name == 'postgresql':
# Get content
content_query = session.query(models.DataIdentifierAssociation.child_scope,
models.DataIdentifierAssociation.child_name,
models.DataIdentifierAssociation.bytes,
models.DataIdentifierAssociation.md5,
models.DataIdentifierAssociation.adler32).\
with_hint(models.DataIdentifierAssociation,
"INDEX_RS_ASC(CONTENTS CONTENTS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)",
'oracle').\
filter(models.DataIdentifierAssociation.scope == scope,
models.DataIdentifierAssociation.name == name)
if total_threads and total_threads > 1:
content_query = filter_thread_work(session=session, query=content_query, total_threads=total_threads,
thread_id=thread_id, hash_variable='child_name')
for child_scope, child_name, bytes, md5, adler32 in content_query.yield_per(1000):
files[(child_scope, child_name)] = {'scope': child_scope,
'name': child_name,
'bytes': bytes,
'md5': md5,
'adler32': adler32}
replicas[(child_scope, child_name)] = []
# Get replicas and lock them
query = session.query(models.DataIdentifierAssociation.child_scope,
models.DataIdentifierAssociation.child_name,
models.DataIdentifierAssociation.bytes,
models.DataIdentifierAssociation.md5,
models.DataIdentifierAssociation.adler32,
models.RSEFileAssociation)\
.with_hint(models.DataIdentifierAssociation,
"INDEX_RS_ASC(CONTENTS CONTENTS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)",
'oracle')\
.filter(and_(models.DataIdentifierAssociation.child_scope == models.RSEFileAssociation.scope,
models.DataIdentifierAssociation.child_name == models.RSEFileAssociation.name,
models.RSEFileAssociation.state != ReplicaState.BEING_DELETED))\
.filter(models.DataIdentifierAssociation.scope == scope,
models.DataIdentifierAssociation.name == name)
if restrict_rses is not None:
if len(restrict_rses) < 10:
rse_clause = []
for rse_id in restrict_rses:
rse_clause.append(models.RSEFileAssociation.rse_id == rse_id)
if rse_clause:
query = session.query(models.DataIdentifierAssociation.child_scope,
models.DataIdentifierAssociation.child_name,
models.DataIdentifierAssociation.bytes,
models.DataIdentifierAssociation.md5,
models.DataIdentifierAssociation.adler32,
models.RSEFileAssociation)\
.with_hint(models.DataIdentifierAssociation,
"INDEX_RS_ASC(CONTENTS CONTENTS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)",
'oracle')\
.filter(and_(models.DataIdentifierAssociation.child_scope == models.RSEFileAssociation.scope,
models.DataIdentifierAssociation.child_name == models.RSEFileAssociation.name,
models.RSEFileAssociation.state != ReplicaState.BEING_DELETED,
or_(*rse_clause)))\
.filter(models.DataIdentifierAssociation.scope == scope,
models.DataIdentifierAssociation.name == name)
else:
query = session.query(models.DataIdentifierAssociation.child_scope,
models.DataIdentifierAssociation.child_name,
models.DataIdentifierAssociation.bytes,
models.DataIdentifierAssociation.md5,
models.DataIdentifierAssociation.adler32,
models.RSEFileAssociation)\
.with_hint(models.DataIdentifierAssociation,
"INDEX_RS_ASC(CONTENTS CONTENTS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)",
'oracle') \
.with_hint(models.RSEFileAssociation, "INDEX(REPLICAS REPLICAS_PK)", 'oracle')\
.outerjoin(models.RSEFileAssociation,
and_(models.DataIdentifierAssociation.child_scope == models.RSEFileAssociation.scope,
models.DataIdentifierAssociation.child_name == models.RSEFileAssociation.name,
models.RSEFileAssociation.state != ReplicaState.BEING_DELETED)).\
filter(models.DataIdentifierAssociation.scope == scope,
models.DataIdentifierAssociation.name == name)
if restrict_rses is not None:
if len(restrict_rses) < 10:
rse_clause = []
for rse_id in restrict_rses:
rse_clause.append(models.RSEFileAssociation.rse_id == rse_id)
if rse_clause:
query = session.query(models.DataIdentifierAssociation.child_scope,
models.DataIdentifierAssociation.child_name,
models.DataIdentifierAssociation.bytes,
models.DataIdentifierAssociation.md5,
models.DataIdentifierAssociation.adler32,
models.RSEFileAssociation)\
.with_hint(models.DataIdentifierAssociation,
"INDEX_RS_ASC(CONTENTS CONTENTS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)",
'oracle')\
.outerjoin(models.RSEFileAssociation,
and_(models.DataIdentifierAssociation.child_scope == models.RSEFileAssociation.scope,
models.DataIdentifierAssociation.child_name == models.RSEFileAssociation.name,
models.RSEFileAssociation.state != ReplicaState.BEING_DELETED,
or_(*rse_clause)))\
.filter(models.DataIdentifierAssociation.scope == scope,
models.DataIdentifierAssociation.name == name)
if total_threads and total_threads > 1:
query = filter_thread_work(session=session, query=query, total_threads=total_threads,
thread_id=thread_id, hash_variable='child_name')
query = query.with_for_update(nowait=nowait, of=models.RSEFileAssociation.lock_cnt)
for child_scope, child_name, bytes, md5, adler32, replica in query.yield_per(1000):
if (child_scope, child_name) not in files:
files[(child_scope, child_name)] = {'scope': child_scope,
'name': child_name,
'bytes': bytes,
'md5': md5,
'adler32': adler32}
if (child_scope, child_name) in replicas:
if replica is not None:
replicas[(child_scope, child_name)].append(replica)
else:
replicas[(child_scope, child_name)] = []
if replica is not None:
replicas[(child_scope, child_name)].append(replica)
return (list(files.values()), replicas)
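# Minimal usage sketch for get_and_lock_file_replicas_for_dataset (illustrative;
# `scope` is assumed to be a resolved InternalScope):
#
#   files, replicas = get_and_lock_file_replicas_for_dataset(scope=scope, name='dataset_0001')
#   for file_entry in files:
#       locked = replicas[(file_entry['scope'], file_entry['name'])]
#       # `locked` is the list of RSEFileAssociation rows selected FOR UPDATE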
@transactional_session
def get_source_replicas_for_dataset(scope, name, source_rses=None,
total_threads=None, thread_id=None,
session=None):
"""
Get file replicas for all files of a dataset.
:param scope: The scope of the dataset.
:param name: The name of the dataset.
:param source_rses: Possible source RSE_ids to filter on.
:param total_threads: Total threads
:param thread_id: This thread
:param session: The db session in use.
    :returns: Dictionary mapping (child_scope, child_name) of each file in the dataset to the list of available source RSE ids.
"""
query = session.query(models.DataIdentifierAssociation.child_scope,
models.DataIdentifierAssociation.child_name,
models.RSEFileAssociation.rse_id)\
.with_hint(models.DataIdentifierAssociation, "INDEX_RS_ASC(CONTENTS CONTENTS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", 'oracle')\
.outerjoin(models.RSEFileAssociation,
and_(models.DataIdentifierAssociation.child_scope == models.RSEFileAssociation.scope,
models.DataIdentifierAssociation.child_name == models.RSEFileAssociation.name,
models.RSEFileAssociation.state == ReplicaState.AVAILABLE)).\
filter(models.DataIdentifierAssociation.scope == scope, models.DataIdentifierAssociation.name == name)
if source_rses:
if len(source_rses) < 10:
rse_clause = []
for rse_id in source_rses:
rse_clause.append(models.RSEFileAssociation.rse_id == rse_id)
if rse_clause:
query = session.query(models.DataIdentifierAssociation.child_scope,
models.DataIdentifierAssociation.child_name,
models.RSEFileAssociation.rse_id)\
.with_hint(models.DataIdentifierAssociation, "INDEX_RS_ASC(CONTENTS CONTENTS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", 'oracle')\
.outerjoin(models.RSEFileAssociation,
and_(models.DataIdentifierAssociation.child_scope == models.RSEFileAssociation.scope,
models.DataIdentifierAssociation.child_name == models.RSEFileAssociation.name,
models.RSEFileAssociation.state == ReplicaState.AVAILABLE,
or_(*rse_clause)))\
.filter(models.DataIdentifierAssociation.scope == scope,
models.DataIdentifierAssociation.name == name)
if total_threads and total_threads > 1:
query = filter_thread_work(session=session, query=query, total_threads=total_threads,
thread_id=thread_id, hash_variable='child_name')
replicas = {}
for child_scope, child_name, rse_id in query:
if (child_scope, child_name) in replicas:
if rse_id:
replicas[(child_scope, child_name)].append(rse_id)
else:
replicas[(child_scope, child_name)] = []
if rse_id:
replicas[(child_scope, child_name)].append(rse_id)
return replicas
@read_session
def get_replica_atime(replica, session=None):
"""
Get the accessed_at timestamp for a replica. Just for testing.
    :param replica: A dictionary with {scope, name, rse_id}.
:param session: Database session to use.
:returns: A datetime timestamp with the last access time.
"""
return session.query(models.RSEFileAssociation.accessed_at).filter_by(scope=replica['scope'], name=replica['name'], rse_id=replica['rse_id']).\
with_hint(models.RSEFileAssociation, text="INDEX(REPLICAS REPLICAS_PK)", dialect_name='oracle').one()[0]
@transactional_session
def touch_collection_replicas(collection_replicas, session=None):
"""
Update the accessed_at timestamp of the given collection replicas.
:param collection_replicas: the list of collection replicas.
:param session: The database session in use.
:returns: True, if successful, False otherwise.
"""
now = datetime.utcnow()
for collection_replica in collection_replicas:
try:
session.query(models.CollectionReplica).filter_by(scope=collection_replica['scope'], name=collection_replica['name'], rse_id=collection_replica['rse_id']).\
update({'accessed_at': collection_replica.get('accessed_at') or now}, synchronize_session=False)
except DatabaseError:
return False
return True
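# Minimal usage sketch for touch_collection_replicas (illustrative; `scope` and
# `rse_id` are assumed to be resolved by the caller):
#
#   touch_collection_replicas([{'scope': scope, 'name': 'dataset_0001',
#                               'rse_id': rse_id,
#                               'accessed_at': datetime.utcnow()}])
#
# Returns False as soon as one of the updates hits a DatabaseError, True otherwise.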
@stream_session
def list_dataset_replicas(scope, name, deep=False, session=None):
"""
:param scope: The scope of the dataset.
:param name: The name of the dataset.
:param deep: Lookup at the file level.
:param session: Database session to use.
:returns: A list of dictionaries containing the dataset replicas
with associated metrics and timestamps
"""
if not deep:
query = session.query(models.CollectionReplica.scope,
models.CollectionReplica.name,
models.RSE.rse,
models.CollectionReplica.rse_id,
models.CollectionReplica.bytes,
models.CollectionReplica.length,
models.CollectionReplica.available_bytes,
models.CollectionReplica.available_replicas_cnt.label("available_length"),
models.CollectionReplica.state,
models.CollectionReplica.created_at,
models.CollectionReplica.updated_at,
models.CollectionReplica.accessed_at)\
.filter_by(scope=scope, name=name, did_type=DIDType.DATASET)\
.filter(models.CollectionReplica.rse_id == models.RSE.id)\
.filter(models.RSE.deleted == false())
for row in query:
yield row._asdict()
else:
# find maximum values
content_query = session\
.query(func.sum(models.DataIdentifierAssociation.bytes).label("bytes"),
func.count().label("length"))\
.with_hint(models.DataIdentifierAssociation, "INDEX_RS_ASC(CONTENTS CONTENTS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", 'oracle')\
.filter(models.DataIdentifierAssociation.scope == scope)\
.filter(models.DataIdentifierAssociation.name == name)
bytes, length = 0, 0
for row in content_query:
bytes, length = row.bytes, row.length
# find archives that contain files of the requested dataset
sub_query_archives = session\
.query(models.DataIdentifierAssociation.scope.label('dataset_scope'),
models.DataIdentifierAssociation.name.label('dataset_name'),
models.DataIdentifierAssociation.bytes.label('file_bytes'),
models.ConstituentAssociation.child_scope.label('file_scope'),
models.ConstituentAssociation.child_name.label('file_name'),
models.RSEFileAssociation.scope.label('replica_scope'),
models.RSEFileAssociation.name.label('replica_name'),
models.RSE.rse,
models.RSE.id.label('rse_id'),
models.RSEFileAssociation.created_at,
models.RSEFileAssociation.accessed_at,
models.RSEFileAssociation.updated_at)\
.filter(models.DataIdentifierAssociation.scope == scope)\
.filter(models.DataIdentifierAssociation.name == name)\
.filter(models.ConstituentAssociation.child_scope == models.DataIdentifierAssociation.child_scope)\
.filter(models.ConstituentAssociation.child_name == models.DataIdentifierAssociation.child_name)\
.filter(models.ConstituentAssociation.scope == models.RSEFileAssociation.scope)\
.filter(models.ConstituentAssociation.name == models.RSEFileAssociation.name)\
.filter(models.RSEFileAssociation.rse_id == models.RSE.id)\
.filter(models.RSEFileAssociation.state == ReplicaState.AVAILABLE)\
.filter(models.RSE.deleted == false())\
.subquery()
# count the metrics
group_query_archives = session\
.query(sub_query_archives.c.dataset_scope,
sub_query_archives.c.dataset_name,
sub_query_archives.c.file_scope,
sub_query_archives.c.file_name,
sub_query_archives.c.rse_id,
sub_query_archives.c.rse,
func.sum(sub_query_archives.c.file_bytes).label('file_bytes'),
func.min(sub_query_archives.c.created_at).label('created_at'),
func.max(sub_query_archives.c.updated_at).label('updated_at'),
func.max(sub_query_archives.c.accessed_at).label('accessed_at'))\
.group_by(sub_query_archives.c.dataset_scope,
sub_query_archives.c.dataset_name,
sub_query_archives.c.file_scope,
sub_query_archives.c.file_name,
sub_query_archives.c.rse_id,
sub_query_archives.c.rse)\
.subquery()
# bring it in the same column state as the non-archive query
full_query_archives = session\
.query(group_query_archives.c.dataset_scope.label('scope'),
group_query_archives.c.dataset_name.label('name'),
group_query_archives.c.rse_id,
group_query_archives.c.rse,
func.sum(group_query_archives.c.file_bytes).label('available_bytes'),
func.count().label('available_length'),
func.min(group_query_archives.c.created_at).label('created_at'),
func.max(group_query_archives.c.updated_at).label('updated_at'),
func.max(group_query_archives.c.accessed_at).label('accessed_at'))\
.group_by(group_query_archives.c.dataset_scope,
group_query_archives.c.dataset_name,
group_query_archives.c.rse_id,
group_query_archives.c.rse)
# find the non-archive dataset replicas
sub_query = session\
.query(models.DataIdentifierAssociation.scope,
models.DataIdentifierAssociation.name,
models.RSEFileAssociation.rse_id,
func.sum(models.RSEFileAssociation.bytes).label("available_bytes"),
func.count().label("available_length"),
func.min(models.RSEFileAssociation.created_at).label("created_at"),
func.max(models.RSEFileAssociation.updated_at).label("updated_at"),
func.max(models.RSEFileAssociation.accessed_at).label("accessed_at"))\
.with_hint(models.DataIdentifierAssociation, "INDEX_RS_ASC(CONTENTS CONTENTS_PK) INDEX_RS_ASC(REPLICAS REPLICAS_PK) NO_INDEX_FFS(CONTENTS CONTENTS_PK)", 'oracle')\
.filter(models.DataIdentifierAssociation.child_scope == models.RSEFileAssociation.scope)\
.filter(models.DataIdentifierAssociation.child_name == models.RSEFileAssociation.name)\
.filter(models.DataIdentifierAssociation.scope == scope)\
.filter(models.DataIdentifierAssociation.name == name)\
.filter(models.RSEFileAssociation.state == ReplicaState.AVAILABLE)\
.group_by(models.DataIdentifierAssociation.scope,
models.DataIdentifierAssociation.name,
models.RSEFileAssociation.rse_id)\
.subquery()
query = session\
.query(sub_query.c.scope,
sub_query.c.name,
sub_query.c.rse_id,
models.RSE.rse,
sub_query.c.available_bytes,
sub_query.c.available_length,
sub_query.c.created_at,
sub_query.c.updated_at,
sub_query.c.accessed_at)\
.filter(models.RSE.id == sub_query.c.rse_id)\
.filter(models.RSE.deleted == false())
# join everything together
final_query = query.union_all(full_query_archives)
for row in final_query.all():
replica = row._asdict()
replica['length'], replica['bytes'] = length, bytes
if replica['length'] == row.available_length:
replica['state'] = ReplicaState.AVAILABLE
else:
replica['state'] = ReplicaState.UNAVAILABLE
yield replica
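# Minimal usage sketch for list_dataset_replicas (illustrative; `scope` is assumed to
# be a resolved InternalScope):
#
#   for rep in list_dataset_replicas(scope=scope, name='dataset_0001', deep=True):
#       print(rep['rse'], rep['available_length'], '/', rep['length'], rep['state'])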
@stream_session
def list_dataset_replicas_bulk(names_by_intscope, session=None):
"""
:param names_by_intscope: The dictionary of internal scopes pointing at the list of names.
:param session: Database session to use.
:returns: A list of dictionaries containing the dataset replicas
with associated metrics and timestamps
"""
condition = []
for scope in names_by_intscope:
condition.append(and_(models.CollectionReplica.scope == scope,
models.CollectionReplica.name.in_(names_by_intscope[scope])))
try:
# chunk size refers to the number of different scopes, see above
for chunk in chunks(condition, 10):
query = session.query(models.CollectionReplica.scope,
models.CollectionReplica.name,
models.RSE.rse,
models.CollectionReplica.rse_id,
models.CollectionReplica.bytes,
models.CollectionReplica.length,
models.CollectionReplica.available_bytes,
models.CollectionReplica.available_replicas_cnt.label("available_length"),
models.CollectionReplica.state,
models.CollectionReplica.created_at,
models.CollectionReplica.updated_at,
models.CollectionReplica.accessed_at) \
.filter(models.CollectionReplica.did_type == DIDType.DATASET) \
.filter(models.CollectionReplica.rse_id == models.RSE.id) \
.filter(or_(*chunk)) \
.filter(models.RSE.deleted == false())
for row in query:
yield row._asdict()
except NoResultFound:
raise exception.DataIdentifierNotFound('No Data Identifiers found')
@stream_session
def list_dataset_replicas_vp(scope, name, deep=False, session=None, logger=logging.log):
"""
List dataset replicas for a DID (scope:name) using the
Virtual Placement service.
NOTICE: This is an RnD function and might change or go away at any time.
:param scope: The scope of the dataset.
:param name: The name of the dataset.
:param deep: Lookup at the file level.
:param session: Database session to use.
:returns: If VP exists and there is at least one non-TAPE replica, returns a list of dicts of sites
"""
vp_endpoint = get_vp_endpoint()
vp_replies = ['other']
nr_replies = 5 # force limit reply size
if not vp_endpoint:
return vp_replies
try:
vp_replies = requests.get('{}/ds/{}/{}:{}'.format(vp_endpoint, nr_replies, scope, name),
verify=False,
timeout=1)
if vp_replies.status_code == 200:
vp_replies = vp_replies.json()
else:
vp_replies = ['other']
except requests.exceptions.RequestException as re:
logger(logging.ERROR, 'In list_dataset_replicas_vp, could not access {}. Error:{}'.format(vp_endpoint, re))
vp_replies = ['other']
if vp_replies != ['other']:
# check that there is at least one regular replica
# that is not on tape and has a protocol with scheme "root"
# and can be accessed from WAN
accessible_replica_exists = False
for reply in list_dataset_replicas(scope=scope, name=name, deep=deep, session=session):
rse_info = rsemgr.get_rse_info(rse=reply['rse'], vo=scope.vo, session=session)
if rse_info['rse_type'] == 'TAPE':
continue
for prot in rse_info['protocols']:
if prot['scheme'] == 'root' and prot['domains']['wan']['read']:
accessible_replica_exists = True
break
if accessible_replica_exists is True:
break
if accessible_replica_exists is True:
for vp_reply in vp_replies:
yield {'vp': True, 'site': vp_reply}
@stream_session
def list_datasets_per_rse(rse_id, filters=None, limit=None, session=None):
"""
List datasets at a RSE.
    :param rse_id: the rse id.
:param filters: dictionary of attributes by which the results should be filtered.
:param limit: limit number.
:param session: Database session to use.
:returns: A list of dict dataset replicas
"""
query = session.query(models.CollectionReplica.scope,
models.CollectionReplica.name,
models.RSE.id.label('rse_id'),
models.RSE.rse,
models.CollectionReplica.bytes,
models.CollectionReplica.length,
models.CollectionReplica.available_bytes,
models.CollectionReplica.available_replicas_cnt.label("available_length"),
models.CollectionReplica.state,
models.CollectionReplica.created_at,
models.CollectionReplica.updated_at,
models.CollectionReplica.accessed_at)\
.filter_by(did_type=DIDType.DATASET)\
.filter(models.CollectionReplica.rse_id == models.RSE.id)\
.filter(models.RSE.id == rse_id)\
.filter(models.RSE.deleted == false())
    for (k, v) in (filters.items() if filters else []):
if k == 'name' or k == 'scope':
v_str = v if k != 'scope' else v.internal
if '*' in v_str or '%' in v_str:
if session.bind.dialect.name == 'postgresql': # PostgreSQL escapes automatically
query = query.filter(getattr(models.CollectionReplica, k).like(v_str.replace('*', '%')))
else:
query = query.filter(getattr(models.CollectionReplica, k).like(v_str.replace('*', '%'), escape='\\'))
else:
query = query.filter(getattr(models.CollectionReplica, k) == v)
# hints ?
elif k == 'created_before':
created_before = str_to_date(v)
query = query.filter(models.CollectionReplica.created_at <= created_before)
elif k == 'created_after':
created_after = str_to_date(v)
query = query.filter(models.CollectionReplica.created_at >= created_after)
else:
query = query.filter(getattr(models.CollectionReplica, k) == v)
if limit:
query = query.limit(limit)
for row in query:
yield row._asdict()
@transactional_session
def get_cleaned_updated_collection_replicas(total_workers, worker_number, limit=None, session=None):
"""
Get update request for collection replicas.
:param total_workers: Number of total workers.
:param worker_number: id of the executing worker.
    :param limit: Maximum number of rows to return.
:param session: Database session in use.
:returns: List of update requests for collection replicas.
"""
# Delete update requests which do not have collection_replicas
session.query(models.UpdatedCollectionReplica).filter(models.UpdatedCollectionReplica.rse_id.is_(None)
& ~exists().where(and_(models.CollectionReplica.name == models.UpdatedCollectionReplica.name, # NOQA: W503
models.CollectionReplica.scope == models.UpdatedCollectionReplica.scope))).delete(synchronize_session=False)
session.query(models.UpdatedCollectionReplica).filter(models.UpdatedCollectionReplica.rse_id.isnot(None)
& ~exists().where(and_(models.CollectionReplica.name == models.UpdatedCollectionReplica.name, # NOQA: W503
models.CollectionReplica.scope == models.UpdatedCollectionReplica.scope,
models.CollectionReplica.rse_id == models.UpdatedCollectionReplica.rse_id))).delete(synchronize_session=False)
# Delete duplicates
if session.bind.dialect.name == 'oracle':
schema = ''
if BASE.metadata.schema:
schema = BASE.metadata.schema + '.'
session.execute('DELETE FROM {schema}updated_col_rep A WHERE A.rowid > ANY (SELECT B.rowid FROM {schema}updated_col_rep B WHERE A.scope = B.scope AND A.name=B.name AND A.did_type=B.did_type AND (A.rse_id=B.rse_id OR (A.rse_id IS NULL and B.rse_id IS NULL)))'.format(schema=schema))
elif session.bind.dialect.name == 'mysql':
subquery1 = session.query(func.max(models.UpdatedCollectionReplica.id).label('max_id')).\
group_by(models.UpdatedCollectionReplica.scope,
models.UpdatedCollectionReplica.name,
models.UpdatedCollectionReplica.rse_id).subquery()
subquery2 = session.query(subquery1.c.max_id).subquery()
session.query(models.UpdatedCollectionReplica).filter(models.UpdatedCollectionReplica.id.notin_(subquery2)).delete(synchronize_session=False)
else:
replica_update_requests = session.query(models.UpdatedCollectionReplica)
update_requests_with_rse_id = []
update_requests_without_rse_id = []
duplicate_request_ids = []
for update_request in replica_update_requests.all():
if update_request.rse_id is not None:
small_request = {'name': update_request.name, 'scope': update_request.scope, 'rse_id': update_request.rse_id}
if small_request not in update_requests_with_rse_id:
update_requests_with_rse_id.append(small_request)
else:
duplicate_request_ids.append(update_request.id)
continue
else:
small_request = {'name': update_request.name, 'scope': update_request.scope}
if small_request not in update_requests_without_rse_id:
update_requests_without_rse_id.append(small_request)
else:
duplicate_request_ids.append(update_request.id)
continue
for chunk in chunks(duplicate_request_ids, 100):
session.query(models.UpdatedCollectionReplica).filter(models.UpdatedCollectionReplica.id.in_(chunk)).delete(synchronize_session=False)
query = session.query(models.UpdatedCollectionReplica)
if limit:
query = query.limit(limit)
return [update_request.to_dict() for update_request in query.all()]
@transactional_session
def update_collection_replica(update_request, session=None):
"""
Update a collection replica.
    :param update_request: update request from the updated_col_rep table.
"""
if update_request['rse_id'] is not None:
# Check one specific dataset replica
ds_length = 0
old_available_replicas = 0
ds_bytes = 0
ds_replica_state = None
ds_available_bytes = 0
available_replicas = 0
try:
collection_replica = session.query(models.CollectionReplica)\
.filter_by(scope=update_request['scope'],
name=update_request['name'],
rse_id=update_request['rse_id'])\
.one()
ds_length = collection_replica.length
old_available_replicas = collection_replica.available_replicas_cnt
ds_bytes = collection_replica.bytes
except NoResultFound:
pass
try:
file_replica = session.query(models.RSEFileAssociation, models.DataIdentifierAssociation)\
.filter(models.RSEFileAssociation.scope == models.DataIdentifierAssociation.child_scope,
models.RSEFileAssociation.name == models.DataIdentifierAssociation.child_name,
models.DataIdentifierAssociation.name == update_request['name'],
models.RSEFileAssociation.rse_id == update_request['rse_id'],
models.RSEFileAssociation.state == ReplicaState.AVAILABLE,
update_request['scope'] == models.DataIdentifierAssociation.scope)\
.with_entities(label('ds_available_bytes', func.sum(models.RSEFileAssociation.bytes)),
label('available_replicas', func.count()))\
.one()
available_replicas = file_replica.available_replicas
ds_available_bytes = file_replica.ds_available_bytes
except NoResultFound:
pass
if available_replicas >= ds_length:
ds_replica_state = ReplicaState.AVAILABLE
else:
ds_replica_state = ReplicaState.UNAVAILABLE
if old_available_replicas > 0 and available_replicas == 0:
session.query(models.CollectionReplica).filter_by(scope=update_request['scope'],
name=update_request['name'],
rse_id=update_request['rse_id'])\
.delete()
else:
updated_replica = session.query(models.CollectionReplica).filter_by(scope=update_request['scope'],
name=update_request['name'],
rse_id=update_request['rse_id'])\
.one()
updated_replica.state = ds_replica_state
updated_replica.available_replicas_cnt = available_replicas
updated_replica.length = ds_length
updated_replica.bytes = ds_bytes
updated_replica.available_bytes = ds_available_bytes
else:
# Check all dataset replicas
association = session.query(models.DataIdentifierAssociation)\
.filter_by(scope=update_request['scope'],
name=update_request['name'])\
.with_entities(label('ds_length', func.count()),
label('ds_bytes', func.sum(models.DataIdentifierAssociation.bytes)))\
.one()
ds_length = association.ds_length
ds_bytes = association.ds_bytes
ds_replica_state = None
collection_replicas = session.query(models.CollectionReplica)\
.filter_by(scope=update_request['scope'], name=update_request['name'])\
.all()
for collection_replica in collection_replicas:
if ds_length:
collection_replica.length = ds_length
else:
collection_replica.length = 0
if ds_bytes:
collection_replica.bytes = ds_bytes
else:
collection_replica.bytes = 0
file_replicas = session.query(models.RSEFileAssociation, models.DataIdentifierAssociation)\
.filter(models.RSEFileAssociation.scope == models.DataIdentifierAssociation.child_scope,
models.RSEFileAssociation.name == models.DataIdentifierAssociation.child_name,
models.DataIdentifierAssociation.name == update_request['name'],
models.RSEFileAssociation.state == ReplicaState.AVAILABLE,
update_request['scope'] == models.DataIdentifierAssociation.scope)\
.with_entities(models.RSEFileAssociation.rse_id,
label('ds_available_bytes', func.sum(models.RSEFileAssociation.bytes)),
label('available_replicas', func.count()))\
.group_by(models.RSEFileAssociation.rse_id)\
.all()
for file_replica in file_replicas:
if file_replica.available_replicas >= ds_length:
ds_replica_state = ReplicaState.AVAILABLE
else:
ds_replica_state = ReplicaState.UNAVAILABLE
collection_replica = session.query(models.CollectionReplica)\
.filter_by(scope=update_request['scope'], name=update_request['name'], rse_id=file_replica.rse_id)\
.first()
if collection_replica:
collection_replica.state = ds_replica_state
collection_replica.available_replicas_cnt = file_replica.available_replicas
collection_replica.available_bytes = file_replica.ds_available_bytes
session.query(models.UpdatedCollectionReplica).filter_by(id=update_request['id']).delete()
@read_session
def get_bad_pfns(limit=10000, thread=None, total_threads=None, session=None):
"""
Returns a list of bad PFNs
:param limit: The maximum number of replicas returned.
:param thread: The assigned thread for this minos instance.
:param total_threads: The total number of minos threads.
:param session: The database session in use.
    :returns: list of PFNs {'pfn': pfn, 'state': state, 'reason': reason, 'account': account, 'expires_at': expires_at}
"""
result = []
query = session.query(models.BadPFNs.path, models.BadPFNs.state, models.BadPFNs.reason, models.BadPFNs.account, models.BadPFNs.expires_at)
query = filter_thread_work(session=session, query=query, total_threads=total_threads, thread_id=thread, hash_variable='path')
    query = query.order_by(models.BadPFNs.created_at)
query = query.limit(limit)
for path, state, reason, account, expires_at in query.yield_per(1000):
result.append({'pfn': clean_surls([str(path)])[0], 'state': state, 'reason': reason, 'account': account, 'expires_at': expires_at})
return result
@transactional_session
def bulk_add_bad_replicas(replicas, account, state=BadFilesStatus.TEMPORARY_UNAVAILABLE, reason=None, expires_at=None, session=None):
"""
Bulk add new bad replicas.
:param replicas: the list of bad replicas.
:param account: The account who declared the bad replicas.
:param state: The state of the file (SUSPICIOUS, BAD or TEMPORARY_UNAVAILABLE).
:param session: The database session in use.
    :returns: True if successful.
"""
for replica in replicas:
insert_new_row = True
if state == BadFilesStatus.TEMPORARY_UNAVAILABLE:
query = session.query(models.BadReplicas).filter_by(scope=replica['scope'], name=replica['name'], rse_id=replica['rse_id'], state=state)
if query.count():
query.update({'state': BadFilesStatus.TEMPORARY_UNAVAILABLE, 'updated_at': datetime.utcnow(), 'account': account, 'reason': reason, 'expires_at': expires_at}, synchronize_session=False)
insert_new_row = False
if insert_new_row:
new_bad_replica = models.BadReplicas(scope=replica['scope'], name=replica['name'], rse_id=replica['rse_id'], reason=reason,
state=state, account=account, bytes=None, expires_at=expires_at)
new_bad_replica.save(session=session, flush=False)
try:
session.flush()
except IntegrityError as error:
raise exception.RucioException(error.args)
except DatabaseError as error:
raise exception.RucioException(error.args)
except FlushError as error:
if match('New instance .* with identity key .* conflicts with persistent instance', error.args[0]):
raise exception.DataIdentifierAlreadyExists('Data Identifier already exists!')
raise exception.RucioException(error.args)
return True
@transactional_session
def bulk_delete_bad_pfns(pfns, session=None):
"""
Bulk delete bad PFNs.
:param pfns: the list of new files.
:param session: The database session in use.
    :returns: True if successful.
"""
pfn_clause = []
for pfn in pfns:
pfn_clause.append(models.BadPFNs.path == pfn)
for chunk in chunks(pfn_clause, 100):
query = session.query(models.BadPFNs).filter(or_(*chunk))
query.delete(synchronize_session=False)
return True
@transactional_session
def bulk_delete_bad_replicas(bad_replicas, session=None):
"""
Bulk delete bad replica.
:param bad_replicas: The list of bad replicas to delete (Dictionaries).
:param session: The database session in use.
    :returns: True if successful.
"""
replica_clause = []
for replica in bad_replicas:
replica_clause.append(and_(models.BadReplicas.scope == replica['scope'],
models.BadReplicas.name == replica['name'],
models.BadReplicas.rse_id == replica['rse_id'],
models.BadReplicas.state == replica['state']))
for chunk in chunks(replica_clause, 100):
session.query(models.BadReplicas).filter(or_(*chunk)).\
delete(synchronize_session=False)
return True
@transactional_session
def add_bad_pfns(pfns, account, state, reason=None, expires_at=None, session=None):
"""
Add bad PFNs.
:param pfns: the list of new files.
:param account: The account who declared the bad replicas.
:param state: One of the possible states : BAD, SUSPICIOUS, TEMPORARY_UNAVAILABLE.
:param reason: A string describing the reason of the loss.
:param expires_at: Specify a timeout for the TEMPORARY_UNAVAILABLE replicas. None for BAD files.
:param session: The database session in use.
    :returns: True if successful.
"""
if isinstance(state, string_types):
rep_state = BadPFNStatus[state]
else:
rep_state = state
pfns = clean_surls(pfns)
for pfn in pfns:
new_pfn = models.BadPFNs(path=str(pfn), account=account, state=rep_state, reason=reason, expires_at=expires_at)
new_pfn = session.merge(new_pfn)
new_pfn.save(session=session, flush=False)
try:
session.flush()
except IntegrityError as error:
raise exception.RucioException(error.args)
except DatabaseError as error:
raise exception.RucioException(error.args)
except FlushError as error:
if match('New instance .* with identity key .* conflicts with persistent instance', error.args[0]):
raise exception.Duplicate('One PFN already exists!')
raise exception.RucioException(error.args)
return True
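# Hedged usage sketch (not part of the original module): shows how the states
# documented above could be declared through add_bad_pfns. The PFN values, the
# account and the downtime reason are illustrative assumptions only; the database
# session is injected by the @transactional_session decorator as usual.
def _example_declare_temporary_unavailable(account):
    pfns = ['srm://example.site/rucio/mock/aa/bb/file_1',
            'srm://example.site/rucio/mock/aa/bb/file_2']
    return add_bad_pfns(pfns=pfns,
                        account=account,
                        state=BadPFNStatus.TEMPORARY_UNAVAILABLE,
                        reason='Site in scheduled downtime',
                        expires_at=datetime.utcnow() + timedelta(days=1))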
@read_session
def list_expired_temporary_unavailable_replicas(total_workers, worker_number, limit=10000, session=None):
"""
List the expired temporary unavailable replicas
:param total_workers: Number of total workers.
:param worker_number: id of the executing worker.
:param limit: The maximum number of replicas returned.
:param session: The database session in use.
"""
query = session.query(models.BadReplicas.scope, models.BadReplicas.name, models.BadReplicas.rse_id).\
filter(models.BadReplicas.state == BadFilesStatus.TEMPORARY_UNAVAILABLE).\
filter(models.BadReplicas.expires_at < datetime.utcnow()).\
with_hint(models.ReplicationRule, "index(bad_replicas BAD_REPLICAS_EXPIRES_AT_IDX)", 'oracle').\
order_by(models.BadReplicas.expires_at)
query = filter_thread_work(session=session, query=query, total_threads=total_workers, thread_id=worker_number, hash_variable='name')
query = query.limit(limit)
return query.all()
@read_session
def get_replicas_state(scope=None, name=None, session=None):
"""
    Method used by the necromancer to get all the replicas of a DID
:param scope: The scope of the file.
:param name: The name of the file.
:param session: The database session in use.
:returns: A dictionary with the list of states as keys and the rse_ids as value
"""
query = session.query(models.RSEFileAssociation.rse_id, models.RSEFileAssociation.state).filter_by(scope=scope, name=name)
states = {}
for res in query.all():
rse_id, state = res
if state not in states:
states[state] = []
states[state].append(rse_id)
return states
@read_session
def get_suspicious_files(rse_expression, filter=None, **kwargs):
"""
Gets a list of replicas from bad_replicas table which are: declared more than <nattempts> times since <younger_than> date,
present on the RSE specified by the <rse_expression> and do not have a state in <exclude_states> list.
Selected replicas can also be required to be <available_elsewhere> on another RSE than the one declared in bad_replicas table and/or
be declared as <is_suspicious> in the bad_replicas table.
Keyword Arguments:
:param younger_than: Datetime object to select the replicas which were declared since younger_than date. Default value = 10 days ago.
:param nattempts: The minimum number of replica appearances in the bad_replica DB table from younger_than date. Default value = 0.
:param rse_expression: The RSE expression where the replicas are located.
:param filter: Dictionary of attributes by which the RSE results should be filtered. e.g.: {'availability_write': True}
    :param exclude_states: List of states which eliminates replicas from search result if any of the states in the list
was declared for a replica since younger_than date. Allowed values
= ['B', 'R', 'D', 'L', 'T', 'S'] (meaning 'BAD', 'RECOVERED', 'DELETED', 'LOST', 'TEMPORARY_UNAVAILABLE', 'SUSPICIOUS').
    :param available_elsewhere: If True, only replicas declared in addition as AVAILABLE on another RSE
than the one in the bad_replicas table will be taken into account. Default value = False.
    :param is_suspicious: If True, only replicas declared as SUSPICIOUS in bad replicas table will be taken into account. Default value = False.
:param session: The database session in use. Default value = None.
:returns: a list of replicas:
[{'scope': scope, 'name': name, 'rse': rse, 'rse_id': rse_id, cnt': cnt, 'created_at': created_at}, ...]
"""
younger_than = kwargs.get("younger_than", datetime.now() - timedelta(days=10))
nattempts = kwargs.get("nattempts", 0)
session = kwargs.get("session", None)
exclude_states = kwargs.get("exclude_states", ['B', 'R', 'D'])
available_elsewhere = kwargs.get("available_elsewhere", False)
is_suspicious = kwargs.get("is_suspicious", False)
    # For the two parameters exposed via the web API, check the value types and fall back to the default values
if not isinstance(nattempts, int):
nattempts = 0
if not isinstance(younger_than, datetime):
younger_than = datetime.now() - timedelta(days=10)
# assembling exclude_states_clause
exclude_states_clause = []
for state in exclude_states:
exclude_states_clause.append(BadFilesStatus(state))
# making aliases for bad_replicas and replicas tables
bad_replicas_alias = aliased(models.BadReplicas, name='bad_replicas_alias')
replicas_alias = aliased(models.RSEFileAssociation, name='replicas_alias')
# assembling the selection rse_clause
rse_clause = []
if rse_expression:
parsedexp = parse_expression(expression=rse_expression, filter=filter, session=session)
for rse in parsedexp:
rse_clause.append(models.RSEFileAssociation.rse_id == rse['id'])
# query base
query = session.query(func.count(), bad_replicas_alias.scope, bad_replicas_alias.name, models.RSEFileAssociation.rse_id, func.min(models.RSEFileAssociation.created_at))\
.filter(models.RSEFileAssociation.rse_id == bad_replicas_alias.rse_id,
models.RSEFileAssociation.scope == bad_replicas_alias.scope,
models.RSEFileAssociation.name == bad_replicas_alias.name,
bad_replicas_alias.created_at >= younger_than)
if is_suspicious:
        query = query.filter(bad_replicas_alias.state == BadFilesStatus.SUSPICIOUS)
if rse_clause:
query = query.filter(or_(*rse_clause))
if available_elsewhere:
available_replica = exists(select([1]).where(and_(replicas_alias.state == ReplicaState.AVAILABLE,
replicas_alias.scope == bad_replicas_alias.scope,
replicas_alias.name == bad_replicas_alias.name,
replicas_alias.rse_id != bad_replicas_alias.rse_id)))
query = query.filter(available_replica)
# it is required that the selected replicas
# do not occur as BAD/DELETED/LOST/RECOVERED/...
# in the bad_replicas table during the same time window.
other_states_present = exists(select([1]).where(and_(models.BadReplicas.scope == bad_replicas_alias.scope,
models.BadReplicas.name == bad_replicas_alias.name,
models.BadReplicas.created_at >= younger_than,
models.BadReplicas.rse_id == bad_replicas_alias.rse_id,
models.BadReplicas.state.in_(exclude_states_clause))))
query = query.filter(not_(other_states_present))
# finally, the results are grouped by RSE, scope, name and required to have
# at least 'nattempts' occurrences in the result of the query per replica
query_result = query.group_by(models.RSEFileAssociation.rse_id, bad_replicas_alias.scope, bad_replicas_alias.name).having(func.count() > nattempts).all()
# print(query)
# translating the rse_id to RSE name and assembling the return list of dictionaries
result = []
rses = {}
for cnt, scope, name, rse_id, created_at in query_result:
if rse_id not in rses:
rse = get_rse_name(rse_id=rse_id, session=session)
rses[rse_id] = rse
result.append({'scope': scope, 'name': name, 'rse': rses[rse_id], 'rse_id': rse_id, 'cnt': cnt, 'created_at': created_at})
return result
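# Hedged usage sketch (not part of the original module): illustrates the keyword
# arguments documented above for get_suspicious_files. The RSE expression, the
# availability filter and the thresholds are arbitrary assumptions, not defaults.
def _example_list_suspicious_files():
    return get_suspicious_files(rse_expression='type=DATADISK',
                                filter={'availability_write': True},
                                younger_than=datetime.now() - timedelta(days=3),
                                nattempts=2,
                                exclude_states=['B', 'R', 'D'],
                                available_elsewhere=True,
                                is_suspicious=False)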
@transactional_session
def set_tombstone(rse_id, scope, name, tombstone=OBSOLETE, session=None):
"""
Sets a tombstone on a replica.
:param rse_id: ID of RSE.
:param scope: scope of the replica DID.
:param name: name of the replica DID.
:param tombstone: the tombstone to set. Default is OBSOLETE
:param session: database session in use.
"""
rowcount = session.query(models.RSEFileAssociation).filter(
and_(
models.RSEFileAssociation.rse_id == rse_id,
models.RSEFileAssociation.name == name,
models.RSEFileAssociation.scope == scope,
~exists().where(
and_(
models.ReplicaLock.rse_id == rse_id,
models.ReplicaLock.name == name,
models.ReplicaLock.scope == scope,
)
)
)
) \
.with_hint(models.RSEFileAssociation, "index(REPLICAS REPLICAS_PK)", 'oracle') \
.update({models.RSEFileAssociation.tombstone: tombstone}, synchronize_session=False)
if rowcount == 0:
try:
session.query(models.RSEFileAssociation).filter_by(scope=scope, name=name, rse_id=rse_id).one()
raise exception.ReplicaIsLocked('Replica %s:%s on RSE %s is locked.' % (scope, name, get_rse_name(rse_id=rse_id, session=session)))
except NoResultFound:
raise exception.ReplicaNotFound('Replica %s:%s on RSE %s could not be found.' % (scope, name, get_rse_name(rse_id=rse_id, session=session)))
@read_session
def get_RSEcoverage_of_dataset(scope, name, session=None):
"""
Get total bytes present on RSEs
:param scope: Scope of the dataset
:param name: Name of the dataset
:param session: The db session.
:return: Dictionary { rse_id : <total bytes present at rse_id> }
"""
query = session.query(models.RSEFileAssociation.rse_id, func.sum(models.DataIdentifierAssociation.bytes))
query = query.filter(and_(models.DataIdentifierAssociation.child_scope == models.RSEFileAssociation.scope,
models.DataIdentifierAssociation.child_name == models.RSEFileAssociation.name,
models.DataIdentifierAssociation.scope == scope,
models.DataIdentifierAssociation.name == name,
models.RSEFileAssociation.state != ReplicaState.BEING_DELETED,
))
query = query.group_by(models.RSEFileAssociation.rse_id)
result = {}
for rse_id, total in query:
if total:
result[rse_id] = total
return result
| 51.691101 | 289 | 0.600376 | [
"Apache-2.0"
] | bari12/rucio | lib/rucio/core/replica.py | 171,357 | Python |
import mlrose
from mlrose.algorithms.decorators import short_name
from mlrose.runners._runner_base import _RunnerBase
"""
Example usage:
experiment_name = 'example_experiment'
problem = TSPGenerator.generate(seed=SEED, number_of_cities=22)
ga = GARunner(problem=problem,
experiment_name=experiment_name,
output_directory=OUTPUT_DIRECTORY,
seed=SEED,
iteration_list=2 ** np.arange(12),
max_attempts=1000,
population_sizes=[150, 200, 300],
mutation_rates=[0.4, 0.5, 0.6])
# the two data frames will contain the results
df_run_stats, df_run_curves = ga.run()
"""
@short_name('ga')
class GARunner(_RunnerBase):
def __init__(self, problem, experiment_name, seed, iteration_list, population_sizes, mutation_rates,
hamming_factors=None, hamming_factor_decays=None, max_attempts=500, generate_curves=True, **kwargs):
super().__init__(problem=problem, experiment_name=experiment_name, seed=seed, iteration_list=iteration_list,
max_attempts=max_attempts, generate_curves=generate_curves,
**kwargs)
self.population_sizes = population_sizes
self.mutation_rates = mutation_rates
self.hamming_factors = hamming_factors
self.hamming_factor_decays = hamming_factor_decays
def run(self):
return super().run_experiment_(algorithm=mlrose.genetic_alg,
pop_size=('Population Size', self.population_sizes),
mutation_prob=('Mutation Rate', self.mutation_rates),
hamming_factor=('Hamming Factor', self.hamming_factors),
hamming_decay_factor=('Hamming Factor Decay Rate', self.hamming_factor_decays))
| 40.604167 | 118 | 0.623397 | [
"BSD-3-Clause"
] | tadmorgan/mlrose | mlrose/runners/ga_runner.py | 1,949 | Python |
from flask_wtf import FlaskForm
from wtforms import StringField,PasswordField,SubmitField, ValidationError, BooleanField, TextAreaField,SelectField
from wtforms.validators import Required,Email,EqualTo
from ..models import User
class CommentForm(FlaskForm):
comment = TextAreaField('Your comment:', validators=[Required()])
submit = SubmitField('Comment')
pitch_category = [('Pickup Lines', 'Pickup Lines'), ('Interview Pitch', 'Interview Pitch'), ('Product Pitch', 'Product Pitch'), ('Promo Pitch', 'Promo Pitch')]
class PitchForm(FlaskForm):
category = SelectField('Category', choices=pitch_category)
pitch = TextAreaField('Your pitch:', validators=[Required()])
    submit = SubmitField('Submit Pitch')
| 48.2 | 158 | 0.75657 | [
"MIT"
] | edumorris/pomodoro | app/main/forms.py | 723 | Python |
from django.test import TestCase, override_settings
from wagtail_storages.factories import (
CollectionFactory,
CollectionViewRestrictionFactory,
)
from wagtail_storages.utils import (
get_acl_for_collection,
get_frontend_cache_configuration,
is_s3_boto3_storage_used,
)
class TestIsS3Boto3StorageUsed(TestCase):
@override_settings(
DEFAULT_FILE_STORAGE="django.core.files.storage.FileSystemStorage"
)
def test_should_return_false_if_not(self):
self.assertIs(is_s3_boto3_storage_used(), False)
@override_settings(DEFAULT_FILE_STORAGE="storages.backends.s3boto3.S3Boto3Storage")
def test_should_return_true_if_yes(self):
self.assertIs(is_s3_boto3_storage_used(), True)
@override_settings(WAGTAIL_STORAGES_DOCUMENTS_FRONTENDCACHE={})
def test_get_frontend_cache_configuration_1(self):
self.assertEqual(get_frontend_cache_configuration(), {})
@override_settings(
WAGTAIL_STORAGES_DOCUMENTS_FRONTENDCACHE={
"varnish": {
"BACKEND": "wagtail.contrib.frontend_cache.backends.HTTPBackend",
"LOCATION": "http://localhost:8000",
},
}
)
def test_get_frontend_cache_configuration_2(self):
self.assertEqual(
get_frontend_cache_configuration(),
{
"varnish": {
"BACKEND": "wagtail.contrib.frontend_cache.backends.HTTPBackend",
"LOCATION": "http://localhost:8000",
},
},
)
class TestGetAclForCollection(TestCase):
    def test_public_collection(self):
collection = CollectionFactory()
self.assertEqual(get_acl_for_collection(collection), "public-read")
    def test_private_collection(self):
collection = CollectionViewRestrictionFactory().collection
self.assertEqual(get_acl_for_collection(collection), "private")
| 33.824561 | 87 | 0.693983 | [
"BSD-2-Clause"
] | PetrDlouhy/wagtail-storages | wagtail_storages/tests/test_utils.py | 1,928 | Python |
import numpy as np
import pytest
from sklearn.datasets import make_classification, make_regression
# To use this experimental feature, we need to explicitly ask for it:
from sklearn.experimental import enable_hist_gradient_boosting # noqa
from sklearn.ensemble import HistGradientBoostingRegressor
from sklearn.ensemble import HistGradientBoostingClassifier
X_classification, y_classification = make_classification(random_state=0)
X_regression, y_regression = make_regression(random_state=0)
@pytest.mark.parametrize('GradientBoosting, X, y', [
(HistGradientBoostingClassifier, X_classification, y_classification),
(HistGradientBoostingRegressor, X_regression, y_regression)
])
@pytest.mark.parametrize(
'params, err_msg',
[({'loss': 'blah'}, 'Loss blah is not supported for'),
({'learning_rate': 0}, 'learning_rate=0 must be strictly positive'),
({'learning_rate': -1}, 'learning_rate=-1 must be strictly positive'),
({'max_iter': 0}, 'max_iter=0 must not be smaller than 1'),
({'max_leaf_nodes': 0}, 'max_leaf_nodes=0 should not be smaller than 2'),
({'max_leaf_nodes': 1}, 'max_leaf_nodes=1 should not be smaller than 2'),
({'max_depth': 0}, 'max_depth=0 should not be smaller than 2'),
({'max_depth': 1}, 'max_depth=1 should not be smaller than 2'),
({'min_samples_leaf': 0}, 'min_samples_leaf=0 should not be smaller'),
({'l2_regularization': -1}, 'l2_regularization=-1 must be positive'),
({'max_bins': 1}, 'max_bins=1 should be no smaller than 2 and no larger'),
({'max_bins': 257}, 'max_bins=257 should be no smaller than 2 and no'),
({'n_iter_no_change': -1}, 'n_iter_no_change=-1 must be positive'),
({'validation_fraction': -1}, 'validation_fraction=-1 must be strictly'),
({'validation_fraction': 0}, 'validation_fraction=0 must be strictly'),
({'tol': -1}, 'tol=-1 must not be smaller than 0')]
)
def test_init_parameters_validation(GradientBoosting, X, y, params, err_msg):
with pytest.raises(ValueError, match=err_msg):
GradientBoosting(**params).fit(X, y)
def test_invalid_classification_loss():
binary_clf = HistGradientBoostingClassifier(loss="binary_crossentropy")
err_msg = ("loss='binary_crossentropy' is not defined for multiclass "
"classification with n_classes=3, use "
"loss='categorical_crossentropy' instead")
with pytest.raises(ValueError, match=err_msg):
binary_clf.fit(np.zeros(shape=(3, 2)), np.arange(3))
@pytest.mark.parametrize(
'scoring, validation_fraction, n_iter_no_change, tol', [
('neg_mean_squared_error', .1, 5, 1e-7), # use scorer
('neg_mean_squared_error', None, 5, 1e-1), # use scorer on train data
(None, .1, 5, 1e-7), # same with default scorer
(None, None, 5, 1e-1),
('loss', .1, 5, 1e-7), # use loss
('loss', None, 5, 1e-1), # use loss on training data
(None, None, None, None), # no early stopping
])
def test_early_stopping_regression(scoring, validation_fraction,
n_iter_no_change, tol):
max_iter = 200
X, y = make_regression(random_state=0)
gb = HistGradientBoostingRegressor(
verbose=1, # just for coverage
min_samples_leaf=5, # easier to overfit fast
scoring=scoring,
tol=tol,
validation_fraction=validation_fraction,
max_iter=max_iter,
n_iter_no_change=n_iter_no_change,
random_state=0
)
gb.fit(X, y)
if n_iter_no_change is not None:
assert n_iter_no_change <= gb.n_iter_ < max_iter
else:
assert gb.n_iter_ == max_iter
@pytest.mark.parametrize('data', (
make_classification(random_state=0),
make_classification(n_classes=3, n_clusters_per_class=1, random_state=0)
))
@pytest.mark.parametrize(
'scoring, validation_fraction, n_iter_no_change, tol', [
('accuracy', .1, 5, 1e-7), # use scorer
('accuracy', None, 5, 1e-1), # use scorer on training data
        (None, .1, 5, 1e-7),  # same with default scorer
(None, None, 5, 1e-1),
('loss', .1, 5, 1e-7), # use loss
('loss', None, 5, 1e-1), # use loss on training data
(None, None, None, None), # no early stopping
])
def test_early_stopping_classification(data, scoring, validation_fraction,
n_iter_no_change, tol):
max_iter = 50
X, y = data
gb = HistGradientBoostingClassifier(
verbose=1, # just for coverage
min_samples_leaf=5, # easier to overfit fast
scoring=scoring,
tol=tol,
validation_fraction=validation_fraction,
max_iter=max_iter,
n_iter_no_change=n_iter_no_change,
random_state=0
)
gb.fit(X, y)
if n_iter_no_change is not None:
assert n_iter_no_change <= gb.n_iter_ < max_iter
else:
assert gb.n_iter_ == max_iter
@pytest.mark.parametrize(
'scores, n_iter_no_change, tol, stopping',
[
([], 1, 0.001, False), # not enough iterations
([1, 1, 1], 5, 0.001, False), # not enough iterations
([1, 1, 1, 1, 1], 5, 0.001, False), # not enough iterations
([1, 2, 3, 4, 5, 6], 5, 0.001, False), # significant improvement
([1, 2, 3, 4, 5, 6], 5, 0., False), # significant improvement
([1, 2, 3, 4, 5, 6], 5, 0.999, False), # significant improvement
([1, 2, 3, 4, 5, 6], 5, 5 - 1e-5, False), # significant improvement
([1] * 6, 5, 0., True), # no significant improvement
([1] * 6, 5, 0.001, True), # no significant improvement
([1] * 6, 5, 5, True), # no significant improvement
]
)
def test_should_stop(scores, n_iter_no_change, tol, stopping):
gbdt = HistGradientBoostingClassifier(
n_iter_no_change=n_iter_no_change, tol=tol
)
assert gbdt._should_stop(scores) == stopping
| 40.013514 | 79 | 0.647248 | [
"BSD-3-Clause"
] | KshitizSharmaV/Quant_Platform_Python | lib/python3.6/site-packages/sklearn/ensemble/_hist_gradient_boosting/tests/test_gradient_boosting.py | 5,922 | Python |
from .KdfParams import KdfParams
from .CipherParams import CipherParams
class CryptoStruct:
def __init__(
self,
cipher: int,
ciphertext: str,
cipherparams: CipherParams,
kdf: str,
kdfparams: KdfParams,
mac: str,
):
self._cipher = cipher
self._ciphertext = ciphertext
self._cipherparams = cipherparams
self._kdf = kdf
self._kdfparams = kdfparams
self._mac = mac
@classmethod
def from_dict(cls, crypto):
new_crypto = cls.__new__(cls)
for key in crypto:
setattr(new_crypto, key, crypto[key])
return new_crypto
@property
def cipher(self):
return self._cipher
@cipher.setter
def cipher(self, cipher):
self._cipher = cipher
@property
def ciphertext(self):
return self._ciphertext
@ciphertext.setter
def ciphertext(self, ciphertext):
self._ciphertext = ciphertext
@property
def cipherparams(self):
return self._cipherparams
@cipherparams.setter
def cipherparams(self, cipherparams):
if isinstance(cipherparams, dict):
self._cipherparams = CipherParams.from_dict(cipherparams)
else:
self._cipherparams = cipherparams
@property
def kdf(self):
return self._kdf
@kdf.setter
def kdf(self, kdf):
self._kdf = kdf
@property
def kdfparams(self):
return self._kdfparams
@kdfparams.setter
def kdfparams(self, kdfparams):
if isinstance(kdfparams, dict):
self._kdfparams = KdfParams.from_dict(kdfparams)
else:
self._kdfparams = kdfparams
@property
def mac(self):
return self._mac
@mac.setter
def mac(self, mac):
        self._mac = mac
| 22.641975 | 69 | 0.609051 | [
"MIT"
] | coryhill/xchainpy-lib | xchainpy/xchainpy_crypto/xchainpy_crypto/models/CryptoStruct.py | 1,834 | Python |
# coding: utf-8
"""
Control-M Services
Provides access to BMC Control-M Services # noqa: E501
OpenAPI spec version: 9.20.220
Contact: [email protected]
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class ErrorList(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'errors': 'list[ErrorData]'
}
attribute_map = {
'errors': 'errors'
}
def __init__(self, errors=None): # noqa: E501
"""ErrorList - a model defined in Swagger""" # noqa: E501
self._errors = None
self.discriminator = None
if errors is not None:
self.errors = errors
@property
def errors(self):
"""Gets the errors of this ErrorList. # noqa: E501
:return: The errors of this ErrorList. # noqa: E501
:rtype: list[ErrorData]
"""
return self._errors
@errors.setter
def errors(self, errors):
"""Sets the errors of this ErrorList.
:param errors: The errors of this ErrorList. # noqa: E501
:type: list[ErrorData]
"""
self._errors = errors
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(ErrorList, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ErrorList):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| 27.486486 | 80 | 0.550967 | [
"MIT"
] | dcompane/controlm_py | controlm_py/models/error_list.py | 3,051 | Python |
# Rainbow 2, by Al Sweigart [email protected]
# Shows a simple squiggle rainbow animation.
import time, random, sys
try:
import bext
except ImportError:
print("""This program requires the bext module, which you can install by
opening a Terminal window (on macOS & Linux) and running:
python3 -m pip install --user bext
or a Command Prompt window (on Windows) and running:
python -m pip install --user bext""")
sys.exit()
indent = 10 # How many spaces to indent.
while True:
print(' ' * indent, end='')
bext.fg('red')
print('##', end='')
bext.fg('yellow')
print('##', end='')
bext.fg('green')
print('##', end='')
bext.fg('blue')
print('##', end='')
bext.fg('cyan')
print('##', end='')
bext.fg('purple')
print('##')
if random.randint(0, 1) == 0:
# Increase the number of spaces:
indent = indent + 1
if indent > 20:
indent = 20
else:
# Decrease the number of spaces:
indent = indent - 1
if indent < 0:
indent = 0
time.sleep(0.05) # Add a slight pause.
| 22.877551 | 76 | 0.576271 | [
"MIT"
] | skinzor/PythonStdioGames | src/gamesbyexample/rainbow2.py | 1,121 | Python |
'''
Defines the link functions to be used with GLM and GEE families.
'''
import numpy as np
import scipy.stats
FLOAT_EPS = np.finfo(float).eps
class Link(object):
"""
A generic link function for one-parameter exponential family.
`Link` does nothing, but lays out the methods expected of any subclass.
"""
def __call__(self, p):
"""
Return the value of the link function. This is just a placeholder.
Parameters
----------
p : array_like
Probabilities
Returns
-------
g(p) : array_like
The value of the link function g(p) = z
"""
return NotImplementedError
def inverse(self, z):
"""
Inverse of the link function. Just a placeholder.
Parameters
----------
z : array_like
`z` is usually the linear predictor of the transformed variable
in the IRLS algorithm for GLM.
Returns
-------
g^(-1)(z) : ndarray
The value of the inverse of the link function g^(-1)(z) = p
"""
return NotImplementedError
def deriv(self, p):
"""
Derivative of the link function g'(p). Just a placeholder.
Parameters
----------
p : array_like
Returns
-------
g'(p) : ndarray
The value of the derivative of the link function g'(p)
"""
return NotImplementedError
def deriv2(self, p):
"""Second derivative of the link function g''(p)
implemented through numerical differentiation
"""
from statsmodels.tools.numdiff import _approx_fprime_cs_scalar
return _approx_fprime_cs_scalar(p, self.deriv)
def inverse_deriv(self, z):
"""
Derivative of the inverse link function g^(-1)(z).
Parameters
----------
z : array_like
`z` is usually the linear predictor for a GLM or GEE model.
Returns
-------
g'^(-1)(z) : ndarray
The value of the derivative of the inverse of the link function
Notes
-----
This reference implementation gives the correct result but is
inefficient, so it can be overridden in subclasses.
"""
return 1 / self.deriv(self.inverse(z))
def inverse_deriv2(self, z):
"""
Second derivative of the inverse link function g^(-1)(z).
Parameters
----------
z : array_like
`z` is usually the linear predictor for a GLM or GEE model.
Returns
-------
g'^(-1)(z) : ndarray
The value of the second derivative of the inverse of the link
function
Notes
-----
This reference implementation gives the correct result but is
inefficient, so it can be overridden in subclasses.
"""
iz = self.inverse(z)
return -self.deriv2(iz) / self.deriv(iz)**3
class Logit(Link):
"""
The logit transform
Notes
-----
    call and derivative use a private method _clean to trim p by
    machine epsilon so that p is in (0, 1)
Alias of Logit:
logit = Logit()
"""
def _clean(self, p):
"""
Clip logistic values to range (eps, 1-eps)
Parameters
----------
p : array_like
Probabilities
Returns
-------
pclip : ndarray
Clipped probabilities
"""
return np.clip(p, FLOAT_EPS, 1. - FLOAT_EPS)
def __call__(self, p):
"""
The logit transform
Parameters
----------
p : array_like
Probabilities
Returns
-------
z : ndarray
Logit transform of `p`
Notes
-----
g(p) = log(p / (1 - p))
"""
p = self._clean(p)
return np.log(p / (1. - p))
def inverse(self, z):
"""
Inverse of the logit transform
Parameters
----------
z : array_like
The value of the logit transform at `p`
Returns
-------
p : ndarray
Probabilities
Notes
-----
g^(-1)(z) = exp(z)/(1+exp(z))
"""
z = np.asarray(z)
t = np.exp(-z)
return 1. / (1. + t)
def deriv(self, p):
"""
Derivative of the logit transform
Parameters
----------
p : array_like
Probabilities
Returns
-------
g'(p) : ndarray
Value of the derivative of logit transform at `p`
Notes
-----
g'(p) = 1 / (p * (1 - p))
Alias for `Logit`:
logit = Logit()
"""
p = self._clean(p)
return 1. / (p * (1 - p))
def inverse_deriv(self, z):
"""
Derivative of the inverse of the logit transform
Parameters
----------
z : array_like
`z` is usually the linear predictor for a GLM or GEE model.
Returns
-------
g'^(-1)(z) : ndarray
The value of the derivative of the inverse of the logit function
"""
t = np.exp(z)
return t/(1 + t)**2
def deriv2(self, p):
"""
Second derivative of the logit function.
Parameters
----------
p : array_like
probabilities
Returns
-------
g''(z) : ndarray
The value of the second derivative of the logit function
"""
v = p * (1 - p)
return (2*p - 1) / v**2
class logit(Logit):
pass
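# Hedged illustration (not part of statsmodels): a small numerical check of the
# identities documented above for the logit link, g(p) = log(p / (1 - p)).
# The probability values below are arbitrary assumptions.
def _logit_link_example():
    link = Logit()
    p = np.array([0.2, 0.5, 0.8])
    z = link(p)
    # g^{-1}(g(p)) recovers p, and g^{-1}'(z) equals 1 / g'(p) at p = g^{-1}(z).
    assert np.allclose(link.inverse(z), p)
    assert np.allclose(link.inverse_deriv(z), 1.0 / link.deriv(p))
    return z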
class Power(Link):
"""
The power transform
Parameters
----------
power : float
The exponent of the power transform
Notes
-----
Aliases of Power:
inverse = Power(power=-1)
sqrt = Power(power=.5)
inverse_squared = Power(power=-2.)
identity = Power(power=1.)
"""
def __init__(self, power=1.):
self.power = power
def __call__(self, p):
"""
Power transform link function
Parameters
----------
p : array_like
Mean parameters
Returns
-------
z : array_like
Power transform of x
Notes
-----
g(p) = x**self.power
"""
if self.power == 1:
return p
else:
return np.power(p, self.power)
def inverse(self, z):
"""
Inverse of the power transform link function
Parameters
----------
`z` : array_like
Value of the transformed mean parameters at `p`
Returns
-------
`p` : ndarray
Mean parameters
Notes
-----
        g^(-1)(`z`) = `z`**(1/`power`)
"""
if self.power == 1:
return z
else:
return np.power(z, 1. / self.power)
def deriv(self, p):
"""
Derivative of the power transform
Parameters
----------
p : array_like
Mean parameters
Returns
-------
g'(p) : ndarray
Derivative of power transform of `p`
Notes
-----
g'(`p`) = `power` * `p`**(`power` - 1)
"""
if self.power == 1:
return np.ones_like(p)
else:
return self.power * np.power(p, self.power - 1)
def deriv2(self, p):
"""
Second derivative of the power transform
Parameters
----------
p : array_like
Mean parameters
Returns
-------
g''(p) : ndarray
Second derivative of the power transform of `p`
Notes
-----
g''(`p`) = `power` * (`power` - 1) * `p`**(`power` - 2)
"""
if self.power == 1:
return np.zeros_like(p)
else:
return self.power * (self.power - 1) * np.power(p, self.power - 2)
def inverse_deriv(self, z):
"""
Derivative of the inverse of the power transform
Parameters
----------
z : array_like
`z` is usually the linear predictor for a GLM or GEE model.
Returns
-------
g^(-1)'(z) : ndarray
The value of the derivative of the inverse of the power transform
function
"""
if self.power == 1:
return np.ones_like(z)
else:
return np.power(z, (1 - self.power)/self.power) / self.power
def inverse_deriv2(self, z):
"""
Second derivative of the inverse of the power transform
Parameters
----------
z : array_like
`z` is usually the linear predictor for a GLM or GEE model.
Returns
-------
g^(-1)'(z) : ndarray
The value of the derivative of the inverse of the power transform
function
"""
if self.power == 1:
return np.zeros_like(z)
else:
return ((1 - self.power) *
np.power(z, (1 - 2*self.power)/self.power) / self.power**2)
class inverse_power(Power):
"""
The inverse transform
Notes
-----
g(p) = 1/p
Alias of statsmodels.family.links.Power(power=-1.)
"""
def __init__(self):
super(inverse_power, self).__init__(power=-1.)
class sqrt(Power):
"""
The square-root transform
Notes
-----
g(`p`) = sqrt(`p`)
Alias of statsmodels.family.links.Power(power=.5)
"""
def __init__(self):
super(sqrt, self).__init__(power=.5)
class inverse_squared(Power):
r"""
The inverse squared transform
Notes
-----
g(`p`) = 1/(`p`\*\*2)
Alias of statsmodels.family.links.Power(power=2.)
"""
def __init__(self):
super(inverse_squared, self).__init__(power=-2.)
class identity(Power):
"""
The identity transform
Notes
-----
g(`p`) = `p`
Alias of statsmodels.family.links.Power(power=1.)
"""
def __init__(self):
super(identity, self).__init__(power=1.)
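# Hedged illustration (not part of statsmodels): the aliases above are plain
# Power instances with fixed exponents, so g(p) = p**power and the generic
# identities of Link hold for each of them. The sample values are assumptions.
def _power_link_example():
    p = np.array([0.25, 1.0, 4.0])
    for link in (identity(), sqrt(), inverse_power(), inverse_squared()):
        z = link(p)
        assert np.allclose(link.inverse(z), p)
        assert np.allclose(link.inverse_deriv(z), 1.0 / link.deriv(p))
    return p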
class Log(Link):
"""
The log transform
Notes
-----
call and derivative call a private method _clean to trim the data by
machine epsilon so that p is in (0,1). log is an alias of Log.
"""
def _clean(self, x):
return np.clip(x, FLOAT_EPS, np.inf)
def __call__(self, p, **extra):
"""
Log transform link function
Parameters
----------
x : array_like
Mean parameters
Returns
-------
z : ndarray
log(x)
Notes
-----
g(p) = log(p)
"""
x = self._clean(p)
return np.log(x)
def inverse(self, z):
"""
Inverse of log transform link function
Parameters
----------
z : ndarray
The inverse of the link function at `p`
Returns
-------
p : ndarray
The mean probabilities given the value of the inverse `z`
Notes
-----
g^{-1}(z) = exp(z)
"""
return np.exp(z)
def deriv(self, p):
"""
Derivative of log transform link function
Parameters
----------
p : array_like
Mean parameters
Returns
-------
g'(p) : ndarray
derivative of log transform of x
Notes
-----
g'(x) = 1/x
"""
p = self._clean(p)
return 1. / p
def deriv2(self, p):
"""
Second derivative of the log transform link function
Parameters
----------
p : array_like
Mean parameters
Returns
-------
g''(p) : ndarray
Second derivative of log transform of x
Notes
-----
g''(x) = -1/x^2
"""
p = self._clean(p)
return -1. / p**2
def inverse_deriv(self, z):
"""
Derivative of the inverse of the log transform link function
Parameters
----------
z : ndarray
The inverse of the link function at `p`
Returns
-------
g^(-1)'(z) : ndarray
The value of the derivative of the inverse of the log function,
the exponential function
"""
return np.exp(z)
class log(Log):
"""
The log transform
Notes
-----
log is a an alias of Log.
"""
pass
# TODO: the CDFLink is untested
class CDFLink(Logit):
"""
The use the CDF of a scipy.stats distribution
CDFLink is a subclass of logit in order to use its _clean method
for the link and its derivative.
Parameters
----------
dbn : scipy.stats distribution
Default is dbn=scipy.stats.norm
Notes
-----
The CDF link is untested.
"""
def __init__(self, dbn=scipy.stats.norm):
self.dbn = dbn
def __call__(self, p):
"""
CDF link function
Parameters
----------
p : array_like
Mean parameters
Returns
-------
z : ndarray
(ppf) inverse of CDF transform of p
Notes
-----
g(`p`) = `dbn`.ppf(`p`)
"""
p = self._clean(p)
return self.dbn.ppf(p)
def inverse(self, z):
"""
The inverse of the CDF link
Parameters
----------
z : array_like
The value of the inverse of the link function at `p`
Returns
-------
p : ndarray
Mean probabilities. The value of the inverse of CDF link of `z`
Notes
-----
g^(-1)(`z`) = `dbn`.cdf(`z`)
"""
return self.dbn.cdf(z)
def deriv(self, p):
"""
Derivative of CDF link
Parameters
----------
p : array_like
mean parameters
Returns
-------
g'(p) : ndarray
The derivative of CDF transform at `p`
Notes
-----
g'(`p`) = 1./ `dbn`.pdf(`dbn`.ppf(`p`))
"""
p = self._clean(p)
return 1. / self.dbn.pdf(self.dbn.ppf(p))
def deriv2(self, p):
"""
Second derivative of the link function g''(p)
implemented through numerical differentiation
"""
p = self._clean(p)
linpred = self.dbn.ppf(p)
return - self.inverse_deriv2(linpred) / self.dbn.pdf(linpred)**3
def deriv2_numdiff(self, p):
"""
Second derivative of the link function g''(p)
implemented through numerical differentiation
"""
from statsmodels.tools.numdiff import _approx_fprime_scalar
p = np.atleast_1d(p)
# Note: special function for norm.ppf does not support complex
return _approx_fprime_scalar(p, self.deriv, centered=True)
def inverse_deriv(self, z):
"""
Derivative of the inverse link function
Parameters
----------
z : ndarray
The inverse of the link function at `p`
Returns
-------
g^(-1)'(z) : ndarray
The value of the derivative of the inverse of the logit function.
This is just the pdf in a CDFLink,
"""
return self.dbn.pdf(z)
def inverse_deriv2(self, z):
"""
Second derivative of the inverse link function g^(-1)(z).
Parameters
----------
z : array_like
`z` is usually the linear predictor for a GLM or GEE model.
Returns
-------
g^(-1)''(z) : ndarray
The value of the second derivative of the inverse of the link
function
Notes
-----
This method should be overwritten by subclasses.
The inherited method is implemented through numerical differentiation.
"""
from statsmodels.tools.numdiff import _approx_fprime_scalar
z = np.atleast_1d(z)
# Note: special function for norm.ppf does not support complex
return _approx_fprime_scalar(z, self.inverse_deriv, centered=True)
class probit(CDFLink):
"""
The probit (standard normal CDF) transform
Notes
-----
g(p) = scipy.stats.norm.ppf(p)
probit is an alias of CDFLink.
"""
def inverse_deriv2(self, z):
"""
Second derivative of the inverse link function
This is the derivative of the pdf in a CDFLink
"""
return - z * self.dbn.pdf(z)
def deriv2(self, p):
"""
Second derivative of the link function g''(p)
"""
p = self._clean(p)
linpred = self.dbn.ppf(p)
return linpred / self.dbn.pdf(linpred)**2
class cauchy(CDFLink):
"""
The Cauchy (standard Cauchy CDF) transform
Notes
-----
g(p) = scipy.stats.cauchy.ppf(p)
cauchy is an alias of CDFLink with dbn=scipy.stats.cauchy
"""
def __init__(self):
super(cauchy, self).__init__(dbn=scipy.stats.cauchy)
def deriv2(self, p):
"""
Second derivative of the Cauchy link function.
Parameters
----------
p : array_like
Probabilities
Returns
-------
g''(p) : ndarray
Value of the second derivative of Cauchy link function at `p`
"""
p = self._clean(p)
a = np.pi * (p - 0.5)
d2 = 2 * np.pi**2 * np.sin(a) / np.cos(a)**3
return d2
def inverse_deriv2(self, z):
return - 2 * z / (np.pi * (z**2 + 1)**2)
class CLogLog(Logit):
"""
The complementary log-log transform
CLogLog inherits from Logit in order to have access to its _clean method
for the link and its derivative.
Notes
-----
CLogLog is untested.
"""
def __call__(self, p):
"""
C-Log-Log transform link function
Parameters
----------
p : ndarray
Mean parameters
Returns
-------
z : ndarray
The CLogLog transform of `p`
Notes
-----
g(p) = log(-log(1-p))
"""
p = self._clean(p)
return np.log(-np.log(1 - p))
def inverse(self, z):
"""
Inverse of C-Log-Log transform link function
Parameters
----------
z : array_like
The value of the inverse of the CLogLog link function at `p`
Returns
-------
p : ndarray
Mean parameters
Notes
-----
g^(-1)(`z`) = 1-exp(-exp(`z`))
"""
return 1 - np.exp(-np.exp(z))
def deriv(self, p):
"""
Derivative of C-Log-Log transform link function
Parameters
----------
p : array_like
Mean parameters
Returns
-------
g'(p) : ndarray
The derivative of the CLogLog transform link function
Notes
-----
g'(p) = - 1 / ((p-1)*log(1-p))
"""
p = self._clean(p)
return 1. / ((p - 1) * (np.log(1 - p)))
def deriv2(self, p):
"""
        Second derivative of the C-Log-Log link function
Parameters
----------
p : array_like
Mean parameters
Returns
-------
g''(p) : ndarray
The second derivative of the CLogLog link function
"""
p = self._clean(p)
fl = np.log(1 - p)
d2 = -1 / ((1 - p)**2 * fl)
d2 *= 1 + 1 / fl
return d2
def inverse_deriv(self, z):
"""
Derivative of the inverse of the C-Log-Log transform link function
Parameters
----------
z : array_like
The value of the inverse of the CLogLog link function at `p`
Returns
-------
g^(-1)'(z) : ndarray
The derivative of the inverse of the CLogLog link function
"""
return np.exp(z - np.exp(z))
class cloglog(CLogLog):
"""
The CLogLog transform link function.
Notes
-----
g(`p`) = log(-log(1-`p`))
cloglog is an alias for CLogLog
cloglog = CLogLog()
"""
pass
class LogLog(Logit):
"""
The log-log transform
LogLog inherits from Logit in order to have access to its _clean method
for the link and its derivative.
"""
def __call__(self, p):
"""
Log-Log transform link function
Parameters
----------
p : ndarray
Mean parameters
Returns
-------
z : ndarray
The LogLog transform of `p`
Notes
-----
g(p) = -log(-log(p))
"""
p = self._clean(p)
return -np.log(-np.log(p))
def inverse(self, z):
"""
Inverse of Log-Log transform link function
Parameters
----------
z : array_like
The value of the inverse of the LogLog link function at `p`
Returns
-------
p : ndarray
Mean parameters
Notes
-----
g^(-1)(`z`) = exp(-exp(-`z`))
"""
return np.exp(-np.exp(-z))
def deriv(self, p):
"""
Derivative of Log-Log transform link function
Parameters
----------
p : array_like
Mean parameters
Returns
-------
g'(p) : ndarray
The derivative of the LogLog transform link function
Notes
-----
g'(p) = - 1 /(p * log(p))
"""
p = self._clean(p)
return -1. / (p * (np.log(p)))
def deriv2(self, p):
"""
Second derivative of the Log-Log link function
Parameters
----------
p : array_like
Mean parameters
Returns
-------
g''(p) : ndarray
The second derivative of the LogLog link function
"""
p = self._clean(p)
d2 = (1 + np.log(p)) / (p * (np.log(p)))**2
return d2
def inverse_deriv(self, z):
"""
Derivative of the inverse of the Log-Log transform link function
Parameters
----------
z : array_like
The value of the inverse of the LogLog link function at `p`
Returns
-------
g^(-1)'(z) : ndarray
The derivative of the inverse of the LogLog link function
"""
return np.exp(-np.exp(-z) - z)
def inverse_deriv2(self, z):
"""
Second derivative of the inverse of the Log-Log transform link function
Parameters
----------
z : array_like
The value of the inverse of the LogLog link function at `p`
Returns
-------
g^(-1)''(z) : ndarray
The second derivative of the inverse of the LogLog link function
"""
return self.inverse_deriv(z) * (np.exp(-z) - 1)
class loglog(LogLog):
"""
The LogLog transform link function.
Notes
-----
g(`p`) = -log(-log(`p`))
loglog is an alias for LogLog
loglog = LogLog()
"""
pass
class NegativeBinomial(Link):
'''
The negative binomial link function
Parameters
----------
alpha : float, optional
Alpha is the ancillary parameter of the Negative Binomial link
function. It is assumed to be nonstochastic. The default value is 1.
Permissible values are usually assumed to be in (.01, 2).
'''
def __init__(self, alpha=1.):
self.alpha = alpha
def _clean(self, x):
return np.clip(x, FLOAT_EPS, np.inf)
def __call__(self, p):
'''
Negative Binomial transform link function
Parameters
----------
p : array_like
Mean parameters
Returns
-------
z : ndarray
The negative binomial transform of `p`
Notes
-----
g(p) = log(p/(p + 1/alpha))
'''
p = self._clean(p)
return np.log(p/(p + 1/self.alpha))
def inverse(self, z):
'''
Inverse of the negative binomial transform
Parameters
----------
z : array_like
The value of the inverse of the negative binomial link at `p`.
Returns
-------
p : ndarray
Mean parameters
Notes
-----
g^(-1)(z) = exp(z)/(alpha*(1-exp(z)))
'''
return -1/(self.alpha * (1 - np.exp(-z)))
def deriv(self, p):
'''
Derivative of the negative binomial transform
Parameters
----------
p : array_like
Mean parameters
Returns
-------
g'(p) : ndarray
The derivative of the negative binomial transform link function
Notes
-----
g'(x) = 1/(x+alpha*x^2)
'''
return 1/(p + self.alpha * p**2)
def deriv2(self, p):
'''
Second derivative of the negative binomial link function.
Parameters
----------
p : array_like
Mean parameters
Returns
-------
g''(p) : ndarray
The second derivative of the negative binomial transform link
function
Notes
-----
g''(x) = -(1+2*alpha*x)/(x+alpha*x^2)^2
'''
numer = -(1 + 2 * self.alpha * p)
denom = (p + self.alpha * p**2)**2
return numer / denom
def inverse_deriv(self, z):
'''
Derivative of the inverse of the negative binomial transform
Parameters
----------
z : array_like
Usually the linear predictor for a GLM or GEE model
Returns
-------
g^(-1)'(z) : ndarray
The value of the derivative of the inverse of the negative
binomial link
'''
t = np.exp(z)
return t / (self.alpha * (1-t)**2)
class nbinom(NegativeBinomial):
"""
The negative binomial link function.
Notes
-----
g(p) = log(p/(p + 1/alpha))
nbinom is an alias of NegativeBinomial.
nbinom = NegativeBinomial(alpha=1.)
"""
pass
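# Hedged illustration (not part of statsmodels): verifies, for the negative
# binomial link defined above, the generic relation documented in
# Link.inverse_deriv, namely g^{-1}'(z) = 1 / g'(g^{-1}(z)). The alpha value
# and the mean values are arbitrary assumptions.
def _negative_binomial_link_example():
    link = NegativeBinomial(alpha=0.5)
    mu = np.array([0.5, 2.0, 10.0])
    z = link(mu)
    assert np.allclose(link.inverse(z), mu)
    assert np.allclose(link.inverse_deriv(z), 1.0 / link.deriv(mu))
    return z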
| 21.986656 | 79 | 0.49082 | [
"BSD-3-Clause"
] | BioGeneTools/statsmodels | statsmodels/genmod/families/links.py | 26,362 | Python |
import numpy as np
class ridge:
""" Ridge estimator.
"""
def __init__(self, lmd=0.1):
self.lmd = lmd
self.hat = None
self.hatn = None
def fit(self, X, y):
if self.hat is None:
G = X.T.dot(X) + self.lmd * np.eye(X.shape[1])
self.hat = np.linalg.solve(G, X.T)
if self.hatn is None:
y0 = np.array(list(y[:-1]) + [0])
self.hatn = self.hat.dot(y0)
        self.beta = self.hatn + y[-1] * self.hat[:, -1]
        return self.beta
def predict(self, X):
return X.dot(self.beta)
def conformity(self, y, y_pred):
return 0.5 * np.square(y - y_pred)
class regressor:
def __init__(self, model=None, s_eps=0., conform=None):
self.model = model
self.coefs = []
self.s_eps = s_eps
self.conform = conform
def fit(self, X, y):
refit = True
for t in range(len(self.coefs)):
if self.s_eps == 0:
break
if abs(self.coefs[t][0] - y[-1]) <= self.s_eps:
self.beta = self.coefs[t][1].copy()
refit = False
break
if refit:
self.beta = self.model.fit(X, y)
if self.s_eps != 0:
self.coefs += [[y[-1], self.beta.copy()]]
def predict(self, X):
if len(X.shape) == 1:
X = X.reshape(1, -1)
return self.model.predict(X)
def conformity(self, y, y_pred):
if self.conform is None:
return np.abs(y - y_pred)
else:
return self.conform(y, y_pred)
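# Hedged usage sketch (not part of the original module): fits the regressor
# wrapper around the ridge estimator on synthetic data. The data shapes, the
# noise level and the regularization strength are illustrative assumptions.
if __name__ == "__main__":
    rng = np.random.RandomState(0)
    X = rng.randn(60, 5)
    y = X.dot(rng.randn(5)) + 0.1 * rng.randn(60)
    model = regressor(model=ridge(lmd=0.1))
    model.fit(X, y)
    y_pred = model.predict(X)
    print("mean conformity score:", model.conformity(y, y_pred).mean())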
| 20.831169 | 59 | 0.483791 | [
"BSD-3-Clause"
] | EugeneNdiaye/rootCP | rootcp/models.py | 1,604 | Python |
# qubit number=2
# total number=8
import cirq
import qiskit
from qiskit import IBMQ
from qiskit import QuantumCircuit, QuantumRegister, ClassicalRegister
from qiskit import BasicAer, execute, transpile
from pprint import pprint
from qiskit.test.mock import FakeVigo
from math import log2,floor, sqrt, pi
import numpy as np
import networkx as nx
def build_oracle(n: int, f) -> QuantumCircuit:
# implement the oracle O_f^\pm
# NOTE: use U1 gate (P gate) with \lambda = 180 ==> CZ gate
# or multi_control_Z_gate (issue #127)
controls = QuantumRegister(n, "ofc")
target = QuantumRegister(1, "oft")
oracle = QuantumCircuit(controls, target, name="Of")
for i in range(2 ** n):
rep = np.binary_repr(i, n)
if f(rep) == "1":
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
oracle.mct(controls, target[0], None, mode='noancilla')
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
# oracle.barrier()
# oracle.draw('mpl', filename='circuit/deutsch-oracle.png')
return oracle
def make_circuit(n:int,f) -> QuantumCircuit:
# circuit begin
input_qubit = QuantumRegister(n, "qc")
target = QuantumRegister(1, "qt")
prog = QuantumCircuit(input_qubit, target)
# inverse last one (can be omitted if using O_f^\pm)
prog.x(target)
# apply H to get superposition
for i in range(n):
prog.h(input_qubit[i])
prog.h(input_qubit[1]) # number=1
prog.h(target)
prog.barrier()
# apply oracle O_f
oracle = build_oracle(n, f)
prog.append(
oracle.to_gate(),
[input_qubit[i] for i in range(n)] + [target])
# apply H back (QFT on Z_2^n)
for i in range(n):
prog.h(input_qubit[i])
prog.barrier()
# measure
prog.swap(input_qubit[1],input_qubit[0]) # number=2
prog.swap(input_qubit[1],input_qubit[0]) # number=3
prog.x(input_qubit[1]) # number=5
prog.z(input_qubit[1]) # number=4
prog.swap(input_qubit[1],input_qubit[0]) # number=6
prog.swap(input_qubit[1],input_qubit[0]) # number=7
# circuit end
return prog
if __name__ == '__main__':
n = 2
f = lambda rep: rep[-1]
# f = lambda rep: "1" if rep[0:2] == "01" or rep[0:2] == "10" else "0"
# f = lambda rep: "0"
prog = make_circuit(n, f)
sample_shot =2800
backend = BasicAer.get_backend('statevector_simulator')
circuit1 = transpile(prog,FakeVigo())
circuit1.x(qubit=3)
circuit1.x(qubit=3)
prog = circuit1
info = execute(prog, backend=backend).result().get_statevector()
qubits = round(log2(len(info)))
info = {
np.binary_repr(i, qubits): round((info[i]*(info[i].conjugate())).real,3)
for i in range(2 ** qubits)
}
writefile = open("../data/startQiskit_Class137.csv","w")
print(info,file=writefile)
print("results end", file=writefile)
print(circuit1.depth(),file=writefile)
print(circuit1,file=writefile)
writefile.close()
| 28.256881 | 80 | 0.620779 | [
"BSD-3-Clause"
] | UCLA-SEAL/QDiff | data/p2DJ/New/program/qiskit/class/startQiskit_Class137.py | 3,080 | Python |
# Created by SylvanasSun in 2017.10.17
# !/usr/bin/python
# -*- coding: utf-8 -*-
import collections
import jieba
from jieba import analyse
# TODO: Replace the default hash algorithm with a higher-performance alternative.
def _default_hashfunc(content, hashbits):
"""
    Default hash function is a variable-length version of Python's builtin hash.
    :param content: data that needs to be hashed.
:return: return a decimal number.
"""
if content == "":
return 0
x = ord(content[0]) << 7
m = 1000003
mask = 2 ** hashbits - 1
for c in content:
x = ((x * m) ^ ord(c)) & mask
x ^= len(content)
if x == -1:
x = -2
return x
# TODO: Replace the default tokenizer with the C/C++ version or another high-performance tokenizer.
def _default_tokenizer_func(content, keyword_weight_pair):
"""
Default tokenizer function that uses jieba tokenizer.
:param keyword_weight_pair: maximum pair number of the keyword-weight list.
:return: return keyword-weight list. Example: [('Example',0.4511233019962264),('Hello',0.25548051420382073),...].
"""
seg_list = jieba.lcut_for_search(content)
# Extract keyword-weight list by TF-IDF algorithms and by sorted maximum weight
return jieba.analyse.extract_tags("".join(seg_list), topK=keyword_weight_pair, withWeight=True)
class Simhash(object):
"""
    Class Simhash implements Google's simhash algorithm for filtering duplicate content.
    The idea of the simhash algorithm is to reduce the dimensionality of the content and
    then compare the Hamming distance between fingerprints in order to filter duplicate content.
    For more about the simhash algorithm see: https://en.wikipedia.org/wiki/SimHash
    The default tokenizer used by Simhash is jieba (https://github.com/fxsjy/jieba).
"""
def __init__(self, data, keyword_weight_pair=20, hash_bit_number=64, hashfunc=None, tokenizer_func=None):
"""
        :param data: data that needs to be encoded.
:param keyword_weight_pair: maximum pair number of the keyword-weight list.
:param hash_bit_number: maximum bit number for hashcode.
:param hashfunc: hash function,its first parameter must be data that needs to be encode
and the second parameter must be hash bit number.
:param tokenizer_func: tokenizer function,its first parameter must be content that
needs to be tokenizer and the second parameter must be
keyword_weight_pair.
"""
if hashfunc is None:
self.hashfunc = _default_hashfunc
else:
self.hashfunc = hashfunc
if tokenizer_func is None:
self.tokenizer_func = _default_tokenizer_func
else:
self.tokenizer_func = tokenizer_func
self.hash_bit_number = hash_bit_number
        self.keyword_weight_pair = keyword_weight_pair
if isinstance(data, Simhash):
self.hash = data.hash
elif isinstance(data, int):
self.hash = data
else:
self.simhash(data)
def __str__(self):
return str(self.hash)
def simhash(self, content):
"""
Select policies for simhash on the different types of content.
"""
if content is None:
self.hash = -1
return
if isinstance(content, str):
            features = self.tokenizer_func(content, self.keyword_weight_pair)
self.hash = self.build_from_features(features)
elif isinstance(content, collections.Iterable):
self.hash = self.build_from_features(content)
elif isinstance(content, int):
self.hash = content
else:
raise Exception("Unsupported parameter type %s" % type(content))
def build_from_features(self, features):
"""
:param features: a list of (token,weight) tuples or a token -> weight dict,
                         if a feature is a plain string, a weight of 1 will be assumed.
        :return: a decimal fingerprint accumulated from all of the feature-weight pairs.
"""
v = [0] * self.hash_bit_number
if isinstance(features, dict):
features = features.items()
        # Accumulate the weights column-wise over all features: for each bit position,
        # add the weight when the feature's hash bit is 1, otherwise subtract it.
for f in features:
if isinstance(f, str):
h = self.hashfunc(f, self.hash_bit_number)
w = 1
else:
assert isinstance(f, collections.Iterable)
h = self.hashfunc(f[0], self.hash_bit_number)
w = f[1]
for i in range(self.hash_bit_number):
bitmask = 1 << i
v[i] += w if h & bitmask else -w
        # Set a fingerprint bit only where the accumulated weight is non-negative.
fingerprint = 0
for i in range(self.hash_bit_number):
if v[i] >= 0:
fingerprint += 1 << i
return fingerprint
def is_equal(self, another, limit=0.8):
"""
        Determine whether two simhashes are similar.
        :param another: another simhash.
        :param limit: the similarity threshold.
        :return: True if the similarity is greater than the limit, otherwise False.
"""
if another is None:
raise Exception("Parameter another is null")
if isinstance(another, int):
distance = self.hamming_distance(another)
elif isinstance(another, Simhash):
assert self.hash_bit_number == another.hash_bit_number
distance = self.hamming_distance(another.hash)
else:
raise Exception("Unsupported parameter type %s" % type(another))
similarity = float(self.hash_bit_number - distance) / self.hash_bit_number
if similarity > limit:
return True
return False
def hamming_distance(self, another):
"""
        Compute the Hamming distance, i.e. the total number of differing bits between two binary numbers.
        :param another: another simhash value.
        :return: the Hamming distance between the current simhash and the other simhash.
"""
x = (self.hash ^ another) & ((1 << self.hash_bit_number) - 1)
result = 0
while x:
result += 1
x &= x - 1
return result
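# --- Editor's hedged sketch (not part of the original module) ---
# The loop in hamming_distance() relies on the "x &= x - 1" trick, which clears the
# lowest set bit on every iteration, so the loop body runs once per differing bit.
# A tiny self-check built only from the public API above; the bit patterns are
# arbitrary example values and the helper below is never called automatically.
def _hamming_distance_sketch():
    a = Simhash(0b1011)                     # build directly from an integer fingerprint
    assert a.hamming_distance(0b1001) == 1  # 0b1011 ^ 0b1001 == 0b0010 -> one differing bit
    assert a.is_equal(0b1001) is True       # 63 of 64 bits agree, similarity ~0.98 > 0.8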
if __name__ == "__main__":
sentence_A = """
明朝军制建立在军户制度上,军户即为中国古代世代从军、充当军差的人户。
东晋南北朝时,士兵及家属的户籍隶于军府称为军户。军户子弟世袭为兵未经准许不得脱离军籍。
北魏军户亦有用俘虏充当的。元朝实行军户制度,军户必须出成年男子到军队服役,父死子替,兄亡弟代,世代相袭。
"""
sentence_B = """
明朝的军制是在元朝基础上改进,而没有采用唐宋时期的募兵制。
元朝的军制是建立在游牧民族制度上发展而来,游牧民族在战争是全民征兵,实际上是军户制度。
建立元朝以后,蒙古族还是全部军户,对于占领区招降的军队,也实行军户制度。
"""
sentence_C = "You know nothing Jon Snow!"
sentence_D = "Jon Snow: I konw nothing."
simhash_A = Simhash(sentence_A)
simhash_B = Simhash(sentence_B)
simhash_C = Simhash(sentence_C)
simhash_D = Simhash(sentence_D)
print(simhash_A)
print(simhash_B)
print(simhash_C)
print(simhash_D)
assert simhash_A.is_equal(simhash_B) is True
assert simhash_B.is_equal(simhash_C) is False
assert simhash_C.is_equal(simhash_D) is True
| 36.190244 | 117 | 0.626365 | [
"MIT"
] | SylvanasSun/code-snippets | algorithms/hash/simhash.py | 7,893 | Python |
# -*- coding: utf-8 -*-
#
# Copyright 2012-2015 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import logging
import os
import re
import subprocess
import warnings
from luigi import six
import luigi.configuration
import luigi.contrib.hadoop
import luigi.contrib.hadoop_jar
import luigi.contrib.hdfs
from luigi import LocalTarget
from luigi.task import flatten
logger = logging.getLogger('luigi-interface')
"""
Scalding support for Luigi.
Example configuration section in luigi.cfg::
[scalding]
# scala home directory, which should include a lib subdir with scala jars.
scala-home: /usr/share/scala
# scalding home directory, which should include a lib subdir with
# scalding-*-assembly-* jars as built from the official Twitter build script.
scalding-home: /usr/share/scalding
# provided dependencies, e.g. jars required for compiling but not executing
# scalding jobs. Currently required jars:
# org.apache.hadoop/hadoop-core/0.20.2
# org.slf4j/slf4j-log4j12/1.6.6
# log4j/log4j/1.2.15
# commons-httpclient/commons-httpclient/3.1
# commons-cli/commons-cli/1.2
# org.apache.zookeeper/zookeeper/3.3.4
scalding-provided: /usr/share/scalding/provided
# additional jars required.
scalding-libjars: /usr/share/scalding/libjars
"""
class ScaldingJobRunner(luigi.contrib.hadoop.JobRunner):
"""
JobRunner for `pyscald` commands. Used to run a ScaldingJobTask.
"""
def __init__(self):
conf = luigi.configuration.get_config()
default = os.environ.get('SCALA_HOME', '/usr/share/scala')
self.scala_home = conf.get('scalding', 'scala-home', default)
default = os.environ.get('SCALDING_HOME', '/usr/share/scalding')
self.scalding_home = conf.get('scalding', 'scalding-home', default)
self.provided_dir = conf.get(
'scalding', 'scalding-provided', os.path.join(default, 'provided'))
self.libjars_dir = conf.get(
'scalding', 'scalding-libjars', os.path.join(default, 'libjars'))
self.tmp_dir = LocalTarget(is_tmp=True)
def _get_jars(self, path):
return [os.path.join(path, j) for j in os.listdir(path)
if j.endswith('.jar')]
def get_scala_jars(self, include_compiler=False):
lib_dir = os.path.join(self.scala_home, 'lib')
jars = [os.path.join(lib_dir, 'scala-library.jar')]
# additional jar for scala 2.10 only
reflect = os.path.join(lib_dir, 'scala-reflect.jar')
if os.path.exists(reflect):
jars.append(reflect)
if include_compiler:
jars.append(os.path.join(lib_dir, 'scala-compiler.jar'))
return jars
def get_scalding_jars(self):
lib_dir = os.path.join(self.scalding_home, 'lib')
return self._get_jars(lib_dir)
def get_scalding_core(self):
lib_dir = os.path.join(self.scalding_home, 'lib')
for j in os.listdir(lib_dir):
if j.startswith('scalding-core-'):
p = os.path.join(lib_dir, j)
logger.debug('Found scalding-core: %s', p)
return p
raise luigi.contrib.hadoop.HadoopJobError('Could not find scalding-core.')
def get_provided_jars(self):
return self._get_jars(self.provided_dir)
def get_libjars(self):
return self._get_jars(self.libjars_dir)
def get_tmp_job_jar(self, source):
job_name = os.path.basename(os.path.splitext(source)[0])
return os.path.join(self.tmp_dir.path, job_name + '.jar')
def get_build_dir(self, source):
build_dir = os.path.join(self.tmp_dir.path, 'build')
return build_dir
def get_job_class(self, source):
# find name of the job class
# usually the one that matches file name or last class that extends Job
job_name = os.path.splitext(os.path.basename(source))[0]
package = None
job_class = None
for l in open(source).readlines():
p = re.search(r'package\s+([^\s\(]+)', l)
if p:
package = p.groups()[0]
p = re.search(r'class\s+([^\s\(]+).*extends\s+.*Job', l)
if p:
job_class = p.groups()[0]
if job_class == job_name:
break
if job_class:
if package:
job_class = package + '.' + job_class
logger.debug('Found scalding job class: %s', job_class)
return job_class
else:
            raise luigi.contrib.hadoop.HadoopJobError('Could not find scalding job class.')
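    # Editor's note (illustrative, the names below are made up): for a source file containing
    #   package com.example.jobs
    #   class WordCountJob(args: Args) extends Job(args)
    # get_job_class() above returns "com.example.jobs.WordCountJob".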
def build_job_jar(self, job):
job_jar = job.jar()
if job_jar:
if not os.path.exists(job_jar):
logger.error("Can't find jar: %s, full path %s", job_jar, os.path.abspath(job_jar))
raise Exception("job jar does not exist")
if not job.job_class():
logger.error("Undefined job_class()")
raise Exception("Undefined job_class()")
return job_jar
job_src = job.source()
if not job_src:
logger.error("Both source() and jar() undefined")
raise Exception("Both source() and jar() undefined")
if not os.path.exists(job_src):
logger.error("Can't find source: %s, full path %s", job_src, os.path.abspath(job_src))
raise Exception("job source does not exist")
job_src = job.source()
job_jar = self.get_tmp_job_jar(job_src)
build_dir = self.get_build_dir(job_src)
if not os.path.exists(build_dir):
os.makedirs(build_dir)
classpath = ':'.join(filter(None,
self.get_scalding_jars() +
self.get_provided_jars() +
self.get_libjars() +
job.extra_jars()))
scala_cp = ':'.join(self.get_scala_jars(include_compiler=True))
# compile scala source
arglist = ['java', '-cp', scala_cp, 'scala.tools.nsc.Main',
'-classpath', classpath,
'-d', build_dir, job_src]
logger.info('Compiling scala source: %s', subprocess.list2cmdline(arglist))
subprocess.check_call(arglist)
# build job jar file
arglist = ['jar', 'cf', job_jar, '-C', build_dir, '.']
logger.info('Building job jar: %s', subprocess.list2cmdline(arglist))
subprocess.check_call(arglist)
return job_jar
def run_job(self, job, tracking_url_callback=None):
if tracking_url_callback is not None:
warnings.warn("tracking_url_callback argument is deprecated, task.set_tracking_url is "
"used instead.", DeprecationWarning)
job_jar = self.build_job_jar(job)
jars = [job_jar] + self.get_libjars() + job.extra_jars()
scalding_core = self.get_scalding_core()
libjars = ','.join(filter(None, jars))
arglist = luigi.contrib.hdfs.load_hadoop_cmd() + ['jar', scalding_core, '-libjars', libjars]
arglist += ['-D%s' % c for c in job.jobconfs()]
job_class = job.job_class() or self.get_job_class(job.source())
arglist += [job_class, '--hdfs']
# scalding does not parse argument with '=' properly
arglist += ['--name', job.task_id.replace('=', ':')]
(tmp_files, job_args) = luigi.contrib.hadoop_jar.fix_paths(job)
arglist += job_args
env = os.environ.copy()
jars.append(scalding_core)
hadoop_cp = ':'.join(filter(None, jars))
env['HADOOP_CLASSPATH'] = hadoop_cp
logger.info("Submitting Hadoop job: HADOOP_CLASSPATH=%s %s",
hadoop_cp, subprocess.list2cmdline(arglist))
luigi.contrib.hadoop.run_and_track_hadoop_job(arglist, job.set_tracking_url, env=env)
for a, b in tmp_files:
a.move(b)
class ScaldingJobTask(luigi.contrib.hadoop.BaseHadoopJobTask):
"""
    A job task for Scalding that defines a Scala source and (optionally) a main job class.
requires() should return a dictionary where the keys are Scalding argument
names and values are sub tasks or lists of subtasks.
For example:
.. code-block:: python
{'input1': A, 'input2': C} => --input1 <Aoutput> --input2 <Coutput>
{'input1': [A, B], 'input2': [C]} => --input1 <Aoutput> <Boutput> --input2 <Coutput>
"""
def relpath(self, current_file, rel_path):
"""
Compute path given current file and relative path.
"""
script_dir = os.path.dirname(os.path.abspath(current_file))
rel_path = os.path.abspath(os.path.join(script_dir, rel_path))
return rel_path
def source(self):
"""
Path to the scala source for this Scalding Job
Either one of source() or jar() must be specified.
"""
return None
def jar(self):
"""
Path to the jar file for this Scalding Job
Either one of source() or jar() must be specified.
"""
return None
def extra_jars(self):
"""
Extra jars for building and running this Scalding Job.
"""
return []
def job_class(self):
"""
optional main job class for this Scalding Job.
"""
return None
def job_runner(self):
return ScaldingJobRunner()
def atomic_output(self):
"""
If True, then rewrite output arguments to be temp locations and
atomically move them into place after the job finishes.
"""
return True
def requires(self):
return {}
def job_args(self):
"""
Extra arguments to pass to the Scalding job.
"""
return []
def args(self):
"""
Returns an array of args to pass to the job.
"""
arglist = []
for k, v in six.iteritems(self.requires_hadoop()):
arglist.append('--' + k)
arglist.extend([t.output().path for t in flatten(v)])
arglist.extend(['--output', self.output()])
arglist.extend(self.job_args())
return arglist
| 34.411576 | 100 | 0.611942 | [
"Apache-2.0"
] | Ali-Tahir/luigi | luigi/contrib/scalding.py | 10,702 | Python |
import sys
import hugectr
solver = hugectr.CreateSolver(max_eval_batches = 1,
batchsize_eval = 4096,
batchsize = 64,
lr = 0.001,
vvgpu = [[0,1]],
repeat_dataset = True,
i64_input_key = True,
use_cuda_graph = True)
reader = hugectr.DataReaderParams(data_reader_type = hugectr.DataReaderType_t.Parquet,
source = ["./din_data/train/_file_list.txt"],
eval_source = "./din_data/valid/_file_list.txt",
check_type = hugectr.Check_t.Non,
slot_size_array = [192403, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 63001, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 801])
optimizer = hugectr.CreateOptimizer(optimizer_type = hugectr.Optimizer_t.Adam,
update_type = hugectr.Update_t.Global,
beta1 = 0.9,
beta2 = 0.999,
epsilon = 0.000000001)
model = hugectr.Model(solver, reader, optimizer)
model.add(hugectr.Input(label_dim = 1, label_name = "label",
dense_dim = 0, dense_name = "dense",
data_reader_sparse_param_array =
[hugectr.DataReaderSparseParam("UserID", 1, True, 1),
hugectr.DataReaderSparseParam("GoodID", 1, True, 11),
hugectr.DataReaderSparseParam("CateID", 1, True, 11)]))
model.add(hugectr.SparseEmbedding(embedding_type = hugectr.Embedding_t.DistributedSlotSparseEmbeddingHash,
workspace_size_per_gpu_in_mb = 28,
embedding_vec_size = 18,
combiner = "sum",
sparse_embedding_name = "sparse_embedding_user",
bottom_name = "UserID",
optimizer = optimizer))
model.add(hugectr.SparseEmbedding(embedding_type = hugectr.Embedding_t.DistributedSlotSparseEmbeddingHash,
workspace_size_per_gpu_in_mb = 24,
embedding_vec_size = 18,
combiner = "sum",
sparse_embedding_name = "sparse_embedding_good",
bottom_name = "GoodID",
optimizer = optimizer))
model.add(hugectr.SparseEmbedding(embedding_type = hugectr.Embedding_t.DistributedSlotSparseEmbeddingHash,
workspace_size_per_gpu_in_mb = 10,
embedding_vec_size = 18,
combiner = "sum",
sparse_embedding_name = "sparse_embedding_cate",
bottom_name = "CateID",
optimizer = optimizer))
model.add(hugectr.DenseLayer(layer_type = hugectr.Layer_t.FusedReshapeConcat,
bottom_names = ["sparse_embedding_good", "sparse_embedding_cate"],
top_names = ["FusedReshapeConcat_item_his_em", "FusedReshapeConcat_item"]))
model.add(hugectr.DenseLayer(layer_type = hugectr.Layer_t.Slice,
bottom_names = ["FusedReshapeConcat_item"],
top_names = ["item1", "item2"],
ranges=[(0,36),(0, 36)]))
model.add(hugectr.DenseLayer(layer_type = hugectr.Layer_t.Slice,
bottom_names = ["FusedReshapeConcat_item_his_em"],
top_names = ["item_his1", "item_his2", "item_his3", "item_his4", "item_his5"],
ranges=[(0,36),(0, 36),(0, 36), (0, 36), (0, 36)]))
model.add(hugectr.DenseLayer(layer_type = hugectr.Layer_t.Scale,
bottom_names = ["item1"],
top_names = ["Scale_item"],
axis = 1, factor = 10))
model.add(hugectr.DenseLayer(layer_type = hugectr.Layer_t.Slice,
bottom_names = ["Scale_item"],
top_names = ["Scale_item1", "Scale_item2", "Scale_item3"],
ranges=[(0,36),(0, 36),(0, 36)]))
model.add(hugectr.DenseLayer(layer_type = hugectr.Layer_t.Sub,
bottom_names = ["Scale_item1", "item_his1"],
top_names = ["sub_ih"]))
model.add(hugectr.DenseLayer(layer_type = hugectr.Layer_t.DotProduct,
bottom_names = ["Scale_item2", "item_his2"],
top_names = ["DotProduct_i"]))
model.add(hugectr.DenseLayer(layer_type = hugectr.Layer_t.Concat,
bottom_names = ["Scale_item3", "item_his3", "sub_ih", "DotProduct_i"],
top_names = ["concat_i_h"]))
model.add(hugectr.DenseLayer(layer_type = hugectr.Layer_t.InnerProduct,
bottom_names = ["concat_i_h"],
top_names = ["fc_att_i2"],
num_output=40))
model.add(hugectr.DenseLayer(layer_type = hugectr.Layer_t.InnerProduct,
bottom_names = ["fc_att_i2"],
top_names = ["fc_att_i3"],
num_output=1))
model.add(hugectr.DenseLayer(layer_type = hugectr.Layer_t.Reshape,
bottom_names = ["fc_att_i3"],
top_names = ["reshape_score"],
leading_dim=10))
model.add(hugectr.DenseLayer(layer_type = hugectr.Layer_t.Softmax,
bottom_names = ["reshape_score"],
top_names = ["softmax_att_i"]))
model.add(hugectr.DenseLayer(layer_type = hugectr.Layer_t.Scale,
bottom_names = ["softmax_att_i"],
top_names = ["Scale_i"],
axis = 0, factor = 36))
model.add(hugectr.DenseLayer(layer_type = hugectr.Layer_t.Reshape,
bottom_names = ["item_his4"],
top_names = ["reshape_item_his"],
leading_dim=360))
model.add(hugectr.DenseLayer(layer_type = hugectr.Layer_t.DotProduct,
bottom_names = ["Scale_i", "reshape_item_his"],
top_names = ["DotProduct_ih"]))
model.add(hugectr.DenseLayer(layer_type = hugectr.Layer_t.ReduceSum,
bottom_names = ["DotProduct_ih"],
top_names = ["reduce_ih"],
axis = 1))
model.add(hugectr.DenseLayer(layer_type = hugectr.Layer_t.Reshape,
bottom_names = ["item_his5"],
top_names = ["reshape_his"],
leading_dim=36,
time_step =10))
model.add(hugectr.DenseLayer(layer_type = hugectr.Layer_t.ReduceMean,
bottom_names = ["reshape_his"],
top_names = ["reduce_item_his"],
axis = 1))
model.add(hugectr.DenseLayer(layer_type = hugectr.Layer_t.Reshape,
bottom_names = ["reduce_item_his"],
top_names = ["reshape_reduce_item_his"],
leading_dim=36))
model.add(hugectr.DenseLayer(layer_type = hugectr.Layer_t.Reshape,
bottom_names = ["sparse_embedding_user"],
top_names = ["reshape_user"],
leading_dim=18))
model.add(hugectr.DenseLayer(layer_type = hugectr.Layer_t.Concat,
bottom_names = ["reshape_user", "reshape_reduce_item_his", "reduce_ih", "item2"],
top_names = ["concat_din_i"]))
# build_fcn_net
model.add(hugectr.DenseLayer(layer_type = hugectr.Layer_t.InnerProduct,
bottom_names = ["concat_din_i"],
top_names = ["fc_din_i1"],
num_output=200))
model.add(hugectr.DenseLayer(layer_type = hugectr.Layer_t.PReLU_Dice,
bottom_names = ["fc_din_i1"],
top_names = ["dice_1"],
elu_alpha=0.2, eps=1e-8))
model.add(hugectr.DenseLayer(layer_type = hugectr.Layer_t.InnerProduct,
bottom_names = ["dice_1"],
top_names = ["fc_din_i2"],
num_output=80))
model.add(hugectr.DenseLayer(layer_type = hugectr.Layer_t.PReLU_Dice,
bottom_names = ["fc_din_i2"],
top_names = ["dice_2"],
elu_alpha=0.2, eps=1e-8))
model.add(hugectr.DenseLayer(layer_type = hugectr.Layer_t.InnerProduct,
bottom_names = ["dice_2"],
top_names = ["fc3"],
num_output=1))
model.add(hugectr.DenseLayer(layer_type = hugectr.Layer_t.BinaryCrossEntropyLoss,
bottom_names = ["fc3", "label"],
top_names = ["loss"]))
model.compile()
model.summary()
model.fit(max_iter = 6000, display = 1000, eval_interval = 1000, snapshot = 2000000, snapshot_prefix = "din")
model.eval()
metrics = model.get_eval_metrics()
print("[HUGECTR][INFO] iter: {}, metrics: {}".format(iter, metrics[0][1]))
if metrics[0][1] <0.8:
raise RuntimeError("Cannot reach the AUC threshold {}".format(0.8))
sys.exit(1)
else:
print("Successfully reach the AUC threshold {}".format(metrics[0][1])) | 58.568862 | 134 | 0.510991 | [
"Apache-2.0"
] | Chunshuizhao/HugeCTR | test/pybind_test/din_fp32_2gpu.py | 9,781 | Python |
"""
file system and database initialization.
tables:
- polls:
- id PRIMARY KEY
- owner_id => users.id
- topic
- users:
- id PRIMARY KEY
- first_name
- last_name
- username
- answers:
- id PRIMARY KEY
- poll_id => polls.id
- text
- votes:
- user_id => users.id
- poll_id => polls.id
- answer_id => answers.id
"""
import os
from os.path import expanduser, join
from yoyo import get_backend, read_migrations
from . import log
logger = log.getLogger('app.fs')
DATA_DIR: str = expanduser("~/.local/share/multi_vote_bot")
if not os.path.exists(DATA_DIR):
logger.info("Creating data dir at path %s", DATA_DIR)
os.makedirs(DATA_DIR, exist_ok=True)
DB_PATH: str = join(DATA_DIR, "data.db")
def migrate():
""" apply yoyo migrations """
logger.info("Migrating to the latest schema")
log.getLogger('yoyo').setLevel(log.DEBUG)
backend = get_backend('sqlite:///' + DB_PATH)
migrations = read_migrations('./migrations')
with backend.lock():
backend.apply_migrations(backend.to_apply(migrations))
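# Editor's hedged sketch (not part of this module): a yoyo migration step for the schema
# documented in the module docstring might look like the file below. Only the table and
# column names come from the docstring above; the SQL column types are assumptions.
#
#   # migrations/0001.create-polls.py
#   from yoyo import step
#   steps = [
#       step(
#           "CREATE TABLE polls ("
#           " id INTEGER PRIMARY KEY,"
#           " owner_id INTEGER REFERENCES users(id),"
#           " topic TEXT)",
#           "DROP TABLE polls",
#       )
#   ]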
# auto migrate when imported
migrate()
| 19.315789 | 62 | 0.679382 | [
"MIT"
] | ratijas/multi_vote_bot | src/app/fs.py | 1,101 | Python |
# -*- coding: UTF-8 -*-
import os # File and path handling
import numpy
import copy # for deepcopy
import math
from .image import ImageFile, Image, ImageROI, ImageStack
from .geometry import Geometry
from .processing.pipeline import Pipeline
from .processing.step import Step
from .helpers import *
def touchDirectory(folder):
if not os.path.exists(folder):
os.makedirs(folder)
class generalTest(Step):
""" General class for test scenario evaluations: get image(s), run and store evaluation. """
def __init__(self, testName="General Test", name=None, nExpectedRuns=1, resultFileDirectory=".", rawOutput=False):
Step.__init__(self, testName)
self.testName = testName
self.subtests = []
self.prepared = False
self.currentRun = 0
self.nExpectedRuns = None # usually, number of projections to evaluate
self.resultFileDirectory = None
self.name = None
self.rawOutput = None
self.setName(name)
self.setExpectedRuns(nExpectedRuns)
self.setResultFileDirectory(resultFileDirectory)
self.setRawOutput(rawOutput)
self.reset()
def reset(self):
self.currentRun = 0
self.prepared = False
def addSubtest(self, subt):
self.subtests.append(subt)
def setName(self, name=None):
""" Set an individual name for the (sub) test. """
if name != None:
self.name = name
else:
self.name = self.testName
def setExpectedRuns(self, n=1):
self.nExpectedRuns = n
def setResultFileDirectory(self, resultFileDirectory="."):
""" Set the location where test results should be saved. """
self.resultFileDirectory = resultFileDirectory
touchDirectory(self.resultFileDirectory)
def setRawOutput(self, rawOutput=False):
""" Save intermediate projections as RAW instead of TIFF? """
self.rawOutput = rawOutput
def plotResults(self):
""" Plot results of evaluation. """
# Should be called by step's followUp() function, if needed.
pass
| 30.943662 | 119 | 0.631771 | [
"Apache-2.0"
] | BAMresearch/ctsimu-toolbox | ctsimu/test.py | 2,197 | Python |
from ._abstract import AbstractScraper
from ._utils import get_minutes, normalize_string, get_yields
class SimplyQuinoa(AbstractScraper):
@classmethod
def host(self):
return 'simplyquinoa.com'
def title(self):
return self.soup.find(
'h2',
{'class': 'wprm-recipe-name'}
).get_text()
def total_time(self):
return get_minutes(self.soup.find(
'span',
{'class': 'wprm-recipe-total_time'}).parent
)
def yields(self):
yields = self.soup.find(
'span',
{'class': 'wprm-recipe-servings'}
).get_text()
return get_yields("{} servings".format(yields))
def ingredients(self):
ingredients = self.soup.findAll(
'li',
{'class': 'wprm-recipe-ingredient'}
)
return [
normalize_string(ingredient.get_text())
for ingredient in ingredients
]
def instructions(self):
instructions = self.soup.findAll(
'div',
{'class': 'wprm-recipe-instruction-text'}
)
return '\n'.join([
normalize_string(instruction.get_text())
for instruction in instructions
])
def ratings(self):
return round(float(
self.soup.find(
"span",
{"class": "wprm-recipe-rating-average"}
).get_text()), 2
) | 24.694915 | 61 | 0.533288 | [
"MIT"
] | PatrickPierce/recipe-scrapers | recipe_scrapers/simplyquinoa.py | 1,457 | Python |
"""
Custom dataset processing/generation functions should be added to this file
"""
import pathlib
from sklearn.datasets import fetch_20newsgroups
from functools import partial
from src import workflow, paths
from src.log import logger
import src.log.debug
from tqdm.auto import tqdm
from .. import paths
from ..log import logger
__all__ = [
'process_20_newsgroups'
]
def process_20_newsgroups(*, extract_dir='20_newsgroups',
metadata=None, unpack_dir=None,
opts={"subset":"all", "remove":"('headers', 'footers', 'quotes')"}):
"""
Process 20 newsgroups into (data, target, metadata) format.
Parameters
----------
unpack_dir: path
The interim parent directory the dataset files have been unpacked into.
extract_dir: str
        Name of the directory of the unpacked files, relative to unpack_dir.
    opts: dict default {"subset":"all", "remove":"('headers', 'footers', 'quotes')"}
Options to pass to sklearn.datasets.fetch_20newsgroups.
Returns
-------
A tuple:
(data, target, additional_metadata)
"""
if metadata is None:
metadata = {}
if unpack_dir is None:
unpack_dir = paths['interim_data_path']
else:
unpack_dir = pathlib.Path(unpack_dir)
data_dir = unpack_dir / f"{extract_dir}"
news = fetch_20newsgroups(**opts)
metadata['target_names'] = news.target_names
return news.data, news.target, metadata
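# Editor's hedged usage sketch (not part of this module), assuming the scikit-learn
# 20 newsgroups download is available; the opts override is an illustrative example.
#   data, target, meta = process_20_newsgroups(opts={"subset": "train"})
#   assert len(data) == len(target)      # one label per document
#   print(meta["target_names"][:3])      # newsgroup category names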
| 25.423729 | 94 | 0.662667 | [
"MIT"
] | acwooding/docmap_playground | src/data/process_functions.py | 1,500 | Python |
#!/usr/bin/python
# Copyright (c) 2017, 2021 Oracle and/or its affiliates.
# This software is made available to you under the terms of the GPL 3.0 license or the Apache 2.0 license.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# Apache License v2.0
# See LICENSE.TXT for details.
# GENERATED FILE - DO NOT EDIT - MANUAL CHANGES WILL BE OVERWRITTEN
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {
"metadata_version": "1.1",
"status": ["preview"],
"supported_by": "community",
}
DOCUMENTATION = """
---
module: oci_vault_secret_actions
short_description: Perform actions on a Secret resource in Oracle Cloud Infrastructure
description:
- Perform actions on a Secret resource in Oracle Cloud Infrastructure
- For I(action=cancel_secret_deletion), cancels the pending deletion of the specified secret. Canceling
a scheduled deletion restores the secret's lifecycle state to what
it was before you scheduled the secret for deletion.
- For I(action=schedule_secret_deletion), schedules the deletion of the specified secret. This sets the lifecycle state of the secret
to `PENDING_DELETION` and then deletes it after the specified retention period ends.
version_added: "2.9"
author: Oracle (@oracle)
options:
secret_id:
description:
- The OCID of the secret.
type: str
aliases: ["id"]
required: true
time_of_deletion:
description:
- An optional property indicating when to delete the secret version, expressed in L(RFC 3339,https://tools.ietf.org/html/rfc3339) timestamp format.
- Applicable only for I(action=schedule_secret_deletion).
type: str
action:
description:
- The action to perform on the Secret.
type: str
required: true
choices:
- "cancel_secret_deletion"
- "schedule_secret_deletion"
extends_documentation_fragment: [ oracle.oci.oracle ]
"""
EXAMPLES = """
- name: Perform action cancel_secret_deletion on secret
oci_vault_secret_actions:
secret_id: ocid1.secret.oc1..xxxxxxEXAMPLExxxxxx
action: cancel_secret_deletion
- name: Perform action schedule_secret_deletion on secret
oci_vault_secret_actions:
time_of_deletion: 2018-04-03T21:10:29.600Z
secret_id: ocid1.secret.oc1..xxxxxxEXAMPLExxxxxx
action: schedule_secret_deletion
"""
RETURN = """
secret:
description:
- Details of the Secret resource acted upon by the current operation
returned: on success
type: complex
contains:
compartment_id:
description:
- The OCID of the compartment where you want to create the secret.
returned: on success
type: string
sample: ocid1.compartment.oc1..xxxxxxEXAMPLExxxxxx
current_version_number:
description:
- The version number of the secret version that's currently in use.
returned: on success
type: int
sample: 56
defined_tags:
description:
- "Defined tags for this resource. Each key is predefined and scoped to a namespace.
For more information, see L(Resource Tags,https://docs.cloud.oracle.com/Content/General/Concepts/resourcetags.htm).
Example: `{\\"Operations\\": {\\"CostCenter\\": \\"42\\"}}`"
returned: on success
type: dict
sample: {'Operations': {'CostCenter': 'US'}}
description:
description:
- A brief description of the secret. Avoid entering confidential information.
returned: on success
type: string
sample: description_example
freeform_tags:
description:
- "Free-form tags for this resource. Each tag is a simple key-value pair with no predefined name, type, or namespace.
For more information, see L(Resource Tags,https://docs.cloud.oracle.com/Content/General/Concepts/resourcetags.htm).
Example: `{\\"Department\\": \\"Finance\\"}`"
returned: on success
type: dict
sample: {'Department': 'Finance'}
id:
description:
- The OCID of the secret.
returned: on success
type: string
sample: ocid1.resource.oc1..xxxxxxEXAMPLExxxxxx
key_id:
description:
- The OCID of the master encryption key that is used to encrypt the secret.
returned: on success
type: string
sample: ocid1.key.oc1..xxxxxxEXAMPLExxxxxx
lifecycle_details:
description:
- Additional information about the current lifecycle state of the secret.
returned: on success
type: string
sample: lifecycle_details_example
lifecycle_state:
description:
- The current lifecycle state of the secret.
returned: on success
type: string
sample: CREATING
metadata:
description:
- Additional metadata that you can use to provide context about how to use the secret or during rotation or
other administrative tasks. For example, for a secret that you use to connect to a database, the additional
metadata might specify the connection endpoint and the connection string. Provide additional metadata as key-value pairs.
returned: on success
type: dict
sample: {}
secret_name:
description:
- The user-friendly name of the secret. Avoid entering confidential information.
returned: on success
type: string
sample: secret_name_example
secret_rules:
description:
- A list of rules that control how the secret is used and managed.
returned: on success
type: complex
contains:
rule_type:
description:
- The type of rule, which either controls when the secret contents expire or whether they can be reused.
returned: on success
type: string
sample: SECRET_EXPIRY_RULE
secret_version_expiry_interval:
description:
- A property indicating how long the secret contents will be considered valid, expressed in
L(ISO 8601,https://en.wikipedia.org/wiki/ISO_8601#Time_intervals) format. The secret needs to be
updated when the secret content expires. No enforcement mechanism exists at this time, but audit logs
record the expiration on the appropriate date, according to the time interval specified in the rule.
The timer resets after you update the secret contents.
The minimum value is 1 day and the maximum value is 90 days for this property. Currently, only intervals expressed in days are
supported.
For example, pass `P3D` to have the secret version expire every 3 days.
returned: on success
type: string
sample: secret_version_expiry_interval_example
time_of_absolute_expiry:
description:
- "An optional property indicating the absolute time when this secret will expire, expressed in L(RFC
3339,https://tools.ietf.org/html/rfc3339) timestamp format.
The minimum number of days from current time is 1 day and the maximum number of days from current time is 365 days.
Example: `2019-04-03T21:10:29.600Z`"
returned: on success
type: string
sample: 2019-04-03T21:10:29.600Z
is_secret_content_retrieval_blocked_on_expiry:
description:
- A property indicating whether to block retrieval of the secret content, on expiry. The default is false.
If the secret has already expired and you would like to retrieve the secret contents,
you need to edit the secret rule to disable this property, to allow reading the secret content.
returned: on success
type: bool
sample: true
is_enforced_on_deleted_secret_versions:
description:
- A property indicating whether the rule is applied even if the secret version with the content you are trying to reuse was deleted.
returned: on success
type: bool
sample: true
time_created:
description:
- "A property indicating when the secret was created, expressed in L(RFC 3339,https://tools.ietf.org/html/rfc3339) timestamp format.
Example: `2019-04-03T21:10:29.600Z`"
returned: on success
type: string
sample: 2019-04-03T21:10:29.600Z
time_of_current_version_expiry:
description:
- "An optional property indicating when the current secret version will expire, expressed in L(RFC 3339,https://tools.ietf.org/html/rfc3339)
timestamp format.
Example: `2019-04-03T21:10:29.600Z`"
returned: on success
type: string
sample: 2019-04-03T21:10:29.600Z
time_of_deletion:
description:
- "An optional property indicating when to delete the secret, expressed in L(RFC 3339,https://tools.ietf.org/html/rfc3339) timestamp format.
Example: `2019-04-03T21:10:29.600Z`"
returned: on success
type: string
sample: 2019-04-03T21:10:29.600Z
vault_id:
description:
- The OCID of the vault where the secret exists.
returned: on success
type: string
sample: ocid1.vault.oc1..xxxxxxEXAMPLExxxxxx
sample: {
"compartment_id": "ocid1.compartment.oc1..xxxxxxEXAMPLExxxxxx",
"current_version_number": 56,
"defined_tags": {'Operations': {'CostCenter': 'US'}},
"description": "description_example",
"freeform_tags": {'Department': 'Finance'},
"id": "ocid1.resource.oc1..xxxxxxEXAMPLExxxxxx",
"key_id": "ocid1.key.oc1..xxxxxxEXAMPLExxxxxx",
"lifecycle_details": "lifecycle_details_example",
"lifecycle_state": "CREATING",
"metadata": {},
"secret_name": "secret_name_example",
"secret_rules": [{
"rule_type": "SECRET_EXPIRY_RULE",
"secret_version_expiry_interval": "secret_version_expiry_interval_example",
"time_of_absolute_expiry": "2019-04-03T21:10:29.600Z",
"is_secret_content_retrieval_blocked_on_expiry": true,
"is_enforced_on_deleted_secret_versions": true
}],
"time_created": "2019-04-03T21:10:29.600Z",
"time_of_current_version_expiry": "2019-04-03T21:10:29.600Z",
"time_of_deletion": "2019-04-03T21:10:29.600Z",
"vault_id": "ocid1.vault.oc1..xxxxxxEXAMPLExxxxxx"
}
"""
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.oracle.oci.plugins.module_utils import (
oci_common_utils,
oci_wait_utils,
)
from ansible_collections.oracle.oci.plugins.module_utils.oci_resource_utils import (
OCIActionsHelperBase,
get_custom_class,
)
try:
from oci.vault import VaultsClient
from oci.vault.models import ScheduleSecretDeletionDetails
HAS_OCI_PY_SDK = True
except ImportError:
HAS_OCI_PY_SDK = False
class SecretActionsHelperGen(OCIActionsHelperBase):
"""
Supported actions:
cancel_secret_deletion
schedule_secret_deletion
"""
@staticmethod
def get_module_resource_id_param():
return "secret_id"
def get_module_resource_id(self):
return self.module.params.get("secret_id")
def get_get_fn(self):
return self.client.get_secret
def get_resource(self):
return oci_common_utils.call_with_backoff(
self.client.get_secret, secret_id=self.module.params.get("secret_id"),
)
def cancel_secret_deletion(self):
return oci_wait_utils.call_and_wait(
call_fn=self.client.cancel_secret_deletion,
call_fn_args=(),
call_fn_kwargs=dict(secret_id=self.module.params.get("secret_id"),),
waiter_type=oci_wait_utils.NONE_WAITER_KEY,
operation="{0}_{1}".format(
self.module.params.get("action").upper(),
oci_common_utils.ACTION_OPERATION_KEY,
),
waiter_client=self.get_waiter_client(),
resource_helper=self,
wait_for_states=self.get_action_desired_states(
self.module.params.get("action")
),
)
def schedule_secret_deletion(self):
action_details = oci_common_utils.convert_input_data_to_model_class(
self.module.params, ScheduleSecretDeletionDetails
)
return oci_wait_utils.call_and_wait(
call_fn=self.client.schedule_secret_deletion,
call_fn_args=(),
call_fn_kwargs=dict(
secret_id=self.module.params.get("secret_id"),
schedule_secret_deletion_details=action_details,
),
waiter_type=oci_wait_utils.NONE_WAITER_KEY,
operation="{0}_{1}".format(
self.module.params.get("action").upper(),
oci_common_utils.ACTION_OPERATION_KEY,
),
waiter_client=self.get_waiter_client(),
resource_helper=self,
wait_for_states=self.get_action_desired_states(
self.module.params.get("action")
),
)
SecretActionsHelperCustom = get_custom_class("SecretActionsHelperCustom")
class ResourceHelper(SecretActionsHelperCustom, SecretActionsHelperGen):
pass
def main():
module_args = oci_common_utils.get_common_arg_spec(
supports_create=False, supports_wait=False
)
module_args.update(
dict(
secret_id=dict(aliases=["id"], type="str", required=True),
time_of_deletion=dict(type="str"),
action=dict(
type="str",
required=True,
choices=["cancel_secret_deletion", "schedule_secret_deletion"],
),
)
)
module = AnsibleModule(argument_spec=module_args, supports_check_mode=True)
if not HAS_OCI_PY_SDK:
module.fail_json(msg="oci python sdk required for this module.")
resource_helper = ResourceHelper(
module=module,
resource_type="secret",
service_client_class=VaultsClient,
namespace="vault",
)
result = resource_helper.perform_action(module.params.get("action"))
module.exit_json(**result)
if __name__ == "__main__":
main()
| 41.237333 | 159 | 0.620215 | [
"Apache-2.0"
] | hanielburton/oci-ansible-collection | plugins/modules/oci_vault_secret_actions.py | 15,464 | Python |
import os
import sys
import json
from .version import __version__
from satsearch import Search
from satstac import Items
from satsearch.parser import SatUtilsParser
import satsearch.config as config
def main(items=None, printmd=None, printcal=False, found=False,
save=None, download=None, requestor_pays=False, **kwargs):
""" Main function for performing a search """
if items is None:
## if there are no items then perform a search
search = Search.search(**kwargs)
if found:
num = search.found()
print('%s items found' % num)
return num
items = search.items()
else:
# otherwise, load a search from a file
items = Items.load(items)
print('%s items found' % len(items))
# print metadata
if printmd is not None:
print(items.summary(printmd))
# print calendar
if printcal:
print(items.calendar())
# save all metadata in JSON file
if save is not None:
items.save(filename=save)
# download files given `download` keys
if download is not None:
if 'ALL' in download:
# get complete set of assets
download = set([k for i in items for k in i.assets])
for key in download:
items.download(key=key, path=config.DATADIR, filename=config.FILENAME, requestor_pays=requestor_pays)
return items
def cli():
parser = SatUtilsParser.newbie(description='sat-search (v%s)' % __version__)
kwargs = parser.parse_args(sys.argv[1:])
# if a filename, read the GeoJSON file
if 'intersects' in kwargs:
if os.path.exists(kwargs['intersects']):
with open(kwargs['intersects']) as f:
kwargs['intersects'] = json.loads(f.read())
cmd = kwargs.pop('command', None)
if cmd is not None:
main(**kwargs)
if __name__ == "__main__":
cli()
| 27.3 | 113 | 0.628467 | [
"MIT"
] | lishrimp/sat-search | satsearch/main.py | 1,911 | Python |
from argparse import ArgumentParser
import datetime
import dateutil.parser
import sys, re
from os import path
def parseArgs():
parser = ArgumentParser(add_help=False)
parser.add_argument("-a", "--action", help="Please select an option out of <discover, manage, settings>", type=str, required=True)
parser.add_argument("-f", "--file", help="Please specify absolute path to initial dataset", type=str)
args = parser.parse_args()
# for debugging TODO: remove later
args.file = r"C:\Users\flietz\OneDrive - TU Wien\!Studium\1_MSc\!Diplomarbeit\code\pipeline\resources\dataset\Mail_ApplicationDummy.csv"
if args.action is None or args.action not in ("discover", "manage", "settings"):
        sys.exit('Please specify an action out of <"discover", "manage", "settings">')
if args.action == "discover" and (args.file is None or not path.exists(args.file)):
sys.exit("The input file could not be found in the filesystem.")
arguments = {"file": args.file}
return args.action, arguments
class DataCleaner:
def __init__(self, removeURLs, removeMultWhitespace, lowercasing, dateFormat):
self.removeURLs = removeURLs
self.removeMultWhitespace = removeMultWhitespace
self.lowercasing = lowercasing
self.dateFormat = dateFormat
def apply(self, inputDf):
def removeUrl(content):
return re.sub(r'https?://\S+', '', content)
def removeMultWhitespace(content):
return re.sub(r' +', ' ', content)
# Remove URLs
if self.removeURLs:
inputDf["Content"] = inputDf.apply(lambda row: removeUrl(row["Content"]), axis=1)
# Remove Multi-Whitespaces
if self.removeMultWhitespace:
inputDf["Content"] = inputDf.apply(lambda row: removeMultWhitespace(row["Content"]), axis=1)
if self.lowercasing:
inputDf["Content"] = inputDf.apply(lambda row: row["Content"].lower(), axis=1)
# Not-Empty-Constraints
if inputDf["Content"].isnull().values.any() or \
inputDf["Datetime"].isnull().values.any() or \
inputDf["From"].isnull().values.any() or \
inputDf["To"].isnull().values.any():
raise AttributeError("Content, Datetime, From and To field cannot be empty. Please check your input dataset.")
# Unify Date format - reformat to %Y-%m-%d %H:%M:%S
def reformatDate(datestring, dateformat):
try:
newDate = dateutil.parser.parse(datestring, dayfirst=True)
return newDate.strftime(dateformat)
except ValueError as e:
raise ValueError("Make sure that all datetime columns are well-formatted "
"and that they contain dates that are within the possible bounds.") from e
inputDf["Datetime"] = inputDf.apply(lambda row: reformatDate(row["Datetime"], self.dateFormat), axis=1)
# clean signatures, clauses
def stripEndClauses(content, clauses):
clauseIndex = 0
index = 0
# Find lowest greetings or end clause index and strip off everything that comes after it
for item in clauses:
# needle and haystack both in lowercase to ignore case
index = content.lower().find(item.lower())
if index > -1 and (index < clauseIndex or clauseIndex == 0):
clauseIndex = index
if clauseIndex > 0:
return content[:clauseIndex]
else:
return content
def stripStartClauses(content, clauses):
clauseIndex = 0
index = 0
            # Find the highest start clause index and strip off everything that comes before it
for item in clauses:
# needle and haystack both in lowercase to ignore case
index = content.lower().find(item.lower())
if index > -1 and (index > clauseIndex or clauseIndex == 0):
clauseIndex = index
if clauseIndex > 0:
return content[clauseIndex:]
else:
return content
startClausesList = []
endGreetingsList = ["Yours sincerely", "Sincerely", "Sincerely yours", "Take care", "Regards",
"Warm regards", "Best regards", "Kind regards", "Warmest regards", "Yours truly", "Yours,",
"Warmly,", "Warm wishes", "Best,", "Best Wishes", "Thanks in advance", "Thank you in advance",
"Thanks in advance"]
confList = ["The information contained in this communication",
"The content of this email is confidential", "The content of this e-mail", "This email and attachments (if any) is intended",
"This email is intended solely", "This e-mail is intended solely"]
endClausesList = endGreetingsList+confList
inputDf["Content"] = inputDf.apply(lambda row: stripEndClauses(row["Content"], endClausesList), axis=1)
inputDf["Content"] = inputDf.apply(lambda row: stripStartClauses(row["Content"], startClausesList), axis=1)
# Reduce multiple new-lines to one
inputDf["Content"] = inputDf.apply(lambda row: re.sub(r'\n+', '\n', row["Content"]), axis=1)
# Replace new-lines with whitespaces
inputDf["Content"] = inputDf.apply(lambda row: re.sub(r'\n', ' ', row["Content"]), axis=1)
def convertDateString(datestring):
try:
return datetime.datetime.strptime(datestring, "%Y-%m-%d %H:%M:%S")
except ValueError:
return datetime.datetime.strptime(datestring, "%Y-%m-%d %H:%M:%S") | 49.877193 | 145 | 0.616426 | [
"Apache-2.0"
] | lif22/tmpm_pipeline | stages/utils/utils.py | 5,686 | Python |
__author__ = 'igor'
| 10 | 19 | 0.7 | [
"MIT"
] | igorcoding/os-simulation | src/gui/__init__.py | 20 | Python |
#!/usr/bin/env python3.8
import importlib
import typing
from enum import Enum
import discord
from discord.ext import commands
from discord.types.interactions import ApplicationCommandOption
import common.paginator as paginator
import common.star_classes as star_classes
import common.utils as utils
class OwnerCMDs(commands.Cog, name="Owner", command_attrs=dict(hidden=True)):
def __init__(self, bot):
self.bot: utils.SeraphimBase = bot
async def cog_check(self, ctx):
return await self.bot.is_owner(ctx.author)
@commands.command(hidden=True, aliases=["reloadallextensions"])
async def reload_all_extensions(self, ctx):
extensions = [i for i in self.bot.extensions.keys() if i != "cogs.db_handler"]
for extension in extensions:
self.bot.reload_extension(extension)
await ctx.reply("All extensions reloaded!")
@commands.command(hidden=True)
async def list_loaded_extensions(self, ctx):
exten_list = [f"`{k}`" for k in self.bot.extensions.keys()]
exten_str = ", ".join(exten_list)
await ctx.reply(f"Extensions: {exten_str}")
class OptionTypeEnum(Enum):
SUB_COMMAND = 1
SUB_COMMAND_GROUP = 2
STRING = 3
INTEGER = 4
BOOLEAN = 5
USER = 6
CHANNEL = 7
ROLE = 8
MENTIONABLE = 9
NUMBER = 10
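        # Editor's note: these values mirror Discord's ApplicationCommandOptionType
        # constants, so the raw option["type"] integers from the HTTP payload map
        # directly onto this enum via self.OptionTypeEnum(option["type"]).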
@commands.command(hidden=True, aliases=["list_slash_commands", "listslashcmds"])
async def list_slash_cmds(
self, ctx: utils.SeraContextBase, guild: typing.Optional[discord.Guild]
):
if not guild:
app_cmds = await ctx.bot.http.get_global_commands(ctx.bot.application_id)
else:
app_cmds = await ctx.bot.http.get_guild_commands(
ctx.bot.application_id, guild.id
)
slash_entries = []
if not app_cmds:
raise commands.BadArgument(
"This guild/bot does not have any specific slash commands."
)
for entry in app_cmds:
if entry.get("type", 0) == 1:
entry_str_list = []
if entry["description"]:
entry_str_list.append(entry["description"])
else:
entry_str_list.append("No description provided.")
if entry.get("options"):
entry_str_list.append("__Arguments:__")
for option in entry["options"]: # type: ignore
option: ApplicationCommandOption
option_type = self.OptionTypeEnum(option["type"]).name
required_txt = ", required" if option["required"] else ""
entry_str_list.append(
f"{option['name']} (type {option_type}{required_txt}) - {option['description']}"
)
slash_entries.append(
(f"{entry['name']} - ID {entry['id']}", "\n".join(entry_str_list))
)
if not slash_entries:
raise commands.BadArgument(
"This guild/bot does not have any specific slash commands."
)
pages = paginator.FieldPages(ctx, entries=slash_entries, per_page=6)
await pages.paginate()
@commands.command(hidden=True, aliases=["removeslashcmd"])
async def remove_slash_cmd(
self, ctx, cmd: discord.Object, guild: typing.Optional[discord.Guild],
):
if guild:
await self.bot.http.delete_guild_command(
self.bot.application_id, guild.id, cmd.id
)
else:
await self.bot.http.delete_global_command(self.bot.application_id, cmd.id)
await ctx.reply("Removed command.")
@commands.command(hidden=True, aliases=["removeallslashcmds"])
async def remove_all_slash_cmds(self, ctx, guild: typing.Optional[discord.Guild]):
if not guild:
app_cmds = await self.bot.http.get_global_commands(self.bot.application_id)
else:
app_cmds = await self.bot.http.get_guild_commands(
self.bot.application_id, guild.id
)
slash_cmd_ids = [e["id"] for e in app_cmds if e.get("type", 0) == 1]
for cmd_id in slash_cmd_ids:
if not guild:
await self.bot.http.delete_global_command(
self.bot.application_id, cmd_id
)
else:
await self.bot.http.delete_guild_command(
self.bot.application_id, guild.id, cmd_id
)
await ctx.reply("Removed all commands.")
def setup(bot):
importlib.reload(utils)
importlib.reload(star_classes)
importlib.reload(paginator)
bot.add_cog(OwnerCMDs(bot))
| 33.838028 | 108 | 0.594173 | [
"MPL-2.0",
"MPL-2.0-no-copyleft-exception"
] | Astrea49/Seraphim-Bot | cogs/core/cmds/owner_cmds.py | 4,805 | Python |
# Copyright (c) 2015-2016, 2018, 2020 Claudiu Popa <[email protected]>
# Copyright (c) 2016 Ceridwen <[email protected]>
# Copyright (c) 2020 hippo91 <[email protected]>
# Licensed under the LGPL: https://www.gnu.org/licenses/old-licenses/lgpl-2.1.en.html
# For details: https://github.com/PyCQA/astroid/blob/master/COPYING.LESSER
"""Hooks for nose library."""
import re
import textwrap
import astroid
import astroid.builder
_BUILDER = astroid.builder.AstroidBuilder(astroid.MANAGER)
def _pep8(name, caps=re.compile("([A-Z])")):
return caps.sub(lambda m: "_" + m.groups()[0].lower(), name)
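# Editor's note: _pep8 converts camelCase unittest method names to snake_case,
# e.g. _pep8("assertListEqual") == "assert_list_equal".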
def _nose_tools_functions():
"""Get an iterator of names and bound methods."""
module = _BUILDER.string_build(
textwrap.dedent(
"""
import unittest
class Test(unittest.TestCase):
pass
a = Test()
"""
)
)
try:
case = next(module["a"].infer())
except astroid.InferenceError:
return
for method in case.methods():
if method.name.startswith("assert") and "_" not in method.name:
pep8_name = _pep8(method.name)
yield pep8_name, astroid.BoundMethod(method, case)
if method.name == "assertEqual":
# nose also exports assert_equals.
yield "assert_equals", astroid.BoundMethod(method, case)
def _nose_tools_transform(node):
for method_name, method in _nose_tools_functions():
node.locals[method_name] = [method]
def _nose_tools_trivial_transform():
"""Custom transform for the nose.tools module."""
stub = _BUILDER.string_build("""__all__ = []""")
all_entries = ["ok_", "eq_"]
for pep8_name, method in _nose_tools_functions():
all_entries.append(pep8_name)
stub[pep8_name] = method
# Update the __all__ variable, since nose.tools
# does this manually with .append.
all_assign = stub["__all__"].parent
all_object = astroid.List(all_entries)
all_object.parent = all_assign
all_assign.value = all_object
return stub
astroid.register_module_extender(
astroid.MANAGER, "nose.tools.trivial", _nose_tools_trivial_transform
)
astroid.MANAGER.register_transform(
astroid.Module, _nose_tools_transform, lambda n: n.name == "nose.tools"
)
| 28.886076 | 85 | 0.678791 | [
"MIT"
] | Nucl3arSn3k/randomplushmiku | venv/Lib/site-packages/astroid/brain/brain_nose.py | 2,282 | Python |
from bridge.deploy.sagemaker import SageMakerDeployTarget
DEPLOY_REGISTRY = {"sagemaker": SageMakerDeployTarget}
| 23 | 57 | 0.843478 | [
"Apache-2.0"
] | jfdesroches/domino-research | bridge/bridge/deploy/registry.py | 115 | Python |
# Lint as: python3
# Copyright 2020 The DMLab2D Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Function for flattening dictionary settings."""
import numbers
from typing import Mapping, Sequence
def _flatten_args(pairs_in, args_out, prefix, visited_stack):
"""Helper function for flatten_args. See `flatten_args` below for details."""
for key, v in pairs_in:
if not isinstance(key, str):
raise ValueError('Keys must be strings. %r' % key)
flat_key = prefix + '.' + key if prefix else key
if v is None:
args_out[flat_key] = 'none'
elif isinstance(v, str):
args_out[flat_key] = v
elif isinstance(v, bool):
args_out[flat_key] = 'true' if v else 'false'
elif isinstance(v, numbers.Number):
args_out[flat_key] = str(v)
elif isinstance(v, Mapping):
if not any(v is entry for entry in visited_stack):
_flatten_args(v.items(), args_out, flat_key, visited_stack + [v])
elif isinstance(v, Sequence):
if not any(v is entry for entry in visited_stack):
_flatten_args(((str(i + 1), vv) for i, vv in enumerate(v)), args_out,
flat_key, visited_stack + [v])
else:
raise ValueError('Value for \'{}\' cannot be type: \'{}\''.format(
flat_key, str(type(v))))
def flatten_args(args_in):
"""Converts a dictionary of dictionarys and lists into a flat table.
Args:
    args_in: dictionary containing a hierarchy of dictionaries and lists. Leaf
      values can be strings, bools, or numbers.
Returns:
A flat dictionary with keys separated by '.' and string values.
"""
args_out = {}
_flatten_args(args_in.items(), args_out, None, [args_in])
return args_out
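# Editor's hedged illustration (not part of the original module) of the flattening
# behaviour implemented above; the keys are made-up example settings:
#   flatten_args({"env": {"seed": 1, "levels": ["a", "b"]}, "debug": False})
#   == {"env.seed": "1", "env.levels.1": "a", "env.levels.2": "b", "debug": "false"}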
| 35.031746 | 79 | 0.686905 | [
"Apache-2.0"
] | LaudateCorpus1/lab2d | dmlab2d/settings_helper.py | 2,207 | Python |
def init(id, cfg):
log_info("pythonmod: init called, module id is %d port: %d script: %s" % (id, cfg.port, cfg.python_script))
return True
def init_standard(id, env):
log_info("pythonmod: init called, module id is %d port: %d script: %s" % (id, env.cfg.port, env.cfg.python_script))
return True
def deinit(id):
log_info("pythonmod: deinit called, module id is %d" % id)
return True
def inform_super(id, qstate, superqstate, qdata):
return True
def operate(id, event, qstate, qdata):
log_info("pythonmod: operate called, id: %d, event:%s" % (id, strmodulevent(event)))
if event == MODULE_EVENT_NEW:
qstate.ext_state[id] = MODULE_WAIT_MODULE
return True
if event == MODULE_EVENT_MODDONE:
log_info("pythonmod: module we are waiting for is done")
qstate.ext_state[id] = MODULE_FINISHED
return True
if event == MODULE_EVENT_PASS:
log_info("pythonmod: event_pass")
qstate.ext_state[id] = MODULE_WAIT_MODULE
return True
log_err("pythonmod: BAD event")
qstate.ext_state[id] = MODULE_ERROR
return True
log_info("pythonmod: script loaded.")
| 29.815789 | 118 | 0.68579 | [
"BSD-3-Clause"
] | Berbe/unbound | pythonmod/doc/examples/example0-1.py | 1,133 | Python |
import os
import numpy as np
import scipy.sparse as sp
import pickle
import torch
from torch.utils.data import DataLoader
from dgl.data.utils import download, _get_dgl_url, get_download_dir, extract_archive
import random
import time
import dgl
def ReadTxtNet(file_path="", undirected=True):
""" Read the txt network file.
    Notations: each line is "src dst" or "src dst weight"; when no weight is given, a weight of 1 is used.
Parameters
----------
file_path str : path of network file
undirected bool : whether the edges are undirected
Return
------
net dict : a dict recording the connections in the graph
node2id dict : a dict mapping the nodes to their embedding indices
    id2node dict : a dict mapping nodes embedding indices to the nodes
    sm scipy.sparse.coo_matrix : the (weighted) adjacency matrix in COO format
"""
if file_path == 'youtube' or file_path == 'blog':
name = file_path
dir = get_download_dir()
zip_file_path='{}/{}.zip'.format(dir, name)
download(_get_dgl_url(os.path.join('dataset/DeepWalk/', '{}.zip'.format(file_path))), path=zip_file_path)
extract_archive(zip_file_path,
'{}/{}'.format(dir, name))
file_path = "{}/{}/{}-net.txt".format(dir, name, name)
node2id = {}
id2node = {}
cid = 0
src = []
dst = []
weight = []
net = {}
with open(file_path, "r") as f:
for line in f.readlines():
tup = list(map(int, line.strip().split(" ")))
assert len(tup) in [2, 3], "The format of network file is unrecognizable."
if len(tup) == 3:
n1, n2, w = tup
elif len(tup) == 2:
n1, n2 = tup
w = 1
if n1 not in node2id:
node2id[n1] = cid
id2node[cid] = n1
cid += 1
if n2 not in node2id:
node2id[n2] = cid
id2node[cid] = n2
cid += 1
n1 = node2id[n1]
n2 = node2id[n2]
if n1 not in net:
net[n1] = {n2: w}
src.append(n1)
dst.append(n2)
weight.append(w)
elif n2 not in net[n1]:
net[n1][n2] = w
src.append(n1)
dst.append(n2)
weight.append(w)
if undirected:
if n2 not in net:
net[n2] = {n1: w}
src.append(n2)
dst.append(n1)
weight.append(w)
elif n1 not in net[n2]:
net[n2][n1] = w
src.append(n2)
dst.append(n1)
weight.append(w)
print("node num: %d" % len(net))
print("edge num: %d" % len(src))
assert max(net.keys()) == len(net) - 1, "error reading net, quit"
sm = sp.coo_matrix(
(np.array(weight), (src, dst)),
dtype=np.float32)
return net, node2id, id2node, sm
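# Editor's hedged illustration (not part of the original module): the expected text
# format is one edge per line, either "src dst" or "src dst weight", e.g.
#   0 1
#   1 2 3
# ReadTxtNet then returns the adjacency dict, both id mappings and a scipy coo_matrix
# holding the edge weights (a weight of 1 is used when none is given).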
def net2graph(net_sm):
""" Transform the network to DGL graph
Return
------
G DGLGraph : graph by DGL
"""
start = time.time()
G = dgl.DGLGraph(net_sm)
end = time.time()
t = end - start
print("Building DGLGraph in %.2fs" % t)
return G
def make_undirected(G):
#G.readonly(False)
G.add_edges(G.edges()[1], G.edges()[0])
return G
def find_connected_nodes(G):
nodes = torch.nonzero(G.out_degrees()).squeeze(-1)
return nodes
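# Editor's note: find_connected_nodes returns the ids of nodes whose out-degree is
# non-zero, i.e. nodes that still have at least one edge after make_undirected().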
class LineDataset:
def __init__(self,
net_file,
batch_size,
num_samples,
negative=5,
gpus=[0],
fast_neg=True,
ogbl_name="",
load_from_ogbl=False,
ogbn_name="",
load_from_ogbn=False,
):
""" This class has the following functions:
            1. Load the DGL graph from a saved graph file or from an OGB dataset;
            2. Generate edge samples (seeds) for the trainer;
            3. Provide the negative table if the user hopes to sample negative
            nodes according to nodes' degrees;
            Parameter
            ---------
            net_file str : path of the dgl network file
            batch_size int : number of edge samples in each batch
            num_samples int : total number of edge samples to draw
            negative int : negative samples for each positive node pair
            gpus list : ids of the GPUs used for training (one process per GPU)
            fast_neg bool : whether to do negative sampling inside a batch
            ogbl_name str / load_from_ogbl bool : load the graph from an ogb.linkproppred dataset
            ogbn_name str / load_from_ogbn bool : load the graph from an ogb.nodeproppred dataset
"""
self.batch_size = batch_size
self.negative = negative
self.num_samples = num_samples
self.num_procs = len(gpus)
self.fast_neg = fast_neg
if load_from_ogbl:
assert len(gpus) == 1, "ogb.linkproppred is not compatible with multi-gpu training."
from load_dataset import load_from_ogbl_with_name
self.G = load_from_ogbl_with_name(ogbl_name)
elif load_from_ogbn:
assert len(gpus) == 1, "ogb.linkproppred is not compatible with multi-gpu training."
from load_dataset import load_from_ogbn_with_name
self.G = load_from_ogbn_with_name(ogbn_name)
else:
self.G = dgl.load_graphs(net_file)[0][0]
self.G = make_undirected(self.G)
print("Finish reading graph")
self.num_nodes = self.G.number_of_nodes()
start = time.time()
seeds = np.random.choice(np.arange(self.G.number_of_edges()),
self.num_samples,
replace=True) # edge index
self.seeds = torch.split(torch.LongTensor(seeds),
int(np.ceil(self.num_samples / self.num_procs)),
0)
end = time.time()
t = end - start
print("generate %d samples in %.2fs" % (len(seeds), t))
# negative table for true negative sampling
self.valid_nodes = find_connected_nodes(self.G)
if not fast_neg:
node_degree = self.G.out_degrees(self.valid_nodes).numpy()
node_degree = np.power(node_degree, 0.75)
node_degree /= np.sum(node_degree)
            node_degree = np.array(node_degree * 1e8, dtype=np.int64)
self.neg_table = []
for idx, node in enumerate(self.valid_nodes):
self.neg_table += [node] * node_degree[idx]
self.neg_table_size = len(self.neg_table)
            self.neg_table = np.array(self.neg_table, dtype=np.int64)
del node_degree
def create_sampler(self, i):
""" create random walk sampler """
return EdgeSampler(self.G, self.seeds[i])
def save_mapping(self, map_file):
with open(map_file, "wb") as f:
pickle.dump(self.node2id, f)
class EdgeSampler(object):
def __init__(self, G, seeds):
self.G = G
self.seeds = seeds
self.edges = torch.cat((self.G.edges()[0].unsqueeze(0), self.G.edges()[1].unsqueeze(0)), 0).t()
def sample(self, seeds):
""" seeds torch.LongTensor : a batch of indices of edges """
return self.edges[torch.LongTensor(seeds)] | 33.523585 | 113 | 0.555649 | [
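        # Editor's note: `seeds` is a sequence of edge indices; the return value is a
        # (len(seeds), 2) LongTensor of [src, dst] node-id pairs for those edges.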
"Apache-2.0"
] | IzabelaMazur/dgl | examples/pytorch/ogb/line/reading_data.py | 7,107 | Python |
"""Tests for the DirecTV component."""
from http import HTTPStatus
from homeassistant.components.directv.const import CONF_RECEIVER_ID, DOMAIN
from homeassistant.components.ssdp import ATTR_SSDP_LOCATION
from homeassistant.const import CONF_HOST, CONTENT_TYPE_JSON
from homeassistant.core import HomeAssistant
from tests.common import MockConfigEntry, load_fixture
from tests.test_util.aiohttp import AiohttpClientMocker
HOST = "127.0.0.1"
RECEIVER_ID = "028877455858"
SSDP_LOCATION = "http://127.0.0.1/"
UPNP_SERIAL = "RID-028877455858"
MOCK_CONFIG = {DOMAIN: [{CONF_HOST: HOST}]}
MOCK_SSDP_DISCOVERY_INFO = {ATTR_SSDP_LOCATION: SSDP_LOCATION}
MOCK_USER_INPUT = {CONF_HOST: HOST}
def mock_connection(aioclient_mock: AiohttpClientMocker) -> None:
"""Mock the DirecTV connection for Home Assistant."""
aioclient_mock.get(
f"http://{HOST}:8080/info/getVersion",
text=load_fixture("directv/info-get-version.json"),
headers={"Content-Type": CONTENT_TYPE_JSON},
)
aioclient_mock.get(
f"http://{HOST}:8080/info/getLocations",
text=load_fixture("directv/info-get-locations.json"),
headers={"Content-Type": CONTENT_TYPE_JSON},
)
aioclient_mock.get(
f"http://{HOST}:8080/info/mode",
params={"clientAddr": "B01234567890"},
text=load_fixture("directv/info-mode-standby.json"),
headers={"Content-Type": CONTENT_TYPE_JSON},
)
aioclient_mock.get(
f"http://{HOST}:8080/info/mode",
params={"clientAddr": "9XXXXXXXXXX9"},
status=HTTPStatus.INTERNAL_SERVER_ERROR,
text=load_fixture("directv/info-mode-error.json"),
headers={"Content-Type": CONTENT_TYPE_JSON},
)
aioclient_mock.get(
f"http://{HOST}:8080/info/mode",
text=load_fixture("directv/info-mode.json"),
headers={"Content-Type": CONTENT_TYPE_JSON},
)
aioclient_mock.get(
f"http://{HOST}:8080/remote/processKey",
text=load_fixture("directv/remote-process-key.json"),
headers={"Content-Type": CONTENT_TYPE_JSON},
)
aioclient_mock.get(
f"http://{HOST}:8080/tv/tune",
text=load_fixture("directv/tv-tune.json"),
headers={"Content-Type": CONTENT_TYPE_JSON},
)
aioclient_mock.get(
f"http://{HOST}:8080/tv/getTuned",
params={"clientAddr": "2CA17D1CD30X"},
text=load_fixture("directv/tv-get-tuned.json"),
headers={"Content-Type": CONTENT_TYPE_JSON},
)
aioclient_mock.get(
f"http://{HOST}:8080/tv/getTuned",
params={"clientAddr": "A01234567890"},
text=load_fixture("directv/tv-get-tuned-music.json"),
headers={"Content-Type": CONTENT_TYPE_JSON},
)
aioclient_mock.get(
f"http://{HOST}:8080/tv/getTuned",
params={"clientAddr": "C01234567890"},
status=HTTPStatus.FORBIDDEN,
text=load_fixture("directv/tv-get-tuned-restricted.json"),
headers={"Content-Type": CONTENT_TYPE_JSON},
)
aioclient_mock.get(
f"http://{HOST}:8080/tv/getTuned",
text=load_fixture("directv/tv-get-tuned-movie.json"),
headers={"Content-Type": CONTENT_TYPE_JSON},
)
async def setup_integration(
hass: HomeAssistant,
aioclient_mock: AiohttpClientMocker,
skip_entry_setup: bool = False,
setup_error: bool = False,
) -> MockConfigEntry:
"""Set up the DirecTV integration in Home Assistant."""
if setup_error:
aioclient_mock.get(
f"http://{HOST}:8080/info/getVersion",
status=HTTPStatus.INTERNAL_SERVER_ERROR,
)
else:
mock_connection(aioclient_mock)
entry = MockConfigEntry(
domain=DOMAIN,
unique_id=RECEIVER_ID,
data={CONF_HOST: HOST, CONF_RECEIVER_ID: RECEIVER_ID},
)
entry.add_to_hass(hass)
if not skip_entry_setup:
await hass.config_entries.async_setup(entry.entry_id)
await hass.async_block_till_done()
return entry
| 31.603175 | 75 | 0.668257 | [
"Apache-2.0"
] | 2Fake/core | tests/components/directv/__init__.py | 3,982 | Python |
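A sketch of a test that consumes the helpers above; it assumes the hass and aioclient_mock pytest fixtures provided by Home Assistant's test harness, and the entry-state assertion is illustrative.
from homeassistant.config_entries import ConfigEntryState
from homeassistant.core import HomeAssistant
from tests.test_util.aiohttp import AiohttpClientMocker
async def test_setup_entry(hass: HomeAssistant, aioclient_mock: AiohttpClientMocker) -> None:
    """Set the integration up against the mocked receiver and check the entry."""
    entry = await setup_integration(hass, aioclient_mock)
    assert entry.unique_id == RECEIVER_ID
    assert entry.state is ConfigEntryState.LOADED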
from random import shuffle
from models.RainbowModelLeaveRecsOut import RainbowModelLeaveRecsOut
from tensorflow.keras.layers import Conv1D, MaxPooling1D, Flatten, Dense, Dropout # type: ignore
from tensorflow.keras.models import Sequential # type: ignore
import numpy as np
from utils.Recording import Recording
from utils.array_operations import split_list_by_percentage
from utils.typing import assert_type
class ConvModel(RainbowModelLeaveRecsOut):
def __init__(self, **kwargs):
"""
Convolutional model
:param kwargs:
window_size: int
stride_size: int
test_percentage: float
n_features: int
n_outputs: int
"""
# hyper params to instance vars
self.window_size = kwargs["window_size"]
self.stride_size = kwargs["stride_size"]
self.test_percentage = kwargs["test_percentage"]
self.verbose = 0
self.epochs = 10
self.batch_size = 32
# create model
self.model = self.__create_model(kwargs["n_features"], kwargs["n_outputs"])
def __create_model(self, n_features, n_outputs):
# window_size, n_features, n_outputs = X.shape[1], X.shape[2], y.shape[1]
print(
f"Building model for {self.window_size} timesteps (window_size) and {n_features} features"
)
model = Sequential()
model.add(
Conv1D(
filters=64,
kernel_size=3,
activation="relu",
input_shape=(self.window_size, n_features),
)
)
model.add(Conv1D(filters=64, kernel_size=3, activation="relu"))
model.add(Dropout(0.5))
model.add(MaxPooling1D(pool_size=2))
model.add(Flatten())
model.add(Dense(100, activation="relu"))
model.add(Dense(n_outputs, activation="softmax"))
model.compile(
loss="categorical_crossentropy", optimizer="adam", metrics=["accuracy"]
)
return model
| 33.311475 | 102 | 0.628937 | [
"MIT"
] | Sensors-in-Paradise/OpportunityML | archive/model_archive/ConvModel.py | 2,032 | Python |
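A quick instantiation sketch for the model above; all hyperparameter values are placeholders, and training/evaluation happens through RainbowModelLeaveRecsOut, which is not shown here.
conv_model = ConvModel(window_size=100,      # timesteps per window (placeholder)
                       stride_size=50,       # stride used by the windowing base class (placeholder)
                       test_percentage=0.2,  # share of recordings held out (placeholder)
                       n_features=51,        # sensor channels per timestep (placeholder)
                       n_outputs=6)          # number of activity classes (placeholder)
conv_model.model.summary()                   # inspect the Keras network built in __create_model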
from rest_framework import serializers
from gestion.models.providerOrder import ProviderOrder
from auth_app.serializers.userSerializer import UserSerializer
from gestion.serializers.providerSerializer import ProviderSerializer
from auth_app.models.user import User
from gestion.models.provider import Provider
from serverConfig.utils import check_user_has_permission
class ProviderOrderSerializer(serializers.ModelSerializer):
seller = UserSerializer(read_only=True)
provider = ProviderSerializer(read_only=True)
    orderList = serializers.SerializerMethodField("getOrderList")
    orderNumber = serializers.SerializerMethodField()
class Meta:
model = ProviderOrder
fields = ('__all__')
def create(self, validated_data):
order = ProviderOrder.objects.create(**validated_data)
return order
def update(self, instance, validated_data):
return super().update(instance, validated_data)
def getOrderList(self, obj):
from .entriesSerializer import EntriesSerializer
return EntriesSerializer(obj.orderList(), context=self.context, many=True).data
def get_orderNumber(self, obj: ProviderOrder):
return str(obj.id).zfill(5) | 34.605263 | 88 | 0.742205 | [
"MIT"
] | JetLightStudio/Jet-Gest-stock-management | server/gestion/serializers/providerOrderSerializer.py | 1,315 | Python |
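A read-only usage sketch for the serializer above, e.g. inside a DRF view; the queryset and any request/permission handling are assumed and not part of the original module.
from gestion.models.providerOrder import ProviderOrder
def list_provider_orders():
    orders = ProviderOrder.objects.all()
    serializer = ProviderOrderSerializer(orders, many=True)
    # each item carries the nested seller/provider plus the computed
    # orderList and zero-padded orderNumber fields
    return serializer.data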
import json
from django.contrib.messages.storage.base import BaseStorage
from django.contrib.messages.storage.cookie import (
MessageDecoder, MessageEncoder,
)
from django.utils import six
class SessionStorage(BaseStorage):
"""
Stores messages in the session (that is, django.contrib.sessions).
"""
session_key = '_messages'
def __init__(self, request, *args, **kwargs):
assert hasattr(request, 'session'), "The session-based temporary "\
"message storage requires session middleware to be installed, "\
"and come before the message middleware in the "\
"MIDDLEWARE_CLASSES list."
super(SessionStorage, self).__init__(request, *args, **kwargs)
def _get(self, *args, **kwargs):
"""
Retrieves a list of messages from the request's session. This storage
always stores everything it is given, so return True for the
all_retrieved flag.
"""
return self.deserialize_messages(self.request.session.get(self.session_key)), True
def _store(self, messages, response, *args, **kwargs):
"""
Stores a list of messages to the request's session.
"""
if messages:
self.request.session[self.session_key] = self.serialize_messages(messages)
else:
self.request.session.pop(self.session_key, None)
return []
def serialize_messages(self, messages):
encoder = MessageEncoder(separators=(',', ':'))
return encoder.encode(messages)
def deserialize_messages(self, data):
if data and isinstance(data, six.string_types):
return json.loads(data, cls=MessageDecoder)
return data
| 34.979592 | 90 | 0.65811 | [
"BSD-3-Clause"
] | Acidburn0zzz/django | django/contrib/messages/storage/session.py | 1,714 | Python |
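For reference, the storage above is selected through Django's MESSAGE_STORAGE setting; the snippet below is a minimal sketch using the standard messages API and is not part of the original module.
# settings.py (sketch): route contrib.messages through the session backend above
MESSAGE_STORAGE = "django.contrib.messages.storage.session.SessionStorage"
# views.py (sketch): messages are then serialized under request.session["_messages"]
from django.contrib import messages
from django.http import HttpResponse
def my_view(request):
    messages.success(request, "Profile updated.")
    return HttpResponse("ok")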
from django.contrib import admin
# Register your models here.
from account.models import UserProfile
from blog.models import BlogArticles
class BlogArticlesAdmin(admin.ModelAdmin):
list_display = ("title", "author", "publish")
list_filter = ("publish", "author")
search_fields = ("title", "body")
raw_id_fields = ("author",)
date_hierarchy = "publish"
ordering = ("-publish", "author")
admin.site.register(BlogArticles, BlogArticlesAdmin)
class UserProfileAdmin(admin.ModelAdmin):
list_display = ("user", "birth", "phone")
list_filter = ("phone",)
admin.site.register(UserProfile, UserProfileAdmin)
| 24.615385 | 52 | 0.715625 | [
"MIT"
] | jinjf553/mysite | blog/admin.py | 640 | Python |
import os
import sys
def main():
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'yatube.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
| 25.857143 | 73 | 0.672192 | [
"BSD-2-Clause"
] | LazarevaKate/hw02_community | yatube/manage.py | 543 | Python |
from typing import Tuple
import torch
import torch.nn as nn
import torch.nn.functional as F
from kornia.constants import pi
__all__ = [
# functional api
"rad2deg",
"deg2rad",
"pol2cart",
"cart2pol",
"convert_points_from_homogeneous",
"convert_points_to_homogeneous",
"convert_affinematrix_to_homography",
"convert_affinematrix_to_homography3d",
"angle_axis_to_rotation_matrix",
"angle_axis_to_quaternion",
"rotation_matrix_to_angle_axis",
"rotation_matrix_to_quaternion",
"quaternion_to_angle_axis",
"quaternion_to_rotation_matrix",
"quaternion_log_to_exp",
"quaternion_exp_to_log",
"denormalize_pixel_coordinates",
"normalize_pixel_coordinates",
"normalize_quaternion",
"denormalize_pixel_coordinates3d",
"normalize_pixel_coordinates3d",
]
def rad2deg(tensor: torch.Tensor) -> torch.Tensor:
r"""Function that converts angles from radians to degrees.
Args:
tensor (torch.Tensor): Tensor of arbitrary shape.
Returns:
torch.Tensor: Tensor with same shape as input.
Example:
>>> input = torch.tensor(3.1415926535) * torch.rand(1, 3, 3)
>>> output = rad2deg(input)
"""
if not isinstance(tensor, torch.Tensor):
raise TypeError("Input type is not a torch.Tensor. Got {}".format(
type(tensor)))
return 180. * tensor / pi.to(tensor.device).type(tensor.dtype)
def deg2rad(tensor: torch.Tensor) -> torch.Tensor:
r"""Function that converts angles from degrees to radians.
Args:
tensor (torch.Tensor): Tensor of arbitrary shape.
Returns:
torch.Tensor: tensor with same shape as input.
Examples::
>>> input = 360. * torch.rand(1, 3, 3)
>>> output = deg2rad(input)
"""
if not isinstance(tensor, torch.Tensor):
raise TypeError("Input type is not a torch.Tensor. Got {}".format(
type(tensor)))
return tensor * pi.to(tensor.device).type(tensor.dtype) / 180.
def pol2cart(rho: torch.Tensor, phi: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
r"""Function that converts polar coordinates to cartesian coordinates.
Args:
rho (torch.Tensor): Tensor of arbitrary shape.
phi (torch.Tensor): Tensor of same arbitrary shape.
Returns:
torch.Tensor, torch.Tensor: Tensor with same shape as input.
Example:
>>> rho = torch.rand(1, 3, 3)
>>> phi = torch.rand(1, 3, 3)
>>> x, y = pol2cart(rho, phi)
"""
if not (isinstance(rho, torch.Tensor) & isinstance(phi, torch.Tensor)):
raise TypeError("Input type is not a torch.Tensor. Got {}, {}".format(
type(rho), type(phi)))
x = rho * torch.cos(phi)
y = rho * torch.sin(phi)
return x, y
def cart2pol(x: torch.Tensor, y: torch.Tensor, eps: float = 1e-8) -> Tuple[torch.Tensor, torch.Tensor]:
"""Function that converts cartesian coordinates to polar coordinates.
Args:
        x (torch.Tensor): Tensor of arbitrary shape.
        y (torch.Tensor): Tensor of same arbitrary shape.
eps (float): To avoid division by zero. Default is 1e-8
Returns:
torch.Tensor, torch.Tensor: Tensor with same shape as input.
Example:
>>> x = torch.rand(1, 3, 3)
>>> y = torch.rand(1, 3, 3)
>>> rho, phi = cart2pol(x, y)
"""
if not (isinstance(x, torch.Tensor) & isinstance(y, torch.Tensor)):
raise TypeError("Input type is not a torch.Tensor. Got {}, {}".format(
type(x), type(y)))
rho = torch.sqrt(x**2 + y**2 + eps)
phi = torch.atan2(y, x)
return rho, phi
def convert_points_from_homogeneous(
points: torch.Tensor, eps: float = 1e-8) -> torch.Tensor:
r"""Function that converts points from homogeneous to Euclidean space.
Examples::
>>> input = torch.rand(2, 4, 3) # BxNx3
>>> output = convert_points_from_homogeneous(input) # BxNx2
"""
if not isinstance(points, torch.Tensor):
raise TypeError("Input type is not a torch.Tensor. Got {}".format(
type(points)))
if len(points.shape) < 2:
raise ValueError("Input must be at least a 2D tensor. Got {}".format(
points.shape))
# we check for points at infinity
z_vec: torch.Tensor = points[..., -1:]
    # set the results of division by zero/near-zero to 1.0
# follow the convention of opencv:
# https://github.com/opencv/opencv/pull/14411/files
mask: torch.Tensor = torch.abs(z_vec) > eps
scale: torch.Tensor = torch.ones_like(z_vec).masked_scatter_(
mask, torch.tensor(1.0).to(points.device) / z_vec[mask])
return scale * points[..., :-1]
def convert_points_to_homogeneous(points: torch.Tensor) -> torch.Tensor:
r"""Function that converts points from Euclidean to homogeneous space.
Examples::
>>> input = torch.rand(2, 4, 3) # BxNx3
>>> output = convert_points_to_homogeneous(input) # BxNx4
"""
if not isinstance(points, torch.Tensor):
raise TypeError("Input type is not a torch.Tensor. Got {}".format(
type(points)))
if len(points.shape) < 2:
raise ValueError("Input must be at least a 2D tensor. Got {}".format(
points.shape))
return torch.nn.functional.pad(points, [0, 1], "constant", 1.0)
def _convert_affinematrix_to_homography_impl(A: torch.Tensor) -> torch.Tensor:
H: torch.Tensor = torch.nn.functional.pad(A, [0, 0, 0, 1], "constant", value=0.)
H[..., -1, -1] += 1.0
return H
def convert_affinematrix_to_homography(A: torch.Tensor) -> torch.Tensor:
r"""Function that converts batch of affine matrices from [Bx2x3] to [Bx3x3].
Examples::
>>> input = torch.rand(2, 2, 3) # Bx2x3
>>> output = convert_affinematrix_to_homography(input) # Bx3x3
"""
if not isinstance(A, torch.Tensor):
raise TypeError("Input type is not a torch.Tensor. Got {}".format(
type(A)))
if not (len(A.shape) == 3 and A.shape[-2:] == (2, 3)):
raise ValueError("Input matrix must be a Bx2x3 tensor. Got {}"
.format(A.shape))
return _convert_affinematrix_to_homography_impl(A)
def convert_affinematrix_to_homography3d(A: torch.Tensor) -> torch.Tensor:
r"""Function that converts batch of affine matrices from [Bx3x4] to [Bx4x4].
Examples::
>>> input = torch.rand(2, 3, 4) # Bx3x4
>>> output = convert_affinematrix_to_homography3d(input) # Bx4x4
"""
if not isinstance(A, torch.Tensor):
raise TypeError("Input type is not a torch.Tensor. Got {}".format(
type(A)))
if not (len(A.shape) == 3 and A.shape[-2:] == (3, 4)):
raise ValueError("Input matrix must be a Bx3x4 tensor. Got {}"
.format(A.shape))
return _convert_affinematrix_to_homography_impl(A)
def angle_axis_to_rotation_matrix(angle_axis: torch.Tensor) -> torch.Tensor:
r"""Convert 3d vector of axis-angle rotation to 3x3 rotation matrix
Args:
angle_axis (torch.Tensor): tensor of 3d vector of axis-angle rotations.
Returns:
torch.Tensor: tensor of 3x3 rotation matrices.
Shape:
- Input: :math:`(N, 3)`
- Output: :math:`(N, 3, 3)`
Example:
>>> input = torch.rand(1, 3) # Nx3
>>> output = angle_axis_to_rotation_matrix(input) # Nx3x3
"""
if not isinstance(angle_axis, torch.Tensor):
raise TypeError("Input type is not a torch.Tensor. Got {}".format(
type(angle_axis)))
if not angle_axis.shape[-1] == 3:
raise ValueError(
"Input size must be a (*, 3) tensor. Got {}".format(
angle_axis.shape))
def _compute_rotation_matrix(angle_axis, theta2, eps=1e-6):
# We want to be careful to only evaluate the square root if the
# norm of the angle_axis vector is greater than zero. Otherwise
# we get a division by zero.
k_one = 1.0
theta = torch.sqrt(theta2)
wxyz = angle_axis / (theta + eps)
wx, wy, wz = torch.chunk(wxyz, 3, dim=1)
cos_theta = torch.cos(theta)
sin_theta = torch.sin(theta)
r00 = cos_theta + wx * wx * (k_one - cos_theta)
r10 = wz * sin_theta + wx * wy * (k_one - cos_theta)
r20 = -wy * sin_theta + wx * wz * (k_one - cos_theta)
r01 = wx * wy * (k_one - cos_theta) - wz * sin_theta
r11 = cos_theta + wy * wy * (k_one - cos_theta)
r21 = wx * sin_theta + wy * wz * (k_one - cos_theta)
r02 = wy * sin_theta + wx * wz * (k_one - cos_theta)
r12 = -wx * sin_theta + wy * wz * (k_one - cos_theta)
r22 = cos_theta + wz * wz * (k_one - cos_theta)
rotation_matrix = torch.cat(
[r00, r01, r02, r10, r11, r12, r20, r21, r22], dim=1)
return rotation_matrix.view(-1, 3, 3)
def _compute_rotation_matrix_taylor(angle_axis):
rx, ry, rz = torch.chunk(angle_axis, 3, dim=1)
k_one = torch.ones_like(rx)
rotation_matrix = torch.cat(
[k_one, -rz, ry, rz, k_one, -rx, -ry, rx, k_one], dim=1)
return rotation_matrix.view(-1, 3, 3)
# stolen from ceres/rotation.h
_angle_axis = torch.unsqueeze(angle_axis, dim=1)
theta2 = torch.matmul(_angle_axis, _angle_axis.transpose(1, 2))
theta2 = torch.squeeze(theta2, dim=1)
# compute rotation matrices
rotation_matrix_normal = _compute_rotation_matrix(angle_axis, theta2)
rotation_matrix_taylor = _compute_rotation_matrix_taylor(angle_axis)
# create mask to handle both cases
eps = 1e-6
mask = (theta2 > eps).view(-1, 1, 1).to(theta2.device)
mask_pos = (mask).type_as(theta2)
mask_neg = (mask == False).type_as(theta2) # noqa
# create output pose matrix
batch_size = angle_axis.shape[0]
rotation_matrix = torch.eye(3).to(angle_axis.device).type_as(angle_axis)
rotation_matrix = rotation_matrix.view(1, 3, 3).repeat(batch_size, 1, 1)
# fill output matrix with masked values
rotation_matrix[..., :3, :3] = \
mask_pos * rotation_matrix_normal + mask_neg * rotation_matrix_taylor
return rotation_matrix # Nx3x3
def rotation_matrix_to_angle_axis(
rotation_matrix: torch.Tensor) -> torch.Tensor:
r"""Convert 3x3 rotation matrix to Rodrigues vector.
Args:
rotation_matrix (torch.Tensor): rotation matrix.
Returns:
torch.Tensor: Rodrigues vector transformation.
Shape:
- Input: :math:`(N, 3, 3)`
- Output: :math:`(N, 3)`
Example:
>>> input = torch.rand(2, 3, 3) # Nx3x3
>>> output = rotation_matrix_to_angle_axis(input) # Nx3
"""
if not isinstance(rotation_matrix, torch.Tensor):
raise TypeError("Input type is not a torch.Tensor. Got {}".format(
type(rotation_matrix)))
if not rotation_matrix.shape[-2:] == (3, 3):
raise ValueError(
"Input size must be a (*, 3, 3) tensor. Got {}".format(
rotation_matrix.shape))
quaternion: torch.Tensor = rotation_matrix_to_quaternion(rotation_matrix)
return quaternion_to_angle_axis(quaternion)
def rotation_matrix_to_quaternion(
rotation_matrix: torch.Tensor,
eps: float = 1e-8) -> torch.Tensor:
r"""Convert 3x3 rotation matrix to 4d quaternion vector.
The quaternion vector has components in (x, y, z, w) format.
Args:
rotation_matrix (torch.Tensor): the rotation matrix to convert.
eps (float): small value to avoid zero division. Default: 1e-8.
Return:
torch.Tensor: the rotation in quaternion.
Shape:
- Input: :math:`(*, 3, 3)`
- Output: :math:`(*, 4)`
Example:
>>> input = torch.rand(4, 3, 3) # Nx3x3
>>> output = rotation_matrix_to_quaternion(input) # Nx4
"""
if not isinstance(rotation_matrix, torch.Tensor):
raise TypeError("Input type is not a torch.Tensor. Got {}".format(
type(rotation_matrix)))
if not rotation_matrix.shape[-2:] == (3, 3):
raise ValueError(
"Input size must be a (*, 3, 3) tensor. Got {}".format(
rotation_matrix.shape))
def safe_zero_division(numerator: torch.Tensor,
denominator: torch.Tensor) -> torch.Tensor:
eps: float = torch.finfo(numerator.dtype).tiny # type: ignore
return numerator / torch.clamp(denominator, min=eps)
rotation_matrix_vec: torch.Tensor = rotation_matrix.view(
*rotation_matrix.shape[:-2], 9)
m00, m01, m02, m10, m11, m12, m20, m21, m22 = torch.chunk(
rotation_matrix_vec, chunks=9, dim=-1)
trace: torch.Tensor = m00 + m11 + m22
def trace_positive_cond():
sq = torch.sqrt(trace + 1.0) * 2. # sq = 4 * qw.
qw = 0.25 * sq
qx = safe_zero_division(m21 - m12, sq)
qy = safe_zero_division(m02 - m20, sq)
qz = safe_zero_division(m10 - m01, sq)
return torch.cat([qx, qy, qz, qw], dim=-1)
def cond_1():
sq = torch.sqrt(1.0 + m00 - m11 - m22 + eps) * 2. # sq = 4 * qx.
qw = safe_zero_division(m21 - m12, sq)
qx = 0.25 * sq
qy = safe_zero_division(m01 + m10, sq)
qz = safe_zero_division(m02 + m20, sq)
return torch.cat([qx, qy, qz, qw], dim=-1)
def cond_2():
sq = torch.sqrt(1.0 + m11 - m00 - m22 + eps) * 2. # sq = 4 * qy.
qw = safe_zero_division(m02 - m20, sq)
qx = safe_zero_division(m01 + m10, sq)
qy = 0.25 * sq
qz = safe_zero_division(m12 + m21, sq)
return torch.cat([qx, qy, qz, qw], dim=-1)
def cond_3():
sq = torch.sqrt(1.0 + m22 - m00 - m11 + eps) * 2. # sq = 4 * qz.
qw = safe_zero_division(m10 - m01, sq)
qx = safe_zero_division(m02 + m20, sq)
qy = safe_zero_division(m12 + m21, sq)
qz = 0.25 * sq
return torch.cat([qx, qy, qz, qw], dim=-1)
where_2 = torch.where(m11 > m22, cond_2(), cond_3())
where_1 = torch.where(
(m00 > m11) & (m00 > m22), cond_1(), where_2)
quaternion: torch.Tensor = torch.where(
trace > 0., trace_positive_cond(), where_1)
return quaternion
def normalize_quaternion(quaternion: torch.Tensor,
eps: float = 1e-12) -> torch.Tensor:
r"""Normalizes a quaternion.
The quaternion should be in (x, y, z, w) format.
Args:
quaternion (torch.Tensor): a tensor containing a quaternion to be
normalized. The tensor can be of shape :math:`(*, 4)`.
eps (Optional[bool]): small value to avoid division by zero.
Default: 1e-12.
Return:
torch.Tensor: the normalized quaternion of shape :math:`(*, 4)`.
Example:
>>> quaternion = torch.tensor([1., 0., 1., 0.])
>>> normalize_quaternion(quaternion)
tensor([0.7071, 0.0000, 0.7071, 0.0000])
"""
if not isinstance(quaternion, torch.Tensor):
raise TypeError("Input type is not a torch.Tensor. Got {}".format(
type(quaternion)))
if not quaternion.shape[-1] == 4:
raise ValueError(
"Input must be a tensor of shape (*, 4). Got {}".format(
quaternion.shape))
return F.normalize(quaternion, p=2, dim=-1, eps=eps)
# based on:
# https://github.com/matthew-brett/transforms3d/blob/8965c48401d9e8e66b6a8c37c65f2fc200a076fa/transforms3d/quaternions.py#L101
# https://github.com/tensorflow/graphics/blob/master/tensorflow_graphics/geometry/transformation/rotation_matrix_3d.py#L247
def quaternion_to_rotation_matrix(quaternion: torch.Tensor) -> torch.Tensor:
r"""Converts a quaternion to a rotation matrix.
The quaternion should be in (x, y, z, w) format.
Args:
quaternion (torch.Tensor): a tensor containing a quaternion to be
converted. The tensor can be of shape :math:`(*, 4)`.
Return:
torch.Tensor: the rotation matrix of shape :math:`(*, 3, 3)`.
Example:
>>> quaternion = torch.tensor([0., 0., 1., 0.])
>>> quaternion_to_rotation_matrix(quaternion)
tensor([[-1., 0., 0.],
[ 0., -1., 0.],
[ 0., 0., 1.]])
"""
if not isinstance(quaternion, torch.Tensor):
raise TypeError("Input type is not a torch.Tensor. Got {}".format(
type(quaternion)))
if not quaternion.shape[-1] == 4:
raise ValueError(
"Input must be a tensor of shape (*, 4). Got {}".format(
quaternion.shape))
# normalize the input quaternion
quaternion_norm: torch.Tensor = normalize_quaternion(quaternion)
# unpack the normalized quaternion components
x, y, z, w = torch.chunk(quaternion_norm, chunks=4, dim=-1)
# compute the actual conversion
tx: torch.Tensor = 2.0 * x
ty: torch.Tensor = 2.0 * y
tz: torch.Tensor = 2.0 * z
twx: torch.Tensor = tx * w
twy: torch.Tensor = ty * w
twz: torch.Tensor = tz * w
txx: torch.Tensor = tx * x
txy: torch.Tensor = ty * x
txz: torch.Tensor = tz * x
tyy: torch.Tensor = ty * y
tyz: torch.Tensor = tz * y
tzz: torch.Tensor = tz * z
one: torch.Tensor = torch.tensor(1.)
matrix: torch.Tensor = torch.stack([
one - (tyy + tzz), txy - twz, txz + twy,
txy + twz, one - (txx + tzz), tyz - twx,
txz - twy, tyz + twx, one - (txx + tyy)
], dim=-1).view(-1, 3, 3)
if len(quaternion.shape) == 1:
matrix = torch.squeeze(matrix, dim=0)
return matrix
def quaternion_to_angle_axis(quaternion: torch.Tensor) -> torch.Tensor:
"""Convert quaternion vector to angle axis of rotation.
    The quaternion should be in (w, x, y, z) format.
Adapted from ceres C++ library: ceres-solver/include/ceres/rotation.h
Args:
quaternion (torch.Tensor): tensor with quaternions.
Return:
torch.Tensor: tensor with angle axis of rotation.
Shape:
- Input: :math:`(*, 4)` where `*` means, any number of dimensions
- Output: :math:`(*, 3)`
Example:
>>> quaternion = torch.rand(2, 4) # Nx4
>>> angle_axis = quaternion_to_angle_axis(quaternion) # Nx3
"""
if not torch.is_tensor(quaternion):
raise TypeError("Input type is not a torch.Tensor. Got {}".format(
type(quaternion)))
if not quaternion.shape[-1] == 4:
raise ValueError(
"Input must be a tensor of shape Nx4 or 4. Got {}".format(
quaternion.shape))
# unpack input and compute conversion
q1: torch.Tensor = quaternion[..., 1]
q2: torch.Tensor = quaternion[..., 2]
q3: torch.Tensor = quaternion[..., 3]
sin_squared_theta: torch.Tensor = q1 * q1 + q2 * q2 + q3 * q3
sin_theta: torch.Tensor = torch.sqrt(sin_squared_theta)
cos_theta: torch.Tensor = quaternion[..., 0]
two_theta: torch.Tensor = 2.0 * torch.where(
cos_theta < 0.0, torch.atan2(-sin_theta, -cos_theta),
torch.atan2(sin_theta, cos_theta))
k_pos: torch.Tensor = two_theta / sin_theta
k_neg: torch.Tensor = 2.0 * torch.ones_like(sin_theta)
k: torch.Tensor = torch.where(sin_squared_theta > 0.0, k_pos, k_neg)
angle_axis: torch.Tensor = torch.zeros_like(quaternion)[..., :3]
angle_axis[..., 0] += q1 * k
angle_axis[..., 1] += q2 * k
angle_axis[..., 2] += q3 * k
return angle_axis
def quaternion_log_to_exp(quaternion: torch.Tensor,
eps: float = 1e-8) -> torch.Tensor:
r"""Applies exponential map to log quaternion.
The quaternion should be in (x, y, z, w) format.
Args:
quaternion (torch.Tensor): a tensor containing a quaternion to be
converted. The tensor can be of shape :math:`(*, 3)`.
Return:
torch.Tensor: the quaternion exponential map of shape :math:`(*, 4)`.
Example:
>>> quaternion = torch.tensor([0., 0., 0.])
>>> quaternion_log_to_exp(quaternion)
tensor([0., 0., 0., 1.])
"""
if not isinstance(quaternion, torch.Tensor):
raise TypeError("Input type is not a torch.Tensor. Got {}".format(
type(quaternion)))
if not quaternion.shape[-1] == 3:
raise ValueError(
"Input must be a tensor of shape (*, 3). Got {}".format(
quaternion.shape))
# compute quaternion norm
norm_q: torch.Tensor = torch.norm(
quaternion, p=2, dim=-1, keepdim=True).clamp(min=eps)
# compute scalar and vector
quaternion_vector: torch.Tensor = quaternion * torch.sin(norm_q) / norm_q
quaternion_scalar: torch.Tensor = torch.cos(norm_q)
# compose quaternion and return
quaternion_exp: torch.Tensor = torch.cat(
[quaternion_vector, quaternion_scalar], dim=-1)
return quaternion_exp
def quaternion_exp_to_log(quaternion: torch.Tensor,
eps: float = 1e-8) -> torch.Tensor:
r"""Applies the log map to a quaternion.
The quaternion should be in (x, y, z, w) format.
Args:
quaternion (torch.Tensor): a tensor containing a quaternion to be
converted. The tensor can be of shape :math:`(*, 4)`.
Return:
torch.Tensor: the quaternion log map of shape :math:`(*, 3)`.
Example:
>>> quaternion = torch.tensor([0., 0., 0., 1.])
>>> quaternion_exp_to_log(quaternion)
tensor([0., 0., 0.])
"""
if not isinstance(quaternion, torch.Tensor):
raise TypeError("Input type is not a torch.Tensor. Got {}".format(
type(quaternion)))
if not quaternion.shape[-1] == 4:
raise ValueError(
"Input must be a tensor of shape (*, 4). Got {}".format(
quaternion.shape))
# unpack quaternion vector and scalar
quaternion_vector: torch.Tensor = quaternion[..., 0:3]
quaternion_scalar: torch.Tensor = quaternion[..., 3:4]
# compute quaternion norm
norm_q: torch.Tensor = torch.norm(
quaternion_vector, p=2, dim=-1, keepdim=True).clamp(min=eps)
# apply log map
quaternion_log: torch.Tensor = quaternion_vector * torch.acos(
torch.clamp(quaternion_scalar, min=-1.0, max=1.0)) / norm_q
return quaternion_log
# based on:
# https://github.com/facebookresearch/QuaterNet/blob/master/common/quaternion.py#L138
def angle_axis_to_quaternion(angle_axis: torch.Tensor) -> torch.Tensor:
r"""Convert an angle axis to a quaternion.
    The quaternion vector has components in (w, x, y, z) format.
Adapted from ceres C++ library: ceres-solver/include/ceres/rotation.h
Args:
angle_axis (torch.Tensor): tensor with angle axis.
Return:
torch.Tensor: tensor with quaternion.
Shape:
- Input: :math:`(*, 3)` where `*` means, any number of dimensions
- Output: :math:`(*, 4)`
Example:
>>> angle_axis = torch.rand(2, 3) # Nx3
>>> quaternion = angle_axis_to_quaternion(angle_axis) # Nx4
"""
if not torch.is_tensor(angle_axis):
raise TypeError("Input type is not a torch.Tensor. Got {}".format(
type(angle_axis)))
if not angle_axis.shape[-1] == 3:
raise ValueError(
"Input must be a tensor of shape Nx3 or 3. Got {}".format(
angle_axis.shape))
# unpack input and compute conversion
a0: torch.Tensor = angle_axis[..., 0:1]
a1: torch.Tensor = angle_axis[..., 1:2]
a2: torch.Tensor = angle_axis[..., 2:3]
theta_squared: torch.Tensor = a0 * a0 + a1 * a1 + a2 * a2
theta: torch.Tensor = torch.sqrt(theta_squared)
half_theta: torch.Tensor = theta * 0.5
mask: torch.Tensor = theta_squared > 0.0
ones: torch.Tensor = torch.ones_like(half_theta)
k_neg: torch.Tensor = 0.5 * ones
k_pos: torch.Tensor = torch.sin(half_theta) / theta
k: torch.Tensor = torch.where(mask, k_pos, k_neg)
w: torch.Tensor = torch.where(mask, torch.cos(half_theta), ones)
quaternion: torch.Tensor = torch.zeros_like(angle_axis)
quaternion[..., 0:1] += a0 * k
quaternion[..., 1:2] += a1 * k
quaternion[..., 2:3] += a2 * k
return torch.cat([w, quaternion], dim=-1)
# based on:
# https://github.com/ClementPinard/SfmLearner-Pytorch/blob/master/inverse_warp.py#L65-L71
def normalize_pixel_coordinates(
pixel_coordinates: torch.Tensor,
height: int,
width: int,
eps: float = 1e-8) -> torch.Tensor:
r"""Normalize pixel coordinates between -1 and 1.
Normalized, -1 if on extreme left, 1 if on extreme right (x = w-1).
Args:
pixel_coordinates (torch.Tensor): the grid with pixel coordinates.
Shape can be :math:`(*, 2)`.
width (int): the maximum width in the x-axis.
height (int): the maximum height in the y-axis.
eps (float): safe division by zero. (default 1e-8).
Return:
torch.Tensor: the normalized pixel coordinates.
"""
if pixel_coordinates.shape[-1] != 2:
raise ValueError("Input pixel_coordinates must be of shape (*, 2). "
"Got {}".format(pixel_coordinates.shape))
# compute normalization factor
hw: torch.Tensor = torch.stack([
torch.tensor(width, device=pixel_coordinates.device, dtype=pixel_coordinates.dtype),
torch.tensor(height, device=pixel_coordinates.device, dtype=pixel_coordinates.dtype)
])
factor: torch.Tensor = torch.tensor(
2., device=pixel_coordinates.device, dtype=pixel_coordinates.dtype) / (hw - 1).clamp(eps)
return factor * pixel_coordinates - 1
def denormalize_pixel_coordinates(
pixel_coordinates: torch.Tensor,
height: int,
width: int,
eps: float = 1e-8) -> torch.Tensor:
r"""Denormalize pixel coordinates.
The input is assumed to be -1 if on extreme left, 1 if on
extreme right (x = w-1).
Args:
pixel_coordinates (torch.Tensor): the normalized grid coordinates.
Shape can be :math:`(*, 2)`.
width (int): the maximum width in the x-axis.
height (int): the maximum height in the y-axis.
eps (float): safe division by zero. (default 1e-8).
Return:
torch.Tensor: the denormalized pixel coordinates.
"""
if pixel_coordinates.shape[-1] != 2:
raise ValueError("Input pixel_coordinates must be of shape (*, 2). "
"Got {}".format(pixel_coordinates.shape))
# compute normalization factor
hw: torch.Tensor = torch.stack([
torch.tensor(width), torch.tensor(height)
]).to(pixel_coordinates.device).to(pixel_coordinates.dtype)
factor: torch.Tensor = torch.tensor(2.) / (hw - 1).clamp(eps)
return torch.tensor(1.) / factor * (pixel_coordinates + 1)
def normalize_pixel_coordinates3d(
pixel_coordinates: torch.Tensor,
depth: int,
height: int,
width: int,
eps: float = 1e-8) -> torch.Tensor:
r"""Normalize pixel coordinates between -1 and 1.
Normalized, -1 if on extreme left, 1 if on extreme right (x = w-1).
Args:
pixel_coordinates (torch.Tensor): the grid with pixel coordinates.
Shape can be :math:`(*, 3)`.
depth (int): the maximum depth in the z-axis.
height (int): the maximum height in the y-axis.
width (int): the maximum width in the x-axis.
eps (float): safe division by zero. (default 1e-8).
Return:
torch.Tensor: the normalized pixel coordinates.
"""
if pixel_coordinates.shape[-1] != 3:
raise ValueError("Input pixel_coordinates must be of shape (*, 3). "
"Got {}".format(pixel_coordinates.shape))
# compute normalization factor
dhw: torch.Tensor = torch.stack([
torch.tensor(depth), torch.tensor(width), torch.tensor(height)
]).to(pixel_coordinates.device).to(pixel_coordinates.dtype)
factor: torch.Tensor = torch.tensor(2.) / (dhw - 1).clamp(eps)
return factor * pixel_coordinates - 1
def denormalize_pixel_coordinates3d(
pixel_coordinates: torch.Tensor,
depth: int,
height: int,
width: int,
eps: float = 1e-8) -> torch.Tensor:
r"""Denormalize pixel coordinates.
The input is assumed to be -1 if on extreme left, 1 if on
extreme right (x = w-1).
Args:
pixel_coordinates (torch.Tensor): the normalized grid coordinates.
Shape can be :math:`(*, 3)`.
        depth (int): the maximum depth in the z-axis.
height (int): the maximum height in the y-axis.
width (int): the maximum width in the x-axis.
eps (float): safe division by zero. (default 1e-8).
Return:
torch.Tensor: the denormalized pixel coordinates.
"""
if pixel_coordinates.shape[-1] != 3:
raise ValueError("Input pixel_coordinates must be of shape (*, 3). "
"Got {}".format(pixel_coordinates.shape))
# compute normalization factor
dhw: torch.Tensor = torch.stack([
torch.tensor(depth), torch.tensor(width), torch.tensor(height)
]).to(pixel_coordinates.device).to(pixel_coordinates.dtype)
factor: torch.Tensor = torch.tensor(2.) / (dhw - 1).clamp(eps)
return torch.tensor(1.) / factor * (pixel_coordinates + 1)
| 35.379854 | 126 | 0.618941 | [
"ECL-2.0",
"Apache-2.0"
] | anthonytec2/kornia | kornia/geometry/conversions.py | 29,153 | Python |
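A small round-trip check exercising several of the converters above; it assumes the module is importable as kornia.geometry.conversions (as the file path suggests), and the numeric values are illustrative.
import torch
from kornia.geometry.conversions import (angle_axis_to_rotation_matrix,
                                          rotation_matrix_to_quaternion,
                                          quaternion_to_rotation_matrix,
                                          normalize_pixel_coordinates,
                                          rad2deg)
angle_axis = torch.tensor([[0.0, 0.0, 1.5708]])             # ~90 degrees about z, shape Nx3
rotation = angle_axis_to_rotation_matrix(angle_axis)         # Nx3x3
quaternion = rotation_matrix_to_quaternion(rotation)         # Nx4 in (x, y, z, w) order
rotation_back = quaternion_to_rotation_matrix(quaternion)    # Nx3x3
assert torch.allclose(rotation, rotation_back, atol=1e-4)
print(rad2deg(torch.tensor(1.5708)))                         # ~90.0
corners = torch.tensor([[0.0, 0.0], [639.0, 479.0]])         # corner pixels of a 640x480 image
print(normalize_pixel_coordinates(corners, height=480, width=640))  # ~[[-1, -1], [1, 1]]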
#!/usr/bin/env python3
# In this example, we demonstrate how Korali samples the posterior distribution
# in a Bayesian problem where the likelihood is calculated by providing
# reference data points and their objective values.
# Importing the computational model
import sys
sys.path.append('./_model')
from model import *
# Creating new experiment
import korali
e = korali.Experiment()
# Setting up the reference likelihood for the Bayesian Problem
e["Problem"]["Type"] = "Bayesian/Reference"
e["Problem"]["Likelihood Model"] = "Normal"
e["Problem"]["Reference Data"] = getReferenceData()
e["Problem"]["Computational Model"] = lambda sampleData: model(sampleData, getReferencePoints())
# Configuring Nested Sampling parameters
e["Solver"]["Type"] = "Sampler/Nested"
e["Solver"]["Resampling Method"] = "Ellipse"
e["Solver"]["Number Live Points"] = 1500
# Configuring the problem's random distributions
e["Distributions"][0]["Name"] = "Uniform 0"
e["Distributions"][0]["Type"] = "Univariate/Uniform"
e["Distributions"][0]["Minimum"] = 0.0
e["Distributions"][0]["Maximum"] = +5.0
# Configuring the problem's variables and their prior distributions
e["Variables"][0]["Name"] = "a"
e["Variables"][0]["Prior Distribution"] = "Uniform 0"
e["Variables"][1]["Name"] = "b"
e["Variables"][1]["Prior Distribution"] = "Uniform 0"
e["Variables"][2]["Name"] = "[Sigma]"
e["Variables"][2]["Prior Distribution"] = "Uniform 0"
e["File Output"]["Frequency"] = 1000
e["Console Output"]["Frequency"] = 500
e["Console Output"]["Verbosity"] = 'Detailed'
e["Solver"]["Termination Criteria"]["Max Generations"] = 100000
e["Solver"]["Termination Criteria"]["Min Log Evidence Delta"] = 1e-1
# Configuring output settings
e["File Output"]["Path"] = '_korali_result_nested'
# Starting Korali's Engine and running experiment
k = korali.Engine()
k.run(e)
| 33.381818 | 96 | 0.712418 | [
"MIT"
] | JonathanLehner/korali | examples/bayesian.inference/reference/run-nested.py | 1,836 | Python |
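The helpers imported from _model are not shown here; the sketch below illustrates the interface such a model typically implements for Korali's Bayesian/Reference problem with a Normal likelihood — the linear form and the data values are assumptions, not the example's actual code.
# hypothetical _model/model.py counterpart (illustrative only)
def getReferencePoints():
    return [1.0, 2.0, 3.0, 4.0]                     # assumed input locations x_i
def getReferenceData():
    return [3.1, 5.2, 6.8, 9.1]                     # assumed observations y_i
def model(sample, x_points):
    a = sample["Parameters"][0]                     # variable "a" above
    b = sample["Parameters"][1]                     # variable "b" above
    sigma = sample["Parameters"][2]                 # variable "[Sigma]" above
    sample["Reference Evaluations"] = [a * x + b for x in x_points]
    sample["Standard Deviation"] = [sigma for _ in x_points]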