Python functions with docstrings, sampled from public GitHub repositories. Each entry below gives the function body followed by its source metadata: path | name | repository_name | lang.
def write_immediate(self, addr: Union[(str, List[str])], value: Union[(object, List[object])], timeout: float=None):
    """Instead of waiting for a group write, writes the given value immediately. Note, this is not very efficient
    and should be used sparingly.
    """
if isinstance(addr, list):
if isinstance(value, list):
items = {addr: self._value_to_str(val) for (addr, val) in zip(addr, value)}
else:
value = self._value_to_str(value)
items = {addr: value for addr in addr}
else:
items = {addr: self._value_to_str(value)}
try:
timeout = (self.timeout if (timeout is None) else timeout)
with lock:
self._req.get(self._url, params=items, timeout=timeout)
for (addr, value) in items.items():
self._io[addr] = value
except (requests.exceptions.ConnectionError, requests.exceptions.ConnectTimeout) as ex:
        raise WebIOConnectionError(ex)

controlpyweb/reader_writer.py | write_immediate | washad/ControlPyWeb | python
def __init__(self, model='inception_v3', weights='imagenet', include_top=False, pooling=None, n_channels=None, clf_head_dense_dim=1024):
    """Creates ImageNet base model for featurization or classification and corresponding image
    preprocessing function

    :param model: options are xception, inception_v3, and mobilenet_v2
    :param weights: 'imagenet' or filepath
    :param include_top: whether to include original ImageNet classification head with 1000 classes
    :param pooling: 'avg', 'max', or None
    :param n_channels: number of channels to keep if performing featurization
    :param clf_head_dense_dim: dimension of dense layer before softmax classification (only applies
        if `include_top` is false)
    """
self.include_top = include_top
self.n_channels = n_channels
self.pooling = pooling
self.clf_head_dense_dim = clf_head_dense_dim
if (model == 'xception'):
self.model = xception.Xception(weights=weights, include_top=include_top, pooling=pooling)
self.preprocess = xception.preprocess_input
self.target_size = (299, 299)
if include_top:
self.decode = xception.decode_predictions
else:
self.output_dim = ((n_channels if n_channels else 2048) * (1 if pooling else (10 ** 2)))
elif (model == 'inception_v3'):
self.model = inception_v3.InceptionV3(weights=weights, include_top=include_top, pooling=pooling)
self.preprocess = inception_v3.preprocess_input
self.target_size = (299, 299)
if include_top:
self.decode = inception_v3.decode_predictions
else:
self.output_dim = ((n_channels if n_channels else 2048) * (1 if pooling else (8 ** 2)))
elif (model == 'mobilenet_v2'):
self.model = mobilenetv2.MobileNetV2(weights=weights, include_top=include_top, pooling=pooling)
self.preprocess = mobilenetv2.preprocess_input
self.target_size = (244, 244)
if include_top:
self.decode = mobilenetv2.decode_predictions
else:
self.output_dim = ((n_channels if n_channels else 1280) * (1 if pooling else (7 ** 2)))
else:
        raise Exception('model option not implemented')

primitives/image_classification/utils/imagenet.py | __init__ | Yonder-OSS/D3M-Primitives | python
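A minimal usage sketch for this constructor. The owning class is not shown in this entry, so `ImagenetModel` below is an assumed name; the `output_dim` arithmetic follows directly from the code above.

```python
# Hypothetical usage; `ImagenetModel` is an assumed name for the class that owns this __init__.
pooled = ImagenetModel(model='inception_v3', weights='imagenet', include_top=False, pooling='avg')
print(pooled.target_size)   # (299, 299): resize images to this before calling preprocess
print(pooled.output_dim)    # 2048: global pooling leaves one value per channel

spatial = ImagenetModel(model='inception_v3', weights='imagenet', include_top=False, pooling=None)
print(spatial.output_dim)   # 2048 * 8**2 = 131072: the 8x8 spatial grid is kept and later flattened
```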
def _load_finetune_model(self, nclasses=2, weights_path=None):
    """Constructs finetuning model architecture and optionally loads weights

    :param nclasses: number of classes on which to softmax over
    :param weights_path: optional filepath from which to try to load weights
    """
out = self.model.output
if (self.pooling is None):
out = GlobalAveragePooling2D()(out)
dense = Dense(self.clf_head_dense_dim, activation='relu')(out)
preds = Dense(nclasses, activation='softmax')(dense)
finetune_model = Model(inputs=self.model.input, outputs=preds)
if (weights_path is not None):
if os.path.isfile(weights_path):
finetune_model.load_weights(weights_path)
    return finetune_model

primitives/image_classification/utils/imagenet.py | _load_finetune_model | Yonder-OSS/D3M-Primitives | python
def get_features(self, images_array):
' takes a batch of images as a 4-d array and returns the (flattened) imagenet features for those images as a 2-d array '
if self.include_top:
raise Exception('getting features from a classification model with include_top=True is currently not supported')
if (images_array.ndim != 4):
raise Exception('invalid input shape for images_array, expects a 4d array')
logger.debug(f'preprocessing {images_array.shape[0]} images')
images_array = self.preprocess(images_array)
logger.debug(f'computing image features')
image_features = self.model.predict(images_array)
if self.n_channels:
logger.debug(f'truncating to first {self.n_channels} channels')
image_features = image_features.T[:self.n_channels].T
shape = image_features.shape
    return image_features.reshape(shape[0], np.prod(shape[1:]))

primitives/image_classification/utils/imagenet.py | get_features | Yonder-OSS/D3M-Primitives | python
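A short featurization sketch, continuing the assumed `pooled` instance from the earlier example and using a NumPy batch shaped to the model's `target_size`:

```python
import numpy as np

# Hypothetical call; assumes `pooled` was built with include_top=False and pooling='avg'.
batch = (np.random.rand(4, 299, 299, 3) * 255.0).astype('float32')  # 4 RGB images at target_size
features = pooled.get_features(batch)
print(features.shape)  # (4, 2048): one flattened feature vector per image
```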
def predict(self, images_array):
' alias for get_features to more closely match scikit-learn interface '
    return self.get_features(images_array)

primitives/image_classification/utils/imagenet.py | predict | Yonder-OSS/D3M-Primitives | python
def finetune(self, train_dataset, val_dataset=None, nclasses=2, top_layer_epochs=1, unfreeze_proportions=[0.5], all_layer_epochs=5, class_weight=None, optimizer_top='rmsprop', optimizer_full='sgd', callbacks=None, num_workers=8, load_weights_path=None, save_weights_path=None):
    """Finetunes the Imagenet model iteratively on a smaller set of images with (potentially) a smaller set of classes.
    First finetunes last layer then freezes bottom N layers and retrains the rest

    :param train_dataset: (X, y) pair of tf.constant tensors for training
    :param val_dataset: (X, y) pair of tf.constant tensors for validation, optional
    :param nclasses: number of classes
    :param top_layer_epochs: how many epochs for which to finetune classification head (happens first)
    :param unfreeze_proportions: list of proportions representing how much of the base ImageNet model one wants to
        unfreeze (later layers unfrozen) for another round of finetuning
    :param all_layer_epochs: how many epochs for which to finetune entire model (happens second)
    :param class_weight: class weights (used for both training steps)
    :param optimizer_top: optimizer to use for training of classification head
    :param optimizer_full: optimizer to use for training full classification model
        * suggest to use lower learning rate / more conservative optimizer for this step to
          prevent catastrophic forgetting
    :param callbacks: optional list of callbacks to use for each round of finetuning
    :param num_workers: number of workers to use for multiprocess data loading
    :param load_weights_path: optional filepath from which to try to load weights
    :param save_weights_path: optional filepath to which to store weights
    """
finetune_model = self._load_finetune_model(nclasses=nclasses, weights_path=load_weights_path)
fitting_histories = []
for layer in self.model.layers:
layer.trainable = False
finetune_model.compile(optimizer=optimizer_top, loss='categorical_crossentropy')
fitting_histories.append(finetune_model.fit(train_dataset, validation_data=val_dataset, epochs=top_layer_epochs, class_weight=class_weight, shuffle=True, use_multiprocessing=True, workers=num_workers, callbacks=callbacks))
finetune_model.compile(optimizer=optimizer_full, loss='categorical_crossentropy')
for p in unfreeze_proportions:
freeze_count = int((len(self.model.layers) * p))
for layer in finetune_model.layers[:freeze_count]:
layer.trainable = False
for layer in finetune_model.layers[freeze_count:]:
layer.trainable = True
fitting_histories.append(finetune_model.fit(train_dataset, validation_data=val_dataset, epochs=all_layer_epochs, class_weight=class_weight, shuffle=True, use_multiprocessing=True, workers=num_workers, callbacks=callbacks))
if (save_weights_path is not None):
finetune_model.save_weights(save_weights_path)
    return fitting_histories

primitives/image_classification/utils/imagenet.py | finetune | Yonder-OSS/D3M-Primitives | python
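A hedged sketch of the two-stage schedule described in the docstring above: train only the new classification head first, then retrain with part of the base network unfrozen. The dataset below is a tiny dummy stand-in, and `pooled` is the assumed `ImagenetModel` instance from the earlier sketch.

```python
import numpy as np
import tensorflow as tf

# Dummy (images, one-hot labels) dataset just to exercise the call; real data would be
# batches of images resized to target_size with matching one-hot labels.
images = np.random.rand(8, 299, 299, 3).astype('float32')
labels = tf.keras.utils.to_categorical(np.random.randint(0, 2, size=8), num_classes=2)
train_ds = tf.data.Dataset.from_tensor_slices((images, labels)).batch(4)

histories = pooled.finetune(
    train_ds,
    nclasses=2,
    top_layer_epochs=1,          # stage 1: only the new dense head is trainable
    unfreeze_proportions=[0.5],  # stage 2: freeze the first 50% of base layers, retrain the rest
    all_layer_epochs=2,
    optimizer_top='rmsprop',
    optimizer_full='sgd',        # conservative optimizer to limit catastrophic forgetting
    save_weights_path='finetuned_weights.h5',  # hypothetical output path
)
```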
def finetune_classify(self, test_dataset, nclasses=2, num_workers=8, load_weights_path=None):
    """Uses the finetuned model to predict on a test dataset.

    :param test_dataset: X, tf.constant tensor for inference
    :param nclasses: number of classes
    :param num_workers: number of workers to use for multiprocess data loading
    :return: array of softmaxed prediction probabilities
    :param load_weights_path: optional filepath from which to try to load weights
    """
finetune_model = self._load_finetune_model(nclasses=nclasses, weights_path=load_weights_path)
    return finetune_model.predict_generator(test_dataset, use_multiprocessing=True, workers=num_workers)

primitives/image_classification/utils/imagenet.py | finetune_classify | Yonder-OSS/D3M-Primitives | python
def test(test_config_path):
    """Runs an object detection test configuration

    This runs an object detection test configuration. This involves

    1. Download and build a model architecture (or use cached).
    2. Optimize the model architecture
    3. Benchmark the optimized model against a dataset
    4. (optional) Run assertions to check the benchmark output

    The input to this function is a JSON file which specifies the test
    configuration.

    example_test_config.json:

        {
            "model_config": { ... },
            "optimization_config": { ... },
            "benchmark_config": { ... },
            "assertions": [ ... ]
        }

    model_config: A dictionary of arguments passed to build_model, which
        specify the pre-optimized model architecture. The model will be passed
        to optimize_model.
    optimization_config: A dictionary of arguments passed to optimize_model.
        Please see help(optimize_model) for more details.
    benchmark_config: A dictionary of arguments passed to benchmark_model.
        Please see help(benchmark_model) for more details.
    assertions: A list of strings containing python code that will be
        evaluated. If the code returns false, an error will be thrown. These
        assertions can reference any variables local to this 'test' function.
        Some useful values are

            statistics['map']
            statistics['avg_latency']
            statistics['avg_throughput']

    Args
    ----
    test_config_path: A string corresponding to the test configuration
        JSON file.
    """
    with open(test_config_path, 'r') as f:  # read the config path passed to this function
test_config = json.load(f)
print(json.dumps(test_config, sort_keys=True, indent=4))
frozen_graph = build_model(**test_config['model_config'])
frozen_graph = optimize_model(frozen_graph, **test_config['optimization_config'])
statistics = benchmark_model(frozen_graph=frozen_graph, **test_config['benchmark_config'])
print_statistics = statistics
if ('runtimes_ms' in print_statistics):
print_statistics.pop('runtimes_ms')
print(json.dumps(print_statistics, sort_keys=True, indent=4))
if ('assertions' in test_config):
for a in test_config['assertions']:
if (not eval(a)):
raise AssertionError(('ASSERTION FAILED: %s' % a))
else:
                print(('ASSERTION PASSED: %s' % a))

tftrt/examples/object_detection/test.py | test | HubBucket-Team/tensorrt | python
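The docstring above describes the shape of the JSON test configuration. The snippet below is a hedged illustration of such a file; the keys inside `model_config`, `optimization_config`, and `benchmark_config` depend on the signatures of `build_model`, `optimize_model`, and `benchmark_model`, which are not shown here, so the values are placeholders rather than a known-good configuration.

```python
import json

# Placeholder configuration matching the documented top-level structure.
example_test_config = {
    'model_config': {},          # kwargs for build_model (not shown in this entry)
    'optimization_config': {},   # kwargs for optimize_model
    'benchmark_config': {},      # kwargs for benchmark_model
    # Assertions are evaluated with eval() and may reference locals such as `statistics`.
    'assertions': [
        "statistics['map'] > 0.0",
        "statistics['avg_latency'] < 1000.0",
    ],
}

with open('example_test_config.json', 'w') as f:
    json.dump(example_test_config, f, indent=4)
```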
def draw_approx_polyDP(cnt, epsilon=0.01, closed=True):
    """Approximately represent the curve with a polygon."""
epsilon = (epsilon * cv2.arcLength(cnt, closed))
    return cv2.approxPolyDP(cnt, epsilon, closed)

my_cv/utils/cv2_util.py | draw_approx_polyDP | strawsyz/straw | python
def draw_convex_hull(cnt):
    """Draw the convex hull; the input is a set of points."""
    return cv2.convexHull(cnt)

my_cv/utils/cv2_util.py | draw_convex_hull | strawsyz/straw | python
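Both helpers operate on a single contour. A small end-to-end sketch follows, assuming OpenCV 4.x (where `cv2.findContours` returns two values) and a synthetic binary image:

```python
import cv2
import numpy as np

# Synthetic binary image with one white blob.
img = np.zeros((200, 200), dtype=np.uint8)
cv2.circle(img, (100, 100), 60, 255, -1)

# OpenCV 4.x signature assumed: returns (contours, hierarchy).
contours, _ = cv2.findContours(img, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
cnt = contours[0]

approx = draw_approx_polyDP(cnt, epsilon=0.01)  # polygonal approximation of the contour
hull = draw_convex_hull(cnt)                    # convex hull points
print(len(approx), len(hull))
```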
def camera_show(window_name='camera'):
    """The window-closing behaviour could still be improved.
    Opens a window that captures and shows the camera feed.
    The window exits only after it has been left-clicked and any key has been pressed.
    """
clicked = False
camera_capture = cv2.VideoCapture(0)
def on_mouse(event, x, y, flags, param):
        nonlocal clicked  # 'global' would update a module-level name, not the flag checked in the loop below
if (event == cv2.EVENT_LBUTTONUP):
clicked = True
cv2.namedWindow(window_name)
cv2.setMouseCallback(window_name, on_mouse)
(success, frame) = camera_capture.read()
while (success and (cv2.waitKey(1) == (- 1)) and (not clicked)):
cv2.imshow(window_name, frame)
(success, frame) = camera_capture.read()
cv2.destroyAllWindows()
    camera_capture.release()

my_cv/utils/cv2_util.py | camera_show | strawsyz/straw | python
def from_upload_jobs(upload_jobs):
    """Creates a new upload queue from a list of upload jobs.

    Creates a new queue of files to upload by starting with the full input
    dataset and removing any files that are uploaded (partially or fully) to
    Sia.

    Args:
        upload_jobs: The unfiltered set of upload jobs.

    Returns:
        A Queue of upload jobs, filtered to remove jobs that are already
        complete (the paths already exist on Sia).
    """
    return from_upload_jobs_and_sia_client(upload_jobs, sc.make_sia_client())

sia_load_tester/upload_queue.py | from_upload_jobs | mtlynch/sia_load_tester | python
def from_upload_jobs_and_sia_client(upload_jobs, sia_client):
    """Creates a new upload queue from a dataset.

    Creates a new queue of files to upload by starting with the full input
    dataset and removing any files that are uploaded (partially or fully) to
    Sia.

    Args:
        upload_jobs: The unfiltered set of upload jobs.
        sia_client: An implementation of the Sia client interface.

    Returns:
        A Queue of upload jobs, filtered to remove jobs that are already
        complete (the paths already exist on Sia).
    """
sia_paths = _get_sia_paths(sia_client)
upload_jobs = [j for j in upload_jobs if (j.sia_path not in sia_paths)]
logger.info('%d files already uploaded to Sia, need to upload %d more', len(sia_paths), len(upload_jobs))
upload_queue = Queue.Queue()
for upload_job in upload_jobs:
upload_queue.put(upload_job)
    return upload_queue

sia_load_tester/upload_queue.py | from_upload_jobs_and_sia_client | mtlynch/sia_load_tester | python
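The `_get_sia_paths` helper and the Sia client interface are not shown in these entries, so the sketch below only mirrors the filtering logic itself with plain data: jobs whose `sia_path` already exists remotely are dropped, and the remainder are queued.

```python
import queue
from collections import namedtuple

# Stand-in for the real upload job type; only the sia_path attribute matters here.
UploadJob = namedtuple('UploadJob', ['sia_path', 'local_path'])

jobs = [UploadJob('a.bin', '/data/a.bin'), UploadJob('b.bin', '/data/b.bin')]
already_on_sia = {'a.bin'}  # what _get_sia_paths(sia_client) would report

pending = [j for j in jobs if j.sia_path not in already_on_sia]
upload_queue = queue.Queue()
for job in pending:
    upload_queue.put(job)

print(upload_queue.qsize())  # 1: only b.bin still needs uploading
```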
def __init__(self, name, lammps_input, lammps_data=None, data_filename='in.data', user_lammps_settings=None):
"\n Implementation of LammpsInputSet that is initialized from a dict\n settings. It is typically used by other LammpsInputSets for\n initialization from json or yaml source files.\n\n Args:\n name (str): A name for the input set.\n lammps_input (LammpsInput): The config dictionary to use.\n lammps_data (LammpsData): LammpsData object\n data_filename (str): name of the the lammps data file.\n Note: this will override the value for 'data_file' key in lammps_input\n user_lammps_settings (dict): User lammps settings. This allows a user\n to override lammps settings, e.g., setting a different force field\n or bond type.\n "
self.name = name
self.lines = []
self.lammps_input = lammps_input
self.lammps_data = lammps_data
self.data_filename = data_filename
self.lammps_input.settings['data_file'] = data_filename
self.user_lammps_settings = (user_lammps_settings or {})
    self.lammps_input.settings.update(self.user_lammps_settings)

pymatgen/io/lammps/sets.py | __init__ | JSelf42/pymatgen | python
def write_input(self, input_filename, data_filename=None):
    """
    Get the string representation of the main input file and write it.
    Also writes the data file if the lammps_data attribute is set.

    Args:
        input_filename (string): name of the input file
        data_filename (string): override the data file name with this
    """
if data_filename:
data_filename = os.path.abspath(os.path.join(os.getcwd(), data_filename))
if (data_filename and ('data_file' in self.lammps_input.settings)):
self.lammps_input.settings['data_file'] = data_filename
self.data_filename = data_filename
self.lammps_input.write_file(input_filename)
if self.lammps_data:
            self.lammps_data.write_file(filename=self.data_filename)

pymatgen/io/lammps/sets.py | write_input | JSelf42/pymatgen | python
@classmethod
def from_file(cls, name, input_template, user_settings, lammps_data=None, data_filename='in.data'):
    """
    Returns LammpsInputSet from input file template and input data.

    Args:
        name (str)
        input_template (string): path to the input template file.
        user_settings (dict): User lammps settings, the keys must
            correspond to the keys in the template.
        lammps_data (string/LammpsData): path to the data file or an
            appropriate object
        data_filename (string): name of the lammps data file.

    Returns:
        LammpsInputSet
    """
user_settings['data_file'] = data_filename
lammps_input = LammpsInput.from_file(input_template, user_settings)
if isinstance(lammps_data, six.string_types):
lammps_data = LammpsData.from_file(lammps_data)
    return cls(name, lammps_input, lammps_data=lammps_data, data_filename=data_filename)

pymatgen/io/lammps/sets.py | from_file | JSelf42/pymatgen | python
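A hedged usage sketch of the classmethod above. The import path is assumed to mirror the file path shown in the metadata, and the template path, settings keys, and data file are illustrative placeholders rather than known-good pymatgen inputs.

```python
from pymatgen.io.lammps.sets import LammpsInputSet  # module path assumed from pymatgen/io/lammps/sets.py

# Hypothetical file names and settings keys; the real template defines which keys are required.
lammps_set = LammpsInputSet.from_file(
    name='nvt-run',
    input_template='templates/in.nvt.template',
    user_settings={'temperature': 300, 'nsteps': 10000},
    lammps_data='structure.data',   # parsed via LammpsData.from_file when a path is given
    data_filename='in.data',
)
lammps_set.write_input('in.nvt', data_filename='in.data')
```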
def get_token(auth_ctx):
'Acquire token via client credential flow (ADAL Python library is utilized)'
token = auth_ctx.acquire_token_with_client_credentials('https://graph.microsoft.com', settings['client_credentials']['client_id'], settings['client_credentials']['client_secret'])
    return token

examples/outlook/send_message.py | get_token | stardust85/Office365-REST-Python-Client | python
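A hedged sketch of the pieces this function assumes: a `settings` dict holding client credentials and an ADAL `AuthenticationContext` built for the tenant. The authority URL and the `tenant` key are assumptions based on typical ADAL usage; only the `client_credentials` keys are taken from the code above.

```python
import adal

# Assumed layout of the settings dict referenced by get_token; values are placeholders.
settings = {
    'client_credentials': {
        'client_id': '<app-client-id>',
        'client_secret': '<app-client-secret>',
    },
    'tenant': '<tenant-id-or-domain>',  # assumed key, not shown in this entry
}

auth_ctx = adal.AuthenticationContext('https://login.microsoftonline.com/' + settings['tenant'])
token = get_token(auth_ctx)
print(token.get('accessToken', '')[:16], '...')
```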
def test_jwt_manager_initialized(jwt):
'Assert that the jwt_manager is created as part of the fixtures.'
    assert jwt

legal-api/tests/unit/services/test_authorization.py | test_jwt_manager_initialized | leksmall/lear | python
@not_github_ci
def test_jwt_manager_correct_test_config(app_request, jwt):
'Assert that the test configuration for the JWT is working as expected.'
message = 'This is a protected end-point'
protected_route = '/fake_jwt_route'
@app_request.route(protected_route)
@jwt.has_one_of_roles([STAFF_ROLE])
def get():
return jsonify(message=message)
token = helper_create_jwt(jwt, [STAFF_ROLE])
headers = {'Authorization': ('Bearer ' + token)}
rv = app_request.test_client().get(protected_route, headers=headers)
assert (rv.status_code == HTTPStatus.OK)
token = helper_create_jwt(jwt, ['SHOULD-FAIL'])
headers = {'Authorization': ('Bearer ' + token)}
rv = app_request.test_client().get(protected_route, headers=headers)
    assert (rv.status_code == HTTPStatus.UNAUTHORIZED)

legal-api/tests/unit/services/test_authorization.py | test_jwt_manager_correct_test_config | leksmall/lear | python
@not_github_ci
@pytest.mark.parametrize('test_name,identifier,username,roles,allowed_actions,requested_actions,expected', TEST_AUTHZ_DATA)
def test_authorized_user(monkeypatch, app_request, jwt, test_name, identifier, username, roles, allowed_actions, requested_actions, expected):
'Assert that the type of user authorization is correct, based on the expected outcome.'
from requests import Response
print(test_name)
def mock_get(*args, **kwargs):
resp = Response()
resp.status_code = 200
return resp
def mock_json(self, **kwargs):
return {'roles': allowed_actions}
monkeypatch.setattr('requests.sessions.Session.get', mock_get)
monkeypatch.setattr('requests.Response.json', mock_json)
@app_request.route('/fake_jwt_route/<string:identifier>')
@jwt.requires_auth
def get_fake(identifier: str):
if (not authorized(identifier, jwt, ['view'])):
return (jsonify(message='failed'), HTTPStatus.METHOD_NOT_ALLOWED)
return (jsonify(message='success'), HTTPStatus.OK)
token = helper_create_jwt(jwt, roles=roles, username=username)
headers = {'Authorization': ('Bearer ' + token)}
rv = app_request.test_client().get(f'/fake_jwt_route/{identifier}', headers=headers)
    assert (rv.status_code == expected)

legal-api/tests/unit/services/test_authorization.py | test_authorized_user | leksmall/lear | python
@integration_authorization
@pytest.mark.parametrize('test_name,identifier,username,roles,allowed_actions,requested_actions,expected', TEST_INTEG_AUTHZ_DATA)
def test_authorized_user_integ(monkeypatch, app, jwt, test_name, identifier, username, roles, allowed_actions, requested_actions, expected):
'Assert that the type of user authorization is correct, based on the expected outcome.'
import flask
token = helper_create_jwt(jwt, roles=roles, username=username)
headers = {'Authorization': ('Bearer ' + token)}
def mock_auth(one, two):
return headers['Authorization']
with app.test_request_context():
monkeypatch.setattr('flask.request.headers.get', mock_auth)
rv = authorized(identifier, jwt, ['view'])
if (expected == HTTPStatus.OK):
assert rv
else:
            assert (not rv)

legal-api/tests/unit/services/test_authorization.py | test_authorized_user_integ | leksmall/lear | python
def test_authorized_missing_args():
'Assert that the missing args return False.'
identifier = 'a corp'
jwt = 'fake'
action = 'fake'
rv = authorized(identifier, jwt, None)
assert (not rv)
rv = authorized(identifier, None, action)
assert (not rv)
rv = authorized(None, jwt, action)
    assert (not rv)

legal-api/tests/unit/services/test_authorization.py | test_authorized_missing_args | leksmall/lear | python
def test_authorized_bad_url(monkeypatch, app, jwt):
'Assert that an invalid auth service URL returns False.'
import flask
identifier = 'CP1234567'
username = 'username'
roles = [BASIC_USER]
token = helper_create_jwt(jwt, roles=roles, username=username)
headers = {'Authorization': ('Bearer ' + token)}
def mock_auth(one, two):
return headers['Authorization']
with app.test_request_context():
monkeypatch.setattr('flask.request.headers.get', mock_auth)
auth_svc_url = app.config['AUTH_SVC_URL']
app.config['AUTH_SVC_URL'] = 'http://no.way.this.works/dribble'
rv = authorized(identifier, jwt, ['view'])
app.config['AUTH_SVC_URL'] = auth_svc_url
        assert (not rv)

legal-api/tests/unit/services/test_authorization.py | test_authorized_bad_url | leksmall/lear | python
def test_authorized_invalid_roles(monkeypatch, app, jwt):
'Assert that an invalid role returns False.'
import flask
identifier = 'CP1234567'
username = 'username'
roles = ['NONE']
token = helper_create_jwt(jwt, roles=roles, username=username)
headers = {'Authorization': ('Bearer ' + token)}
def mock_auth(one, two):
return headers['Authorization']
with app.test_request_context():
monkeypatch.setattr('flask.request.headers.get', mock_auth)
rv = authorized(identifier, jwt, ['view'])
        assert (not rv)

legal-api/tests/unit/services/test_authorization.py | test_authorized_invalid_roles | leksmall/lear | python
@pytest.mark.parametrize('test_name,state,legal_type,username,roles,expected', [('staff_active_cp', Business.State.ACTIVE, 'CP', 'staff', [STAFF_ROLE], ['annualReport', 'changeOfAddress', 'changeOfDirectors', 'correction', 'courtOrder', 'dissolution', 'incorporationApplication', 'specialResolution', 'registrarsNotation', 'registrarsOrder']), ('staff_active_bc', Business.State.ACTIVE, 'BC', 'staff', [STAFF_ROLE], ['alteration', 'courtOrder', 'dissolution', 'incorporationApplication', 'transition', 'registrarsNotation', 'registrarsOrder']), ('staff_active_ben', Business.State.ACTIVE, 'BEN', 'staff', [STAFF_ROLE], ['alteration', 'annualReport', 'changeOfAddress', 'changeOfDirectors', 'conversion', 'correction', 'courtOrder', 'dissolution', 'incorporationApplication', 'transition', 'registrarsNotation', 'registrarsOrder']), ('staff_active_cc', Business.State.ACTIVE, 'CC', 'staff', [STAFF_ROLE], ['courtOrder', 'dissolution', 'registrarsNotation', 'registrarsOrder']), ('staff_active_ulc', Business.State.ACTIVE, 'ULC', 'staff', [STAFF_ROLE], ['alteration', 'courtOrder', 'dissolution', 'registrarsNotation', 'registrarsOrder']), ('staff_active_llc', Business.State.ACTIVE, 'LLC', 'staff', [STAFF_ROLE], ['courtOrder', 'dissolution', 'registrarsNotation', 'registrarsOrder']), ('staff_active_sp', Business.State.ACTIVE, 'SP', 'staff', [STAFF_ROLE], ['changeOfRegistration', 'conversion', 'dissolution', 'registration']), ('staff_active_gp', Business.State.ACTIVE, 'GP', 'staff', [STAFF_ROLE], ['changeOfRegistration', 'conversion', 'dissolution', 'registration']), ('user_active_cp', Business.State.ACTIVE, 'CP', 'user', [BASIC_USER], ['annualReport', 'changeOfAddress', 'changeOfDirectors', 'dissolution', 'incorporationApplication', 'specialResolution']), ('user_active_bc', Business.State.ACTIVE, 'BC', 'user', [BASIC_USER], ['alteration', 'dissolution', 'incorporationApplication', 'transition']), ('user_active_ben', Business.State.ACTIVE, 'BEN', 'user', [BASIC_USER], ['alteration', 'annualReport', 'changeOfAddress', 'changeOfDirectors', 'dissolution', 'incorporationApplication', 'transition']), ('user_active_cc', Business.State.ACTIVE, 'CC', 'user', [BASIC_USER], ['dissolution']), ('user_active_ulc', Business.State.ACTIVE, 'ULC', 'user', [BASIC_USER], ['alteration', 'dissolution']), ('user_active_llc', Business.State.ACTIVE, 'LLC', 'user', [BASIC_USER], ['dissolution']), ('user_active_sp', Business.State.ACTIVE, 'SP', 'user', [BASIC_USER], ['changeOfRegistration', 'dissolution', 'registration']), ('user_active_gp', Business.State.ACTIVE, 'GP', 'user', [BASIC_USER], ['changeOfRegistration', 'dissolution', 'registration']), ('staff_historical_cp', Business.State.HISTORICAL, 'CP', 'staff', [STAFF_ROLE], ['courtOrder', 'registrarsNotation', 'registrarsOrder', {'restoration': ['fullRestoration']}]), ('staff_historical_bc', Business.State.HISTORICAL, 'BC', 'staff', [STAFF_ROLE], ['courtOrder', 'registrarsNotation', 'registrarsOrder', {'restoration': ['fullRestoration', 'limitedRestoration']}]), ('staff_historical_ben', Business.State.HISTORICAL, 'BEN', 'staff', [STAFF_ROLE], ['courtOrder', 'registrarsNotation', 'registrarsOrder', {'restoration': ['fullRestoration', 'limitedRestoration']}]), ('staff_historical_cc', Business.State.HISTORICAL, 'CC', 'staff', [STAFF_ROLE], ['courtOrder', 'registrarsNotation', 'registrarsOrder', {'restoration': ['fullRestoration', 'limitedRestoration']}]), ('staff_historical_ulc', Business.State.HISTORICAL, 'ULC', 'staff', [STAFF_ROLE], ['courtOrder', 'registrarsNotation', 
'registrarsOrder', {'restoration': ['fullRestoration', 'limitedRestoration']}]), ('staff_historical_llc', Business.State.HISTORICAL, 'LLC', 'staff', [STAFF_ROLE], ['courtOrder', 'registrarsNotation', 'registrarsOrder', {'restoration': ['fullRestoration', 'limitedRestoration']}]), ('user_historical_llc', Business.State.HISTORICAL, 'LLC', 'user', [BASIC_USER], [])])
def test_get_allowed(monkeypatch, app, jwt, test_name, state, legal_type, username, roles, expected):
'Assert that get allowed returns valid filings.'
token = helper_create_jwt(jwt, roles=roles, username=username)
headers = {'Authorization': ('Bearer ' + token)}
def mock_auth(one, two):
return headers[one]
with app.test_request_context():
monkeypatch.setattr('flask.request.headers.get', mock_auth)
filing_types = get_allowed(state, legal_type, jwt)
assert (filing_types == expected) | 8,420,474,170,027,016,000 | Assert that get allowed returns valid filings. | legal-api/tests/unit/services/test_authorization.py | test_get_allowed | leksmall/lear | python | @pytest.mark.parametrize('test_name,state,legal_type,username,roles,expected', [('staff_active_cp', Business.State.ACTIVE, 'CP', 'staff', [STAFF_ROLE], ['annualReport', 'changeOfAddress', 'changeOfDirectors', 'correction', 'courtOrder', 'dissolution', 'incorporationApplication', 'specialResolution', 'registrarsNotation', 'registrarsOrder']), ('staff_active_bc', Business.State.ACTIVE, 'BC', 'staff', [STAFF_ROLE], ['alteration', 'courtOrder', 'dissolution', 'incorporationApplication', 'transition', 'registrarsNotation', 'registrarsOrder']), ('staff_active_ben', Business.State.ACTIVE, 'BEN', 'staff', [STAFF_ROLE], ['alteration', 'annualReport', 'changeOfAddress', 'changeOfDirectors', 'conversion', 'correction', 'courtOrder', 'dissolution', 'incorporationApplication', 'transition', 'registrarsNotation', 'registrarsOrder']), ('staff_active_cc', Business.State.ACTIVE, 'CC', 'staff', [STAFF_ROLE], ['courtOrder', 'dissolution', 'registrarsNotation', 'registrarsOrder']), ('staff_active_ulc', Business.State.ACTIVE, 'ULC', 'staff', [STAFF_ROLE], ['alteration', 'courtOrder', 'dissolution', 'registrarsNotation', 'registrarsOrder']), ('staff_active_llc', Business.State.ACTIVE, 'LLC', 'staff', [STAFF_ROLE], ['courtOrder', 'dissolution', 'registrarsNotation', 'registrarsOrder']), ('staff_active_sp', Business.State.ACTIVE, 'SP', 'staff', [STAFF_ROLE], ['changeOfRegistration', 'conversion', 'dissolution', 'registration']), ('staff_active_gp', Business.State.ACTIVE, 'GP', 'staff', [STAFF_ROLE], ['changeOfRegistration', 'conversion', 'dissolution', 'registration']), ('user_active_cp', Business.State.ACTIVE, 'CP', 'user', [BASIC_USER], ['annualReport', 'changeOfAddress', 'changeOfDirectors', 'dissolution', 'incorporationApplication', 'specialResolution']), ('user_active_bc', Business.State.ACTIVE, 'BC', 'user', [BASIC_USER], ['alteration', 'dissolution', 'incorporationApplication', 'transition']), ('user_active_ben', Business.State.ACTIVE, 'BEN', 'user', [BASIC_USER], ['alteration', 'annualReport', 'changeOfAddress', 'changeOfDirectors', 'dissolution', 'incorporationApplication', 'transition']), ('user_active_cc', Business.State.ACTIVE, 'CC', 'user', [BASIC_USER], ['dissolution']), ('user_active_ulc', Business.State.ACTIVE, 'ULC', 'user', [BASIC_USER], ['alteration', 'dissolution']), ('user_active_llc', Business.State.ACTIVE, 'LLC', 'user', [BASIC_USER], ['dissolution']), ('user_active_sp', Business.State.ACTIVE, 'SP', 'user', [BASIC_USER], ['changeOfRegistration', 'dissolution', 'registration']), ('user_active_gp', Business.State.ACTIVE, 'GP', 'user', [BASIC_USER], ['changeOfRegistration', 'dissolution', 'registration']), ('staff_historical_cp', Business.State.HISTORICAL, 'CP', 'staff', [STAFF_ROLE], ['courtOrder', 'registrarsNotation', 'registrarsOrder', {'restoration': ['fullRestoration']}]), ('staff_historical_bc', Business.State.HISTORICAL, 'BC', 'staff', [STAFF_ROLE], ['courtOrder', 'registrarsNotation', 'registrarsOrder', {'restoration': ['fullRestoration', 'limitedRestoration']}]), ('staff_historical_ben', Business.State.HISTORICAL, 'BEN', 'staff', [STAFF_ROLE], ['courtOrder', 'registrarsNotation', 'registrarsOrder', {'restoration': ['fullRestoration', 'limitedRestoration']}]), ('staff_historical_cc', Business.State.HISTORICAL, 'CC', 'staff', [STAFF_ROLE], ['courtOrder', 'registrarsNotation', 
'registrarsOrder', {'restoration': ['fullRestoration', 'limitedRestoration']}]), ('staff_historical_ulc', Business.State.HISTORICAL, 'ULC', 'staff', [STAFF_ROLE], ['courtOrder', 'registrarsNotation', 'registrarsOrder', {'restoration': ['fullRestoration', 'limitedRestoration']}]), ('staff_historical_llc', Business.State.HISTORICAL, 'LLC', 'staff', [STAFF_ROLE], ['courtOrder', 'registrarsNotation', 'registrarsOrder', {'restoration': ['fullRestoration', 'limitedRestoration']}]), ('user_historical_llc', Business.State.HISTORICAL, 'LLC', 'user', [BASIC_USER], [])])
def test_get_allowed(monkeypatch, app, jwt, test_name, state, legal_type, username, roles, expected):
token = helper_create_jwt(jwt, roles=roles, username=username)
headers = {'Authorization': ('Bearer ' + token)}
def mock_auth(one, two):
return headers[one]
with app.test_request_context():
monkeypatch.setattr('flask.request.headers.get', mock_auth)
filing_types = get_allowed(state, legal_type, jwt)
assert (filing_types == expected) |
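# Hedged spot check of get_allowed for one parametrized case ('staff_active_cp'); it reuses the
# fixtures and request/auth mocking shown above, and the expected entries come from that case.
allowed = get_allowed(Business.State.ACTIVE, 'CP', jwt)
assert 'annualReport' in allowed and 'transition' not in allowed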
@pytest.mark.parametrize('test_name,state,filing_type,sub_filing_type,legal_types,username,roles,expected', [('staff_active_allowed', Business.State.ACTIVE, 'alteration', None, ['BC', 'BEN', 'ULC'], 'staff', [STAFF_ROLE], True), ('staff_active', Business.State.ACTIVE, 'alteration', None, ['CP', 'CC', 'LLC'], 'staff', [STAFF_ROLE], False), ('staff_active_allowed', Business.State.ACTIVE, 'annualReport', None, ['CP', 'BEN'], 'staff', [STAFF_ROLE], True), ('staff_active', Business.State.ACTIVE, 'annualReport', None, ['BC', 'CC', 'ULC', 'LLC'], 'staff', [STAFF_ROLE], False), ('staff_active_allowed', Business.State.ACTIVE, 'changeOfAddress', None, ['CP', 'BEN'], 'staff', [STAFF_ROLE], True), ('staff_active', Business.State.ACTIVE, 'changeOfAddress', None, ['BC', 'CC', 'ULC', 'LLC'], 'staff', [STAFF_ROLE], False), ('staff_active_allowed', Business.State.ACTIVE, 'changeOfDirectors', None, ['CP', 'BEN'], 'staff', [STAFF_ROLE], True), ('staff_active', Business.State.ACTIVE, 'changeOfDirectors', None, ['BC', 'CC', 'ULC', 'LLC'], 'staff', [STAFF_ROLE], False), ('staff_active_allowed', Business.State.ACTIVE, 'correction', None, ['CP', 'BEN'], 'staff', [STAFF_ROLE], True), ('staff_active', Business.State.ACTIVE, 'correction', None, ['BC', 'CC', 'ULC', 'LLC'], 'staff', [STAFF_ROLE], False), ('staff_active_allowed', Business.State.ACTIVE, 'courtOrder', None, ['CP', 'BC', 'BEN', 'CC', 'ULC', 'LLC'], 'staff', [STAFF_ROLE], True), ('staff_active_allowed', Business.State.ACTIVE, 'dissolution', None, ['CP', 'BC', 'BEN', 'CC', 'ULC', 'LLC', 'SP', 'GP'], 'staff', [STAFF_ROLE], True), ('staff_active_allowed', Business.State.ACTIVE, 'incorporationApplication', None, ['CP', 'BC', 'BEN'], 'staff', [STAFF_ROLE], True), ('staff_active', Business.State.ACTIVE, 'restoration', 'fullRestoration', ['CP', 'BC', 'BEN', 'CC', 'ULC', 'LLC'], 'staff', [STAFF_ROLE], False), ('staff_active', Business.State.ACTIVE, 'restoration', 'limitedRestoration', ['CP', 'BC', 'BEN', 'CC', 'ULC', 'LLC'], 'staff', [STAFF_ROLE], False), ('staff_active_allowed', Business.State.ACTIVE, 'specialResolution', None, ['CP'], 'staff', [STAFF_ROLE], True), ('staff_active', Business.State.ACTIVE, 'specialResolution', None, ['BC', 'BEN', 'CC', 'ULC', 'LLC'], 'staff', [STAFF_ROLE], False), ('staff_active_allowed', Business.State.ACTIVE, 'transition', None, ['BC', 'BEN'], 'staff', [STAFF_ROLE], True), ('staff_active', Business.State.ACTIVE, 'transition', None, ['CP', 'CC', 'ULC', 'LLC'], 'staff', [STAFF_ROLE], False), ('staff_active_allowed', Business.State.ACTIVE, 'registrarsNotation', None, ['CP', 'BC', 'BEN', 'CC', 'ULC', 'LLC'], 'staff', [STAFF_ROLE], True), ('staff_active_allowed', Business.State.ACTIVE, 'registrarsOrder', None, ['CP', 'BC', 'BEN', 'CC', 'ULC', 'LLC'], 'staff', [STAFF_ROLE], True), ('staff_active_allowed', Business.State.ACTIVE, 'registration', None, ['SP', 'GP'], 'staff', [STAFF_ROLE], True), ('staff_active_allowed', Business.State.ACTIVE, 'changeOfRegistration', None, ['SP', 'GP'], 'staff', [STAFF_ROLE], True), ('user_active_allowed', Business.State.ACTIVE, 'alteration', None, ['BC', 'BEN', 'ULC'], 'user', [BASIC_USER], True), ('user_active', Business.State.ACTIVE, 'alteration', None, ['CP', 'CC', 'LLC'], 'user', [BASIC_USER], False), ('user_active_allowed', Business.State.ACTIVE, 'annualReport', None, ['CP', 'BEN'], 'user', [BASIC_USER], True), ('user_active', Business.State.ACTIVE, 'annualReport', None, ['BC', 'CC', 'ULC', 'LLC'], 'user', [BASIC_USER], False), ('user_active_allowed', Business.State.ACTIVE, 'changeOfAddress', None, 
['CP', 'BEN'], 'user', [BASIC_USER], True), ('user_active', Business.State.ACTIVE, 'changeOfAddress', None, ['BC', 'CC', 'ULC', 'LLC'], 'user', [BASIC_USER], False), ('user_active_allowed', Business.State.ACTIVE, 'changeOfDirectors', None, ['CP', 'BEN'], 'user', [BASIC_USER], True), ('user_active', Business.State.ACTIVE, 'changeOfDirectors', None, ['BC', 'CC', 'ULC', 'LLC'], 'user', [BASIC_USER], False), ('user_active', Business.State.ACTIVE, 'correction', None, ['CP', 'BC', 'BEN', 'CC', 'ULC', 'LLC'], 'user', [BASIC_USER], False), ('user_active', Business.State.ACTIVE, 'courtOrder', None, ['CP', 'BC', 'BEN', 'CC', 'ULC', 'LLC'], 'user', [BASIC_USER], False), ('user_active_allowed', Business.State.ACTIVE, 'dissolution', None, ['CP', 'BC', 'BEN', 'CC', 'ULC', 'LLC', 'SP', 'GP'], 'user', [BASIC_USER], True), ('user_active_allowed', Business.State.ACTIVE, 'incorporationApplication', None, ['CP', 'BC', 'BEN'], 'user', [BASIC_USER], True), ('user_active_allowed', Business.State.ACTIVE, 'registration', None, ['SP', 'GP'], 'user', [BASIC_USER], True), ('user_active_allowed', Business.State.ACTIVE, 'changeOfRegistration', None, ['SP', 'GP'], 'user', [BASIC_USER], True), ('user_active', Business.State.ACTIVE, 'restoration', 'fullRestoration', ['CP', 'BC', 'BEN', 'CC', 'ULC', 'LLC'], 'user', [BASIC_USER], False), ('user_active', Business.State.ACTIVE, 'restoration', 'limitedRestoration', ['CP', 'BC', 'BEN', 'CC', 'ULC', 'LLC'], 'user', [BASIC_USER], False), ('user_active_allowed', Business.State.ACTIVE, 'specialResolution', None, ['CP'], 'user', [BASIC_USER], True), ('user_active', Business.State.ACTIVE, 'specialResolution', None, ['BC', 'BEN', 'CC', 'ULC', 'LLC'], 'user', [BASIC_USER], False), ('user_active_allowed', Business.State.ACTIVE, 'transition', None, ['BC', 'BEN'], 'user', [BASIC_USER], True), ('user_active', Business.State.ACTIVE, 'transition', None, ['CP', 'CC', 'ULC', 'LLC'], 'user', [BASIC_USER], False), ('user_active', Business.State.ACTIVE, 'registrarsNotation', None, ['CP', 'BC', 'BEN', 'CC', 'ULC', 'LLC'], 'user', [BASIC_USER], False), ('user_active', Business.State.ACTIVE, 'registrarsOrder', None, ['CP', 'BC', 'BEN', 'CC', 'ULC', 'LLC'], 'user', [BASIC_USER], False), ('staff_historical', Business.State.HISTORICAL, 'alteration', None, ['CP', 'BC', 'BEN', 'CC', 'ULC', 'LLC'], 'staff', [STAFF_ROLE], False), ('staff_historical', Business.State.HISTORICAL, 'annualReport', None, ['CP', 'BC', 'BEN', 'CC', 'ULC', 'LLC'], 'staff', [STAFF_ROLE], False), ('staff_historical', Business.State.HISTORICAL, 'changeOfAddress', None, ['CP', 'BC', 'BEN', 'CC', 'ULC', 'LLC'], 'staff', [STAFF_ROLE], False), ('staff_historical', Business.State.HISTORICAL, 'changeOfDirectors', None, ['CP', 'BC', 'BEN', 'CC', 'ULC', 'LLC'], 'staff', [STAFF_ROLE], False), ('staff_historical', Business.State.HISTORICAL, 'correction', None, ['CP', 'BC', 'BEN', 'CC', 'ULC', 'LLC'], 'staff', [STAFF_ROLE], False), ('staff_historical_allowed', Business.State.HISTORICAL, 'courtOrder', None, ['CP', 'BC', 'BEN', 'CC', 'ULC', 'LLC'], 'staff', [STAFF_ROLE], True), ('staff_historical', Business.State.HISTORICAL, 'dissolution', None, ['CP', 'BC', 'BEN', 'CC', 'ULC', 'LLC', 'SP', 'GP'], 'staff', [STAFF_ROLE], False), ('staff_historical', Business.State.HISTORICAL, 'incorporationApplication', None, ['CP', 'BC', 'BEN', 'CC', 'ULC', 'LLC'], 'staff', [STAFF_ROLE], False), ('staff_historical_allowed', Business.State.HISTORICAL, 'restoration', 'fullRestoration', ['CP', 'BC', 'BEN', 'CC', 'ULC', 'LLC'], 'staff', [STAFF_ROLE], True), 
('staff_historical_allowed', Business.State.HISTORICAL, 'restoration', 'limitedRestoration', ['BC', 'BEN', 'CC', 'ULC', 'LLC'], 'staff', [STAFF_ROLE], True), ('staff_historical', Business.State.HISTORICAL, 'restoration', 'limitedRestoration', ['CP'], 'staff', [STAFF_ROLE], False), ('staff_historical', Business.State.HISTORICAL, 'specialResolution', None, ['CP', 'BC', 'BEN', 'CC', 'ULC', 'LLC'], 'staff', [STAFF_ROLE], False), ('staff_historical', Business.State.HISTORICAL, 'transition', None, ['CP', 'BC', 'BEN', 'CC', 'ULC', 'LLC'], 'staff', [STAFF_ROLE], False), ('staff_historical_allowed', Business.State.HISTORICAL, 'registrarsNotation', None, ['CP', 'BC', 'BEN', 'CC', 'ULC', 'LLC'], 'staff', [STAFF_ROLE], True), ('staff_historical_allowed', Business.State.HISTORICAL, 'registrarsOrder', None, ['CP', 'BC', 'BEN', 'CC', 'ULC', 'LLC'], 'staff', [STAFF_ROLE], True), ('staff_historical', Business.State.HISTORICAL, 'registration', None, ['SP', 'GP'], 'staff', [STAFF_ROLE], False), ('staff_historical', Business.State.HISTORICAL, 'changeOfRegistration', None, ['SP', 'GP'], 'staff', [STAFF_ROLE], False), ('user_historical', Business.State.HISTORICAL, 'alteration', None, ['CP', 'BC', 'BEN', 'CC', 'ULC', 'LLC'], 'user', [BASIC_USER], False), ('user_historical', Business.State.HISTORICAL, 'annualReport', None, ['CP', 'BC', 'BEN', 'CC', 'ULC', 'LLC'], 'user', [BASIC_USER], False), ('user_historical', Business.State.HISTORICAL, 'changeOfAddress', None, ['CP', 'BC', 'BEN', 'CC', 'ULC', 'LLC'], 'user', [BASIC_USER], False), ('user_historical', Business.State.HISTORICAL, 'changeOfDirectors', None, ['CP', 'BC', 'BEN', 'CC', 'ULC', 'LLC'], 'user', [BASIC_USER], False), ('user_historical', Business.State.HISTORICAL, 'correction', None, ['CP', 'BC', 'BEN', 'CC', 'ULC', 'LLC'], 'user', [BASIC_USER], False), ('user_historical', Business.State.HISTORICAL, 'courtOrder', None, ['CP', 'BC', 'BEN', 'CC', 'ULC', 'LLC'], 'user', [BASIC_USER], False), ('user_historical', Business.State.HISTORICAL, 'dissolution', None, ['CP', 'BC', 'BEN', 'CC', 'ULC', 'LLC', 'SP', 'GP', 'SP', 'GP'], 'user', [BASIC_USER], False), ('user_historical', Business.State.HISTORICAL, 'incorporationApplication', None, ['CP', 'BC', 'BEN', 'CC', 'ULC', 'LLC'], 'user', [BASIC_USER], False), ('user_historical', Business.State.HISTORICAL, 'restoration', 'fullRestoration', ['CP', 'BC', 'BEN', 'CC', 'ULC', 'LLC'], 'user', [BASIC_USER], False), ('user_historical', Business.State.HISTORICAL, 'restoration', 'limitedRestoration', ['CP', 'BC', 'BEN', 'CC', 'ULC', 'LLC'], 'user', [BASIC_USER], False), ('user_historical', Business.State.HISTORICAL, 'specialResolution', None, ['CP', 'BC', 'BEN', 'CC', 'ULC', 'LLC'], 'user', [BASIC_USER], False), ('user_historical', Business.State.HISTORICAL, 'transition', None, ['CP', 'BC', 'BEN', 'CC', 'ULC', 'LLC'], 'user', [BASIC_USER], False), ('user_historical', Business.State.HISTORICAL, 'registrarsNotation', None, ['CP', 'BC', 'BEN', 'CC', 'ULC', 'LLC'], 'user', [BASIC_USER], False), ('user_historical', Business.State.HISTORICAL, 'registrarsOrder', None, ['CP', 'BC', 'BEN', 'CC', 'ULC', 'LLC'], 'user', [BASIC_USER], False), ('user_historical', Business.State.HISTORICAL, 'registration', None, ['SP', 'GP'], 'user', [BASIC_USER], False), ('user_historical', Business.State.HISTORICAL, 'changeOfRegistration', None, ['SP', 'GP'], 'user', [BASIC_USER], False)])
def test_is_allowed(monkeypatch, app, jwt, test_name, state, filing_type, sub_filing_type, legal_types, username, roles, expected):
'Assert that get allowed returns valid filings.'
token = helper_create_jwt(jwt, roles=roles, username=username)
headers = {'Authorization': ('Bearer ' + token)}
def mock_auth(one, two):
return headers[one]
with app.test_request_context():
monkeypatch.setattr('flask.request.headers.get', mock_auth)
for legal_type in legal_types:
filing_types = is_allowed(state, filing_type, legal_type, jwt, sub_filing_type)
assert (filing_types == expected) | -2,506,679,965,252,201,000 | Assert that get allowed returns valid filings. | legal-api/tests/unit/services/test_authorization.py | test_is_allowed | leksmall/lear | python | @pytest.mark.parametrize('test_name,state,filing_type,sub_filing_type,legal_types,username,roles,expected', [('staff_active_allowed', Business.State.ACTIVE, 'alteration', None, ['BC', 'BEN', 'ULC'], 'staff', [STAFF_ROLE], True), ('staff_active', Business.State.ACTIVE, 'alteration', None, ['CP', 'CC', 'LLC'], 'staff', [STAFF_ROLE], False), ('staff_active_allowed', Business.State.ACTIVE, 'annualReport', None, ['CP', 'BEN'], 'staff', [STAFF_ROLE], True), ('staff_active', Business.State.ACTIVE, 'annualReport', None, ['BC', 'CC', 'ULC', 'LLC'], 'staff', [STAFF_ROLE], False), ('staff_active_allowed', Business.State.ACTIVE, 'changeOfAddress', None, ['CP', 'BEN'], 'staff', [STAFF_ROLE], True), ('staff_active', Business.State.ACTIVE, 'changeOfAddress', None, ['BC', 'CC', 'ULC', 'LLC'], 'staff', [STAFF_ROLE], False), ('staff_active_allowed', Business.State.ACTIVE, 'changeOfDirectors', None, ['CP', 'BEN'], 'staff', [STAFF_ROLE], True), ('staff_active', Business.State.ACTIVE, 'changeOfDirectors', None, ['BC', 'CC', 'ULC', 'LLC'], 'staff', [STAFF_ROLE], False), ('staff_active_allowed', Business.State.ACTIVE, 'correction', None, ['CP', 'BEN'], 'staff', [STAFF_ROLE], True), ('staff_active', Business.State.ACTIVE, 'correction', None, ['BC', 'CC', 'ULC', 'LLC'], 'staff', [STAFF_ROLE], False), ('staff_active_allowed', Business.State.ACTIVE, 'courtOrder', None, ['CP', 'BC', 'BEN', 'CC', 'ULC', 'LLC'], 'staff', [STAFF_ROLE], True), ('staff_active_allowed', Business.State.ACTIVE, 'dissolution', None, ['CP', 'BC', 'BEN', 'CC', 'ULC', 'LLC', 'SP', 'GP'], 'staff', [STAFF_ROLE], True), ('staff_active_allowed', Business.State.ACTIVE, 'incorporationApplication', None, ['CP', 'BC', 'BEN'], 'staff', [STAFF_ROLE], True), ('staff_active', Business.State.ACTIVE, 'restoration', 'fullRestoration', ['CP', 'BC', 'BEN', 'CC', 'ULC', 'LLC'], 'staff', [STAFF_ROLE], False), ('staff_active', Business.State.ACTIVE, 'restoration', 'limitedRestoration', ['CP', 'BC', 'BEN', 'CC', 'ULC', 'LLC'], 'staff', [STAFF_ROLE], False), ('staff_active_allowed', Business.State.ACTIVE, 'specialResolution', None, ['CP'], 'staff', [STAFF_ROLE], True), ('staff_active', Business.State.ACTIVE, 'specialResolution', None, ['BC', 'BEN', 'CC', 'ULC', 'LLC'], 'staff', [STAFF_ROLE], False), ('staff_active_allowed', Business.State.ACTIVE, 'transition', None, ['BC', 'BEN'], 'staff', [STAFF_ROLE], True), ('staff_active', Business.State.ACTIVE, 'transition', None, ['CP', 'CC', 'ULC', 'LLC'], 'staff', [STAFF_ROLE], False), ('staff_active_allowed', Business.State.ACTIVE, 'registrarsNotation', None, ['CP', 'BC', 'BEN', 'CC', 'ULC', 'LLC'], 'staff', [STAFF_ROLE], True), ('staff_active_allowed', Business.State.ACTIVE, 'registrarsOrder', None, ['CP', 'BC', 'BEN', 'CC', 'ULC', 'LLC'], 'staff', [STAFF_ROLE], True), ('staff_active_allowed', Business.State.ACTIVE, 'registration', None, ['SP', 'GP'], 'staff', [STAFF_ROLE], True), ('staff_active_allowed', Business.State.ACTIVE, 'changeOfRegistration', None, ['SP', 'GP'], 'staff', [STAFF_ROLE], True), ('user_active_allowed', Business.State.ACTIVE, 'alteration', None, ['BC', 'BEN', 'ULC'], 'user', [BASIC_USER], True), ('user_active', Business.State.ACTIVE, 'alteration', None, ['CP', 'CC', 'LLC'], 'user', [BASIC_USER], False), ('user_active_allowed', Business.State.ACTIVE, 'annualReport', None, ['CP', 'BEN'], 'user', 
[BASIC_USER], True), ('user_active', Business.State.ACTIVE, 'annualReport', None, ['BC', 'CC', 'ULC', 'LLC'], 'user', [BASIC_USER], False), ('user_active_allowed', Business.State.ACTIVE, 'changeOfAddress', None, ['CP', 'BEN'], 'user', [BASIC_USER], True), ('user_active', Business.State.ACTIVE, 'changeOfAddress', None, ['BC', 'CC', 'ULC', 'LLC'], 'user', [BASIC_USER], False), ('user_active_allowed', Business.State.ACTIVE, 'changeOfDirectors', None, ['CP', 'BEN'], 'user', [BASIC_USER], True), ('user_active', Business.State.ACTIVE, 'changeOfDirectors', None, ['BC', 'CC', 'ULC', 'LLC'], 'user', [BASIC_USER], False), ('user_active', Business.State.ACTIVE, 'correction', None, ['CP', 'BC', 'BEN', 'CC', 'ULC', 'LLC'], 'user', [BASIC_USER], False), ('user_active', Business.State.ACTIVE, 'courtOrder', None, ['CP', 'BC', 'BEN', 'CC', 'ULC', 'LLC'], 'user', [BASIC_USER], False), ('user_active_allowed', Business.State.ACTIVE, 'dissolution', None, ['CP', 'BC', 'BEN', 'CC', 'ULC', 'LLC', 'SP', 'GP'], 'user', [BASIC_USER], True), ('user_active_allowed', Business.State.ACTIVE, 'incorporationApplication', None, ['CP', 'BC', 'BEN'], 'user', [BASIC_USER], True), ('user_active_allowed', Business.State.ACTIVE, 'registration', None, ['SP', 'GP'], 'user', [BASIC_USER], True), ('user_active_allowed', Business.State.ACTIVE, 'changeOfRegistration', None, ['SP', 'GP'], 'user', [BASIC_USER], True), ('user_active', Business.State.ACTIVE, 'restoration', 'fullRestoration', ['CP', 'BC', 'BEN', 'CC', 'ULC', 'LLC'], 'user', [BASIC_USER], False), ('user_active', Business.State.ACTIVE, 'restoration', 'limitedRestoration', ['CP', 'BC', 'BEN', 'CC', 'ULC', 'LLC'], 'user', [BASIC_USER], False), ('user_active_allowed', Business.State.ACTIVE, 'specialResolution', None, ['CP'], 'user', [BASIC_USER], True), ('user_active', Business.State.ACTIVE, 'specialResolution', None, ['BC', 'BEN', 'CC', 'ULC', 'LLC'], 'user', [BASIC_USER], False), ('user_active_allowed', Business.State.ACTIVE, 'transition', None, ['BC', 'BEN'], 'user', [BASIC_USER], True), ('user_active', Business.State.ACTIVE, 'transition', None, ['CP', 'CC', 'ULC', 'LLC'], 'user', [BASIC_USER], False), ('user_active', Business.State.ACTIVE, 'registrarsNotation', None, ['CP', 'BC', 'BEN', 'CC', 'ULC', 'LLC'], 'user', [BASIC_USER], False), ('user_active', Business.State.ACTIVE, 'registrarsOrder', None, ['CP', 'BC', 'BEN', 'CC', 'ULC', 'LLC'], 'user', [BASIC_USER], False), ('staff_historical', Business.State.HISTORICAL, 'alteration', None, ['CP', 'BC', 'BEN', 'CC', 'ULC', 'LLC'], 'staff', [STAFF_ROLE], False), ('staff_historical', Business.State.HISTORICAL, 'annualReport', None, ['CP', 'BC', 'BEN', 'CC', 'ULC', 'LLC'], 'staff', [STAFF_ROLE], False), ('staff_historical', Business.State.HISTORICAL, 'changeOfAddress', None, ['CP', 'BC', 'BEN', 'CC', 'ULC', 'LLC'], 'staff', [STAFF_ROLE], False), ('staff_historical', Business.State.HISTORICAL, 'changeOfDirectors', None, ['CP', 'BC', 'BEN', 'CC', 'ULC', 'LLC'], 'staff', [STAFF_ROLE], False), ('staff_historical', Business.State.HISTORICAL, 'correction', None, ['CP', 'BC', 'BEN', 'CC', 'ULC', 'LLC'], 'staff', [STAFF_ROLE], False), ('staff_historical_allowed', Business.State.HISTORICAL, 'courtOrder', None, ['CP', 'BC', 'BEN', 'CC', 'ULC', 'LLC'], 'staff', [STAFF_ROLE], True), ('staff_historical', Business.State.HISTORICAL, 'dissolution', None, ['CP', 'BC', 'BEN', 'CC', 'ULC', 'LLC', 'SP', 'GP'], 'staff', [STAFF_ROLE], False), ('staff_historical', Business.State.HISTORICAL, 'incorporationApplication', None, ['CP', 'BC', 'BEN', 'CC', 
'ULC', 'LLC'], 'staff', [STAFF_ROLE], False), ('staff_historical_allowed', Business.State.HISTORICAL, 'restoration', 'fullRestoration', ['CP', 'BC', 'BEN', 'CC', 'ULC', 'LLC'], 'staff', [STAFF_ROLE], True), ('staff_historical_allowed', Business.State.HISTORICAL, 'restoration', 'limitedRestoration', ['BC', 'BEN', 'CC', 'ULC', 'LLC'], 'staff', [STAFF_ROLE], True), ('staff_historical', Business.State.HISTORICAL, 'restoration', 'limitedRestoration', ['CP'], 'staff', [STAFF_ROLE], False), ('staff_historical', Business.State.HISTORICAL, 'specialResolution', None, ['CP', 'BC', 'BEN', 'CC', 'ULC', 'LLC'], 'staff', [STAFF_ROLE], False), ('staff_historical', Business.State.HISTORICAL, 'transition', None, ['CP', 'BC', 'BEN', 'CC', 'ULC', 'LLC'], 'staff', [STAFF_ROLE], False), ('staff_historical_allowed', Business.State.HISTORICAL, 'registrarsNotation', None, ['CP', 'BC', 'BEN', 'CC', 'ULC', 'LLC'], 'staff', [STAFF_ROLE], True), ('staff_historical_allowed', Business.State.HISTORICAL, 'registrarsOrder', None, ['CP', 'BC', 'BEN', 'CC', 'ULC', 'LLC'], 'staff', [STAFF_ROLE], True), ('staff_historical', Business.State.HISTORICAL, 'registration', None, ['SP', 'GP'], 'staff', [STAFF_ROLE], False), ('staff_historical', Business.State.HISTORICAL, 'changeOfRegistration', None, ['SP', 'GP'], 'staff', [STAFF_ROLE], False), ('user_historical', Business.State.HISTORICAL, 'alteration', None, ['CP', 'BC', 'BEN', 'CC', 'ULC', 'LLC'], 'user', [BASIC_USER], False), ('user_historical', Business.State.HISTORICAL, 'annualReport', None, ['CP', 'BC', 'BEN', 'CC', 'ULC', 'LLC'], 'user', [BASIC_USER], False), ('user_historical', Business.State.HISTORICAL, 'changeOfAddress', None, ['CP', 'BC', 'BEN', 'CC', 'ULC', 'LLC'], 'user', [BASIC_USER], False), ('user_historical', Business.State.HISTORICAL, 'changeOfDirectors', None, ['CP', 'BC', 'BEN', 'CC', 'ULC', 'LLC'], 'user', [BASIC_USER], False), ('user_historical', Business.State.HISTORICAL, 'correction', None, ['CP', 'BC', 'BEN', 'CC', 'ULC', 'LLC'], 'user', [BASIC_USER], False), ('user_historical', Business.State.HISTORICAL, 'courtOrder', None, ['CP', 'BC', 'BEN', 'CC', 'ULC', 'LLC'], 'user', [BASIC_USER], False), ('user_historical', Business.State.HISTORICAL, 'dissolution', None, ['CP', 'BC', 'BEN', 'CC', 'ULC', 'LLC', 'SP', 'GP', 'SP', 'GP'], 'user', [BASIC_USER], False), ('user_historical', Business.State.HISTORICAL, 'incorporationApplication', None, ['CP', 'BC', 'BEN', 'CC', 'ULC', 'LLC'], 'user', [BASIC_USER], False), ('user_historical', Business.State.HISTORICAL, 'restoration', 'fullRestoration', ['CP', 'BC', 'BEN', 'CC', 'ULC', 'LLC'], 'user', [BASIC_USER], False), ('user_historical', Business.State.HISTORICAL, 'restoration', 'limitedRestoration', ['CP', 'BC', 'BEN', 'CC', 'ULC', 'LLC'], 'user', [BASIC_USER], False), ('user_historical', Business.State.HISTORICAL, 'specialResolution', None, ['CP', 'BC', 'BEN', 'CC', 'ULC', 'LLC'], 'user', [BASIC_USER], False), ('user_historical', Business.State.HISTORICAL, 'transition', None, ['CP', 'BC', 'BEN', 'CC', 'ULC', 'LLC'], 'user', [BASIC_USER], False), ('user_historical', Business.State.HISTORICAL, 'registrarsNotation', None, ['CP', 'BC', 'BEN', 'CC', 'ULC', 'LLC'], 'user', [BASIC_USER], False), ('user_historical', Business.State.HISTORICAL, 'registrarsOrder', None, ['CP', 'BC', 'BEN', 'CC', 'ULC', 'LLC'], 'user', [BASIC_USER], False), ('user_historical', Business.State.HISTORICAL, 'registration', None, ['SP', 'GP'], 'user', [BASIC_USER], False), ('user_historical', Business.State.HISTORICAL, 'changeOfRegistration', None, ['SP', 
'GP'], 'user', [BASIC_USER], False)])
def test_is_allowed(monkeypatch, app, jwt, test_name, state, filing_type, sub_filing_type, legal_types, username, roles, expected):
token = helper_create_jwt(jwt, roles=roles, username=username)
headers = {'Authorization': ('Bearer ' + token)}
def mock_auth(one, two):
return headers[one]
with app.test_request_context():
monkeypatch.setattr('flask.request.headers.get', mock_auth)
for legal_type in legal_types:
filing_types = is_allowed(state, filing_type, legal_type, jwt, sub_filing_type)
assert (filing_types == expected) |
@classmethod
def create(cls, surface, features):
'\n Create instance of MeCabServiceNode\n\n Parameters\n ----------\n surface : str\n Surface of the word\n features : dict\n Features analyzed by MeCabService\n '
return cls(surface=surface, part=features['part'], part_detail1=features['part_detail1'], part_detail2=features['part_detail2'], part_detail3=features['part_detail3'], stem_type=features['stem_type'], stem_form=features['stem_form'], word=features['word'], kana=features['kana'], pronunciation=features['pronunciation']) | 508,100,562,846,045,630 | Create instance of MeCabServiceNode
Parameters
----------
surface : str
Surface of the word
features : dict
Features analyzed by MeCabService | minette/tagger/mecabservice.py | create | uezo/minette-python | python | @classmethod
def create(cls, surface, features):
'\n Create instance of MeCabServiceNode\n\n Parameters\n ----------\n surface : str\n Surface of the word\n features : dict\n Features analyzed by MeCabService\n '
return cls(surface=surface, part=features['part'], part_detail1=features['part_detail1'], part_detail2=features['part_detail2'], part_detail3=features['part_detail3'], stem_type=features['stem_type'], stem_form=features['stem_form'], word=features['word'], kana=features['kana'], pronunciation=features['pronunciation']) |
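# Minimal usage sketch for MeCabServiceNode.create; the features payload below is hypothetical but
# uses exactly the keys the method reads. Attribute access assumes the base node class stores the
# constructor arguments as attributes.
features = {'part': '名詞', 'part_detail1': '一般', 'part_detail2': '', 'part_detail3': '',
            'stem_type': '', 'stem_form': '', 'word': 'テスト', 'kana': 'テスト', 'pronunciation': 'テスト'}
node = MeCabServiceNode.create('テスト', features)
# expected: node.surface == 'テスト' and node.part == '名詞'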
def __init__(self, config=None, timezone=None, logger=None, *, api_url=None, **kwargs):
'\n Parameters\n ----------\n config : Config, default None\n Configuration\n timezone : timezone, default None\n Timezone\n logger : Logger, default None\n Logger\n api_url : str, default None\n URL for MeCabService API.\n If None trial URL is used.\n '
super().__init__(config=config, timezone=timezone, logger=logger)
if (not api_url):
self.api_url = 'https://api.uezo.net/mecab/parse'
self.logger.warning('Do not use default API URL for the production environment. This is for trial use only. Install MeCab and use MeCabTagger instead.')
else:
self.api_url = api_url | -7,093,479,140,288,094,000 | Parameters
----------
config : Config, default None
Configuration
timezone : timezone, default None
Timezone
logger : Logger, default None
Logger
api_url : str, default None
URL for MeCabService API.
If None trial URL is used. | minette/tagger/mecabservice.py | __init__ | uezo/minette-python | python | def __init__(self, config=None, timezone=None, logger=None, *, api_url=None, **kwargs):
'\n Parameters\n ----------\n config : Config, default None\n Configuration\n timezone : timezone, default None\n Timezone\n logger : Logger, default None\n Logger\n api_url : str, default None\n URL for MeCabService API.\n If None trial URL is used.\n '
super().__init__(config=config, timezone=timezone, logger=logger)
if (not api_url):
self.api_url = 'https://api.uezo.net/mecab/parse'
self.logger.warning('Do not use default API URL for the production environment. This is for trial use only. Install MeCab and use MeCabTagger instead.')
else:
self.api_url = api_url |
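# Construction sketch; the enclosing class name (MeCabServiceTagger) is assumed, it is not shown in
# this record, and the self-hosted endpoint below is illustrative.
tagger = MeCabServiceTagger(api_url='https://example.com/mecab/parse')  # production-style endpoint
trial_tagger = MeCabServiceTagger()  # falls back to the trial URL and logs the warning above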
def parse(self, text):
'\n Parse and annotate using MeCab Service\n\n Parameters\n ----------\n text : str\n Text to analyze\n\n Returns\n -------\n words : list of minette.MeCabServiceNode\n MeCabService nodes\n '
ret = []
if (not text):
return ret
try:
parsed_json = requests.post(self.api_url, headers={'content-type': 'application/json'}, json={'text': text}, timeout=10).json()
ret = [MeCabServiceNode.create(n['surface'], n['features']) for n in parsed_json['nodes']]
except Exception as ex:
self.logger.error(((('MeCab Service parsing error: ' + str(ex)) + '\n') + traceback.format_exc()))
return ret | 356,939,620,011,587,650 | Parse and annotate using MeCab Service
Parameters
----------
text : str
Text to analyze
Returns
-------
words : list of minette.MeCabServiceNode
MeCabService nodes | minette/tagger/mecabservice.py | parse | uezo/minette-python | python | def parse(self, text):
'\n Parse and annotate using MeCab Service\n\n Parameters\n ----------\n text : str\n Text to analyze\n\n Returns\n -------\n words : list of minette.MeCabServiceNode\n MeCabService nodes\n '
ret = []
if (not text):
return ret
try:
parsed_json = requests.post(self.api_url, headers={'content-type': 'application/json'}, json={'text': text}, timeout=10).json()
ret = [MeCabServiceNode.create(n['surface'], n['features']) for n in parsed_json['nodes']]
except Exception as ex:
self.logger.error(((('MeCab Service parsing error: ' + str(ex)) + '\n') + traceback.format_exc()))
return ret |
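# parse() posts {'text': ...} and expects JSON shaped like {'nodes': [{'surface': ..., 'features': {...}}]}.
# Hedged usage sketch, with 'tagger' standing for an instance of the enclosing tagger class.
words = tagger.parse('今日はいい天気です')
for w in words:
    print(w.surface, w.part)
# on a network or decoding error the exception is logged and an empty list is returned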
def get_csv_fieldnames(self):
'Return the field names for the CSV file.'
return ['image_name', 'x_min', 'y_min', 'width', 'height', 'label'] | -1,880,100,204,781,328,600 | Return the field names for the CSV file. | src/discolight/writers/annotation/widthheightcsv.py | get_csv_fieldnames | arunraja-hub/discolight | python | def get_csv_fieldnames(self):
return ['image_name', 'x_min', 'y_min', 'width', 'height', 'label'] |
def get_csv_row(self, image_name, _image, annotation):
'Return the CSV row corresponding to the given annotation.'
return {'image_name': image_name, 'x_min': annotation.x_min, 'y_min': annotation.y_min, 'width': (annotation.x_max - annotation.x_min), 'height': (annotation.y_max - annotation.y_min), 'label': annotation.class_idx} | 1,451,875,289,880,012,500 | Return the CSV row corresponding to the given annotation. | src/discolight/writers/annotation/widthheightcsv.py | get_csv_row | arunraja-hub/discolight | python | def get_csv_row(self, image_name, _image, annotation):
return {'image_name': image_name, 'x_min': annotation.x_min, 'y_min': annotation.y_min, 'width': (annotation.x_max - annotation.x_min), 'height': (annotation.y_max - annotation.y_min), 'label': annotation.class_idx} |
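# These two hooks pair naturally with csv.DictWriter. Sketch only: 'annotation_writer' stands for an
# instance of the enclosing writer class, and 'annotation' is any object with the x_min/y_min/x_max/
# y_max/class_idx attributes read above; neither is defined in this record.
import csv
with open('annotations.csv', 'w', newline='') as f:
    out = csv.DictWriter(f, fieldnames=annotation_writer.get_csv_fieldnames())
    out.writeheader()
    out.writerow(annotation_writer.get_csv_row('img_001.jpg', image, annotation))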
def _wait_for_api_vip(self, hosts, timeout=180):
"Enable some grace time for waiting for API's availability."
return waiting.wait((lambda : self.get_kube_api_ip(hosts=hosts)), timeout_seconds=timeout, sleep_seconds=5, waiting_for="API's IP") | -2,047,560,959,314,300,700 | Enable some grace time for waiting for API's availability. | discovery-infra/test_infra/helper_classes/cluster.py | _wait_for_api_vip | empovit/assisted-test-infra | python | def _wait_for_api_vip(self, hosts, timeout=180):
return waiting.wait((lambda : self.get_kube_api_ip(hosts=hosts)), timeout_seconds=timeout, sleep_seconds=5, waiting_for="API's IP") |
@staticmethod
def is_kubeapi_service_ready(ip_or_dns):
'Validate if kube-api is ready on given address.'
with contextlib.suppress(ValueError):
if (ipaddress.ip_address(ip_or_dns).version == 6):
ip_or_dns = f'[{ip_or_dns}]'
try:
response = requests.get(f'https://{ip_or_dns}:6443/readyz', verify=False, timeout=1)
return response.ok
except BaseException:
return False | 1,817,083,930,350,911,200 | Validate if kube-api is ready on given address. | discovery-infra/test_infra/helper_classes/cluster.py | is_kubeapi_service_ready | empovit/assisted-test-infra | python | @staticmethod
def is_kubeapi_service_ready(ip_or_dns):
with contextlib.suppress(ValueError):
if (ipaddress.ip_address(ip_or_dns).version == 6):
ip_or_dns = f'[{ip_or_dns}]'
try:
response = requests.get(f'https://{ip_or_dns}:6443/readyz', verify=False, timeout=1)
return response.ok
except BaseException:
return False |
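# Hedged probe sketch; the enclosing class is assumed to be the Cluster helper and the addresses are
# illustrative. Both calls hit https://<address>:6443/readyz; IPv6 literals are bracketed first.
Cluster.is_kubeapi_service_ready('192.168.127.100')
Cluster.is_kubeapi_service_ready('fd2e:6f44:5dd8::100')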
def transforms(item, cfg, mode):
'\n :param item: sample = deepcopy(self.items[index])\n :param cfg: cfg\n :return:\n\n eval() transform str to list, dict, tuple. Here is a series of the transform methods in turn.\n '
transforms_dataset_factory = {'train': cfg.dataset.train, 'test': cfg.dataset.test}
if (transforms_dataset_factory[mode].before_to_tensor_transform_list is not None):
for t in transforms_dataset_factory[mode].before_to_tensor_transform_list:
item = eval('{}(item, cfg)'.format(t))
item = to_tensor(item, cfg)
if (transforms_dataset_factory[mode].after_to_tensor_transform_list is not None):
for t in transforms_dataset_factory[mode].after_to_tensor_transform_list:
item = eval('{}(item, cfg)'.format(t))
return item | 541,067,101,429,177,800 | :param item: sample = deepcopy(self.items[index])
:param cfg: cfg
:return:
eval() transform str to list, dict, tuple. Here is a series of the transform methods in turn. | MDRSREID/utils/data_utils/transforms/torch_transforms/__init__.py | transforms | nickhuang1996/HJL-re-id | python | def transforms(item, cfg, mode):
'\n :param item: sample = deepcopy(self.items[index])\n :param cfg: cfg\n :return:\n\n eval() transform str to list, dict, tuple. Here is a series of the transform methods in turn.\n '
transforms_dataset_factory = {'train': cfg.dataset.train, 'test': cfg.dataset.test}
if (transforms_dataset_factory[mode].before_to_tensor_transform_list is not None):
for t in transforms_dataset_factory[mode].before_to_tensor_transform_list:
item = eval('{}(item, cfg)'.format(t))
item = to_tensor(item, cfg)
if (transforms_dataset_factory[mode].after_to_tensor_transform_list is not None):
for t in transforms_dataset_factory[mode].after_to_tensor_transform_list:
item = eval('{}(item, cfg)'.format(t))
return item |
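# transforms() resolves each listed name with eval and calls it as name(item, cfg), so the config only
# needs the two name lists. Hypothetical sketch: the transform names and cfg layout are illustrative,
# not taken from this record.
cfg.dataset.train.before_to_tensor_transform_list = ['resize', 'random_horizontal_flip']
cfg.dataset.train.after_to_tensor_transform_list = ['random_erasing']
item = transforms(item, cfg, mode='train')  # resize -> flip -> to_tensor -> random_erasing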
def conv3x3(in_planes, out_planes, stride=1):
'3x3 convolution with padding'
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, padding=1, bias=False) | -798,009,169,856,366,800 | 3x3 convolution with padding | arcface/resnet_cbam.py | conv3x3 | DerryHub/the-TaobaoLive-Commodity-Identify-Competition | python | def conv3x3(in_planes, out_planes, stride=1):
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, padding=1, bias=False) |
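# Quick shape check: padding=1 keeps a 3x3 kernel from shrinking the map, so only the stride changes
# the spatial size.
import torch
conv = conv3x3(64, 128, stride=2)
x = torch.randn(1, 64, 56, 56)
print(conv(x).shape)  # torch.Size([1, 128, 28, 28])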
def load_image_from_file(self, filename, shape):
'Given a filename, try to open the file. If failed, return None.\n\n Args:\n filename: location of the image file\n shape: the shape of the image file to be scaled\n\n Returns:\n the image if succeeds, None if fails.\n\n Rasies:\n exception if the image was not the right shape.\n '
if (not tf.gfile.Exists(filename)):
tf.logging.error('Cannot find file: {}'.format(filename))
return None
try:
img = np.array(PIL.Image.open(tf.gfile.Open(filename, 'rb')).convert('RGB').resize(shape, PIL.Image.BILINEAR))
img = (np.float32(img) / 255.0)
if (not ((len(img.shape) == 3) and (img.shape[2] == 3))):
return None
else:
return img
except Exception as e:
tf.logging.info(e)
return None
return img | -2,819,391,022,505,540,600 | Given a filename, try to open the file. If failed, return None.
Args:
filename: location of the image file
shape: the shape of the image file to be scaled
Returns:
the image if succeeds, None if fails.
Raises:
exception if the image was not the right shape. | activation_generator.py | load_image_from_file | Gareth001/tcav | python | def load_image_from_file(self, filename, shape):
'Given a filename, try to open the file. If failed, return None.\n\n Args:\n filename: location of the image file\n shape: the shape of the image file to be scaled\n\n Returns:\n the image if succeeds, None if fails.\n\n Rasies:\n exception if the image was not the right shape.\n '
if (not tf.gfile.Exists(filename)):
tf.logging.error('Cannot find file: {}'.format(filename))
return None
try:
img = np.array(PIL.Image.open(tf.gfile.Open(filename, 'rb')).convert('RGB').resize(shape, PIL.Image.BILINEAR))
img = (np.float32(img) / 255.0)
if (not ((len(img.shape) == 3) and (img.shape[2] == 3))):
return None
else:
return img
except Exception as e:
tf.logging.info(e)
return None
return img |
def load_images_from_files(self, filenames, max_imgs=500, do_shuffle=True, run_parallel=True, shape=(299, 299), num_workers=100):
'Return image arrays from filenames.\n\n Args:\n filenames: locations of image files.\n max_imgs: maximum number of images from filenames.\n do_shuffle: before getting max_imgs files, shuffle the names or not\n run_parallel: get images in parallel or not\n shape: desired shape of the image\n num_workers: number of workers in parallelization.\n\n Returns:\n image arrays\n\n '
imgs = []
filenames = filenames[:]
if do_shuffle:
np.random.shuffle(filenames)
if run_parallel:
pool = multiprocessing.Pool(num_workers)
imgs = pool.map((lambda filename: self.load_image_from_file(filename, shape)), filenames[:max_imgs])
imgs = [img for img in imgs if (img is not None)]
if (len(imgs) <= 1):
raise ValueError('You must have more than 1 image in each class to run TCAV.')
else:
for filename in filenames:
img = self.load_image_from_file(filename, shape)
if (img is not None):
imgs.append(img)
if (len(imgs) <= 1):
raise ValueError('You must have more than 1 image in each class to run TCAV.')
elif (len(imgs) >= max_imgs):
break
return np.array(imgs) | -2,996,030,287,084,294,700 | Return image arrays from filenames.
Args:
filenames: locations of image files.
max_imgs: maximum number of images from filenames.
do_shuffle: before getting max_imgs files, shuffle the names or not
run_parallel: get images in parallel or not
shape: desired shape of the image
num_workers: number of workers in parallelization.
Returns:
image arrays | activation_generator.py | load_images_from_files | Gareth001/tcav | python | def load_images_from_files(self, filenames, max_imgs=500, do_shuffle=True, run_parallel=True, shape=(299, 299), num_workers=100):
'Return image arrays from filenames.\n\n Args:\n filenames: locations of image files.\n max_imgs: maximum number of images from filenames.\n do_shuffle: before getting max_imgs files, shuffle the names or not\n run_parallel: get images in parallel or not\n shape: desired shape of the image\n num_workers: number of workers in parallelization.\n\n Returns:\n image arrays\n\n '
imgs = []
filenames = filenames[:]
if do_shuffle:
np.random.shuffle(filenames)
if run_parallel:
pool = multiprocessing.Pool(num_workers)
imgs = pool.map((lambda filename: self.load_image_from_file(filename, shape)), filenames[:max_imgs])
imgs = [img for img in imgs if (img is not None)]
if (len(imgs) <= 1):
raise ValueError('You must have more than 1 image in each class to run TCAV.')
else:
for filename in filenames:
img = self.load_image_from_file(filename, shape)
if (img is not None):
imgs.append(img)
if (len(imgs) <= 1):
raise ValueError('You must have more than 1 image in each class to run TCAV.')
elif (len(imgs) >= max_imgs):
break
return np.array(imgs) |
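# Hedged usage sketch: 'act_gen' stands for an instance of the enclosing activation-generator class
# and the glob pattern is hypothetical.
import glob
filenames = glob.glob('concepts/striped/*.jpg')
imgs = act_gen.load_images_from_files(filenames, max_imgs=100, do_shuffle=False,
                                      run_parallel=False, shape=(299, 299))
# imgs.shape == (n, 299, 299, 3) with pixel values scaled to [0, 1]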
def _check_vars():
'\n Check validity of environment variables.\n\n Look out for any environment variables that start with "MODIN_" prefix\n that are unknown - they might be a typo, so warn a user.\n '
valid_names = {obj.varname for obj in globals().values() if (isinstance(obj, type) and issubclass(obj, EnvironmentVariable) and (not obj.is_abstract))}
found_names = {name for name in os.environ if name.startswith('MODIN_')}
unknown = (found_names - valid_names)
if unknown:
warnings.warn((f"Found unknown environment variable{('s' if (len(unknown) > 1) else '')}, please check {('their' if (len(unknown) > 1) else 'its')} spelling: " + ', '.join(sorted(unknown)))) | 2,108,388,337,274,259,200 | Check validity of environment variables.
Look out for any environment variables that start with "MODIN_" prefix
that are unknown - they might be a typo, so warn a user. | modin/config/envvars.py | _check_vars | atomicai/modin | python | def _check_vars():
'\n Check validity of environment variables.\n\n Look out for any environment variables that start with "MODIN_" prefix\n that are unknown - they might be a typo, so warn a user.\n '
valid_names = {obj.varname for obj in globals().values() if (isinstance(obj, type) and issubclass(obj, EnvironmentVariable) and (not obj.is_abstract))}
found_names = {name for name in os.environ if name.startswith('MODIN_')}
unknown = (found_names - valid_names)
if unknown:
warnings.warn((f"Found unknown environment variable{('s' if (len(unknown) > 1) else '')}, please check {('their' if (len(unknown) > 1) else 'its')} spelling: " + ', '.join(sorted(unknown))))
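# The check only warns, so a misspelled variable is easy to reproduce (the name below is deliberately wrong):
import os, warnings
os.environ['MODIN_ENGIN'] = 'Ray'  # typo for MODIN_ENGINE
with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter('always')
    _check_vars()
# caught[0].message names the unknown MODIN_ENGIN variable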
@classmethod
def _get_raw_from_config(cls) -> str:
'\n Read the value from environment variable.\n\n Returns\n -------\n str\n Config raw value.\n\n Raises\n ------\n KeyError\n If value is absent.\n '
return os.environ[cls.varname] | -6,251,456,654,797,590,000 | Read the value from environment variable.
Returns
-------
str
Config raw value.
Raises
------
KeyError
If value is absent. | modin/config/envvars.py | _get_raw_from_config | atomicai/modin | python | @classmethod
def _get_raw_from_config(cls) -> str:
'\n Read the value from environment variable.\n\n Returns\n -------\n str\n Config raw value.\n\n Raises\n ------\n KeyError\n If value is absent.\n '
return os.environ[cls.varname] |
@classmethod
def get_help(cls) -> str:
'\n Generate user-presentable help for the config.\n\n Returns\n -------\n str\n '
help = f'''{cls.varname}: {dedent((cls.__doc__ or 'Unknown')).strip()}
Provide {_TYPE_PARAMS[cls.type].help}'''
if cls.choices:
help += f" (valid examples are: {', '.join((str(c) for c in cls.choices))})"
return help | -6,810,046,508,520,344,000 | Generate user-presentable help for the config.
Returns
-------
str | modin/config/envvars.py | get_help | atomicai/modin | python | @classmethod
def get_help(cls) -> str:
'\n Generate user-presentable help for the config.\n\n Returns\n -------\n str\n '
help = f'''{cls.varname}: {dedent((cls.__doc__ or 'Unknown')).strip()}
Provide {_TYPE_PARAMS[cls.type].help}'''
if cls.choices:
help += f" (valid examples are: {', '.join((str(c) for c in cls.choices))})"
return help |
@classmethod
def _get_default(cls):
'\n Get default value of the config.\n\n Returns\n -------\n str\n '
if IsDebug.get():
return 'Python'
try:
import ray
except ImportError:
pass
else:
if (version.parse(ray.__version__) < version.parse('1.4.0')):
raise ImportError('Please `pip install modin[ray]` to install compatible Ray version.')
return 'Ray'
try:
import dask
import distributed
except ImportError:
pass
else:
if ((version.parse(dask.__version__) < version.parse('2.22.0')) or (version.parse(distributed.__version__) < version.parse('2.22.0'))):
raise ImportError('Please `pip install modin[dask]` to install compatible Dask version.')
return 'Dask'
try:
import omniscidbe
except ImportError:
try:
import dbe
except ImportError:
pass
else:
return 'Native'
else:
return 'Native'
raise ImportError('Please refer to installation documentation page to install an engine') | 6,933,771,059,849,099,000 | Get default value of the config.
Returns
-------
str | modin/config/envvars.py | _get_default | atomicai/modin | python | @classmethod
def _get_default(cls):
'\n Get default value of the config.\n\n Returns\n -------\n str\n '
if IsDebug.get():
return 'Python'
try:
import ray
except ImportError:
pass
else:
if (version.parse(ray.__version__) < version.parse('1.4.0')):
raise ImportError('Please `pip install modin[ray]` to install compatible Ray version.')
return 'Ray'
try:
import dask
import distributed
except ImportError:
pass
else:
if ((version.parse(dask.__version__) < version.parse('2.22.0')) or (version.parse(distributed.__version__) < version.parse('2.22.0'))):
raise ImportError('Please `pip install modin[dask]` to install compatible Dask version.')
return 'Dask'
try:
import omniscidbe
except ImportError:
try:
import dbe
except ImportError:
pass
else:
return 'Native'
else:
return 'Native'
raise ImportError('Please refer to installation documentation page to install an engine') |
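# Net effect: 'Python' under MODIN_DEBUG, otherwise the first importable engine in the order Ray,
# Dask, Native. Assuming this is the Engine parameter from modin.config, detection can be bypassed:
from modin.config import Engine
Engine.put('Dask')   # force Dask instead of autodetecting
print(Engine.get())  # 'Dask'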
@classmethod
def _get_default(cls):
'\n Get default value of the config.\n\n Returns\n -------\n int\n '
import multiprocessing
return multiprocessing.cpu_count() | 173,000,487,574,057,300 | Get default value of the config.
Returns
-------
int | modin/config/envvars.py | _get_default | atomicai/modin | python | @classmethod
def _get_default(cls):
'\n Get default value of the config.\n\n Returns\n -------\n int\n '
import multiprocessing
return multiprocessing.cpu_count() |
@classmethod
def _put(cls, value):
"\n Put specific value if NPartitions wasn't set by a user yet.\n\n Parameters\n ----------\n value : int\n Config value to set.\n\n Notes\n -----\n This method is used to set NPartitions from cluster resources internally\n and should not be called by a user.\n "
if (cls.get_value_source() == ValueSource.DEFAULT):
cls.put(value) | 2,939,723,670,533,166,600 | Put specific value if NPartitions wasn't set by a user yet.
Parameters
----------
value : int
Config value to set.
Notes
-----
This method is used to set NPartitions from cluster resources internally
and should not be called by a user. | modin/config/envvars.py | _put | atomicai/modin | python | @classmethod
def _put(cls, value):
"\n Put specific value if NPartitions wasn't set by a user yet.\n\n Parameters\n ----------\n value : int\n Config value to set.\n\n Notes\n -----\n This method is used to set NPartitions from cluster resources internally\n and should not be called by a user.\n "
if (cls.get_value_source() == ValueSource.DEFAULT):
cls.put(value) |
@classmethod
def _get_default(cls):
'\n Get default value of the config.\n\n Returns\n -------\n int\n '
if (Backend.get() == 'Cudf'):
return GpuCount.get()
else:
return CpuCount.get() | 888,611,286,913,522,700 | Get default value of the config.
Returns
-------
int | modin/config/envvars.py | _get_default | atomicai/modin | python | @classmethod
def _get_default(cls):
'\n Get default value of the config.\n\n Returns\n -------\n int\n '
if (Backend.get() == 'Cudf'):
return GpuCount.get()
else:
return CpuCount.get() |
@classmethod
def enable(cls):
'Enable ``ProgressBar`` feature.'
cls.put(True) | 1,718,077,012,427,152,000 | Enable ``ProgressBar`` feature. | modin/config/envvars.py | enable | atomicai/modin | python | @classmethod
def enable(cls):
cls.put(True) |
@classmethod
def disable(cls):
'Disable ``ProgressBar`` feature.'
cls.put(False) | -3,469,939,319,755,450,000 | Disable ``ProgressBar`` feature. | modin/config/envvars.py | disable | atomicai/modin | python | @classmethod
def disable(cls):
cls.put(False) |
@classmethod
def put(cls, value):
'\n Set ``ProgressBar`` value only if synchronous benchmarking is disabled.\n\n Parameters\n ----------\n value : bool\n Config value to set.\n '
if (value and BenchmarkMode.get()):
raise ValueError("ProgressBar isn't compatible with BenchmarkMode")
super().put(value) | 8,472,071,341,530,053,000 | Set ``ProgressBar`` value only if synchronous benchmarking is disabled.
Parameters
----------
value : bool
Config value to set. | modin/config/envvars.py | put | atomicai/modin | python | @classmethod
def put(cls, value):
'\n Set ``ProgressBar`` value only if synchronous benchmarking is disabled.\n\n Parameters\n ----------\n value : bool\n Config value to set.\n '
if (value and BenchmarkMode.get()):
raise ValueError("ProgressBar isn't compatible with BenchmarkMode")
super().put(value) |
@classmethod
def put(cls, value):
'\n Set ``BenchmarkMode`` value only if progress bar feature is disabled.\n\n Parameters\n ----------\n value : bool\n Config value to set.\n '
if (value and ProgressBar.get()):
raise ValueError("BenchmarkMode isn't compatible with ProgressBar")
super().put(value) | -6,901,263,747,557,031,000 | Set ``BenchmarkMode`` value only if progress bar feature is disabled.
Parameters
----------
value : bool
Config value to set. | modin/config/envvars.py | put | atomicai/modin | python | @classmethod
def put(cls, value):
'\n Set ``BenchmarkMode`` value only if progress bar feature is disabled.\n\n Parameters\n ----------\n value : bool\n Config value to set.\n '
if (value and ProgressBar.get()):
raise ValueError("BenchmarkMode isn't compatible with ProgressBar")
super().put(value) |
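# The two put() overrides enforce the exclusion in both directions; assuming these are the
# ProgressBar and BenchmarkMode parameters exposed by modin.config:
from modin.config import BenchmarkMode, ProgressBar
ProgressBar.enable()
BenchmarkMode.put(True)  # raises ValueError: BenchmarkMode isn't compatible with ProgressBar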
@classmethod
def get(self):
'\n Get the resulted command-line options.\n\n Decode and merge specified command-line options with the default one.\n\n Returns\n -------\n dict\n Decoded and verified config value.\n '
custom_parameters = super().get()
result = self.default.copy()
result.update({key.replace('-', '_'): value for (key, value) in custom_parameters.items()})
return result | 4,546,177,453,802,994,700 | Get the resulted command-line options.
Decode and merge specified command-line options with the default one.
Returns
-------
dict
Decoded and verified config value. | modin/config/envvars.py | get | atomicai/modin | python | @classmethod
def get(self):
'\n Get the resulted command-line options.\n\n Decode and merge specified command-line options with the default one.\n\n Returns\n -------\n dict\n Decoded and verified config value.\n '
custom_parameters = super().get()
result = self.default.copy()
result.update({key.replace('-', '_'): value for (key, value) in custom_parameters.items()})
return result |
@query_params('interval', 'system_api_version', 'system_id')
def bulk(self, body, doc_type=None, params=None, headers=None):
"\n Used by the monitoring features to send monitoring data.\n\n `<https://www.elastic.co/guide/en/elasticsearch/reference/7.10/monitor-elasticsearch-cluster.html>`_\n\n .. warning::\n\n This API is **experimental** so may include breaking changes\n or be removed in a future version\n\n :arg body: The operation definition and data (action-data\n pairs), separated by newlines\n :arg doc_type: Default document type for items which don't\n provide one\n :arg interval: Collection interval (e.g., '10s' or '10000ms') of\n the payload\n :arg system_api_version: API Version of the monitored system\n :arg system_id: Identifier of the monitored system\n "
if (body in SKIP_IN_PATH):
raise ValueError("Empty value passed for a required argument 'body'.")
body = _bulk_body(self.transport.serializer, body)
return self.transport.perform_request('POST', _make_path('_monitoring', doc_type, 'bulk'), params=params, headers=headers, body=body) | 5,742,135,621,379,876,000 | Used by the monitoring features to send monitoring data.
`<https://www.elastic.co/guide/en/elasticsearch/reference/7.10/monitor-elasticsearch-cluster.html>`_
.. warning::
This API is **experimental** so may include breaking changes
or be removed in a future version
:arg body: The operation definition and data (action-data
pairs), separated by newlines
:arg doc_type: Default document type for items which don't
provide one
:arg interval: Collection interval (e.g., '10s' or '10000ms') of
the payload
:arg system_api_version: API Version of the monitored system
:arg system_id: Identifier of the monitored system | AB/lambda/elasticindex/elasticsearch/client/monitoring.py | bulk | PatrickJD/AWS | python | @query_params('interval', 'system_api_version', 'system_id')
def bulk(self, body, doc_type=None, params=None, headers=None):
"\n Used by the monitoring features to send monitoring data.\n\n `<https://www.elastic.co/guide/en/elasticsearch/reference/7.10/monitor-elasticsearch-cluster.html>`_\n\n .. warning::\n\n This API is **experimental** so may include breaking changes\n or be removed in a future version\n\n :arg body: The operation definition and data (action-data\n pairs), separated by newlines\n :arg doc_type: Default document type for items which don't\n provide one\n :arg interval: Collection interval (e.g., '10s' or '10000ms') of\n the payload\n :arg system_api_version: API Version of the monitored system\n :arg system_id: Identifier of the monitored system\n "
if (body in SKIP_IN_PATH):
raise ValueError("Empty value passed for a required argument 'body'.")
body = _bulk_body(self.transport.serializer, body)
return self.transport.perform_request('POST', _make_path('_monitoring', doc_type, 'bulk'), params=params, headers=headers, body=body) |
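# Hedged sketch of calling the monitoring bulk endpoint through an elasticsearch-py client; the
# endpoint and documents are illustrative, and the decorated query parameters are passed as kwargs.
from elasticsearch import Elasticsearch
es = Elasticsearch('http://localhost:9200')
body = [
    {'index': {}},
    {'node_stats': {'open_file_descriptors': 123}},
]
es.monitoring.bulk(body=body, system_id='kibana', system_api_version='7', interval='10s')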
def __init__(self, *args, **kwargs):
'\n Create a new Polar Axes for a polar plot.\n\n The following optional kwargs are supported:\n\n - *resolution*: The number of points of interpolation between\n each pair of data points. Set to 1 to disable\n interpolation.\n '
self.resolution = kwargs.pop('resolution', None)
if (self.resolution not in (None, 1)):
warnings.warn('The resolution kwarg to Polar plots is now ignored.\nIf you need to interpolate data points, consider running\ncbook.simple_linear_interpolation on the data before passing to matplotlib.')
Axes.__init__(self, *args, **kwargs)
self.set_aspect('equal', adjustable='box', anchor='C')
self.cla() | -3,830,649,146,554,316,000 | Create a new Polar Axes for a polar plot.
The following optional kwargs are supported:
- *resolution*: The number of points of interpolation between
each pair of data points. Set to 1 to disable
interpolation. | lib/python2.7/matplotlib/projections/polar.py | __init__ | ashley8jain/IITD-complaint-system-web | python | def __init__(self, *args, **kwargs):
'\n Create a new Polar Axes for a polar plot.\n\n The following optional kwargs are supported:\n\n - *resolution*: The number of points of interpolation between\n each pair of data points. Set to 1 to disable\n interpolation.\n '
self.resolution = kwargs.pop('resolution', None)
if (self.resolution not in (None, 1)):
warnings.warn('The resolution kwarg to Polar plots is now ignored.\nIf you need to interpolate data points, consider running\ncbook.simple_linear_interpolation on the data before passing to matplotlib.')
Axes.__init__(self, *args, **kwargs)
self.set_aspect('equal', adjustable='box', anchor='C')
self.cla() |
def _init_axis(self):
"move this out of __init__ because non-separable axes don't use it"
self.xaxis = maxis.XAxis(self)
self.yaxis = maxis.YAxis(self)
self._update_transScale() | 755,314,804,009,636,000 | move this out of __init__ because non-separable axes don't use it | lib/python2.7/matplotlib/projections/polar.py | _init_axis | ashley8jain/IITD-complaint-system-web | python | def _init_axis(self):
self.xaxis = maxis.XAxis(self)
self.yaxis = maxis.YAxis(self)
self._update_transScale() |
def set_theta_offset(self, offset):
'\n Set the offset for the location of 0 in radians.\n '
self._theta_offset = offset | -24,000,136,632,674,060 | Set the offset for the location of 0 in radians. | lib/python2.7/matplotlib/projections/polar.py | set_theta_offset | ashley8jain/IITD-complaint-system-web | python | def set_theta_offset(self, offset):
'\n \n '
self._theta_offset = offset |
def get_theta_offset(self):
'\n Get the offset for the location of 0 in radians.\n '
return self._theta_offset | -2,351,204,604,870,762,000 | Get the offset for the location of 0 in radians. | lib/python2.7/matplotlib/projections/polar.py | get_theta_offset | ashley8jain/IITD-complaint-system-web | python | def get_theta_offset(self):
'\n \n '
return self._theta_offset |
def set_theta_zero_location(self, loc):
'\n Sets the location of theta\'s zero. (Calls set_theta_offset\n with the correct value in radians under the hood.)\n\n May be one of "N", "NW", "W", "SW", "S", "SE", "E", or "NE".\n '
mapping = {'N': (np.pi * 0.5), 'NW': (np.pi * 0.75), 'W': np.pi, 'SW': (np.pi * 1.25), 'S': (np.pi * 1.5), 'SE': (np.pi * 1.75), 'E': 0, 'NE': (np.pi * 0.25)}
return self.set_theta_offset(mapping[loc]) | -2,454,019,984,045,059,600 | Sets the location of theta's zero. (Calls set_theta_offset
with the correct value in radians under the hood.)
May be one of "N", "NW", "W", "SW", "S", "SE", "E", or "NE". | lib/python2.7/matplotlib/projections/polar.py | set_theta_zero_location | ashley8jain/IITD-complaint-system-web | python | def set_theta_zero_location(self, loc):
'\n Sets the location of theta\'s zero. (Calls set_theta_offset\n with the correct value in radians under the hood.)\n\n May be one of "N", "NW", "W", "SW", "S", "SE", "E", or "NE".\n '
mapping = {'N': (np.pi * 0.5), 'NW': (np.pi * 0.75), 'W': np.pi, 'SW': (np.pi * 1.25), 'S': (np.pi * 1.5), 'SE': (np.pi * 1.75), 'E': 0, 'NE': (np.pi * 0.25)}
return self.set_theta_offset(mapping[loc]) |
def set_theta_direction(self, direction):
'\n Set the direction in which theta increases.\n\n clockwise, -1:\n Theta increases in the clockwise direction\n\n counterclockwise, anticlockwise, 1:\n Theta increases in the counterclockwise direction\n '
if (direction in ('clockwise',)):
self._direction = (- 1)
elif (direction in ('counterclockwise', 'anticlockwise')):
self._direction = 1
elif (direction in (1, (- 1))):
self._direction = direction
else:
raise ValueError('direction must be 1, -1, clockwise or counterclockwise') | 6,125,434,307,476,871,000 | Set the direction in which theta increases.
clockwise, -1:
Theta increases in the clockwise direction
counterclockwise, anticlockwise, 1:
Theta increases in the counterclockwise direction | lib/python2.7/matplotlib/projections/polar.py | set_theta_direction | ashley8jain/IITD-complaint-system-web | python | def set_theta_direction(self, direction):
'\n Set the direction in which theta increases.\n\n clockwise, -1:\n Theta increases in the clockwise direction\n\n counterclockwise, anticlockwise, 1:\n Theta increases in the counterclockwise direction\n '
if (direction in ('clockwise',)):
self._direction = (- 1)
elif (direction in ('counterclockwise', 'anticlockwise')):
self._direction = 1
elif (direction in (1, (- 1))):
self._direction = direction
else:
raise ValueError('direction must be 1, -1, clockwise or counterclockwise') |
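# Combined with set_theta_zero_location this gives a compass-style plot; short matplotlib sketch:
import matplotlib.pyplot as plt
ax = plt.subplot(111, polar=True)
ax.set_theta_zero_location('N')  # 0 radians at the top
ax.set_theta_direction(-1)       # theta grows clockwise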
def get_theta_direction(self):
'\n Get the direction in which theta increases.\n\n -1:\n Theta increases in the clockwise direction\n\n 1:\n Theta increases in the counterclockwise direction\n '
return self._direction | -7,715,639,299,182,240,000 | Get the direction in which theta increases.
-1:
Theta increases in the clockwise direction
1:
Theta increases in the counterclockwise direction | lib/python2.7/matplotlib/projections/polar.py | get_theta_direction | ashley8jain/IITD-complaint-system-web | python | def get_theta_direction(self):
'\n Get the direction in which theta increases.\n\n -1:\n Theta increases in the clockwise direction\n\n 1:\n Theta increases in the counterclockwise direction\n '
return self._direction |
@docstring.dedent_interpd
def set_thetagrids(self, angles, labels=None, frac=None, fmt=None, **kwargs):
'\n Set the angles at which to place the theta grids (these\n gridlines are equal along the theta dimension). *angles* is in\n degrees.\n\n *labels*, if not None, is a ``len(angles)`` list of strings of\n the labels to use at each angle.\n\n If *labels* is None, the labels will be ``fmt %% angle``\n\n *frac* is the fraction of the polar axes radius at which to\n place the label (1 is the edge). Eg. 1.05 is outside the axes\n and 0.95 is inside the axes.\n\n Return value is a list of tuples (*line*, *label*), where\n *line* is :class:`~matplotlib.lines.Line2D` instances and the\n *label* is :class:`~matplotlib.text.Text` instances.\n\n kwargs are optional text properties for the labels:\n\n %(Text)s\n\n ACCEPTS: sequence of floats\n '
angles = np.asarray(angles, np.float_)
self.set_xticks((angles * (np.pi / 180.0)))
if (labels is not None):
self.set_xticklabels(labels)
elif (fmt is not None):
self.xaxis.set_major_formatter(FormatStrFormatter(fmt))
if (frac is not None):
self._theta_label1_position.clear().translate(0.0, frac)
self._theta_label2_position.clear().translate(0.0, (1.0 / frac))
for t in self.xaxis.get_ticklabels():
t.update(kwargs)
return (self.xaxis.get_ticklines(), self.xaxis.get_ticklabels()) | -2,763,533,836,684,350,500 | Set the angles at which to place the theta grids (these
gridlines are equal along the theta dimension). *angles* is in
degrees.
*labels*, if not None, is a ``len(angles)`` list of strings of
the labels to use at each angle.
If *labels* is None, the labels will be ``fmt %% angle``
*frac* is the fraction of the polar axes radius at which to
place the label (1 is the edge). E.g., 1.05 is outside the axes
and 0.95 is inside the axes.
Return value is a list of tuples (*line*, *label*), where
*line* is a :class:`~matplotlib.lines.Line2D` instance and the
*label* is a :class:`~matplotlib.text.Text` instance.
kwargs are optional text properties for the labels:
%(Text)s
ACCEPTS: sequence of floats | lib/python2.7/matplotlib/projections/polar.py | set_thetagrids | ashley8jain/IITD-complaint-system-web | python | @docstring.dedent_interpd
def set_thetagrids(self, angles, labels=None, frac=None, fmt=None, **kwargs):
'\n Set the angles at which to place the theta grids (these\n gridlines are equal along the theta dimension). *angles* is in\n degrees.\n\n *labels*, if not None, is a ``len(angles)`` list of strings of\n the labels to use at each angle.\n\n If *labels* is None, the labels will be ``fmt %% angle``\n\n *frac* is the fraction of the polar axes radius at which to\n place the label (1 is the edge). Eg. 1.05 is outside the axes\n and 0.95 is inside the axes.\n\n Return value is a list of tuples (*line*, *label*), where\n *line* is :class:`~matplotlib.lines.Line2D` instances and the\n *label* is :class:`~matplotlib.text.Text` instances.\n\n kwargs are optional text properties for the labels:\n\n %(Text)s\n\n ACCEPTS: sequence of floats\n '
angles = np.asarray(angles, np.float_)
self.set_xticks((angles * (np.pi / 180.0)))
if (labels is not None):
self.set_xticklabels(labels)
elif (fmt is not None):
self.xaxis.set_major_formatter(FormatStrFormatter(fmt))
if (frac is not None):
self._theta_label1_position.clear().translate(0.0, frac)
self._theta_label2_position.clear().translate(0.0, (1.0 / frac))
for t in self.xaxis.get_ticklabels():
t.update(kwargs)
return (self.xaxis.get_ticklines(), self.xaxis.get_ticklabels()) |
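An illustrative call of the method above, assuming the polar axes `ax` from the previous sketch: labelled angular gridlines every 45 degrees, drawn slightly outside the axes via *frac*:
lines, labels = ax.set_thetagrids(range(0, 360, 45), frac=1.05)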
@docstring.dedent_interpd
def set_rgrids(self, radii, labels=None, angle=None, fmt=None, **kwargs):
'\n Set the radial locations and labels of the *r* grids.\n\n The labels will appear at radial distances *radii* at the\n given *angle* in degrees.\n\n *labels*, if not None, is a ``len(radii)`` list of strings of the\n labels to use at each radius.\n\n If *labels* is None, the built-in formatter will be used.\n\n Return value is a list of tuples (*line*, *label*), where\n *line* is :class:`~matplotlib.lines.Line2D` instances and the\n *label* is :class:`~matplotlib.text.Text` instances.\n\n kwargs are optional text properties for the labels:\n\n %(Text)s\n\n ACCEPTS: sequence of floats\n '
radii = np.asarray(radii)
rmin = radii.min()
if (rmin <= 0):
raise ValueError('radial grids must be strictly positive')
self.set_yticks(radii)
if (labels is not None):
self.set_yticklabels(labels)
elif (fmt is not None):
self.yaxis.set_major_formatter(FormatStrFormatter(fmt))
if (angle is None):
angle = self._r_label_position.to_values()[4]
self._r_label_position._t = (angle, 0.0)
self._r_label_position.invalidate()
for t in self.yaxis.get_ticklabels():
t.update(kwargs)
return (self.yaxis.get_gridlines(), self.yaxis.get_ticklabels()) | 3,607,151,283,755,662,300 | Set the radial locations and labels of the *r* grids.
The labels will appear at radial distances *radii* at the
given *angle* in degrees.
*labels*, if not None, is a ``len(radii)`` list of strings of the
labels to use at each radius.
If *labels* is None, the built-in formatter will be used.
Return value is a list of tuples (*line*, *label*), where
*line* is a :class:`~matplotlib.lines.Line2D` instance and the
*label* is a :class:`~matplotlib.text.Text` instance.
kwargs are optional text properties for the labels:
%(Text)s
ACCEPTS: sequence of floats | lib/python2.7/matplotlib/projections/polar.py | set_rgrids | ashley8jain/IITD-complaint-system-web | python | @docstring.dedent_interpd
def set_rgrids(self, radii, labels=None, angle=None, fmt=None, **kwargs):
'\n Set the radial locations and labels of the *r* grids.\n\n The labels will appear at radial distances *radii* at the\n given *angle* in degrees.\n\n *labels*, if not None, is a ``len(radii)`` list of strings of the\n labels to use at each radius.\n\n If *labels* is None, the built-in formatter will be used.\n\n Return value is a list of tuples (*line*, *label*), where\n *line* is :class:`~matplotlib.lines.Line2D` instances and the\n *label* is :class:`~matplotlib.text.Text` instances.\n\n kwargs are optional text properties for the labels:\n\n %(Text)s\n\n ACCEPTS: sequence of floats\n '
radii = np.asarray(radii)
rmin = radii.min()
if (rmin <= 0):
raise ValueError('radial grids must be strictly positive')
self.set_yticks(radii)
if (labels is not None):
self.set_yticklabels(labels)
elif (fmt is not None):
self.yaxis.set_major_formatter(FormatStrFormatter(fmt))
if (angle is None):
angle = self._r_label_position.to_values()[4]
self._r_label_position._t = (angle, 0.0)
self._r_label_position.invalidate()
for t in self.yaxis.get_ticklabels():
t.update(kwargs)
return (self.yaxis.get_gridlines(), self.yaxis.get_ticklabels()) |
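And correspondingly for the radial gridlines (the radii must be strictly positive, as the check above enforces); *angle* chooses where the tick labels are drawn:
lines, labels = ax.set_rgrids([0.25, 0.5, 0.75, 1.0], angle=22.5, fmt='%.2f')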
def format_coord(self, theta, r):
'\n Return a format string formatting the coordinate using Unicode\n characters.\n '
theta /= math.pi
return (u'θ=%0.3fπ (%0.3f°), r=%0.3f' % (theta, (theta * 180.0), r)) | -1,078,132,295,707,459,000 | Return a format string formatting the coordinate using Unicode
characters. | lib/python2.7/matplotlib/projections/polar.py | format_coord | ashley8jain/IITD-complaint-system-web | python | def format_coord(self, theta, r):
'\n Return a format string formatting the coordinate using Unicode\n characters.\n '
theta /= math.pi
return (u'θ=%0.3fπ (%0.3f°), r=%0.3f' % (theta, (theta * 180.0), r)) |
def get_data_ratio(self):
'\n Return the aspect ratio of the data itself. For a polar plot,\n this should always be 1.0\n '
return 1.0 | -9,119,961,156,597,160,000 | Return the aspect ratio of the data itself. For a polar plot,
this should always be 1.0 | lib/python2.7/matplotlib/projections/polar.py | get_data_ratio | ashley8jain/IITD-complaint-system-web | python | def get_data_ratio(self):
'\n Return the aspect ratio of the data itself. For a polar plot,\n this should always be 1.0\n '
return 1.0 |
def can_zoom(self):
'\n Return *True* if this axes supports the zoom box button functionality.\n\n Polar axes do not support zoom boxes.\n '
return False | -1,113,074,475,683,004,900 | Return *True* if this axes supports the zoom box button functionality.
Polar axes do not support zoom boxes. | lib/python2.7/matplotlib/projections/polar.py | can_zoom | ashley8jain/IITD-complaint-system-web | python | def can_zoom(self):
'\n Return *True* if this axes supports the zoom box button functionality.\n\n Polar axes do not support zoom boxes.\n '
return False |
def can_pan(self):
'\n Return *True* if this axes supports the pan/zoom button functionality.\n\n For polar axes, this is slightly misleading. Both panning and\n zooming are performed by the same button. Panning is performed\n in azimuth while zooming is done along the radial.\n '
return True | -7,162,355,877,948,546,000 | Return *True* if this axes supports the pan/zoom button functionality.
For polar axes, this is slightly misleading. Both panning and
zooming are performed by the same button. Panning is performed
in azimuth while zooming is done along the radial. | lib/python2.7/matplotlib/projections/polar.py | can_pan | ashley8jain/IITD-complaint-system-web | python | def can_pan(self):
'\n Return *True* if this axes supports the pan/zoom button functionality.\n\n For polar axes, this is slightly misleading. Both panning and\n zooming are performed by the same button. Panning is performed\n in azimuth while zooming is done along the radial.\n '
return True |
def __init__(self, scale_transform, limits):
'\n *limits* is the view limit of the data. The only part of\n its bounds that is used is ymax (for the radius maximum).\n The theta range is always fixed to (0, 2pi).\n '
Affine2DBase.__init__(self)
self._scale_transform = scale_transform
self._limits = limits
self.set_children(scale_transform, limits)
self._mtx = None | 3,391,083,076,465,484,300 | *limits* is the view limit of the data. The only part of
its bounds that is used is ymax (for the radius maximum).
The theta range is always fixed to (0, 2pi). | lib/python2.7/matplotlib/projections/polar.py | __init__ | ashley8jain/IITD-complaint-system-web | python | def __init__(self, scale_transform, limits):
'\n *limits* is the view limit of the data. The only part of\n its bounds that is used is ymax (for the radius maximum).\n The theta range is always fixed to (0, 2pi).\n '
Affine2DBase.__init__(self)
self._scale_transform = scale_transform
self._limits = limits
self.set_children(scale_transform, limits)
self._mtx = None |
def handle_template(self, template, subdir):
"\n Determines where the app or project templates are.\n Use django.__path__[0] as the default because we don't\n know into which directory Django has been installed.\n "
if (template is None):
return path.join(django.__path__[0], 'conf', subdir)
else:
if template.startswith('file://'):
template = template[7:]
expanded_template = path.expanduser(template)
expanded_template = path.normpath(expanded_template)
if path.isdir(expanded_template):
return expanded_template
if self.is_url(template):
absolute_path = self.download(template)
else:
absolute_path = path.abspath(expanded_template)
if path.exists(absolute_path):
return self.extract(absolute_path)
raise CommandError(("couldn't handle %s template %s." % (self.app_or_project, template))) | 5,342,258,727,169,901,000 | Determines where the app or project templates are.
Use django.__path__[0] as the default because we don't
know into which directory Django has been installed. | django/core/management/templates.py | handle_template | LuanP/django | python | def handle_template(self, template, subdir):
"\n Determines where the app or project templates are.\n Use django.__path__[0] as the default because we don't\n know into which directory Django has been installed.\n "
if (template is None):
return path.join(django.__path__[0], 'conf', subdir)
else:
if template.startswith('file://'):
template = template[7:]
expanded_template = path.expanduser(template)
expanded_template = path.normpath(expanded_template)
if path.isdir(expanded_template):
return expanded_template
if self.is_url(template):
absolute_path = self.download(template)
else:
absolute_path = path.abspath(expanded_template)
if path.exists(absolute_path):
return self.extract(absolute_path)
raise CommandError(("couldn't handle %s template %s." % (self.app_or_project, template))) |
def download(self, url):
'\n Downloads the given URL and returns the file name.\n '
def cleanup_url(url):
tmp = url.rstrip('/')
filename = tmp.split('/')[(- 1)]
if url.endswith('/'):
display_url = (tmp + '/')
else:
display_url = url
return (filename, display_url)
prefix = ('django_%s_template_' % self.app_or_project)
tempdir = tempfile.mkdtemp(prefix=prefix, suffix='_download')
self.paths_to_remove.append(tempdir)
(filename, display_url) = cleanup_url(url)
if (self.verbosity >= 2):
self.stdout.write(('Downloading %s\n' % display_url))
try:
(the_path, info) = urlretrieve(url, path.join(tempdir, filename))
except IOError as e:
raise CommandError(("couldn't download URL %s to %s: %s" % (url, filename, e)))
used_name = the_path.split('/')[(- 1)]
content_disposition = info.get('content-disposition')
if content_disposition:
(_, params) = cgi.parse_header(content_disposition)
guessed_filename = (params.get('filename') or used_name)
else:
guessed_filename = used_name
ext = self.splitext(guessed_filename)[1]
content_type = info.get('content-type')
if ((not ext) and content_type):
ext = mimetypes.guess_extension(content_type)
if ext:
guessed_filename += ext
if (used_name != guessed_filename):
guessed_path = path.join(tempdir, guessed_filename)
shutil.move(the_path, guessed_path)
return guessed_path
return the_path | -5,480,616,050,715,580,000 | Downloads the given URL and returns the file name. | django/core/management/templates.py | download | LuanP/django | python | def download(self, url):
'\n \n '
def cleanup_url(url):
tmp = url.rstrip('/')
filename = tmp.split('/')[(- 1)]
if url.endswith('/'):
display_url = (tmp + '/')
else:
display_url = url
return (filename, display_url)
prefix = ('django_%s_template_' % self.app_or_project)
tempdir = tempfile.mkdtemp(prefix=prefix, suffix='_download')
self.paths_to_remove.append(tempdir)
(filename, display_url) = cleanup_url(url)
if (self.verbosity >= 2):
self.stdout.write(('Downloading %s\n' % display_url))
try:
(the_path, info) = urlretrieve(url, path.join(tempdir, filename))
except IOError as e:
raise CommandError(("couldn't download URL %s to %s: %s" % (url, filename, e)))
used_name = the_path.split('/')[(- 1)]
content_disposition = info.get('content-disposition')
if content_disposition:
(_, params) = cgi.parse_header(content_disposition)
guessed_filename = (params.get('filename') or used_name)
else:
guessed_filename = used_name
ext = self.splitext(guessed_filename)[1]
content_type = info.get('content-type')
if ((not ext) and content_type):
ext = mimetypes.guess_extension(content_type)
if ext:
guessed_filename += ext
if (used_name != guessed_filename):
guessed_path = path.join(tempdir, guessed_filename)
shutil.move(the_path, guessed_path)
return guessed_path
return the_path |
def splitext(self, the_path):
'\n Like os.path.splitext, but takes off .tar, too\n '
(base, ext) = posixpath.splitext(the_path)
if base.lower().endswith('.tar'):
ext = (base[(- 4):] + ext)
base = base[:(- 4)]
return (base, ext) | 7,964,358,720,822,540,000 | Like os.path.splitext, but takes off .tar, too | django/core/management/templates.py | splitext | LuanP/django | python | def splitext(self, the_path):
'\n \n '
(base, ext) = posixpath.splitext(the_path)
if base.lower().endswith('.tar'):
ext = (base[(- 4):] + ext)
base = base[:(- 4)]
return (base, ext) |
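A standalone sketch of the same '.tar'-aware split, with a made-up helper name, to make the return value concrete:
import posixpath
def tar_aware_splitext(the_path):
    # Like os.path.splitext, but keeps compound extensions such as '.tar.gz' together.
    base, ext = posixpath.splitext(the_path)
    if base.lower().endswith('.tar'):
        ext = base[-4:] + ext
        base = base[:-4]
    return base, ext
assert tar_aware_splitext('project_template.tar.gz') == ('project_template', '.tar.gz')
assert tar_aware_splitext('project_template.zip') == ('project_template', '.zip')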
def extract(self, filename):
'\n Extracts the given file to a temporarily and returns\n the path of the directory with the extracted content.\n '
prefix = ('django_%s_template_' % self.app_or_project)
tempdir = tempfile.mkdtemp(prefix=prefix, suffix='_extract')
self.paths_to_remove.append(tempdir)
if (self.verbosity >= 2):
self.stdout.write(('Extracting %s\n' % filename))
try:
archive.extract(filename, tempdir)
return tempdir
except (archive.ArchiveException, IOError) as e:
raise CommandError(("couldn't extract file %s to %s: %s" % (filename, tempdir, e))) | -5,862,873,927,603,246,000 | Extracts the given file to a temporarily and returns
the path of the directory with the extracted content. | django/core/management/templates.py | extract | LuanP/django | python | def extract(self, filename):
'\n Extracts the given file to a temporarily and returns\n the path of the directory with the extracted content.\n '
prefix = ('django_%s_template_' % self.app_or_project)
tempdir = tempfile.mkdtemp(prefix=prefix, suffix='_extract')
self.paths_to_remove.append(tempdir)
if (self.verbosity >= 2):
self.stdout.write(('Extracting %s\n' % filename))
try:
archive.extract(filename, tempdir)
return tempdir
except (archive.ArchiveException, IOError) as e:
raise CommandError(("couldn't extract file %s to %s: %s" % (filename, tempdir, e))) |
def is_url(self, template):
'\n Returns True if the name looks like a URL\n '
if (':' not in template):
return False
scheme = template.split(':', 1)[0].lower()
return (scheme in self.url_schemes) | -5,899,986,127,205,529,000 | Returns True if the name looks like a URL | django/core/management/templates.py | is_url | LuanP/django | python | def is_url(self, template):
'\n \n '
if (':' not in template):
return False
scheme = template.split(':', 1)[0].lower()
return (scheme in self.url_schemes) |
def make_writeable(self, filename):
'\n Make sure that the file is writeable.\n Useful if our source is read-only.\n '
if sys.platform.startswith('java'):
return
if (not os.access(filename, os.W_OK)):
st = os.stat(filename)
new_permissions = (stat.S_IMODE(st.st_mode) | stat.S_IWUSR)
os.chmod(filename, new_permissions) | -8,087,604,734,570,663,000 | Make sure that the file is writeable.
Useful if our source is read-only. | django/core/management/templates.py | make_writeable | LuanP/django | python | def make_writeable(self, filename):
'\n Make sure that the file is writeable.\n Useful if our source is read-only.\n '
if sys.platform.startswith('java'):
return
if (not os.access(filename, os.W_OK)):
st = os.stat(filename)
new_permissions = (stat.S_IMODE(st.st_mode) | stat.S_IWUSR)
os.chmod(filename, new_permissions) |
def parse(csvfilename):
"\n Reads CSV file named csvfilename, parses\n it's content and returns the data within\n the file as a list of lists.\n "
table = []
with open(csvfilename, 'r') as csvfile:
csvreader = csv.reader(csvfile, skipinitialspace=True)
for row in csvreader:
table.append(row)
return table | -1,531,225,135,421,307,000 | Reads CSV file named csvfilename, parses
its content and returns the data within
the file as a list of lists. | Rice-Python-Data-Analysis/week3/examples3_csvmodule.py | parse | Abu-Kaisar/Courses- | python | def parse(csvfilename):
"\n Reads CSV file named csvfilename, parses\n it's content and returns the data within\n the file as a list of lists.\n "
table = []
with open(csvfilename, 'r') as csvfile:
csvreader = csv.reader(csvfile, skipinitialspace=True)
for row in csvreader:
table.append(row)
return table |
def print_table(table):
'\n Print out table, which must be a list\n of lists, in a nicely formatted way.\n '
for row in table:
print('{:<19}'.format(row[0]), end='')
for col in row[1:]:
print('{:>4}'.format(col), end='')
print('', end='\n') | 5,542,860,294,133,022,000 | Print out table, which must be a list
of lists, in a nicely formatted way. | Rice-Python-Data-Analysis/week3/examples3_csvmodule.py | print_table | Abu-Kaisar/Courses- | python | def print_table(table):
'\n Print out table, which must be a list\n of lists, in a nicely formatted way.\n '
for row in table:
print('{:<19}'.format(row[0]), end='')
for col in row[1:]:
print('{:>4}'.format(col), end='')
print('', end='\n')
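A small end-to-end sketch using the two functions above; 'scores.csv' is a hypothetical file with a name column followed by numeric columns:
import csv
with open('scores.csv', 'w', newline='') as f:
    csv.writer(f).writerows([['Alice', 10, 9], ['Bob', 8, 12]])
table = parse('scores.csv')   # [['Alice', '10', '9'], ['Bob', '8', '12']]
print_table(table)            # name left-aligned in 19 columns, scores right-aligned in 4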
def collect(conf, conn):
'Collect ICD-XX-PCS procedures.\n '
URL = 'https://www.cms.gov/Medicare/Coding/ICD10/Downloads/2016-PCS-Long-Abbrev-Titles.zip'
FILE = 'icd10pcs_order_2016.txt'
VERSION = 'ICD-10-PCS'
LAST_UPDATED = '2015-10-01'
zip = requests.get(URL).content
file = zipfile.ZipFile(io.BytesIO(zip)).open(FILE)
count = 0
for line in file:
data = {'code': line[6:(6 + 7)].strip(), 'is_header': line[14:(14 + 1)].strip(), 'short_description': line[16:(16 + 60)].strip(), 'long_description': line[77:].strip(), 'version': VERSION, 'last_updated': LAST_UPDATED}
record = Record.create(URL, data)
record.write(conf, conn)
count += 1
if (not (count % 100)):
logger.info('Collected %s "%s" interventions', count, record.table) | -7,279,392,835,362,786,000 | Collect ICD-XX-PCS procedures. | collectors/icdpcs/collector.py | collect | almeidaah/collectors | python | def collect(conf, conn):
'\n '
URL = 'https://www.cms.gov/Medicare/Coding/ICD10/Downloads/2016-PCS-Long-Abbrev-Titles.zip'
FILE = 'icd10pcs_order_2016.txt'
VERSION = 'ICD-10-PCS'
LAST_UPDATED = '2015-10-01'
zip = requests.get(URL).content
file = zipfile.ZipFile(io.BytesIO(zip)).open(FILE)
count = 0
for line in file:
data = {'code': line[6:(6 + 7)].strip(), 'is_header': line[14:(14 + 1)].strip(), 'short_description': line[16:(16 + 60)].strip(), 'long_description': line[77:].strip(), 'version': VERSION, 'last_updated': LAST_UPDATED}
record = Record.create(URL, data)
record.write(conf, conn)
count += 1
if (not (count % 100)):
logger.info('Collected %s "%s" interventions', count, record.table) |
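The slicing above assumes a fixed-width layout; a hypothetical line built to that layout makes the column offsets explicit:
order, code, is_header = '00001', '0016070', '0'
short = 'Bypass Cereb Vent to Nasophar w Autol Sub, Open'
long = 'Bypass Cerebral Ventricle to Nasopharynx with Autologous Tissue Substitute, Open Approach'
line = f"{order} {code} {is_header} {short:<60} {long}"
assert line[6:6 + 7] == code                  # ICD-10-PCS code
assert line[14:14 + 1] == is_header           # header flag
assert line[16:16 + 60].strip() == short      # short description
assert line[77:].strip() == long              # long description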
@timed_pass(name='cire')
def cire(clusters, mode, sregistry, options, platform):
"\n Cross-iteration redundancies elimination.\n\n Parameters\n ----------\n cluster : Cluster\n Input Cluster, subject of the optimization pass.\n mode : str\n The transformation mode. Accepted: ['invariants', 'sops'].\n * 'invariants' is for sub-expressions that are invariant w.r.t. one or\n more Dimensions.\n * 'sops' stands for sums-of-products, that is redundancies are searched\n across all expressions in sum-of-product form.\n sregistry : SymbolRegistry\n The symbol registry, to create unique temporary names.\n options : dict\n The optimization options.\n Accepted: ['min-storage', 'cire-maxpar', 'cire-rotate', 'cire-maxalias'].\n * 'min-storage': if True, the pass will try to minimize the amount of\n storage introduced for the tensor temporaries. This might also reduce\n the operation count. On the other hand, this might affect fusion and\n therefore data locality. Defaults to False (legacy).\n * 'cire-maxpar': if True, privilege parallelism over working set size,\n that is the pass will try to create as many parallel loops as possible,\n even though this will require more space (Dimensions) for the temporaries.\n Defaults to False.\n * 'cire-rotate': if True, the pass will use modulo indexing for the\n outermost Dimension iterated over by the temporaries. This will sacrifice\n a parallel loop for a reduced working set size. Defaults to False (legacy).\n * 'cire-maxalias': if True, capture the largest redundancies. This will\n minimize the flop count while maximizing the number of tensor temporaries,\n thus increasing the working set size.\n platform : Platform\n The underlying platform. Used to optimize the shape of the introduced\n tensor symbols.\n\n Examples\n --------\n 1) 'invariants'. Here's an expensive expression invariant w.r.t. `t`\n\n t0 = (cos(a[x,y,z])*sin(b[x,y,z]))*c[t,x,y,z]\n\n which after CIRE becomes\n\n t1[x,y,z] = cos(a[x,y,z])*sin(b[x,y,z])\n t0 = t1[x,y,z]*c[t,x,y,z]\n\n 2) 'sops'. Below we see two expressions in sum-of-product form (in this\n case, the sum degenerates to a single product).\n\n t0 = 2.0*a[x,y,z]*b[x,y,z]\n t1 = 3.0*a[x,y,z+1]*b[x,y,z+1]\n\n CIRE detects that these two expressions are actually redundant and rewrites\n them as:\n\n t2[x,y,z] = a[x,y,z]*b[x,y,z]\n t0 = 2.0*t2[x,y,z]\n t1 = 3.0*t2[x,y,z+1]\n "
if (mode == 'invariants'):
space = ('inv-basic', 'inv-compound')
elif (mode in ('sops',)):
space = (mode,)
else:
assert False, ('Unknown CIRE mode `%s`' % mode)
processed = []
for c in clusters:
if (not c.is_dense):
processed.append(c)
continue
context = Context(c).process(clusters)
transformed = _cire(c, context, space, sregistry, options, platform)
processed.extend(transformed)
return processed | 3,623,312,860,309,811,700 | Cross-iteration redundancies elimination.
Parameters
----------
cluster : Cluster
Input Cluster, subject of the optimization pass.
mode : str
The transformation mode. Accepted: ['invariants', 'sops'].
* 'invariants' is for sub-expressions that are invariant w.r.t. one or
more Dimensions.
* 'sops' stands for sums-of-products, that is redundancies are searched
across all expressions in sum-of-product form.
sregistry : SymbolRegistry
The symbol registry, to create unique temporary names.
options : dict
The optimization options.
Accepted: ['min-storage', 'cire-maxpar', 'cire-rotate', 'cire-maxalias'].
* 'min-storage': if True, the pass will try to minimize the amount of
storage introduced for the tensor temporaries. This might also reduce
the operation count. On the other hand, this might affect fusion and
therefore data locality. Defaults to False (legacy).
* 'cire-maxpar': if True, privilege parallelism over working set size,
that is the pass will try to create as many parallel loops as possible,
even though this will require more space (Dimensions) for the temporaries.
Defaults to False.
* 'cire-rotate': if True, the pass will use modulo indexing for the
outermost Dimension iterated over by the temporaries. This will sacrifice
a parallel loop for a reduced working set size. Defaults to False (legacy).
* 'cire-maxalias': if True, capture the largest redundancies. This will
minimize the flop count while maximizing the number of tensor temporaries,
thus increasing the working set size.
platform : Platform
The underlying platform. Used to optimize the shape of the introduced
tensor symbols.
Examples
--------
1) 'invariants'. Here's an expensive expression invariant w.r.t. `t`
t0 = (cos(a[x,y,z])*sin(b[x,y,z]))*c[t,x,y,z]
which after CIRE becomes
t1[x,y,z] = cos(a[x,y,z])*sin(b[x,y,z])
t0 = t1[x,y,z]*c[t,x,y,z]
2) 'sops'. Below we see two expressions in sum-of-product form (in this
case, the sum degenerates to a single product).
t0 = 2.0*a[x,y,z]*b[x,y,z]
t1 = 3.0*a[x,y,z+1]*b[x,y,z+1]
CIRE detects that these two expressions are actually redundant and rewrites
them as:
t2[x,y,z] = a[x,y,z]*b[x,y,z]
t0 = 2.0*t2[x,y,z]
t1 = 3.0*t2[x,y,z+1] | devito/passes/clusters/aliases.py | cire | ccuetom/devito | python | @timed_pass(name='cire')
def cire(clusters, mode, sregistry, options, platform):
"\n Cross-iteration redundancies elimination.\n\n Parameters\n ----------\n cluster : Cluster\n Input Cluster, subject of the optimization pass.\n mode : str\n The transformation mode. Accepted: ['invariants', 'sops'].\n * 'invariants' is for sub-expressions that are invariant w.r.t. one or\n more Dimensions.\n * 'sops' stands for sums-of-products, that is redundancies are searched\n across all expressions in sum-of-product form.\n sregistry : SymbolRegistry\n The symbol registry, to create unique temporary names.\n options : dict\n The optimization options.\n Accepted: ['min-storage', 'cire-maxpar', 'cire-rotate', 'cire-maxalias'].\n * 'min-storage': if True, the pass will try to minimize the amount of\n storage introduced for the tensor temporaries. This might also reduce\n the operation count. On the other hand, this might affect fusion and\n therefore data locality. Defaults to False (legacy).\n * 'cire-maxpar': if True, privilege parallelism over working set size,\n that is the pass will try to create as many parallel loops as possible,\n even though this will require more space (Dimensions) for the temporaries.\n Defaults to False.\n * 'cire-rotate': if True, the pass will use modulo indexing for the\n outermost Dimension iterated over by the temporaries. This will sacrifice\n a parallel loop for a reduced working set size. Defaults to False (legacy).\n * 'cire-maxalias': if True, capture the largest redundancies. This will\n minimize the flop count while maximizing the number of tensor temporaries,\n thus increasing the working set size.\n platform : Platform\n The underlying platform. Used to optimize the shape of the introduced\n tensor symbols.\n\n Examples\n --------\n 1) 'invariants'. Here's an expensive expression invariant w.r.t. `t`\n\n t0 = (cos(a[x,y,z])*sin(b[x,y,z]))*c[t,x,y,z]\n\n which after CIRE becomes\n\n t1[x,y,z] = cos(a[x,y,z])*sin(b[x,y,z])\n t0 = t1[x,y,z]*c[t,x,y,z]\n\n 2) 'sops'. Below we see two expressions in sum-of-product form (in this\n case, the sum degenerates to a single product).\n\n t0 = 2.0*a[x,y,z]*b[x,y,z]\n t1 = 3.0*a[x,y,z+1]*b[x,y,z+1]\n\n CIRE detects that these two expressions are actually redundant and rewrites\n them as:\n\n t2[x,y,z] = a[x,y,z]*b[x,y,z]\n t0 = 2.0*t2[x,y,z]\n t1 = 3.0*t2[x,y,z+1]\n "
if (mode == 'invariants'):
space = ('inv-basic', 'inv-compound')
elif (mode in ('sops',)):
space = (mode,)
else:
assert False, ('Unknown CIRE mode `%s`' % mode)
processed = []
for c in clusters:
if (not c.is_dense):
processed.append(c)
continue
context = Context(c).process(clusters)
transformed = _cire(c, context, space, sregistry, options, platform)
processed.extend(transformed)
return processed |
def collect(extracted, ispace, min_storage):
'\n Find groups of aliasing expressions.\n\n We shall introduce the following (loose) terminology:\n\n * A ``terminal`` is the leaf of a mathematical operation. Terminals\n can be numbers (n), literals (l), or Indexeds (I).\n * ``R`` is the relaxation operator := ``R(n) = n``, ``R(l) = l``,\n ``R(I) = J``, where ``J`` has the same base as ``I`` but with all\n offsets stripped away. For example, ``R(a[i+2,j-1]) = a[i,j]``.\n * A ``relaxed expression`` is an expression in which all of the\n terminals are relaxed.\n\n Now we define the concept of aliasing. We say that an expression A\n aliases an expression B if:\n\n * ``R(A) == R(B)``\n * all pairwise Indexeds in A and B access memory locations at a\n fixed constant distance along each Dimension.\n\n For example, consider the following expressions:\n\n * a[i+1] + b[i+1]\n * a[i+1] + b[j+1]\n * a[i] + c[i]\n * a[i+2] - b[i+2]\n * a[i+2] + b[i]\n * a[i-1] + b[i-1]\n\n Out of the expressions above, the following alias to `a[i] + b[i]`:\n\n * a[i+1] + b[i+1] : same operands and operations, distance along i: 1\n * a[i-1] + b[i-1] : same operands and operations, distance along i: -1\n\n Whereas the following do not:\n\n * a[i+1] + b[j+1] : because at least one index differs\n * a[i] + c[i] : because at least one of the operands differs\n * a[i+2] - b[i+2] : because at least one operation differs\n * a[i+2] + b[i] : because the distances along ``i`` differ (+2 and +0)\n '
found = []
for expr in extracted:
assert (not expr.is_Equality)
indexeds = retrieve_indexed(expr)
bases = []
offsets = []
for i in indexeds:
ii = IterationInstance(i)
if ii.is_irregular:
break
base = []
offset = []
for (e, ai) in zip(ii, ii.aindices):
if q_constant(e):
base.append(e)
else:
base.append(ai)
offset.append((ai, (e - ai)))
bases.append(tuple(base))
offsets.append(LabeledVector(offset))
if ((not indexeds) or (len(bases) == len(indexeds))):
found.append(Candidate(expr, ispace, indexeds, bases, offsets))
mapper = OrderedDict()
unseen = list(found)
while unseen:
c = unseen.pop(0)
group = [c]
for u in list(unseen):
if (not compare_ops(c.expr, u.expr)):
continue
if (not c.translated(u)):
continue
group.append(u)
unseen.remove(u)
group = Group(group)
if min_storage:
k = group.dimensions_translated
else:
k = group.dimensions
mapper.setdefault(k, []).append(group)
aliases = AliasMapper()
queue = list(mapper.values())
while queue:
groups = queue.pop(0)
while groups:
mapper = defaultdict(int)
for g in list(groups):
try:
mapper.update({d: max(mapper[d], v) for (d, v) in g.diameter.items()})
except ValueError:
groups.remove(g)
intervalss = {d: make_rotations_table(d, v) for (d, v) in mapper.items()}
mapper = {}
for (d, intervals) in intervalss.items():
impacted = [g for g in groups if (d in g.dimensions)]
for interval in list(intervals):
found = {g: g.find_rotation_distance(d, interval) for g in impacted}
if all(((distance is not None) for distance in found.values())):
mapper[interval] = found
break
if (len(mapper) == len(intervalss)):
break
smallest = len(min(groups, key=len))
fallback = groups
(groups, remainder) = split(groups, (lambda g: (len(g) > smallest)))
if groups:
queue.append(remainder)
elif (len(remainder) > 1):
queue.append(fallback[1:])
groups = [fallback.pop(0)]
else:
break
for g in groups:
c = g.pivot
distances = defaultdict(int, [(i.dim, v.get(g)) for (i, v) in mapper.items()])
offsets = [LabeledVector([(l, (v[l] + distances[l])) for l in v.labels]) for v in c.offsets]
subs = {i: i.function[[(l + v.fromlabel(l, 0)) for l in b]] for (i, b, v) in zip(c.indexeds, c.bases, offsets)}
alias = uxreplace(c.expr, subs)
aliaseds = [extracted[i.expr] for i in g]
distances = []
for i in g:
distance = [o.distance(v) for (o, v) in zip(i.offsets, offsets)]
distance = [(d, set(v)) for (d, v) in LabeledVector.transpose(*distance)]
distances.append(LabeledVector([(d, v.pop()) for (d, v) in distance]))
aliases.add(alias, list(mapper), aliaseds, distances)
return aliases | 4,766,641,733,024,113,000 | Find groups of aliasing expressions.
We shall introduce the following (loose) terminology:
* A ``terminal`` is the leaf of a mathematical operation. Terminals
can be numbers (n), literals (l), or Indexeds (I).
* ``R`` is the relaxation operator := ``R(n) = n``, ``R(l) = l``,
``R(I) = J``, where ``J`` has the same base as ``I`` but with all
offsets stripped away. For example, ``R(a[i+2,j-1]) = a[i,j]``.
* A ``relaxed expression`` is an expression in which all of the
terminals are relaxed.
Now we define the concept of aliasing. We say that an expression A
aliases an expression B if:
* ``R(A) == R(B)``
* all pairwise Indexeds in A and B access memory locations at a
fixed constant distance along each Dimension.
For example, consider the following expressions:
* a[i+1] + b[i+1]
* a[i+1] + b[j+1]
* a[i] + c[i]
* a[i+2] - b[i+2]
* a[i+2] + b[i]
* a[i-1] + b[i-1]
Out of the expressions above, the following alias to `a[i] + b[i]`:
* a[i+1] + b[i+1] : same operands and operations, distance along i: 1
* a[i-1] + b[i-1] : same operands and operations, distance along i: -1
Whereas the following do not:
* a[i+1] + b[j+1] : because at least one index differs
* a[i] + c[i] : because at least one of the operands differs
* a[i+2] - b[i+2] : because at least one operation differs
* a[i+2] + b[i] : because the distances along ``i`` differ (+2 and +0) | devito/passes/clusters/aliases.py | collect | ccuetom/devito | python | def collect(extracted, ispace, min_storage):
'\n Find groups of aliasing expressions.\n\n We shall introduce the following (loose) terminology:\n\n * A ``terminal`` is the leaf of a mathematical operation. Terminals\n can be numbers (n), literals (l), or Indexeds (I).\n * ``R`` is the relaxation operator := ``R(n) = n``, ``R(l) = l``,\n ``R(I) = J``, where ``J`` has the same base as ``I`` but with all\n offsets stripped away. For example, ``R(a[i+2,j-1]) = a[i,j]``.\n * A ``relaxed expression`` is an expression in which all of the\n terminals are relaxed.\n\n Now we define the concept of aliasing. We say that an expression A\n aliases an expression B if:\n\n * ``R(A) == R(B)``\n * all pairwise Indexeds in A and B access memory locations at a\n fixed constant distance along each Dimension.\n\n For example, consider the following expressions:\n\n * a[i+1] + b[i+1]\n * a[i+1] + b[j+1]\n * a[i] + c[i]\n * a[i+2] - b[i+2]\n * a[i+2] + b[i]\n * a[i-1] + b[i-1]\n\n Out of the expressions above, the following alias to `a[i] + b[i]`:\n\n * a[i+1] + b[i+1] : same operands and operations, distance along i: 1\n * a[i-1] + b[i-1] : same operands and operations, distance along i: -1\n\n Whereas the following do not:\n\n * a[i+1] + b[j+1] : because at least one index differs\n * a[i] + c[i] : because at least one of the operands differs\n * a[i+2] - b[i+2] : because at least one operation differs\n * a[i+2] + b[i] : because the distances along ``i`` differ (+2 and +0)\n '
found = []
for expr in extracted:
assert (not expr.is_Equality)
indexeds = retrieve_indexed(expr)
bases = []
offsets = []
for i in indexeds:
ii = IterationInstance(i)
if ii.is_irregular:
break
base = []
offset = []
for (e, ai) in zip(ii, ii.aindices):
if q_constant(e):
base.append(e)
else:
base.append(ai)
offset.append((ai, (e - ai)))
bases.append(tuple(base))
offsets.append(LabeledVector(offset))
if ((not indexeds) or (len(bases) == len(indexeds))):
found.append(Candidate(expr, ispace, indexeds, bases, offsets))
mapper = OrderedDict()
unseen = list(found)
while unseen:
c = unseen.pop(0)
group = [c]
for u in list(unseen):
if (not compare_ops(c.expr, u.expr)):
continue
if (not c.translated(u)):
continue
group.append(u)
unseen.remove(u)
group = Group(group)
if min_storage:
k = group.dimensions_translated
else:
k = group.dimensions
mapper.setdefault(k, []).append(group)
aliases = AliasMapper()
queue = list(mapper.values())
while queue:
groups = queue.pop(0)
while groups:
mapper = defaultdict(int)
for g in list(groups):
try:
mapper.update({d: max(mapper[d], v) for (d, v) in g.diameter.items()})
except ValueError:
groups.remove(g)
intervalss = {d: make_rotations_table(d, v) for (d, v) in mapper.items()}
mapper = {}
for (d, intervals) in intervalss.items():
impacted = [g for g in groups if (d in g.dimensions)]
for interval in list(intervals):
found = {g: g.find_rotation_distance(d, interval) for g in impacted}
if all(((distance is not None) for distance in found.values())):
mapper[interval] = found
break
if (len(mapper) == len(intervalss)):
break
smallest = len(min(groups, key=len))
fallback = groups
(groups, remainder) = split(groups, (lambda g: (len(g) > smallest)))
if groups:
queue.append(remainder)
elif (len(remainder) > 1):
queue.append(fallback[1:])
groups = [fallback.pop(0)]
else:
break
for g in groups:
c = g.pivot
distances = defaultdict(int, [(i.dim, v.get(g)) for (i, v) in mapper.items()])
offsets = [LabeledVector([(l, (v[l] + distances[l])) for l in v.labels]) for v in c.offsets]
subs = {i: i.function[[(l + v.fromlabel(l, 0)) for l in b]] for (i, b, v) in zip(c.indexeds, c.bases, offsets)}
alias = uxreplace(c.expr, subs)
aliaseds = [extracted[i.expr] for i in g]
distances = []
for i in g:
distance = [o.distance(v) for (o, v) in zip(i.offsets, offsets)]
distance = [(d, set(v)) for (d, v) in LabeledVector.transpose(*distance)]
distances.append(LabeledVector([(d, v.pop()) for (d, v) in distance]))
aliases.add(alias, list(mapper), aliaseds, distances)
return aliases |
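A pure-Python sketch of the distance test described in the docstring above (it checks only the constant-distance condition, not the matching-operations one; the names are made up):
def constant_distance(offsets_a, offsets_b):
    # offsets_*: one tuple of per-dimension offsets for each Indexed in the expression.
    distances = {tuple(ob - oa for oa, ob in zip(a, b)) for a, b in zip(offsets_a, offsets_b)}
    return len(distances) == 1
# a[i+1] + b[i+1] vs a[i-1] + b[i-1]: every pair lies a constant distance (-2,) apart -> alias
assert constant_distance([(1,), (1,)], [(-1,), (-1,)])
# a[i+2] + b[i] vs a[i] + b[i]: distances (-2,) and (0,) differ -> no alias
assert not constant_distance([(2,), (0,)], [(0,), (0,)])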
def choose(aliases, exprs, mapper, selector):
'\n Analyze the detected aliases and, after applying a cost model to rule out\n the aliases with a bad flops/memory trade-off, inject them into the original\n expressions.\n '
tot = 0
retained = AliasMapper()
candidates = OrderedDict()
aliaseds = []
others = []
for (e, v) in aliases.items():
score = selector(e, len(v.aliaseds))
if (score > 0):
candidates[e] = score
aliaseds.extend(v.aliaseds)
else:
others.append(e)
if (not candidates):
return (exprs, retained, tot)
mapper = {k: v for (k, v) in mapper.items() if (v.free_symbols & set(aliaseds))}
templated = [uxreplace(e, mapper) for e in exprs]
owset = wset((others + templated))
for (e, v) in aliases.items():
try:
score = candidates[e]
except KeyError:
score = 0
if ((score > 1) or ((score == 1) and (max(len(wset(e)), 1) > len((wset(e) & owset))))):
retained[e] = v
tot += score
if (not retained):
return (exprs, retained, tot)
mapper = {k: v for (k, v) in mapper.items() if (v.free_symbols & set(retained.aliaseds))}
exprs = [uxreplace(e, mapper) for e in exprs]
return (exprs, retained, tot) | -8,288,166,598,663,380,000 | Analyze the detected aliases and, after applying a cost model to rule out
the aliases with a bad flops/memory trade-off, inject them into the original
expressions. | devito/passes/clusters/aliases.py | choose | ccuetom/devito | python | def choose(aliases, exprs, mapper, selector):
'\n Analyze the detected aliases and, after applying a cost model to rule out\n the aliases with a bad flops/memory trade-off, inject them into the original\n expressions.\n '
tot = 0
retained = AliasMapper()
candidates = OrderedDict()
aliaseds = []
others = []
for (e, v) in aliases.items():
score = selector(e, len(v.aliaseds))
if (score > 0):
candidates[e] = score
aliaseds.extend(v.aliaseds)
else:
others.append(e)
if (not candidates):
return (exprs, retained, tot)
mapper = {k: v for (k, v) in mapper.items() if (v.free_symbols & set(aliaseds))}
templated = [uxreplace(e, mapper) for e in exprs]
owset = wset((others + templated))
for (e, v) in aliases.items():
try:
score = candidates[e]
except KeyError:
score = 0
if ((score > 1) or ((score == 1) and (max(len(wset(e)), 1) > len((wset(e) & owset))))):
retained[e] = v
tot += score
if (not retained):
return (exprs, retained, tot)
mapper = {k: v for (k, v) in mapper.items() if (v.free_symbols & set(retained.aliaseds))}
exprs = [uxreplace(e, mapper) for e in exprs]
return (exprs, retained, tot) |
def lower_aliases(cluster, aliases, in_writeto, maxpar):
'\n Create a Schedule from an AliasMapper.\n '
dmapper = {}
processed = []
for (alias, v) in aliases.items():
imapper = {**{i.dim: i for i in v.intervals}, **{i.dim.parent: i for i in v.intervals if i.dim.is_NonlinearDerived}}
intervals = []
writeto = []
sub_iterators = {}
indicess = [[] for _ in v.distances]
for i in cluster.ispace.intervals:
try:
interval = imapper[i.dim]
except KeyError:
intervals.append(i)
continue
assert (i.stamp >= interval.stamp)
if (not (writeto or (interval != interval.zero()) or in_writeto(i.dim, cluster))):
intervals.append(i)
continue
assert (not i.dim.is_NonlinearDerived)
interval = interval.lift(i.stamp)
interval = interval.lift((interval.stamp + int(maxpar)))
writeto.append(interval)
intervals.append(interval)
if i.dim.is_Incr:
try:
d = dmapper[i.dim]
except KeyError:
dd = i.dim.parent
assert dd.is_Incr
if dd.parent.is_Incr:
m = (i.dim.symbolic_min - i.dim.parent.symbolic_min)
else:
m = 0
d = dmapper[i.dim] = IncrDimension(('%ss' % i.dim.name), i.dim, m, dd.symbolic_size, 1, dd.step)
sub_iterators[i.dim] = d
else:
d = i.dim
for (distance, indices) in zip(v.distances, indicess):
indices.append(((d - interval.lower) + distance[interval.dim]))
writeto = IterationSpace(IntervalGroup(writeto), sub_iterators)
intervals = IntervalGroup(intervals, cluster.ispace.relations)
ispace = IterationSpace(intervals, cluster.sub_iterators, cluster.directions)
ispace = ispace.augment(sub_iterators)
processed.append(ScheduledAlias(alias, writeto, ispace, v.aliaseds, indicess))
processed = sorted(processed, key=(lambda i: cit(cluster.ispace, i.ispace)))
return Schedule(*processed, dmapper=dmapper) | 8,807,849,933,634,017,000 | Create a Schedule from an AliasMapper. | devito/passes/clusters/aliases.py | lower_aliases | ccuetom/devito | python | def lower_aliases(cluster, aliases, in_writeto, maxpar):
'\n \n '
dmapper = {}
processed = []
for (alias, v) in aliases.items():
imapper = {**{i.dim: i for i in v.intervals}, **{i.dim.parent: i for i in v.intervals if i.dim.is_NonlinearDerived}}
intervals = []
writeto = []
sub_iterators = {}
indicess = [[] for _ in v.distances]
for i in cluster.ispace.intervals:
try:
interval = imapper[i.dim]
except KeyError:
intervals.append(i)
continue
assert (i.stamp >= interval.stamp)
if (not (writeto or (interval != interval.zero()) or in_writeto(i.dim, cluster))):
intervals.append(i)
continue
assert (not i.dim.is_NonlinearDerived)
interval = interval.lift(i.stamp)
interval = interval.lift((interval.stamp + int(maxpar)))
writeto.append(interval)
intervals.append(interval)
if i.dim.is_Incr:
try:
d = dmapper[i.dim]
except KeyError:
dd = i.dim.parent
assert dd.is_Incr
if dd.parent.is_Incr:
m = (i.dim.symbolic_min - i.dim.parent.symbolic_min)
else:
m = 0
d = dmapper[i.dim] = IncrDimension(('%ss' % i.dim.name), i.dim, m, dd.symbolic_size, 1, dd.step)
sub_iterators[i.dim] = d
else:
d = i.dim
for (distance, indices) in zip(v.distances, indicess):
indices.append(((d - interval.lower) + distance[interval.dim]))
writeto = IterationSpace(IntervalGroup(writeto), sub_iterators)
intervals = IntervalGroup(intervals, cluster.ispace.relations)
ispace = IterationSpace(intervals, cluster.sub_iterators, cluster.directions)
ispace = ispace.augment(sub_iterators)
processed.append(ScheduledAlias(alias, writeto, ispace, v.aliaseds, indicess))
processed = sorted(processed, key=(lambda i: cit(cluster.ispace, i.ispace)))
return Schedule(*processed, dmapper=dmapper) |
def optimize_schedule(cluster, schedule, platform, sregistry, options):
'\n Rewrite the schedule for performance optimization.\n '
if options['cire-rotate']:
schedule = _optimize_schedule_rotations(schedule, sregistry)
schedule = _optimize_schedule_padding(cluster, schedule, platform)
return schedule | 6,498,303,570,085,755,000 | Rewrite the schedule for performance optimization. | devito/passes/clusters/aliases.py | optimize_schedule | ccuetom/devito | python | def optimize_schedule(cluster, schedule, platform, sregistry, options):
'\n \n '
if options['cire-rotate']:
schedule = _optimize_schedule_rotations(schedule, sregistry)
schedule = _optimize_schedule_padding(cluster, schedule, platform)
return schedule |
def _optimize_schedule_rotations(schedule, sregistry):
'\n Transform the schedule such that the tensor temporaries "rotate" along\n the outermost Dimension. This trades a parallel Dimension for a smaller\n working set size.\n '
ridx = 0
rmapper = defaultdict(list)
processed = []
for (k, group) in groupby(schedule, key=(lambda i: i.writeto)):
g = list(group)
candidate = k[ridx]
d = candidate.dim
try:
ds = schedule.dmapper[d]
except KeyError:
processed.extend(g)
continue
n = candidate.min_size
assert (n > 0)
iis = candidate.lower
iib = candidate.upper
ii = ModuloDimension(('%sii' % d), ds, iis, incr=iib)
cd = CustomDimension(name=('%s%s' % (d, d)), symbolic_min=ii, symbolic_max=iib, symbolic_size=n)
dsi = ModuloDimension(('%si' % ds), cd, ((cd + ds) - iis), n)
mapper = OrderedDict()
for i in g:
mds = []
for indices in i.indicess:
v = indices[ridx]
try:
md = mapper[v]
except KeyError:
name = sregistry.make_name(prefix=('%sr' % d.name))
md = mapper.setdefault(v, ModuloDimension(name, ds, v, n))
mds.append(md)
indicess = [((indices[:ridx] + [md]) + indices[(ridx + 1):]) for (md, indices) in zip(mds, i.indicess)]
intervals = k.intervals.switch(d, dsi).zero(dsi)
sub_iterators = dict(k.sub_iterators)
sub_iterators[d] = dsi
writeto = IterationSpace(intervals, sub_iterators)
alias = i.alias.xreplace({d: (d + cd)})
d1 = writeto[(ridx + 1)].dim
intervals = IntervalGroup(Interval(cd, 0, 0), relations={(d, cd, d1)})
rispace = IterationSpace(intervals, {cd: dsi}, {cd: Forward})
aispace = i.ispace.zero(d)
aispace = aispace.augment({d: (mds + [ii])})
ispace = IterationSpace.union(rispace, aispace)
processed.append(ScheduledAlias(alias, writeto, ispace, i.aliaseds, indicess))
rmapper[d].extend(list(mapper.values()))
return Schedule(*processed, dmapper=schedule.dmapper, rmapper=rmapper) | -8,454,179,542,547,144,000 | Transform the schedule such that the tensor temporaries "rotate" along
the outermost Dimension. This trades a parallel Dimension for a smaller
working set size. | devito/passes/clusters/aliases.py | _optimize_schedule_rotations | ccuetom/devito | python | def _optimize_schedule_rotations(schedule, sregistry):
'\n Transform the schedule such that the tensor temporaries "rotate" along\n the outermost Dimension. This trades a parallel Dimension for a smaller\n working set size.\n '
ridx = 0
rmapper = defaultdict(list)
processed = []
for (k, group) in groupby(schedule, key=(lambda i: i.writeto)):
g = list(group)
candidate = k[ridx]
d = candidate.dim
try:
ds = schedule.dmapper[d]
except KeyError:
processed.extend(g)
continue
n = candidate.min_size
assert (n > 0)
iis = candidate.lower
iib = candidate.upper
ii = ModuloDimension(('%sii' % d), ds, iis, incr=iib)
cd = CustomDimension(name=('%s%s' % (d, d)), symbolic_min=ii, symbolic_max=iib, symbolic_size=n)
dsi = ModuloDimension(('%si' % ds), cd, ((cd + ds) - iis), n)
mapper = OrderedDict()
for i in g:
mds = []
for indices in i.indicess:
v = indices[ridx]
try:
md = mapper[v]
except KeyError:
name = sregistry.make_name(prefix=('%sr' % d.name))
md = mapper.setdefault(v, ModuloDimension(name, ds, v, n))
mds.append(md)
indicess = [((indices[:ridx] + [md]) + indices[(ridx + 1):]) for (md, indices) in zip(mds, i.indicess)]
intervals = k.intervals.switch(d, dsi).zero(dsi)
sub_iterators = dict(k.sub_iterators)
sub_iterators[d] = dsi
writeto = IterationSpace(intervals, sub_iterators)
alias = i.alias.xreplace({d: (d + cd)})
d1 = writeto[(ridx + 1)].dim
intervals = IntervalGroup(Interval(cd, 0, 0), relations={(d, cd, d1)})
rispace = IterationSpace(intervals, {cd: dsi}, {cd: Forward})
aispace = i.ispace.zero(d)
aispace = aispace.augment({d: (mds + [ii])})
ispace = IterationSpace.union(rispace, aispace)
processed.append(ScheduledAlias(alias, writeto, ispace, i.aliaseds, indicess))
rmapper[d].extend(list(mapper.values()))
return Schedule(*processed, dmapper=schedule.dmapper, rmapper=rmapper) |
def _optimize_schedule_padding(cluster, schedule, platform):
'\n Round up the innermost IterationInterval of the tensor temporaries IterationSpace\n to a multiple of the SIMD vector length. This is not always possible though (it\n depends on how much halo is safely accessible in all read Functions).\n '
processed = []
for i in schedule:
try:
it = i.ispace.itintervals[(- 1)]
if (ROUNDABLE in cluster.properties[it.dim]):
vl = platform.simd_items_per_reg(cluster.dtype)
ispace = i.ispace.add(Interval(it.dim, 0, (it.interval.size % vl)))
else:
ispace = i.ispace
processed.append(ScheduledAlias(i.alias, i.writeto, ispace, i.aliaseds, i.indicess))
except (TypeError, KeyError):
processed.append(i)
return Schedule(*processed, dmapper=schedule.dmapper, rmapper=schedule.rmapper) | 3,954,155,820,396,321,300 | Round up the innermost IterationInterval of the tensor temporaries IterationSpace
to a multiple of the SIMD vector length. This is not always possible though (it
depends on how much halo is safely accessible in all read Functions). | devito/passes/clusters/aliases.py | _optimize_schedule_padding | ccuetom/devito | python | def _optimize_schedule_padding(cluster, schedule, platform):
'\n Round up the innermost IterationInterval of the tensor temporaries IterationSpace\n to a multiple of the SIMD vector length. This is not always possible though (it\n depends on how much halo is safely accessible in all read Functions).\n '
processed = []
for i in schedule:
try:
it = i.ispace.itintervals[(- 1)]
if (ROUNDABLE in cluster.properties[it.dim]):
vl = platform.simd_items_per_reg(cluster.dtype)
ispace = i.ispace.add(Interval(it.dim, 0, (it.interval.size % vl)))
else:
ispace = i.ispace
processed.append(ScheduledAlias(i.alias, i.writeto, ispace, i.aliaseds, i.indicess))
except (TypeError, KeyError):
processed.append(i)
return Schedule(*processed, dmapper=schedule.dmapper, rmapper=schedule.rmapper) |
def lower_schedule(cluster, schedule, sregistry, options):
'\n Turn a Schedule into a sequence of Clusters.\n '
ftemps = options['cire-ftemps']
if ftemps:
make = TempFunction
else:
make = Array
clusters = []
subs = {}
for (alias, writeto, ispace, aliaseds, indicess) in schedule:
name = sregistry.make_name()
dtype = cluster.dtype
if writeto:
dimensions = [(d.parent if d.is_Sub else d) for d in writeto.itdimensions]
halo = [(abs(i.lower), abs(i.upper)) for i in writeto]
indices = []
for i in writeto:
try:
sub_iterators = writeto.sub_iterators[i.dim]
assert (len(sub_iterators) == 1)
indices.append(sub_iterators[0])
except KeyError:
indices.append((i.dim - i.lower))
obj = make(name=name, dimensions=dimensions, halo=halo, dtype=dtype)
expression = Eq(obj[indices], alias)
callback = (lambda idx: obj[idx])
else:
assert (writeto.size == 0)
obj = Symbol(name=name, dtype=dtype)
expression = Eq(obj, alias)
callback = (lambda idx: obj)
subs.update({aliased: callback(indices) for (aliased, indices) in zip(aliaseds, indicess)})
accesses = detect_accesses(expression)
parts = {k: IntervalGroup(build_intervals(v)).add(ispace.intervals).relaxed for (k, v) in accesses.items() if k}
dspace = DataSpace(cluster.dspace.intervals, parts)
properties = dict(cluster.properties)
for (d, v) in cluster.properties.items():
if any((i.is_Modulo for i in ispace.sub_iterators[d])):
properties[d] = normalize_properties(v, {SEQUENTIAL})
elif (d not in writeto.dimensions):
properties[d] = normalize_properties(v, {PARALLEL_IF_PVT})
clusters.append(cluster.rebuild(exprs=expression, ispace=ispace, dspace=dspace, properties=properties))
return (clusters, subs) | -1,341,575,166,606,335,700 | Turn a Schedule into a sequence of Clusters. | devito/passes/clusters/aliases.py | lower_schedule | ccuetom/devito | python | def lower_schedule(cluster, schedule, sregistry, options):
'\n \n '
ftemps = options['cire-ftemps']
if ftemps:
make = TempFunction
else:
make = Array
clusters = []
subs = {}
for (alias, writeto, ispace, aliaseds, indicess) in schedule:
name = sregistry.make_name()
dtype = cluster.dtype
if writeto:
dimensions = [(d.parent if d.is_Sub else d) for d in writeto.itdimensions]
halo = [(abs(i.lower), abs(i.upper)) for i in writeto]
indices = []
for i in writeto:
try:
sub_iterators = writeto.sub_iterators[i.dim]
assert (len(sub_iterators) == 1)
indices.append(sub_iterators[0])
except KeyError:
indices.append((i.dim - i.lower))
obj = make(name=name, dimensions=dimensions, halo=halo, dtype=dtype)
expression = Eq(obj[indices], alias)
callback = (lambda idx: obj[idx])
else:
assert (writeto.size == 0)
obj = Symbol(name=name, dtype=dtype)
expression = Eq(obj, alias)
callback = (lambda idx: obj)
subs.update({aliased: callback(indices) for (aliased, indices) in zip(aliaseds, indicess)})
accesses = detect_accesses(expression)
parts = {k: IntervalGroup(build_intervals(v)).add(ispace.intervals).relaxed for (k, v) in accesses.items() if k}
dspace = DataSpace(cluster.dspace.intervals, parts)
properties = dict(cluster.properties)
for (d, v) in cluster.properties.items():
if any((i.is_Modulo for i in ispace.sub_iterators[d])):
properties[d] = normalize_properties(v, {SEQUENTIAL})
elif (d not in writeto.dimensions):
properties[d] = normalize_properties(v, {PARALLEL_IF_PVT})
clusters.append(cluster.rebuild(exprs=expression, ispace=ispace, dspace=dspace, properties=properties))
return (clusters, subs) |
def pick_best(variants):
'\n Use the variant score and heuristics to return the variant with the best\n trade-off between operation count reduction and working set increase.\n '
best = variants.pop(0)
for i in variants:
(best_flop_score, best_ws_score) = best.score
if (best_flop_score == 0):
best = i
continue
(i_flop_score, i_ws_score) = i.score
delta = (i_ws_score - best_ws_score)
if (((delta > 0) and ((i_flop_score / best_flop_score) > 100)) or ((delta == 0) and (i_flop_score > best_flop_score)) or ((delta < 0) and ((best_flop_score / i_flop_score) <= 100))):
best = i
(schedule, exprs, _) = best
return (schedule, exprs) | 1,714,703,760,381,377,500 | Use the variant score and heuristics to return the variant with the best
trade-off between operation count reduction and working set increase. | devito/passes/clusters/aliases.py | pick_best | ccuetom/devito | python | def pick_best(variants):
'\n Use the variant score and heuristics to return the variant with the best\n trade-off between operation count reduction and working set increase.\n '
best = variants.pop(0)
for i in variants:
(best_flop_score, best_ws_score) = best.score
if (best_flop_score == 0):
best = i
continue
(i_flop_score, i_ws_score) = i.score
delta = (i_ws_score - best_ws_score)
if (((delta > 0) and ((i_flop_score / best_flop_score) > 100)) or ((delta == 0) and (i_flop_score > best_flop_score)) or ((delta < 0) and ((best_flop_score / i_flop_score) <= 100))):
best = i
(schedule, exprs, _) = best
return (schedule, exprs) |
def rebuild(cluster, exprs, subs, schedule):
'\n Plug the optimized aliases into the input Cluster. This leads to creating\n a new Cluster with suitable IterationSpace and DataSpace.\n '
exprs = [uxreplace(e, subs) for e in exprs]
ispace = cluster.ispace.augment(schedule.dmapper)
ispace = ispace.augment(schedule.rmapper)
accesses = detect_accesses(exprs)
parts = {k: IntervalGroup(build_intervals(v)).relaxed for (k, v) in accesses.items() if k}
dspace = DataSpace(cluster.dspace.intervals, parts)
return cluster.rebuild(exprs=exprs, ispace=ispace, dspace=dspace) | -6,467,477,386,370,209,000 | Plug the optimized aliases into the input Cluster. This leads to creating
a new Cluster with suitable IterationSpace and DataSpace. | devito/passes/clusters/aliases.py | rebuild | ccuetom/devito | python | def rebuild(cluster, exprs, subs, schedule):
'\n Plug the optimized aliases into the input Cluster. This leads to creating\n a new Cluster with suitable IterationSpace and DataSpace.\n '
exprs = [uxreplace(e, subs) for e in exprs]
ispace = cluster.ispace.augment(schedule.dmapper)
ispace = ispace.augment(schedule.rmapper)
accesses = detect_accesses(exprs)
parts = {k: IntervalGroup(build_intervals(v)).relaxed for (k, v) in accesses.items() if k}
dspace = DataSpace(cluster.dspace.intervals, parts)
return cluster.rebuild(exprs=exprs, ispace=ispace, dspace=dspace) |
def make_rotations_table(d, v):
'\n All possible rotations of `range(v+1)`.\n '
m = np.array([[((j - i) if (j > i) else 0) for j in range((v + 1))] for i in range((v + 1))])
m = (m - m.T)[::(- 1), :]
m = np.roll(m, int((- np.floor((v / 2)))), axis=0)
m = [Interval(d, min(i), max(i)) for i in m]
return m | 2,894,974,119,365,010,400 | All possible rotations of `range(v+1)`. | devito/passes/clusters/aliases.py | make_rotations_table | ccuetom/devito | python | def make_rotations_table(d, v):
'\n \n '
m = np.array([[((j - i) if (j > i) else 0) for j in range((v + 1))] for i in range((v + 1))])
m = (m - m.T)[::(- 1), :]
m = np.roll(m, int((- np.floor((v / 2)))), axis=0)
m = [Interval(d, min(i), max(i)) for i in m]
return m |
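An illustrative, standalone sketch of the numeric core of make_rotations_table above, using only numpy; the devito Interval/Dimension wrapping is omitted and v = 2 is an arbitrary value chosen for demonstration:

import numpy as np

v = 2  # halo extent; arbitrary value for this sketch
# Build the same (v+1) x (v+1) table of candidate shifts as make_rotations_table
m = np.array([[(j - i) if j > i else 0 for j in range(v + 1)] for i in range(v + 1)])
m = (m - m.T)[::-1, :]
m = np.roll(m, int(-np.floor(v / 2)), axis=0)
# Each row holds one rotation of range(v+1); its min/max give the interval extremes
for row in m:
    print(min(row), max(row))   # (-1, 1), (0, 2), (-2, 0)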
def cit(ispace0, ispace1):
'\n The Common IterationIntervals of two IterationSpaces.\n '
found = []
for (it0, it1) in zip(ispace0.itintervals, ispace1.itintervals):
if (it0 == it1):
found.append(it0)
else:
break
return tuple(found) | 1,056,912,240,640,778,400 | The Common IterationIntervals of two IterationSpaces. | devito/passes/clusters/aliases.py | cit | ccuetom/devito | python | def cit(ispace0, ispace1):
'\n \n '
found = []
for (it0, it1) in zip(ispace0.itintervals, ispace1.itintervals):
if (it0 == it1):
found.append(it0)
else:
break
return tuple(found) |
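cit above is in essence a longest-common-prefix over two sequences of IterationIntervals; a minimal standalone sketch of the same idea over plain tuples (devito's IterationSpace objects are not reproduced here, and common_prefix is an illustrative name):

def common_prefix(seq0, seq1):
    # Keep leading elements that compare equal; stop at the first mismatch
    found = []
    for a, b in zip(seq0, seq1):
        if a == b:
            found.append(a)
        else:
            break
    return tuple(found)

print(common_prefix(('t', 'x', 'y'), ('t', 'x', 'z')))   # ('t', 'x') -- shared outer loops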
def maybe_coeff_key(grid, expr):
'\n True if `expr` could be the coefficient of an FD derivative, False otherwise.\n '
if expr.is_Number:
return True
indexeds = [i for i in expr.free_symbols if i.is_Indexed]
return any(((not (set(grid.dimensions) <= set(i.function.dimensions))) for i in indexeds)) | -365,349,668,373,705,540 | True if `expr` could be the coefficient of an FD derivative, False otherwise. | devito/passes/clusters/aliases.py | maybe_coeff_key | ccuetom/devito | python | def maybe_coeff_key(grid, expr):
'\n \n '
if expr.is_Number:
return True
indexeds = [i for i in expr.free_symbols if i.is_Indexed]
return any(((not (set(grid.dimensions) <= set(i.function.dimensions))) for i in indexeds)) |
def wset(exprs):
'\n Extract the working set out of a set of equations.\n '
return {i.function for i in flatten([e.free_symbols for e in as_tuple(exprs)]) if i.function.is_AbstractFunction} | 2,398,974,317,648,662,000 | Extract the working set out of a set of equations. | devito/passes/clusters/aliases.py | wset | ccuetom/devito | python | def wset(exprs):
'\n \n '
return {i.function for i in flatten([e.free_symbols for e in as_tuple(exprs)]) if i.function.is_AbstractFunction} |
def potential_max_deriv_order(exprs):
'\n The maximum FD derivative order in a list of expressions.\n '
nadds = (lambda e: ((int(e.is_Add) + max([nadds(a) for a in e.args], default=0)) if (not q_leaf(e)) else 0))
return max([nadds(e) for e in exprs], default=0) | -7,012,757,534,154,908,000 | The maximum FD derivative order in a list of expressions. | devito/passes/clusters/aliases.py | potential_max_deriv_order | ccuetom/devito | python | def potential_max_deriv_order(exprs):
'\n \n '
nadds = (lambda e: ((int(e.is_Add) + max([nadds(a) for a in e.args], default=0)) if (not q_leaf(e)) else 0))
return max([nadds(e) for e in exprs], default=0) |
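A hedged, self-contained analog of potential_max_deriv_order above written against sympy directly; q_leaf is devito-internal, so a simple is_leaf stand-in is substituted, and max_nested_adds is an illustrative name:

from sympy import symbols

a, b, c, d, e = symbols('a b c d e')

def is_leaf(expr):
    # Stand-in for devito's q_leaf: symbols and numbers terminate the recursion
    return expr.is_Symbol or expr.is_Number

def max_nested_adds(expr):
    # The depth of nested Adds bounds the FD derivative order that may be present
    if is_leaf(expr):
        return 0
    return int(expr.is_Add) + max((max_nested_adds(arg) for arg in expr.args), default=0)

expr = (a + b) * (c + d) + e   # an Add whose arguments contain further Adds
print(max_nested_adds(expr))   # 2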
def search_potential_deriv(expr, n, c=0):
'\n Retrieve the expressions at depth `n` that potentially stem from FD derivatives.\n '
assert (n >= c >= 0)
if (q_leaf(expr) or expr.is_Pow):
return []
elif expr.is_Mul:
if (c == n):
return [expr]
else:
return flatten([search_potential_deriv(a, n, (c + 1)) for a in expr.args])
else:
return flatten([search_potential_deriv(a, n, c) for a in expr.args]) | -8,169,505,188,614,247,000 | Retrieve the expressions at depth `n` that potentially stem from FD derivatives. | devito/passes/clusters/aliases.py | search_potential_deriv | ccuetom/devito | python | def search_potential_deriv(expr, n, c=0):
'\n \n '
assert (n >= c >= 0)
if (q_leaf(expr) or expr.is_Pow):
return []
elif expr.is_Mul:
if (c == n):
return [expr]
else:
return flatten([search_potential_deriv(a, n, (c + 1)) for a in expr.args])
else:
return flatten([search_potential_deriv(a, n, c) for a in expr.args]) |
def translated(self, other):
'\n True if ``self`` is translated w.r.t. ``other``, False otherwise.\n\n Examples\n --------\n Two candidates are translated if their bases are the same and\n their offsets are pairwise translated.\n\n c := A[i,j] op A[i,j+1] -> Toffsets = {i: [0,0], j: [0,1]}\n u := A[i+1,j] op A[i+1,j+1] -> Toffsets = {i: [1,1], j: [0,1]}\n\n Then `c` is translated w.r.t. `u` with distance `{i: 1, j: 0}`\n '
if (len(self.Toffsets) != len(other.Toffsets)):
return False
if (len(self.bases) != len(other.bases)):
return False
if any(((b0 != b1) for (b0, b1) in zip(self.bases, other.bases))):
return False
for ((d0, o0), (d1, o1)) in zip(self.Toffsets, other.Toffsets):
if (d0 is not d1):
return False
distance = set((o0 - o1))
if (len(distance) != 1):
return False
return True | -8,815,069,943,959,899,000 | True if ``self`` is translated w.r.t. ``other``, False otherwise.
Examples
--------
Two candidates are translated if their bases are the same and
their offsets are pairwise translated.
c := A[i,j] op A[i,j+1] -> Toffsets = {i: [0,0], j: [0,1]}
u := A[i+1,j] op A[i+1,j+1] -> Toffsets = {i: [1,1], j: [0,1]}
Then `c` is translated w.r.t. `u` with distance `{i: 1, j: 0}` | devito/passes/clusters/aliases.py | translated | ccuetom/devito | python | def translated(self, other):
'\n True if ``self`` is translated w.r.t. ``other``, False otherwise.\n\n Examples\n --------\n Two candidates are translated if their bases are the same and\n their offsets are pairwise translated.\n\n c := A[i,j] op A[i,j+1] -> Toffsets = {i: [0,0], j: [0,1]}\n u := A[i+1,j] op A[i+1,j+1] -> Toffsets = {i: [1,1], j: [0,1]}\n\n Then `c` is translated w.r.t. `u` with distance `{i: 1, j: 0}`\n '
if (len(self.Toffsets) != len(other.Toffsets)):
return False
if (len(self.bases) != len(other.bases)):
return False
if any(((b0 != b1) for (b0, b1) in zip(self.bases, other.bases))):
return False
for ((d0, o0), (d1, o1)) in zip(self.Toffsets, other.Toffsets):
if (d0 is not d1):
return False
distance = set((o0 - o1))
if (len(distance) != 1):
return False
return True |
def find_rotation_distance(self, d, interval):
'\n The distance from the Group pivot of a rotation along Dimension ``d`` that\n can safely iterate over the ``interval``.\n '
assert (d is interval.dim)
for (rotation, distance) in self._pivot_legal_rotations[d]:
if (rotation.union(interval) != rotation):
continue
min_interval = self._pivot_min_intervals[d].translate((- distance))
if (interval.union(min_interval) == interval):
return distance
return None | -2,571,492,589,286,373,400 | The distance from the Group pivot of a rotation along Dimension ``d`` that
can safely iterate over the ``interval``. | devito/passes/clusters/aliases.py | find_rotation_distance | ccuetom/devito | python | def find_rotation_distance(self, d, interval):
'\n The distance from the Group pivot of a rotation along Dimension ``d`` that\n can safely iterate over the ``interval``.\n '
assert (d is interval.dim)
for (rotation, distance) in self._pivot_legal_rotations[d]:
if (rotation.union(interval) != rotation):
continue
min_interval = self._pivot_min_intervals[d].translate((- distance))
if (interval.union(min_interval) == interval):
return distance
return None |
@cached_property
def diameter(self):
'\n The size of the iteration space required to evaluate all aliasing expressions\n in this Group, along each Dimension.\n '
ret = defaultdict(int)
for i in self.Toffsets:
for (d, v) in i:
try:
distance = int((max(v) - min(v)))
except TypeError:
if (len(set(v)) == 1):
continue
else:
raise ValueError
ret[d] = max(ret[d], distance)
return ret | 2,070,731,786,563,981,600 | The size of the iteration space required to evaluate all aliasing expressions
in this Group, along each Dimension. | devito/passes/clusters/aliases.py | diameter | ccuetom/devito | python | @cached_property
def diameter(self):
'\n The size of the iteration space required to evaluate all aliasing expressions\n in this Group, along each Dimension.\n '
ret = defaultdict(int)
for i in self.Toffsets:
for (d, v) in i:
try:
distance = int((max(v) - min(v)))
except TypeError:
if (len(set(v)) == 1):
continue
else:
raise ValueError
ret[d] = max(ret[d], distance)
return ret |
@property
def pivot(self):
'\n A deterministically chosen Candidate for this Group.\n '
return self[0] | 4,219,656,167,425,094,700 | A deterministically chosen Candidate for this Group. | devito/passes/clusters/aliases.py | pivot | ccuetom/devito | python | @property
def pivot(self):
'\n \n '
return self[0] |
@cached_property
def _pivot_legal_rotations(self):
'\n All legal rotations along each Dimension for the Group pivot.\n '
ret = {}
for (d, (maxd, mini)) in self._pivot_legal_shifts.items():
v = (mini - maxd)
m = make_rotations_table(d, v)
distances = []
for rotation in m:
distance = (maxd - rotation.lower)
assert (distance == (mini - rotation.upper))
distances.append(distance)
ret[d] = list(zip(m, distances))
return ret | -5,917,378,959,376,706,000 | All legal rotations along each Dimension for the Group pivot. | devito/passes/clusters/aliases.py | _pivot_legal_rotations | ccuetom/devito | python | @cached_property
def _pivot_legal_rotations(self):
'\n \n '
ret = {}
for (d, (maxd, mini)) in self._pivot_legal_shifts.items():
v = (mini - maxd)
m = make_rotations_table(d, v)
distances = []
for rotation in m:
distance = (maxd - rotation.lower)
assert (distance == (mini - rotation.upper))
distances.append(distance)
ret[d] = list(zip(m, distances))
return ret |
@cached_property
def _pivot_min_intervals(self):
'\n The minimum Interval along each Dimension such that by evaluating the\n pivot, all Candidates are evaluated too.\n '
c = self.pivot
ret = defaultdict((lambda : [np.inf, (- np.inf)]))
for i in self:
distance = [o.distance(v) for (o, v) in zip(i.offsets, c.offsets)]
distance = [(d, set(v)) for (d, v) in LabeledVector.transpose(*distance)]
for (d, v) in distance:
value = v.pop()
ret[d][0] = min(ret[d][0], value)
ret[d][1] = max(ret[d][1], value)
ret = {d: Interval(d, m, M) for (d, (m, M)) in ret.items()}
return ret | -5,017,529,699,877,890,000 | The minimum Interval along each Dimension such that by evaluating the
pivot, all Candidates are evaluated too. | devito/passes/clusters/aliases.py | _pivot_min_intervals | ccuetom/devito | python | @cached_property
def _pivot_min_intervals(self):
'\n The minimum Interval along each Dimension such that by evaluating the\n pivot, all Candidates are evaluated too.\n '
c = self.pivot
ret = defaultdict((lambda : [np.inf, (- np.inf)]))
for i in self:
distance = [o.distance(v) for (o, v) in zip(i.offsets, c.offsets)]
distance = [(d, set(v)) for (d, v) in LabeledVector.transpose(*distance)]
for (d, v) in distance:
value = v.pop()
ret[d][0] = min(ret[d][0], value)
ret[d][1] = max(ret[d][1], value)
ret = {d: Interval(d, m, M) for (d, (m, M)) in ret.items()}
return ret |
@cached_property
def _pivot_legal_shifts(self):
'\n The max decrement and min increment along each Dimension such that the\n Group pivot does not go OOB.\n '
c = self.pivot
ret = defaultdict((lambda : ((- np.inf), np.inf)))
for (i, ofs) in zip(c.indexeds, c.offsets):
f = i.function
for l in ofs.labels:
hsize = sum(f._size_halo[l])
(lower, upper) = c.shifts[l].offsets
try:
maxd = min(0, max(ret[l][0], ((- ofs[l]) - lower)))
mini = max(0, min(ret[l][1], ((hsize - ofs[l]) - upper)))
ret[l] = (maxd, mini)
except TypeError:
ret[l] = (0, 0)
return ret | 4,179,159,710,888,336,400 | The max decrement and min increment along each Dimension such that the
Group pivot does not go OOB. | devito/passes/clusters/aliases.py | _pivot_legal_shifts | ccuetom/devito | python | @cached_property
def _pivot_legal_shifts(self):
'\n The max decrement and min increment along each Dimension such that the\n Group pivot does not go OOB.\n '
c = self.pivot
ret = defaultdict((lambda : ((- np.inf), np.inf)))
for (i, ofs) in zip(c.indexeds, c.offsets):
f = i.function
for l in ofs.labels:
hsize = sum(f._size_halo[l])
(lower, upper) = c.shifts[l].offsets
try:
maxd = min(0, max(ret[l][0], ((- ofs[l]) - lower)))
mini = max(0, min(ret[l][1], ((hsize - ofs[l]) - upper)))
ret[l] = (maxd, mini)
except TypeError:
ret[l] = (0, 0)
return ret |
def merge_dicts(a, b):
'\nMerge two dictionaries. If there is a key collision, `b` overrides `a`.\n :param a: Dictionary of default settings\n :param b: Dictionary of override settings\n :rtype : dict\n '
try:
a.update(b)
except Exception as exc:
raise SystemError('Failed to merge dictionaries. Dictionary A:\n\n{0}\n\nDictionary B:\n\n{1}\n\nException: {2}'.format(a, b, exc))
return a | -2,853,009,548,386,194,400 | Merge two dictionaries. If there is a key collision, `b` overrides `a`.
:param a: Dictionary of default settings
:param b: Dictionary of override settings
:rtype : dict | MasterScripts/systemprep-linuxmaster.py | merge_dicts | plus3it/SystemPrep | python | def merge_dicts(a, b):
'\nMerge two dictionaries. If there is a key collision, `b` overrides `a`.\n :param a: Dictionary of default settings\n :param b: Dictionary of override settings\n :rtype : dict\n '
try:
a.update(b)
except Exception as exc:
raise SystemError('Failed to merge dictionaries. Dictionary A:\n\n{0}\n\nDictionary B:\n\n{1}\n\nException: {2}'.format(a, b, exc))
return a |
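A brief usage sketch of merge_dicts as defined above, illustrating that `b` wins on a key collision (the keys are taken from the scripts in this same file):

defaults = {'saltstates': 'Highstate', 'entenv': 'False'}
overrides = {'entenv': 'True'}
print(merge_dicts(defaults, overrides))
# {'saltstates': 'Highstate', 'entenv': 'True'}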
def get_scripts_to_execute(system, workingdir, **scriptparams):
"\nReturns an array of hashtables. Each hashtable has two keys: 'ScriptUrl' and 'Parameters'.\n'ScriptSource' is the path to the script to be executed. Only supports http/s sources currently.\n'Parameters' is a hashtable of parameters to pass to the script.\nUse `merge_dicts({yourdict}, scriptparams)` to merge command line parameters with a set of default parameters.\n :param system: str, the system type as returned from `platform.system`\n :param workingdir: str, the working directory where content should be saved\n :param scriptparams: dict, parameters passed to the master script which should be relayed to the content scripts\n :rtype : dict\n "
if ('Linux' in system):
scriptstoexecute = ({'ScriptSource': 'https://systemprep.s3.amazonaws.com/ContentScripts/systemprep-linuxyumrepoinstall.py', 'Parameters': merge_dicts({'yumrepomap': [{'url': 'https://s3.amazonaws.com/systemprep-repo/linux/saltstack/salt/yum.repos/salt-reposync-amzn.repo', 'dist': 'amazon', 'epel_version': '6'}, {'url': 'https://s3.amazonaws.com/systemprep-repo/linux/saltstack/salt/yum.repos/salt-reposync-el6.repo', 'dist': 'redhat', 'epel_version': '6'}, {'url': 'https://s3.amazonaws.com/systemprep-repo/linux/saltstack/salt/yum.repos/salt-reposync-el6.repo', 'dist': 'centos', 'epel_version': '6'}, {'url': 'https://s3.amazonaws.com/systemprep-repo/linux/saltstack/salt/yum.repos/salt-reposync-el7.repo', 'dist': 'redhat', 'epel_version': '7'}, {'url': 'https://s3.amazonaws.com/systemprep-repo/linux/saltstack/salt/yum.repos/salt-reposync-el7.repo', 'dist': 'centos', 'epel_version': '7'}]}, scriptparams)}, {'ScriptSource': 'https://systemprep.s3.amazonaws.com/ContentScripts/SystemPrep-LinuxSaltInstall.py', 'Parameters': merge_dicts({'saltinstallmethod': 'yum', 'saltcontentsource': 'https://systemprep-content.s3.amazonaws.com/linux/salt/salt-content.zip', 'formulastoinclude': ['https://salt-formulas.s3.amazonaws.com/systemprep-formula-master.zip', 'https://salt-formulas.s3.amazonaws.com/ash-linux-formula-master.zip', 'https://salt-formulas.s3.amazonaws.com/join-domain-formula-master.zip', 'https://salt-formulas.s3.amazonaws.com/scc-formula-master.zip', 'https://s3.amazonaws.com/salt-formulas/name-computer-formula-master.zip'], 'formulaterminationstrings': ['-master', '-latest'], 'saltstates': 'Highstate', 'entenv': 'False', 'salt_results_log': '/var/log/saltcall.results.log', 'salt_debug_log': '/var/log/saltcall.debug.log', 'sourceiss3bucket': 'True'}, scriptparams)})
elif ('Windows' in system):
scriptstoexecute = ({'ScriptSource': 'https://systemprep.s3.amazonaws.com/SystemContent/Windows/Salt/SystemPrep-WindowsSaltInstall.ps1', 'Parameters': merge_dicts({'saltworkingdir': '{0}\\SystemContent\\Windows\\Salt'.format(workingdir), 'saltcontentsource': 'https://systemprep.s3.amazonaws.com/SystemContent/Windows/Salt/salt-content.zip', 'formulastoinclude': ['https://salt-formulas.s3.amazonaws.com/systemprep-formula-master.zip', 'https://salt-formulas.s3.amazonaws.com/ash-windows-formula-master.zip'], 'formulaterminationstrings': ['-latest'], 'ashrole': 'MemberServer', 'entenv': 'False', 'saltstates': 'Highstate'}, scriptparams)},)
else:
raise SystemError('System, {0}, is not recognized?'.format(system))
return scriptstoexecute | -2,843,389,748,017,365,000 | Returns an array of hashtables. Each hashtable has two keys: 'ScriptUrl' and 'Parameters'.
'ScriptSource' is the path to the script to be executed. Only supports http/s sources currently.
'Parameters' is a hashtable of parameters to pass to the script.
Use `merge_dicts({yourdict}, scriptparams)` to merge command line parameters with a set of default parameters.
:param system: str, the system type as returned from `platform.system`
:param workingdir: str, the working directory where content should be saved
:param scriptparams: dict, parameters passed to the master script which should be relayed to the content scripts
:rtype : dict | MasterScripts/systemprep-linuxmaster.py | get_scripts_to_execute | plus3it/SystemPrep | python | def get_scripts_to_execute(system, workingdir, **scriptparams):
"\nReturns an array of hashtables. Each hashtable has two keys: 'ScriptUrl' and 'Parameters'.\n'ScriptSource' is the path to the script to be executed. Only supports http/s sources currently.\n'Parameters' is a hashtable of parameters to pass to the script.\nUse `merge_dicts({yourdict}, scriptparams)` to merge command line parameters with a set of default parameters.\n :param system: str, the system type as returned from `platform.system`\n :param workingdir: str, the working directory where content should be saved\n :param scriptparams: dict, parameters passed to the master script which should be relayed to the content scripts\n :rtype : dict\n "
if ('Linux' in system):
scriptstoexecute = ({'ScriptSource': 'https://systemprep.s3.amazonaws.com/ContentScripts/systemprep-linuxyumrepoinstall.py', 'Parameters': merge_dicts({'yumrepomap': [{'url': 'https://s3.amazonaws.com/systemprep-repo/linux/saltstack/salt/yum.repos/salt-reposync-amzn.repo', 'dist': 'amazon', 'epel_version': '6'}, {'url': 'https://s3.amazonaws.com/systemprep-repo/linux/saltstack/salt/yum.repos/salt-reposync-el6.repo', 'dist': 'redhat', 'epel_version': '6'}, {'url': 'https://s3.amazonaws.com/systemprep-repo/linux/saltstack/salt/yum.repos/salt-reposync-el6.repo', 'dist': 'centos', 'epel_version': '6'}, {'url': 'https://s3.amazonaws.com/systemprep-repo/linux/saltstack/salt/yum.repos/salt-reposync-el7.repo', 'dist': 'redhat', 'epel_version': '7'}, {'url': 'https://s3.amazonaws.com/systemprep-repo/linux/saltstack/salt/yum.repos/salt-reposync-el7.repo', 'dist': 'centos', 'epel_version': '7'}]}, scriptparams)}, {'ScriptSource': 'https://systemprep.s3.amazonaws.com/ContentScripts/SystemPrep-LinuxSaltInstall.py', 'Parameters': merge_dicts({'saltinstallmethod': 'yum', 'saltcontentsource': 'https://systemprep-content.s3.amazonaws.com/linux/salt/salt-content.zip', 'formulastoinclude': ['https://salt-formulas.s3.amazonaws.com/systemprep-formula-master.zip', 'https://salt-formulas.s3.amazonaws.com/ash-linux-formula-master.zip', 'https://salt-formulas.s3.amazonaws.com/join-domain-formula-master.zip', 'https://salt-formulas.s3.amazonaws.com/scc-formula-master.zip', 'https://s3.amazonaws.com/salt-formulas/name-computer-formula-master.zip'], 'formulaterminationstrings': ['-master', '-latest'], 'saltstates': 'Highstate', 'entenv': 'False', 'salt_results_log': '/var/log/saltcall.results.log', 'salt_debug_log': '/var/log/saltcall.debug.log', 'sourceiss3bucket': 'True'}, scriptparams)})
elif ('Windows' in system):
scriptstoexecute = ({'ScriptSource': 'https://systemprep.s3.amazonaws.com/SystemContent/Windows/Salt/SystemPrep-WindowsSaltInstall.ps1', 'Parameters': merge_dicts({'saltworkingdir': '{0}\\SystemContent\\Windows\\Salt'.format(workingdir), 'saltcontentsource': 'https://systemprep.s3.amazonaws.com/SystemContent/Windows/Salt/salt-content.zip', 'formulastoinclude': ['https://salt-formulas.s3.amazonaws.com/systemprep-formula-master.zip', 'https://salt-formulas.s3.amazonaws.com/ash-windows-formula-master.zip'], 'formulaterminationstrings': ['-latest'], 'ashrole': 'MemberServer', 'entenv': 'False', 'saltstates': 'Highstate'}, scriptparams)},)
else:
raise SystemError('System, {0}, is not recognized?'.format(system))
return scriptstoexecute |
def create_working_dir(basedir, dirprefix):
'\nCreates a directory in `basedir` with a prefix of `dirprefix`.\nThe directory will have a random 5 character string appended to `dirprefix`.\nReturns the path to the working directory.\n :rtype : str\n :param basedir: str, the directory in which to create the working directory\n :param dirprefix: str, prefix to prepend to the working directory\n '
workingdir = None
try:
workingdir = tempfile.mkdtemp(prefix=dirprefix, dir=basedir)
except Exception as exc:
raise SystemError('Could not create workingdir in {0}.\nException: {1}'.format(basedir, exc))
return workingdir | 3,796,915,593,746,301,000 | Creates a directory in `basedir` with a prefix of `dirprefix`.
The directory will have a random 5 character string appended to `dirprefix`.
Returns the path to the working directory.
:rtype : str
:param basedir: str, the directory in which to create the working directory
:param dirprefix: str, prefix to prepend to the working directory | MasterScripts/systemprep-linuxmaster.py | create_working_dir | plus3it/SystemPrep | python | def create_working_dir(basedir, dirprefix):
'\nCreates a directory in `basedir` with a prefix of `dirprefix`.\nThe directory will have a random 5 character string appended to `dirprefix`.\nReturns the path to the working directory.\n :rtype : str\n :param basedir: str, the directory in which to create the working directory\n :param dirprefix: str, prefix to prepend to the working directory\n '
workingdir = None
try:
workingdir = tempfile.mkdtemp(prefix=dirprefix, dir=basedir)
except Exception as exc:
raise SystemError('Could not create workingdir in {0}.\nException: {1}'.format(basedir, exc))
return workingdir |
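A usage sketch of create_working_dir as defined above; /tmp is used here instead of /usr/tmp purely for the example, and the random suffix will differ on every call:

import tempfile   # required by create_working_dir itself

workdir = create_working_dir('/tmp', 'systemprep-')
print(workdir)    # e.g. /tmp/systemprep-XXXXX (random suffix)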
def get_system_params(system):
'\nReturns a dictionary of OS platform-specific parameters.\n :param system: str, the system type as returned by `platform.system`\n :rtype : dict\n '
a = {}
workingdirprefix = 'systemprep-'
if ('Linux' in system):
tempdir = '/usr/tmp/'
a['pathseparator'] = '/'
a['readyfile'] = '/var/run/system-is-ready'
a['restart'] = 'shutdown -r +1 &'
elif ('Windows' in system):
systemroot = os.environ['SYSTEMROOT']
systemdrive = os.environ['SYSTEMDRIVE']
tempdir = os.environ['TEMP']
a['pathseparator'] = '\\'
a['readyfile'] = '{0}\\system-is-ready'.format(systemdrive)
a['restart'] = '{0}\\system32\\shutdown.exe/r /t 30 /d p:2:4 /c "SystemPrep complete. Rebooting computer."'.format(systemroot)
else:
raise SystemError('System, {0}, is not recognized?'.format(system))
a['workingdir'] = create_working_dir(tempdir, workingdirprefix)
return a | 123,622,459,867,249,700 | Returns a dictionary of OS platform-specific parameters.
:param system: str, the system type as returned by `platform.system`
:rtype : dict | MasterScripts/systemprep-linuxmaster.py | get_system_params | plus3it/SystemPrep | python | def get_system_params(system):
'\nReturns a dictionary of OS platform-specific parameters.\n :param system: str, the system type as returned by `platform.system`\n :rtype : dict\n '
a = {}
workingdirprefix = 'systemprep-'
if ('Linux' in system):
tempdir = '/usr/tmp/'
a['pathseparator'] = '/'
a['readyfile'] = '/var/run/system-is-ready'
a['restart'] = 'shutdown -r +1 &'
elif ('Windows' in system):
systemroot = os.environ['SYSTEMROOT']
systemdrive = os.environ['SYSTEMDRIVE']
tempdir = os.environ['TEMP']
a['pathseparator'] = '\\'
a['readyfile'] = '{0}\\system-is-ready'.format(systemdrive)
a['restart'] = '{0}\\system32\\shutdown.exe/r /t 30 /d p:2:4 /c "SystemPrep complete. Rebooting computer."'.format(systemroot)
else:
raise SystemError('System, {0}, is not recognized?'.format(system))
a['workingdir'] = create_working_dir(tempdir, workingdirprefix)
return a |
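A hedged sketch of how the helpers above fit together in the master script, assuming get_system_params and get_scripts_to_execute are defined or imported in the same module:

import platform

system = platform.system()                 # e.g. 'Linux'
systemparams = get_system_params(system)
scripts = get_scripts_to_execute(system, systemparams['workingdir'])
for script in scripts:
    print(script['ScriptSource'])          # each entry pairs a script URL with its parameters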