import json
import os
_data = {"files": []}
_directory = os.path.expanduser('~')
_bpl_file = '/.bpl'
def set_working_dir(dir):
os.chdir(dir)
def add_file(name):
    # Keep an empty breakpoint list so add_breakpoint() can append to it later
    # without a KeyError.
    _data["files"].append({"name": name, "breakpoints": []})
def add_breakpoint(file_name, bp):
contained = False
for file in _data["files"]:
if file["name"] == file_name:
file["breakpoints"].append(bp)
contained = True
if not contained:
file_json = {"name": file_name, "breakpoints": [bp]}
_data["files"].append(file_json)
def to_json():
return json.dumps(_data)
def read():
global _data
file_path = str(_directory + _bpl_file)
if os.path.isfile(file_path):
f = open(file_path, 'r')
with f:
bpl_json = f.read()
data = json.loads(bpl_json)
_data = data
return data
def get_files():
return _data["files"]
def get_breakpoints(file_name):
for file in _data["files"]:
if file["name"] == file_name:
return file["breakpoints"]
return []
def set_breakpoints(file_name, bps):
contained = False
for file in _data["files"]:
if file["name"] == file_name:
file["breakpoints"] = bps
contained = True
if not contained:
file_json = {"name": file_name, "breakpoints": bps}
_data["files"].append(file_json)
def save():
file_path = str(_directory + _bpl_file)
f = open(file_path, 'w')
with f:
f.write(json.dumps(_data))
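# Illustrative usage sketch (not part of the original module). The file name and
# breakpoint value below are hypothetical; they only show how read(),
# add_breakpoint() and save() are meant to be combined.
if __name__ == "__main__":
    read()                            # load ~/.bpl if it already exists
    add_breakpoint("example.py", 42)  # hypothetical file and breakpoint
    save()                            # persist the updated breakpoint list
    print(get_breakpoints("example.py"))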
|
# Copyright 2017 Mycroft AI Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import re
import os
import time
from time import sleep, time as get_time
import json
from threading import Thread
import speech_recognition as sr
import pyaudio
from pyee import EventEmitter
from requests import RequestException
from requests.exceptions import ConnectionError
from mycroft_voice_satellite.configuration import CONFIGURATION
from mycroft_voice_satellite.speech.mic import MutableMicrophone, \
ResponsiveRecognizer
from ovos_plugin_manager.wakewords import OVOSWakeWordFactory
from ovos_plugin_manager.stt import OVOSSTTFactory
from queue import Queue, Empty
from ovos_utils.log import LOG
from mycroft_voice_satellite.playback import play_audio, play_mp3, play_ogg, \
play_wav, resolve_resource_file
MAX_MIC_RESTARTS = 20
AUDIO_DATA = 0
STREAM_START = 1
STREAM_DATA = 2
STREAM_STOP = 3
def find_input_device(device_name):
""" Find audio input device by name.
Arguments:
device_name: device name or regex pattern to match
Returns: device_index (int) or None if device wasn't found
"""
LOG.info('Searching for input device: {}'.format(device_name))
LOG.debug('Devices: ')
pa = pyaudio.PyAudio()
pattern = re.compile(device_name)
for device_index in range(pa.get_device_count()):
dev = pa.get_device_info_by_index(device_index)
LOG.debug(' {}'.format(dev['name']))
if dev['maxInputChannels'] > 0 and pattern.match(dev['name']):
LOG.debug(' ^-- matched')
return device_index
return None
class AudioStreamHandler:
def __init__(self, queue):
self.queue = queue
    def stream_start(self):
        # Use 4-tuples so AudioConsumer.read() can unpack every queue entry the
        # same way as AUDIO_DATA entries.
        self.queue.put((STREAM_START, None, None, None))
    def stream_chunk(self, chunk, source=None):
        self.queue.put((STREAM_DATA, chunk, source, None))
    def stream_stop(self):
        self.queue.put((STREAM_STOP, None, None, None))
class AudioProducer(Thread):
"""AudioProducer
Given a mic and a recognizer implementation, continuously listens to the
mic for potential speech chunks and pushes them onto the queue.
"""
def __init__(self, state, queue, mic, recognizer, emitter, stream_handler):
super(AudioProducer, self).__init__()
self.daemon = True
self.state = state
self.queue = queue
self.mic = mic
self.recognizer = recognizer
self.emitter = emitter
self.stream_handler = stream_handler
def run(self):
restart_attempts = 0
with self.mic as source:
LOG.info("Adjusting for ambient noise, be silent!!!")
self.recognizer.adjust_for_ambient_noise(source)
LOG.info("Ambient noise profile has been created")
while self.state.running:
try:
audio, language = self.recognizer.listen(source, self.emitter,
self.stream_handler)
if audio is not None:
self.queue.put((AUDIO_DATA, audio, source, language))
else:
LOG.warning("Audio contains no data.")
except IOError as e:
# IOError will be thrown if the read is unsuccessful.
# If self.recognizer.overflow_exc is False (default)
# input buffer overflow IOErrors due to not consuming the
# buffers quickly enough will be silently ignored.
LOG.exception('IOError Exception in AudioProducer')
if e.errno == pyaudio.paInputOverflowed:
pass # Ignore overflow errors
elif restart_attempts < MAX_MIC_RESTARTS:
# restart the mic
restart_attempts += 1
LOG.info('Restarting the microphone...')
source.restart()
LOG.info('Restarted...')
else:
LOG.error('Restarting mic doesn\'t seem to work. '
'Stopping...')
raise
except Exception:
LOG.exception('Exception in AudioProducer')
raise
else:
                    # Reset restart attempt counter on successful audio read
restart_attempts = 0
finally:
if self.stream_handler is not None:
self.stream_handler.stream_stop()
def stop(self):
"""Stop producer thread."""
self.state.running = False
self.recognizer.stop()
class AudioConsumer(Thread):
"""AudioConsumer
Consumes AudioData chunks off the queue
"""
# In seconds, the minimum audio size to be sent to remote STT
MIN_AUDIO_SIZE = 0.5
def __init__(self, state, queue, emitter, stt, wakeup_recognizer):
super(AudioConsumer, self).__init__()
self.daemon = True
self.queue = queue
self.state = state
self.emitter = emitter
self.stt = stt
self.wakeup_recognizer = wakeup_recognizer
data_path = os.path.expanduser(CONFIGURATION["data_dir"])
listener_config = CONFIGURATION["listener"]
self.save_utterances = listener_config.get('record_utterances', False)
self.saved_utterances_dir = os.path.join(data_path, 'utterances')
if not os.path.isdir(data_path):
os.makedirs(data_path)
if not os.path.isdir(self.saved_utterances_dir):
os.makedirs(self.saved_utterances_dir)
def run(self):
while self.state.running:
self.read()
def read(self):
try:
audio = self.queue.get(timeout=0.5)
except Empty:
return
if audio is None:
return
tag, data, source, language = audio
if tag == AUDIO_DATA:
if data is not None:
if self.state.sleeping:
self.wake_up(data)
else:
self.process(data, source, language)
elif tag == STREAM_START:
self.stt.stream_start()
elif tag == STREAM_DATA:
self.stt.stream_data(data)
elif tag == STREAM_STOP:
self.stt.stream_stop()
else:
LOG.error("Unknown audio queue type %r" % audio)
def wake_up(self, audio):
if self.wakeup_recognizer.found_wake_word(audio.frame_data):
self.state.sleeping = False
self.emitter.emit('recognizer_loop:awoken')
@staticmethod
def _audio_length(audio):
return float(len(audio.frame_data)) / (
audio.sample_rate * audio.sample_width)
# TODO: Localization
def process(self, audio, source=None, language=None):
if source:
LOG.debug("Muting microphone during STT")
source.mute()
if self._audio_length(audio) < self.MIN_AUDIO_SIZE:
LOG.warning("Audio too short to be processed")
else:
transcription = self.transcribe(audio, language=language)
if transcription:
# STT succeeded, send the transcribed speech on for processing
payload = {
'utterances': [transcription],
'lang': language or self.stt.lang
}
self.emitter.emit("recognizer_loop:utterance", payload)
if source:
LOG.debug("Unmuting microphone")
source.unmute()
def _compile_metadata(self, utterance):
timestamp = str(int(1000 * get_time()))
if utterance:
name = utterance.replace(" ", "_").lower() + "_" + timestamp + ".wav"
else:
name = "UNK_" + timestamp + ".wav"
return {
'name': name,
'transcript': utterance,
'engine': self.stt.__class__.__name__,
'time': timestamp
}
@staticmethod
def play_error():
# If enabled, play a wave file with a short sound to audibly
# indicate speech recognition failed
sound = CONFIGURATION["listener"].get('error_sound')
audio_file = resolve_resource_file(sound)
if audio_file:
try:
if audio_file.endswith(".wav"):
play_wav(audio_file).wait()
elif audio_file.endswith(".mp3"):
play_mp3(audio_file).wait()
elif audio_file.endswith(".ogg"):
play_ogg(audio_file).wait()
else:
play_audio(audio_file).wait()
except Exception as e:
LOG.warning(e)
def save_utt(self, text, audio):
if self.save_utterances:
LOG.debug("saving utterance")
mtd = self._compile_metadata(text)
filename = os.path.join(self.saved_utterances_dir, mtd["name"])
with open(filename, 'wb') as f:
f.write(audio.get_wav_data())
filename = os.path.join(self.saved_utterances_dir,
mtd["name"].replace(".wav", ".json"))
with open(filename, 'w') as f:
json.dump(mtd, f, indent=4)
def transcribe(self, audio, language=None):
def send_unknown_intent():
""" Send message that nothing was transcribed. """
self.emitter.emit('recognizer_loop:speech.recognition.unknown')
try:
# Invoke the STT engine on the audio clip
            text = self.stt.execute(audio, language=language)
if text is not None:
text = text.lower().strip()
LOG.debug("STT: " + text)
else:
send_unknown_intent()
LOG.info('no words were transcribed')
self.save_utt(text, audio)
return text
except sr.RequestError as e:
LOG.error("Could not request Speech Recognition {0}".format(e))
except ConnectionError as e:
LOG.error("Connection Error: {0}".format(e))
self.emitter.emit("recognizer_loop:no_internet")
except RequestException as e:
LOG.error(e.__class__.__name__ + ': ' + str(e))
except sr.UnknownValueError:
LOG.error("Speech Recognition could not understand audio")
except Exception as e:
send_unknown_intent()
LOG.exception(e)
LOG.error("Speech Recognition Error")
self.play_error()
self.save_utt("", audio)
return None
class RecognizerLoopState:
def __init__(self):
self.running = False
self.sleeping = False
class RecognizerLoop(EventEmitter):
""" EventEmitter loop running speech recognition.
Local wake word recognizer and remote general speech recognition.
"""
def __init__(self, config=None):
super(RecognizerLoop, self).__init__()
self.mute_calls = 0
self.config = config or CONFIGURATION
self._load_config(config)
def _load_config(self, config=None):
"""Load configuration parameters from configuration."""
config = config or self.config
self.config_core = config
self.lang = config.get('lang', 'en-us')
self.config = config.get('listener')
rate = self.config.get('sample_rate')
device_index = self.config.get('device_index')
device_name = self.config.get('device_name')
if not device_index and device_name:
device_index = find_input_device(device_name)
LOG.debug('Using microphone (None = default): ' + str(device_index))
self.microphone = MutableMicrophone(device_index, rate,
mute=self.mute_calls > 0)
# TODO - localization
self.wakeup_recognizer = self.create_wakeup_recognizer()
self.hotword_engines = {}
self.create_hotword_engines()
self.responsive_recognizer = ResponsiveRecognizer(self.hotword_engines)
self.state = RecognizerLoopState()
def create_hotword_engines(self):
LOG.info("creating hotword engines")
hot_words = self.config_core.get("hotwords", {})
for word in hot_words:
data = hot_words[word]
if word == self.wakeup_recognizer.key_phrase \
or not data.get("active", True):
continue
sound = data.get("sound")
utterance = data.get("utterance")
listen = data.get("listen", False)
lang = data.get("lang", self.lang)
engine = OVOSWakeWordFactory.create_hotword(word,
loop=self,
config=hot_words,
lang=lang)
self.hotword_engines[word] = {"engine": engine,
"sound": sound,
"utterance": utterance,
"listen": listen,
"lang": lang}
def create_wakeup_recognizer(self):
LOG.info("creating stand up word engine")
word = self.config.get("stand_up_word", "wake up")
return OVOSWakeWordFactory.create_hotword(word, lang=self.lang,
config=self.config_core,
loop=self)
def start_async(self):
"""Start consumer and producer threads."""
self.state.running = True
stt = OVOSSTTFactory.create(self.config_core)
queue = Queue()
stream_handler = None
if stt.can_stream:
stream_handler = AudioStreamHandler(queue)
LOG.debug("Using STT engine: " + stt.__class__.__name__)
self.producer = AudioProducer(self.state, queue, self.microphone,
self.responsive_recognizer, self,
stream_handler)
self.producer.start()
self.consumer = AudioConsumer(self.state, queue, self,
stt, self.wakeup_recognizer)
self.consumer.start()
def stop(self):
self.state.running = False
self.producer.stop()
# wait for threads to shutdown
self.producer.join()
self.consumer.join()
def mute(self):
"""Mute microphone and increase number of requests to mute."""
self.mute_calls += 1
if self.microphone:
self.microphone.mute()
def unmute(self):
"""Unmute mic if as many unmute calls as mute calls have been received.
"""
if self.mute_calls > 0:
self.mute_calls -= 1
if self.mute_calls <= 0 and self.microphone:
self.microphone.unmute()
self.mute_calls = 0
def force_unmute(self):
"""Completely unmute mic regardless of the number of calls to mute."""
self.mute_calls = 0
self.unmute()
def is_muted(self):
if self.microphone:
return self.microphone.is_muted()
else:
return True # consider 'no mic' muted
def sleep(self):
self.state.sleeping = True
def awaken(self):
self.state.sleeping = False
def run(self):
"""Start and reload mic and STT handling threads as needed.
Wait for KeyboardInterrupt and shutdown cleanly.
"""
try:
self.start_async()
except Exception:
LOG.exception('Starting producer/consumer threads for listener '
'failed.')
return
# Handle reload of consumer / producer if config changes
while self.state.running:
try:
time.sleep(1)
except KeyboardInterrupt as e:
LOG.error(e)
self.stop()
raise # Re-raise KeyboardInterrupt
except Exception:
LOG.exception('Exception in RecognizerLoop')
raise
def reload(self):
"""Reload configuration and restart consumer and producer."""
self.stop()
for hw in self.hotword_engines:
try:
self.hotword_engines[hw]["engine"].stop()
except Exception as e:
LOG.exception(e)
# load config
self._load_config()
# restart
self.start_async()
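# Illustrative wiring sketch (not part of the original module; assumes the file
# is run as a script with a valid CONFIGURATION and a working microphone). It
# registers a handler for the "recognizer_loop:utterance" event emitted by
# AudioConsumer.process() and starts the blocking loop; the handler name is
# hypothetical.
def _print_utterance(event):
    LOG.info("Transcribed: {}".format(event["utterances"]))
if __name__ == "__main__":
    loop = RecognizerLoop()
    loop.on("recognizer_loop:utterance", _print_utterance)  # pyee EventEmitter API
    loop.run()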
|
URL = "url"
IOTA = "iota"
DOC = "document"
URL_TYPES = [URL, IOTA, DOC]
|
matriz = [[0, 0, 0], [0, 0, 0], [0, 0, 0]]
somap = somavaloter = maior = 0
for l in range(0, 3):
    for c in range(0, 3):
        n = ' '
        while not n.isnumeric():
            n = input(f'Enter a value for [{l}, {c}]: ')
            if not n.isnumeric():
                print('The value entered is not a number.')
        n = int(n)
        matriz[l][c] = n
for l in range(0, 3):
    for c in range(0, 3):
        print(f'[{matriz[l][c]:^5}]', end='')
        if matriz[l][c] % 2 == 0:
            somap = somap + matriz[l][c]
    print()
for c in range(0, 3):
    somavaloter = somavaloter + matriz[c][2]
    if c == 0 or matriz[1][c] > maior:
        maior = matriz[1][c]
print(f'The sum of all even values entered is {somap}')
print(f'The sum of the values in the third column is {somavaloter}')
print(f'The largest value in the second row is {maior}')
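# Illustrative refactor sketch (not part of the original exercise): the
# prompt-until-numeric loop above could be factored into a small helper.
def read_int(prompt):
    value = input(prompt)
    while not value.isnumeric():
        print('The value entered is not a number.')
        value = input(prompt)
    return int(value)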
|
import logging
import math
import cv2
import numpy as np
import tensorflow as tf
from neuralgym.ops.gan_ops import *
from neuralgym.ops.layers import *
from neuralgym.ops.layers import resize
from neuralgym.ops.loss_ops import *
from neuralgym.ops.summary_ops import *
from PIL import Image, ImageDraw
from tensorflow.contrib.framework.python.ops import add_arg_scope
logger = logging.getLogger()
np.random.seed(2018)
@add_arg_scope
def gen_conv(x, cnum, ksize, stride=1, rate=1, name='conv',
padding='SAME', activation=tf.nn.elu, training=True):
"""Define conv for generator.
Args:
x: Input.
cnum: Channel number.
ksize: Kernel size.
        stride: Convolution stride.
        rate: Rate for dilated conv.
        name: Name of layers.
        padding: Padding mode, defaults to SAME.
activation: Activation function after convolution.
training: If current graph is for training or inference, used for bn.
Returns:
tf.Tensor: output
"""
    assert padding in ['SYMMETRIC', 'SAME', 'REFLECT']
    if padding == 'SYMMETRIC' or padding == 'REFLECT':
p = int(rate*(ksize-1)/2)
x = tf.pad(x, [[0,0], [p, p], [p, p], [0,0]], mode=padding)
padding = 'VALID'
x = tf.layers.conv2d(
x, cnum, ksize, stride, dilation_rate=rate,
activation=None, padding=padding, name=name)
if cnum == 3 or activation is None:
# conv for output
return x
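    # Gated convolution: half of the channels act as features and the other half
    # as a gate; the sigmoid of the gate modulates the activated features.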
x, y = tf.split(x, 2, 3)
x = activation(x)
y = tf.nn.sigmoid(y)
x = x * y
return x
@add_arg_scope
def gen_deconv(x, cnum, name='upsample', padding='SAME', training=True):
"""Define deconv for generator.
The deconv is defined to be a x2 resize_nearest_neighbor operation with
additional gen_conv operation.
Args:
x: Input.
cnum: Channel number.
name: Name of layers.
training: If current graph is for training or inference, used for bn.
Returns:
tf.Tensor: output
"""
with tf.variable_scope(name):
x = resize(x, func=tf.compat.v1.image.resize_nearest_neighbor)
x = gen_conv(
x, cnum, 3, 1, name=name+'_conv', padding=padding,
training=training)
return x
@add_arg_scope
def dis_conv(x, cnum, ksize=5, stride=2, name='conv', training=True):
"""Define conv for discriminator.
Activation is set to leaky_relu.
Args:
x: Input.
cnum: Channel number.
ksize: Kernel size.
        stride: Convolution stride.
name: Name of layers.
training: If current graph is for training or inference, used for bn.
Returns:
tf.Tensor: output
"""
x = conv2d_spectral_norm(x, cnum, ksize, stride, 'SAME', name=name)
x = tf.nn.leaky_relu(x)
return x
def random_bbox(FLAGS):
"""Generate a random tlhw.
Returns:
tuple: (top, left, height, width)
"""
img_shape = FLAGS.img_shapes
img_height = img_shape[0]
img_width = img_shape[1]
maxt = img_height - FLAGS.vertical_margin - FLAGS.height
maxl = img_width - FLAGS.horizontal_margin - FLAGS.width
t = tf.random_uniform(
[], minval=FLAGS.vertical_margin, maxval=maxt, dtype=tf.int32)
l = tf.random_uniform(
[], minval=FLAGS.horizontal_margin, maxval=maxl, dtype=tf.int32)
h = tf.constant(FLAGS.height)
w = tf.constant(FLAGS.width)
return (t, l, h, w)
def bbox2mask(FLAGS, bbox, name='mask'):
"""Generate mask tensor from bbox.
Args:
bbox: tuple, (top, left, height, width)
Returns:
tf.Tensor: output with shape [1, H, W, 1]
"""
def npmask(bbox, height, width, delta_h, delta_w):
mask = np.zeros((1, height, width, 1), np.float32)
h = np.random.randint(delta_h//2+1)
w = np.random.randint(delta_w//2+1)
mask[:, bbox[0]+h:bbox[0]+bbox[2]-h,
bbox[1]+w:bbox[1]+bbox[3]-w, :] = 1.
return mask
with tf.variable_scope(name), tf.device('/cpu:0'):
img_shape = FLAGS.img_shapes
height = img_shape[0]
width = img_shape[1]
mask = tf.py_func(
npmask,
[bbox, height, width,
FLAGS.max_delta_height, FLAGS.max_delta_width],
tf.float32, stateful=False)
mask.set_shape([1] + [height, width] + [1])
return mask
def brush_stroke_mask(FLAGS, name='mask'):
"""Generate mask tensor from bbox.
Returns:
tf.Tensor: output with shape [1, H, W, 1]
"""
min_num_vertex = 4
max_num_vertex = 12
mean_angle = 2*math.pi / 5
angle_range = 2*math.pi / 15
min_width = 12
max_width = 40
def generate_mask(H, W):
average_radius = math.sqrt(H*H+W*W) / 8
mask = Image.new('L', (W, H), 0)
for _ in range(np.random.randint(1, 4)):
num_vertex = np.random.randint(min_num_vertex, max_num_vertex)
angle_min = mean_angle - np.random.uniform(0, angle_range)
angle_max = mean_angle + np.random.uniform(0, angle_range)
angles = []
vertex = []
for i in range(num_vertex):
if i % 2 == 0:
angles.append(2*math.pi - np.random.uniform(angle_min, angle_max))
else:
angles.append(np.random.uniform(angle_min, angle_max))
h, w = mask.size
vertex.append((int(np.random.randint(0, w)), int(np.random.randint(0, h))))
for i in range(num_vertex):
r = np.clip(
np.random.normal(loc=average_radius, scale=average_radius//2),
0, 2*average_radius)
new_x = np.clip(vertex[-1][0] + r * math.cos(angles[i]), 0, w)
new_y = np.clip(vertex[-1][1] + r * math.sin(angles[i]), 0, h)
vertex.append((int(new_x), int(new_y)))
draw = ImageDraw.Draw(mask)
width = int(np.random.uniform(min_width, max_width))
draw.line(vertex, fill=1, width=width)
for v in vertex:
draw.ellipse((v[0] - width//2,
v[1] - width//2,
v[0] + width//2,
v[1] + width//2),
fill=1)
if np.random.normal() > 0:
mask.transpose(Image.FLIP_LEFT_RIGHT)
if np.random.normal() > 0:
mask.transpose(Image.FLIP_TOP_BOTTOM)
mask = np.asarray(mask, np.float32)
mask = np.reshape(mask, (1, H, W, 1))
return mask
with tf.variable_scope(name), tf.device('/cpu:0'):
img_shape = FLAGS.img_shapes
height = img_shape[0]
width = img_shape[1]
mask = tf.py_func(
generate_mask,
[height, width],
tf.float32, stateful=True)
mask.set_shape([1] + [height, width] + [1])
return mask
def local_patch(x, bbox):
"""Crop local patch according to bbox.
Args:
x: input
bbox: (top, left, height, width)
Returns:
tf.Tensor: local patch
"""
x = tf.image.crop_to_bounding_box(x, bbox[0], bbox[1], bbox[2], bbox[3])
return x
def resize_mask_like(mask, x):
"""Resize mask like shape of x.
Args:
mask: Original mask.
x: To shape of x.
Returns:
tf.Tensor: resized mask
"""
mask_resize = resize(
mask, to_shape=x.get_shape().as_list()[1:3],
func=tf.image.resize_nearest_neighbor)
return mask_resize
def contextual_attention(f, b, mask=None, ksize=3, stride=1, rate=1,
fuse_k=3, softmax_scale=10., training=True, fuse=True):
""" Contextual attention layer implementation.
Contextual attention is first introduced in publication:
Generative Image Inpainting with Contextual Attention, Yu et al.
Args:
        f: Input feature to match (foreground).
        b: Input feature for match (background).
        mask: Input mask for b, indicating patches not available.
        ksize: Kernel size for contextual attention.
        stride: Stride for extracting patches from b.
rate: Dilation for matching.
softmax_scale: Scaled softmax for attention.
training: Indicating if current graph is training or inference.
Returns:
tf.Tensor: output
"""
# get shapes
raw_fs = tf.shape(f)
raw_int_fs = f.get_shape().as_list()
raw_int_bs = b.get_shape().as_list()
# extract patches from background with stride and rate
kernel = 2*rate
raw_w = tf.extract_image_patches(
b, [1,kernel,kernel,1], [1,rate*stride,rate*stride,1], [1,1,1,1], padding='SAME')
raw_w = tf.reshape(raw_w, [raw_int_bs[0], -1, kernel, kernel, raw_int_bs[3]])
raw_w = tf.transpose(raw_w, [0, 2, 3, 4, 1]) # transpose to b*k*k*c*hw
# downscaling foreground option: downscaling both foreground and
# background for matching and use original background for reconstruction.
f = resize(f, scale=1./rate, func=tf.image.resize_nearest_neighbor)
b = resize(b, to_shape=[int(raw_int_bs[1]/rate), int(raw_int_bs[2]/rate)], func=tf.image.resize_nearest_neighbor) # https://github.com/tensorflow/tensorflow/issues/11651
if mask is not None:
mask = resize(mask, scale=1./rate, func=tf.image.resize_nearest_neighbor)
fs = tf.shape(f)
int_fs = f.get_shape().as_list()
f_groups = tf.split(f, int_fs[0], axis=0)
# from t(H*W*C) to w(b*k*k*c*h*w)
bs = tf.shape(b)
int_bs = b.get_shape().as_list()
w = tf.extract_image_patches(
b, [1,ksize,ksize,1], [1,stride,stride,1], [1,1,1,1], padding='SAME')
w = tf.reshape(w, [int_fs[0], -1, ksize, ksize, int_fs[3]])
w = tf.transpose(w, [0, 2, 3, 4, 1]) # transpose to b*k*k*c*hw
# process mask
if mask is None:
mask = tf.zeros([1, bs[1], bs[2], 1])
m = tf.extract_image_patches(
mask, [1,ksize,ksize,1], [1,stride,stride,1], [1,1,1,1], padding='SAME')
m = tf.reshape(m, [1, -1, ksize, ksize, 1])
m = tf.transpose(m, [0, 2, 3, 4, 1]) # transpose to b*k*k*c*hw
m = m[0]
mm = tf.cast(tf.equal(tf.reduce_mean(m, axis=[0,1,2], keep_dims=True), 0.), tf.float32)
w_groups = tf.split(w, int_bs[0], axis=0)
raw_w_groups = tf.split(raw_w, int_bs[0], axis=0)
y = []
offsets = []
k = fuse_k
scale = softmax_scale
fuse_weight = tf.reshape(tf.eye(k), [k, k, 1, 1])
for xi, wi, raw_wi in zip(f_groups, w_groups, raw_w_groups):
# conv for compare
wi = wi[0]
wi_normed = wi / tf.maximum(tf.sqrt(tf.reduce_sum(tf.square(wi), axis=[0,1,2])), 1e-4)
yi = tf.nn.conv2d(xi, wi_normed, strides=[1,1,1,1], padding="SAME")
# conv implementation for fuse scores to encourage large patches
if fuse:
yi = tf.reshape(yi, [1, fs[1]*fs[2], bs[1]*bs[2], 1])
yi = tf.nn.conv2d(yi, fuse_weight, strides=[1,1,1,1], padding='SAME')
yi = tf.reshape(yi, [1, fs[1], fs[2], bs[1], bs[2]])
yi = tf.transpose(yi, [0, 2, 1, 4, 3])
yi = tf.reshape(yi, [1, fs[1]*fs[2], bs[1]*bs[2], 1])
yi = tf.nn.conv2d(yi, fuse_weight, strides=[1,1,1,1], padding='SAME')
yi = tf.reshape(yi, [1, fs[2], fs[1], bs[2], bs[1]])
yi = tf.transpose(yi, [0, 2, 1, 4, 3])
yi = tf.reshape(yi, [1, fs[1], fs[2], bs[1]*bs[2]])
# softmax to match
yi *= mm # mask
yi = tf.nn.softmax(yi*scale, 3)
yi *= mm # mask
offset = tf.argmax(yi, axis=3, output_type=tf.int32)
offset = tf.stack([offset // fs[2], offset % fs[2]], axis=-1)
# deconv for patch pasting
# 3.1 paste center
wi_center = raw_wi[0]
yi = tf.nn.conv2d_transpose(yi, wi_center, tf.concat([[1], raw_fs[1:]], axis=0), strides=[1,rate,rate,1]) / 4.
y.append(yi)
offsets.append(offset)
y = tf.concat(y, axis=0)
y.set_shape(raw_int_fs)
offsets = tf.concat(offsets, axis=0)
offsets.set_shape(int_bs[:3] + [2])
# case1: visualize optical flow: minus current position
h_add = tf.tile(tf.reshape(tf.range(bs[1]), [1, bs[1], 1, 1]), [bs[0], 1, bs[2], 1])
w_add = tf.tile(tf.reshape(tf.range(bs[2]), [1, 1, bs[2], 1]), [bs[0], bs[1], 1, 1])
offsets = offsets - tf.concat([h_add, w_add], axis=3)
# to flow image
flow = flow_to_image_tf(offsets)
# # case2: visualize which pixels are attended
# flow = highlight_flow_tf(offsets * tf.cast(mask, tf.int32))
if rate != 1:
flow = resize(flow, scale=rate, func=tf.image.resize_bilinear)
return y, flow
def test_contextual_attention(args):
"""Test contextual attention layer with 3-channel image input
(instead of n-channel feature).
"""
import cv2
import os
    # run on cpu
    os.environ['CUDA_VISIBLE_DEVICES'] = ''
rate = 2
stride = 1
grid = rate*stride
b = cv2.imread(args.imageA)
b = cv2.resize(b, None, fx=0.5, fy=0.5, interpolation=cv2.INTER_CUBIC)
h, w, _ = b.shape
b = b[:h//grid*grid, :w//grid*grid, :]
b = np.expand_dims(b, 0)
logger.info('Size of imageA: {}'.format(b.shape))
f = cv2.imread(args.imageB)
h, w, _ = f.shape
f = f[:h//grid*grid, :w//grid*grid, :]
f = np.expand_dims(f, 0)
logger.info('Size of imageB: {}'.format(f.shape))
with tf.Session() as sess:
bt = tf.constant(b, dtype=tf.float32)
ft = tf.constant(f, dtype=tf.float32)
yt, flow = contextual_attention(
ft, bt, stride=stride, rate=rate,
training=False, fuse=False)
y = sess.run(yt)
cv2.imwrite(args.imageOut, y[0])
def make_color_wheel():
RY, YG, GC, CB, BM, MR = (15, 6, 4, 11, 13, 6)
ncols = RY + YG + GC + CB + BM + MR
colorwheel = np.zeros([ncols, 3])
col = 0
# RY
colorwheel[0:RY, 0] = 255
colorwheel[0:RY, 1] = np.transpose(np.floor(255*np.arange(0, RY) / RY))
col += RY
# YG
colorwheel[col:col+YG, 0] = 255 - np.transpose(np.floor(255*np.arange(0, YG) / YG))
colorwheel[col:col+YG, 1] = 255
col += YG
# GC
colorwheel[col:col+GC, 1] = 255
colorwheel[col:col+GC, 2] = np.transpose(np.floor(255*np.arange(0, GC) / GC))
col += GC
# CB
colorwheel[col:col+CB, 1] = 255 - np.transpose(np.floor(255*np.arange(0, CB) / CB))
colorwheel[col:col+CB, 2] = 255
col += CB
# BM
colorwheel[col:col+BM, 2] = 255
colorwheel[col:col+BM, 0] = np.transpose(np.floor(255*np.arange(0, BM) / BM))
    col += BM
# MR
colorwheel[col:col+MR, 2] = 255 - np.transpose(np.floor(255 * np.arange(0, MR) / MR))
colorwheel[col:col+MR, 0] = 255
return colorwheel
COLORWHEEL = make_color_wheel()
def compute_color(u,v):
h, w = u.shape
img = np.zeros([h, w, 3])
nanIdx = np.isnan(u) | np.isnan(v)
u[nanIdx] = 0
v[nanIdx] = 0
    colorwheel = COLORWHEEL
ncols = np.size(colorwheel, 0)
rad = np.sqrt(u**2+v**2)
a = np.arctan2(-v, -u) / np.pi
fk = (a+1) / 2 * (ncols - 1) + 1
k0 = np.floor(fk).astype(int)
k1 = k0 + 1
k1[k1 == ncols+1] = 1
f = fk - k0
for i in range(np.size(colorwheel,1)):
tmp = colorwheel[:, i]
col0 = tmp[k0-1] / 255
col1 = tmp[k1-1] / 255
col = (1-f) * col0 + f * col1
idx = rad <= 1
col[idx] = 1-rad[idx]*(1-col[idx])
notidx = np.logical_not(idx)
col[notidx] *= 0.75
img[:, :, i] = np.uint8(np.floor(255 * col*(1-nanIdx)))
return img
def flow_to_image(flow):
"""Transfer flow map to image.
Part of code forked from flownet.
"""
out = []
maxu = -999.
maxv = -999.
minu = 999.
minv = 999.
maxrad = -1
for i in range(flow.shape[0]):
u = flow[i, :, :, 0]
v = flow[i, :, :, 1]
idxunknow = (abs(u) > 1e7) | (abs(v) > 1e7)
u[idxunknow] = 0
v[idxunknow] = 0
maxu = max(maxu, np.max(u))
minu = min(minu, np.min(u))
maxv = max(maxv, np.max(v))
minv = min(minv, np.min(v))
rad = np.sqrt(u ** 2 + v ** 2)
maxrad = max(maxrad, np.max(rad))
u = u/(maxrad + np.finfo(float).eps)
v = v/(maxrad + np.finfo(float).eps)
img = compute_color(u, v)
out.append(img)
return np.float32(np.uint8(out))
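# Illustrative helper (not part of the original module): builds a tiny synthetic
# flow field and runs it through flow_to_image(). The grid size and value range
# are arbitrary; the function only shows the expected [batch, H, W, 2] input
# layout and the [batch, H, W, 3] output.
def _demo_flow_to_image():
    xx, yy = np.meshgrid(np.linspace(-3., 3., 32), np.linspace(-3., 3., 32))
    demo_flow = np.stack([xx, yy], axis=-1)[np.newaxis].astype(np.float32)
    return flow_to_image(demo_flow)  # float32 array of shape (1, 32, 32, 3)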
def flow_to_image_tf(flow, name='flow_to_image'):
"""Tensorflow ops for computing flow to image.
"""
with tf.variable_scope(name), tf.device('/cpu:0'):
img = tf.py_func(flow_to_image, [flow], tf.float32, stateful=False)
img.set_shape(flow.get_shape().as_list()[0:-1]+[3])
img = img / 127.5 - 1.
return img
def highlight_flow(flow):
"""Convert flow into middlebury color code image.
"""
out = []
s = flow.shape
for i in range(flow.shape[0]):
img = np.ones((s[1], s[2], 3)) * 144.
u = flow[i, :, :, 0]
v = flow[i, :, :, 1]
for h in range(s[1]):
            for w in range(s[2]):
ui = u[h,w]
vi = v[h,w]
img[ui, vi, :] = 255.
out.append(img)
return np.float32(np.uint8(out))
def highlight_flow_tf(flow, name='flow_to_image'):
"""Tensorflow ops for highlight flow.
"""
with tf.variable_scope(name), tf.device('/cpu:0'):
img = tf.py_func(highlight_flow, [flow], tf.float32, stateful=False)
img.set_shape(flow.get_shape().as_list()[0:-1]+[3])
img = img / 127.5 - 1.
return img
def image2edge(image):
"""Convert image to edges.
"""
out = []
for i in range(image.shape[0]):
img = cv2.Laplacian(image[i, :, :, :], cv2.CV_64F, ksize=3, scale=2)
out.append(img)
return np.float32(np.uint8(out))
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--imageA', default='', type=str, help='Image A as background patches to reconstruct image B.')
parser.add_argument('--imageB', default='', type=str, help='Image B is reconstructed with image A.')
parser.add_argument('--imageOut', default='result.png', type=str, help='Image B is reconstructed with image A.')
args = parser.parse_args()
test_contextual_attention(args)
|
#!/usr/bin/python3.6
"""
Module initially auto generated using V5Automation files from CATIA V5 R28 on 2020-06-11 12:40:47.360445
.. warning::
The notes denoted "CAA V5 Visual Basic Help" are to be used as reference only.
They are there as a guide as to how the visual basic / catscript functions work
and thus help debugging in pycatia.
"""
from pycatia.in_interfaces.reference import Reference
from pycatia.part_interfaces.draft_domains import DraftDomains
from pycatia.part_interfaces.dress_up_shape import DressUpShape
class Draft(DressUpShape):
"""
.. note::
:class: toggle
CAA V5 Visual Basic Help (2020-06-11 12:40:47.360445)
| System.IUnknown
| System.IDispatch
| System.CATBaseUnknown
| System.CATBaseDispatch
| System.AnyObject
| MecModInterfaces.Shape
| PartInterfaces.DressUpShape
| Draft
|
| Represents the draft shape.
| A draft shape is made up of draft domains (at least one) and of a parting
| element.
"""
def __init__(self, com_object):
super().__init__(com_object)
self.draft = com_object
@property
def draft_domains(self) -> DraftDomains:
"""
.. note::
:class: toggle
CAA V5 Visual Basic Help (2020-07-06 14:02:20.222384)
| o Property DraftDomains() As DraftDomains (Read Only)
|
| Returns the collection of draft domains.
|
| Example:
| The following example returns in list the collection of draft domains
| of the firstDraft draft:
|
| Set list = firstDraft.DraftDomains
:return: DraftDomains
:rtype: DraftDomains
"""
return DraftDomains(self.draft.DraftDomains)
@property
def mode(self) -> int:
"""
.. note::
:class: toggle
CAA V5 Visual Basic Help (2020-07-06 14:02:20.222384)
| o Property Mode() As CatDraftMode
|
| Returns or sets the draft mode.
|
| Example:
| The following example returns in mode the draft mode of the firstDraft
| draft, and then sets it to
| CatReflectKeepFaceDraftMode:
|
| Set mode = firstDraft.Mode
| Set firstDraft.Mode = CatReflectKeepFaceDraftMode
:return: int
:rtype: int
"""
return self.draft.Mode
@mode.setter
def mode(self, value: int):
"""
:param int value:
"""
self.draft.Mode = value
@property
def parting_element(self) -> Reference:
"""
.. note::
:class: toggle
CAA V5 Visual Basic Help (2020-07-06 14:02:20.222384)
| o Property PartingElement() As Reference
|
| Returns or sets the draft parting element.
| To set the property, you can use the following Boundary object:
| PlanarFace.
|
| Example:
| The following example returns in element the parting element of the
| firstDraft draft, and then sets it to the element2 geometrical
| element:
|
| Set element = firstDraft.PartingElement
| Set firstDraft.PartingElement = element2
:return: Reference
:rtype: Reference
"""
return Reference(self.draft.PartingElement)
@parting_element.setter
def parting_element(self, value: Reference):
"""
:param Reference value:
"""
self.draft.PartingElement = value
def __repr__(self):
return f'Draft(name="{self.name}")'
|
"""Actions for compiling Haskell source code"""
load(":private/java.bzl", "java_interop_info")
load(":private/path_utils.bzl",
"declare_compiled",
"target_unique_name",
"module_name",
"module_unique_name",
"get_external_libs_path",
)
load(":private/pkg_id.bzl", "pkg_id")
load(":private/providers.bzl", "DefaultCompileInfo")
load(":private/set.bzl", "set")
load("@bazel_skylib//:lib.bzl", "dicts", "paths")
def _make_ghc_defs_dump(hs, cpp_defines):
"""Generate a file containing GHC default pre-processor definitions.
Args:
hs: Haskell context.
cpp_defines: Location of cpp_defines pattern file.
Returns:
File: The file with GHC definitions.
"""
raw_filename = "ghc-defs-dump-{0}.hs".format(hs.name)
dummy_src = hs.actions.declare_file(raw_filename)
ghc_defs_dump_raw = hs.actions.declare_file(paths.replace_extension(raw_filename, ".hspp"))
ghc_defs_dump = hs.actions.declare_file(paths.replace_extension(raw_filename, ".h"))
hs.actions.write(dummy_src, "")
args = hs.actions.args()
args.add([
"-E",
"-optP-dM",
"-cpp",
dummy_src.path,
])
hs.actions.run(
inputs = [dummy_src] + hs.extra_binaries,
outputs = [ghc_defs_dump_raw],
mnemonic = "HaskellCppDefines",
executable = hs.tools.ghc,
arguments = [args],
env = hs.env,
)
hs.actions.run_shell(
inputs = [ghc_defs_dump_raw, cpp_defines, hs.tools.grep],
outputs = [ghc_defs_dump],
command = """
grep "^[^#]" {cpp_defines} | while IFS= read -r patt; do
grep "$patt" {raw} >> {filtered}
done
""".format(
cpp_defines = cpp_defines.path,
raw = ghc_defs_dump_raw.path,
filtered = ghc_defs_dump.path,
),
env = hs.env,
)
return ghc_defs_dump
def _process_hsc_file(hs, cc, ghc_defs_dump, hsc_file):
"""Process a single hsc file.
Args:
hs: Haskell context.
cc: CcInteropInfo, information about C dependencies.
ghc_defs_dump: File with GHC definitions.
hsc_file: hsc file to process.
Returns:
(File, string): Haskell source file created by processing hsc_file and
new import directory containing the produced file.
"""
args = hs.actions.args()
# Output a Haskell source file.
hsc_dir_raw = target_unique_name(hs, "hsc")
hs_out = declare_compiled(hs, hsc_file, ".hs", directory=hsc_dir_raw)
args.add([hsc_file.path, "-o", hs_out.path])
args.add(["--cflag=" + f for f in cc.cpp_flags])
args.add(["--cflag=" + f for f in cc.include_args])
args.add("-I{0}".format(ghc_defs_dump.dirname))
args.add("-i{0}".format(ghc_defs_dump.basename))
hs.actions.run(
inputs = depset(transitive = [
depset(cc.hdrs),
depset([hs.tools.gcc]),
depset([hsc_file, ghc_defs_dump]),
]),
outputs = [hs_out],
mnemonic = "HaskellHsc2hs",
executable = hs.tools.hsc2hs,
arguments = [args],
env = hs.env,
)
idir = paths.join(
hs.bin_dir.path,
hs.label.package,
hsc_dir_raw,
)
return hs_out, idir
def _process_chs_file(hs, cc, ghc_defs_dump, chs_file, chi_files=[]):
"""Process a single chs file.
Args:
hs: Haskell context.
cc: CcInteropInfo, information about C dependencies.
ghc_defs_dump: File with GHC definitions.
chs_file: chs file to process.
chi_files: .chi files that should be available to c2hs.
Returns:
(File, File, string): Haskell source file created by processing
chs_file, .chi file produced by the same file, and new import
directory containing the generated source file.
"""
args = hs.actions.args()
# Output a Haskell source file.
chs_dir_raw = target_unique_name(hs, "chs")
hs_out = declare_compiled(hs, chs_file, ".hs", directory=chs_dir_raw)
chi_out = declare_compiled(hs, chs_file, ".chi", directory=chs_dir_raw)
args.add([chs_file.path, "-o", hs_out.path])
args.add(["-C-E"])
args.add(["--cpp", hs.tools.gcc.path])
args.add(["-C-I{0}".format(ghc_defs_dump.dirname)])
args.add(["-C-include{0}".format(ghc_defs_dump.basename)])
args.add(["-C" + x for x in cc.cpp_flags])
args.add(["-C" + x for x in cc.include_args])
chi_include_root = paths.join(
hs.bin_dir.path,
hs.label.workspace_root,
hs.label.package,
chs_dir_raw,
)
args.add(["-i" + chi_include_root])
hs.actions.run(
inputs = depset(transitive = [
depset(cc.hdrs),
depset([hs.tools.gcc]),
depset([chs_file, ghc_defs_dump]),
depset(chi_files),
]),
outputs = [hs_out, chi_out],
executable = hs.tools.c2hs,
mnemonic = "HaskellC2Hs",
arguments = [args],
env = hs.env,
)
idir = paths.join(
hs.bin_dir.path,
hs.label.package,
chs_dir_raw,
)
return hs_out, chi_out, idir
def _output_file_ext(base, dynamic, profiling_enabled):
"""Return extension that output of compilation should have depending on the
following inputs:
Args:
base: usually "o" for object files and "hi" for interface files. Preceding
dot "." will be preserved in the output.
dynamic: bool, whether we're compiling dynamic object files.
profiling_enabled: bool, whether profiling is enabled.
Returns:
String, extension of Haskell object file.
"""
with_dot = False
ext = ""
if base[0] == '.':
with_dot = True
ext = base[1:]
else:
ext = base
if dynamic:
ext = "dyn_" + ext
if profiling_enabled:
ext = "p_" + ext
return ("." if with_dot else "") + ext
def _compilation_defaults(hs, cc, java, dep_info, srcs, extra_srcs, cpp_defines, compiler_flags, with_profiling, main_file = None, my_pkg_id = None):
"""Declare default compilation targets and create default compiler arguments.
Returns:
DefaultCompileInfo: Populated default compilation settings.
"""
ghc_args = []
# GHC expects the CC compiler as the assembler, but segregates the
# set of flags to pass to it when used as an assembler. So we have
# to set both -optc and -opta.
# cc_args = [
# "-optc" + f for f in cc.compiler_flags
# ] + [
# "-opta" + f for f in cc.compiler_flags
# ]
# ghc_args += cc.compiler_flags
# Declare file directories
objects_dir_raw = target_unique_name(hs, "objects")
objects_dir = paths.join(
hs.bin_dir.path,
hs.label.workspace_root,
hs.label.package,
objects_dir_raw,
)
interfaces_dir_raw = target_unique_name(hs, "interfaces")
interfaces_dir = paths.join(
hs.bin_dir.path,
hs.label.workspace_root,
hs.label.package,
interfaces_dir_raw,
)
# Default compiler flags.
ghc_args += hs.toolchain.compiler_flags
ghc_args += compiler_flags
ghc_args.append("-hide-all-packages")
# Work around macOS linker limits. This fix has landed in GHC HEAD, but is
# not yet in a release; plus, we still want to support older versions of
# GHC. For details, see: https://phabricator.haskell.org/D4714
if hs.toolchain.is_darwin:
ghc_args += ["-optl-Wl,-dead_strip_dylibs"]
# Expose all prebuilt dependencies
for prebuilt_dep in set.to_list(dep_info.direct_prebuilt_deps):
ghc_args += ["-package", prebuilt_dep]
# Expose all bazel dependencies
for package in set.to_list(dep_info.package_ids):
if package != my_pkg_id:
ghc_args += ["-package-id", package]
# Only include package DBs for deps, prebuilt deps should be found
# auto-magically by GHC.
for cache in set.to_list(dep_info.package_caches):
ghc_args += ["-package-db", cache.dirname]
# We want object and dynamic objects from all inputs.
object_files = []
object_dyn_files = []
# We need to keep interface files we produce so we can import
# modules cross-package.
interface_files = []
header_files = []
boot_files = []
source_files = set.empty()
modules = set.empty()
# Add import hierarchy root.
# Note that this is not perfect, since GHC requires hs-boot files
# to be in the same directory as the corresponding .hs file. Thus
# the two must both have the same root; i.e., both plain files,
# both in bin_dir, or both in genfiles_dir.
import_dirs = set.from_list([
hs.src_root,
paths.join(hs.bin_dir.path, hs.src_root),
paths.join(hs.genfiles_dir.path, hs.src_root),
])
# Output object files are named after modules, not after input file names.
# The difference is only visible in the case of Main module because it may
# be placed in a file with a name different from "Main.hs". In that case
# still Main.o will be produced.
ghc_defs_dump = _make_ghc_defs_dump(hs, cpp_defines)
chi_files_so_far = []
for s in srcs:
if s.extension == "h":
header_files.append(s)
if s.extension in ["hs-boot", "lhs-boot"]:
boot_files.append(s)
elif s.extension in ["hs", "lhs", "hsc", "chs"]:
if not main_file or s != main_file:
if s.extension == "hsc":
s0, idir = _process_hsc_file(hs, cc, ghc_defs_dump, s)
set.mutable_insert(source_files, s0)
set.mutable_insert(import_dirs, idir)
elif s.extension == "chs":
s0, chi, idir = _process_chs_file(hs, cc, ghc_defs_dump, s, chi_files_so_far)
set.mutable_insert(source_files, s0)
set.mutable_insert(import_dirs, idir)
chi_files_so_far.append(chi)
else:
set.mutable_insert(source_files, s)
set.mutable_insert(modules, module_name(hs, s))
object_files.append(
declare_compiled(
hs,
s,
_output_file_ext(".o", False, with_profiling),
directory=objects_dir_raw
)
)
if not with_profiling:
object_dyn_files.append(
declare_compiled(
hs,
s,
_output_file_ext(".o", True, with_profiling),
directory=objects_dir_raw
)
)
interface_files.append(
declare_compiled(
hs,
s,
_output_file_ext(".hi", False, with_profiling),
directory=interfaces_dir_raw
)
)
if not with_profiling:
interface_files.append(
declare_compiled(
hs,
s,
_output_file_ext(".hi", True, with_profiling),
directory=interfaces_dir_raw
)
)
else:
if s.extension == "hsc":
                    s0, idir = _process_hsc_file(hs, cc, ghc_defs_dump, s)
set.mutable_insert(source_files, s0)
elif s.extension == "chs":
                    s0, chi, idir = _process_chs_file(hs, cc, ghc_defs_dump, s, chi_files_so_far)
set.mutable_insert(source_files, s0)
chi_files_so_far.append(chi)
else:
set.mutable_insert(source_files, s)
set.mutable_insert(modules, "Main")
object_files.append(
hs.actions.declare_file(
paths.join(
objects_dir_raw,
paths.replace_extension(
"Main",
_output_file_ext(".o", False, with_profiling)
)
)
)
)
if not with_profiling:
object_dyn_files.append(
hs.actions.declare_file(
paths.join(
objects_dir_raw,
paths.replace_extension(
"Main",
_output_file_ext(".o", True, with_profiling),
)
)
)
)
interface_files.append(
hs.actions.declare_file(
paths.join(
interfaces_dir_raw,
paths.replace_extension(
"Main",
_output_file_ext(".hi", False, with_profiling),
)
)
)
)
if not with_profiling:
interface_files.append(
hs.actions.declare_file(
paths.join(
interfaces_dir_raw,
paths.replace_extension(
"Main",
_output_file_ext(".hi", True, with_profiling),
)
)
)
)
ghc_args += ["-i{0}".format(d) for d in set.to_list(import_dirs)]
ghc_args += ["-optP" + f for f in cc.cpp_flags]
ghc_args += cc.include_args
locale_archive_depset = (
depset([hs.toolchain.locale_archive])
if hs.toolchain.locale_archive != None else depset()
)
# This is absolutely required otherwise GHC doesn't know what package it's
# creating `Name`s for to put them in Haddock interface files which then
# results in Haddock not being able to find names for linking in
# environment after reading its interface file later.
if my_pkg_id != None:
unit_id_args = [
"-this-unit-id", pkg_id.to_string(my_pkg_id),
"-optP-DCURRENT_PACKAGE_KEY=\"{}\"".format(pkg_id.to_string(my_pkg_id))
]
ghc_args += unit_id_args
args = hs.actions.args()
args.add(ghc_args)
# Compilation mode and explicit user flags
if hs.mode == "opt":
args.add("-O2")
args.add(["-static"])
# NOTE We can't have profiling and dynamic code at the same time, see:
# https://ghc.haskell.org/trac/ghc/ticket/15394
if with_profiling:
args.add("-prof")
else:
args.add(["-dynamic-too"])
# Common flags
args.add([
"-v0",
"-c",
"--make",
"-fPIC",
"-hide-all-packages",
])
# Output directories
args.add([
"-odir", objects_dir,
"-hidir", interfaces_dir,
])
# Output file extensions
args.add([
"-osuf", _output_file_ext("o", False, with_profiling),
"-dynosuf", _output_file_ext("o", True, with_profiling),
"-hisuf", _output_file_ext("hi", False, with_profiling),
"-dynhisuf", _output_file_ext("hi", True, with_profiling),
])
# Pass source files
for f in set.to_list(source_files):
args.add(f)
return DefaultCompileInfo(
args = args,
ghc_args = ghc_args,
inputs = depset(transitive = [
depset(header_files),
depset(boot_files),
set.to_depset(source_files),
extra_srcs,
depset(cc.hdrs),
set.to_depset(dep_info.package_confs),
set.to_depset(dep_info.package_caches),
set.to_depset(dep_info.interface_files),
depset(dep_info.static_libraries),
depset(dep_info.static_libraries_prof),
set.to_depset(dep_info.dynamic_libraries),
depset(dep_info.external_libraries.values()),
java.inputs,
locale_archive_depset,
]),
objects_dir = objects_dir,
interfaces_dir = interfaces_dir,
outputs = object_files + object_dyn_files + interface_files,
object_files = object_files,
object_dyn_files = object_dyn_files,
interface_files = interface_files,
modules = modules,
header_files = set.from_list(cc.hdrs + header_files),
boot_files = set.from_list(boot_files),
source_files = source_files,
extra_source_files = extra_srcs,
import_dirs = import_dirs,
env = dicts.add({
"LD_LIBRARY_PATH": get_external_libs_path(set.from_list(dep_info.external_libraries.values())),
},
java.env,
hs.env,
),
)
def compile_binary(hs, cc, java, dep_info, srcs, extra_srcs, cpp_defines, compiler_flags, with_profiling, main_file, main_function):
"""Compile a Haskell target into object files suitable for linking.
Returns:
struct with the following fields:
object_files: list of static object files
object_dyn_files: list of dynamic object files
modules: set of module names
source_files: set of Haskell source files
"""
c = _compilation_defaults(hs, cc, java, dep_info, srcs, extra_srcs, cpp_defines, compiler_flags, with_profiling, main_file = main_file)
c.args.add(["-main-is", main_function])
hs.actions.run(
inputs = c.inputs + hs.extra_binaries,
outputs = c.outputs,
mnemonic = "HaskellBuildBinary",
progress_message = "HaskellBuildBinary {}".format(hs.label),
env = c.env,
executable = hs.tools.ghc,
arguments = [c.args]
)
return struct(
object_files = c.object_files,
object_dyn_files = c.object_dyn_files,
modules = c.modules,
source_files = c.source_files,
import_dirs = c.import_dirs,
ghc_args = c.ghc_args,
header_files = c.header_files,
)
def compile_library(hs, cc, java, dep_info, srcs, extra_srcs, cpp_defines, compiler_flags, with_profiling, my_pkg_id):
"""Build arguments for Haskell package build.
Returns:
struct with the following fields:
interfaces_dir: directory containing interface files
interface_files: list of interface files
object_files: list of static object files
object_dyn_files: list of dynamic object files
ghc_args: list of string arguments suitable for Haddock
modules: set of module names
source_files: set of Haskell module files
import_dirs: import directories that should make all modules visible (for GHCi)
"""
c = _compilation_defaults(hs, cc, java, dep_info, srcs, extra_srcs, cpp_defines, compiler_flags, with_profiling, my_pkg_id=my_pkg_id)
hs.actions.run(
inputs = c.inputs + hs.extra_binaries,
outputs = c.outputs,
mnemonic = "HaskellBuildLibrary",
progress_message = "HaskellBuildLibrary {}".format(hs.label),
env = c.env,
executable = hs.tools.ghc,
arguments = [c.args],
)
return struct(
interfaces_dir = c.interfaces_dir,
interface_files = c.interface_files,
object_files = c.object_files,
object_dyn_files = c.object_dyn_files,
ghc_args = c.ghc_args,
modules = c.modules,
header_files = c.header_files,
boot_files = c.boot_files,
source_files = c.source_files,
extra_source_files = c.extra_source_files,
import_dirs = c.import_dirs,
)
|
from django.apps import AppConfig
class SysadminConfig(AppConfig):
name = 'sysadmin'
|
class Solution:
# @param {character[][]} matrix
# @return {integer}
def maximalSquare(self, matrix):
if not matrix:
return 0
max_square = 0
self.visited = []
for line in matrix:
row = []
for c in line:
row.append(0)
self.visited.append(row)
self.visited[0] = [int(c) for c in matrix[0]]
max_square = max(self.visited[0])
for i, row in enumerate(self.visited):
k = int(matrix[i][0])
if k > max_square:
max_square = k
row[0] = k
for i in range(1, len(matrix)):
for j in range(1, len(matrix[0])):
if matrix[i][j] == "1":
self.visited[i][j] = (
min(
self.visited[i - 1][j],
self.visited[i][j - 1],
self.visited[i - 1][j - 1],
)
+ 1
)
if self.visited[i][j] > max_square:
max_square = self.visited[i][j]
return max_square ** 2
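# Illustrative check (not part of the original solution): the largest all-"1"
# square in this grid is 2x2, so the expected area is 4.
if __name__ == "__main__":
    grid = [["1", "0", "1", "0", "0"],
            ["1", "0", "1", "1", "1"],
            ["1", "1", "1", "1", "1"],
            ["1", "0", "0", "1", "0"]]
    print(Solution().maximalSquare(grid))  # 4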
|
print("1 commit in 1 day")
|
from builtins import str
import os, os.path
import subprocess
import sys
import tarfile
import wget
from htrc.volumes import download_volumes
from htrc.workset import path_to_volumes
MALLET_DIR = os.path.expanduser('~/mallet')
# Mallet is downloaded and installed in the user's home directory.
def install_mallet():
if not os.path.exists(MALLET_DIR):
os.makedirs(MALLET_DIR)
mallet_zip = wget.download('http://mallet.cs.umass.edu/dist/mallet-2.0.8RC3.tar.gz')
mallet_dir = tarfile.open(mallet_zip, "r:gz")
mallet_dir.extractall(path=MALLET_DIR)
mallet_dir.close()
def main(path, topics, iterations, output_dir='/media/secure_volume/workset/', debug=False):
if not os.path.exists(MALLET_DIR):
if not os.path.exists('/media/secure_volume/'):
print('Installing Mallet ...')
install_mallet()
print('\n')
else:
print('Mallet not installed, but capsule is in secure mode.')
print('Switch to maintenance mode and run this command again')
print('to install Mallet. Then, switch to secure mode to train')
print('topic models.')
sys.exit(1)
if not os.path.isdir(path):
try:
volumes = path_to_volumes(path)
except ValueError as e:
print("Could not process workset. {}".format(str(e)))
sys.exit(1)
try:
download_volumes(volumes, output_dir)
except OSError as e:
if not os.path.exists('/media/secure_volume/'):
print('Secure volume not mounted. Could not download volumes')
sys.exit(1)
else:
print("Could not download volumes. {} {}".format(e.strerror, e.filename))
sys.exit(1)
except RuntimeError as e:
            if not debug:
print("Could not download volumes. {}".format(str(e)))
sys.exit(1)
else:
raise e
path = output_dir
# import the workset to MALLET format.
subprocess.check_call([
'{}/mallet-2.0.8RC3/bin/mallet'.format(MALLET_DIR),
'import-dir',
'--input', path,
'--output', os.path.join(path, '../corpus.mallet'),
'--keep-sequence',
'--remove-stopwords'
])
subprocess.check_call([
'{}/mallet-2.0.8RC3/bin/mallet'.format(MALLET_DIR),
'train-topics',
'--input', os.path.join(path, '../corpus.mallet'),
'--num-topics', str(topics),
'--output-state', os.path.join(path, '../mallet_state.gz'),
'--output-topic-keys', os.path.join(path, '../mallet_topic-keys.txt'),
'--output-doc-topics', os.path.join(path, '../mallet_doc-topics.txt'),
'--num-iterations', str(iterations)
])
def populate_parser(parser=None):
if parser is None:
from argparse import ArgumentParser
parser = ArgumentParser()
parser.add_argument('-k', help="number of topics", required=True)
parser.add_argument('--iter', help="number of iterations", default=200)
parser.add_argument('--workset-path', help="Location to store workset download.",
default='/media/secure_volume/workset/')
parser.add_argument('path', default='/media/secure_volume/workset/',
nargs='?')
return parser
if __name__ == '__main__':
from argparse import ArgumentParser
parser = ArgumentParser(description="MALLET tools for the HTRC")
populate_parser(parser)
args = parser.parse_args()
main(args.path, args.k, args.iter, args.workset_path)
|
class Formatter:
"""Inherited class with methods called by numerous classes."""
def __init__(self, info_type, info):
self._type = info_type
self._info = info
def __str__(self):
return self._create_string()
def _create_string(self):
"""Create a multi-line string representation of environment info"""
lst = [self._type, '------']
for k, v in self._info().items():
lst.append(self._format_string(k, v))
lst.append('\n')
return '\n'.join(lst)
@staticmethod
def _format_string(key, value):
return f'{key:16} : {value}'
@staticmethod
def _format_size(num_bytes, binary=False, strip=True):
"""
Format a number of bytes as a human readable size.
Parameters
----------
num_bytes : int
The size to format.
binary : bool, optional
The base to group the number of bytes.
strip : bool, optional
            If trailing zeros should be kept or stripped.
Returns
-------
str
The human readable file size.
"""
size_units = ['bytes', 'kB', 'MB', 'GB', 'TB', 'PB', 'EB', 'ZB', 'YB']
if binary:
base = 2**10
else:
base = 10**3
for i, unit in reversed(list(enumerate(size_units))):
divider = base**i
if num_bytes >= divider:
                formatted = '{:0.2f}'.format(num_bytes / divider)
if strip:
formatted = formatted.rstrip('0').rstrip('.')
formatted = '{} {}'.format(formatted, unit)
return formatted
# Failed to match a unit
return '0 {}'.format(size_units[0])
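# Illustrative sketch (not part of the original module): exercising the static
# size formatter with decimal and binary grouping; expected outputs are shown
# as comments.
if __name__ == '__main__':
    print(Formatter._format_size(1536))               # 1.54 kB (base 10**3)
    print(Formatter._format_size(1536, binary=True))  # 1.5 kB (base 2**10)
    print(Formatter._format_size(0))                  # 0 bytes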
|
"""Action for creating packages and registering them with ghc-pkg"""
load("@bazel_skylib//lib:paths.bzl", "paths")
load(":private/packages.bzl", "ghc_pkg_recache", "write_package_conf")
load(":private/path_utils.bzl", "get_lib_name", "is_hs_library", "target_unique_name")
load(":private/pkg_id.bzl", "pkg_id")
load(":private/cc_libraries.bzl", "get_extra_libs")
def _get_extra_libraries(hs, posix, with_shared, cc_libraries_info, cc_info):
"""Get directories and library names for extra library dependencies.
Args:
cc_info: Combined CcInfo provider of the package's dependencies.
Returns:
(dirs, libs):
dirs: list: Library search directories for extra library dependencies.
libs: list: Extra library dependencies.
"""
# NOTE This is duplicated from path_utils.bzl link_libraries. This whole
# function can go away once we track libraries outside of package
# configuration files.
(static_libs, dynamic_libs) = get_extra_libs(
hs,
posix,
cc_libraries_info,
cc_info,
pic = with_shared,
)
# This test is a hack. When a CC library has a Haskell library
# as a dependency, we need to be careful to filter it out,
# otherwise it will end up polluting the linker flags. GHC
# already uses hs-libraries to link all Haskell libraries.
#
# TODO Get rid of this hack. See
# https://github.com/tweag/rules_haskell/issues/873.
cc_static_libs = depset(direct = [
lib
for lib in static_libs.to_list()
if not is_hs_library(lib)
])
cc_dynamic_libs = depset(direct = [
lib
for lib in dynamic_libs.to_list()
if not is_hs_library(lib)
])
cc_libs = cc_static_libs.to_list() + cc_dynamic_libs.to_list()
lib_dirs = depset(direct = [
lib.dirname
for lib in cc_libs
])
lib_names = [
get_lib_name(lib)
for lib in cc_libs
]
return (lib_dirs.to_list(), lib_names)
def package(
hs,
posix,
dep_info,
cc_libraries_info,
cc_info,
with_shared,
exposed_modules_file,
other_modules,
my_pkg_id,
has_hs_library):
"""Create GHC package using ghc-pkg.
Args:
hs: Haskell context.
posix: POSIX toolchain.
dep_info: Combined HaskellInfo of dependencies.
cc_info: Combined CcInfo of dependencies.
with_shared: Whether to link dynamic libraries.
exposed_modules_file: File holding list of exposed modules.
other_modules: List of hidden modules.
my_pkg_id: Package id object for this package.
has_hs_library: Whether hs-libraries should be non-null.
Returns:
(File, File): GHC package conf file, GHC package cache file
"""
pkg_db_dir = pkg_id.to_string(my_pkg_id)
conf_file = hs.actions.declare_file(
paths.join(pkg_db_dir, "{0}.conf".format(pkg_db_dir)),
)
import_dir = paths.join(
"${pkgroot}",
paths.join(pkg_db_dir, "_iface"),
)
(extra_lib_dirs, extra_libs) = _get_extra_libraries(hs, posix, with_shared, cc_libraries_info, cc_info)
# Create a file from which ghc-pkg will create the actual package
# from. List of exposed modules generated below.
metadata_file = hs.actions.declare_file(target_unique_name(hs, "metadata"))
write_package_conf(hs, metadata_file, {
"name": my_pkg_id.package_name,
"version": my_pkg_id.version,
"id": pkg_id.to_string(my_pkg_id),
"key": pkg_id.to_string(my_pkg_id),
"exposed": "True",
"hidden-modules": other_modules,
"import-dirs": [import_dir],
"library-dirs": ["${pkgroot}"] + extra_lib_dirs,
"dynamic-library-dirs": ["${pkgroot}"] + extra_lib_dirs,
"hs-libraries": [pkg_id.library_name(hs, my_pkg_id)] if has_hs_library else [],
"extra-libraries": extra_libs,
"depends": hs.package_ids,
})
# Combine exposed modules and other metadata to form the package
# configuration file.
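    # In the shell command below, $1 is the `cat` binary, $2 the metadata file,
    # $3 the file listing exposed modules and $4 the output .conf file: the
    # metadata is copied first, then an "exposed-modules:" line is appended.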
hs.actions.run_shell(
inputs = [metadata_file, exposed_modules_file],
outputs = [conf_file],
command = """
"$1" $2 > $4
echo "exposed-modules: `"$1" $3`" >> $4
""",
arguments = [
posix.commands["cat"],
metadata_file.path,
exposed_modules_file.path,
conf_file.path,
],
)
cache_file = ghc_pkg_recache(hs, posix, conf_file)
return conf_file, cache_file
|
import tensorflow as tf  # TF 1.x graph API is used below (placeholders, tf.train)
#-------------
# HYPERPARAMS
#-------------
num_neg = 6
latent_features = 8
epochs = 20
batch_size = 256
learning_rate = 0.001
#-------------------------
# TENSORFLOW GRAPH
#-------------------------
graph = tf.Graph()
with graph.as_default():
# Define input placeholders for user, item and label.
user = tf.placeholder(tf.int32, shape=(None, 1))
item = tf.placeholder(tf.int32, shape=(None, 1))
label = tf.placeholder(tf.int32, shape=(None, 1))
# User embedding for MLP
mlp_u_var = tf.Variable(tf.random_normal([len(users), 32], stddev=0.05),
name='mlp_user_embedding')
mlp_user_embedding = tf.nn.embedding_lookup(mlp_u_var, user)
# Item embedding for MLP
mlp_i_var = tf.Variable(tf.random_normal([len(items), 32], stddev=0.05),
name='mlp_item_embedding')
mlp_item_embedding = tf.nn.embedding_lookup(mlp_i_var, item)
# User embedding for GMF
gmf_u_var = tf.Variable(tf.random_normal([len(users), latent_features],
stddev=0.05), name='gmf_user_embedding')
gmf_user_embedding = tf.nn.embedding_lookup(gmf_u_var, user)
# Item embedding for GMF
gmf_i_var = tf.Variable(tf.random_normal([len(items), latent_features],
stddev=0.05), name='gmf_item_embedding')
gmf_item_embedding = tf.nn.embedding_lookup(gmf_i_var, item)
# Our GMF layers
gmf_user_embed = tf.keras.layers.Flatten()(gmf_user_embedding)
gmf_item_embed = tf.keras.layers.Flatten()(gmf_item_embedding)
gmf_matrix = tf.multiply(gmf_user_embed, gmf_item_embed)
# Our MLP layers
mlp_user_embed = tf.keras.layers.Flatten()(mlp_user_embedding)
mlp_item_embed = tf.keras.layers.Flatten()(mlp_item_embedding)
mlp_concat = tf.keras.layers.concatenate([mlp_user_embed, mlp_item_embed])
mlp_dropout = tf.keras.layers.Dropout(0.2)(mlp_concat)
mlp_layer_1 = tf.keras.layers.Dense(64, activation='relu', name='layer1')(mlp_dropout)
mlp_batch_norm1 = tf.keras.layers.BatchNormalization(name='batch_norm1')(mlp_layer_1)
mlp_dropout1 = tf.keras.layers.Dropout(0.2, name='dropout1')(mlp_batch_norm1)
mlp_layer_2 = tf.keras.layers.Dense(32, activation='relu', name='layer2')(mlp_dropout1)
    mlp_batch_norm2 = tf.keras.layers.BatchNormalization(name='batch_norm2')(mlp_layer_2)
    mlp_dropout2 = tf.keras.layers.Dropout(0.2, name='dropout2')(mlp_batch_norm2)
mlp_layer_3 = tf.keras.layers.Dense(16, activation='relu', name='layer3')(mlp_dropout2)
mlp_layer_4 = tf.keras.layers.Dense(8, activation='relu', name='layer4')(mlp_layer_3)
# We merge the two networks together
merged_vector = tf.keras.layers.concatenate([gmf_matrix, mlp_layer_4])
# Our final single neuron output layer.
output_layer = tf.keras.layers.Dense(1,
kernel_initializer="lecun_uniform",
name='output_layer')(merged_vector)
# Our loss function as a binary cross entropy.
loss = tf.losses.sigmoid_cross_entropy(label, output_layer)
# Train using the Adam optimizer to minimize our loss.
opt = tf.train.AdamOptimizer(learning_rate = learning_rate)
step = opt.minimize(loss)
# Initialize all tensorflow variables.
init = tf.global_variables_initializer()
session = tf.Session(config=None, graph=graph)
session.run(init)
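# --- Illustrative training-step sketch (an assumption, not part of the original script) ---
# Shows how the graph above would typically be driven: feed (user, item, label)
# batches and run `step`/`loss`. Real negative sampling and the `users`/`items`
# id arrays are assumed to be prepared elsewhere; random indices are used here
# only to demonstrate the expected feed_dict shapes.
import numpy as np
demo_users = np.random.randint(0, len(users), size=(batch_size, 1))
demo_items = np.random.randint(0, len(items), size=(batch_size, 1))
demo_labels = np.random.randint(0, 2, size=(batch_size, 1))
_, demo_loss = session.run([step, loss],
                           feed_dict={user: demo_users,
                                      item: demo_items,
                                      label: demo_labels})
print("demo batch loss:", demo_loss)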
|
from datetime import datetime
armstrong = datetime(1969, 7, 21, 14, 56, 15)
armstrong.year # 1969
armstrong.month # 7
armstrong.day # 21
armstrong.hour # 14
armstrong.minute # 56
armstrong.second # 15
armstrong.microsecond # 0
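# A small follow-on sketch (not in the original snippet): the same moment can
# also be rendered as a string.
armstrong.isoformat()                     # '1969-07-21T14:56:15'
armstrong.strftime('%Y-%m-%d %H:%M:%S')   # '1969-07-21 14:56:15'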
|
import requests
import json
from time import sleep
# lyrics
with open('lyrics.txt', 'r') as f:
all_lines = f.readlines()
token = "your token here"
while True:
for i in range(len(all_lines)):
#loop through lyrics.txt
content = {
"custom_status": {"text": all_lines[i]}
}
requests.patch("https://ptb.discordapp.com/api/v8/users/@me/settings", headers={"authorization": token}, json=content)
        # A good rule of thumb is to wait 4-5 seconds or more between updates to avoid rate limits or an instant ban.
sleep(5)
|
from keras_segmentation.predict import predict,predict_multiple,predict_video
from keras_segmentation.models.all_models import model_from_name
test_image_path = "/home/mirap/database_folder/Menziesdata/ROI_examples_5_fold_whitehole/test_images/"
checkpoints_saving_path = "checkpoints/"
dataset_abbr = "MBf"
out_folder = "out_frame/"+dataset_abbr
model_list = [
# "fcn_16_vgg",
# "fcn_32_vgg",
# "fcn_8_vgg",
# "fcn_8_resnet50", # big size over 11GB
# "fcn_16_resnet50",
# "fcn_32_resnet50", # big size over 11GB
# "fcn_8_mobilenet",
# "fcn_16_mobilenet",
# "fcn_32_mobilenet",
# "pspnet", # core dump error
# "vgg_pspnet", # core dump error
# "resnet50_pspnet", # core dump error
# "pspnet_50", # big size over 11GB
# "pspnet_101",
# "unet_mini",
# "unet",
"vgg_unet",
# "resnet50_unet",
# "mobilenet_unet",
# "segnet",
# "vgg_segnet",
# "resnet50_segnet",
# "mobilenet_segnet"
]
# load model
# model_weight_path = 'model.h5'
# model = vgg_unet(n_classes=6, input_height=640, input_width=640)
# model.load_weights(model_weight_path, by_name=True)
for i in range(1,6):
for model_name in model_list:
print("--------- predict",dataset_abbr,i,"_",model_name,"------------")
# Single Predict
# predict(
# checkpoints_path="checkpoints/mobilenet_segnet",
# inp="database/IMAS_Salmon/train_images/untitled-10.jpg",
# out_fname="out_frame/output_SaMobilenet_segnet_Predic.png",
# overlay_img=True
# )
print("--- using",checkpoints_saving_path+dataset_abbr+str(i)+model_name,"---")
# Multi Predict
predict_multiple(
checkpoints_path=checkpoints_saving_path+dataset_abbr+str(i)+model_name,
inp_dir=test_image_path,
out_dir=out_folder+str(i)+"/",
overlay_img=True,
class_names=None, show_legends=False,
prediction_width=None, prediction_height=None,
)
# Video Predict
# predict_video(
# checkpoints_path="checkpoints/vgg_unet_1",
# inp=test_image_path, # should be avi file!
# out_fname="output.avi"
# )
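# --- Illustrative sketch (an assumption, not part of the original script) ---
# model_from_name (imported above but unused) maps a model-name string to its
# constructor, so the commented-out manual loading could be written generically:
# model = model_from_name["vgg_unet"](n_classes=6, input_height=640, input_width=640)
# model.load_weights("model.h5")  # "model.h5" is a hypothetical weights file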
|
import torch
import numpy as np
import open3d as o3d
import trimesh
import mcubes
class SoftL1(torch.nn.Module):
def __init__(self):
super(SoftL1, self).__init__()
def forward(self, input, target, eps=0.0):
l1 = torch.abs(input - target)
ret = l1 - eps
ret = torch.clamp(ret, min=0.0, max=100.0)
return ret, torch.mean(l1.detach())
class LearningRateSchedule:
def get_learning_rate(self, epoch):
pass
class StepLearningRateSchedule(LearningRateSchedule):
def __init__(self, specs):
print(specs)
self.initial = specs['initial']
self.interval = specs['interval']
self.factor = specs['factor']
def get_learning_rate(self, epoch):
return self.initial * (self.factor ** (epoch // self.interval))
def adjust_learning_rate(lr_schedules, optimizer, epoch):
for i, param_group in enumerate(optimizer.param_groups):
param_group["lr"] = lr_schedules[i].get_learning_rate(epoch)
def latent_size_regul(latent_codes, indices, component_mean=None, component_std=None):
# OneCodePerFrame
latent_codes_squared = latent_codes[indices, ...].pow(2) # [batch_size, 1, code_dim]
if component_std is not None:
latent_codes_squared = latent_codes_squared / component_std.pow(2)
latent_loss = torch.mean(latent_codes_squared, dim=-1) # [batch_size, 1]
latent_loss = torch.mean(latent_loss)
return latent_loss
def latent_size_regul_no_index(latent_codes):
# OneCodePerFrame
latent_codes_squared = latent_codes.pow(2) # [batch_size, 1, code_dim]
latent_loss = torch.mean(latent_codes_squared, dim=-1) # [batch_size, 1]
latent_loss = torch.mean(latent_loss)
return latent_loss
def empirical_stat(latent_vecs, indices):
lat_mat = torch.zeros(0).cuda()
for ind in indices:
lat_mat = torch.cat([lat_mat, latent_vecs[ind]], 0)
mean = torch.mean(lat_mat, 0)
var = torch.var(lat_mat, 0)
return mean, var
def get_mean_latent_vector_magnitude_old(latent_codes):
host_vectors = np.array(
[vec.detach().cpu().numpy().squeeze() for vec in latent_codes]
)
return np.mean(np.linalg.norm(host_vectors, axis=1))
def get_mean_latent_code_magnitude(latent_codes):
host_latent_codes = latent_codes.detach().cpu().numpy()
assert len(host_latent_codes.shape) == 3
return np.mean(np.linalg.norm(host_latent_codes, axis=2))
def threshold_min_max(tensor, min_vec, max_vec):
return torch.min(max_vec, torch.max(tensor, min_vec))
def project_latent_codes_onto_sphere(latent_codes, radius):
length = torch.norm(latent_codes, dim=-1, keepdim=True).detach()
latent_codes.data = latent_codes.mul(radius / length)
####################################################################################
####################################################################################
def create_mesh_from_code(decoder, latent_code, shape_codes_dim, N=256, max_batch=32 ** 3):
latent_code.requires_grad = False
# Get shape codes for batch samples
shape_codes_batch = latent_code
assert shape_codes_batch.shape[1] == 1, shape_codes_batch.shape
decoder.eval()
# NOTE: the voxel_origin is actually the (bottom, left, down) corner, not the middle
# voxel_origin = [-1, -1, -1]
bbox_min = -0.5
bbox_max = 0.5
voxel_origin = [bbox_min] * 3
voxel_size = (bbox_max - bbox_min) / (N - 1)
overall_index = torch.arange(0, N ** 3, 1, out=torch.LongTensor())
samples = torch.zeros(N ** 3, 4)
# transform first 3 columns
# to be the x, y, z index
samples[:, 2] = overall_index % N
samples[:, 1] = (overall_index.long() // N) % N
samples[:, 0] = ((overall_index.long() // N) // N) % N
# transform first 3 columns
# to be the x, y, z coordinate
samples[:, 0] = (samples[:, 0] * voxel_size) + voxel_origin[2]
samples[:, 1] = (samples[:, 1] * voxel_size) + voxel_origin[1]
samples[:, 2] = (samples[:, 2] * voxel_size) + voxel_origin[0]
num_samples = N ** 3
samples.requires_grad = False
head = 0
while head < num_samples:
sample_subset = samples[head : min(head + max_batch, num_samples), 0:3].cuda()
# Run forward pass.
        # Extend the latent code to all sampled points
shape_codes_repeat = shape_codes_batch.expand(-1, sample_subset.shape[0], -1) # [bs, N, C]
shape_codes_inputs = shape_codes_repeat.reshape(-1, shape_codes_dim) # [bs*N, C]
shape_inputs = torch.cat([shape_codes_inputs, sample_subset], 1)
sdf_pred_i = decoder(shape_inputs).squeeze(1).detach().cpu()
samples[head : min(head + max_batch, num_samples), 3] = (
sdf_pred_i
)
head += max_batch
sdf_values = samples[:, 3]
sdf_values = sdf_values.reshape(N, N, N)
# Extract mesh with Marching cubes.
vertices, triangles = mcubes.marching_cubes(sdf_values.numpy(), 0)
    # Map vertices from voxel-grid indices back into the [bbox_min, bbox_max] cube
step = (bbox_max - bbox_min) / (N - 1)
vertices = np.multiply(vertices, step)
vertices += [bbox_min, bbox_min, bbox_min]
return trimesh.Trimesh(vertices, triangles)
def create_mesh(decoder, latent_code, identity_ids, shape_codes_dim, N=256, max_batch=32 ** 3):
latent_code.requires_grad = False
# Get shape codes for batch samples
assert len(identity_ids) == 1 and identity_ids[0] < latent_code.shape[0], f"Identity id {identity_ids[0]} is out of range of latent code of shape {latent_code.shape}"
shape_codes_batch = latent_code[identity_ids, ...] # [bs, 1, C]
assert shape_codes_batch.shape[1] == 1, shape_codes_batch.shape
decoder.eval()
# NOTE: the voxel_origin is actually the (bottom, left, down) corner, not the middle
# voxel_origin = [-1, -1, -1]
bbox_min = -0.5
bbox_max = 0.5
voxel_origin = [bbox_min] * 3
voxel_size = (bbox_max - bbox_min) / (N - 1)
overall_index = torch.arange(0, N ** 3, 1, out=torch.LongTensor())
samples = torch.zeros(N ** 3, 4)
# transform first 3 columns
# to be the x, y, z index
samples[:, 2] = overall_index % N
samples[:, 1] = (overall_index.long() // N) % N
samples[:, 0] = ((overall_index.long() // N) // N) % N
# transform first 3 columns
# to be the x, y, z coordinate
samples[:, 0] = (samples[:, 0] * voxel_size) + voxel_origin[2]
samples[:, 1] = (samples[:, 1] * voxel_size) + voxel_origin[1]
samples[:, 2] = (samples[:, 2] * voxel_size) + voxel_origin[0]
num_samples = N ** 3
samples.requires_grad = False
head = 0
while head < num_samples:
sample_subset = samples[head : min(head + max_batch, num_samples), 0:3].cuda()
# Run forward pass.
        # Extend the latent code to all sampled points
shape_codes_repeat = shape_codes_batch.expand(-1, sample_subset.shape[0], -1) # [bs, N, C]
shape_codes_inputs = shape_codes_repeat.reshape(-1, shape_codes_dim) # [bs*N, C]
shape_inputs = torch.cat([shape_codes_inputs, sample_subset], 1)
sdf_pred_i = decoder(shape_inputs).squeeze(1).detach().cpu()
samples[head : min(head + max_batch, num_samples), 3] = (
sdf_pred_i
)
head += max_batch
sdf_values = samples[:, 3]
sdf_values = sdf_values.reshape(N, N, N)
# Extract mesh with Marching cubes.
vertices, triangles = mcubes.marching_cubes(sdf_values.numpy(), 0)
    # Map vertices from voxel-grid indices back into the [bbox_min, bbox_max] cube
step = (bbox_max - bbox_min) / (N - 1)
vertices = np.multiply(vertices, step)
vertices += [bbox_min, bbox_min, bbox_min]
return trimesh.Trimesh(vertices, triangles)
def compute_trimesh_chamfer(gt_points, gen_points):
    """
    Computes a symmetric chamfer distance, i.e. the sum of both directed chamfers.
    """
    from scipy.spatial import cKDTree as KDTree
gen_points_kd_tree = KDTree(gen_points)
one_distances, one_vertex_ids = gen_points_kd_tree.query(gt_points)
gt_to_gen_temp = np.square(one_distances)
gt_to_gen_chamfer = np.mean(gt_to_gen_temp)
gt_points_kd_tree = KDTree(gt_points)
two_distances, two_vertex_ids = gt_points_kd_tree.query(gen_points)
gen_to_gt_temp = np.square(two_distances)
gen_to_gt_chamfer = np.mean(gen_to_gt_temp)
squared_chamfer = gt_to_gen_chamfer + gen_to_gt_chamfer
# For easier understanding, compute the "unsquared" version
unsquared_chamfer = np.mean(one_distances) + np.mean(two_distances)
return {
'squared_chamfer': squared_chamfer,
'unsquared_chamfer': unsquared_chamfer
}
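# --- Illustrative sanity check (an assumption, not part of the original module) ---
# compute_trimesh_chamfer only needs two (N, 3) point arrays, so it can be
# exercised with random point clouds: identical clouds give a chamfer distance
# of zero, and a small perturbation gives a small positive value.
if __name__ == "__main__":
    rng = np.random.default_rng(0)
    pts = rng.random((1000, 3))
    print(compute_trimesh_chamfer(pts, pts))         # ~0.0
    print(compute_trimesh_chamfer(pts, pts + 0.01))  # small positive value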
|
import os
import json
import logging
import requests
from urllib.parse import urljoin
logging.basicConfig(level=logging.INFO)
REQUEST_TIMEOUT = 10 # in seconds
API_V3_BASE = "https://api.github.com"
def api_request(url: str, http_request: str = 'get', check_response: bool = True, **kwargs):
url = urljoin(API_V3_BASE, url)
try:
requests_action = getattr(requests, http_request)
response = requests_action(
url,
headers={
"Authorization": f"Bearer {os.getenv('GIT_APP_TOKEN')}",
"Accept": "application/vnd.github.luke-cage-preview+json",
},
timeout=REQUEST_TIMEOUT,
**kwargs
)
if check_response:
try:
response = response.json()
except json.JSONDecodeError as exc:
raise RuntimeError(f'Failed to jsonify response.\n{exc!r}')
return response
except Exception as err:
raise err
def remove_branch_protection():
url = f'/repos/{os.getenv("GITHUB_REPOSITORY", "")}/branches/master/protection/required_pull_request_reviews'
logging.info('Looking for current branch protection rules.')
response = api_request(url)
data = {
"dismiss_stale_reviews": response.get("dismiss_stale_reviews", False),
"require_code_owner_reviews": response.get("require_code_owner_reviews", False),
"required_approving_review_count": response.get("required_approving_review_count", 1)
}
if 'organization' in api_request(f'/repos/{os.getenv("GITHUB_REPOSITORY", "")}'):
data["dismissal_restrictions"] = {
"users": [
_.get("login")
for _ in response.get("dismissal_restrictions", {}).get("users", [])
],
"teams": [
_.get("slug")
for _ in response.get("dismissal_restrictions", {}).get("teams", [])
]
}
logging.info('Saving protection rules file.')
with open('tmp_protection_rules.json', 'w') as handle:
json.dump(data, handle)
logging.info('Removing branch protection.')
api_request(url, http_request='delete', check_response=False)
def re_add_branch_protection():
logging.info('Reading protection rules file.')
with open('tmp_protection_rules.json', 'r') as handle:
data = json.load(handle)
url = f'/repos/{os.getenv("GITHUB_REPOSITORY", "")}/branches/master/protection/required_pull_request_reviews'
logging.info('Re-adding protection branch rules.')
api_request(url, http_request='patch', json=data, check_response=False)
def git_add_and_commit():
logging.info('Pushing to remote Github.')
os.system('git config --global user.email "41898282+github-actions[bot]@users.noreply.github.com"')
os.system('git config --global user.name "github-actions[bot]"')
os.system('git add --all -- ":!tmp_protection_rules.json"')
os.system('git commit -m "Updated by Github Actions :)"')
os.system('git push origin master')
def main():
logging.info('Initializing...')
remove_branch_protection()
git_add_and_commit()
re_add_branch_protection()
logging.info('Finished...')
if __name__ == "__main__":
main()
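# --- Usage note (an assumption, not part of the original script) ---
# The script reads GIT_APP_TOKEN (a token allowed to edit branch protection)
# and GITHUB_REPOSITORY ("owner/repo") from the environment, e.g.:
#   GIT_APP_TOKEN=... GITHUB_REPOSITORY=org/repo python push_with_protection.py
# ("push_with_protection.py" is a hypothetical name for this script.)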
|
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Keras text vectorization preprocessing layer."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from absl.testing import parameterized
import numpy as np
import six
from tensorflow.python import keras
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors_impl
from tensorflow.python.keras import keras_parameterized
from tensorflow.python.keras import testing_utils
from tensorflow.python.keras.layers.preprocessing import preprocessing_test_utils
from tensorflow.python.keras.layers.preprocessing import string_lookup
from tensorflow.python.keras.utils.generic_utils import CustomObjectScope
from tensorflow.python.ops.ragged import ragged_factory_ops
from tensorflow.python.platform import gfile
from tensorflow.python.platform import test
def _get_end_to_end_test_cases():
test_cases = (
{
"testcase_name": "test_strings_soft_vocab_cap",
# Create an array where 'earth' is the most frequent term, followed by
# 'wind', then 'and', then 'fire'. This ensures that the vocab
# accumulator is sorting by frequency.
"vocab_data":
np.array([["fire"], ["earth"], ["earth"], ["earth"], ["earth"],
["wind"], ["wind"], ["wind"], ["and"], ["and"]]),
"input_data":
np.array([["earth"], ["wind"], ["and"], ["fire"], ["fire"],
["and"], ["earth"], ["michigan"]]),
"kwargs": {
"max_tokens": None,
},
"expected_output": [[2], [3], [4], [5], [5], [4], [2], [1]],
"input_dtype":
dtypes.string
},
)
crossed_test_cases = []
# Cross above test cases with use_dataset in (True, False)
for use_dataset in (True, False):
for case in test_cases:
case = case.copy()
if use_dataset:
case["testcase_name"] = case["testcase_name"] + "_with_dataset"
case["use_dataset"] = use_dataset
crossed_test_cases.append(case)
return crossed_test_cases
@keras_parameterized.run_all_keras_modes(always_skip_v1=True)
class StringLookupLayerTest(keras_parameterized.TestCase,
preprocessing_test_utils.PreprocessingLayerTest):
@parameterized.named_parameters(*_get_end_to_end_test_cases())
def test_layer_end_to_end_with_adapt(self, vocab_data, input_data, kwargs,
use_dataset, expected_output,
input_dtype):
cls = string_lookup.StringLookup
expected_output_dtype = dtypes.int64
input_shape = input_data.shape
if use_dataset:
# Keras APIs expect batched datasets.
# TODO(rachelim): `model.predict` predicts the result on each
# dataset batch separately, then tries to concatenate the results
# together. When the results have different shapes on the non-concat
# axis (which can happen in the output_mode = INT case for
# StringLookup), the concatenation fails. In real use cases, this may
# not be an issue because users are likely to pipe the preprocessing layer
# into other keras layers instead of predicting it directly. A workaround
# for these unit tests is to have the dataset only contain one batch, so
# no concatenation needs to happen with the result. For consistency with
# numpy input, we should make `predict` join differently shaped results
# together sensibly, with 0 padding.
input_data = dataset_ops.Dataset.from_tensor_slices(input_data).batch(
input_shape[0])
vocab_data = dataset_ops.Dataset.from_tensor_slices(vocab_data).batch(
input_shape[0])
with CustomObjectScope({"StringLookup": cls}):
output_data = testing_utils.layer_test(
cls,
kwargs=kwargs,
input_shape=input_shape,
input_data=input_data,
input_dtype=input_dtype,
expected_output_dtype=expected_output_dtype,
validate_training=False,
adapt_data=vocab_data)
self.assertAllClose(expected_output, output_data)
@keras_parameterized.run_all_keras_modes(always_skip_v1=True)
class StringLookupVocabularyTest(keras_parameterized.TestCase,
preprocessing_test_utils.PreprocessingLayerTest
):
def _write_to_temp_file(self, file_name, vocab_list):
vocab_path = os.path.join(self.get_temp_dir(), file_name + ".txt")
with gfile.GFile(vocab_path, "w") as writer:
for vocab in vocab_list:
writer.write(vocab + "\n")
writer.flush()
writer.close()
return vocab_path
def test_int_output_explicit_vocab(self):
vocab_data = ["earth", "wind", "and", "fire"]
input_array = np.array([["earth", "wind", "and", "fire"],
["fire", "and", "earth", "michigan"]])
expected_output = [[2, 3, 4, 5], [5, 4, 2, 1]]
input_data = keras.Input(shape=(None,), dtype=dtypes.string)
layer = string_lookup.StringLookup(vocabulary=vocab_data)
int_data = layer(input_data)
model = keras.Model(inputs=input_data, outputs=int_data)
output_data = model.predict(input_array)
self.assertAllEqual(expected_output, output_data)
def test_int_output_explicit_vocab_with_special_tokens(self):
vocab_data = ["", "[UNK]", "earth", "wind", "and", "fire"]
input_array = np.array([["earth", "wind", "and", "fire"],
["fire", "and", "earth", "michigan"]])
expected_output = [[2, 3, 4, 5], [5, 4, 2, 1]]
input_data = keras.Input(shape=(None,), dtype=dtypes.string)
layer = string_lookup.StringLookup(vocabulary=vocab_data)
int_data = layer(input_data)
model = keras.Model(inputs=input_data, outputs=int_data)
output_data = model.predict(input_array)
self.assertAllEqual(expected_output, output_data)
def test_no_vocab(self):
with self.assertRaisesRegex(
ValueError, "You must set the layer's vocabulary"):
layer = string_lookup.StringLookup()
layer([["a"]])
def test_binary_output(self):
vocab_data = ["earth", "wind", "and", "fire"]
input_array = np.array([["earth", "wind", "and", "fire"],
["fire", "and", "earth", "michigan"]])
expected_output = [[0, 1, 1, 1, 1], [1, 1, 0, 1, 1]]
input_data = keras.Input(shape=(None,), dtype=dtypes.string)
layer = string_lookup.StringLookup(
vocabulary=vocab_data, output_mode="binary")
res = layer(input_data)
model = keras.Model(inputs=input_data, outputs=res)
output_data = model.predict(input_array)
self.assertAllEqual(expected_output, output_data)
def test_count_output(self):
vocab_data = ["earth", "wind", "and", "fire"]
input_array = np.array([["earth", "earth", "fire", "fire"],
["fire", "and", "earth", "michigan"]])
expected_output = [[0, 2, 0, 0, 2], [1, 1, 0, 1, 1]]
input_data = keras.Input(shape=(None,), dtype=dtypes.string)
layer = string_lookup.StringLookup(
vocabulary=vocab_data, output_mode="count")
res = layer(input_data)
model = keras.Model(inputs=input_data, outputs=res)
output_data = model.predict(input_array)
self.assertAllEqual(expected_output, output_data)
def test_sparse_output(self):
vocab_data = ["earth", "wind", "and", "fire"]
input_data = keras.Input(shape=(None,), dtype=dtypes.string)
layer = string_lookup.StringLookup(
vocabulary=vocab_data, output_mode="binary", sparse=True)
res = layer(input_data)
    self.assertEqual(res.__class__.__name__, "SparseKerasTensor")
def test_get_vocab_returns_str(self):
vocab_data = ["earth", "wind", "and", "fire"]
expected_vocab = ["", "[UNK]", "earth", "wind", "and", "fire"]
layer = string_lookup.StringLookup(vocabulary=vocab_data)
layer_vocab = layer.get_vocabulary()
self.assertAllEqual(expected_vocab, layer_vocab)
self.assertIsInstance(layer_vocab[0], six.text_type)
inverse_layer = string_lookup.StringLookup(
vocabulary=layer.get_vocabulary(), invert=True)
layer_vocab = inverse_layer.get_vocabulary()
self.assertAllEqual(expected_vocab, layer_vocab)
self.assertIsInstance(layer_vocab[0], six.text_type)
def test_int_output_explicit_vocab_from_file(self):
vocab_list = ["earth", "wind", "and", "fire"]
vocab_path = self._write_to_temp_file("vocab_file", vocab_list)
input_array = np.array([["earth", "wind", "and", "fire"],
["fire", "and", "earth", "michigan"]])
expected_output = [[2, 3, 4, 5], [5, 4, 2, 1]]
input_data = keras.Input(shape=(None,), dtype=dtypes.string)
layer = string_lookup.StringLookup(vocabulary=vocab_path)
int_data = layer(input_data)
model = keras.Model(inputs=input_data, outputs=int_data)
output_data = model.predict(input_array)
self.assertAllEqual(expected_output, output_data)
def test_int_output_explicit_vocab_from_file_via_setter(self):
vocab_list = ["earth", "wind", "and", "fire"]
vocab_path = self._write_to_temp_file("vocab_file", vocab_list)
input_array = np.array([["earth", "wind", "and", "fire"],
["fire", "and", "earth", "michigan"]])
expected_output = [[2, 3, 4, 5], [5, 4, 2, 1]]
input_data = keras.Input(shape=(None,), dtype=dtypes.string)
layer = string_lookup.StringLookup()
layer.set_vocabulary(vocab_path)
int_data = layer(input_data)
model = keras.Model(inputs=input_data, outputs=int_data)
output_data = model.predict(input_array)
self.assertAllEqual(expected_output, output_data)
def test_non_unique_vocab_fails(self):
vocab_data = ["earth", "wind", "and", "fire", "fire"]
with self.assertRaisesRegex(ValueError, ".*repeated term.*fire.*"):
_ = string_lookup.StringLookup(vocabulary=vocab_data)
def test_non_unique_vocab_from_file_fails(self):
vocab_list = ["earth", "wind", "and", "fire", "earth"]
vocab_path = self._write_to_temp_file("repeat_vocab_file", vocab_list)
with self.assertRaisesRegex(
errors_impl.FailedPreconditionError,
".*HashTable has different value for same key.*earth.*"):
_ = string_lookup.StringLookup(vocabulary=vocab_path)
def test_inverse_layer(self):
vocab_data = ["earth", "wind", "and", "fire"]
input_array = np.array([[2, 3, 4, 5], [5, 4, 2, 0]])
expected_output = np.array([["earth", "wind", "and", "fire"],
["fire", "and", "earth", ""]])
input_data = keras.Input(shape=(None,), dtype=dtypes.int64)
layer = string_lookup.StringLookup(vocabulary=vocab_data, invert=True)
int_data = layer(input_data)
model = keras.Model(inputs=input_data, outputs=int_data)
output_data = model.predict(input_array)
self.assertAllEqual(expected_output, output_data)
def test_inverse_layer_from_file(self):
vocab_data = ["earth", "wind", "and", "fire"]
input_array = np.array([[2, 3, 4, 5], [5, 4, 2, 1]])
expected_output = np.array([["earth", "wind", "and", "fire"],
["fire", "and", "earth", "[UNK]"]])
vocab_path = self._write_to_temp_file("vocab_file", vocab_data)
input_data = keras.Input(shape=(None,), dtype=dtypes.int64)
layer = string_lookup.StringLookup(vocabulary=vocab_path, invert=True)
int_data = layer(input_data)
model = keras.Model(inputs=input_data, outputs=int_data)
output_data = model.predict(input_array)
self.assertAllEqual(expected_output, output_data)
def test_inverse_layer_from_file_with_non_default_msk(self):
vocab_data = ["earth", "wind", "and", "fire"]
input_array = np.array([[2, 3, 4, 5], [5, 4, 2, 0]])
expected_output = np.array([["earth", "wind", "and", "fire"],
["fire", "and", "earth", "[M]"]])
vocab_path = self._write_to_temp_file("vocab_file", vocab_data)
input_data = keras.Input(shape=(None,), dtype=dtypes.int64)
layer = string_lookup.StringLookup(
vocabulary=vocab_path, invert=True, mask_token="[M]")
int_data = layer(input_data)
model = keras.Model(inputs=input_data, outputs=int_data)
output_data = model.predict(input_array)
self.assertAllEqual(expected_output, output_data)
def test_forward_backward_explicit_vocab(self):
vocab_data = ["earth", "wind", "and", "fire"]
input_array = np.array([["earth", "wind", "and", "fire"],
["fire", "and", "earth", "michigan"]])
expected_output = np.array([["earth", "wind", "and", "fire"],
["fire", "and", "earth", "[UNK]"]])
input_data = keras.Input(shape=(None,), dtype=dtypes.string)
layer = string_lookup.StringLookup(vocabulary=vocab_data)
invert_layer = string_lookup.StringLookup(
vocabulary=vocab_data, invert=True)
int_data = layer(input_data)
out_data = invert_layer(int_data)
model = keras.Model(inputs=input_data, outputs=out_data)
output_data = model.predict(input_array)
self.assertAllEqual(expected_output, output_data)
def test_forward_backward_adapted_vocab(self):
adapt_data = ["earth", "wind", "and", "fire"]
input_array = np.array([["earth", "wind", "and", "fire"],
["fire", "and", "earth", "michigan"]])
expected_output = np.array([["earth", "wind", "and", "fire"],
["fire", "and", "earth", "[UNK]"]])
input_data = keras.Input(shape=(None,), dtype=dtypes.string)
layer = string_lookup.StringLookup()
layer.adapt(adapt_data)
invert_layer = string_lookup.StringLookup(
vocabulary=layer.get_vocabulary(), invert=True)
int_data = layer(input_data)
out_data = invert_layer(int_data)
model = keras.Model(inputs=input_data, outputs=out_data)
output_data = model.predict(input_array)
self.assertAllEqual(expected_output, output_data)
def test_ragged_string_input_multi_bucket(self):
vocab_data = ["earth", "wind", "and", "fire"]
input_array = ragged_factory_ops.constant([["earth", "wind", "fire"],
["fire", "and", "earth",
"ohio"]])
expected_output = [[3, 4, 6], [6, 5, 3, 2]]
input_data = keras.Input(shape=(None,), dtype=dtypes.string, ragged=True)
layer = string_lookup.StringLookup(num_oov_indices=2)
layer.set_vocabulary(vocab_data)
int_data = layer(input_data)
model = keras.Model(inputs=input_data, outputs=int_data)
output_data = model.predict(input_array)
self.assertAllEqual(expected_output, output_data)
if __name__ == "__main__":
test.main()
|
import requests
import json
from ..Config.config_handler import read_config
class ProcessBusDelays:
def __init__(self):
self.config_vals = read_config("Bus_API")
    # Fetch live bus data (arrival time, departure time, delay) from the API and return it.
def get_data_from_bus_api(self):
url = self.config_vals["api_url"]
headers = {self.config_vals["api_key_name"]:self.config_vals["api_key_value"]}
response = requests.get(url, headers=headers)
bus_data = json.loads(response.text)
bus_trip_delays = bus_data["entity"]
return bus_trip_delays
    # Structure the live data (delay, arrival time, departure time) into the format required to send the most recent stop details to the frontend.
def get_delay_for_trip_live(self):
bus_trip_delays=self.get_data_from_bus_api()
result_response={}
for trip in bus_trip_delays:
temp = trip["trip_update"]
if temp["trip"]["schedule_relationship"]!="CANCELED":
delay_details = temp["stop_time_update"][-1]
if "departure" not in delay_details:
temp_delay = delay_details["arrival"]
if "delay" not in temp_delay:
delay = "Not Available"
else:
delay = temp_delay["delay"]
result_response[trip["id"]] = {
"STOP_ID": delay_details["stop_id"],
"STOP_SEQUENCE": delay_details["stop_sequence"],
"DELAY": delay
}
else:
temp_delay = delay_details["departure"]
if "delay" not in temp_delay:
delay = "Not Available"
else:
delay = temp_delay["delay"]
result_response[trip["id"]] = {
"STOP_ID": delay_details["stop_id"],
"STOP_SEQUENCE": delay_details["stop_sequence"],
"DELAY": delay
}
else:
result_response[trip["id"]] = {"STATUS":"CANCELED"}
return result_response
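# --- Illustrative usage sketch (an assumption, not part of the original module) ---
# Because of the relative import above, this module is meant to be imported
# from its package. With the "Bus_API" config section populated it is used as:
#     processor = ProcessBusDelays()
#     delays = processor.get_delay_for_trip_live()
#     # each value describes the most recent stop update (or a CANCELED status)
#     # for one trip, keyed by the feed entity id.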
|
'''
Utility class for loading the CIFAR-10 dataset.
'''
import pickle
import re
import os
import numpy as np
import matplotlib.pyplot as plt
class Cifar10:
def __init__(self,path,one_hot = True):
self.path = path
self.one_hot = one_hot
self._epochs_completed = 0
self._index_in_epoch = 0
self._num_examples = 50000
def _load_data(self):
images = []
labels = []
files = os.listdir(self.path)
for file in files:
if re.match('data_batch_*',file):
with open(os.path.join(self.path,file),'rb') as fo:
data = pickle.load(fo,encoding='bytes')
images.append(data[b'data'].reshape([-1,3,32,32]))
labels.append(data[b'labels'])
elif re.match('test_batch',file):
with open(os.path.join(self.path,file),'rb') as fo:
data = pickle.load(fo,encoding='bytes')
test_images = np.array(data[b'data'].reshape([-1,3,32,32]))
test_labels = np.array(data[b'labels'])
images = np.concatenate(images,axis = 0)
labels = np.concatenate(labels,axis = 0)
perm = np.arange(self._num_examples)
np.random.shuffle(perm)
self.train_images = images.transpose(0,2,3,1)[perm]
self.train_labels = np.array(labels).reshape([-1,1])[perm]
self.test_images = test_images.transpose(0,2,3,1)
self.test_labels = test_labels.reshape([-1, 1])
if self.one_hot:
self.train_labels = self._one_hot(self.train_labels,10)
self.test_labels = self._one_hot(self.test_labels,10)
        return self.train_images, self.train_labels, self.test_images, self.test_labels
def next_batch(self,batch_size,shuffle=True):
start = self._index_in_epoch
self._index_in_epoch += batch_size
if start + batch_size > self._num_examples:
self._epochs_completed += 1
if shuffle:
perm = np.arange(self._num_examples)
np.random.shuffle(perm)
self.train_images = self.train_images[perm]
self.train_labels = self.train_labels[perm]
start = 0
self._index_in_epoch = batch_size
end = self._index_in_epoch
return self.train_images[start:end],self.train_labels[start:end]
def _one_hot(self,labels,num):
size = labels.shape[0]
label_one_hot = np.zeros([size,num])
for i in range(size):
label_one_hot[i,np.squeeze(labels[i])] = 1
return label_one_hot
def load_cifar10(path,one_hot = False):
cifar10 = Cifar10(path,one_hot)
cifar10._load_data()
return cifar10
if __name__ == '__main__':
path = 'd:/input_data/cifar-10/cifar-10-batches-py/'
cifar10 = load_cifar10(path,one_hot = False)
images = cifar10.train_images
labels = cifar10.train_labels
test_images = cifar10.test_images
test_labels = cifar10.test_labels
print("训练集shape = ",images.shape )
print('测试集shape = ',test_images.shape)
batch_xs,batch_ys = cifar10.next_batch(batch_size = 64,shuffle=True)
print("batch_xs shape = ",batch_xs.shape)
print("batch_ys shape = ",batch_ys.shape)
# plot image
classes = ["plane", "car", "bird", "cat", "deer", "dog", "frog", "horse", "ship", "truck"]
num_classes = len(classes)
samples_per_class = 7
for y, clss in enumerate(classes):
idxs = np.flatnonzero(labels == y)
idxs = np.random.choice(idxs, samples_per_class, replace=False)
for i, idx in enumerate(idxs):
plt_idx = i * num_classes + y + 1
plt.subplot(samples_per_class, num_classes, plt_idx)
plt.imshow(images[idx].astype('uint8'))
plt.axis('off')
if i == 0:
plt.title(clss)
plt.show()
    # batches.meta contents: {'num_cases_per_batch': 10000, 'label_names': ['airplane', 'automobile', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck'], 'num_vis': 3072}
# cifar10_name = np.load(os.path.join(path,'batches.meta'))
# print(cifar10_name)
|
# Read the input numbers (first field of each line).
with open('input.txt', 'r') as file:
    lines = file.readlines()
numbers = []
for line in lines:
    numbers.append(int(line.strip().split(' ')[0]))
# Record every pairwise sum <= 2020 together with the indices that produced it.
my_dict = {}
for i, num1 in enumerate(numbers):
    for j, num2 in enumerate(numbers):
        if i != j and num1 + num2 <= 2020:
            my_dict[num1 + num2] = [i, j]
# A number plus a recorded pair summing to 2020 gives the triple we want.
for i, num1 in enumerate(numbers):
    if 2020 - num1 in my_dict:
        print(num1, my_dict[2020 - num1])
|
# Generated by Django 2.2.10 on 2020-02-29 09:46
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [("approval", "0008_auto_20190506_1719")]
operations = [
migrations.AlterField(
model_name="approval",
name="applicant",
field=models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
related_name="applications",
to=settings.AUTH_USER_MODEL,
verbose_name="søker",
),
),
migrations.AlterField(
model_name="approval",
name="approver",
field=models.ForeignKey(
blank=True,
editable=False,
null=True,
on_delete=django.db.models.deletion.CASCADE,
related_name="approved_applications",
to=settings.AUTH_USER_MODEL,
verbose_name="godkjenner",
),
),
]
|
import logging
import re
from streamlink.plugin import Plugin, pluginmatcher
from streamlink.plugin.plugin import LOW_PRIORITY, stream_weight
from streamlink.stream.dash import DASHStream
from streamlink.utils.url import update_scheme
log = logging.getLogger(__name__)
@pluginmatcher(re.compile(
r"dash://(?P<url>.+)"
))
@pluginmatcher(priority=LOW_PRIORITY, pattern=re.compile(
r"(?P<url>.+\.mpd(?:\?.*)?)"
))
class MPEGDASH(Plugin):
@classmethod
def stream_weight(cls, stream):
match = re.match(r"^(?:(.*)\+)?(?:a(\d+)k)$", stream)
if match and match.group(1) and match.group(2):
weight, group = stream_weight(match.group(1))
weight += int(match.group(2))
return weight, group
elif match and match.group(2):
return stream_weight(match.group(2) + 'k')
else:
return stream_weight(stream)
def _get_streams(self):
url = update_scheme("https://", self.match.group(1), force=False)
log.debug("Parsing MPD URL: {0}".format(url))
return DASHStream.parse_manifest(self.session, url)
__plugin__ = MPEGDASH
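# Illustrative note (an assumption, not part of the original plugin): the
# stream_weight() override parses names such as "720p+a128k", adding the audio
# bitrate to the video weight, while a bare "a128k" is ranked like "128k".
#     MPEGDASH.stream_weight("720p+a128k")  # weight("720p") + 128
#     MPEGDASH.stream_weight("a128k")       # same as stream_weight("128k")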
|
"""
The Single Responsibility Principle
A class should have one, and only one, reason to change.
"""
food_bowl = 10
class Cat:
def __init__(self, name: str):
self.name = name
self.food_level = 30
self.cleanliness_level = 50
def meow(self):
print("meow")
def eat(self):
self.food_level += 10
def sleep(self):
print(f"{self.name} is now sleeping")
# breaking SRP
# def prepare_food_bowl(self):
# food_bowl += 10
# # breaking SRP
# def shower(self):
# self.cleanliness_level += 30
class Owner:
"""
The person that takes care of a cat
"""
    @staticmethod
    def prepare_food_bowl():
        global food_bowl
        food_bowl += 10
@staticmethod
def clean_cat(cat: Cat):
cat.cleanliness_level += 30
if __name__ == "__main__":
cat = Cat("fred")
owner = Owner()
print(f"cat clean level: {cat.cleanliness_level}")
owner.clean_cat(cat)
print(f"cat clean level: {cat.cleanliness_level}")
|
import os
from itertools import chain
from .log import warn
from .entry import Entry, LoadError
DB_FILE_DEFAULT = os.path.expanduser(os.path.join("~", ".ripple.txt"))
DB_FILE = os.path.realpath(os.environ.get('RIPPLE_DB', DB_FILE_DEFAULT))
DB_DIR = os.environ.get('RIPPLE_DB_DIR', None)
if DB_DIR and os.path.isdir(DB_DIR):
DB_DIR = os.path.realpath(DB_DIR)
else:
DB_DIR = None
DB_TMP_FILE = DB_FILE + '.tmp'
def get_db(write=False):
"""
Opens the database file and returns the resulting instance.
"""
files = set()
if DB_DIR and not write:
files = set([os.path.join(DB_DIR, file) for file in os.listdir(DB_DIR)])
if os.path.isfile(DB_FILE):
files.add(DB_FILE)
if not files:
return DB()
handles = []
for file in files:
handles.append(open(file, 'r+'))
return DB.load(chain(*handles))
def save_db(db):
"""
Saves the database to disk
"""
with open(DB_TMP_FILE, 'w') as handle:
db.dump(handle)
os.rename(DB_TMP_FILE, DB_FILE)
class DB(object):
"""
The database of all entries.
Nothing fancy for now, everything in-memory, so you should probably
archive entries at some point.
"""
def __init__(self):
self.entries = []
def append(self, entry):
"""
Adds the given entry at the end of the database.
@param Entry entry
"""
self.entries.append(entry)
def dump(self, writer):
"""
Encodes the state into text format and writes it
to the given writer.
@param [.write(str)-supporting object] writer
"""
for entry in self.entries:
writer.write(entry.dumps())
writer.write("\n")
@classmethod
def load(cls, iterable):
"""
Creates a new DB instance, pre-populated with the parseable
entries.
@param iterable iterable (usually emitting lines)
"""
x = 1
db = DB()
for line in iterable:
if not line.strip() or line[0] == '#':
continue
try:
entry = Entry.loads(line.strip(), id=x)
except LoadError:
warn("unable to parse a line!")
continue
if entry:
db.entries.append(entry)
x += 1
        db.entries.sort(key=lambda entry: entry.start)
return db
def get_entries(self):
"""
Returns an iterator over all entries
"""
return self.entries
def get_unfinished_entries(self):
"""
Returns a generator over all entries which have not been
finished yet. Usually this should only be one entry.
@return generator
"""
for entry in self.entries:
if not entry.end:
yield entry
def get_most_recent_entry(self):
"""
Returns the most-recently added entry.
@return Entry
"""
if self.entries:
return self.entries[-1]
return None
|
from contextlib import redirect_stdout
from io import StringIO
from pysweep.colors import SnakeColors
from pysweep.game import Board
class SnakeRenderer:
def __init__(self, with_chrome=True, colors=SnakeColors):
self._with_chrome = with_chrome
self._colors = colors
def render_board(self, grid, counter, time=(0, 0, 0)):
width = len(grid[0])
buffer = StringIO()
with redirect_stdout(buffer):
cols = [" "] + [str(col).rjust(2, ' ') for col in range(1, width + 1)]
horiz_border = "═══" * width
if self._with_chrome:
print(*cols)
print(" ╔═", horiz_border, "═╗", sep='')
for i, row in enumerate(grid, start=1):
line = [self.render_square(square) for square in row]
if self._with_chrome:
line.insert(0, '║')
line.insert(0, str(i).rjust(2, ' '))
line.append('║')
line.append(str(i).ljust(2, ' '))
print(*line, sep=' ')
if self._with_chrome:
print(" ╚═", horiz_border, "═╝", sep='')
print(*cols)
hours, minutes, seconds = [self._colors.TIMER + str(p).zfill(2) + self._colors.END for p in time]
print(f"Snakes Remaining: {self._colors.THREAT_COUNTER}{counter}{self._colors.END}")
print(f"Time Elapsed: {hours}:{minutes}:{seconds}")
return buffer.getvalue()
def render_square(self, square):
val = square.render()
if val == Board.Square.DEFAULT:
return f'{self._colors.GRASS}ψ{self._colors.END}'
if val == Board.Square.REVEALED:
return ' '
if val == Board.Square.THREAT:
return f'{self._colors.THREAT}೬{self._colors.END}'
if val == Board.Square.FLAG:
return f'{self._colors.FLAG}⚑{self._colors.END}'
return f"{self._colors.CLUE}{val}{self._colors.END}"
|
import os
import sys
from abc import ABCMeta, abstractmethod
from six import with_metaclass
modules = ["mod_unfilter", "mod_expand", "mod_sqli",
"mod_nosqli", "mod_lfi", "mod_crlf", "mod_exec", "mod_xss"]
themes = ["startbootstrap-agency-1.0.6", "startbootstrap-clean-blog-1.0.4"]
default = "unfilter"
class switch(object):
def __init__(self, value):
self.value = value
self.fall = False
def __iter__(self):
"""Return the match method once, then stop"""
yield self.match
        return  # end the generator; raising StopIteration here is an error on Python 3.7+ (PEP 479)
def match(self, *args):
"""Indicate whether or not to enter a case suite"""
if self.fall or not args:
return True
elif self.value in args:
self.fall = True
return True
else:
return False
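def _switch_demo(value):
    # Illustrative sketch (an assumption, not part of the original module):
    # shows the for/case idiom that the switch class above is meant to support.
    for case in switch(value):
        if case("mod_sqli"):
            return "sql injection module"
        if case("mod_xss", "mod_crlf"):
            return "markup/header module"
        if case():  # no arguments: always matches, acts as the default branch
            return "other module"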
class Attack(with_metaclass(ABCMeta, object)):
"""
This class represents an attack, it must be extended
for any class which implements a new type of attack
"""
name = "attack"
doReturn = True
# List of modules (strings) that must be launched before the current module
# Must be defined in the code of the module
require = []
if hasattr(sys, "frozen"):
ROOT_DIR = os.path.join(os.path.dirname(
unicode(sys.executable, sys.getfilesystemencoding())), "data")
else:
ROOT_DIR = os.path.abspath(os.path.dirname(os.path.dirname(__file__)))
CONFIG_DIR = os.path.join(ROOT_DIR, "config", "attacks")
# Color codes
STD = "\033[0;0m"
RED = "\033[0;31m"
GREEN = "\033[0;32m"
ORANGE = "\033[0;33m"
YELLOW = "\033[1;33m"
BLUE = "\033[1;34m"
MAGENTA = "\033[0;35m"
CYAN = "\033[0;36m"
GB = "\033[0;30m\033[47m"
# The priority of the module, from 0 (first) to 10 (last). Default is 5
PRIORITY = 5
def __init__(self, fp=None):
self.color = 0
self.verbose = 0
self.settings = {}
# List of modules (objects) that must be launched during the current module
# Must be left empty in the code
self.deps = []
if fp is not None:
self.fp = fp
def __call__(self):
pass
def setColor(self):
self.color = 1
def setVerbose(self):
self.verbose = 1
@abstractmethod
def generateHandler(self, tree_node=None, o=None, elem=None):
pass
@abstractmethod
def doJob(self, http_res, backend, dbms, parent=None):
pass
def final(self):
self.fp.write(os.path.join(self.fp.path, self.fp.target),
self.settings['html'], ext=None)
self.fp.copy(os.path.join(self.CONFIG_DIR, 'php.ini.sample'),
os.path.join(self.fp.path, 'php.ini'))
def loadRequire(self, source, backend, dbms, obj=[]):
self.deps = obj
self.settings = {"html": ""}
self.settings['html'] = source
for x in self.deps:
self.settings.update(x.doJob(
self.settings['html'], backend, dbms, parent=self.name))
x.doReturn = False
def log(self, fmt_string, *args):
if len(args) == 0:
print(fmt_string)
else:
print(fmt_string.format(*args))
if self.color:
sys.stdout.write(self.STD)
def logR(self, fmt_string, *args):
if self.color:
sys.stdout.write(self.RED)
self.log(fmt_string, *args)
def logG(self, fmt_string, *args):
if self.color:
sys.stdout.write(self.GREEN)
self.log(fmt_string, *args)
def logY(self, fmt_string, *args):
if self.color:
sys.stdout.write(self.YELLOW)
self.log(fmt_string, *args)
def logC(self, fmt_string, *args):
if self.color:
sys.stdout.write(self.CYAN)
self.log(fmt_string, *args)
def logW(self, fmt_string, *args):
if self.color:
sys.stdout.write(self.GB)
self.log(fmt_string, *args)
def logM(self, fmt_string, *args):
if self.color:
sys.stdout.write(self.MAGENTA)
self.log(fmt_string, *args)
def logB(self, fmt_string, *args):
if self.color:
sys.stdout.write(self.BLUE)
self.log(fmt_string, *args)
def logO(self, fmt_string, *args):
if self.color:
sys.stdout.write(self.ORANGE)
self.log(fmt_string, *args)
def Job(self, source, backend, dbms):
if self.doReturn == True:
if self.fp.tmpFile is not None:
self.generateHandler = type(self.__class__.generateHandler)(
self.fp.customizationClass.generateHandler, self, self.__class__)
self.__call__ = type(self.__class__.__call__)(
self.fp.customizationClass.__call__, self, self.__class__)
self.logG("[+] Your inputFile has been loaded!")
self.__call__()
self.settings = self.doJob(source, backend, dbms, parent=None)
self.final()
return self.settings
|
#! /usr/bin/python
# -*- coding: utf-8 -*-
from __future__ import division
import os, sys, re
# Assumes SolidPython is in site-packages or elsewhere in sys.path
from solid import *
from solid.utils import *
SEGMENTS = 24
# FIXME: ought to be 5
DFM = 5 # Default Material thickness
tab_width = 5
tab_offset = 4
tab_curve_rad = .35
# TODO: Slots & tabs make it kind of difficult to align pieces, since we
# always need the slot piece to overlap the tab piece by a certain amount.
# It might be easier to have the edges NOT overlap at all and then have tabs
# for the slots added programmatically. -ETJ 06 Mar 2013
def t_slot_holes( poly, point=None, edge_vec=RIGHT_VEC, screw_vec=DOWN_VEC, screw_type='m3', screw_length=16, material_thickness=DFM, kerf=0 ):
'''
Cuts a screw hole and two notches in poly so they'll
interface with the features cut by t_slot()
Returns a copy of poly with holes removed
    -- material_thickness is the thickness of the material *that will
    be attached* to the t-slot, NOT necessarily the material that poly
will be cut on.
-- screw_vec is the direction the screw through poly will face; normal to poly
-- edge_vec orients the holes to the edge they run parallel to
TODO: add kerf calculations
'''
point = point if point else ORIGIN
point = euclidify( point, Point3)
screw_vec = euclidify( screw_vec, Vector3)
edge_vec = euclidify( edge_vec, Vector3)
src_up = screw_vec.cross( edge_vec)
a_hole = square( [tab_width, material_thickness], center=True)
move_hole = tab_offset + tab_width/2
tab_holes = left( move_hole)( a_hole) + right( move_hole)( a_hole)
# Only valid for m3-m5 screws now
screw_dict = screw_dimensions.get( screw_type.lower())
if screw_dict:
screw_w = screw_dict['screw_outer_diam']
else:
raise ValueError( "Don't have screw dimensions for requested screw size %s"%screw_type)
# add the screw hole
tab_holes += circle( screw_w/2) # NOTE: needs any extra space?
tab_holes = transform_to_point( tab_holes, point, dest_normal=screw_vec, src_normal=UP_VEC, src_up=src_up)
return poly - tab_holes
def t_slot( poly, point=None, screw_vec=DOWN_VEC, face_normal=UP_VEC, screw_type='m3', screw_length=16, material_thickness=DFM, kerf=0 ):
'''
    Cuts a t-shaped slot in poly and adds two tabs
on the outside edge of poly.
Needs to be combined with t_slot_holes() on another
poly to make a valid t-slot connection
    -- material_thickness is the thickness of the material *that will
    be attached* to the t-slot, NOT necessarily the material that poly
will be cut on.
-- This method will align the t-slots where you tell them to go,
using point, screw_vec (the direction the screw will be inserted), and
face_normal, a vector normal to the face being altered. To avoid confusion,
it's often easiest to work on the XY plane.
TODO: include kerf in calculations
'''
point = point if point else ORIGIN
point = euclidify( point, Point3)
screw_vec = euclidify( screw_vec, Vector3)
face_normal = euclidify( face_normal, Vector3)
tab = tab_poly( material_thickness=material_thickness)
slot = nut_trap_slot( screw_type, screw_length, material_thickness=material_thickness)
# NOTE: dest_normal & src_normal are the same. This should matter, right?
tab = transform_to_point( tab, point, dest_normal=face_normal, src_normal=face_normal, src_up=-screw_vec)
slot = transform_to_point( slot, point, dest_normal=face_normal, src_normal=face_normal, src_up=-screw_vec)
return poly + tab - slot
def tab_poly( material_thickness=DFM):
r = [ [ tab_width + tab_offset, -EPSILON],
[ tab_offset, -EPSILON],
[ tab_offset, material_thickness],
[ tab_width + tab_offset, material_thickness],]
l = [ [-rp[0], rp[1]] for rp in r]
tab_pts = l + r
tab_faces = [[0,1,2,3], [4,5,6,7]]
tab = polygon( tab_pts, tab_faces)
# Round off the top points so tabs slide in more easily
round_tabs = False
if round_tabs:
points_to_round = [ [r[1], r[2], r[3]],
[r[2], r[3], r[0]],
[l[1], l[2], l[3]],
[l[2], l[3], l[0]],
]
tab = fillet_2d( three_point_sets=points_to_round, orig_poly=tab,
fillet_rad=1, remove_material=True)
return tab
def nut_trap_slot( screw_type='m3', screw_length=16, material_thickness=DFM):
# This shape has a couple uses.
# 1) Right angle joint between two pieces of material.
# A bolt goes through the second piece and into the first.
# 2) Set-screw for attaching to motor spindles.
# Bolt goes full length into a sheet of material. Set material_thickness
# to something small (1-2 mm) to make sure there's adequate room to
# tighten onto the shaft
# Only valid for m3-m5 screws now
screw_dict = screw_dimensions.get( screw_type.lower())
if screw_dict:
screw_w = screw_dict['screw_outer_diam']
screw_w2 = screw_w/2
nut_hole_x = (screw_dict[ 'nut_inner_diam'] + 0.2)/2 # NOTE: How are these tolerances?
nut_hole_h = screw_dict['nut_thickness'] + 0.5
slot_depth = material_thickness - screw_length - 0.5
# If a nut isn't far enough into the material, the sections
# that hold the nut in may break off. Make sure it's at least
# half a centimeter. More would be better, actually
nut_loc = -5
else:
raise ValueError( "Don't have screw dimensions for requested screw size %s"%screw_type)
slot_pts = [[ screw_w2, EPSILON ],
[ screw_w2, nut_loc],
[ nut_hole_x, nut_loc],
[ nut_hole_x, nut_loc - nut_hole_h],
[ screw_w2, nut_loc - nut_hole_h],
[ screw_w2, slot_depth],
]
# mirror the slot points on the left
slot_pts += [[-x, y] for x,y in slot_pts][ -1::-1]
# TODO: round off top corners of slot
# Add circles around t edges to prevent acrylic breakage
slot = polygon( slot_pts)
slot = union()(
slot,
translate( [nut_hole_x, nut_loc])( circle( tab_curve_rad)),
translate( [-nut_hole_x, nut_loc])( circle( tab_curve_rad))
)
return render()(slot)
def assembly():
a = union()
return a
if __name__ == '__main__':
a = assembly()
scad_render_to_file( a, file_header='$fn = %s;'%SEGMENTS, include_orig_code=True)
|
from dim import db
from dim.dns import get_ip_from_ptr_name
from dim.rrtype import validate_strings
from dim.errors import InvalidParameterError, AlreadyExistsError, InvalidZoneError, DimError
from tests.util import RPCTest, raises
def test_validate_strings():
validate_strings(None, 'strings', [r'''\"\\\223'''])
validate_strings(None, 'strings', [r'''\"\\\223'''])
def rrs(coll, fields=('record', 'zone', 'type', 'value')):
if not coll:
return set()
if isinstance(coll[0], dict):
return set(tuple(rr[field] for field in fields) for rr in coll
if 'type' not in fields or rr['type'] != 'SOA')
else:
return set(coll)
def print_messages(result):
print('\n'.join(m[1] for m in result['messages']))
def test_get_ip_from_ptr_name():
assert get_ip_from_ptr_name('1.2.3.4.in-addr.arpa.') == '4.3.2.1'
assert get_ip_from_ptr_name('1.2/32.2.3.4.in-addr.arpa.') == '4.3.2.1'
assert get_ip_from_ptr_name('1.2/32.2.3.4.in-addr.arpa.') == '4.3.2.1'
assert get_ip_from_ptr_name('2.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.1.0.0.0.ip6.arpa.') == \
'0001:0000:0000:0000:0000:0000:0000:0002'
with raises(ValueError):
get_ip_from_ptr_name('abc')
with raises(ValueError):
get_ip_from_ptr_name('1.3.4.in-addr.arpa.')
class ZoneTest(RPCTest):
def test_create_zone(self):
with raises(InvalidParameterError):
self.r.zone_create('a 0.com')
with raises(InvalidParameterError):
self.r.zone_create('test.com', soa_attributes={'a': 1})
self.r.zone_create('test.com')
with raises(AlreadyExistsError):
self.r.zone_create('test.com')
with raises(InvalidParameterError):
self.r.zone_create('test.com.')
with raises(InvalidParameterError):
self.r.zone_create('test-')
def test_zone_rename(self):
self.r.zone_create('internal', profile=True)
self.r.rr_create(name='internal.', type='NS', nsdname='external.')
self.r.rr_create(name='a.internal.', type='CNAME', cname='c')
self.r.zone_rename('internal', 'public', profile=True)
assert self.r.zone_list(profile=True) == [{'name': 'public'}]
assert rrs(self.r.rr_list(zone='public', profile=True)) == rrs([
('@', 'public', 'NS', 'external.'),
('a', 'public', 'CNAME', 'c')])
with raises(InvalidParameterError):
self.r.zone_rename('public', 'private', profile=False)
def test_add_view_1(self):
self.r.zone_create('test.com')
self.r.zone_create_view('test.com', 'view')
assert self.r.zone_list_views('test.com') == [{'name': 'default'}, {'name': 'view'}]
def test_rename_view(self):
self.r.zone_create('test.com')
self.r.zone_create_view('test.com', 'view')
self.r.zone_rename_view('test.com', 'view', 'test')
assert self.r.zone_list_views('test.com') == [{'name': 'default'}, {'name': 'test'}]
def test_add_view_2(self):
self.r.zone_create('profile', profile=True)
with raises(DimError):
self.r.zone_create_view('profile', 'test')
def test_attrs(self):
self.r.zone_create('test.com', attributes={'a': 'b'}, soa_attributes={'primary': 'c.'})
assert self.r.zone_get_attrs('test.com')['a'] == 'b'
self.r.zone_set_attrs('test.com', {'a': '1'})
assert self.r.zone_get_attrs('test.com')['a'] == '1'
self.r.zone_delete_attrs('test.com', ['a'])
assert 'a' not in self.r.zone_get_attrs('test.com')
assert self.r.zone_get_soa_attrs('test.com')['primary'] == 'c.'
self.r.zone_set_soa_attrs('test.com', {'primary': 'd.'})
assert self.r.zone_get_soa_attrs('test.com')['primary'] == 'd.'
def test_profiles(self):
self.r.zone_create('internal', profile=True, soa_attributes=dict(mail='a.b.com.', refresh='1337', expire=1))
self.r.zone_create('test.com', from_profile='internal', soa_attributes=dict(refresh='47'))
assert self.r.zone_get_soa_attrs('test.com')['refresh'] == 47
assert self.r.zone_get_soa_attrs('test.com')['mail'] == 'a.b.com.'
with raises(InvalidZoneError):
self.r.zone_delete('internal', profile=False)
with raises(InvalidZoneError):
self.r.zone_delete('test.com', profile=True)
self.r.zone_delete('internal', profile=True)
self.r.zone_delete('test.com')
def test_profile_rrs(self):
self.r.zone_create('profile', profile=True)
self.r.rr_create(name='@', zone='profile', type='NS', nsdname='whatever.com.', profile=True)
self.r.rr_create(name='a', zone='profile', type='TXT', strings='"something"', profile=True)
self.r.zone_create('test.com', from_profile='profile')
assert rrs(self.r.rr_list('*test.com.')) == rrs(
[('a', 'test.com', 'TXT', '"something"'),
('@', 'test.com', 'NS', 'whatever.com.')])
def test_list_zone(self):
self.r.zone_create('some.domain', soa_attributes=dict(primary='ns01.company.com.', mail='dnsadmin.company.com.'))
self.r.rr_create(name='some.domain.', type='MX', preference=10, exchange='mail.other.domain.', ttl=1200)
self.r.rr_create(name='www.some.domain.', type='A', ip='192.168.78.2')
records = self.r.rr_list(zone='some.domain')
assert records[0]['type'] == 'SOA' and records[0]['value'].startswith('ns01.company.com. dnsadmin.company.com')
assert rrs([('@', 'some.domain', 1200, 'MX', '10 mail.other.domain.'),
('www', 'some.domain', None, 'A', '192.168.78.2')])\
<= rrs(records, fields=('record', 'zone', 'ttl', 'type', 'value'))
def test_zone_list_underscore(self):
self.r.zone_create('nounderscore.com')
self.r.zone_create('with_underscore.com')
assert self.r.zone_list() == [
{'name': 'nounderscore.com'},
{'name': 'with_underscore.com'}]
assert self.r.zone_list('*_*') == [{'name': 'with_underscore.com'}]
def test_zone_list(self):
self.r.zone_create('profile.domain', profile=True)
self.r.zone_create('master.domain')
self.r.zone_create('no-no.domain')
self.r.zone_create('multipleviews.domain')
self.r.zone_create_view('multipleviews.domain', 'secondview')
self.r.zone_create('second.domain')
self.r.zone_group_create('zg')
self.r.zone_group_create('zg2')
self.r.zone_group_create('zg3')
self.r.zone_group_add_zone('zg', 'master.domain')
self.r.zone_group_add_zone('zg2', 'master.domain')
self.r.zone_group_add_zone('zg', 'second.domain')
self.r.zone_group_add_zone('zg', 'multipleviews.domain', 'default')
self.r.zone_group_add_zone('zg2', 'multipleviews.domain', 'secondview')
self.r.zone_group_add_zone('zg3', 'multipleviews.domain', 'default')
assert rrs(self.r.zone_list('*domain', profile=False, fields=True),
fields=('name', 'views', 'zone_groups')) == rrs(
[('second.domain', 1, 1),
('master.domain', 1, 2),
('multipleviews.domain', 2, 3),
('no-no.domain', 1, 0)
])
assert rrs(self.r.zone_list('*domain', profile=True, fields=True),
fields=('name',)) == rrs([('profile.domain',)])
assert rrs(self.r.zone_list('*domain', profile=False, fields=True),
fields=('name', 'views')) == rrs(
[('second.domain', 1),
('master.domain', 1),
('no-no.domain', 1),
('multipleviews.domain', 2)
])
assert self.r.zone_list(profile=True) == [{'name': 'profile.domain'}]
assert set([x['name'] for x in self.r.zone_list(profile=False)]) == set(
['master.domain',
'no-no.domain',
'multipleviews.domain',
'second.domain'
])
assert set([x['name'] for x in self.r.zone_list(profile=False, limit=2, offset=1)]) == set(
['multipleviews.domain',
'no-no.domain'
])
assert self.r.zone_count(profile=False) == 4
def test_zone_list_alias(self):
assert len(self.r.zone_list(alias=1)) == 0
assert self.r.zone_count(alias='a') == 0
self.r.zone_create('a.de')
assert [x['name'] for x in self.r.zone_list(profile=False, alias=True)] == ['a.de']
def test_revzone_profiles(self):
self.r.zone_create('revzone-profile', profile=True, soa_attributes={'primary': 'revzone.'})
self.r.ipblock_create('12.0.0.0/8', status='Container', attributes={'reverse_dns_profile': 'revzone-profile'})
self.r.ippool_create('pool')
self.r.ippool_add_subnet('pool', '12.0.0.0/23')
assert self.r.zone_get_soa_attrs('1.0.12.in-addr.arpa')['primary'] == 'revzone.'
def test_revzone_ipv6(self):
self.r.ipblock_create('2001:db8::/32', status='Container')
self.r.ippool_create('pool')
self.r.ippool_add_subnet('pool', '2001:db8:100:a::26c/126')
assert len(self.r.zone_list('a.0.0.0.0.0.1.0.8.b.d.0.1.0.0.2.ip6.arpa')) == 1
def test_subzone(self):
self.r.zone_create('server.lan')
self.r.rr_create(name='srv-monitoring.company.com.', type='TXT', strings=['test'])
self.r.rr_create(name='monitoring.company.com.', type='TXT', strings=['test2'])
self.r.zone_create('monitoring.company.com')
assert rrs(self.r.rr_list(zone='company.com', type='TXT')) == rrs([
('srv-monitoring', 'company.com', 'TXT', '"test"')])
assert rrs(self.r.rr_list(zone='monitoring.company.com', type='TXT')) == rrs([
('@', 'monitoring.company.com', 'TXT', '"test2"')])
def test_dnssec_attrs(self):
self.r.zone_create('test.com')
self.r.zone_set_attrs('test.com', {'default_algorithm': '8'})
self.r.zone_set_attrs('test.com', {'default_ksk_bits': 2048})
self.r.zone_set_attrs('test.com', {'default_zsk_bits': 1024})
with raises(InvalidParameterError):
self.r.zone_set_attrs('test.com', {'default_algorithm': 'rsasha1'})
with raises(InvalidParameterError):
self.r.zone_set_attrs('test.com', {'default_ksk_bits': 'a'})
with raises(InvalidParameterError):
self.r.zone_set_attrs('test.com', {'default_zsk_bits': 'a'})
def test_favorites(self):
# Test for a zone with a single view
self.r.zone_create('a.de')
assert self.r.zone_list2(favorite_only=True)['count'] == 0
assert not self.r.zone_favorite('a.de')
self.r.zone_favorite_add('a.de')
assert self.r.zone_favorite('a.de')
print(self.r.zone_list2(favorite_only=True))
assert self.r.zone_list2(favorite_only=True)['data'][0]['name'] == 'a.de'
self.r.zone_favorite_remove('a.de')
assert not self.r.zone_favorite('a.de')
class RR(RPCTest):
def test_create_twice(self):
self.r.ipblock_create('12.0.0.0/8', status='Container')
self.r.ippool_create('test')
self.r.ippool_add_subnet('test', '12.0.0.0/24')
self.r.ip_mark('12.0.0.1')
self.r.zone_create('test.com')
self.r.rr_create(name='a.test.com.', type='a', ip='12.0.0.1')
self.r.rr_create(name='a.test.com.', type='a', ip='12.0.0.1', overwrite_ptr=True)
self.r.zone_delete('test.com', cleanup=True)
assert rrs(self.r.rr_list(pattern='*0.0.12.in-addr.arpa.')) == rrs([])
def test_rr_create_invalid_profile(self):
with raises(InvalidZoneError):
self.r.rr_create(profile=True, type='NS', nsdname='a.', zone='inexistent', name='@')
def test_create_invalid_record_name(self):
self.r.zone_create('a.de')
self.r.rr_create(name='a.de.', type='TXT', strings=['text'], zone='a.de')
with raises(InvalidParameterError):
self.r.rr_create(name='suba.de.', type='TXT', strings=['text'], zone='a.de')
def test_rr_delete_1(self):
self.r.zone_create('test.com')
self.r.rr_create(name='a.test.com.', type='cname', cname='b.test.com.')
assert len(rrs(self.r.rr_list())) == 1
self.r.rr_delete(name='a.test.com.', type='cname', cname='b.test.com.')
assert len(rrs(self.r.rr_list())) == 0
def test_rr_delete_2(self):
self.r.ipblock_create('12.0.0.0/8', status='Container')
self.r.ippool_create('test')
self.r.ippool_add_subnet('test', '12.0.0.0/24')
self.r.zone_create('test.com')
self.r.rr_create(name='a.test.com.', type='a', ip='12.0.0.1')
assert len(rrs(self.r.rr_list())) == 2
self.r.rr_delete(name='a.test.com.', type='a', ip='12.0.0.1', free_ips=True)
assert len(rrs(self.r.rr_list())) == 0
assert self.r.ipblock_get_attrs('12.0.0.1')['status'] == 'Available'
def test_rr_delete_3(self):
self.r.ipblock_create('12::/32', status='Container')
self.r.zone_create('test.com')
self.r.ippool_create('test')
self.r.ippool_add_subnet('test', '12::/64')
self.r.rr_create(name='a.test.com.', type='a', ip='12.0.0.1')
self.r.rr_create(name='a.test.com.', type='aaaa', ip='12::1')
self.r.rr_delete(name='a.test.com.', type='a', ip='12.0.0.1')
assert rrs(self.r.rr_list('a.test.com.')) == rrs([
('a', 'test.com', 'AAAA', '12::1'),
('1.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0', '0.0.0.0.0.0.0.0.0.0.0.0.2.1.0.0.ip6.arpa', 'PTR', 'a.test.com.')])
def test_rr_delete_4(self):
self.r.ipblock_create('12.0.0.0/8', status='Container')
self.r.ippool_create('test')
self.r.ippool_add_subnet('test', '12.0.0.0/24')
self.r.zone_create('test.com')
self.r.rr_create(name='a.test.com.', type='a', ip='12.0.0.1')
self.r.rr_create(name='b.test.com.', type='a', ip='12.0.0.1', overwrite_ptr=True)
self.r.rr_delete(name='a.test.com.', type='a', ip='12.0.0.1')
assert not self.r.rr_list('a.test.com.')
assert rrs(self.r.rr_list('b.test.com.')) == rrs([
('b', 'test.com', 'A', '12.0.0.1'),
('1', '0.0.12.in-addr.arpa', 'PTR', 'b.test.com.')])
def test_rr_delete_5(self):
# trigger recursive delete via rr_delete(ptr)
self.r.ipblock_create('12.0.0.0/8', status='Container')
self.r.ippool_create('test')
self.r.ippool_add_subnet('test', '12.0.0.0/24')
self.r.zone_create('test.com')
self.r.rr_create(name='a.test.com.', type='a', ip='12.0.0.1')
self.r.rr_create(name='b.test.com.', type='cname', cname='a')
self.r.rr_delete(ip='12.0.0.1', type='ptr', ptrdname='a.test.com.', references='delete')
assert rrs(self.r.rr_list()) == set()
def test_rr_delete_6(self):
# delete only one forward reference; expect ptr unchanged
self.r.ipblock_create('12.0.0.0/8', status='Container')
self.r.ippool_create('test')
self.r.ippool_add_subnet('test', '12.0.0.0/24')
self.r.zone_create('test.com')
self.r.zone_create_view('test.com', 'other')
self.r.rr_create(name='a.test.com.', type='a', ip='12.0.0.1', views=['default', 'other'])
self.r.rr_delete(name='a.test.com.', type='a', ip='12.0.0.1', views=['default'])
assert rrs(self.r.rr_list()) == rrs([
('a', 'test.com', 'A', '12.0.0.1'),
('1', '0.0.12.in-addr.arpa', 'PTR', 'a.test.com.')])
def test_rr_delete_by_id(self):
self.r.zone_create('test.com')
self.r.rr_create(name='a.test.com.', type='a', ip='12.0.0.1')
rr_id = self.r.rr_get_references(name='a.test.com.', type='A')['root']
with raises(InvalidParameterError):
self.r.rr_delete(ids=rr_id)
self.r.rr_delete(ids=[rr_id], zone='a.de')
self.r.rr_delete(ids=[rr_id], unknown='a')
self.r.rr_delete(ids=[rr_id])
def test_ptr_overwrite(self):
self.r.ipblock_create('12.0.0.0/8', status='Container')
self.r.ippool_create('test')
self.r.ippool_add_subnet('test', '12.0.0.0/24')
self.r.ip_mark('12.0.0.1')
self.r.ip_mark('12.0.0.2')
self.r.zone_create('test.com')
self.r.rr_create(name='a.test.com.', type='A', ip='12.0.0.1')
self.r.rr_create(ip='12.0.0.1', type='PTR', ptrdname='b.test.com.')
assert rrs(self.r.rr_list(pattern='*')) == rrs(
[('a', 'test.com', 'A', '12.0.0.1'),
('b', 'test.com', 'A', '12.0.0.1'),
('1', '0.0.12.in-addr.arpa', 'PTR', 'a.test.com.')])
self.r.rr_create(ip='12.0.0.1', type='PTR', ptrdname='b.test.com.', overwrite_ptr=True)
assert rrs(self.r.rr_list(pattern='*')) == rrs(
[('a', 'test.com', 'A', '12.0.0.1'),
('b', 'test.com', 'A', '12.0.0.1'),
('1', '0.0.12.in-addr.arpa', 'PTR', 'b.test.com.')])
self.r.rr_create(name='b.test.com.', type='A', ip='12.0.0.2')
self.r.rr_create(ip='12.0.0.1', type='PTR', ptrdname='b.test.com.', overwrite_ptr=True, overwrite_a=True)
assert rrs(self.r.rr_list(pattern='*')) == rrs(
[('a', 'test.com', 'A', '12.0.0.1'),
('b', 'test.com', 'A', '12.0.0.1'),
('1', '0.0.12.in-addr.arpa', 'PTR', 'b.test.com.'),
('2', '0.0.12.in-addr.arpa', 'PTR', 'b.test.com.')])
def test_create_a(self):
self.r.ip_mark('12.0.0.1')
self.r.ip_mark('12.0.0.2')
self.r.ip_mark('12::1')
self.r.ip_mark('12::2')
self.r.zone_create('test.com')
self.r.rr_create(name='a.test.com.', type='A', ip='12.0.0.1', ttl=1)
self.r.rr_create(name='b.test.com.', type='A', ip='12.0.0.2')
self.r.rr_create(name='c.test.com.', type='AAAA', ip='12::1')
self.r.rr_create(name='d.test.com.', type='AAAA', ip='12::2')
assert rrs(self.r.rr_list('*test.com.')) == rrs(
[('a', 'test.com', 'A', '12.0.0.1'),
('b', 'test.com', 'A', '12.0.0.2'),
('c', 'test.com', 'AAAA', '12::1'),
('d', 'test.com', 'AAAA', '12::2')])
def test_create_a2(self):
# ND-57
self.r.zone_create('test.com')
with raises(InvalidParameterError):
self.r.rr_create(name='test.com.', type='A', ip='::1')
with raises(InvalidParameterError):
self.r.rr_create(name='test.com.', type='AAAA', ip='127.0.0.1')
with raises(InvalidParameterError):
self.r.rr_get_attrs(name='test.com', type='A', ip='::1')
with raises(InvalidParameterError):
self.r.rr_get_attrs(name='test.com', type='AAAA', ip='0.0.0.1')
self.r.rr_create(name='test.com.', type='AAAA', ip='::1')
assert rrs(self.r.rr_list('*test.com.')) == rrs(
[('@', 'test.com', 'AAAA', '::1')])
self.r.rr_get_attrs(name='test.com.', type='AAAA', ip='::1')
def test_create_cname(self):
self.r.zone_create('test.com')
with raises(InvalidParameterError):
self.r.rr_create(name='a.test.com', type='CNAME', cname='c.test.com')
self.r.rr_create(name='a.test.com.', type='CNAME', cname='c.test.com.')
self.r.rr_create(name='b.test.com.', type='MX', preference=10, exchange='test.com.')
with raises(InvalidParameterError):
self.r.rr_create(name='b.test.com', type='CNAME', cname='c.test.com')
with raises(InvalidParameterError):
self.r.rr_create(name='d.test.com.', type='MX', preference=10, exchange='a.test.com.')
def test_create_cname_2(self):
# ND-100
self.r.zone_create('test.com')
self.r.rr_create(name='cname.test.com.', type='CNAME', cname='test.com.')
self.r.ipblock_create('12.0.0.0/8', status='Container')
self.r.ippool_create('test')
self.r.ippool_add_subnet('test', '12.0.0.0/24')
self.r.rr_create(ip='12.0.0.1', type='PTR', ptrdname='cname.test.com.', create_linked=False)
with raises(InvalidParameterError):
self.r.rr_create(ip='12.0.0.1', type='PTR', ptrdname='cname.test.com.', create_linked=True)
def test_create_srv(self):
self.r.zone_create('test.com')
self.r.rr_create(name='a.test.com.', type='CNAME', cname='c.test.com.')
with raises(InvalidParameterError):
self.r.rr_create(name='_a._b.test.com.', type='SRV', priority=10, weight=1, port=1, target='a.test.com.')
self.r.rr_create(name='_a._b.test.com.', type='SRV', priority=10, weight=1, port=1, target='c.test.com.')
with raises(InvalidParameterError):
self.r.rr_create(name='c.test.com.', type='CNAME', cname='a.test.com.')
def test_email(self):
self.r.zone_create('test.com')
        self.r.zone_set_soa_attrs('test.com', {'mail': r'first\.last.test.com.'})
        assert r" first\.last.test.com. " in self.r.zone_dump('test.com')
def test_create_revzone(self):
self.r.rr_create(ip='12.0.0.1', type='PTR', ptrdname='test.com.', create_linked=False, create_revzone=True)
def test_create_rr_rp(self):
self.r.zone_create('test.com')
        self.r.rr_create(name='a.test.com.', type='RP', mbox=r'john\.doe.example.com.', txtdname='test.com.')
def test_create_rr_cert(self):
self.r.zone_create('test.com')
self.r.rr_create(name='a.test.com.', type='CERT', certificate_type=1, key_tag=2, algorithm=3, certificate='abc')
with raises(DimError):
self.r.rr_create(name='a.test.com.', type='CERT', certificate_type=1, key_tag=2, algorithm=3, certificate='a c')
def test_create_rr_tlsa(self):
default = dict(name='a.test.com.',
type='TLSA',
certificate_usage=1, selector=2, matching_type=1, certificate='abcd')
def rr_create(**kwargs):
d = default.copy()
d.update(kwargs)
return self.r.rr_create(**d)
self.r.zone_create('test.com')
assert set(rr_create(certificate_usage=4, selector=2, matching_type=3)['messages']) == set([
(20, 'Creating RR a TLSA 4 2 3 abcd in zone test.com'),
(30, 'certificate_usage value 4 is unassigned'),
(30, 'selector value 2 is unassigned'),
(30, 'matching_type value 3 is unassigned'),
])
rr_create(certificate_usage='PKIX-TA', selector='PRIVSEL', matching_type='SHA2-512')
for k, v in (('certificate', '1 2'),
('certificate', 'afcs'),
('selector', -1),
('matching_type', 256),
('certificate_usage', 'bad')):
with raises(DimError):
                rr_create(**{k: v})
def test_rr_list_value_as_object(self):
self.r.zone_create('test.com')
rrs = [dict(type='TXT', strings='"a" "b"'),
dict(type='mx', preference=5, exchange='test.com.'),
dict(type='HINFO', os='os', cpu='cpu'),
dict(type='a', ip='1.2.3.4'),
dict(type='srv', priority=10, weight=1, port=1, target='a.test.com.'),
dict(type='naptr', order=1, preference=2, flags='f', service=r'223', regexp=r'r', replacement='a.de.'),
dict(type='cert', certificate_type=1, algorithm=2, key_tag=3, certificate='cert'),
dict(type='rp', mbox='gigi.a.de.', txtdname='test.com.')
]
for param in rrs:
name = '_a._b.test.com.'
self.r.rr_create(name=name, **param)
del param['type']
assert self.r.rr_list(name, value_as_object=True)[0]['value'] == param
self.r.rr_delete(name=name)
def test_root_zone_list(self):
self.r.zone_create('.')
self.r.rr_create(name='a.', type='TXT', strings=[''])
assert self.r.rr_list('a.')[0]['record'] == 'a'
def test_rr_attrs(self):
self.r.zone_create('a.de')
rrs = [dict(name='hinfo.a.de.', type='HINFO', os='os\\"', cpu='\\\\'),
dict(name='mx.a.de.', type='MX', preference=10, exchange='a.de.')]
for rr in rrs:
self.r.rr_create(**rr)
self.r.rr_set_ttl(ttl=300, **rr)
self.r.rr_set_comment(comment='com', **rr)
attrs = self.r.rr_get_attrs(**rr)
assert attrs['comment'] == 'com'
assert attrs['ttl'] == 300
with raises(InvalidParameterError):
self.r.rr_set_attrs(**rrs[0])
for dryrun in [False, True]:
comment = '%s' % dryrun
ttl = int(dryrun)
attrs = self.r.rr_set_attrs(ttl=ttl, comment=comment, dryrun=dryrun, **rr)
assert attrs['comment'] == comment
assert attrs['ttl'] == ttl
def test_rr_sorting(self):
self.r.zone_create('a.de')
rrs = [dict(name='a.de.', type='NS', nsdname='ns.a.de.', ttl=600),
dict(name='a.de.', type='A', ip='1.2.3.4', ttl=3600),
dict(name='*.b.a.de.', type='CNAME', cname='b.a.de.'),
dict(name='mx.a.de.', type='MX', preference=10, exchange='a.de.')]
for rr in rrs:
self.r.rr_create(**rr)
assert(self.r.rr_list(zone='a.de', limit=2)[1]['record'] == '@')
# TODO: test rr_list(created_by, modified_by)
class PTR(RPCTest):
def setUp(self):
RPCTest.setUp(self)
# Initial setup:
# Forward Zone:
# w1.zone. IN A 12.0.0.13
# Reverse Zone:
# 13.0.0.12.in-addr.arpa IN PTR w1.zone.
# 14.0.0.12.in-addr.arpa IN PTR w2.zone.
self.r.ipblock_create('12.0.0.0/8', status='Container')
self.r.ippool_create("pool")
self.r.ippool_add_subnet("pool", "12.0.0.0/24")
self.r.ip_mark('12.0.0.13')
self.r.ip_mark('12.0.0.14')
self.r.ip_mark('12.0.0.15')
self.r.rr_create(ip='12.0.0.14', type='PTR', ptrdname='w2.zone.')
self.r.zone_create("zone")
self.r.rr_create(name="w1.zone.", type='A', ip="12.0.0.13")
assert rrs(self.r.rr_list('*zone.')) == rrs(
[('w1', 'zone', 'A', '12.0.0.13'),
('14', '0.0.12.in-addr.arpa', 'PTR', 'w2.zone.'),
('13', '0.0.12.in-addr.arpa', 'PTR', 'w1.zone.')])
def test_new(self):
self.r.rr_create(ip='12.0.0.15', type='PTR', ptrdname='w2.zone.')
assert rrs(self.r.rr_list('12.0.0.15')) == rrs(
[('15', '0.0.12.in-addr.arpa', 'PTR', 'w2.zone.'),
('w2', 'zone', 'A', '12.0.0.15')])
def test_no_overwrite(self):
assert self.r.rr_create(type='PTR', ip='12.0.0.13', ptrdname='w3.zone.')['messages'] == [
(30, 'Not overwriting: 13.0.0.12.in-addr.arpa. PTR w1.zone.'),
(20, 'Creating RR w3 A 12.0.0.13 in zone zone')]
assert rrs(self.r.rr_list('12.0.0.13')) == rrs([
('w1', 'zone', 'A', '12.0.0.13'),
('w3', 'zone', 'A', '12.0.0.13'),
('13', '0.0.12.in-addr.arpa', 'PTR', 'w1.zone.')])
def test_overwrite(self):
assert set(self.r.rr_create(type='PTR', ip='12.0.0.13', ptrdname='w3.zone.', overwrite_ptr=True)['messages']) == set([
(30, 'Deleting RR 13 PTR w1.zone. from zone 0.0.12.in-addr.arpa'),
(20, 'Creating RR 13 PTR w3.zone. in zone 0.0.12.in-addr.arpa'),
(20, 'Creating RR w3 A 12.0.0.13 in zone zone')])
assert rrs(self.r.rr_list('12.0.0.13')) == rrs(
[('w1', 'zone', 'A', '12.0.0.13'),
('13', '0.0.12.in-addr.arpa', 'PTR', 'w3.zone.'),
('w3', 'zone', 'A', '12.0.0.13')])
class TXT(RPCTest):
def setUp(self):
RPCTest.setUp(self)
self.r.zone_create('test.com')
def test_parse(self):
for txt in ('unquoted', '"', '\\"', '\\', '"\\"', '"\\', '"\\0"', '"\\999"', 'a"b"', '"a"b', '"""', '"\\\\\\"'):
with raises(InvalidParameterError):
self.r.rr_create(name='a.test.com.', type='TXT', txt=txt)
canonical = {'"simple"': '"simple"',
'"ignore" \t\n"whitespace"': '"ignore" "whitespace"',
'"regular escape\\100"': '"regular escaped"',
'"preserved escape\\\\\\"\\244"': '"preserved escape\\\\\\"\\244"',
'""': '',
'"" "a"': '"a"',
'"a" ""': '"a"',
r'"\\" "\"" "\223"': r'"\\" "\"" "\223"'}
for i, original in enumerate(canonical.keys()):
rr_name = '%d.test.com.' % i
self.r.rr_create(name=rr_name, type='TXT', strings=original)
assert self.r.rr_list(rr_name)[0]['value'] == canonical[original]
class IpblockRRs(RPCTest):
def setUp(self):
RPCTest.setUp(self)
self.r.ipblock_create('12.0.0.0/8', status='Container')
self.r.ippool_create('test')
self.r.ippool_add_subnet('test', '12.0.0.0/24')
self.r.zone_create('test.com')
self.r.rr_create(type='PTR', ptrdname='test.com.', ip='12.0.0.1')
assert len(rrs(self.r.rr_list())) == 2
def test_free_ip_simple(self):
self.r.ip_free('12.0.0.1')
assert len(rrs(self.r.rr_list('*'))) == 0
def test_free_ip_cname(self):
self.r.rr_create(name='a.test.com.', type='CNAME', cname='test.com.')
self.r.rr_create(name='b.test.com.', type='CNAME', cname='a.test.com.')
self.r.rr_create(name='c.test.com.', type='CNAME', cname='b.test.com.')
self.r.ip_free('12.0.0.1')
assert len(rrs(self.r.rr_list())) == 0
def test_delete_pool(self):
self.r.ippool_delete('test', force=True, delete_subnets=True)
assert len(rrs(self.r.rr_list())) == 0
class ZoneViewTest(RPCTest):
def setUp(self):
RPCTest.setUp(self)
self.r.zone_create('example.com')
self.r.zone_rename_view('example.com', 'default', 'us')
self.r.zone_create_view('example.com', 'de')
self.r.zone_create_view('example.com', 'sg')
self.r.rr_create(type='A', name='example.com.', ip='212.217.217.7', views=['us', 'de', 'sg'])
self.r.rr_create(type='A', name='example.com.', ip='74.208.13.212', views=['us'])
self.r.rr_create(type='A', name='example.com.', ip='7.12.212.98', views=['sg'])
self.r.rr_create(type='MX', name='example.com.', preference=10, exchange='mx-ha1.company.de.', views=['us', 'de', 'sg'])
self.r.rr_create(type='MX', name='example.com.', preference=10, exchange='mx-ha2.company.de.', views=['us', 'de', 'sg'])
self.r.rr_create(type='MX', name='example.com.', preference=10, exchange='mx01.company.com.', views=['de'])
self.r.rr_create(type='MX', name='example.com.', preference=10, exchange='mx02.company.com.', views=['de'])
self.r.rr_create(type='MX', name='example.com.', preference=10, exchange='mx1.example.com.', views=['us'])
def test_export_views(self):
assert rrs(self.r.rr_list(zone='example.com', view='de')[1:]) == rrs(
[('@', 'example.com', 'A', '212.217.217.7'),
('@', 'example.com', 'MX', '10 mx01.company.com.'),
('@', 'example.com', 'MX', '10 mx02.company.com.'),
('@', 'example.com', 'MX', '10 mx-ha1.company.de.'),
('@', 'example.com', 'MX', '10 mx-ha2.company.de.')])
assert rrs(self.r.rr_list(zone='example.com', view='us')[1:]) == rrs(
[('@', 'example.com', 'A', '212.217.217.7'),
('@', 'example.com', 'A', '74.208.13.212'),
('@', 'example.com', 'MX', '10 mx1.example.com.'),
('@', 'example.com', 'MX', '10 mx-ha1.company.de.'),
('@', 'example.com', 'MX', '10 mx-ha2.company.de.')])
assert rrs(self.r.rr_list(zone='example.com', view='sg')[1:]) == rrs(
[('@', 'example.com', 'A', '212.217.217.7'),
('@', 'example.com', 'A', '7.12.212.98'),
('@', 'example.com', 'MX', '10 mx-ha1.company.de.'),
('@', 'example.com', 'MX', '10 mx-ha2.company.de.')])
def test_favorites(self):
assert not self.r.zone_favorite('example.com')
self.r.zone_favorite_add('example.com', view='us')
assert self.r.zone_favorite('example.com', 'us')
assert not self.r.zone_favorite('example.com', 'de')
self.r.zone_favorite_add('example.com', view='us')
assert self.r.zone_favorite('example.com', 'us')
self.r.zone_favorite_add('example.com', view='de')
assert self.r.zone_favorite('example.com', 'de')
assert self.r.zone_favorite('example.com', 'us')
fav = self.r.zone_list2(favorite_only=True)['data']
assert len(fav) == 1
assert len(fav[0]['views']) == 2
self.r.zone_favorite_remove('example.com', view='us')
assert not self.r.zone_favorite('example.com', 'us')
assert self.r.zone_favorite('example.com', 'de')
self.r.zone_favorite_remove('example.com', view='de')
assert not self.r.zone_favorite('example.com', 'de')
assert not self.r.zone_favorite('example.com', 'us')
def no_warn(result):
assert [x for x in result['messages'] if x[0] == 30] == []
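# Illustrative only: results carry a 'messages' list of (level, text) tuples, where
# level 20 is informational and level 30 is a warning. For example (hypothetical values):
#   no_warn({'messages': [(20, 'Creating RR a A 1.1.1.1 in zone a.de')]})  # passes
#   no_warn({'messages': [(30, 'Not overwriting: ...')]})                  # would raise AssertionError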
class RRReferencesTest(RPCTest):
def setUp(self):
RPCTest.setUp(self)
db.session.execute('ALTER TABLE rr AUTO_INCREMENT = 1')
self.r.ipblock_create('1.0.0.0/8', status='Container')
self.r.ippool_create('p')
self.r.ippool_add_subnet('p', '1.1.1.0/24')
self.r.zone_create('a.de')
self.r.zone_create_view('a.de', 'second')
self.r.rr_create(type='A', name='a.de.', ip='1.1.1.1', views=['default', 'second'])
self.r.zone_create('b.de')
self.r.rr_create(type='MX', name='mx.b.de.', preference=10, exchange='a.de.')
self.r.zone_create('c.de')
self.r.rr_create(type='CNAME', name='cname.c.de.', cname='mx.b.de.')
self.r.zone_create('subzone.a.de')
self.r.zone_delete_view('subzone.a.de', 'second')
self.r.zone_create_view('subzone.a.de', 'third')
nodes = [{'id': 1,
'name': 'a.de.',
'type': 'A',
'value': '1.1.1.1',
'view': 'default',
'zone': 'a.de'},
{'id': 2,
'name': '1.1.1.1.in-addr.arpa.',
'type': 'PTR',
'value': 'a.de.',
'view': 'default',
'zone': '1.1.1.in-addr.arpa'},
{'id': 3,
'name': 'a.de.',
'type': 'A',
'value': '1.1.1.1',
'view': 'second',
'zone': 'a.de'},
{'id': 4,
'name': 'mx.b.de.',
'type': 'MX',
'value': '10 a.de.',
'view': 'default',
'zone': 'b.de'},
{'id': 5,
'name': 'cname.c.de.',
'type': 'CNAME',
'value': 'mx.b.de.',
'view': 'default',
'zone': 'c.de'}]
self.nodes = {}
for node in nodes:
self.nodes[node['id']] = node
self.mx_ref_result = {'graph': {4: [5], 5: []},
'records': [self.nodes[i] for i in [4, 5]],
'root': 4}
def test_get_references(self):
a_rr = dict(name='a.de.', type='A', view='second', ip='1.1.1.1')
assert self.r.rr_get_references(delete=True, **a_rr) == \
{'graph': {3: [4], 4: [5], 5: []},
'records': [self.nodes[i] for i in [3, 4, 5]],
'root': 3}
assert self.r.rr_get_references(delete=False, **a_rr) == \
{'graph': {3: [4], 4: []},
'records': [self.nodes[i] for i in [3, 4]],
'root': 3}
ptr_rr = dict(name='1.1.1.1.in-addr.arpa.', type='PTR', ptrdname='a.de.')
assert self.r.rr_get_references(delete=True, **ptr_rr) == \
{'graph': {1: [4], 2: [1, 3], 3: [4], 4: [5], 5: []},
'records': [self.nodes[i] for i in [1, 2, 3, 4, 5]],
'root': 2}
assert self.r.rr_get_references(delete=False, **ptr_rr) == \
{'graph': {1: [4], 2: [1, 3], 3: [4], 4: []},
'records': [self.nodes[i] for i in [1, 2, 3, 4]],
'root': 2}
mx_rr = dict(name='mx.b.de.', type='MX', exchange='a.de.', preference=10)
assert self.r.rr_get_references(delete=True, **mx_rr) == self.mx_ref_result
assert self.r.rr_get_references(delete=False, **mx_rr) == self.mx_ref_result
self.r.zone_delete_view('a.de', 'default', cleanup=True)
assert self.r.rr_get_references(delete=True, **a_rr) == \
{'graph': {2: [], 3: [4, 2], 4: [5], 5: []},
'records': [self.nodes[i] for i in [2, 3, 4, 5]],
'root': 3}
assert self.r.rr_get_references(delete=False, **a_rr) == \
{'graph': {2: [], 3: [4, 2], 4: []},
'records': [self.nodes[i] for i in [2, 3, 4]],
'root': 3}
def test_edit_comment_ttl(self):
no_warn(self.r.rr_edit(2))
no_warn(self.r.rr_edit(2, comment='comment'))
no_warn(self.r.rr_edit(2, comment=None))
no_warn(self.r.rr_edit(2, ttl=77))
no_warn(self.r.rr_edit(2, ttl=None))
no_warn(self.r.rr_edit(2, references=[1, 3, 4], comment='comment', ttl=77))
ptr_rr = dict(name='1.1.1.1.in-addr.arpa.', type='PTR', ptrdname='a.de.')
assert self.r.rr_get_references(delete=False, **ptr_rr) == \
{'graph': {1: [4], 2: [1, 3], 3: [4], 4: []},
'records': [self.nodes[i] for i in [1, 2, 3, 4]],
'root': 2}
attrs = self.r.rr_get_attrs(**ptr_rr)
assert attrs['comment'] == 'comment'
assert attrs['ttl'] == 77
def test_edit_no_diff(self):
props = dict(name='mx.b.de.', comment=None, ttl=None, preference=10, exchange='a.de.', views=['default'])
import itertools
for i in range(len(props)):
no_warn(self.r.rr_edit(4, dict([x[0] for x in itertools.combinations(iter(props.items()), i + 1)])))
no_warn(self.r.rr_edit(4, name='mx.b.de.'))
assert self.r.rr_get_references(delete=False, type='MX', name='mx.b.de.') == \
{'graph': {4: [5], 5: []},
'records': [self.nodes[i] for i in [4, 5]],
'root': 4}
def test_edit_no_references(self):
no_warn(self.r.rr_edit(4, references=[5], preference=20))
assert rrs(self.r.rr_list(zone='b.de')) == rrs([('mx', 'b.de', 'MX', '20 a.de.')])
assert self.r.rr_get_references(name='mx.b.de.')['graph'][6] == [5]
def test_edit_references(self):
no_warn(self.r.rr_edit(4, references=[5], name='mx2.b.de.'))
assert rrs(self.r.rr_list(zone='b.de')) == rrs([
('mx2', 'b.de', 'MX', '10 a.de.')])
assert rrs(self.r.rr_list(zone='c.de')) == rrs([
('cname', 'c.de', 'CNAME', 'mx2.b.de.')])
# TODO fix this; it's probably bad
def test_edit_fail(self):
self.r.rr_create(name='mx2.b.de.', type='CNAME', cname='smth')
with raises(InvalidParameterError):
self.r.rr_edit(77)
self.r.rr_edit(4, references=[77])
self.r.rr_edit(4, cname='smth.')
self.r.rr_edit(4, exchange='smth.')
self.r.rr_edit(4, references=[5], name='mx2.b.de.')
def test_edit_subzone(self):
self.r.rr_edit(2, references=[1, 3, 4], ptrdname='subzone.a.de.', views=['default', 'third'])
assert rrs(self.r.rr_list(zone='1.1.1.in-addr.arpa')) ==\
rrs([('1', '1.1.1.in-addr.arpa', 'PTR', 'subzone.a.de.')])
a_rrs = rrs([('@', 'subzone.a.de', 'A', '1.1.1.1')])
for view in ['default', 'third']:
assert rrs(self.r.rr_list(zone='subzone.a.de', type='A', view=view)) == a_rrs
def test_edit_a_ip_with_ref(self):
self.r.rr_edit(1, references=[2], ip='1.1.1.2')
assert rrs(self.r.rr_list(zone='1.1.1.in-addr.arpa')) ==\
rrs([('2', '1.1.1.in-addr.arpa', 'PTR', 'a.de.')])
assert rrs(self.r.rr_list(zone='a.de', view='default')) == rrs([('@', 'a.de', 'A', '1.1.1.2')])
assert rrs(self.r.rr_list(zone='a.de', view='second')) == rrs([('@', 'a.de', 'A', '1.1.1.1')])
def test_edit_a_ip_no_ref(self):
self.r.rr_edit(1, references=None, ip='1.1.1.2')
assert rrs(self.r.rr_list(zone='1.1.1.in-addr.arpa')) ==\
rrs([('1', '1.1.1.in-addr.arpa', 'PTR', 'a.de.'),
('2', '1.1.1.in-addr.arpa', 'PTR', 'a.de.')])
assert rrs(self.r.rr_list(zone='a.de', view='default')) == rrs([('@', 'a.de', 'A', '1.1.1.2')])
assert rrs(self.r.rr_list(zone='a.de', view='second')) == rrs([('@', 'a.de', 'A', '1.1.1.1')])
def test_edit_ptr_ip_no_ref(self):
self.r.rr_edit(2, references=None, ip='1.1.1.2')
assert rrs(self.r.rr_list(zone='1.1.1.in-addr.arpa')) ==\
rrs([('2', '1.1.1.in-addr.arpa', 'PTR', 'a.de.')])
assert rrs(self.r.rr_list(zone='a.de', view='second')) == rrs([('@', 'a.de', 'A', '1.1.1.1')])
assert rrs(self.r.rr_list(zone='a.de', view='default')) == rrs([('@', 'a.de', 'A', '1.1.1.1')])
def test_edit_ptr_ip_with_ref(self):
self.r.rr_edit(2, references=[1], ip='1.1.1.2')
assert rrs(self.r.rr_list(zone='1.1.1.in-addr.arpa')) ==\
rrs([('2', '1.1.1.in-addr.arpa', 'PTR', 'a.de.')])
assert rrs(self.r.rr_list(zone='a.de', view='default')) == rrs([('@', 'a.de', 'A', '1.1.1.2')])
assert rrs(self.r.rr_list(zone='a.de', view='second')) == rrs([('@', 'a.de', 'A', '1.1.1.1')])
def test_edit_a_name_no_ref(self):
self.r.rr_edit(1, references=None, name='new.a.de.')
assert rrs(self.r.rr_list(zone='1.1.1.in-addr.arpa')) ==\
rrs([('1', '1.1.1.in-addr.arpa', 'PTR', 'a.de.')])
assert rrs(self.r.rr_list(zone='a.de', view='default')) == rrs([('new', 'a.de', 'A', '1.1.1.1')])
assert rrs(self.r.rr_list(zone='a.de', view='second')) == rrs([('@', 'a.de', 'A', '1.1.1.1')])
def test_edit_a_name_no_ref2(self):
self.r.zone_delete_view('a.de', 'second', True)
self.r.rr_edit(1, references=None, name='new.a.de.')
assert rrs(self.r.rr_list(zone='1.1.1.in-addr.arpa')) ==\
rrs([('1', '1.1.1.in-addr.arpa', 'PTR', 'new.a.de.')])
assert rrs(self.r.rr_list(zone='a.de', view='default')) == rrs([('new', 'a.de', 'A', '1.1.1.1')])
def test_edit_a_name_with_ref(self):
self.r.rr_edit(1, references=[2], name='new.a.de.')
assert rrs(self.r.rr_list(zone='1.1.1.in-addr.arpa')) ==\
rrs([('1', '1.1.1.in-addr.arpa', 'PTR', 'new.a.de.')])
assert rrs(self.r.rr_list(zone='a.de', view='default')) == rrs([('new', 'a.de', 'A', '1.1.1.1')])
assert rrs(self.r.rr_list(zone='a.de', view='second')) == rrs([('@', 'a.de', 'A', '1.1.1.1')])
def test_edit_ptr_name_no_ref(self):
self.r.rr_edit(2, references=[], ptrdname='new.a.de.')
assert rrs(self.r.rr_list(zone='1.1.1.in-addr.arpa')) ==\
rrs([('1', '1.1.1.in-addr.arpa', 'PTR', 'new.a.de.')])
assert rrs(self.r.rr_list(zone='a.de', view='default')) == rrs([('@', 'a.de', 'A', '1.1.1.1')])
assert rrs(self.r.rr_list(zone='a.de', view='second')) == rrs([('@', 'a.de', 'A', '1.1.1.1')])
def test_edit_ptr_name_with_ref(self):
self.r.rr_edit(2, references=[1], ptrdname='new.a.de.')
assert rrs(self.r.rr_list(zone='1.1.1.in-addr.arpa')) ==\
rrs([('1', '1.1.1.in-addr.arpa', 'PTR', 'new.a.de.')])
assert rrs(self.r.rr_list(zone='a.de', view='default')) == rrs([('new', 'a.de', 'A', '1.1.1.1')])
assert rrs(self.r.rr_list(zone='a.de', view='second')) == rrs([('@', 'a.de', 'A', '1.1.1.1')])
def test_edit_cname_with_ref(self):
self.r.zone_create_view('c.de', 'second')
self.r.rr_create(type='CNAME', name='cname.c.de.', cname='mx.b.de.', views=['second'])
self.r.rr_edit(id=4, name='mx2.b.de.')
for view in ['default', 'second']:
assert rrs(self.r.rr_list(zone='c.de', view=view, type='cname')) == rrs([('cname', 'c.de', 'CNAME', 'mx.b.de.')])
self.r.rr_edit(id=7, name='mx3.b.de.', references=[5])
for view, cname in [('default', 'mx3.b.de.'), ('second', 'mx.b.de.')]:
assert rrs(self.r.rr_list(zone='c.de', view=view, type='cname')) == rrs([('cname', 'c.de', 'CNAME', cname)])
|
""" Module to access the Saml endpoints """
# pylint: disable=too-many-lines,too-many-locals,too-many-public-methods,too-few-public-methods
from typing import Dict, Union
from pydantic import BaseModel
from ...models import (
MigrateAuthToSamlJsonBody,
ResetSamlAuthDataToEmailJsonBody,
ResetSamlAuthDataToEmailResponse200,
SamlCertificateStatus,
StatusOK,
UploadSamlIdpCertificateMultipartData,
UploadSamlPrivateCertificateMultipartData,
UploadSamlPublicCertificateMultipartData,
)
from ..base import ApiBaseClass
class SamlApi(ApiBaseClass):
"""Endpoints for configuring and interacting with SAML."""
async def migrate_auth_to_saml(
self,
*,
json_body: Union[MigrateAuthToSamlJsonBody, Dict],
) -> None:
"""Migrate user accounts authentication type to SAML.
Migrates accounts from one authentication provider to another. For
example, you can upgrade your authentication provider from email to
SAML.
Permissions:
Must have `manage_system` permission.
Minimum Server Version:
5.28
Api Reference:
`MigrateAuthToSaml <https://api.mattermost.com/#operation/MigrateAuthToSaml>`_
"""
url = "/users/migrate_auth/saml"
if isinstance(json_body, BaseModel):
json_json_body = json_body.dict(exclude_unset=True)
else:
json_json_body = json_body
request_kwargs = {
"url": url,
"json": json_json_body,
}
# pylint: disable-next=protected-access
async with self.client._get_httpx_client() as httpx_client:
response = await httpx_client.post(
**request_kwargs,
)
if self.skip_response_parsing:
return response
return response
async def get_saml_metadata(
self,
) -> str:
"""Get metadata
Get SAML metadata from the server. SAML must be configured properly.
Permissions:
No permission required.
Api Reference:
`GetSamlMetadata <https://api.mattermost.com/#operation/GetSamlMetadata>`_
"""
url = "/saml/metadata"
request_kwargs = {
"url": url,
}
# pylint: disable-next=protected-access
async with self.client._get_httpx_client() as httpx_client:
response = await httpx_client.get(
**request_kwargs,
)
if self.skip_response_parsing:
return response
if response.status_code == 200:
response200 = response.json()
return response200
return response
async def get_saml_metadata_from_idp(
self,
) -> str:
"""Get metadata from Identity Provider
Get SAML metadata from the Identity Provider. SAML must be configured
properly.
Permissions:
No permission required.
Api Reference:
`GetSamlMetadataFromIdp <https://api.mattermost.com/#operation/GetSamlMetadataFromIdp>`_
"""
url = "/saml/metadatafromidp"
request_kwargs = {
"url": url,
}
# pylint: disable-next=protected-access
async with self.client._get_httpx_client() as httpx_client:
response = await httpx_client.post(
**request_kwargs,
)
if self.skip_response_parsing:
return response
if response.status_code == 200:
response200 = response.json()
return response200
return response
async def upload_saml_idp_certificate(
self,
*,
multipart_data: Union[UploadSamlIdpCertificateMultipartData, Dict],
) -> StatusOK:
"""Upload IDP certificate
Upload the IDP certificate to be used with your SAML configuration. The
server will pick a hard-coded filename for the IdpCertificateFile
setting in your `config.json`.
Permissions:
Must have `sysconsole_write_authentication` permission.
Api Reference:
`UploadSamlIdpCertificate <https://api.mattermost.com/#operation/UploadSamlIdpCertificate>`_
"""
url = "/saml/certificate/idp"
multipart_body_data = UploadSamlIdpCertificateMultipartData.parse_obj(
multipart_data
)
request_kwargs = {
"url": url,
"data": multipart_body_data.get_data(),
"files": multipart_body_data.get_files(),
}
# pylint: disable-next=protected-access
async with self.client._get_httpx_client() as httpx_client:
response = await httpx_client.post(
**request_kwargs,
)
if self.skip_response_parsing:
return response
if response.status_code == 200:
response200 = StatusOK.parse_obj(response.json())
return response200
return response
async def delete_saml_idp_certificate(
self,
) -> StatusOK:
"""Remove IDP certificate
Delete the current IDP certificate being used with your SAML
configuration. This will also disable SAML on your system as this
certificate is required for SAML.
Permissions:
Must have `sysconsole_write_authentication` permission.
Api Reference:
`DeleteSamlIdpCertificate <https://api.mattermost.com/#operation/DeleteSamlIdpCertificate>`_
"""
url = "/saml/certificate/idp"
request_kwargs = {
"url": url,
}
# pylint: disable-next=protected-access
async with self.client._get_httpx_client() as httpx_client:
response = await httpx_client.delete(
**request_kwargs,
)
if self.skip_response_parsing:
return response
if response.status_code == 200:
response200 = StatusOK.parse_obj(response.json())
return response200
return response
async def upload_saml_public_certificate(
self,
*,
multipart_data: Union[UploadSamlPublicCertificateMultipartData, Dict],
) -> StatusOK:
"""Upload public certificate
Upload the public certificate to be used for encryption with your SAML
configuration. The server will pick a hard-coded filename for the
PublicCertificateFile setting in your `config.json`.
Permissions:
Must have `sysconsole_write_authentication` permission.
Api Reference:
`UploadSamlPublicCertificate <https://api.mattermost.com/#operation/UploadSamlPublicCertificate>`_
"""
url = "/saml/certificate/public"
multipart_body_data = UploadSamlPublicCertificateMultipartData.parse_obj(
multipart_data
)
request_kwargs = {
"url": url,
"data": multipart_body_data.get_data(),
"files": multipart_body_data.get_files(),
}
# pylint: disable-next=protected-access
async with self.client._get_httpx_client() as httpx_client:
response = await httpx_client.post(
**request_kwargs,
)
if self.skip_response_parsing:
return response
if response.status_code == 200:
response200 = StatusOK.parse_obj(response.json())
return response200
return response
async def delete_saml_public_certificate(
self,
) -> StatusOK:
"""Remove public certificate
Delete the current public certificate being used with your SAML
configuration. This will also disable encryption for SAML on your system
as this certificate is required for that.
Permissions:
Must have `sysconsole_write_authentication` permission.
Api Reference:
`DeleteSamlPublicCertificate <https://api.mattermost.com/#operation/DeleteSamlPublicCertificate>`_
"""
url = "/saml/certificate/public"
request_kwargs = {
"url": url,
}
# pylint: disable-next=protected-access
async with self.client._get_httpx_client() as httpx_client:
response = await httpx_client.delete(
**request_kwargs,
)
if self.skip_response_parsing:
return response
if response.status_code == 200:
response200 = StatusOK.parse_obj(response.json())
return response200
return response
async def upload_saml_private_certificate(
self,
*,
multipart_data: Union[UploadSamlPrivateCertificateMultipartData, Dict],
) -> StatusOK:
"""Upload private key
Upload the private key to be used for encryption with your SAML
configuration. The server will pick a hard-coded filename for the
PrivateKeyFile setting in your `config.json`.
Permissions:
Must have `sysconsole_write_authentication` permission.
Api Reference:
`UploadSamlPrivateCertificate <https://api.mattermost.com/#operation/UploadSamlPrivateCertificate>`_
"""
url = "/saml/certificate/private"
multipart_body_data = UploadSamlPrivateCertificateMultipartData.parse_obj(
multipart_data
)
request_kwargs = {
"url": url,
"data": multipart_body_data.get_data(),
"files": multipart_body_data.get_files(),
}
# pylint: disable-next=protected-access
async with self.client._get_httpx_client() as httpx_client:
response = await httpx_client.post(
**request_kwargs,
)
if self.skip_response_parsing:
return response
if response.status_code == 200:
response200 = StatusOK.parse_obj(response.json())
return response200
return response
async def delete_saml_private_certificate(
self,
) -> StatusOK:
"""Remove private key
Delete the current private key being used with your SAML configuration.
This will also disable encryption for SAML on your system as this key is
required for that.
Permissions:
Must have `sysconsole_write_authentication` permission.
Api Reference:
`DeleteSamlPrivateCertificate <https://api.mattermost.com/#operation/DeleteSamlPrivateCertificate>`_
"""
url = "/saml/certificate/private"
request_kwargs = {
"url": url,
}
# pylint: disable-next=protected-access
async with self.client._get_httpx_client() as httpx_client:
response = await httpx_client.delete(
**request_kwargs,
)
if self.skip_response_parsing:
return response
if response.status_code == 200:
response200 = StatusOK.parse_obj(response.json())
return response200
return response
async def get_saml_certificate_status(
self,
) -> SamlCertificateStatus:
"""Get certificate status
Get the status of the uploaded certificates and keys in use by your SAML
configuration.
Permissions:
Must have `sysconsole_write_authentication` permission.
Api Reference:
`GetSamlCertificateStatus <https://api.mattermost.com/#operation/GetSamlCertificateStatus>`_
"""
url = "/saml/certificate/status"
request_kwargs = {
"url": url,
}
# pylint: disable-next=protected-access
async with self.client._get_httpx_client() as httpx_client:
response = await httpx_client.get(
**request_kwargs,
)
if self.skip_response_parsing:
return response
if response.status_code == 200:
response200 = SamlCertificateStatus.parse_obj(response.json())
return response200
return response
async def reset_saml_auth_data_to_email(
self,
*,
json_body: Union[ResetSamlAuthDataToEmailJsonBody, Dict],
) -> ResetSamlAuthDataToEmailResponse200:
"""Reset AuthData to Email
Reset the AuthData field of SAML users to their email. This is meant to
be used when the \"id\" attribute is set to an empty value (\"\") from a
previously non-empty value.
Permissions:
Must have `manage_system` permission.
Minimum Server Version:
5.35
Api Reference:
`ResetSamlAuthDataToEmail <https://api.mattermost.com/#operation/ResetSamlAuthDataToEmail>`_
"""
url = "/saml/reset_auth_data"
if isinstance(json_body, BaseModel):
json_json_body = json_body.dict(exclude_unset=True)
else:
json_json_body = json_body
request_kwargs = {
"url": url,
"json": json_json_body,
}
# pylint: disable-next=protected-access
async with self.client._get_httpx_client() as httpx_client:
response = await httpx_client.post(
**request_kwargs,
)
if self.skip_response_parsing:
return response
if response.status_code == 200:
response200 = ResetSamlAuthDataToEmailResponse200.parse_obj(response.json())
return response200
return response
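# A minimal usage sketch (not part of the generated client). It assumes an already
# configured async client object that exposes this class as `client.saml`; the
# attribute name, the multipart field name and the file path below are assumptions.
#
#     async def rotate_idp_certificate(client, cert_path: str) -> None:
#         # Upload a new IDP certificate, then check what the server reports.
#         with open(cert_path, "rb") as fh:
#             await client.saml.upload_saml_idp_certificate(
#                 multipart_data={"certificate": fh}
#             )
#         status = await client.saml.get_saml_certificate_status()
#         print(status)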
|
import logging
from datetime import datetime
from typing import Optional, Generator, Tuple
import shutil
from dateutil.parser import isoparse
from pathlib import Path
import pandas as pd
from collections import defaultdict
import calplot
from sqlite_utils import Database
from summary import update_daily_summaries
from summary import update_seasonal_summaries
from summary import get_nmis
from summary import get_usage_df
from jinja2 import Environment, FileSystemLoader
import plotly.express as px
import plotly.graph_objects as go
db = Database("nemdata.db")
def format_month(dt: datetime) -> str:
return dt.strftime("%b %Y")
def get_date_range(nmi: str):
sql = """select MIN(first_interval) start, MAX(last_interval) end
from nmi_summary where nmi = :nmi
"""
row = list(db.query(sql, {"nmi": nmi}))[0]
start = isoparse(row["start"])
end = isoparse(row["end"])
return start, end
def get_years(nmi: str):
start, end = get_date_range(nmi)
x = start.year
while x <= end.year:
yield x
x += 1
def get_day_data(
    nmi: str,
) -> Generator[Tuple[datetime, float, float, float, float, float, float], None, None]:
sql = "select day, imp, exp, imp_morning, imp_day, imp_evening, imp_night from daily_reads where nmi = :nmi"
for row in db.query(sql, {"nmi": nmi}):
dt = datetime.strptime(row["day"], "%Y-%m-%d")
row = (
dt,
row["imp"],
row["exp"],
row["imp_morning"],
row["imp_day"],
row["imp_evening"],
row["imp_night"],
)
yield row
def get_import_overview_chart(nmi: str) -> Path:
"""Save calendar plot"""
days = []
data = []
for dt, imp, _, _, _, _, _ in get_day_data(nmi):
days.append(dt)
data.append(imp)
data = pd.Series(data, index=days)
plot = calplot.calplot(
data,
suptitle=f"Daily kWh for {nmi}",
how=None,
vmin=0,
vmax=35,
cmap="YlOrRd",
daylabels="MTWTFSS",
colorbar=True,
)
fig = plot[0]
file_path = Path(f"build/{nmi}_import.png")
fig.savefig(file_path, bbox_inches="tight")
logging.info("Created %s", file_path)
return file_path
def get_daily_plot(nmi: str) -> str:
"""Save calendar plot"""
day_data = list(get_day_data(nmi))
data = {
"morning": [x[3] for x in day_data],
"day": [x[4] for x in day_data],
"evening": [x[5] for x in day_data],
"night": [x[6] for x in day_data],
"export": [-x[2] for x in day_data],
}
index = [x[0] for x in day_data]
df = pd.DataFrame(index=index, data=data)
color_dict = {'export': 'green', 'morning': 'tan', 'day': 'skyblue', 'evening': 'orangered', 'night': 'slategrey'}
fig = px.bar(df, x=df.index, y=list(data.keys()), color_discrete_map = color_dict)
fig.update_xaxes(
rangeslider_visible=False,
rangeselector=dict(
buttons=list(
[
dict(count=1, label="1m", step="month", stepmode="backward"),
dict(count=6, label="6m", step="month", stepmode="backward"),
dict(count=1, label="1y", step="year", stepmode="backward"),
dict(step="all"),
]
)
),
)
file_path = Path(f"build/{nmi}_daily.html")
return fig.to_html(file_path, full_html=False, include_plotlyjs="cdn")
def get_usage_plot(nmi: str) -> str:
"""Save calendar plot"""
df = get_usage_df(nmi)
fig = px.line(df, x=df.index, y=["consumption", "export"])
file_path = Path(f"build/{nmi}_usage.html")
return fig.write_html(file_path, full_html=False, include_plotlyjs="cdn")
def get_export_overview_chart(nmi: str) -> Optional[Path]:
"""Save calendar plot"""
days = []
data = []
for dt, _, exp, _, _, _, _ in get_day_data(nmi):
if exp:
days.append(dt)
data.append(exp)
if len(data) == 0:
return None
data = pd.Series(data, index=days)
plot = calplot.calplot(
data,
suptitle=f"Daily Export kWh for {nmi}",
how=None,
vmin=0,
vmax=35,
cmap="Greens",
daylabels="MTWTFSS",
colorbar=True,
)
fig = plot[0]
file_path = Path(f"build/{nmi}_export.png")
fig.savefig(file_path, bbox_inches="tight")
logging.info("Created %s", file_path)
return file_path
def copy_static_data():
"""Copy static file"""
files = ["bootstrap.min.css"]
for file in files:
shutil.copy(f"templates/{file}", f"build/{file}")
def get_seasonal_data(nmi: str):
year_data = {}
for year in get_years(nmi):
data = get_year_season_data(nmi, year)
year_data[year] = data
return year_data
def get_year_season_data(nmi: str, year: int):
imp_values = {}
exp_values = {}
sql = """select season, imp, exp
from season_reads
where nmi = :nmi and year = :year
"""
for r in db.query(sql, {"nmi": nmi, "year": year}):
season = r["season"]
imp = r["imp"]
exp = r["exp"]
imp_values[season] = imp
exp_values[season] = exp
a_days = 90
a_avg = imp_values.get("A - Summer", None)
a_sum = a_avg * a_days if a_avg else None
b_days = 92
b_avg = imp_values.get("B - Autumn", None)
b_sum = b_avg * b_days if b_avg else None
c_days = 92
c_avg = imp_values.get("C - Winter", None)
c_sum = c_avg * c_days if c_avg else None
d_days = 91
d_avg = imp_values.get("D - Spring", None)
d_sum = d_avg * d_days if d_avg else None
yr_sum = 0
yr_days = 0
if a_sum is not None:
yr_sum += a_sum
yr_days += a_days
if b_sum is not None:
yr_sum += b_sum
yr_days += b_days
if c_sum is not None:
yr_sum += c_sum
yr_days += c_days
if d_sum is not None:
yr_sum += d_sum
yr_days += d_days
    yr_avg = round(yr_sum / yr_days, 3) if yr_days else None
    exp_sum = 0
    exp_days = 0
    for season, days in (('A - Summer', a_days), ('B - Autumn', b_days),
                         ('C - Winter', c_days), ('D - Spring', d_days)):
        if exp_values.get(season) is not None:
            exp_sum += exp_values[season] * days
            exp_days += days
    exp_avg = round(exp_sum / exp_days, 3) if exp_days else None
    summary = {
        "Summer": (a_avg, a_sum),
        "Autumn": (b_avg, b_sum),
        "Winter": (c_avg, c_sum),
        "Spring": (d_avg, d_sum),
        "Export": (exp_avg, exp_sum if exp_days else None),
        "Year": (yr_avg, yr_sum),
    }
return summary
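# Shape of the value returned above (hypothetical numbers): each entry is an
# (average kWh per day, total kWh) pair keyed by season, plus "Export" and a
# day-weighted "Year" rollup, e.g.
#   {"Summer": (18.2, 1638.0), "Autumn": (15.1, 1389.2),
#    "Winter": (21.0, 1932.0), "Spring": (14.0, 1274.0),
#    "Export": (6.3, 2299.5), "Year": (17.1, 6233.2)}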
def build_report(nmi: str):
template = env.get_template("nmi-report.html")
start, end = get_date_range(nmi)
fp_imp = get_import_overview_chart(nmi)
fp_exp = get_export_overview_chart(nmi)
daily_chart = get_daily_plot(nmi)
has_export = True if fp_exp else None
report_data = {
"start": start,
"end": end,
"has_export": has_export,
"daily_chart": daily_chart,
"imp_overview_chart": fp_imp.name,
"exp_overview_chart": fp_exp.name if has_export else None,
"season_data": get_seasonal_data(nmi),
}
print(report_data)
output_html = template.render(nmi=nmi, **report_data)
file_path = f"build/{nmi}.html"
with open(file_path, "w", encoding="utf-8") as fh:
fh.write(output_html)
logging.info("Created %s", file_path)
logging.basicConfig(level="INFO")
Path("build").mkdir(exist_ok=True)
update_daily_summaries()
update_seasonal_summaries()
env = Environment(loader=FileSystemLoader("templates"))
env.filters["yearmonth"] = format_month
# copy_static_data()
for nmi in get_nmis():
build_report(nmi)
|
from rbqwrapper import RbqWrapper, main
def test_init():
RbqWrapper()
def test_null():
main([])
|
from py_mplus.objects import MPData, MPObject
from py_mplus.objects.comment.comment_icon import CommentIcon
class SettingsView(MPObject):
def _decode(self, buffer: MPData, category, skip):
if category == 1:
self.my_icon = CommentIcon(buffer, buffer.uint32())
elif category == 2:
self.user_name = buffer.string()
elif category == 3:
self.notice_of_news_and_events = buffer.boolean()
elif category == 4:
self.notice_of_updates_of_subscribed_titles = buffer.boolean()
elif category == 5:
self.english_titles_count = buffer.uint32()
elif category == 6:
self.spanish_titles_count = buffer.uint32()
else:
buffer.skip_type(skip)
'''
e.decode = function (e, t) {
e instanceof x || (e = x.create(e));
var n = void 0 === t ? e.len : e.pos + t,
r = new R.Proto.SettingsView;
while (e.pos < n) {
var a = e.uint32();
switch (a >>> 3) {
case 1:
r.myIcon = R.Proto.CommentIcon.decode(e, e.uint32());
break;
case 2:
r.userName = e.string();
break;
case 3:
r.noticeOfNewsAndEvents = e.bool();
break;
case 4:
r.noticeOfUpdatesOfSubscribedTitles = e.bool();
break;
case 5:
r.englishTitlesCount = e.uint32();
break;
case 6:
r.spanishTitlesCount = e.uint32();
break;
default:
e.skipType(7 & a);
break
}
}
return r
}
''' |
from distutils.core import setup, Extension
xio = Extension('xio',
define_macros = [('MAJOR_VERSION', '0'),
('MINOR_VERSION', '9')],
include_dirs = ['/usr/local/include', '../../src', '../../include'],
libraries = ['xio'],
library_dirs = ['/usr/local/lib', '../../.libs'],
sources = ['python_xio.c'])
setup (name = 'xio',
version = '0.9',
description = 'This is a proxyio package',
author = 'DongFang',
author_email = '[email protected]',
url = 'http://proxyio.org',
ext_modules = [xio])
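# Build sketch (assumes libxio has already been built into ../../.libs):
#   python setup.py build_ext --inplace
#   python -c "import xio"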
|
# Lint as: python3
#
# Copyright 2020 The XLS Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for xls.visualization.ir_viz.python.ir_to_json."""
import json
import sys
from xls.common.python import init_xls
from xls.visualization.ir_viz.python import ir_to_json
from absl.testing import absltest
def setUpModule():
# This is required so that module initializers are called including those
# which register delay models.
init_xls.init_xls(sys.argv)
class IrToJsonTest(absltest.TestCase):
def test_ir_to_json(self):
json_str = ir_to_json.ir_to_json(
"""package test_package
fn main(x: bits[32], y: bits[32]) -> bits[32] {
ret add.1: bits[32] = add(x, y)
}""", 'unit')
json_dict = json.loads(json_str)
self.assertEqual(json_dict['name'], 'test_package')
function_dict = json_dict['function_bases'][0]
self.assertIn('edges', function_dict)
self.assertLen(function_dict['edges'], 2)
self.assertIn('nodes', function_dict)
self.assertLen(function_dict['nodes'], 3)
def test_ir_to_json_with_scheduling(self):
json_str = ir_to_json.ir_to_json(
"""package test
fn main(x: bits[32], y: bits[32]) -> bits[32] {
add.1: bits[32] = add(x, y)
ret neg.2: bits[32] = neg(add.1)
}""", 'unit', 2)
json_dict = json.loads(json_str)
function_dict = json_dict['function_bases'][0]
self.assertIn('edges', function_dict)
self.assertLen(function_dict['edges'], 3)
self.assertIn('nodes', function_dict)
self.assertLen(function_dict['nodes'], 4)
for node in function_dict['nodes']:
if node['id'] == 'x' or node['id'] == 'y':
self.assertEqual(node['attributes']['cycle'], 0)
elif node['id'] == 'add_1':
self.assertEqual(node['attributes']['cycle'], 0)
elif node['id'] == 'neg_2':
self.assertEqual(node['attributes']['cycle'], 1)
if __name__ == '__main__':
absltest.main()
|
import re
from lib.settings import HTTP_HEADER
__product__ = "Sucuri Firewall (Sucuri Cloudproxy)"
def detect(content, **kwargs):
content = str(content)
headers = kwargs.get("headers", None)
detection_schema = (
re.compile(r"Access Denied - Sucuri Website Firewall"),
re.compile(r"Sucuri WebSite Firewall - CloudProxy - Access Denied"),
re.compile(r"Questions\?.+cloudproxy@sucuri\.net")
)
for detection in detection_schema:
if detection.search(content) is not None:
return True
if re.compile(r"X-Sucuri-ID", re.I).search(headers.get(HTTP_HEADER.SERVER, "")) is not None:
return True |
"""
This file is part of the openPMD-updater.
Copyright 2018 openPMD contributors
Authors: Axel Huebl
License: ISC
"""
from abc import abstractmethod
class ITransform(object):
"""Transform an openPMD file from one standard version to another.
"""
@abstractmethod
def __init__(self, backend):
"""Open a file"""
raise NotImplementedError("File opening not implemented!")
@property
    def name(self):
"""Name and description of the transformation"""
raise NotImplementedError("Name and description not implemented!")
@property
    def min_version(self):
"""Minimum openPMD standard version that is supported by this transformation"""
raise NotImplementedError("Minimum supported openPMD standard version "
"of this transformation not implemented!")
@property
    def to_version(self):
        """openPMD standard version that is fulfilled by this transformation"""
raise NotImplementedError("Targeted openPMD standard version of "
"this transformation not implemented!")
@abstractmethod
def transform(self, in_place=True):
"""Perform transformation"""
raise NotImplementedError("Transformation not implemented!")
|
import numba as nb
import numpy as np
@nb.stencil(neighborhood=((-1,1),(-1,1)))
def _grad_x(arr):
""" Convolution with horizontal derivative kernel
H = [[-1, 0, 1],
[-2, 0, 2],
[-1, 0, 1]]
"""
dx = -(arr[-1,-1] + 2*arr[0,-1] + arr[1,-1]) + \
arr[-1, 1] + 2*arr[0, 1] + arr[1, 1]
return dx
@nb.stencil(neighborhood=((-1,1),(-1,1)))
def _grad_y(arr):
""" Convolution with vertical derivative kernel
H = [[-1,-2,-1],
[ 0, 0, 0],
[ 1, 2, 1]]
"""
dy = -(arr[-1,-1] + 2*arr[-1,0] + arr[-1,1]) + \
arr[ 1,-1] + 2*arr[ 1,0] + arr[ 1,1]
return dy
@nb.njit(nogil=True)
def _grad_hist_4_u1(arr):
"""
8bit input -> 4 channel 32bit (but clamped to 8 bits)
scale of abs(y) is +-1024
y/4 - scale to original domain +-256
To scale the full range to +-256 -> y/4 - small gradients lost
This is prolly ok
"""
dst_shape = (arr.shape[0], arr.shape[1], 4)
dx = np.empty(arr.shape, np.int32)
dy = np.empty(arr.shape, np.int32)
dx[:] = _grad_x(arr)
dy[:] = _grad_y(arr)
y = np.empty(dst_shape, np.int32)
y[...,0] = dx
y[...,1] = 0.5 * dx - 0.5 * dy
y[...,2] = dy
y[...,3] = 0.5 * dx + 0.5 * dy
return np.fmin(np.abs(y)//4, 255).astype(np.uint8)
def grad_hist_4_u1(image):
return _grad_hist_4_u1(image)
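# Usage sketch (hypothetical array): expects a 2-D uint8 image; the first call also
# pays the numba JIT compilation cost.
#   img = np.random.randint(0, 256, (480, 640), dtype=np.uint8)
#   hist4 = grad_hist_4_u1(img)  # -> uint8 array of shape (480, 640, 4)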
@nb.njit(nogil=True)
def _grad_mag_u1(arr):
dx = np.abs(_grad_x(arr))
dy = np.abs(_grad_y(arr))
dst_shape = (arr.shape[0], arr.shape[1], 1)
y = np.empty(dst_shape, np.int32)
y[...,0] = np.maximum(dx, dy)
return np.fmin(y//4, 255).astype(np.uint8)
def grad_mag_u1(image):
return _grad_mag_u1(image) |
#!/usr/bin/env python
r"""Browse the given dataset
Example usage:
python browse.py \
--dataset=widerface \
--data_dir=/home/user/widerface/
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
import logging
import argparse
from morghulis import create_dataset
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--dataset', dest='dataset', action='store', required=True, help='widerface, fddb, afw, mafa')
parser.add_argument('--data_dir', dest='data_dir', action='store', required=True, help='')
args = parser.parse_args()
dataset = args.dataset
data_dir = args.data_dir
ds = create_dataset(dataset, data_dir)
ds.browse()
if __name__ == '__main__':
main()
|
from django.apps import AppConfig
class DjangoGuidConfig(AppConfig):
name = 'django_guid'
def ready(self) -> None:
"""
In order to avoid circular imports we import signals here.
"""
from django_guid import signals # noqa F401
from django_guid.config import settings
settings.validate()
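# Typical wiring (sketch; not shown in this module): list "django_guid" in
# INSTALLED_APPS and add the django_guid middleware to MIDDLEWARE so that
# ready() runs and settings.validate() executes at startup. The exact middleware
# path depends on the installed django_guid version.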
|
import json
import requests
class Api:
"""Client for communicating with various APIs"""
BASE_URL = 'https://api-basketball.p.rapidapi.com'
def __init__(self, api_key: str):
self.HEADERS = {
'x-rapidapi-host': "api-basketball.p.rapidapi.com",
'x-rapidapi-key': api_key
}
def get(self, path: str, params: object):
"""Performs a get request on the given path with the given parameters."""
url = self.BASE_URL + path
response = requests.get(url, headers=self.HEADERS, params=params)
if response.status_code != 200:
raise Exception("Failed with status code %d" % response.status_code)
return json.loads(response.text) |
def insertionsort(array):
for x in range(1,len(array)):
key = array[x]
j = x - 1
while ( j >= 0 and key < array[j]):
array[j+1] = array[j]
j -= 1
array[j+1] = key
return array
l = []
for x in range(int(input("Enter no. of data: "))):
l.append(int(input("Enter data: ")))
print("Initial List; ", l)
insertionsort(l)
print("Sorted List: ",l)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
try:
import xml.etree.ElementTree as ET
except ImportError:
import xml.etree.cElementTree as ET
import HTMLParser
# Pretty-print an XML file with consistent indentation
def pretty_xml(elem, indent = " ", newline = "\n", null_str_keep = True, level = 0):
#print(level, len(elem), elem.text, elem.tail)
i = newline + level * indent
if len(elem):
if not elem.text or not elem.text.strip():
elem.text = i + indent
for e in elem:
pretty_xml(e, indent, newline, null_str_keep, level + 1)
if not e.tail or not e.tail.strip():
e.tail = (e.tail.count(newline) * newline + level * indent) if null_str_keep and e.tail else i
if level and (not elem.tail or not elem.tail.strip()):
elem.tail = (elem.tail.count(newline) * newline + level * indent) if null_str_keep and elem.tail else i
if not len(elem) and elem.text:
elem.text = elem.text.strip()
return elem
class commentTreeBuilder(ET.XMLTreeBuilder):
def __init__ (self, html = 0, target = None):
ET.XMLTreeBuilder.__init__(self, html, target)
self._parser.CommentHandler = self.handle_comment
def handle_comment(self, data):
self._target.start(ET.Comment, {})
self._target.data(data)
self._target.end(ET.Comment)
class Muser:
'''
<include>
<user id="075577010001">
<params>
<param name="password" value="33e9cloud"/>
<param name="vm-password" value="075577010001"/>
</params>
<variables>
<variable name="toll_allow" value="domestic,international,local"/>
<variable name="accountcode" value="075577010001"/>
<variable name="user_context" value="sipp.33e9.com"/>
<variable name="effective_caller_id_name" value="18688717887"/>
<variable name="effective_caller_id_number" value="18688717887"/>
<variable name="outbound_caller_id_name" value="$${outbound_caller_name}"/>
<variable name="outbound_caller_id_number" value="$${outbound_caller_id}"/>
<!-- <variable name="callgroup" value="techsupport"/> -->
</variables>
</user>
</include>
'''
def __init__(self, user_dir=r'/usr/local/freeswitch/conf/directory', include_sub_dir=True, exclude_dir=[], debug=False):
self.__user_dir = user_dir
self.__include_sub_dir = include_sub_dir
self.__exclude_dir = exclude_dir
self.__modify_rule = {'key':r'/params/param[@name="password"]', 'value':''}
self.__debug = debug
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, exc_tb):
if exc_tb:
return False
else:
self.__del__()
def __del__(self):
pass
def set_modify_rule(self, key=r'/params/param[@name="password"]', value=''):
self.__modify_rule = {'key':key, 'value':value}
return self
    def __modify_xml(self, file_path, numbers=[]):
        tree = ET.parse(file_path, parser = commentTreeBuilder())
        include_node = tree.getroot()  # the <include> node
if self.__debug:
print("TARGET %s" % file_path)
user_node = include_node.find('user')
if user_node is not None:
id = user_node.attrib['id']
if id in numbers or len(numbers) == 0:
is_modify = False
key_xpath = "./" + self.__modify_rule.get('key', '')
value = self.__modify_rule.get('value', '')
for node in include_node.findall(key_xpath):
                    original_value = node.get('value')
                    node.set('value', value)
                    is_modify = True
                    print("MODIFY NODE %s ATTR 'value' FROM %s TO %s IN FILE %s" % (key_xpath, original_value, value, file_path))
break
if is_modify:
tree.write(file_path)
                    # Read the file back, unescape the HTML entities, and rewrite it
with open(file_path, "r+") as f:
txt = HTMLParser.HTMLParser().unescape(f.read())
f.seek(0)
f.truncate()
f.write(txt)
def run(self, numbers=[]):
'''
        numbers = [] means the user id filter is not applied
'''
for root, dirs, files in os.walk(self.__user_dir):
            # Scan all xml files in the current directory
            for file in files:
                if file[-3:] != "xml":
                    continue
                self.__modify_xml(os.path.join(root, file), numbers)
            else:
                # Finished scanning this directory; if sub-directories are excluded, stop walking
if not self.__include_sub_dir:
break
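# Illustrative usage sketch (added, not part of the original script): the
# directory, rule and number below are assumptions; run() rewrites the matched
# XML files in place, so treat this as a template rather than something to run as-is.
def _example_run():
    with Muser(user_dir=r'/usr/local/freeswitch/conf/directory', debug=True) as m:
        m.set_modify_rule(key=r'/params/param[@name="password"]', value='new-secret')
        m.run(numbers=['075577010001'])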
|
import tests.context.r4 as r4
import tests.context.stu3 as stu3
models = {
"r4": r4.model,
"stu3": stu3.model,
}
|
import torch
import models.utils.utils as utils #This lib is from original PGAN implementation.
def PGAN(pretrained=False, *args, **kwargs):
"""
Progressive growing model
pretrained (bool): load a pretrained model ?
model_name (string): if pretrained, load one of the following models
celebaHQ-256, celebaHQ-512, DTD, celeba, cifar10. Default is celebaHQ.
"""
current_path = kwargs["current_path"]
from models.progressive_gan import ProgressiveGAN as PGAN
if 'config' not in kwargs or kwargs['config'] is None:
kwargs['config'] = {}
model = PGAN(useGPU=kwargs.get('useGPU', True),
storeAVG=True,
**kwargs['config'])
checkpoint = {"celebAHQ_256": current_path + '/weight/celebaHQ_256.pth',
"celebAHQ_512": current_path + '/weight/celebaHQ_512.pth',
"DTD": current_path + '/weight/DTD.pth',
"celeba_cropped": current_path + '/weight/generator.pth'} #Actually this is celeba cropped
if pretrained:
if "model_name" in kwargs:
if kwargs["model_name"] not in checkpoint.keys():
raise ValueError("model_name should be in "
+ str(checkpoint.keys()))
        else:
            print("Loading default model : celebAHQ_256")
            kwargs["model_name"] = "celebAHQ_256"
#state_dict = model_zoo.load_url(checkpoint[kwargs["model_name"]], map_location='cpu')
state_dict = torch.load(checkpoint[kwargs["model_name"]], map_location='cuda')
model.load_state_dict(state_dict)
return model, state_dict
def load_pretrained_PGAN(dataset, project_path):
use_gpu = True if torch.cuda.is_available() else False
if(not use_gpu):
raise ValueError("You should use GPU.")
model, state_dict = PGAN(model_name=dataset, pretrained=True, useGPU=use_gpu, current_path=project_path)
netG = model.getOriginalG()
utils.loadStateDictCompatible(netG, state_dict['netG'])
return model, netG
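# Illustrative usage (added, not part of the original module): the dataset key
# and project path are assumptions; the key must be one of the checkpoint names
# above and <project_path>/weight/ must contain the matching .pth file.
def _example_load_pgan():
    # Requires a CUDA-capable GPU and the pretrained weights on disk.
    return load_pretrained_PGAN("celebAHQ_256", "/path/to/project")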
|
#!/usr/bin/env python3
import sys
if len(sys.argv) <= 1:
print("Usage: (path to repo list)+")
sys.exit(-1)
OPATH = "uniqTypedPrjs.txt"
lines = []
for ipath in sys.argv[1:]:
try:
with open(ipath, encoding="utf8") as f:
lines.extend(f.read().splitlines())
except IOError:
print(f"Cannot open file: {ipath}.")
continue
with open(OPATH, "w", encoding="utf8") as f:
f.writelines(line + "\n" for line in set(lines))
|
import pydwarf
import raws
milk_beer_reaction = """
[REACTION:BREW_DRINK_FROM_ANIMAL_EXTRACT]
[NAME:brew drink from animal extract]
[BUILDING:STILL:CUSTOM_A]
[REAGENT:extract:150:LIQUID_MISC:NONE:NONE:NONE]
[HAS_MATERIAL_REACTION_PRODUCT:DRINK_MAT]
[UNROTTEN]
[REAGENT:extract container:1:NONE:NONE:NONE:NONE]
[CONTAINS:extract]
[REAGENT:barrel/pot:1:NONE:NONE:NONE:NONE]
[EMPTY]
[FOOD_STORAGE_CONTAINER] barrel or any non-absorbing tool with FOOD_STORAGE
[PRESERVE_REAGENT]
[DOES_NOT_DETERMINE_PRODUCT_AMOUNT]
[PRODUCT:100:5:DRINK:NONE:GET_MATERIAL_FROM_REAGENT:extract:DRINK_MAT]
[PRODUCT_TO_CONTAINER:barrel/pot]
[PRODUCT_DIMENSION:150]
[SKILL:BREWING]
"""
milk_beer_material_template = """
[USE_MATERIAL_TEMPLATE:MILK_BEER:CREATURE_ALCOHOL_TEMPLATE]
[STATE_NAME:ALL_SOLID:frozen %(adj)s milk beer]
[STATE_ADJ:ALL_SOLID:frozen %(adj)s milk beer]
[STATE_NAME:LIQUID:%(adj)s milk beer]
[STATE_ADJ:LIQUID:%(adj)s milk beer]
[STATE_NAME:GAS:boiling %(adj)s milk beer]
[STATE_ADJ:GAS:boiling %(adj)s milk beer]
[PREFIX:NONE]
[MULTIPLY_VALUE:2]"""
@pydwarf.urist(
name = 'ketsuban.milkbooze',
title = 'Milk Booze',
version = '1.0.0',
author = 'Ketsuban',
description = '''Adds reactions which allow the brewing of alcoholic drinks
from animal milk.
Based on http://www.bay12forums.com/smf/index.php?topic=167546.0''',
arguments = {
'entities': '''The entities which should be allowed to brew the new drinks.
Defaults to only dwarves.'''
}
)
def milkbooze(df, entities=["MOUNTAIN"]):
# Add material templates to milkable creatures
beers_added = 0
for file in df.files.values():
if file.name.startswith("creature_"):
last_name_token = None
last_caste_token = None
for token in file.tokens():
if token.value == "CASTE":
last_caste_token = token
elif token.value == "NAME":
last_name_token = token
elif(
last_caste_token and
last_name_token and
token.next and
token.value == "USE_MATERIAL_TEMPLATE" and
token.args[0] == "MILK" and
token.args[1] == "MILK_TEMPLATE"
):
adjective = last_name_token.args[-1]
last_caste_token.addafter(
milk_beer_material_template % {"adj": adjective}
)
beers_added = beers_added + 1
pydwarf.log.debug("Added %s milk beer material template." % adjective)
# Add MATERIAL_REACTION to milk template
for cheese in df.all(
"MATERIAL_REACTION_PRODUCT:CHEESE_MAT:LOCAL_CREATURE_MAT:CHEESE"
):
pydwarf.log.debug(
"Adding a milk beer material reaction product inside file %s." % str(cheese.file)
)
cheese.addafter(
"[MATERIAL_REACTION_PRODUCT:DRINK_MAT:LOCAL_CREATURE_MAT:MILK_BEER]"
)
# Add a new reaction to stills to produce milk beers,
# and give the specified entities access to that reaction
addobject = pydwarf.scripts.pineapple.utils.addobject(
df,
add_to_file="raw/objects/reaction_milk_beer.txt",
tokens=milk_beer_reaction,
permit_entities=entities
)
if not addobject:
return addobject
else:
return pydwarf.success("Added milk beers to %d milkable creatures." % beers_added)
|
import datetime
import time
resolution_dict = {
'1': 60,
'5': 5 * 60,
'15': 15 * 60,
'30': 30 * 60,
'60': 60 * 60,
'240': 240 * 60,
    'D': 24 * 60 * 60
}
def convert_tv2ok_resolution(resolution):
return resolution_dict[resolution]
def convert_timestamp2ok(timestamp):
return datetime.datetime.utcfromtimestamp(timestamp).isoformat() + 'Z'
if __name__ == "__main__":
print(convert_timestamp2ok(int(time.time())))
|
# Copyright (c) 2014 Museum Victoria
# This software is released under the MIT license (see license.txt for details)
from Queue import *
import threading
import atexit
remote_action_PowerOn = RemoteAction()
remote_action_PowerOff = RemoteAction()
remote_action_SetInput = RemoteAction()
def local_action_activate(x = None):
'''{ "title": "Turn on", "desc": "Turn on." }'''
queue.put({'function': 'remote_action_PowerOn', 'delay': 120})
  queue.put({'function': 'remote_action_SetInput', 'args':{"source":"DIGITAL", "number":1}, 'delay': 5})
print 'Activated'
def local_action_deactivate(x = None):
'''{ "title": "Turn off", "desc": "Turn off." }'''
queue.put({'function': 'remote_action_PowerOff', 'delay': 120})
print 'Deactivated'
class TimerClass(threading.Thread):
def __init__(self):
threading.Thread.__init__(self)
self.event = threading.Event()
def run(self):
while not self.event.isSet():
if queue.empty() != True:
job = queue.get()
try:
print "Calling command " + job['function']
func = globals()[job['function']]
arg = job['args'] if 'args' in job else ''
func.call(arg)
self.event.wait(job['delay'])
queue.task_done()
except Exception, e:
print e
print "Failed to call command " + job['function']
else:
self.event.wait(1)
def stop(self):
self.event.set()
queue = Queue()
th = TimerClass()
@atexit.register
def cleanup():
print 'shutdown'
th.stop()
def main():
th.start()
print 'Nodel script started.' |
import sqlite3
# Thread-safe connection handling: the connection is opened only in the decorator, not in every function
def ensure_connection(funct):
def inlay(*args, **kwargs):
with sqlite3.connect('vault.db') as conn:
res = funct(*args, conn=conn, **kwargs)
return res
return inlay
@ensure_connection
def init_db(conn, force: bool = False):
    ''':param force: explicitly recreate all tables'''
c = conn.cursor()
if force:
        c.execute('DROP TABLE IF EXISTS user_data')  # With the force flag, drop the table if it already exists, so a fresh run with this flag does not fail.
c.execute('''
CREATE TABLE IF NOT EXISTS user_data (
id INTEGER PRIMARY KEY,
actual_name TEXT NOT NULL,
user_id INTEGER NOT NULL,
chat_id INTEGER NOT NULL,
nickname TEXT NOT NULL,
answer INTEGER,
rating INTEGER,
score INTEGER
)
''')
conn.commit()
@ensure_connection
def signup(conn, actual_name: str, user_id: int, chat_id: int, nickname: str, answer: int, rating: int, score: int):
c = conn.cursor()
c.execute('SELECT actual_name, user_id, nickname FROM user_data WHERE actual_name=? OR user_id=? OR nickname=?', (actual_name, user_id, nickname))
result = c.fetchone()
    if not result:
        c.execute('INSERT INTO user_data (actual_name, user_id, chat_id, nickname, answer, rating, score) VALUES (?, ?, ?, ?, ?, ?, ?)', (actual_name, user_id, chat_id, nickname, answer, rating, score))
conn.commit()
return 'OK'
@ensure_connection
def get_chat_ids(conn):
'''produces only unique chat_ids'''
c = conn.cursor()
result = [chat_id[0] for chat_id in c.execute('SELECT chat_id FROM user_data')]
return set(result)
@ensure_connection
def write_answers(conn, user_id: int):
c = conn.cursor()
c.execute('UPDATE user_data SET answer=1 WHERE user_id=?', (user_id, ))
c.execute('SELECT nickname FROM user_data WHERE user_id=?', (user_id, ))
result = c.fetchone()
conn.commit()
return result[0]
@ensure_connection
def write_score(conn, rating: int, nickname: str):
c = conn.cursor()
c.execute('UPDATE user_data SET rating=? WHERE nickname=?', (rating, nickname))
c.execute('UPDATE user_data SET score=score+? WHERE nickname=?', (rating, nickname))
conn.commit()
@ensure_connection
def did_they_answer(conn, user_id: int):
c = conn.cursor()
c.execute('SELECT answer FROM user_data WHERE user_id = ?', (user_id, ))
result = c.fetchone()
print(result)
return result[0]
@ensure_connection
def round_rating(conn):
c = conn.cursor()
result1 = [nickname[0] for nickname in c.execute('SELECT nickname FROM user_data WHERE rating=?', (2, ))]
result2 = [nickname[0] for nickname in c.execute('SELECT nickname FROM user_data WHERE rating=?', (1, ))]
c.execute('UPDATE user_data SET answer=0, rating=0')
conn.commit()
return (result1, result2)
@ensure_connection
def total_score(conn):
c = conn.cursor()
c.execute('SELECT GROUP_CONCAT(nickname), score FROM (SELECT score, nickname FROM user_data ORDER BY score, nickname) GROUP BY score;')
result = c.fetchall()
return result
@ensure_connection
def delete_score(conn):
c = conn.cursor()
c.execute('DROP TABLE user_data') |
from django.test import TestCase
from django.test.client import RequestFactory
class TestRender(TestCase):
def _callFUT(self, request, template_name,
context=None, content_type=None, status=None, using=None):
from variantmpl.shortcuts import render
return render(request, template_name,
context, content_type, status, using)
def test_missing_variant(self):
req = RequestFactory().get('/')
res = self._callFUT(req, 'index.html')
self.assertEqual(res.content.strip(), b'index.html')
def test_get_variant_template(self):
from variantmpl.conf import settings
req = RequestFactory().get('/')
setattr(req, settings.PROPERTY_NAME, 'v2')
res = self._callFUT(req, 'index.html')
self.assertEqual(res.content.strip(), b'index+v2.html')
def test_fallback_template(self):
from variantmpl.conf import settings
req = RequestFactory().get('/')
setattr(req, settings.PROPERTY_NAME, 'v2')
# When missing 'index2+v2.html', fallback to 'index2.html'
res = self._callFUT(req, 'index2.html')
self.assertEqual(res.content.strip(), b'index2.html')
class TestRenderToResponse(TestCase):
def _callFUT(self, template_name, context=None,
content_type=None, status=None, using=None, variant=None):
from variantmpl.shortcuts import render_to_response
return render_to_response(template_name, context, content_type,
status, using, variant=variant)
def test_missing_variant(self):
res = self._callFUT('index.html')
self.assertEqual(res.content.strip(), b'index.html')
def test_get_variant_template(self):
res = self._callFUT('index.html', variant='v2')
self.assertEqual(res.content.strip(), b'index+v2.html')
def test_fallback_template(self):
# When missing 'index2+v2.html', fallback to 'index2.html'
res = self._callFUT('index2.html', variant='v2')
self.assertEqual(res.content.strip(), b'index2.html')
|
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
from typing import Iterator, List, Optional
from collections import UserList
from torch import nn, Tensor
from torch.optim.optimizer import Optimizer
from torch.optim.lr_scheduler import _LRScheduler
from archai.common.utils import zip_eq
class OptimSched:
"""Holds the optimizer and scheduler"""
def __init__(self, optim:Optimizer, sched:Optional[_LRScheduler],
sched_on_epoch:Optional[bool])->None:
self.optim = optim
self.sched = sched
self.sched_on_epoch = sched_on_epoch
class MultiOptim:
def __init__(self) -> None:
self._optim_scheds:List[OptimSched] = []
def append(self, optim_sched:OptimSched)->None:
self._optim_scheds.append(optim_sched)
def zero_grad(self)->None:
for optim_sched in self._optim_scheds:
optim_sched.optim.zero_grad()
def step(self)->None:
for optim_sched in self._optim_scheds:
optim_sched.optim.step()
if optim_sched.sched and not optim_sched.sched_on_epoch:
optim_sched.sched.step(epoch=None)
def epoch(self, epoch:Optional[int]=None)->None:
for optim_sched in self._optim_scheds:
if optim_sched.sched and optim_sched.sched_on_epoch:
optim_sched.sched.step(epoch=epoch)
def get_lr(self, optim_index:int, param_index:int)->float:
return self._optim_scheds[optim_index].optim.param_groups[param_index]['lr']
def state_dict(self)->dict:
optim_states = [optim_sched.optim.state_dict() for optim_sched in self]
sched_states = [optim_sched.sched.state_dict() if optim_sched.sched else None \
for optim_sched in self]
return {'optim_states': optim_states, 'sched_states':sched_states}
def load_state_dict(self, state_dict:dict)->None:
optim_states = state_dict['optim_states']
sched_states = state_dict['sched_states']
for optim_sched, optim_state, sched_state in zip_eq(self, optim_states, sched_states):
optim_sched.optim.load_state_dict(optim_state)
if optim_sched.sched:
assert sched_state is not None
optim_sched.sched.load_state_dict(sched_state)
else:
assert sched_state is None
def __getitem__(self, index)->OptimSched:
return self._optim_scheds[index]
def __len__(self)->int:
return len(self._optim_scheds)
def __iter__(self)->Iterator[OptimSched]:
return iter(self._optim_scheds)
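# Illustrative sketch (added, not part of the original module): wiring a single
# optimizer/scheduler pair into MultiOptim. The model, learning rate and StepLR
# settings are assumptions for demonstration only.
def _example_multi_optim(model: nn.Module) -> MultiOptim:
    from torch.optim import SGD
    from torch.optim.lr_scheduler import StepLR
    optim = SGD(model.parameters(), lr=0.1)
    sched = StepLR(optim, step_size=30, gamma=0.1)
    multi = MultiOptim()
    multi.append(OptimSched(optim, sched, sched_on_epoch=True))
    return multi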
|
from hostlookup_abstract.api.views import BaseHostView
from hostlookup_netdisco.utils import host_lookup
class HostView(BaseHostView):
def host_lookup(self, request, q=''):
return host_lookup(q)
|
from ark.tasks.task_check_for_update import Task_CheckForUpdates
from ark.tasks.task_list_players import Task_ListPlayers
from ark.tasks.task_get_chat import Task_GetChat
from ark.tasks.task_daily_restart import Task_DailyRestart
from ark.tasks.task_daily_restart import Task_DailyRestartRepopulate
from ark.tasks.task_sql_keep_alive import Task_SQL_keep_alive
def init():
#Part of Core Features:
Task_ListPlayers.run_interval(8,immediately=True)
Task_GetChat.run_interval(5,immediately=True)
Task_SQL_keep_alive.run_interval(60)
#Extras:
Task_CheckForUpdates.run_interval(1800)
Task_DailyRestart.run_daily('15:00:00')
Task_DailyRestartRepopulate.run_daily('06:00:00')
|
import tensorflow as tf
# Layers that merge multiple inputs
class ReversedConcatenate1D(tf.keras.layers.Layer):
    """Concatenate the input with its sequence-reversed copy"""
def __init__(self, axis=-1, **kwargs):
super(ReversedConcatenate1D, self).__init__(**kwargs)
self.axis = axis
    def call(self, inputs, mask=None):
        if mask is None:
            mask = tf.ones_like(inputs[..., 0], dtype=tf.bool)
        # tf.reverse_sequence expects per-sample sequence lengths, not a boolean mask
        seq_lengths = tf.reduce_sum(tf.cast(mask, tf.int32), axis=1)
        x_forward = inputs
        x_backward = tf.reverse_sequence(inputs, seq_lengths, seq_axis=1)
        x = tf.concat([x_forward, x_backward], axis=self.axis)
        # zero out the padded positions
        x = x * tf.cast(mask, x.dtype)[..., tf.newaxis]
        return x
class LayersConcatenate(tf.keras.layers.Layer):
"""多层输出结果的拼接"""
def __init__(self, layers, axis=-1, **kwargs):
super(LayersConcatenate, self).__init__(**kwargs)
self.layers = layers
self.axis = axis
def call(self, inputs):
x = []
for layer in self.layers:
x.append(layer(inputs))
x = tf.concat(x, self.axis)
return x
class MaskedConcatenate1D(tf.keras.layers.Layer):
"""支持对齐mask的Concatenate1D"""
def __init__(self, **kwargs):
super(Layer, self).__init__(**kwargs)
self.supports_masking = True
def call(self, inputs):
return tf.concat(inputs, axis=1)
def compute_mask(self, inputs, mask=None):
"""对齐mask"""
if mask is not None:
masks = []
for i, m in enumerate(mask):
if m is None:
m = tf.ones_like(inputs[i][..., 0], dtype=tf.bool)
masks.append(m)
return tf.concat(masks, axis=1)
def compute_output_shape(self, input_shape):
if all([shape[1] for shape in input_shape]):
seq_len = sum([shape[1] for shape in input_shape])
else:
seq_len = None
return (input_shape[0][0], seq_len, input_shape[0][2])
class MaskedFlatten(tf.keras.layers.Flatten):
"""支持mask的Flatten"""
def __init__(self, **kwargs):
super(MaskedFlatten, self).__init__(**kwargs)
self.supports_masking = True
def compute_mask(self, inputs, mask=None):
return mask
|
def sieve(m):
l = [True]*(int(m**.5)+2)
l[0], l[1] = False, False
for i in range(2, len(l)):
if not l[i]:
continue
for j in range(i*2, len(l), i):
l[j] = False
return l
def solution(n):
k = 100000000000
c = 0
for i, prime in enumerate(sieve(k)):
if prime:
c += 1
if c == n:
return i
return -1
print(solution(10001))
|
#Make the model now
from torch import nn
class PowerPredictor(nn.Module):
def __init__(self,input_features,num_outputs,neuronslist):
super().__init__()
self.input_features = input_features
self.num_outputs = num_outputs
self.neuronslist = neuronslist
        '''Here,
        input_features : number of input features in the dataset
        num_outputs : number of outputs
        neuronslist : list of hidden-layer sizes
        (the network has two hidden layers, so this list needs two elements)'''
self.linear1 = nn.Linear(self.input_features,self.neuronslist[0])
self.act1 = nn.Softplus()
self.linear3 = nn.Linear(self.neuronslist[0], self.neuronslist[1])
self.act3 = nn.Softplus()
self.linear6 = nn.Linear(self.neuronslist[1], self.num_outputs)
self.act6 = nn.Softplus()
def forward(self,x):
out = self.act1(self.linear1(x))
#out = self.act2(self.linear2(out))
out = self.act3(self.linear3(out))
#out = self.act4(self.linear4(out))
#out = self.act5(self.linear5(out))
out = self.act6(self.linear6(out))
return out
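# Illustrative usage (added, not part of the original snippet): the feature
# count, hidden sizes and batch size are arbitrary assumptions.
def _example_power_predictor():
    import torch
    model = PowerPredictor(input_features=4, num_outputs=1, neuronslist=[64, 32])
    return model(torch.randn(8, 4))  # -> tensor of shape (8, 1)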
|
from .apu import Apu
|
with open("12/nav.txt") as f:
lines = [x.strip() for x in f.readlines()]
instructions = list(map(lambda x: [x[0], x[1:]], lines))
direction = "E"
position = [0, 0]
def move(action: str, value: int):
if action == "N":
position[1] += value
elif action == "S":
position[1] -= value
elif action == "E":
position[0] += value
elif action == "W":
position[0] -= value
else:
print("ERROR -> Move")
def change_direction(direction: str, value: int):
degrees = 0
if direction == "N":
degrees = 0
elif direction == "S":
degrees = 180
elif direction == "E":
degrees = 90
elif direction == "W":
degrees = 270
degrees = (degrees + value) % 360
if degrees == 0:
return "N"
elif degrees == 180:
return "S"
elif degrees == 90:
return "E"
elif degrees == 270:
return "W"
for inst in instructions:
action = inst[0]
value = int(inst[1])
if action == "L":
direction = change_direction(direction, -1 * value)
elif action == "R":
direction = change_direction(direction, value)
elif action == "F":
move(direction, value)
else:
move(action, value)
print("Position: ", position)
manhattan_distance = abs(position[0]) + abs(position[1])
print("Manhattan-Distance: ", manhattan_distance)
|
#!/usr/bin/python
def edit_distance(x, y, n, m):
if m == 0:
return n
if n == 0:
return m
if x[n - 1] == y[m - 1]:
return edit_distance(x, y, n - 1, m - 1)
return 1 + min(
edit_distance(x, y, n - 1, m),
edit_distance(x, y, n, m - 1),
edit_distance(x, y, n - 1, m - 1)
)
def edit_distance_dp_recursive(x, y, n, m):
    if edit_arr[n][m] is not None:
        return edit_arr[n][m]
    if m == 0:
        edit_arr[n][m] = n
    elif n == 0:
        edit_arr[n][m] = m
    elif x[n - 1] == y[m - 1]:
        edit_arr[n][m] = edit_distance_dp_recursive(x, y, n - 1, m - 1)
    else:
        edit_arr[n][m] = 1 + min(
            edit_distance_dp_recursive(x, y, n - 1, m),
            edit_distance_dp_recursive(x, y, n, m - 1),
            edit_distance_dp_recursive(x, y, n - 1, m - 1)
        )
    return edit_arr[n][m]
def edit_distance_dp_topdown(x, y, n, m):
    # Bottom-up tabulation over the full (n+1) x (m+1) table
    for i in xrange(n + 1):
        for j in xrange(m + 1):
            if i == 0:
                edit_arr[i][j] = j
            elif j == 0:
                edit_arr[i][j] = i
            elif x[i - 1] == y[j - 1]:
                edit_arr[i][j] = edit_arr[i - 1][j - 1]
            else:
                edit_arr[i][j] = 1 + min(
                    edit_arr[i - 1][j],
                    edit_arr[i][j - 1],
                    edit_arr[i - 1][j - 1]
                )
    return edit_arr[n][m]
str1 = "sunday"
str2 = "saturday"
edit_arr = [[None for i in xrange(len(str2) + 1)] for j in xrange(len(str1) + 1)]
print edit_distance(str1, str2, len(str1), len(str2))
print edit_distance_dp_recursive(str1, str2, len(str1), len(str2))
print edit_distance_dp_topdown(str1, str2, len(str1), len(str2)) |
#!/usr/bin/env python3
"""
Validate if given list of files are encrypted with sops.
"""
from argparse import ArgumentParser
from ruamel.yaml import YAML
from ruamel.yaml.parser import ParserError
import sys
yaml = YAML(typ='safe')
def validate_enc(item):
"""
Validate given item is encrypted.
All leaf values in a sops encrypted file must be strings that
start with ENC[. We iterate through lists and dicts, checking
only for leaf strings. Presence of any other data type (like
bool, number, etc) also makes the file invalid.
"""
if isinstance(item, str):
return item.startswith('ENC[')
elif isinstance(item, list):
return all(validate_enc(i) for i in item)
elif isinstance(item, dict):
return all(validate_enc(i) for i in item.values())
else:
return False
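def _validate_enc_examples():
    """
    Added illustration (not part of the original script): the ENC[...] payloads
    are dummies, shown only to make the leaf-value rule above concrete.
    """
    assert validate_enc({'a': 'ENC[AES256_GCM,data:abc]'})
    assert not validate_enc({'a': ['ENC[AES256_GCM,data:abc]', 'plaintext']})
    assert not validate_enc({'a': 42})  # non-string leaves make the file invalid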
def check_file(filename):
"""
Check if a file has been encrypted properly with sops.
    Returns a boolean indicating whether the given file is valid or not, as well as
a string with a human readable success / failure message.
"""
# sops doesn't have a --verify (https://github.com/mozilla/sops/issues/437)
# so we implement some heuristics, primarily to guard against unencrypted
# files being checked in.
with open(filename) as f:
try:
# Use the YAML parser to load files. All JSON is valid YAML, so this
# properly deals with JSON files too
doc = yaml.load(f)
except ParserError:
# All sops encrypted files are valid JSON or YAML
return False, f"{filename}: Not valid JSON or YAML, is not properly encrypted"
if 'sops' not in doc:
# sops puts a `sops` key in the encrypted output. If it is not
# present, very likely the file is not encrypted.
return False, f"{filename}: sops metadata key not found in file, is not properly encrypted"
invalid_keys = []
for k in doc:
if k != 'sops':
# Values under the `sops` key are not encrypted.
if not validate_enc(doc[k]):
# Collect all invalid keys so we can provide useful error message
invalid_keys.append(k)
if invalid_keys:
return False, f"{filename}: Unencrypted values found nested under keys: {','.join(invalid_keys)}"
return True, f"{filename}: Valid encryption"
def main():
argparser = ArgumentParser()
argparser.add_argument('filenames', nargs='+')
args = argparser.parse_args()
failed_messages = []
for f in args.filenames:
is_valid, message = check_file(f)
if not is_valid:
failed_messages.append(message)
if failed_messages:
print('\n'.join(failed_messages))
return 1
return 0
if __name__ == '__main__':
sys.exit(main())
|
"""Top-level package for justice."""
__author__ = """Jakub Boukal"""
__email__ = '[email protected]'
__version__ = '0.1.8'
|
import codecs
import re
import argparse
import os
def is_chapter_name(line):
chapter_pattern = re.compile(r'第[0-9零一二三四五六七八九十百千]+章')
chapter_pattern2 = re.compile(r'^[0-9零一二三四五六七八九十百千]+$')
chapter_pattern3 = re.compile(r'^[0-9]')
b = re.search(chapter_pattern, line) is not None or \
re.search(chapter_pattern2, line) is not None or \
re.search(chapter_pattern3, line) is not None and not line.endswith('。')
return b
def is_scene_sep(line, bos='::', sep_chars=None):
if sep_chars is None:
sep_chars = ['…', '—', '.', '·', '-', '。']
if bos not in line: return False
speaker, speech = line.split(bos, maxsplit=1)
return speaker == '旁白' and all(c in sep_chars for c in speech)
def processf(lines):
    # First pass: collect all marker lines and their scores
comment_lines_score = []
comment_lines_idx = []
for i,line in enumerate(lines):
if line[0] == "#":
# templine = line[(line[line.find(" ")+1:]).find(" ")+1:]
templine = line[line.find("旁"):]
if is_chapter_name(templine) or is_scene_sep(templine):
lines[i] = templine
# print(templine)
else:
comment_lines_idx.append(i)
comment_lines_score.append(float(line.split(" ")[1]))
    # Second pass: add the bonus scores where applicable
chaper_lines_idx = []
for i, line in enumerate(lines):
if is_chapter_name(line) or is_scene_sep(line):
if i + 1 in comment_lines_idx:
comment_lines_score[comment_lines_idx.index(i+1)] += 0.2
# elif i - 1 in comment_lines_idx:
# comment_lines_score[comment_lines_idx.index(i-1)] += 0.2
    # Drop split markers scoring below 0.6
for i,line in enumerate(lines):
if i in comment_lines_idx and comment_lines_score[comment_lines_idx.index(i)] < 0.6:
lines[i] = line[line.find("旁"):]
    # Third pass: find all split markers that should be deleted
delete_idx = []
for i, idx in enumerate(comment_lines_idx):
if i == 0:
continue
elif i > 0:
idx_pre = comment_lines_idx[i-1]
            # If the previous split is already scheduled for deletion, skip this comparison
if idx_pre in delete_idx:
continue
idx_pre += 1
flag = 0
            # Logic: always compare the current split with the previous one.
            # Case 1: if the two splits are fewer than 6 lines apart, delete the lower-scoring one.
            # Case 2: if the lines between them are all narration or involve only one speaker, delete the lower-scoring one.
            # The cases are checked in order; if case 1 applies, case 2 is skipped.
if idx - idx_pre < 6 and comment_lines_score[i-1] < comment_lines_score[i]:
delete_idx.append(comment_lines_idx[i-1])
flag = 1
elif idx - idx_pre < 6 and comment_lines_score[i-1] > comment_lines_score[i]:
delete_idx.append(comment_lines_idx[i])
flag = 1
if flag == 0:
nameset = set()
while idx_pre < idx:
if lines[idx_pre].split("::")[0] != "旁白": # TODO 全旁白或者只有一个人
nameset.add(lines[idx_pre].split("::")[0])
idx_pre += 1
if len(nameset) < 2:
if comment_lines_score[i-1] < comment_lines_score[i]:
delete_idx.append(comment_lines_idx[i-1])
elif comment_lines_score[i-1] > comment_lines_score[i]:
delete_idx.append(comment_lines_idx[i])
    # Perform the actual deletion
newline = []
for i, line in enumerate(lines):
if i in delete_idx:
linex = line[line.find("旁"):]
            newline.append(linex) # TODO: should not simply strip the marker here
else:
newline.append(line)
return newline
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--input_dir", type=str, required=True, help="The file to be processed")
args = parser.parse_args()
filelist = os.listdir(args.input_dir)
for name in filelist:
if name == ".txt":
continue
print(name)
file_input_name = os.path.join(args.input_dir,name)
file_outpout_name = "/nas/jiangdanyang/scene_cut_datas_convert/"+ name + "_out.txt"
file_input = codecs.open(file_input_name, mode='r', encoding='utf-8')
lines = [line.strip() for line in file_input]
newlines = processf(lines)
# file_out1 = codecs.open("/nas/jiangdanyang/scene_cut_datas_convert/"+name+str(0),mode='w', encoding='utf-8')
# for line in newlines:
# file_out1.write(line)
# file_out1.write('\n')
# file_out1.close()
err = 0
flag = 1
while( flag != 0):
flag = 0
origin = newlines.copy()
newlines = processf(newlines)
for i,line in enumerate(origin):
if line != newlines[i]:
flag = 1
# file_out1 = codecs.open("/nas/jiangdanyang/scene_cut_datas_convert/"+name+str(err+1),mode='w', encoding='utf-8')
# for line in newlines:
# file_out1.write(line)
# file_out1.write('\n')
# file_out1.close()
break
print(name, err)
err += 1
        # Write the output file
file_output = codecs.open(file_outpout_name, mode="w", encoding='utf-8')
for line in newlines:
file_output.write(line)
file_output.write('\n')
file_output.close()
|
"""
Facilities for testing RL-related code.
"""
from .util import SimpleEnv, SimpleModel, TupleCartPole
__all__ = dir()
|
# Sudoku is a number-placement puzzle. The objective is to fill a 9 × 9 grid
# with digits so that each column, each row, and each of the nine 3 × 3 sub-grids
# that compose the grid contains all of the digits from 1 to 9.
#
# This algorithm should check if the given grid of numbers represents a correct solution to Sudoku.
#
# Example
#
# For the first example below, the output should be true. For the other grid,
# the output should be false: each of the nine 3 × 3 sub-grids should contain
# all of the digits from 1 to 9.
# True
grid = [[1,3,2,5,4,6,9,8,7],
[4,6,5,8,7,9,3,2,1],
[7,9,8,2,1,3,6,5,4],
[9,2,1,4,3,5,8,7,6],
[3,5,4,7,6,8,2,1,9],
[6,8,7,1,9,2,5,4,3],
[5,7,6,9,8,1,4,3,2],
[2,4,3,6,5,7,1,9,8],
[8,1,9,3,2,4,7,6,5]]
# False
grid2 = [[1,3,2,5,4,6,9,2,7],
[4,6,5,8,7,9,3,8,1],
[7,9,8,2,1,3,6,5,4],
[9,2,1,4,3,5,8,7,6],
[3,5,4,7,6,8,2,1,9],
[6,8,7,1,9,2,5,4,3],
[5,7,6,9,8,1,4,3,2],
[2,4,3,6,5,7,1,9,8],
[8,1,9,3,2,4,7,6,5]]
def sudoku(grid):
for deep in range(9):
for wide in range(9):
if not check_horizontal(deep, wide, grid):
return False
if not check_vertical(deep, wide, grid):
return False
if not check_square(deep, wide, grid):
return False
return True
def check_horizontal(deep, wide, grid):
for w in range(9):
if wide == w:
continue
if equal_pos(deep, wide, deep, w, grid):
return False
return True
def check_vertical(deep, wide, grid):
for d in range(9):
if deep == d:
continue
if equal_pos(deep, wide, d, wide, grid):
return False
return True
def check_square(deep, wide, grid):
center_point = get_square_center_pos(deep, wide)
all_points = get_all_points_in_square(center_point[0], center_point[1])
for point in all_points:
if deep == point[0] and wide == point[1]:
continue
if equal_pos(deep, wide, point[0], point[1], grid):
return False
return True
def get_square_center_pos(deep, wide):
centers = [[1,1], [1,4], [1,7], [4,1], [4,4], [4,7], [7,1], [7,4], [7,7]]
for d in range(deep - 1, deep + 2):
for w in range(wide - 1, wide + 2):
if [d, w] in centers:
return [d, w]
def get_all_points_in_square(d, w):
return [[deep, wide] for deep in range(d - 1, d + 2) for wide in range(w - 1, w + 2)]
# return points
def equal_pos(d1, w1, d2, w2, grid):
return grid[d1][w1] == grid[d2][w2]
print(sudoku(grid))
|
import logging
import urllib3
from flask import Flask
from werkzeug.middleware.dispatcher import DispatcherMiddleware
from socorepo import config
from socorepo.config.loader import load_general_settings, load_remaining_config
from socorepo.log import setup_logging
def app_root_404(env, resp):
resp("404", [("Content-Type", "text/plain")])
return [b"404 The application root has been reconfigured."]
__version__ = "1.2.0"
# Disable unverified TLS certificate warnings.
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
# Load the basic config, which we require for setting up logging.
load_general_settings()
if config.CONFIGURE_LOGGING:
setup_logging(config.LOG_DIR if config.EXTERNAL_CONFIG else None)
# Now, we can load the remaining config. Any errors here will be logged to the logfile.
load_remaining_config()
if not config.EXTERNAL_CONFIG:
logging.getLogger("socorepo").warning(
"Running off internally stored default configuration files. This might not be what you want. "
"See README for more information on how to use your own configuration.")
# Create the app.
app = Flask(__name__)
app.config["WTF_CSRF_ENABLED"] = False
# Change the application root if configured.
if config.APPLICATION_ROOT != "/":
app.config["APPLICATION_ROOT"] = config.APPLICATION_ROOT
app.wsgi_app = DispatcherMiddleware(app_root_404, {config.APPLICATION_ROOT: app.wsgi_app})
# Initialize routes.
from . import views
# Start the cache scheduler.
from . import cache
|
from seleniumpm.examples.wikipedia import Wikipedia
from seleniumpm.examples.widgets.WikipediaPersonal import WikipediaPersonal
from seleniumpm.webelements.link import Link
from seleniumpm.locator import Locator
class SuperWikipedia(Wikipedia):
"""
This is simply an example of how you could extend Webpage classes. One use-case of this could be that your
website has a HomepageUnauthenticated and a HomepageAuthenticated; in essence, you could implement
2 classes that represent the base Homepage class in (1) an authenticated state, and (2) an unauthenticated
state.
"""
def __init__(self, driver, url):
super(SuperWikipedia, self).__init__(driver, url)
self.mainpage_link = Link(driver, Locator.by_xpath("//li[@id='n-mainpage-description']/a"))
self.personal_widget = WikipediaPersonal(driver, Locator.by_xpath("//div[@id='p-personal']"))
|
from aoc_cas.aoc2019.IntCodeComputer import IntCodeComputerVM, read_program
def instructions(*inputs):
for i in inputs:
yield from map(ord, i + "\n")
def gogoSpringyBoi(program, inputs):
vm = IntCodeComputerVM(program)
vm.input_provided_from(inputs)
s = []
row = ""
for r in vm.run():
if r > 0x110000:
return r
else:
char = chr(r)
row += char
if char == "\n":
print("".join(row))
row = ""
def part1(data):
program = read_program(data)
inputs = instructions(
"OR A T",
"AND B T",
"AND C T",
"NOT T J",
"AND D J",
"WALK",
)
return gogoSpringyBoi(program, inputs)
def part2(data):
program = read_program(data)
inputs = instructions(
# Only jump if there's a hole in the next 3 steps and D is ground
"OR A T",
"AND B T",
"AND C T",
"NOT T J",
"AND D J",
# Reset T to False
"AND J T",
# H | (E&I)
"OR E T",
"AND I T",
"OR H T",
"AND T J",
"NOT A T",
"OR T J",
"RUN",
)
return gogoSpringyBoi(program, inputs)
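# Added note: the springscript above amounts to jumping when
#   (not A) or (D and not (A and B and C) and (H or (E and I)))
# i.e. jump immediately if the next tile is a hole, or if a hole is coming up,
# the landing tile D is ground, and from there the droid can either walk one
# step (E) and jump again (I) or jump again right away (H).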
if __name__ == "__main__":
from aocd import get_data
data = get_data(year=2019, day=21)
print(part1(data))
print(part2(data))
|
from pgfutils import save, setup_figure
setup_figure(width=1, height=1)
from matplotlib import pyplot as plt
from matplotlib.lines import Line2D
cmap = plt.cm.coolwarm
custom_lines = [
Line2D([0], [0], color=cmap(0), lw=4),
Line2D([0], [0], color=cmap(0.5), lw=4),
Line2D([0], [0], color=cmap(1), lw=4),
]
fig = plt.figure(frameon=False)
ax = fig.add_axes([0, 0, 1, 1])
ax.axis("off")
plt.legend(custom_lines, ("One", "Two", "Three"), loc="center")
save()
|
import sys
if sys.version_info < (3, 10):
from importlib_metadata import entry_points
else:
from importlib.metadata import entry_points
from . import (
cpu,
devices,
gpu,
memory,
)
__all__ = (
"cpu",
"devices",
"gpu",
"installed_plugins",
"memory",
)
installed_plugins = entry_points(group=__name__)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
:mod:`lse`
==================
.. module:: lse
:platform: Unix, Windows
:synopsis:
.. moduleauthor:: hbldh <[email protected]>
Created on 2015-11-05, 16:30
"""
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import absolute_import
import numpy as np
from skboost.milboost.softmax import SoftmaxFunction
class LogSumExponential(SoftmaxFunction):
"""The Log-Sum_Exponential softmax function.
https://hips.seas.harvard.edu/blog/2013/01/09/computing-log-sum-exp/
:param x: The values to evaluate softmax over.
:type x: :py:class:`numpy.ndarray`
:param r: The radius of the LSE. Defaults to 1.0
:type r: float
:return: The LSE softmax value.
:rtype: float
"""
def __str__(self):
return super(LogSumExponential, self).__str__() + "({0:.1f})".format(self.radius)
def __init__(self, *args):
super(LogSumExponential, self).__init__(*args)
self.radius = float(args[0]) if len(args) > 0 else 1.0
def f(self, x):
shift = np.max(x)
return shift + (np.log(np.sum(np.exp(self.radius * (x - shift))) / len(x)) / self.radius)
def d_dt(self, x):
return np.exp(self.radius * x) / np.sum(np.exp(self.radius * x))
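# Illustrative usage (added, not part of the original module): assumes the
# SoftmaxFunction base class accepts the radius as a positional argument, as the
# constructor above suggests.
def _lse_example():
    lse = LogSumExponential(1.0)
    x = np.array([0.0, 1.0, 2.0])
    return lse.f(x), lse.d_dt(x)  # smoothed max-like value and per-element weights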
|
"""
Tests whether NoBiasIntroducedFor works
"""
import math
from inspect import cleandoc
from pandas import DataFrame
from testfixtures import compare
from mlinspect import DagNode, BasicCodeLocation, OperatorContext, OperatorType, FunctionInfo, DagNodeDetails, \
OptionalCodeInfo
from mlinspect._pipeline_inspector import PipelineInspector
from mlinspect.checks import CheckStatus, NoBiasIntroducedFor, \
NoBiasIntroducedForResult
from mlinspect.checks._no_bias_introduced_for import BiasDistributionChange
from mlinspect.instrumentation._dag_node import CodeReference
def test_no_bias_introduced_for_merge():
"""
    Tests whether NoBiasIntroducedFor works for joins
"""
test_code = cleandoc("""
import pandas as pd
df_a = pd.DataFrame({'A': ['cat_a', 'cat_b', 'cat_a', 'cat_c', 'cat_b'], 'B': [1, 2, 4, 5, 7]})
df_b = pd.DataFrame({'B': [1, 2, 3, 4, 5], 'C': [1, 5, 4, 11, None]})
df_merged = df_a.merge(df_b, on='B')
""")
inspector_result = PipelineInspector \
.on_pipeline_from_string(test_code) \
.add_check(NoBiasIntroducedFor(['A'])) \
.execute()
check_result = inspector_result.check_to_check_results[NoBiasIntroducedFor(['A'])]
expected_result = get_expected_check_result_merge()
compare(check_result, expected_result)
def test_no_bias_introduced_simple_imputer():
"""
    Tests whether NoBiasIntroducedFor works for SimpleImputer
"""
test_code = cleandoc("""
import pandas as pd
from sklearn.impute import SimpleImputer
import numpy as np
df = pd.DataFrame({'A': ['cat_a', np.nan, 'cat_a', 'cat_c']})
imputer = SimpleImputer(missing_values=np.nan, strategy='most_frequent')
imputed_data = imputer.fit_transform(df)
""")
inspector_result = PipelineInspector \
.on_pipeline_from_string(test_code) \
.add_check(NoBiasIntroducedFor(['A'])) \
.execute()
check_result = inspector_result.check_to_check_results[NoBiasIntroducedFor(['A'])]
expected_result = get_expected_check_result_simple_imputer()
compare(check_result, expected_result)
def get_expected_check_result_merge():
""" Expected result for the code snippet in test_no_bias_introduced_for_merge"""
failing_dag_node = DagNode(2,
BasicCodeLocation('<string-source>', 5),
OperatorContext(OperatorType.JOIN, FunctionInfo('pandas.core.frame', 'merge')),
DagNodeDetails("on 'B'", ['A', 'B', 'C']),
OptionalCodeInfo(CodeReference(5, 12, 5, 36), "df_a.merge(df_b, on='B')"))
change_df = DataFrame({'sensitive_column_value': ['cat_a', 'cat_b', 'cat_c'],
'count_before': [2, 2, 1],
'count_after': [2, 1, 1],
'ratio_before': [0.4, 0.4, 0.2],
'ratio_after': [0.5, 0.25, 0.25],
'relative_ratio_change': [(0.5 - 0.4) / 0.4, (.25 - 0.4) / 0.4, (0.25 - 0.2) / 0.2]})
expected_distribution_change = BiasDistributionChange(failing_dag_node, False, (.25 - 0.4) / 0.4, change_df)
expected_dag_node_to_change = {failing_dag_node: {'A': expected_distribution_change}}
failure_message = 'A Join causes a min_relative_ratio_change of \'A\' by -0.37500000000000006, a value below the ' \
'configured minimum threshold -0.3!'
expected_result = NoBiasIntroducedForResult(NoBiasIntroducedFor(['A']), CheckStatus.FAILURE, failure_message,
expected_dag_node_to_change)
return expected_result
def get_expected_check_result_simple_imputer():
""" Expected result for the code snippet in test_no_bias_introduced_for_simple_imputer"""
imputer_dag_node = DagNode(1,
BasicCodeLocation('<string-source>', 6),
OperatorContext(OperatorType.TRANSFORMER,
FunctionInfo('sklearn.impute._base', 'SimpleImputer')),
DagNodeDetails('Simple Imputer: fit_transform', ['A']),
OptionalCodeInfo(CodeReference(6, 10, 6, 72),
"SimpleImputer(missing_values=np.nan, strategy='most_frequent')"))
change_df = DataFrame({'sensitive_column_value': ['cat_a', 'cat_c', math.nan],
'count_before': [2, 1, 1],
'count_after': [3, 1, 0],
'ratio_before': [0.5, 0.25, 0.25],
'ratio_after': [0.75, 0.25, 0.],
'relative_ratio_change': [0.5, 0., -1.]})
expected_distribution_change = BiasDistributionChange(imputer_dag_node, True, 0., change_df)
expected_dag_node_to_change = {imputer_dag_node: {'A': expected_distribution_change}}
expected_result = NoBiasIntroducedForResult(NoBiasIntroducedFor(['A']), CheckStatus.SUCCESS, None,
expected_dag_node_to_change)
return expected_result
|
import numpy as np
import numba
# Dot product of complex vectors
@numba.jit()
def complex_dot_prod(vec1, vec2):
return vec1.dot(np.conj(vec2))
# Check that the matrix is square
@numba.jit()
def check_square(H):
if H.shape.count(H.shape[0]) == len(H.shape):
return True
else:
return False
# Center of the circle built on a segment (pair of points)
@numba.jit()
def _mu2(z1, z2):
z1_z2 = z1 * np.conj(z2)
return (z1 + z2) / 2 + (1j * np.imag(z1_z2) * (z2 - z1)) / (2 * (z1_z2 + np.real(z1_z2)))
# Radius of the circle built on a segment
@numba.jit()
def _R2(z1, z2):
z1_z2 = np.conj(z1) * z2
return np.real( np.sqrt( (np.abs(z1 - z2)**2 * np.abs(z1_z2)) / (2 * (z1_z2 + np.real(z1_z2))) ) )
# Center of the circle through three points (triangle)
@numba.jit()
def _mu3(z1, z2, z3):
return 1j * ((np.abs(z1)**2 * (z2 - z3) + np.abs(z2)**2 * (z3 - z1) + np.abs(z3)**2 * (z1 - z2)) /
(2 * np.imag(z1 * np.conj(z2) + z2 * np.conj(z3) + z3 * np.conj(z1))))
# Radius given the center and one point of the triangle
@numba.jit()
def _R3(mu, z):
return np.real(np.sqrt(np.abs(mu - z)**2))
# Find the center of the spectrum and its radius
@numba.jit()
def mu_find(spectre):
spectre = np.array(spectre, dtype=complex)
n = spectre.shape[0]
mu = 0 + 0j
R = 0
mu_flag = []
R_flag = []
if n < 2:
raise ValueError
elif n == 2:
mu = _mu2(spectre[0], spectre[1])
R = _R2(spectre[0], spectre[1])
elif n == 3:
for i in range(3):
mu_flag.append(_mu2(spectre[i], spectre[(i + 1) % 3]))
R_flag.append(_R2(spectre[i], spectre[(i + 1) % 3]))
mu_flag.append(_mu3(spectre[0], spectre[1], spectre[2]))
R_flag.append(_R3(mu_flag[3], spectre[0]))
mu_flag = np.array(mu_flag, dtype=complex)
R_flag = np.array(R_flag, dtype=float)
mu = mu_flag[np.argmin(R_flag)]
R = np.min(R_flag)
elif n > 3:
        # Candidate circles built on pairs of points; keep a candidate only if
        # it encloses the whole spectrum (small tolerance for rounding errors)
        for i in range(n - 1):
            for j in range(i + 1, n):
                mu2_loc = _mu2(spectre[i], spectre[j])
                R2_loc = _R2(spectre[i], spectre[j])
                encloses_all = True
                for k in range(n):
                    if np.abs(mu2_loc - spectre[k]) > R2_loc + 1e-9:
                        encloses_all = False
                        break
                if encloses_all:
                    mu_flag.append(mu2_loc)
                    R_flag.append(R2_loc)
        # Candidate circles built on triples of points, used only when no pair
        # circle encloses the spectrum
        if len(mu_flag) == 0:
            for i in range(n - 2):
                for j in range(i + 1, n - 1):
                    for l in range(j + 1, n):
                        mu3_loc = _mu3(spectre[i], spectre[j], spectre[l])
                        R3_loc = _R3(mu3_loc, spectre[i])
                        encloses_all = True
                        for k in range(n):
                            if np.abs(mu3_loc - spectre[k]) > R3_loc + 1e-9:
                                encloses_all = False
                                break
                        if encloses_all:
                            mu_flag.append(mu3_loc)
                            R_flag.append(R3_loc)
mu_flag = np.array(mu_flag, dtype=complex)
R_flag = np.array(R_flag, dtype=float)
mu = mu_flag[np.argmin(R_flag)]
R = np.min(R_flag)
return mu, R
# Generalized method of simple iterations (GMSI)
def GMSI(H, f, mu, u0=None, eps=10e-7, n_iter=10000):
if not check_square(H):
print("Matrix is not squared")
return u0
H = np.array(H, dtype=complex)
f = np.array(f, dtype=complex)
N = H.shape[0]
if u0 is None:
u0 = np.ones((N, ), dtype=complex)
u_vector = u0.copy()
for iter_index in range(n_iter):
u_vector = u0 - 1/mu * (H @ u0 - f)
if np.sqrt(complex_dot_prod(u_vector - u0, u_vector - u0)) / np.sqrt(complex_dot_prod(f, f)) < eps:
break
u0 = u_vector.copy()
return u_vector
# Entry point for standalone runs
def _main():
H = np.diag(np.array([5 + 0j, 10 - 5j, 10 + 5j]))
mu, R = mu_find(spectre=np.diag(H))
print("Найденные mu и R")
print(mu, R)
print("\n")
f = np.array([1, 2, 3], dtype=complex)
solve_GMSI = GMSI(H, f, mu)
print("Решение итерациями")
print(np.round(solve_GMSI, 3))
print("Сходится ли с ответом")
print(np.alltrue(np.isclose(H @ solve_GMSI, f)))
print("\n")
print("Прямое решение")
print(np.linalg.solve(H, f))
return 0
if __name__ == "__main__" :
_main() |
import json
from resource import Resource
class SubResource(Resource):
"""
Base class for resources that are purely subresources.
These resources should be handled through their corresponding
ParentResource classes.
"""
def update(self, **options):
"""
Updates the given subresource with its local changes.
Equivalent to PUT /resource/subresource/sres_id
"""
body = self._copy_dict()
body = json.dumps(body)
new_fields = self.client.put('/{}/{}/{}.json'.format(self.parent_name,
self.res_name,
self.id),
body,
**options)
# commit changes locally
self._replace_fields(new_fields)
    def delete(self, **options):
        """
        Deletes this resource from the store.
        Equivalent to DELETE /resource/subresource/sres_id
        """
        self.client.delete('/{}/{}/{}.json'.format(self.parent_name,
                                                   self.res_name,
                                                   self.id),
                           **options)
class CountryState(SubResource):
res_name = "states"
parent_name = "countries"
class OptionValue(SubResource):
res_name = "values"
parent_name = "options"
class OrderShipment(SubResource):
res_name = "shipments"
parent_name = "orders"
class OrderShippingAddress(SubResource):
res_name = "shippingaddresses"
parent_name = "orders"
class ProductSKU(SubResource):
res_name = "skus"
parent_name = "products"
class ProductConfigurableField(SubResource):
res_name = "configurablefields"
parent_name = "products"
class ProductCustomField(SubResource):
res_name = "customfields"
parent_name = "products"
class ProductDiscountRule(SubResource):
res_name = "discountrules"
parent_name = "products"
class ProductImage(SubResource):
res_name = "images"
parent_name = "products"
class ProductRule(SubResource):
res_name = "rules"
parent_name = "products"
class ProductVideo(SubResource):
res_name = "productvideos"
parent_name = "products"
class ShippingMethod(SubResource):
res_name = "methods"
parent_name = "shipping" |
#!/usr/bin/python
import os, sys
from wallaby import *
import drive as d
import constants as c
def waitForButton():
print("waiting for right button")
while right_button() == 0:
pass
print("right button pressed")
msleep(500)
def moveServo(servo, position, speed):
i = get_servo_position(servo)
if position > i:
while i < position:
set_servo_position(servo, i)
i += speed
msleep(10)
set_servo_position(servo, position)
else:
while i > position:
set_servo_position(servo, i)
i -= speed
msleep(10)
set_servo_position(servo, position)
def onBlack():
return analog(c.topHat) > c.black
def DEBUG():
print("debug")
while not left_button():
pass
|
###########################
# Implements Q and A functionality
###########################
from discord import NotFound
import db
# keep track of next question number
QUESTION_NUMBER = 1
# dictionary of questions with answers
QNA = {}
###########################
# Class: QuestionsAnswers
# Description: object with question details
# Inputs:
# - q: question text
# - number: question number
# - message: id of the message associated with question
# - ans: answers to the question
# Outputs: None
###########################
class QuestionsAnswers:
''' Class containing needed question/answer information and identification '''
def __init__(self, qs, number, message, ans):
self.question = qs
self.number = number
self.msg = message
self.answer = ans
###########################
# Function: question
# Description: takes question from user and reposts anonymously and numbered
# Inputs:
# - ctx: context of the command
# - q: question text
# Outputs:
# - User question in new post
###########################
async def question(ctx, qs):
''' add a question '''
global QUESTION_NUMBER
# format question
q_str = 'Q' + str(QUESTION_NUMBER) + ': ' + qs + '\n'
message = await ctx.send(q_str)
# create QNA object
new_question = QuestionsAnswers(qs, QUESTION_NUMBER, message.id, '')
# add question to list
QNA[QUESTION_NUMBER] = new_question
db.mutation_query(
'INSERT INTO qna VALUES (?, ?, ?, ?)',
[ctx.guild.id, ctx.author.name, '', QUESTION_NUMBER]
)
# increment question number for next question
QUESTION_NUMBER += 1
# delete original question
await ctx.message.delete()
###########################
# Function: answer
# Description: adds user answer to specific question and post anonymously
# Inputs:
# - ctx: context of the command
# - num: question number being answered
# - ans: answer text to question specified in num
# Outputs:
# - User answer added to question post
###########################
async def answer(ctx, num, ans):
''' answer the specific question '''
if int(num) not in QNA.keys():
await ctx.author.send('Invalid question number: ' + str(num))
# delete user msg
await ctx.message.delete()
return
# get question
q_answered = QNA[int(num)]
# check if message exists
try:
message = await ctx.fetch_message(q_answered.msg)
except NotFound:
await ctx.author.send('Invalid question number: ' + str(num))
# delete user msg
await ctx.message.delete()
return
# generate and edit msg with answer
if "instructor" in [y.name.lower() for y in ctx.author.roles]:
role = 'Instructor'
else:
role = 'Student'
new_answer = role + ' Ans: ' + ans
db.mutation_query(
'UPDATE qna SET answer = ? WHERE qnumber = ?',(ans, int(num)),
)
    # store the new answer and update it in the database
if not q_answered.answer == '':
q_answered.answer += '\n'
q_answered.answer += new_answer
# check if message exists and edit
q_str = 'Q' + str(q_answered.number) + ': ' + q_answered.question
content = q_str + '\n' + q_answered.answer
try:
await message.edit(content=content)
# message.content = content
except NotFound:
await ctx.author.send('Invalid question number: ' + str(num))
# delete user msg
await ctx.message.delete()
|
import os
class SignalListener:
def buy_signal(self, ticker, signal_name):
self.say("Buy " + self.split_chars(ticker) + ", " + signal_name)
def sell_signal(self, ticker, signal_name):
self.say("Sell " + self.split_chars(ticker) + ", " + signal_name)
def split_chars(self, stock_name):
return " ".join([char for char in stock_name])
def say(self, words):
print(words)
os.system('spd-say "' + words + '"')
|
# -*- coding: utf-8 -*-
from django.core.urlresolvers import reverse
from django.db import models
class PoliticalParty(models.Model):
name = models.CharField(verbose_name=u"Naziv stranke", max_length=200)
short_name = models.CharField(verbose_name=u"Kratki naziv", max_length=50)
slug = models.SlugField(verbose_name=u"slug", max_length=200, unique=True, db_index=True)
def __unicode__(self):
return self.short_name
class Meta:
verbose_name = u"Politička stranka"
verbose_name_plural = u"Političke stranke"
def get_absolute_url(self):
return reverse('url_glavni:fps_party', kwargs={'slug': self.slug})
# INCOME_TYPES = (
# (1, u'Prihodi iz državnog proračuna'),
# (2, u'Prihodi iz proračuna jedinica lokalne i područne (regionalne) samouprave'),
# (3, u'Prihodi od imovine'),
# (4, u'Prihodi od donacija'),
# (5, u'Drugi prihodi'),
# )
class Income(models.Model):
# code = models.SmallIntegerField(verbose_name=u"Šifra", choices=INCOME_TYPES)
name = models.CharField(verbose_name=u"Naziv prihoda", max_length=200)
def __unicode__(self):
return self.name
class Meta:
verbose_name = u"Prihod"
verbose_name_plural = u"Prihodi"
class Amount(models.Model):
party = models.ForeignKey(PoliticalParty, verbose_name=u"Politička stranka", related_name='amounts')
income = models.ForeignKey(Income, verbose_name=u"Prihod")
amount = models.DecimalField(verbose_name=u"Iznos", decimal_places=2, max_digits=20, default=0)
year = models.SmallIntegerField(verbose_name=u"Godina")
def __unicode__(self):
return self.party.short_name + ", " + self.income.name + ": " + str(self.amount)
class Meta:
verbose_name = u"Iznos"
verbose_name_plural = u"Iznosi"
|
from collections import defaultdict
from databird import Repository
from databird import utils
from typing import List
import datetime as dt
import logging
from databird.queue import MultiQueue
from redis import Redis
logger = logging.getLogger("databird.runner")
def retrieve_missing(
root_dir, repos: List[Repository], redis_conn=None, is_async=True, ref_time=None
):
"""Retrieve all targets that are missing from the repositories."""
if redis_conn is None:
redis_conn = Redis()
queue = MultiQueue(redis_conn, is_async=is_async)
if ref_time is None:
ref_time = dt.datetime.now()
logger.debug("ref time is " + str(ref_time))
submitted_jobs = []
for repo in repos:
logger.debug("checking repo " + repo.name)
for context, targets in repo.iter_missing(root_dir, ref_time):
logger.debug(
"missing {} targets for {}".format(len(targets), str(context["time"]))
)
driver_name = str(type(repo.driver).__name__)
info = "Repo {} with {} for targets {} at {}".format(
repo.name, driver_name, ", ".join(targets), str(context["time"])
)
job_id = "db_" + utils.hash_dict(targets)
job = queue.submit_job(
repo.queue,
job_id,
repo.driver.retrieve_safe,
context,
targets,
description=info,
)
if job is not None:
logger.info("Sumitted job " + job_id)
submitted_jobs.append(job)
else:
status = queue.job_status(job_id)
logger.info("Job {} already in queue: {}".format(job_id, str(status)))
return submitted_jobs
|
from pyomo.environ import sqrt, log, exp
Rgas = 8.3144598 # J/mol/K
pref = 101325 # Pa
Tref = 273.15 # K
class air:
'''
    Thermodynamic data of air from US Bureau of Mines
for standard pressure (101325 Pa)'''
M = 28.8503972
def cp0(T):
'''Specific heat, J/mol/K '''
return 4.184 * (9.9044 + 126531./(T*T) - 77.24/sqrt(T) + 0.00042677*T - 9.257100000000002e-8*T*T)
def H0(T):
'''Enthalpy, J/mol '''
return 4.184 * (0 + 9.9044*T + 0.000213385*T*T -126531.0/T -154.48*sqrt(T) -3.0857e-08*T*T*T + 121.0)
def S0(T):
'''Entropy, J/mol/K '''
return 4.184*(-16.28923809866967-126531.0/(2*T*T)+154.48/sqrt(T)+2*0.000213385*T+(3*-3.0857e-08*T*T)/2+9.9044*log(T))
def G0(T):
'''Gibbs free energy, J/mol '''
return air.H0(T) - T * air.S0(T)
class H2O:
'''
Thermodynamic data of water
H, S, G0, cp from US Bureau of Mines
psat from IAPWS'''
M = 18.01528
class g:
'''Vapor phase for standard pressure (101325 Pa)'''
def cp0(T):
'''Specific heat, J/mol/K '''
return 4.184 * (4.7301998 + 15523./(T*T) + 29.5200005/sqrt(T) + 0.0049613*T - 7.72539e-7*T*T)
def H0(T):
'''Enthalpy, J/mol '''
return 4.184 * (-57795.0 + 4.7301998*T + 0.00248065*T*T -15523.0/T + 59.040001*sqrt(T) -2.57513e-07*T*T*T -2591.0)
def S0(T):
'''Entropy, J/mol/K '''
return 4.184*(20.21690179199231-15523.0/(2*T*T)-59.040001/sqrt(T)+2*0.00248065*T+(3*-2.57513e-07*T*T)/2+4.7301998*log(T))
def G0(T):
'''Gibbs free energy, J/mol '''
return H2O.g.H0(T) - T * H2O.g.S0(T)
class l:
'''Liquid phase for standard pressure (101325 Pa)'''
def cp0(T):
'''Specific heat, J/mol/K '''
return 4.184 * (558.898 + 3.85798e6/(T*T) - 7248.38/sqrt(T) - 0.6867*T + 0.00045269099999999997*T*T)
def H0(T):
'''Enthalpy, J/mol '''
return 4.184 * (-68315.0 + 558.898*T -0.34335*T*T -3857980.0/T -14496.76*sqrt(T) + 0.000150897*T*T*T + 123142.0)
def S0(T):
'''Entropy, J/mol/K '''
return 4.184*(-3800.901885684364-3857980.0/(2*T*T)+14496.76/sqrt(T)+2*-0.34335*T+(3*0.000150897*T*T)/2+558.898*log(T))
def G0(T):
'''Gibbs free energy, J/mol '''
return H2O.l.H0(T) - T * H2O.l.S0(T)
def L(T):
'''Latent heat of vaporization'''
return H2O.g.H0(T)-H2O.l.H0(T)
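# Rough sanity check for L(T): the textbook latent heat of vaporization of water at
# 373.15 K is about 40.7 kJ/mol, so L(373.15) should land in that neighbourhood.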
def psat(T):
'''
Saturation pressure
IAPWS R7-97(2012) equation 30
http://www.iapws.org/relguide/IF97-Rev.pdf
https://github.com/jjgomera/iapws/blob/master/iapws/iapws97.py
https://en.wikipedia.org/wiki/Vapour_pressure_of_water'''
ni = [1167.0521452767,-724213.16703206,-17.073846940092,12020.82470247,-3232555.0322333,14.91510861353,-4823.2657361591,405113.40542057,-0.23855557567849, 650.17534844798]
v = T + ni[8] / (T - ni[9])
A = 1 * v ** 2 + ni[0] * v + ni[1]
B = ni[2] * v ** 2 + ni[3] * v + ni[4]
C = ni[5] * v ** 2 + ni[6] * v + ni[7]
return 1000000*(2 * C / (-B + (B*B - 4 * A * C) ** 0.5)) ** 4
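# Rough sanity checks for psat(T): the result should come out close to 101 kPa at
# 373.15 K and close to the triple-point pressure (about 611.7 Pa) at 273.16 K.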
def psatIdeal(T):
'''Saturation pressure from ideal gas assumption'''
return pref * exp((H2O.l.G0(T)-H2O.g.G0(T))/Rgas/T)
def psatBuck(T):
'''Saturation pressure from Buck equation'''
return 611.21 * exp( (19.84282 - T/234.5) * ((T - 273.15)/(-16.01 + T)) ) |
# -*- encoding: utf-8 -*-
import sys
import threading
import time
import typing
import attr
from ddtrace.internal import nogevent
from ddtrace.internal import service
class PeriodicThread(threading.Thread):
"""Periodic thread.
This class can be used to instantiate a worker thread that will run its `target` function every `interval`
seconds.
"""
_ddtrace_profiling_ignore = True
def __init__(
self,
interval, # type: float
target, # type: typing.Callable[[], typing.Any]
name=None, # type: typing.Optional[str]
on_shutdown=None, # type: typing.Optional[typing.Callable[[], typing.Any]]
):
# type: (...) -> None
"""Create a periodic thread.
:param interval: The interval in seconds to wait between execution of the periodic function.
:param target: The periodic function to execute every interval.
:param name: The name of the thread.
:param on_shutdown: The function to call when the thread shuts down.
"""
super(PeriodicThread, self).__init__(name=name)
self._target = target
self._on_shutdown = on_shutdown
self.interval = interval
self.quit = threading.Event()
self.daemon = True
def stop(self):
"""Stop the thread."""
# NOTE: make sure the thread is alive before using self.quit:
# 1. self.quit is Lock-based
# 2. if we're a child trying to stop a Thread,
# the Lock might have been locked in a parent process while forking so that'd block forever
if self.is_alive():
self.quit.set()
def run(self):
"""Run the target function periodically."""
while not self.quit.wait(self.interval):
self._target()
if self._on_shutdown is not None:
self._on_shutdown()
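# Illustrative sketch of how PeriodicThread is used (not part of ddtrace itself);
# only the constructor arguments and methods defined above are assumed.
#
#   t = PeriodicThread(interval=1.0, target=lambda: print("tick"),
#                      name="ticker", on_shutdown=lambda: print("bye"))
#   t.start()   # inherited from threading.Thread
#   ...
#   t.stop()    # sets the `quit` event, so the wait() loop in run() exits
#   t.join()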
class _GeventPeriodicThread(PeriodicThread):
"""Periodic thread.
This class can be used to instantiate a worker thread that will run its `target` function every `interval`
seconds.
"""
# That's the value Python 2 uses in its `threading` module
SLEEP_INTERVAL = 0.005
def __init__(self, interval, target, name=None, on_shutdown=None):
"""Create a periodic thread.
:param interval: The interval in seconds to wait between execution of the periodic function.
:param target: The periodic function to execute every interval.
:param name: The name of the thread.
:param on_shutdown: The function to call when the thread shuts down.
"""
super(_GeventPeriodicThread, self).__init__(interval, target, name, on_shutdown)
self._tident = None
self._periodic_started = False
self._periodic_stopped = False
def _reset_internal_locks(self, is_alive=False):
# Called by Python via `threading._after_fork`
self._periodic_stopped = True
@property
def ident(self):
return self._tident
def start(self):
"""Start the thread."""
self.quit = False
if self._tident is not None:
raise RuntimeError("threads can only be started once")
self._tident = nogevent.start_new_thread(self.run, tuple())
if nogevent.threading_get_native_id:
self._native_id = nogevent.threading_get_native_id()
# Wait for the thread to be started to avoid race conditions
while not self._periodic_started:
time.sleep(self.SLEEP_INTERVAL)
def is_alive(self):
return not self._periodic_stopped and self._periodic_started
def join(self, timeout=None):
# FIXME: handle the timeout argument
while self.is_alive():
time.sleep(self.SLEEP_INTERVAL)
def stop(self):
"""Stop the thread."""
self.quit = True
def run(self):
"""Run the target function periodically."""
# Do not use the threading._active_limbo_lock here because it's a gevent lock
threading._active[self._tident] = self
self._periodic_started = True
try:
while self.quit is False:
self._target()
slept = 0
while self.quit is False and slept < self.interval:
nogevent.sleep(self.SLEEP_INTERVAL)
slept += self.SLEEP_INTERVAL
if self._on_shutdown is not None:
self._on_shutdown()
except Exception:
# Exceptions might happen during interpreter shutdown.
# We're mimicking what `threading.Thread` does in daemon mode, we ignore them.
# See `threading.Thread._bootstrap` for details.
if sys is not None:
raise
finally:
try:
self._periodic_stopped = True
del threading._active[self._tident]
except Exception:
# Exceptions might happen during interpreter shutdown.
# We're mimicking what `threading.Thread` does in daemon mode, we ignore them.
# See `threading.Thread._bootstrap` for details.
if sys is not None:
raise
def PeriodicRealThreadClass():
# type: () -> typing.Type[PeriodicThread]
"""Return a PeriodicThread class based on the underlying thread implementation (native, gevent, etc).
The returned class works exactly like ``PeriodicThread``, except that it runs on a *real* OS thread. Be aware that
this might be tricky in e.g. the gevent case, where ``Lock`` objects must not be shared with the ``MainThread``
(otherwise it'd deadlock).
"""
if nogevent.is_module_patched("threading"):
return _GeventPeriodicThread
return PeriodicThread
@attr.s(eq=False)
class PeriodicService(service.Service):
"""A service that runs periodically."""
_interval = attr.ib(type=float)
_worker = attr.ib(default=None, init=False, repr=False)
_real_thread = False
"Class variable to override if the service should run in a real OS thread."
@property
def interval(self):
# type: (...) -> float
return self._interval
@interval.setter
def interval(
self, value # type: float
):
# type: (...) -> None
self._interval = value
# Update the interval of the PeriodicThread based on ours
if self._worker:
self._worker.interval = value
def _start_service(
self,
*args, # type: typing.Any
**kwargs # type: typing.Any
):
# type: (...) -> None
"""Start the periodic service."""
periodic_thread_class = PeriodicRealThreadClass() if self._real_thread else PeriodicThread
self._worker = periodic_thread_class(
self.interval,
target=self.periodic,
name="%s:%s" % (self.__class__.__module__, self.__class__.__name__),
on_shutdown=self.on_shutdown,
)
self._worker.start()
def _stop_service(
self,
*args, # type: typing.Any
**kwargs # type: typing.Any
):
# type: (...) -> None
"""Stop the periodic collector."""
self._worker.stop()
super(PeriodicService, self)._stop_service(*args, **kwargs)
def join(
self, timeout=None # type: typing.Optional[float]
):
# type: (...) -> None
if self._worker:
self._worker.join(timeout)
@staticmethod
def on_shutdown():
pass
@staticmethod
def periodic():
pass
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Jul 11 14:16:25 2018
@author: Arpit
"""
from games.game import Game
class T3Game(Game):
WINNER_R = 1
LOSER_R = -1
DRAW_R = 0
def __init__(self, size=3, **kwargs):
super().__init__(**kwargs)
self.rows = size
self.columns = size
self.stateCnt = self.rows * self.columns if not self.isConv else (2, self.rows, self.columns)
self.actionCnt = self.rows * self.columns
def newGame(self):
super().newGame()
self.illMoves = set()
def step(self, action):
if (super().step(action) < 0):
return -1
x = int(action/self.rows)
y = action % self.columns
self.gameState[x][y] = self.toPlay
self.updateStateForm(x, y)
self.illMoves.add(action)
self.checkEndStates(x, y)
self.switchTurn()
def checkEndStates(self, row, column):
if self.xInARow(row, column, 3):
self.setWinner(self.toPlay)
return
self.checkDrawState()
def getIllMoves(self):
return list(self.illMoves)
|
'''Leap year or not'''
def leapyear(year):
if (year % 4 == 0) and (year % 100 != 0) or (year % 400==0) : #checking for leap year
print(year," is a leap year") #input is a leap year
else:
print(year," is not a leap year") #input is not a leap year
year=int(input("Enter the year: "))
while year<=999 or year>=10000: #check whether the year is a four-digit number or not
print("Enter a year having four digits.")
year=int(input("Enter the year: "))
leapyear(year) |
from setuptools import setup, find_packages
exec(open("ddfs/_version.py", encoding="utf-8").read())
LONG_DESC = open("README.rst", encoding="utf-8").read()
setup(
name="ddfs",
version=__version__,
description="de-dup file system",
url="Project URL (for setup.py metadata)",
long_description=LONG_DESC,
author="Tom Soulanille",
author_email="[email protected]",
license="MIT -or- Apache License 2.0",
packages=find_packages(),
install_requires=[
"trio",
],
keywords=[
# COOKIECUTTER-TRIO-TODO: add some keywords
# "async", "io", "networking", ...
],
python_requires=">=3.5",
classifiers=[
"License :: OSI Approved :: MIT License",
"License :: OSI Approved :: Apache Software License",
"Framework :: Trio",
# COOKIECUTTER-TRIO-TODO: Remove any of these classifiers that don't
# apply:
"Operating System :: POSIX :: Linux",
"Operating System :: MacOS :: MacOS X",
"Operating System :: Microsoft :: Windows",
"Programming Language :: Python :: 3 :: Only",
"Programming Language :: Python :: Implementation :: CPython",
"Programming Language :: Python :: Implementation :: PyPy",
# COOKIECUTTER-TRIO-TODO: Consider adding trove classifiers for:
#
# - Development Status
# - Intended Audience
# - Topic
#
# For the full list of options, see:
# https://pypi.org/classifiers/
],
)
|
import os
import sys
import argparse
env_path = os.path.join(os.path.dirname(__file__), '..')
if env_path not in sys.path:
sys.path.append(env_path)
from pytracking.evaluation import Tracker
def run_video(tracker_name, tracker_param, videofile, optional_box=None, debug=None):
"""Run the tracker on your webcam.
args:
tracker_name: Name of tracking method.
tracker_param: Name of parameter file.
debug: Debug level.
"""
tracker = Tracker(tracker_name, tracker_param)
tracker.run_video(videofilepath=videofile, optional_box=optional_box, debug=debug)
def main():
parser = argparse.ArgumentParser(description='Run the tracker on a video file.')
parser.add_argument('tracker_name', type=str, help='Name of tracking method.')
parser.add_argument('tracker_param', type=str, help='Name of parameter file.')
parser.add_argument('videofile', type=str, help='path to a video file.')
parser.add_argument('--optional_box', default=None, help='optional_box with format x,y,w,h.')
parser.add_argument('--debug', type=int, default=0, help='Debug level.')
args = parser.parse_args()
run_video(args.tracker_name, args.tracker_param, args.videofile, args.optional_box, args.debug)
if __name__ == '__main__':
main()
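# Example invocation (tracker/parameter names and the video path are placeholders):
#   python run_video.py dimp dimp50 /path/to/video.mp4 --optional_box 100,120,50,80 --debug 1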
|
def extractEuricetteWordpressCom(item):
'''
Parser for 'euricette.wordpress.com'
'''
vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
if not (chp or vol) or "preview" in item['title'].lower():
return None
tagmap = [
('Greatest Alchemist', 'Someday Will I Be The Greatest Alchemist?', 'translated'),
('The Elf is a Freeloader', 'The Elf is a Freeloader', 'translated'),
('Stepmother', 'I Obtained a Stepmother. I Obtained a Little Brother. It Appears That Little Brother Is Not Father\'s Child, but a Scum King\'s Child, However, Don\'t Mind It Please ( ´_ゝ`)', 'translated'),
]
for tagname, name, tl_type in tagmap:
if tagname in item['tags']:
return buildReleaseMessageWithType(item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
return False |
import functools
from typing import Callable
import numpy as np
import pytest
from l5kit.data import AGENT_DTYPE, FRAME_DTYPE, ChunkedDataset
from l5kit.rasterization import StubRasterizer
from l5kit.sampling import generate_agent_sample
def get_partial(
cfg: dict, history_num_frames: int, history_step_size: int, future_num_frames: int, future_step_size: int
) -> Callable:
rast_params = cfg["raster_params"]
rasterizer = StubRasterizer(
rast_params["raster_size"],
np.asarray(rast_params["pixel_size"]),
np.asarray(rast_params["ego_center"]),
rast_params["filter_agents_threshold"],
)
return functools.partial(
generate_agent_sample,
raster_size=rast_params["raster_size"],
pixel_size=np.asarray(rast_params["pixel_size"]),
ego_center=np.asarray(rast_params["ego_center"]),
history_num_frames=history_num_frames,
history_step_size=history_step_size,
future_num_frames=future_num_frames,
future_step_size=future_step_size,
filter_agents_threshold=rast_params["filter_agents_threshold"],
rasterizer=rasterizer,
)
def test_no_frames(zarr_dataset: ChunkedDataset, cfg: dict) -> None:
gen_partial = get_partial(cfg, 2, 1, 4, 1)
with pytest.raises(IndexError):
gen_partial(
state_index=0, frames=np.zeros(0, FRAME_DTYPE), agents=np.zeros(0, AGENT_DTYPE), selected_track_id=None,
)
def test_out_bounds(zarr_dataset: ChunkedDataset, cfg: dict) -> None:
gen_partial = get_partial(cfg, 0, 1, 10, 1)
data = gen_partial(
state_index=0,
frames=np.asarray(zarr_dataset.frames[90:96]),
agents=zarr_dataset.agents,
selected_track_id=None,
)
assert bool(np.all(data["target_availabilities"][:5])) is True
assert bool(np.all(data["target_availabilities"][5:])) is False
def test_future(zarr_dataset: ChunkedDataset, cfg: dict) -> None:
steps = [(1, 1), (2, 2), (4, 4)] # all of these should work
for step, step_size in steps:
gen_partial = get_partial(cfg, 2, 1, step, step_size)
data = gen_partial(
state_index=10,
frames=np.asarray(zarr_dataset.frames[90:150]),
agents=zarr_dataset.agents,
selected_track_id=None,
)
assert data["target_positions"].shape == (step, 2)
assert data["target_yaws"].shape == (step, 1)
assert data["target_availabilities"].shape == (step, 1)
assert data["centroid"].shape == (2,)
assert isinstance(data["yaw"], float)
assert data["extent"].shape == (3,)
assert bool(np.all(data["target_availabilities"])) is True
|
import base64
import requests
from bleach import config
from bleach.models import pullrequest
def listPullRequests(owner, repository):
url = _getUrl(owner, repository)
headers = _getHeaders()
processedResults = []
keepFetchingResults = True
while keepFetchingResults:
response = requests.get(url, headers=headers)
if response.status_code == 404:
print("couldn't find the organization or repository")
return []
if response.status_code == 401:
print("unauthorized, check your username or password")
raise Exception("unauthorized, check your username or password")
processedResponse = response.json()
pullrequests = processedResponse['values']
processedResults.extend(
pullrequest.PullRequest(
repo=repository,
createdAt=pullrequestInfo['created_on'],
user=pullrequestInfo['author']['username'],
title=pullrequestInfo['title'],
)
for pullrequestInfo in pullrequests
)
if 'next' in processedResponse:
url = processedResponse['next']
else:
keepFetchingResults = False
return processedResults
def _getUrl(owner, repository):
URL_TEMPLATE = "https://api.bitbucket.org/2.0/repositories/{owner}/{repository}/pullrequests{params}"
params = "?state=OPEN"
url = URL_TEMPLATE.format(owner=owner, repository=repository, params=params)
return url
def _getHeaders():
headers = {}
authorizationString = "{username}:{password}".format(
username=config.CONFIG["bitbucketCloudUser"], password=config.CONFIG["bitbucketCloudPassword"])
encodedAuthorizationString = base64.b64encode(bytes(authorizationString, 'utf-8'))
headers["Authorization"] = "basic %s" % encodedAuthorizationString.decode('utf-8')
return headers
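# Example of the header built above, with made-up credentials:
#   "alice:s3cret" -> base64 -> "YWxpY2U6czNjcmV0"
#   headers == {"Authorization": "basic YWxpY2U6czNjcmV0"}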
|
from model.contact import Contact
import random
def test_delete_some_contact(app, db, check_ui):
if app.contact.count() == 0:
app.contact.create(Contact(firstname="Peter", lastname="Pyatochkin", company="System Group", address="Ukraine, Kiev, Vatslava Gavela blvd., 4",
mobilephone="+38 050 777 88 99", email="[email protected]", year="1980"))
db.connection.commit()
old_contacts = db.get_contact_list()
contact = random.choice(old_contacts)
app.contact.delete_contact_by_id(contact.id)
db.connection.commit()
new_contacts = db.get_contact_list()
assert len(old_contacts) - 1 == len(new_contacts)
old_contacts.remove(contact)
assert sorted(old_contacts, key=Contact.id_or_max) == sorted(new_contacts, key=Contact.id_or_max)
if check_ui:
assert sorted(new_contacts, key=Contact.id_or_max) == sorted(app.contact.get_contact_list(), key=Contact.id_or_max)
|
# coding=utf-8
import subprocess
__author__ = 'davis.peixoto'
class GitUtils(object):
cd = None
def __init__(self):
pass
def get_commits_strings(self, path):
pass
def get_tags_list(self):
pass
@staticmethod
def get_rep_name(project_repository_path):
commands = [
'cd %s' % project_repository_path,
'git remote -v | head -n1 | awk \'{print $2}\' | sed \'s/.*\///\' | sed \'s/\.git//\''
]
command = ' && '.join(str(c) for c in commands)
# execute commands
proc_get_task_list = subprocess.Popen(command, stdout=subprocess.PIPE, shell=True)
output = proc_get_task_list.stdout.read()
return output.replace('\n', '')
@staticmethod
def get_log(tag_start, tag_end, project_repository_path):
commands = [
'cd %s' % project_repository_path,
'git log --source --pretty=format:"%%aN|%%aE|%%d" %s..%s | grep -E "\\(" | sort | uniq' % (
tag_start, tag_end)
]
command = ' && '.join(str(c) for c in commands)
# execute commands
proc_get_task_list = subprocess.Popen(command, stdout=subprocess.PIPE, shell=True)
tasks = proc_get_task_list.stdout.read()
return [x for x in tasks.split('\n') if x]
@staticmethod
def get_task_branches(tag_start, tag_end, project_repository_path):
# make command
commands = [
'cd %s' % project_repository_path,
'git --no-pager log %s..%s --source --reverse --oneline --decorate=short | grep -o -E "[A-Z]{1,6}-[0-9]{1,6}" | sort | uniq' % (
tag_start, tag_end)
]
command = ' && '.join(str(c) for c in commands)
# execute commands
proc_get_task_list = subprocess.Popen(command, stdout=subprocess.PIPE, shell=True)
tasks = proc_get_task_list.stdout.read()
return [x for x in tasks.split('\n') if x]
# proc_latest = subprocess.Popen("cd /home/davis.peixoto/projects/rentcars; git tag | sort --version-sort -r | head -n 1",
# stdout=subprocess.PIPE, shell=True)
# latest_tag = proc_latest.stdout.read()
# from http://stackoverflow.com/questions/7353054/call-a-shell-command-containing-a-pipe-from-python-and-capture-stdout
#
# p1 = subprocess.Popen(["cat", "file.log"], stdout=subprocess.PIPE)
# p2 = subprocess.Popen(["tail", "-1"], stdin=p1.stdout, stdout=subprocess.PIPE)
# p1.stdout.close() # Allow p1 to receive a SIGPIPE if p2 exits.
# output,err = p2.communicate()
# proc_previous = subprocess.Popen(
# "cd /home/davis.peixoto/projects/rentcars; git tag | sort --version-sort -r | head -n 2 | tail -n 1",
# stdout=subprocess.PIPE, shell=True)
# previous_tag = proc_previous.stdout.read()
# proc_list = subprocess.Popen(
# "cd /home/davis.peixoto/projects/rentcars; git log --pretty=format:\"%aN%x09%ai%x09%s\" " + str(
# previous_tag) + ".." + str(latest_tag) + " | grep -E -o '[A-Z]{1,6}-[0-9]{1,9}' | sort | uniq ",
# stdout=subprocess.PIPE, shell=True)
# commits_list = proc_list.stdout.read()
# the_string = "git log --pretty=format:\"%aN%x09%ai%x09%s\" `git tag | sort --version-sort -r | head -n 2 | tail -n 1`..`git tag
# | sort --version-sort -r | head -n 1` | grep -E -o '[A-Z]{1,6}-[0-9]{1,9}' | sort | uniq"
# other_string = "git log --pretty=format:\"%aN%x09%ai%x09%s\" Production..master
# | sort | grep erg | nl | grep -E '([A-Z]{1,6}-[0-9]{1,6})'"
|
from datetime import datetime
from typing import Any
from chaosplt_grpc import remote_channel
from chaosplt_grpc.scheduler.client import schedule_experiment, \
cancel_experiment
__all__ = ["SchedulerService"]
class SchedulerService:
def __init__(self, config):
grpc_config = config["grpc"]["scheduler"]
self.scheduler_addr = grpc_config["address"]
def release(self):
pass
def schedule(self, schedule_id: str, user_id: str, org_id: str,
workspace_id: str, experiment_id: str, token_id: str,
token: str, scheduled: datetime, experiment: Any,
interval: int, repeat: int, cron: str, settings: Any,
configuration: Any, secrets: Any) -> str:
"""
Schedule a new execution with the given context
Return the job identifier for the execution.
"""
with remote_channel(self.scheduler_addr) as channel:
return schedule_experiment(
channel, schedule_id, user_id, org_id, workspace_id,
experiment_id, token_id, token, scheduled, experiment,
interval, repeat, cron, settings, configuration, secrets)
def cancel(self, job_id: str):
"""
Cancel the given job so that future executions do not take place.
"""
with remote_channel(self.scheduler_addr) as channel:
return cancel_experiment(channel, job_id)
|
import numpy as np
from PIL import Image
from mmd_scripting.scratch_stuff.progprint import progprint, progclean
_SCRIPT_VERSION = "Script version: Nuthouse01 - v0.5.01 - 09/13/2020"
"""
Given a no-tattoo body and a tattoo body, and a single color, calc the transparency needed
to create the tattoo mask sitting on top of the no-tattoo body to create the tattoo body.
"""
WITHTATS = "Body_bunny2.png"
WITHOUTTATS = "Body_bunny_notats3.png"
OUTPUT = "mask.png"
# pink1 = (249, 63, 125)
# pink2 = (200, 14, 90)
MASK_COLOR = (249, 63, 125)
# alpha blending: 255=opaque
# C = (alpha * A) + ((1 - alpha) * B)
# C = alpha * (A-B) + B
# # single pixel alpha blend
# def alpha_blend(px_bg, px_fg, alpha):
# # c = (alpha * px_fg) + ((1 - alpha) * px_bg)
# c = (alpha * (px_fg - px_bg)) + px_bg
# bulk alpha blend using numpy trickery
def alpha_blend_bulk(px_bg, px_fg, alpha_list):
t = px_fg - px_bg
t2 = alpha_list * t
r = t2 + px_bg
return r
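# Quick numeric check of the blend identity used above (values in 0..1 float space):
#   px_bg = 0.2, px_fg = 0.8, alpha = 0.5
#   alpha*(A-B) + B = 0.5*(0.8-0.2) + 0.2 = 0.5, same as alpha*A + (1-alpha)*B = 0.4 + 0.1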
def solvemask(notats, tats, color):
print("prepping")
# turn image object into list of pixels
notats_data = list(notats.getdata())
tats_data = list(tats.getdata())
# convert to numpy float-mode
notats_data = np.array(notats_data, dtype="float64") / 255
tats_data = np.array(tats_data, dtype="float64") / 255
color = np.array(color) / 255
# for each pixel value, compare before & after to find what % of the specified color should be added
# do this by trying all 0-255 possible alpha values
# for each alpha value, do the alpha blending algorithm, get the resulting color, and compare against the "after" color
# component-wise R G B difference, squared, averaged to get mean-square-error
# select the alpha with the min MSE
# pre-calculate the float versions of the alpha values
alpha_list = np.arange(256, dtype="float64") / 255
alpha_list = alpha_list.reshape((256,1))
# this holds resulting alpha values
alpha = np.zeros(tats_data.shape[0], dtype="uint8")
# this is a temp buffer used to store the MSE
blend_list = np.zeros(256, dtype="float64")
mse = np.zeros(256, dtype="float64")
print("running")
for d, (before, after) in enumerate(zip(notats_data, tats_data)):
progprint(d / tats_data.shape[0])
# first check shortcuts
if np.array_equal(before, after):
alpha[d] = 0 # transparent
continue
if np.array_equal(before, color):
alpha[d] = 255 # opaque
continue
# now do the hard way
# calc resulting colors after multiplying with all possible alphas
blend_list = alpha_blend_bulk(before, color, alpha_list)
# now calculate the error
mse = blend_list - after
# then square
mse = np.square(mse)
# then mean
mse = np.mean(mse, axis=1)
# then find the lowest error
bestfit = np.argmin(mse)
alpha[d] = bestfit
# print(before, after)
progclean()
print("done iterating")
nonzero = np.count_nonzero(alpha)
print("mask covers %f%%" % (100 * nonzero / alpha.size))
opaque = np.count_nonzero(alpha == 255)
print("mask opaque %f%%" % (100 * opaque / alpha.size))
# # convert results from 1d array to 2d array
# alpha = alpha.reshape(tats.size)
return alpha
def main():
print("reading")
tats = Image.open(WITHTATS)
notats = Image.open(WITHOUTTATS)
assert(tats.mode == notats.mode)
assert(tats.size == notats.size)
print("notats = '%s'" % WITHOUTTATS)
print("tats = '%s'" % WITHTATS)
print("color = '%s'" % str(MASK_COLOR))
print("output = '%s'" % OUTPUT)
mask = solvemask(notats, tats, MASK_COLOR)
# then build the result image from this color
img = Image.new('RGB', tats.size, MASK_COLOR)
# build a image-object from the mask
img_mask = Image.new("L", tats.size)
img_mask.putdata(mask)
# stick the mask into the mono-color image
img.putalpha(img_mask)
img.save(OUTPUT)
print("done!")
return None
if __name__ == "__main__":
print(_SCRIPT_VERSION)
main()
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed May 29 12:37:37 2019
@author: gaetandissez
"""
import numpy as np
import sklearn.metrics as metrics
from spherecluster import SphericalKMeans
from sklearn.cluster import KMeans
from scipy import sparse
class NMTF:
#First load the data and convert it to numpy arrays
R12 = sparse.load_npz('./tmp/R12.npz').toarray()
R23 = sparse.load_npz('./tmp/R23.npz').toarray()
R34 = sparse.load_npz('./tmp/R34.npz').toarray()
R25 = sparse.load_npz('./tmp/R25.npz').toarray()
W3 = sparse.load_npz('./tmp/W3.npz').toarray()
W4 = sparse.load_npz('./tmp/W4.npz').toarray()
L3 = sparse.load_npz('./tmp/L3.npz').toarray()
L4 = sparse.load_npz('./tmp/L4.npz').toarray()
#Those matrices are called Degree matrices
D3 = L3 + W3
D4 = L4 + W4
#eps is a small constant added in the update rules to make sure that the denominator is never zero
eps = 1e-8
n1, n2 = R12.shape
n3, n4 = R34.shape
n5 = R25.shape[1]
def update(self, A, num, den):
return A*(num / (den + NMTF.eps))**0.5
vupdate = np.vectorize(update)
def __init__(self, init_method, parameters, mask):
self.init_method = init_method
self.K = parameters
self.M = mask
self.iter = 0
def initialize(self):
self.R12_train = np.multiply(NMTF.R12, self.M)
if self.init_method == 'random':
"""Random uniform"""
self.G1 = np.random.rand(NMTF.n1, self.K[0])
self.G2 = np.random.rand(NMTF.n2, self.K[1])
self.G3 = np.random.rand(NMTF.n3, self.K[2])
self.G4 = np.random.rand(NMTF.n4, self.K[3])
self.G5 = np.random.rand(NMTF.n5, self.K[4])
if self.init_method == 'skmeans':
"""spherical k-means"""
#Spherical k-means clustering is done on the initial data
skm1 = SphericalKMeans(n_clusters=self.K[0])
skm1.fit(self.R12_train.transpose())
skm2 = SphericalKMeans(n_clusters=self.K[1])
skm2.fit(self.R12_train)
skm3 = SphericalKMeans(n_clusters=self.K[2])
skm3.fit(NMTF.R23)
skm4 = SphericalKMeans(n_clusters=self.K[3])
skm4.fit(NMTF.R34)
skm5 = SphericalKMeans(n_clusters=self.K[4])
skm5.fit(NMTF.R25)
#Factor matrices are initialized with the center coordinates
self.G1 = skm1.cluster_centers_.transpose()
self.G2 = skm2.cluster_centers_.transpose()
self.G3 = skm3.cluster_centers_.transpose()
self.G4 = skm4.cluster_centers_.transpose()
self.G5 = skm5.cluster_centers_.transpose()
if self.init_method == 'acol':
"""random ACOL"""
#We will "shuffle" the columns of R matrices and take the mean of k batches
Num1 = np.random.permutation(NMTF.n2)
Num2 = np.random.permutation(NMTF.n1)
Num3 = np.random.permutation(NMTF.n2)
Num4 = np.random.permutation(NMTF.n3)
Num5 = np.random.permutation(NMTF.n2)
G1 = []
for l in np.array_split(Num1, self.K[0]):
G1.append(np.mean(self.R12_train[:,l], axis = 1))
self.G1 = np.array(G1).transpose()
G2 = []
for l in np.array_split(Num2, self.K[1]):
G2.append(np.mean(self.R12_train.transpose()[:,l], axis = 1))
self.G2 = np.array(G2).transpose()
G3 = []
for l in np.array_split(Num3, self.K[2]):
G3.append(np.mean(NMTF.R23.transpose()[:,l], axis = 1))
self.G3 = np.array(G3).transpose()
G4 = []
for l in np.array_split(Num4, self.K[3]):
G4.append(np.mean(NMTF.R34.transpose()[:,l], axis = 1))
self.G4 = np.array(G4).transpose()
G5 = []
for l in np.array_split(Num5, self.K[4]):
G5.append(np.mean(NMTF.R25.transpose()[:,l], axis = 1))
self.G5 = np.array(G5).transpose()
if self.init_method == 'kmeans':
"""k-means with clustering on previous item"""
#As for spherical k-means, factor matrices will be initialized with the centers of clusters.
km1 = KMeans(n_clusters=self.K[0], n_init = 10).fit_predict(self.R12_train.transpose())
km2 = KMeans(n_clusters=self.K[1], n_init = 10).fit_predict(self.R12_train)
km3 = KMeans(n_clusters=self.K[2], n_init = 10).fit_predict(self.R23)
km4 = KMeans(n_clusters=self.K[3], n_init = 10).fit_predict(self.R34)
km5 = KMeans(n_clusters=self.K[4], n_init = 10).fit_predict(self.R25)
self.G1 = np.array([np.mean([self.R12_train[:,i] for i in range(len(km1)) if km1[i] == p], axis = 0) for p in range(self.K[0])]).transpose()
self.G2 = np.array([np.mean([self.R12_train[i] for i in range(len(km2)) if km2[i] == p], axis = 0) for p in range(self.K[1])]).transpose()
self.G3 = np.array([np.mean([self.R23[i] for i in range(len(km3)) if km3[i] == p], axis = 0) for p in range(self.K[2])]).transpose()
self.G4 = np.array([np.mean([self.R34[i] for i in range(len(km4)) if km4[i] == p], axis = 0) for p in range(self.K[3])]).transpose()
self.G5 = np.array([np.mean([self.R25[i] for i in range(len(km5)) if km5[i] == p], axis = 0) for p in range(self.K[4])]).transpose()
self.S12 = np.linalg.multi_dot([self.G1.transpose(), self.R12_train, self.G2])
self.S23 = np.linalg.multi_dot([self.G2.transpose(), self.R23, self.G3])
self.S34 = np.linalg.multi_dot([self.G3.transpose(), self.R34, self.G4])
self.S25 = np.linalg.multi_dot([self.G2.transpose(), self.R25, self.G5])
def iterate(self):
#These following lines compute the matrices needed for our update rules
Gt2G2 = np.dot(self.G2.transpose(), self.G2)
G2Gt2 = np.dot(self.G2, self.G2.transpose())
G3Gt3 = np.dot(self.G3, self.G3.transpose())
Gt3G3 = np.dot(self.G3.transpose(), self.G3)
G4Gt4 = np.dot(self.G4, self.G4.transpose())
R12G2 = np.dot(self.R12_train, self.G2)
R23G3 = np.dot(NMTF.R23, self.G3)
R34G4 = np.dot(NMTF.R34, self.G4)
R25G5 = np.dot(NMTF.R25, self.G5)
W3G3 = np.dot(NMTF.W3, self.G3)
W4G4 = np.dot(NMTF.W4, self.G4)
D3G3 = np.dot(NMTF.D3, self.G3)
D4G4 = np.dot(NMTF.D4, self.G4)
G3Gt3D3G3 = np.dot(G3Gt3, D3G3)
G4Gt4D4G4 = np.dot(G4Gt4, D4G4)
G3Gt3W3G3 = np.dot(G3Gt3, W3G3)
G4Gt4W4G4 = np.dot(G4Gt4, W4G4)
R12G2St12 = np.dot(R12G2, self.S12.transpose())
G1G1tR12G2St12 = np.linalg.multi_dot([self.G1, self.G1.transpose(), R12G2St12])
Rt12G1S12 = np.linalg.multi_dot([self.R12_train.transpose(), self.G1, self.S12])
G2Gt2Rt12G1S12 = np.dot(G2Gt2, Rt12G1S12)
R23G3St23 = np.dot(R23G3, self.S23.transpose())
G2Gt2R23G3St23 = np.dot(G2Gt2, R23G3St23)
Rt23G2S23 = np.linalg.multi_dot([NMTF.R23.transpose(),self.G2, self.S23])
G3Gt3Rt23G2S23 = np.dot(G3Gt3,Rt23G2S23)
R34G4St34 = np.dot(R34G4, self.S34.transpose())
G3Gt3R34G4St34 = np.dot(G3Gt3,R34G4St34)
Rt34G3S34 = np.linalg.multi_dot([NMTF.R34.transpose(),self.G3, self.S34])
G4Gt4Rt34G3S34 = np.dot(G4Gt4,Rt34G3S34)
Rt25G2S25 = np.linalg.multi_dot([NMTF.R25.transpose(), self.G2, self.S25])
G5G5tRt25G2S25 = np.linalg.multi_dot([self.G5, self.G5.transpose(), Rt25G2S25])
R25G5St25 = np.dot(R25G5, self.S25.transpose())
G2Gt2R25G5St25 = np.dot(G2Gt2, R25G5St25)
Gt1R12G2 = np.dot(self.G1.transpose(),R12G2)
Gt2R23G3 = np.dot(self.G2.transpose(),R23G3)
Gt3R34G4 = np.dot(self.G3.transpose(),R34G4)
Gt2R25G5 = np.dot(self.G2.transpose(), R25G5)
Gt1G1S12Gt2G2 = np.linalg.multi_dot([self.G1.transpose(), self.G1, self.S12, Gt2G2])
Gt2G2S23Gt3G3 = np.linalg.multi_dot([Gt2G2, self.S23, Gt3G3])
Gt3G3S34Gt4G4 = np.linalg.multi_dot([Gt3G3, self.S34, self.G4.transpose(), self.G4])
Gt2G2S25Gt5G5 = np.linalg.multi_dot([Gt2G2, self.S25, self.G5.transpose(), self.G5])
#Here factor matrices are updated.
self.G1 = NMTF.vupdate(self, self.G1, R12G2St12, G1G1tR12G2St12)
self.G2 = NMTF.vupdate(self, self.G2, Rt12G1S12 + R23G3St23 + R25G5St25, G2Gt2Rt12G1S12 + G2Gt2R23G3St23 + G2Gt2R25G5St25)
self.G3 = NMTF.vupdate(self, self.G3, Rt23G2S23 + R34G4St34 + W3G3 + G3Gt3D3G3, G3Gt3Rt23G2S23 + G3Gt3R34G4St34 + G3Gt3W3G3 + D3G3)
self.G4 = NMTF.vupdate(self, self.G4, Rt34G3S34 + W4G4 + G4Gt4D4G4, G4Gt4Rt34G3S34 + G4Gt4W4G4 + D4G4)
self.G5 = NMTF.vupdate(self, self.G5, Rt25G2S25, G5G5tRt25G2S25)
self.S12 = NMTF.vupdate(self, self.S12, Gt1R12G2, Gt1G1S12Gt2G2)
self.S23 = NMTF.vupdate(self, self.S23, Gt2R23G3, Gt2G2S23Gt3G3)
self.S34 = NMTF.vupdate(self, self.S34, Gt3R34G4, Gt3G3S34Gt4G4)
self.S25 = NMTF.vupdate(self, self.S25, Gt2R25G5, Gt2G2S25Gt5G5)
self.iter += 1
def validate(self, metric='aps'):
n, m = NMTF.R12.shape
R12_found = np.linalg.multi_dot([self.G1, self.S12, self.G2.transpose()])
R12_2 = []
R12_found_2 = []
#We first isolate the validation set and the corresponding result
for i in range(n):
for j in range(m):
if self.M[i, j] == 0:
R12_2.append(NMTF.R12[i, j])
R12_found_2.append(R12_found[i, j])
#We can assess the quality of our output with APS or AUROC score
if metric == 'auroc':
fpr, tpr, threshold = metrics.roc_curve(R12_2, R12_found_2)
return metrics.auc(fpr, tpr)
if metric == 'aps':
return metrics.average_precision_score(R12_2, R12_found_2)
def loss(self):
Gt3L3G3 = np.linalg.multi_dot([self.G3.transpose(), NMTF.L3, self.G3])
Gt4L4G4 = np.linalg.multi_dot([self.G4.transpose(), NMTF.L4, self.G4])
J = np.linalg.norm(self.R12_train - np.linalg.multi_dot([self.G1, self.S12, self.G2.transpose()]), ord='fro')**2
J += np.linalg.norm(NMTF.R23 - np.linalg.multi_dot([self.G2, self.S23, self.G3.transpose()]), ord='fro')**2
J += np.linalg.norm(NMTF.R34 - np.linalg.multi_dot([self.G3, self.S34, self.G4.transpose()]), ord='fro')**2
J += np.linalg.norm(NMTF.R25 - np.linalg.multi_dot([self.G2, self.S25, self.G5.transpose()]), ord='fro')**2
J += np.trace(Gt3L3G3) + np.trace(Gt4L4G4)
return J
def __repr__(self):
return 'Model NMTF with (k1, k2, k3, k4, k5)=({}, {}, {}, {}, {}) and {} initialization'.format(self.K[0], self.K[1], self.K[2], self.K[3], self.K[4], self.init_method)
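# Illustrative usage sketch (rank choices and the mask construction are assumptions,
# not values from the original experiments):
#
#   mask = (np.random.rand(*NMTF.R12.shape) > 0.1).astype(float)  # ~10% of R12 held out (M == 0)
#   model = NMTF('random', parameters=(50, 50, 50, 50, 50), mask=mask)
#   model.initialize()
#   for _ in range(200):
#       model.iterate()
#   print(model.validate('aps'), model.loss())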
|
import sys
from StringIO import StringIO
from django.test import TestCase
from django.core.management import call_command
from django_extensions.tests.models import Name
from django.conf import settings
from django.db.models import loading
class DumpScriptTests(TestCase):
def setUp(self):
self.real_stdout = sys.stdout
sys.stdout = StringIO()
self.original_installed_apps = settings.INSTALLED_APPS
settings.INSTALLED_APPS = list(settings.INSTALLED_APPS)
settings.INSTALLED_APPS.append('django_extensions.tests')
loading.cache.loaded = False
call_command('syncdb', verbosity=0)
def tearDown(self):
sys.stdout = self.real_stdout
settings.INSTALLED_APPS.remove('django_extensions.tests')
settings.INSTALLED_APPS = self.original_installed_apps
loading.cache.loaded = False
def test_runs(self):
# lame test...does it run?
n = Name(name='Gabriel')
n.save()
call_command('dumpscript', 'tests')
self.assertTrue('Gabriel' in sys.stdout.getvalue())
|
import setuptools
__version__ = "0.0.1"
__description__ = 'The package aims to help the user play the game Wordle'
__author__ = 'ASK Jennie Developer <[email protected]>'
with open("README.md", "r") as fh:
long_description = fh.read()
setuptools.setup(
name='guesstheword',
version=__version__,
author="ASK Jennie",
py_modules=["jennie"],
install_requires=['requests'],
entry_points={
'console_scripts': [
'wordlehelper=wordlehelper:execute'
],
},
author_email=__author__,
description= __description__,
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/dextrop/guesstheword",
packages=setuptools.find_packages(),
classifiers=[
"License :: OSI Approved :: MIT License",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.7",
],
) |
# Ivan Carvalho
# Solution to https://www.urionlinejudge.com.br/judge/problems/view/1020
# -*- coding: utf-8 -*-
'''
Escreva a sua solução aqui
Code your solution here
Escriba su solución aquí
'''
entrada = int(raw_input())
for a,b in [(365,"ano(s)"),(30,"mes(es)"),(1,"dia(s)")]:
total = int(entrada/a)
print "%d %s" % (total,b)
entrada -= total*a
|
import pandas as pd
def print_stage(stage_str):
count = 100
occupied_count = len(stage_str)
separator_num = int((count - occupied_count) / 2)
separator_str = "=" * separator_num
print_str = f"{separator_str}{stage_str}{separator_str}"
print(print_str)
def epoch_time(start_time, end_time):
elapsed_time = end_time - start_time
elapsed_mins = int(elapsed_time / 60)
elapsed_secs = int(elapsed_time - (elapsed_mins * 60))
return elapsed_mins, elapsed_secs
def read_files(file_name, lines_constraint=None):
results = []
with open(file_name) as f:
count = 0
for line in f:
results.append(line.replace("\n", ""))
if lines_constraint:
count += 1
if count >= lines_constraint:
break
return results
def write_predictions(preds, split, name):
with open(f"./{name}.{split}.pred", "w") as f:
f.write("\n".join(preds))
def write_scores(scores, split, name):
report = {}
for k in ["1", "2", "l"]:
for m in ["precision", "recall", "f1"]:
report[f"rouge-{k}-{m}"] = [scores[f"rouge-{k}-{m[0]}"]]
df = pd.DataFrame(report)
df.to_csv(f"./{name}_{split}_score.csv", index=False) |
import os
# Debug mode
DEBUG = True
# session
SECRET_KEY = os.urandom(24)
# The local database password is rootroot; the one on 119.23.218.150 is 123456
# Database connection configuration
# HOSTNAME = "119.23.218.150"
HOSTNAME = "118.190.2.84"
PORT = '3306'
DATABASE='multiLanguage'
USERNAME='root'
PASSWORD='Linghong2017'
# PASSWORD='123456'
DB_URI = 'mysql+mysqlconnector://{}:{}@{}:{}/{}?charset=utf8'.format(USERNAME,PASSWORD,HOSTNAME,PORT,DATABASE)
SQLALCHEMY_DATABASE_URI = DB_URI
SQLALCHEMY_TRACK_MODIFICATIONS = True
SQLALCHEMY_COMMIT_ON_TEARDOWN =True |