body (stringlengths 26-98.2k) | body_hash (int64) | docstring (stringlengths 1-16.8k) | path (stringlengths 5-230) | name (stringlengths 1-96) | repository_name (stringlengths 7-89) | lang (stringclasses 1) | body_without_docstring (stringlengths 20-98.2k)
---|---|---|---|---|---|---|---|
async def test_set_direction(hass):
'Test setting the direction of the device.'
assert (STATE_OFF == get_entity(hass).state)
(await common.async_set_direction(hass, FAN_ENTITY_ID, fan.DIRECTION_REVERSE))
assert (fan.DIRECTION_REVERSE == get_entity(hass).attributes.get('direction')) | 3,764,680,505,445,146,600 | Test setting the direction of the device. | tests/components/demo/test_fan.py | test_set_direction | ActuallyRuben/home-assistant | python | async def test_set_direction(hass):
assert (STATE_OFF == get_entity(hass).state)
(await common.async_set_direction(hass, FAN_ENTITY_ID, fan.DIRECTION_REVERSE))
assert (fan.DIRECTION_REVERSE == get_entity(hass).attributes.get('direction')) |
async def test_set_speed(hass):
'Test setting the speed of the device.'
assert (STATE_OFF == get_entity(hass).state)
(await common.async_set_speed(hass, FAN_ENTITY_ID, fan.SPEED_LOW))
assert (fan.SPEED_LOW == get_entity(hass).attributes.get('speed')) | -2,009,984,134,437,762,600 | Test setting the speed of the device. | tests/components/demo/test_fan.py | test_set_speed | ActuallyRuben/home-assistant | python | async def test_set_speed(hass):
assert (STATE_OFF == get_entity(hass).state)
(await common.async_set_speed(hass, FAN_ENTITY_ID, fan.SPEED_LOW))
assert (fan.SPEED_LOW == get_entity(hass).attributes.get('speed')) |
async def test_oscillate(hass):
'Test oscillating the fan.'
assert (not get_entity(hass).attributes.get('oscillating'))
(await common.async_oscillate(hass, FAN_ENTITY_ID, True))
assert get_entity(hass).attributes.get('oscillating')
(await common.async_oscillate(hass, FAN_ENTITY_ID, False))
assert (not get_entity(hass).attributes.get('oscillating')) | -8,049,271,439,002,481,000 | Test oscillating the fan. | tests/components/demo/test_fan.py | test_oscillate | ActuallyRuben/home-assistant | python | async def test_oscillate(hass):
assert (not get_entity(hass).attributes.get('oscillating'))
(await common.async_oscillate(hass, FAN_ENTITY_ID, True))
assert get_entity(hass).attributes.get('oscillating')
(await common.async_oscillate(hass, FAN_ENTITY_ID, False))
assert (not get_entity(hass).attributes.get('oscillating')) |
async def test_is_on(hass):
'Test is on service call.'
assert (not fan.is_on(hass, FAN_ENTITY_ID))
(await common.async_turn_on(hass, FAN_ENTITY_ID))
assert fan.is_on(hass, FAN_ENTITY_ID) | 3,499,190,597,735,891,500 | Test is on service call. | tests/components/demo/test_fan.py | test_is_on | ActuallyRuben/home-assistant | python | async def test_is_on(hass):
assert (not fan.is_on(hass, FAN_ENTITY_ID))
(await common.async_turn_on(hass, FAN_ENTITY_ID))
assert fan.is_on(hass, FAN_ENTITY_ID) |
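The four demo-fan tests above all rely on a `get_entity` helper and a `FAN_ENTITY_ID` constant that are defined elsewhere in the test module and do not appear in these rows. A minimal sketch of what they plausibly look like (the entity id and the helper body are assumptions, not copied from the source):

```python
# Hypothetical companions to the tests above; the demo platform's actual
# entity id may differ.
FAN_ENTITY_ID = 'fan.living_room_fan'

def get_entity(hass):
    """Return the state object of the demo fan from the state machine."""
    return hass.states.get(FAN_ENTITY_ID)
```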
def load_vgg(sess, vgg_path):
'\n Load Pretrained VGG Model into TensorFlow.\n :param sess: TensorFlow Session\n :param vgg_path: Path to vgg folder, containing "variables/" and "saved_model.pb"\n :return: Tuple of Tensors from VGG model (image_input, keep_prob, layer3_out, layer4_out, layer7_out)\n '
vgg_tag = 'vgg16'
tf.saved_model.loader.load(sess, [vgg_tag], vgg_path)
vgg_input_tensor_name = 'image_input:0'
vgg_keep_prob_tensor_name = 'keep_prob:0'
vgg_layer3_out_tensor_name = 'layer3_out:0'
vgg_layer4_out_tensor_name = 'layer4_out:0'
vgg_layer7_out_tensor_name = 'layer7_out:0'
graph = tf.get_default_graph()
input_img = graph.get_tensor_by_name(vgg_input_tensor_name)
prob = graph.get_tensor_by_name(vgg_keep_prob_tensor_name)
layer3_o = graph.get_tensor_by_name(vgg_layer3_out_tensor_name)
layer4_o = graph.get_tensor_by_name(vgg_layer4_out_tensor_name)
layer7_o = graph.get_tensor_by_name(vgg_layer7_out_tensor_name)
return (input_img, prob, layer3_o, layer4_o, layer7_o) | 2,088,851,759,729,730,000 | Load Pretrained VGG Model into TensorFlow.
:param sess: TensorFlow Session
:param vgg_path: Path to vgg folder, containing "variables/" and "saved_model.pb"
:return: Tuple of Tensors from VGG model (image_input, keep_prob, layer3_out, layer4_out, layer7_out) | main.py | load_vgg | papaispicolo/CarNDT3-SemanticSegmentation | python | def load_vgg(sess, vgg_path):
'\n Load Pretrained VGG Model into TensorFlow.\n :param sess: TensorFlow Session\n :param vgg_path: Path to vgg folder, containing "variables/" and "saved_model.pb"\n :return: Tuple of Tensors from VGG model (image_input, keep_prob, layer3_out, layer4_out, layer7_out)\n '
vgg_tag = 'vgg16'
tf.saved_model.loader.load(sess, [vgg_tag], vgg_path)
vgg_input_tensor_name = 'image_input:0'
vgg_keep_prob_tensor_name = 'keep_prob:0'
vgg_layer3_out_tensor_name = 'layer3_out:0'
vgg_layer4_out_tensor_name = 'layer4_out:0'
vgg_layer7_out_tensor_name = 'layer7_out:0'
graph = tf.get_default_graph()
input_img = graph.get_tensor_by_name(vgg_input_tensor_name)
prob = graph.get_tensor_by_name(vgg_keep_prob_tensor_name)
layer3_o = graph.get_tensor_by_name(vgg_layer3_out_tensor_name)
layer4_o = graph.get_tensor_by_name(vgg_layer4_out_tensor_name)
layer7_o = graph.get_tensor_by_name(vgg_layer7_out_tensor_name)
return (input_img, prob, layer3_o, layer4_o, layer7_o) |
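A brief usage sketch for `load_vgg`, assuming TensorFlow 1.x and a local `./vgg` directory holding `saved_model.pb` and `variables/` (the path is an assumption):

```python
import tensorflow as tf  # TF 1.x style session API assumed

with tf.Session() as sess:
    # The five returned tensors are the hooks the FCN decoder needs.
    image_input, keep_prob, l3, l4, l7 = load_vgg(sess, './vgg')
    print(image_input, keep_prob, l3, l4, l7)
```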
def layers(vgg_layer3_out, vgg_layer4_out, vgg_layer7_out, num_classes):
'\n Create the layers for a fully convolutional network. Build skip-layers using the vgg layers.\n :param vgg_layer3_out: TF Tensor for VGG Layer 3 output\n :param vgg_layer4_out: TF Tensor for VGG Layer 4 output\n :param vgg_layer7_out: TF Tensor for VGG Layer 7 output\n :param num_classes: Number of classes to classify\n :return: The Tensor for the last layer of output\n '
conv_1by1_l7 = tf.layers.conv2d(vgg_layer7_out, num_classes, 1, padding='SAME', kernel_regularizer=tf.contrib.layers.l2_regularizer(0.001))
output = tf.layers.conv2d_transpose(conv_1by1_l7, 512, 4, strides=(2, 2), padding='SAME', kernel_regularizer=tf.contrib.layers.l2_regularizer(0.001))
output = tf.add(output, vgg_layer4_out)
output = tf.layers.conv2d_transpose(output, 256, 4, strides=(2, 2), padding='SAME', kernel_regularizer=tf.contrib.layers.l2_regularizer(0.001))
output = tf.add(output, vgg_layer3_out)
output = tf.layers.conv2d_transpose(output, num_classes, 16, strides=(8, 8), padding='SAME', kernel_regularizer=tf.contrib.layers.l2_regularizer(0.001), name='nn_final_output')
return output | -8,248,518,825,660,680,000 | Create the layers for a fully convolutional network. Build skip-layers using the vgg layers.
:param vgg_layer3_out: TF Tensor for VGG Layer 3 output
:param vgg_layer4_out: TF Tensor for VGG Layer 4 output
:param vgg_layer7_out: TF Tensor for VGG Layer 7 output
:param num_classes: Number of classes to classify
:return: The Tensor for the last layer of output | main.py | layers | papaispicolo/CarNDT3-SemanticSegmentation | python | def layers(vgg_layer3_out, vgg_layer4_out, vgg_layer7_out, num_classes):
'\n Create the layers for a fully convolutional network. Build skip-layers using the vgg layers.\n :param vgg_layer3_out: TF Tensor for VGG Layer 3 output\n :param vgg_layer4_out: TF Tensor for VGG Layer 4 output\n :param vgg_layer7_out: TF Tensor for VGG Layer 7 output\n :param num_classes: Number of classes to classify\n :return: The Tensor for the last layer of output\n '
conv_1by1_l7 = tf.layers.conv2d(vgg_layer7_out, num_classes, 1, padding='SAME', kernel_regularizer=tf.contrib.layers.l2_regularizer(0.001))
output = tf.layers.conv2d_transpose(conv_1by1_l7, 512, 4, strides=(2, 2), padding='SAME', kernel_regularizer=tf.contrib.layers.l2_regularizer(0.001))
output = tf.add(output, vgg_layer4_out)
output = tf.layers.conv2d_transpose(output, 256, 4, strides=(2, 2), padding='SAME', kernel_regularizer=tf.contrib.layers.l2_regularizer(0.001))
output = tf.add(output, vgg_layer3_out)
output = tf.layers.conv2d_transpose(output, num_classes, 16, strides=(8, 8), padding='SAME', kernel_regularizer=tf.contrib.layers.l2_regularizer(0.001), name='nn_final_output')
return output |
def optimize(nn_last_layer, correct_label, learning_rate, num_classes):
'\n Build the TensorFlow loss and optimizer operations.\n :param nn_last_layer: TF Tensor of the last layer in the neural network\n :param correct_label: TF Placeholder for the correct label image\n :param learning_rate: TF Placeholder for the learning rate\n :param num_classes: Number of classes to classify\n :return: Tuple of (logits, train_op, cross_entropy_loss)\n '
logits = tf.reshape(nn_last_layer, ((- 1), num_classes))
cross_entropy_loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=correct_label))
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
training_operation = optimizer.minimize(cross_entropy_loss)
return (logits, training_operation, cross_entropy_loss) | 8,150,404,043,134,320,000 | Build the TensorFlow loss and optimizer operations.
:param nn_last_layer: TF Tensor of the last layer in the neural network
:param correct_label: TF Placeholder for the correct label image
:param learning_rate: TF Placeholder for the learning rate
:param num_classes: Number of classes to classify
:return: Tuple of (logits, train_op, cross_entropy_loss) | main.py | optimize | papaispicolo/CarNDT3-SemanticSegmentation | python | def optimize(nn_last_layer, correct_label, learning_rate, num_classes):
'\n Build the TensorFlow loss and optimizer operations.\n :param nn_last_layer: TF Tensor of the last layer in the neural network\n :param correct_label: TF Placeholder for the correct label image\n :param learning_rate: TF Placeholder for the learning rate\n :param num_classes: Number of classes to classify\n :return: Tuple of (logits, train_op, cross_entropy_loss)\n '
logits = tf.reshape(nn_last_layer, ((- 1), num_classes))
cross_entropy_loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=correct_label))
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
training_operation = optimizer.minimize(cross_entropy_loss)
return (logits, training_operation, cross_entropy_loss) |
def train_nn(sess, epochs, batch_size, get_batches_fn, train_op, cross_entropy_loss, input_image, correct_label, keep_prob, learning_rate):
'\n Train neural network and print out the loss during training.\n :param sess: TF Session\n :param epochs: Number of epochs\n :param batch_size: Batch size\n :param get_batches_fn: Function to get batches of training data. Call using get_batches_fn(batch_size)\n :param train_op: TF Operation to train the neural network\n :param cross_entropy_loss: TF Tensor for the amount of loss\n :param input_image: TF Placeholder for input images\n :param correct_label: TF Placeholder for label images\n :param keep_prob: TF Placeholder for dropout keep probability\n :param learning_rate: TF Placeholder for learning rate\n '
sess.run(tf.global_variables_initializer())
for epoch in range(epochs):
for (input_img, gt_img) in get_batches_fn(batch_size):
(_, loss) = sess.run([train_op, cross_entropy_loss], feed_dict={input_image: input_img, correct_label: gt_img, keep_prob: 0.7, learning_rate: 0.0005})
print('Loss of {} at epoch {}/{}'.format(loss, epoch, epochs)) | 54,481,322,062,496,040 | Train neural network and print out the loss during training.
:param sess: TF Session
:param epochs: Number of epochs
:param batch_size: Batch size
:param get_batches_fn: Function to get batches of training data. Call using get_batches_fn(batch_size)
:param train_op: TF Operation to train the neural network
:param cross_entropy_loss: TF Tensor for the amount of loss
:param input_image: TF Placeholder for input images
:param correct_label: TF Placeholder for label images
:param keep_prob: TF Placeholder for dropout keep probability
:param learning_rate: TF Placeholder for learning rate | main.py | train_nn | papaispicolo/CarNDT3-SemanticSegmentation | python | def train_nn(sess, epochs, batch_size, get_batches_fn, train_op, cross_entropy_loss, input_image, correct_label, keep_prob, learning_rate):
'\n Train neural network and print out the loss during training.\n :param sess: TF Session\n :param epochs: Number of epochs\n :param batch_size: Batch size\n :param get_batches_fn: Function to get batches of training data. Call using get_batches_fn(batch_size)\n :param train_op: TF Operation to train the neural network\n :param cross_entropy_loss: TF Tensor for the amount of loss\n :param input_image: TF Placeholder for input images\n :param correct_label: TF Placeholder for label images\n :param keep_prob: TF Placeholder for dropout keep probability\n :param learning_rate: TF Placeholder for learning rate\n '
sess.run(tf.global_variables_initializer())
for epoch in range(epochs):
for (input_img, gt_img) in get_batches_fn(batch_size):
(_, loss) = sess.run([train_op, cross_entropy_loss], feed_dict={input_image: input_img, correct_label: gt_img, keep_prob: 0.7, learning_rate: 0.0005})
print('Loss of {} at epoch {}/{}'.format(loss, epoch, epochs)) |
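Taken together, `load_vgg`, `layers`, `optimize`, and `train_nn` form one training pipeline. A hedged sketch of how they are usually wired up (the placeholder shapes, epoch/batch values, and the `get_batches_fn` generator are assumptions, not taken from the rows above):

```python
import tensorflow as tf

num_classes = 2
with tf.Session() as sess:
    image_input, keep_prob, l3, l4, l7 = load_vgg(sess, './vgg')
    nn_last_layer = layers(l3, l4, l7, num_classes)

    correct_label = tf.placeholder(tf.float32, [None, None, None, num_classes])
    learning_rate = tf.placeholder(tf.float32)
    logits, train_op, loss = optimize(nn_last_layer, correct_label, learning_rate, num_classes)

    # get_batches_fn(batch_size) is assumed to yield (image, label) batches.
    train_nn(sess, 20, 8, get_batches_fn, train_op, loss,
             image_input, correct_label, keep_prob, learning_rate)
```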
def load_data():
'Loads CIFAR10 dataset.\n\n Returns:\n Tuple of Numpy arrays: `(x_train, y_train), (x_test, y_test)`.\n '
dirname = 'cifar-10-batches-py'
origin = 'https://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz'
path = get_file(dirname, origin=origin, untar=True)
num_train_samples = 50000
x_train = np.empty((num_train_samples, 3, 32, 32), dtype='uint8')
y_train = np.empty((num_train_samples,), dtype='uint8')
for i in range(1, 6):
fpath = os.path.join(path, ('data_batch_' + str(i)))
(x_train[((i - 1) * 10000):(i * 10000), :, :, :], y_train[((i - 1) * 10000):(i * 10000)]) = load_batch(fpath)
fpath = os.path.join(path, 'test_batch')
(x_test, y_test) = load_batch(fpath)
y_train = np.reshape(y_train, (len(y_train), 1))
y_test = np.reshape(y_test, (len(y_test), 1))
if (K.image_data_format() == 'channels_last'):
x_train = x_train.transpose(0, 2, 3, 1)
x_test = x_test.transpose(0, 2, 3, 1)
return ((x_train, y_train), (x_test, y_test)) | -7,465,674,360,239,780,000 | Loads CIFAR10 dataset.
Returns:
Tuple of Numpy arrays: `(x_train, y_train), (x_test, y_test)`. | tensorflow/python/keras/_impl/keras/datasets/cifar10.py | load_data | 252125889/tensorflow | python | def load_data():
'Loads CIFAR10 dataset.\n\n Returns:\n Tuple of Numpy arrays: `(x_train, y_train), (x_test, y_test)`.\n '
dirname = 'cifar-10-batches-py'
origin = 'https://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz'
path = get_file(dirname, origin=origin, untar=True)
num_train_samples = 50000
x_train = np.empty((num_train_samples, 3, 32, 32), dtype='uint8')
y_train = np.empty((num_train_samples,), dtype='uint8')
for i in range(1, 6):
fpath = os.path.join(path, ('data_batch_' + str(i)))
(x_train[((i - 1) * 10000):(i * 10000), :, :, :], y_train[((i - 1) * 10000):(i * 10000)]) = load_batch(fpath)
fpath = os.path.join(path, 'test_batch')
(x_test, y_test) = load_batch(fpath)
y_train = np.reshape(y_train, (len(y_train), 1))
y_test = np.reshape(y_test, (len(y_test), 1))
if (K.image_data_format() == 'channels_last'):
x_train = x_train.transpose(0, 2, 3, 1)
x_test = x_test.transpose(0, 2, 3, 1)
return ((x_train, y_train), (x_test, y_test)) |
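A quick usage sketch for the CIFAR-10 loader; the shapes in the comments assume the `channels_last` image data format:

```python
(x_train, y_train), (x_test, y_test) = load_data()
print(x_train.shape)  # (50000, 32, 32, 3) with channels_last
print(y_train.shape)  # (50000, 1), integer labels 0-9
print(x_test.shape)   # (10000, 32, 32, 3)
```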
def set_meas_constants(self, trig_name, targ_name, trig_dir_str, targ_dir_str, trig_vdd, targ_vdd):
'Set the constants for this measurement: signal names, directions, and trigger scales'
self.trig_dir_str = trig_dir_str
self.targ_dir_str = targ_dir_str
self.trig_val_of_vdd = trig_vdd
self.targ_val_of_vdd = targ_vdd
self.trig_name_no_port = trig_name
self.targ_name_no_port = targ_name | -4,321,173,536,370,213,400 | Set the constants for this measurement: signal names, directions, and trigger scales | compiler/characterizer/measurements.py | set_meas_constants | ckdur/OpenRAM | python | def set_meas_constants(self, trig_name, targ_name, trig_dir_str, targ_dir_str, trig_vdd, targ_vdd):
self.trig_dir_str = trig_dir_str
self.targ_dir_str = targ_dir_str
self.trig_val_of_vdd = trig_vdd
self.targ_val_of_vdd = targ_vdd
self.trig_name_no_port = trig_name
self.targ_name_no_port = targ_name |
def get_measure_values(self, trig_td, targ_td, vdd_voltage, port=None):
'Constructs inputs to stimulus measurement function. Variant values are inputs here.'
self.port_error_check(port)
trig_val = (self.trig_val_of_vdd * vdd_voltage)
targ_val = (self.targ_val_of_vdd * vdd_voltage)
if (port != None):
meas_name = '{}{}'.format(self.name, port)
trig_name = self.trig_name_no_port.format(port)
targ_name = self.targ_name_no_port.format(port)
else:
meas_name = self.name
trig_name = self.trig_name_no_port
targ_name = self.targ_name_no_port
return (meas_name, trig_name, targ_name, trig_val, targ_val, self.trig_dir_str, self.targ_dir_str, trig_td, targ_td) | 8,585,018,006,580,844,000 | Constructs inputs to stimulus measurement function. Variant values are inputs here. | compiler/characterizer/measurements.py | get_measure_values | ckdur/OpenRAM | python | def get_measure_values(self, trig_td, targ_td, vdd_voltage, port=None):
self.port_error_check(port)
trig_val = (self.trig_val_of_vdd * vdd_voltage)
targ_val = (self.targ_val_of_vdd * vdd_voltage)
if (port != None):
meas_name = '{}{}'.format(self.name, port)
trig_name = self.trig_name_no_port.format(port)
targ_name = self.targ_name_no_port.format(port)
else:
meas_name = self.name
trig_name = self.trig_name_no_port
targ_name = self.targ_name_no_port
return (meas_name, trig_name, targ_name, trig_val, targ_val, self.trig_dir_str, self.targ_dir_str, trig_td, targ_td) |
def set_meas_constants(self, signal_name, slew_dir_str):
'Set the values needed to generate a Spice measurement statement based on the name of the measurement.'
self.trig_dir_str = slew_dir_str
self.targ_dir_str = slew_dir_str
if (slew_dir_str == 'RISE'):
self.trig_val_of_vdd = 0.1
self.targ_val_of_vdd = 0.9
elif (slew_dir_str == 'FALL'):
self.trig_val_of_vdd = 0.9
self.targ_val_of_vdd = 0.1
else:
debug.error('Unrecognised slew measurement direction={}'.format(slew_dir_str), 1)
self.trig_name_no_port = signal_name
self.targ_name_no_port = signal_name | 1,089,750,063,097,577,300 | Set the values needed to generate a Spice measurement statement based on the name of the measurement. | compiler/characterizer/measurements.py | set_meas_constants | ckdur/OpenRAM | python | def set_meas_constants(self, signal_name, slew_dir_str):
self.trig_dir_str = slew_dir_str
self.targ_dir_str = slew_dir_str
if (slew_dir_str == 'RISE'):
self.trig_val_of_vdd = 0.1
self.targ_val_of_vdd = 0.9
elif (slew_dir_str == 'FALL'):
self.trig_val_of_vdd = 0.9
self.targ_val_of_vdd = 0.1
else:
debug.error('Unrecognised slew measurement direction={}'.format(slew_dir_str), 1)
self.trig_name_no_port = signal_name
self.targ_name_no_port = signal_name |
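Since the RISE/FALL thresholds above are stored as fractions of vdd, the concrete trigger and target voltages follow directly; the 1.8 V supply below is only an illustrative assumption:

```python
vdd = 1.8  # assumed supply voltage in volts
# RISE slew: measured from 10% up to 90% of vdd
trig_rise = 0.1 * vdd  # 0.18 V
targ_rise = 0.9 * vdd  # 1.62 V
# FALL slew: measured from 90% down to 10% of vdd
trig_fall = 0.9 * vdd  # 1.62 V
targ_fall = 0.1 * vdd  # 0.18 V
```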
def set_meas_constants(self, power_type):
'Sets values useful for power simulations. This value is only meta related to the lib file (rise/fall)'
self.power_type = power_type | -4,338,757,808,079,665,000 | Sets values useful for power simulations. This value is only meta related to the lib file (rise/fall) | compiler/characterizer/measurements.py | set_meas_constants | ckdur/OpenRAM | python | def set_meas_constants(self, power_type):
self.power_type = power_type |
def get_measure_values(self, t_initial, t_final, port=None):
'Constructs inputs to stimulus measurement function. Variant values are inputs here.'
self.port_error_check(port)
if (port != None):
meas_name = '{}{}'.format(self.name, port)
else:
meas_name = self.name
return (meas_name, t_initial, t_final) | -9,196,840,615,746,831,000 | Constructs inputs to stimulus measurement function. Variant values are inputs here. | compiler/characterizer/measurements.py | get_measure_values | ckdur/OpenRAM | python | def get_measure_values(self, t_initial, t_final, port=None):
self.port_error_check(port)
if (port != None):
meas_name = '{}{}'.format(self.name, port)
else:
meas_name = self.name
return (meas_name, t_initial, t_final) |
def set_meas_constants(self, trig_name, targ_name, trig_dir_str, trig_vdd):
'Sets values useful for power simulations. This value is only meta related to the lib file (rise/fall)'
self.trig_dir_str = trig_dir_str
self.trig_val_of_vdd = trig_vdd
self.trig_name_no_port = trig_name
self.targ_name_no_port = targ_name | 3,653,245,460,261,028,400 | Sets values useful for power simulations. This value is only meta related to the lib file (rise/fall) | compiler/characterizer/measurements.py | set_meas_constants | ckdur/OpenRAM | python | def set_meas_constants(self, trig_name, targ_name, trig_dir_str, trig_vdd):
self.trig_dir_str = trig_dir_str
self.trig_val_of_vdd = trig_vdd
self.trig_name_no_port = trig_name
self.targ_name_no_port = targ_name |
def get_measure_values(self, trig_td, vdd_voltage, port=None):
'Constructs inputs to stimulus measurement function. Variant values are inputs here.'
self.port_error_check(port)
if (port != None):
meas_name = '{}{}'.format(self.name, port)
trig_name = self.trig_name_no_port.format(port)
targ_name = self.targ_name_no_port.format(port)
else:
meas_name = self.name
trig_name = self.trig_name_no_port
targ_name = self.targ_name_no_port
trig_voltage = (self.trig_val_of_vdd * vdd_voltage)
return (meas_name, trig_name, targ_name, trig_voltage, self.trig_dir_str, trig_td) | -1,249,913,863,619,797,000 | Constructs inputs to stimulus measurement function. Variant values are inputs here. | compiler/characterizer/measurements.py | get_measure_values | ckdur/OpenRAM | python | def get_measure_values(self, trig_td, vdd_voltage, port=None):
self.port_error_check(port)
if (port != None):
meas_name = '{}{}'.format(self.name, port)
trig_name = self.trig_name_no_port.format(port)
targ_name = self.targ_name_no_port.format(port)
else:
meas_name = self.name
trig_name = self.trig_name_no_port
targ_name = self.targ_name_no_port
trig_voltage = (self.trig_val_of_vdd * vdd_voltage)
return (meas_name, trig_name, targ_name, trig_voltage, self.trig_dir_str, trig_td) |
def set_meas_constants(self, targ_name):
'Sets values useful for power simulations. This value is only meta related to the lib file (rise/fall)'
self.targ_name_no_port = targ_name | -5,146,723,263,897,668,000 | Sets values useful for power simulations. This value is only meta related to the lib file (rise/fall) | compiler/characterizer/measurements.py | set_meas_constants | ckdur/OpenRAM | python | def set_meas_constants(self, targ_name):
self.targ_name_no_port = targ_name |
def get_measure_values(self, time_at, port=None):
'Constructs inputs to stimulus measurement function. Variant values are inputs here.'
self.port_error_check(port)
if (port != None):
meas_name = '{}{}'.format(self.name, port)
targ_name = self.targ_name_no_port.format(port)
else:
meas_name = self.name
targ_name = self.targ_name_no_port
return (meas_name, targ_name, time_at) | 6,943,203,446,779,191,000 | Constructs inputs to stimulus measurement function. Variant values are inputs here. | compiler/characterizer/measurements.py | get_measure_values | ckdur/OpenRAM | python | def get_measure_values(self, time_at, port=None):
self.port_error_check(port)
if (port != None):
meas_name = '{}{}'.format(self.name, port)
targ_name = self.targ_name_no_port.format(port)
else:
meas_name = self.name
targ_name = self.targ_name_no_port
return (meas_name, targ_name, time_at) |
def write_version_py(filename: str='popmon/version.py') -> None:
'Write package version to version.py.\n\n This will ensure that the version in version.py is in sync with us.\n\n :param filename: The version.py to write too.\n :type filename: str\n '
version_str = '"""THIS FILE IS AUTO-GENERATED BY SETUP.PY."""\n\nname = "{name!s}"\nversion = "{version!s}"\nfull_version = "{full_version!s}"\nrelease = {is_release!s}\n'
with open(filename, 'w') as version_file:
version_file.write(version_str.format(name=NAME.lower(), version=VERSION, full_version=FULL_VERSION, is_release=(not DEV))) | 1,453,976,218,261,424,000 | Write package version to version.py.
This will ensure that the version in version.py is in sync with us.
:param filename: The version.py to write too.
:type filename: str | setup.py | write_version_py | stephanecollot/popmon | python | def write_version_py(filename: str='popmon/version.py') -> None:
'Write package version to version.py.\n\n This will ensure that the version in version.py is in sync with us.\n\n :param filename: The version.py to write too.\n :type filename: str\n '
version_str = '"THIS FILE IS AUTO-GENERATED BY SETUP.PY."\n\nname = "{name!s}"\nversion = "{version!s}"\nfull_version = "{full_version!s}"\nrelease = {is_release!s}\n'
with open(filename, 'w') as version_file:
version_file.write(version_str.format(name=NAME.lower(), version=VERSION, full_version=FULL_VERSION, is_release=(not DEV))) |
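For illustration, with NAME = 'popmon', VERSION = FULL_VERSION = '0.3.8' and DEV = False (the version numbers are assumed, not taken from the source), `write_version_py` would render `popmon/version.py` roughly as:

```python
"""THIS FILE IS AUTO-GENERATED BY SETUP.PY."""

name = "popmon"
version = "0.3.8"
full_version = "0.3.8"
release = True
```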
def setup_package() -> None:
'The main setup method.\n\n It is responsible for setting up and installing the package.\n '
write_version_py()
setup(name=NAME, version=VERSION, url='https://github.com/ing-bank/popmon', license='MIT', author='ING Wholesale Banking Advanced Analytics', description='Monitor the stability of a pandas or spark dataset', keywords='pandas spark data-science data-analysis monitoring statistics python jupyter ipython', long_description=long_description, long_description_content_type='text/x-rst', python_requires='>=3.6', packages=find_packages(), install_requires=REQUIREMENTS, classifiers=['Programming Language :: Python :: 3', 'License :: OSI Approved :: MIT License', 'Operating System :: OS Independent'], package_data=dict(popmon=['visualization/templates/*.html', 'visualization/templates/assets/css/*.css', 'visualization/templates/assets/js/*.js', 'test_data/*.csv.gz', 'test_data/*.json*', 'notebooks/popmon*tutorial*.ipynb']), entry_points={'console_scripts': ['popmon_run = popmon.pipeline.amazing_pipeline:run']}) | -8,776,056,129,339,171,000 | The main setup method.
It is responsible for setting up and installing the package. | setup.py | setup_package | stephanecollot/popmon | python | def setup_package() -> None:
'The main setup method.\n\n It is responsible for setting up and installing the package.\n '
write_version_py()
setup(name=NAME, version=VERSION, url='https://github.com/ing-bank/popmon', license='MIT', author='ING Wholesale Banking Advanced Analytics', description='Monitor the stability of a pandas or spark dataset', keywords='pandas spark data-science data-analysis monitoring statistics python jupyter ipython', long_description=long_description, long_description_content_type='text/x-rst', python_requires='>=3.6', packages=find_packages(), install_requires=REQUIREMENTS, classifiers=['Programming Language :: Python :: 3', 'License :: OSI Approved :: MIT License', 'Operating System :: OS Independent'], package_data=dict(popmon=['visualization/templates/*.html', 'visualization/templates/assets/css/*.css', 'visualization/templates/assets/js/*.js', 'test_data/*.csv.gz', 'test_data/*.json*', 'notebooks/popmon*tutorial*.ipynb']), entry_points={'console_scripts': ['popmon_run = popmon.pipeline.amazing_pipeline:run']}) |
def load_fairsharing(force_download: bool=False, use_tqdm: bool=True, **kwargs):
'Get the FAIRsharing registry.'
path = ensure_fairsharing(force_download=force_download, use_tqdm=use_tqdm, **kwargs)
with path.open() as file:
return yaml.safe_load(file) | -7,010,215,355,243,540,000 | Get the FAIRsharing registry. | src/fairsharing_client/api.py | load_fairsharing | cthoyt/fairsharing-client | python | def load_fairsharing(force_download: bool=False, use_tqdm: bool=True, **kwargs):
path = ensure_fairsharing(force_download=force_download, use_tqdm=use_tqdm, **kwargs)
with path.open() as file:
return yaml.safe_load(file) |
def ensure_fairsharing(force_download: bool=False, use_tqdm: bool=True, **kwargs):
'Get the FAIRsharing registry.'
if (PATH.exists() and (not force_download)):
return PATH
client = FairsharingClient(**kwargs)
rv = {row['prefix']: row for row in tqdm(client.iter_records(), unit_scale=True, unit='record', desc='Downloading FAIRsharing', disable=(not use_tqdm))}
with PATH.open('w') as file:
yaml.safe_dump(rv, file, allow_unicode=True, sort_keys=True)
return PATH | -3,742,049,016,979,473,000 | Get the FAIRsharing registry. | src/fairsharing_client/api.py | ensure_fairsharing | cthoyt/fairsharing-client | python | def ensure_fairsharing(force_download: bool=False, use_tqdm: bool=True, **kwargs):
if (PATH.exists() and (not force_download)):
return PATH
client = FairsharingClient(**kwargs)
rv = {row['prefix']: row for row in tqdm(client.iter_records(), unit_scale=True, unit='record', desc='Downloading FAIRsharing', disable=(not use_tqdm))}
with PATH.open('w') as file:
yaml.safe_dump(rv, file, allow_unicode=True, sort_keys=True)
return PATH |
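A minimal usage sketch, assuming FAIRsharing credentials are already available to pystow (for example through its configuration file), so that `FairsharingClient` can sign in without explicit arguments:

```python
# First call downloads and caches the registry as YAML; later calls reuse the cache.
registry = load_fairsharing()
print(len(registry))  # number of records, keyed by FAIRsharing prefix
```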
def __init__(self, login: Optional[str]=None, password: Optional[str]=None, base_url: Optional[str]=None):
'Instantiate the client and get an appropriate JWT token.\n\n :param login: FAIRsharing username\n :param password: Corresponding FAIRsharing password\n :param base_url: The base URL\n '
self.base_url = (base_url or 'https://api.fairsharing.org')
self.signin_url = f'{self.base_url}/users/sign_in'
self.records_url = f'{self.base_url}/fairsharing_records'
self.username = pystow.get_config('fairsharing', 'login', passthrough=login, raise_on_missing=True)
self.password = pystow.get_config('fairsharing', 'password', passthrough=password, raise_on_missing=True)
self.jwt = self.get_jwt()
self.session = requests.Session()
self.session.headers.update({'Accept': 'application/json', 'Content-Type': 'application/json', 'Authorization': f'Bearer {self.jwt}'}) | -7,216,728,358,557,764,000 | Instantiate the client and get an appropriate JWT token.
:param login: FAIRsharing username
:param password: Corresponding FAIRsharing password
:param base_url: The base URL | src/fairsharing_client/api.py | __init__ | cthoyt/fairsharing-client | python | def __init__(self, login: Optional[str]=None, password: Optional[str]=None, base_url: Optional[str]=None):
'Instantiate the client and get an appropriate JWT token.\n\n :param login: FAIRsharing username\n :param password: Corresponding FAIRsharing password\n :param base_url: The base URL\n '
self.base_url = (base_url or 'https://api.fairsharing.org')
self.signin_url = f'{self.base_url}/users/sign_in'
self.records_url = f'{self.base_url}/fairsharing_records'
self.username = pystow.get_config('fairsharing', 'login', passthrough=login, raise_on_missing=True)
self.password = pystow.get_config('fairsharing', 'password', passthrough=password, raise_on_missing=True)
self.jwt = self.get_jwt()
self.session = requests.Session()
self.session.headers.update({'Accept': 'application/json', 'Content-Type': 'application/json', 'Authorization': f'Bearer {self.jwt}'}) |
def get_jwt(self) -> str:
'Get the JWT.'
payload = {'user': {'login': self.username, 'password': self.password}}
res = requests.post(self.signin_url, json=payload).json()
return res['jwt'] | 2,762,539,216,933,605,400 | Get the JWT. | src/fairsharing_client/api.py | get_jwt | cthoyt/fairsharing-client | python | def get_jwt(self) -> str:
payload = {'user': {'login': self.username, 'password': self.password}}
res = requests.post(self.signin_url, json=payload).json()
return res['jwt'] |
def iter_records(self) -> Iterable[Mapping[(str, Any)]]:
'Iterate over all FAIRsharing records.'
(yield from self._iter_records_helper(self.records_url)) | -8,303,350,746,535,719,000 | Iterate over all FAIRsharing records. | src/fairsharing_client/api.py | iter_records | cthoyt/fairsharing-client | python | def iter_records(self) -> Iterable[Mapping[(str, Any)]]:
(yield from self._iter_records_helper(self.records_url)) |
def load_reference(path_to_reference):
'Load Reference reference relevant passages\n Args:path_to_reference (str): path to a file to load.\n Returns:qids_to_relevant_passageids (dict): dictionary mapping from query_id (int) to relevant passages (list of ints).\n '
with open(path_to_reference, 'r') as f:
qids_to_relevant_passageids = load_reference_from_stream(f)
return qids_to_relevant_passageids | 7,859,981,109,900,859,000 | Load Reference reference relevant passages
Args:path_to_reference (str): path to a file to load.
Returns:qids_to_relevant_passageids (dict): dictionary mapping from query_id (int) to relevant passages (list of ints). | demo/search/src/eval/evaluation.py | load_reference | meta-soul/MetaSpore | python | def load_reference(path_to_reference):
'Load Reference reference relevant passages\n Args:path_to_reference (str): path to a file to load.\n Returns:qids_to_relevant_passageids (dict): dictionary mapping from query_id (int) to relevant passages (list of ints).\n '
with open(path_to_reference, 'r') as f:
qids_to_relevant_passageids = load_reference_from_stream(f)
return qids_to_relevant_passageids |
def load_candidate(path_to_candidate):
'Load candidate data from a file.\n Args:path_to_candidate (str): path to file to load.\n Returns:qid_to_ranked_candidate_passages (dict): dictionary mapping from query_id (int) to a list of 1000 passage ids(int) ranked by relevance and importance\n '
with open(path_to_candidate, 'r') as f:
qid_to_ranked_candidate_passages = load_candidate_from_stream(f)
return qid_to_ranked_candidate_passages | 2,387,017,372,047,628,300 | Load candidate data from a file.
Args:path_to_candidate (str): path to file to load.
Returns:qid_to_ranked_candidate_passages (dict): dictionary mapping from query_id (int) to a list of 1000 passage ids(int) ranked by relevance and importance | demo/search/src/eval/evaluation.py | load_candidate | meta-soul/MetaSpore | python | def load_candidate(path_to_candidate):
'Load candidate data from a file.\n Args:path_to_candidate (str): path to file to load.\n Returns:qid_to_ranked_candidate_passages (dict): dictionary mapping from query_id (int) to a list of 1000 passage ids(int) ranked by relevance and importance\n '
with open(path_to_candidate, 'r') as f:
qid_to_ranked_candidate_passages = load_candidate_from_stream(f)
return qid_to_ranked_candidate_passages |
def quality_checks_qids(qids_to_relevant_passageids, qids_to_ranked_candidate_passages):
'Perform quality checks on the dictionaries\n Args:\n p_qids_to_relevant_passageids (dict): dictionary of query-passage mapping\n Dict as read in with load_reference or load_reference_from_stream\n p_qids_to_ranked_candidate_passages (dict): dictionary of query-passage candidates\n Returns:\n bool,str: Boolean whether allowed, message to be shown in case of a problem\n '
message = ''
allowed = True
candidate_set = set(qids_to_ranked_candidate_passages.keys())
ref_set = set(qids_to_relevant_passageids.keys())
for qid in qids_to_ranked_candidate_passages:
duplicate_pids = set([item for (item, count) in Counter(qids_to_ranked_candidate_passages[qid]).items() if (count > 1)])
if (len((duplicate_pids - set([0]))) > 0):
message = 'Cannot rank a passage multiple times for a single query. QID={qid}, PID={pid}'.format(qid=qid, pid=list(duplicate_pids)[0])
allowed = False
return (allowed, message) | 3,041,526,021,280,315,400 | Perform quality checks on the dictionaries
Args:
p_qids_to_relevant_passageids (dict): dictionary of query-passage mapping
Dict as read in with load_reference or load_reference_from_stream
p_qids_to_ranked_candidate_passages (dict): dictionary of query-passage candidates
Returns:
bool,str: Boolean whether allowed, message to be shown in case of a problem | demo/search/src/eval/evaluation.py | quality_checks_qids | meta-soul/MetaSpore | python | def quality_checks_qids(qids_to_relevant_passageids, qids_to_ranked_candidate_passages):
'Perform quality checks on the dictionaries\n Args:\n p_qids_to_relevant_passageids (dict): dictionary of query-passage mapping\n Dict as read in with load_reference or load_reference_from_stream\n p_qids_to_ranked_candidate_passages (dict): dictionary of query-passage candidates\n Returns:\n bool,str: Boolean whether allowed, message to be shown in case of a problem\n '
message = ''
allowed = True
candidate_set = set(qids_to_ranked_candidate_passages.keys())
ref_set = set(qids_to_relevant_passageids.keys())
for qid in qids_to_ranked_candidate_passages:
duplicate_pids = set([item for (item, count) in Counter(qids_to_ranked_candidate_passages[qid]).items() if (count > 1)])
if (len((duplicate_pids - set([0]))) > 0):
message = 'Cannot rank a passage multiple times for a single query. QID={qid}, PID={pid}'.format(qid=qid, pid=list(duplicate_pids)[0])
allowed = False
return (allowed, message) |
def compute_metrics(qids_to_relevant_passageids, qids_to_ranked_candidate_passages):
"Compute MRR metric\n Args:\n p_qids_to_relevant_passageids (dict): dictionary of query-passage mapping\n Dict as read in with load_reference or load_reference_from_stream\n p_qids_to_ranked_candidate_passages (dict): dictionary of query-passage candidates\n Returns:\n dict: dictionary of metrics {'MRR': <MRR Score>}\n "
all_scores = {}
MRR = 0
qids_with_relevant_passages = 0
ranking = []
recall_q_top1 = set()
recall_q_top50 = set()
recall_q_all = set()
for qid in qids_to_ranked_candidate_passages:
if (qid in qids_to_relevant_passageids):
ranking.append(0)
target_pid = qids_to_relevant_passageids[qid]
candidate_pid = qids_to_ranked_candidate_passages[qid]
for i in range(0, MaxMRRRank):
if (candidate_pid[i] in target_pid):
MRR += (1.0 / (i + 1))
ranking.pop()
ranking.append((i + 1))
break
for (i, pid) in enumerate(candidate_pid):
if (pid in target_pid):
recall_q_all.add(qid)
if (i < 50):
recall_q_top50.add(qid)
if (i == 0):
recall_q_top1.add(qid)
break
if (len(ranking) == 0):
raise IOError('No matching QIDs found. Are you sure you are scoring the evaluation set?')
MRR = (MRR / len(qids_to_relevant_passageids))
recall_top1 = ((len(recall_q_top1) * 1.0) / len(qids_to_relevant_passageids))
recall_top50 = ((len(recall_q_top50) * 1.0) / len(qids_to_relevant_passageids))
recall_all = ((len(recall_q_all) * 1.0) / len(qids_to_relevant_passageids))
all_scores['MRR@10'] = MRR
all_scores['recall@1'] = recall_top1
all_scores['recall@50'] = recall_top50
all_scores['QueriesRanked'] = len(qids_to_ranked_candidate_passages)
return all_scores | -6,520,825,046,772,404,000 | Compute MRR metric
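A small worked example of the MRR@10 and recall bookkeeping above, with hypothetical query and passage ids; it assumes the module-level `MaxMRRRank` constant is 10, as is conventional for MS MARCO:

```python
qids_to_relevant_passageids = {1: [42], 2: [7]}
qids_to_ranked_candidate_passages = {
    1: [10, 42, 3, 5, 8, 9, 11, 12, 13, 14],  # relevant pid at rank 2 -> reciprocal rank 1/2
    2: [7, 1, 2, 3, 4, 5, 6, 8, 9, 10],       # relevant pid at rank 1 -> reciprocal rank 1/1
}
scores = compute_metrics(qids_to_relevant_passageids, qids_to_ranked_candidate_passages)
# Expected: MRR@10 = (0.5 + 1.0) / 2 = 0.75, recall@1 = 0.5, recall@50 = 1.0
print(scores)
```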
Args:
p_qids_to_relevant_passageids (dict): dictionary of query-passage mapping
Dict as read in with load_reference or load_reference_from_stream
p_qids_to_ranked_candidate_passages (dict): dictionary of query-passage candidates
Returns:
dict: dictionary of metrics {'MRR': <MRR Score>} | demo/search/src/eval/evaluation.py | compute_metrics | meta-soul/MetaSpore | python | def compute_metrics(qids_to_relevant_passageids, qids_to_ranked_candidate_passages):
"Compute MRR metric\n Args:\n p_qids_to_relevant_passageids (dict): dictionary of query-passage mapping\n Dict as read in with load_reference or load_reference_from_stream\n p_qids_to_ranked_candidate_passages (dict): dictionary of query-passage candidates\n Returns:\n dict: dictionary of metrics {'MRR': <MRR Score>}\n "
all_scores = {}
MRR = 0
qids_with_relevant_passages = 0
ranking = []
recall_q_top1 = set()
recall_q_top50 = set()
recall_q_all = set()
for qid in qids_to_ranked_candidate_passages:
if (qid in qids_to_relevant_passageids):
ranking.append(0)
target_pid = qids_to_relevant_passageids[qid]
candidate_pid = qids_to_ranked_candidate_passages[qid]
for i in range(0, MaxMRRRank):
if (candidate_pid[i] in target_pid):
MRR += (1.0 / (i + 1))
ranking.pop()
ranking.append((i + 1))
break
for (i, pid) in enumerate(candidate_pid):
if (pid in target_pid):
recall_q_all.add(qid)
if (i < 50):
recall_q_top50.add(qid)
if (i == 0):
recall_q_top1.add(qid)
break
if (len(ranking) == 0):
raise IOError('No matching QIDs found. Are you sure you are scoring the evaluation set?')
MRR = (MRR / len(qids_to_relevant_passageids))
recall_top1 = ((len(recall_q_top1) * 1.0) / len(qids_to_relevant_passageids))
recall_top50 = ((len(recall_q_top50) * 1.0) / len(qids_to_relevant_passageids))
recall_all = ((len(recall_q_all) * 1.0) / len(qids_to_relevant_passageids))
all_scores['MRR@10'] = MRR
all_scores['recall@1'] = recall_top1
all_scores['recall@50'] = recall_top50
all_scores['QueriesRanked'] = len(qids_to_ranked_candidate_passages)
return all_scores |
def main():
'Command line:\n python result_eval.py <path_to_reference_file> <path_to_candidate_file>\n '
if (len(sys.argv) == 3):
path_to_reference = sys.argv[1]
path_to_candidate = sys.argv[2]
else:
print('Usage: result_eval.py <reference ranking> <candidate ranking>')
exit()
metrics = compute_metrics_from_files(path_to_reference, path_to_candidate)
result = dict()
for metric in sorted(metrics):
result[metric] = metrics[metric]
result_json = json.dumps(result)
print(result_json) | 208,726,705,023,159,900 | Command line:
python result_eval.py <path_to_reference_file> <path_to_candidate_file> | demo/search/src/eval/evaluation.py | main | meta-soul/MetaSpore | python | def main():
'Command line:\n python result_eval.py <path_to_reference_file> <path_to_candidate_file>\n '
if (len(sys.argv) == 3):
path_to_reference = sys.argv[1]
path_to_candidate = sys.argv[2]
else:
print('Usage: result_eval.py <reference ranking> <candidate ranking>')
exit()
metrics = compute_metrics_from_files(path_to_reference, path_to_candidate)
result = dict()
for metric in sorted(metrics):
result[metric] = metrics[metric]
result_json = json.dumps(result)
print(result_json) |
def __init__(self, data, time, timestamp=True):
'\n The input parameters are\n - "data": the data to store (of any kind)\n - "time": the time information associated with the data (an integer number)\n - "timestamp": boolean flag. If true, the "time" field is a timestamp; otherwise,\n it is a time interval\n '
try:
time = int(time)
except:
raise TypeError('"time" parameter is invalid. It must be an integer number')
self.data = data
self.time = time
self.timestamp = (True if timestamp else False) | 4,228,763,057,017,822,700 | The input parameters are
- "data": the data to store (of any kind)
- "time": the time information associated with the data (an integer number)
- "timestamp": boolean flag. If true, the "time" field is a timestamp; otherwise,
it is a time interval | timed_structures.py | __init__ | gavalle94/P2P-Sim | python | def __init__(self, data, time, timestamp=True):
'\n The input parameters are\n - "data": the data to store (of any kind)\n - "time": the time information associated with the data (an integer number)\n - "timestamp": boolean flag. If true, the "time" field is a timestamp; otherwise,\n it is a time interval\n '
try:
time = int(time)
except:
raise TypeError('"time" parameter is invalid. It must be an integer number')
self.data = data
self.time = time
self.timestamp = (True if timestamp else False) |
def get_data(self):
'\n Ritorna il campo "data"\n '
return self.data | 85,169,138,149,347,760 | Ritorna il campo "data" | timed_structures.py | get_data | gavalle94/P2P-Sim | python | def get_data(self):
'\n \n '
return self.data |
def get_time(self):
'\n Ritorna il campo "time"\n '
return self.time | 4,622,078,850,810,152,000 | Ritorna il campo "time" | timed_structures.py | get_time | gavalle94/P2P-Sim | python | def get_time(self):
'\n \n '
return self.time |
def __init__(self, timestamp=True, empty=True):
'\n La flag "timestamp" serve per specificare se la lista contiene dati con un timestamp (True) oppure un \n intervallo temporale (False) associato: la flag "empty" permette invece di creare, se settata a False, \n un TimedArray contenente al suo interno un nodo di partenza (d = 0, t = 0)\n '
self._list = []
self.timestamp = (timestamp is True)
if (not empty):
self.append(TimedData(0, 0, self.timestamp)) | 4,569,864,705,304,169,500 | The "timestamp" flag specifies whether the list contains data with an associated timestamp (True) or
a time interval (False); the "empty" flag, if set to False, creates
a TimedArray that already contains a starting node (d = 0, t = 0) | timed_structures.py | __init__ | gavalle94/P2P-Sim | python | def __init__(self, timestamp=True, empty=True):
'\n The "timestamp" flag specifies whether the list contains data with an associated timestamp (True) or \n a time interval (False); the "empty" flag, if set to False, creates \n a TimedArray that already contains a starting node (d = 0, t = 0)\n '
self._list = []
self.timestamp = (timestamp is True)
if (not empty):
self.append(TimedData(0, 0, self.timestamp)) |
def get_list(self):
'\n Returns the collection of "TimedData" objects, stored as a list\n '
return self._list | -8,256,815,946,186,110,000 | Returns the collection of "TimedData" objects, stored as a list | timed_structures.py | get_list | gavalle94/P2P-Sim | python | def get_list(self):
'\n Returns the collection of "TimedData" objects, stored as a list\n '
return self._list |
def get_data_list(self):
'\n Ritorna gli attributi "data" di ogni elemento del vettore, sottoforma di lista\n '
return map((lambda x: x.get_data()), self._list) | 5,630,010,020,618,524,000 | Ritorna gli attributi "data" di ogni elemento del vettore, sottoforma di lista | timed_structures.py | get_data_list | gavalle94/P2P-Sim | python | def get_data_list(self):
'\n \n '
return map((lambda x: x.get_data()), self._list) |
def get_time_list(self):
'\n Ritorna gli attributi "time" di ogni elemento del vettore, sottoforma di lista\n '
return map((lambda x: x.get_time()), self._list) | 1,463,834,359,463,817,000 | Ritorna gli attributi "time" di ogni elemento del vettore, sottoforma di lista | timed_structures.py | get_time_list | gavalle94/P2P-Sim | python | def get_time_list(self):
'\n \n '
return map((lambda x: x.get_time()), self._list) |
def has_time_intervals(self):
'\n Returns True if the elements of the array have an associated time interval\n '
return (self.timestamp is False) | -6,681,286,669,567,149,000 | Returns True if the elements of the array have an associated time interval | timed_structures.py | has_time_intervals | gavalle94/P2P-Sim | python | def has_time_intervals(self):
'\n \n '
return (self.timestamp is False) |
def append(self, item):
'\n Add an element to the list\n '
if (not isinstance(item, TimedData)):
raise TypeError('cannot add a non-"TimedData" object to a "TimedArray" list')
if (item.timestamp != self.timestamp):
raise ValueError(('"item" parameter is invalid: its "timestamp" attribute must be equal to %s' % self.timestamp))
self._list.append(item) | -585,549,019,964,779,300 | Add an element to the list | timed_structures.py | append | gavalle94/P2P-Sim | python | def append(self, item):
'\n \n '
if (not isinstance(item, TimedData)):
raise TypeError('cannot add a non-"TimedData" object to a "TimedArray" list')
if (item.timestamp != self.timestamp):
raise ValueError(('"item" parameter is invalid: its "timestamp" attribute must be equal to %s' % self.timestamp))
self._list.append(item) |
def remove(self, item):
'\n This function removes "item" (if present) from the array\n '
if (not isinstance(item, TimedData)):
raise TypeError('the item to remove must be a "TimedData" object')
if (item in self._list):
self._list.remove(item) | 8,417,262,223,646,293,000 | This function removes "item" (if present) from the array | timed_structures.py | remove | gavalle94/P2P-Sim | python | def remove(self, item):
'\n This function removes "item" (if present) from the array\n '
if (not isinstance(item, TimedData)):
raise TypeError('the item to remove must be a "TimedData" object')
if (item in self._list):
self._list.remove(item) |
def remove_all(self, items):
'\n This function removes a list of "TimedData" objects\n '
if (not isinstance(items, (list, tuple))):
raise TypeError('"items" parameter must be an array')
try:
for x in items:
self.remove(x)
except TypeError:
raise TypeError('the items list must contain only "TimedData" objects') | 6,566,842,987,476,713,000 | This function removes a list of "TimedData" objects | timed_structures.py | remove_all | gavalle94/P2P-Sim | python | def remove_all(self, items):
'\n \n '
if (not isinstance(items, (list, tuple))):
raise TypeError('"items" parameter must be an array')
try:
for x in items:
self.remove(x)
except TypeError:
raise TypeError('the items list must contain only "TimedData" objects') |
def filter(self, f):
'\n This function applies the function f to filter the content of the array\n '
res = TimedArray(self.timestamp, empty=True)
res._list = filter(f, self._list)
return res | 154,201,250,522,479 | This function applies the function f to filter the content of the array | timed_structures.py | filter | gavalle94/P2P-Sim | python | def filter(self, f):
'\n \n '
res = TimedArray(self.timestamp, empty=True)
res._list = filter(f, self._list)
return res |
def filter_data_range(self, start, end):
'\n The function filters the array by a range of "data" values\n '
return self.filter((lambda x: (start <= x.get_data() <= end))) | -7,030,954,649,003,596,000 | The function filters the array by a range of "data" values | timed_structures.py | filter_data_range | gavalle94/P2P-Sim | python | def filter_data_range(self, start, end):
'\n \n '
return self.filter((lambda x: (start <= x.get_data() <= end))) |
def filter_time_range(self, start, end):
'\n The function filters the array by a range of "time" values\n '
return self.filter((lambda x: (start <= x.get_time() <= end))) | -147,114,222,026,584,930 | The function filters the array by a range of "time" values | timed_structures.py | filter_time_range | gavalle94/P2P-Sim | python | def filter_time_range(self, start, end):
'\n \n '
return self.filter((lambda x: (start <= x.get_time() <= end))) |
def search(self, to_search):
'\n Search function over the content of the array.\n If "timestamp" is True, the search key is the timestamp; otherwise,\n the key is the content that the time interval is associated with.\n '
if self.timestamp:
res = self.search_by_time(to_search)
else:
res = self.search_by_data(to_search)
return res | 7,965,807,605,075,835,000 | Search function over the content of the array.
If "timestamp" is True, the search key is the timestamp; otherwise,
the key is the content that the time interval is associated with. | timed_structures.py | search | gavalle94/P2P-Sim | python | def search(self, to_search):
'\n Search function over the content of the array.\n If "timestamp" is True, the search key is the timestamp; otherwise,\n the key is the content that the time interval is associated with.\n '
if self.timestamp:
res = self.search_by_time(to_search)
else:
res = self.search_by_data(to_search)
return res |
def search_by_data(self, to_search):
'\n Funzione di ricerca per campo "data", all\'interno del vettore\n '
research = (lambda x: (x.data == to_search))
return filter(research, self._list) | -6,827,395,980,579,480,000 | Search function on the "data" field, within the array | timed_structures.py | search_by_data | gavalle94/P2P-Sim | python | def search_by_data(self, to_search):
'\n Funzione di ricerca per campo "data", all\'interno del vettore\n '
research = (lambda x: (x.data == to_search))
return filter(research, self._list) |
def search_by_datas(self, search_params):
'\n Funzione di ricerca per campo "data", all\'interno del vettore: il parametro di ricerca e\' un vettore\n '
if (not isinstance(search_params, (list, tuple))):
raise TypeError('"searchParams" parameter is invalid. It must be an array')
result = []
for x in search_params:
tmp = self.search_by_data(x)
for t in tmp:
result.append(t)
return result | 4,315,657,999,801,148,000 | Search function on the "data" field, within the array: the search parameter is an array | timed_structures.py | search_by_datas | gavalle94/P2P-Sim | python | def search_by_datas(self, search_params):
'\n Funzione di ricerca per campo "data", all\'interno del vettore: il parametro di ricerca e\' un vettore\n '
if (not isinstance(search_params, (list, tuple))):
raise TypeError('"searchParams" parameter is invalid. It must be an array')
result = []
for x in search_params:
tmp = self.search_by_data(x)
for t in tmp:
result.append(t)
return result |
def search_by_time(self, to_search):
'\n Funzione di ricerca per campo "time", all\'interno del vettore\n Il parametro "toSearch" deve essere un numero intero\n '
if (not isinstance(to_search, (int, long))):
raise TypeError('the research parameter must be an integer number (timestamp)')
research = (lambda x: (x.time == to_search))
return filter(research, self._list) | -6,471,579,091,854,855,000 | Search function on the "time" field, within the array
The "toSearch" parameter must be an integer number | timed_structures.py | search_by_time | gavalle94/P2P-Sim | python | def search_by_time(self, to_search):
'\n Search function on the "time" field, within the array\n The "toSearch" parameter must be an integer number\n '
if (not isinstance(to_search, (int, long))):
raise TypeError('the research parameter must be an integer number (timestamp)')
research = (lambda x: (x.time == to_search))
return filter(research, self._list) |
def search_by_times(self, search_params):
'\n Funzione di ricerca per campo "time", all\'interno del vettore: il parametro di ricerca e\' un vettore\n '
if (not isinstance(search_params, (list, tuple))):
raise TypeError('"searchParams" parameter is invalid. It must be an array')
result = []
for x in search_params:
tmp = self.search_by_time(x)
for t in tmp:
result.append(t)
return result | 2,318,858,354,737,550,000 | Search function on the "time" field, within the array: the search parameter is an array | timed_structures.py | search_by_times | gavalle94/P2P-Sim | python | def search_by_times(self, search_params):
'\n Funzione di ricerca per campo "time", all\'interno del vettore: il parametro di ricerca e\' un vettore\n '
if (not isinstance(search_params, (list, tuple))):
raise TypeError('"searchParams" parameter is invalid. It must be an array')
result = []
for x in search_params:
tmp = self.search_by_time(x)
for t in tmp:
result.append(t)
return result |
def contains(self, to_search):
'\n The function tells whether searching the array with the specified\n "toSearch" search key produces any results\n '
return (len(self.search(to_search)) > 0) | -942,463,144,239,270,000 | The function tells whether searching the array with the specified
"toSearch" search key produces any results | timed_structures.py | contains | gavalle94/P2P-Sim | python | def contains(self, to_search):
'\n The function tells whether searching the array with the specified\n "toSearch" search key produces any results\n '
return (len(self.search(to_search)) > 0) |
def update(self, to_search, new_value):
'\n This function updates the content of the elements of the array that\n satisfy the specified search criterion\n - "toSearch" is the search key\n - "newValue" is the updated value to insert\n '
items = self.search(to_search)
if self.timestamp:
def update_function(x):
x.data = new_value
else:
def update_function(x):
x.time = new_value
map(update_function, items) | 2,321,393,015,156,305,400 | This function updates the content of the elements of the array that
satisfy the specified search criterion
- "toSearch" is the search key
- "newValue" is the updated value to insert | timed_structures.py | update | gavalle94/P2P-Sim | python | def update(self, to_search, new_value):
'\n This function updates the content of the elements of the array that\n satisfy the specified search criterion\n - "toSearch" is the search key\n - "newValue" is the updated value to insert\n '
items = self.search(to_search)
if self.timestamp:
def update_function(x):
x.data = new_value
else:
def update_function(x):
x.time = new_value
map(update_function, items) |
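A short usage sketch of the TimedData/TimedArray pair defined above (the stored values are illustrative):

```python
# Build a timestamp-based array and add two samples.
events = TimedArray(timestamp=True, empty=True)
events.append(TimedData('download_started', 100, timestamp=True))
events.append(TimedData('download_finished', 160, timestamp=True))

# With timestamp=True the search key is the timestamp (an integer).
print(events.contains(100))              # True
events.update(100, 'upload_started')     # rewrite the data of the node at t=100
print(events.search(100)[0].get_data())  # 'upload_started'
```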
@click.group()
def cli():
'\n Click entry point: vue-cli commands group\n By convention all new cli has a cli function with a pass statement\n '
pass | -7,861,764,993,612,917,000 | Click entry point: vue-cli commands group
By convention all new cli has a cli function with a pass statement | python_vuejs/vuejs.py | cli | Timtech4u/python-vuejs | python | @click.group()
def cli():
'\n Click entry point: vue-cli commands group\n By convention all new cli has a cli function with a pass statement\n '
pass |
@cli.command()
def vuecheck():
'\n Check if node > 5 and npm > 3 are installed\n '
if VueJs.node_check():
click.echo(click.style('Found node and npm', fg='green'))
else:
click.echo(click.style('Missing node and npm installation', fg='red')) | -9,014,432,238,725,884,000 | Check if node > 5 and npm > 3 are installed | python_vuejs/vuejs.py | vuecheck | Timtech4u/python-vuejs | python | @cli.command()
def vuecheck():
'\n \n '
if VueJs.node_check():
click.echo(click.style('Found node and npm', fg='green'))
else:
click.echo(click.style('Missing node and npm installation', fg='red')) |
@cli.command()
def installvuecli():
'\n Install vue-cli\n '
if VueJs.vue_cli_check():
click.echo(click.style('Found valid vue-cli', fg='green'))
else:
VueJs.install_cli()
click.echo(click.style('Installed vue-cli globally', fg='green')) | 6,865,345,044,462,891,000 | Install vue-cli | python_vuejs/vuejs.py | installvuecli | Timtech4u/python-vuejs | python | @cli.command()
def installvuecli():
'\n \n '
if VueJs.vue_cli_check():
click.echo(click.style('Found valid vue-cli', fg='green'))
else:
VueJs.install_cli()
click.echo(click.style('Installed vue-cli globally', fg='green')) |
@cli.command()
@click.argument('project')
def startvueapp(project):
'\n Init vue project via vue-cli\n '
result = VueJsBuilder.startproject(project)
click.echo(click.style(result.message, fg=result.color)) | -6,052,704,749,312,351,000 | Init vue project via vue-cli | python_vuejs/vuejs.py | startvueapp | Timtech4u/python-vuejs | python | @cli.command()
@click.argument('project')
def startvueapp(project):
'\n \n '
result = VueJsBuilder.startproject(project)
click.echo(click.style(result.message, fg=result.color)) |
@cli.command()
def vuedev():
'\n Run frontend dev server via npm\n '
VueJs.dev() | 1,504,815,223,673,680,400 | Run frontend dev server via npm | python_vuejs/vuejs.py | vuedev | Timtech4u/python-vuejs | python | @cli.command()
def vuedev():
'\n \n '
VueJs.dev() |
@cli.command()
def vuebuild():
'\n Build Vue.js project via npm\n '
VueJs.build() | 1,892,445,388,804,446,700 | Build Vue.js project via npm | python_vuejs/vuejs.py | vuebuild | Timtech4u/python-vuejs | python | @cli.command()
def vuebuild():
'\n \n '
VueJs.build() |
@staticmethod
def node_check():
'\n Node and npm version checker\n '
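# note: versions are compared lexicographically as strings (e.g. 'v10' sorts before 'v5'), so this is only a rough check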
node_ver = check_output('node -v'.split()).decode('utf-8').rsplit('.')[0]
npm_ver = check_output('npm -v'.split()).decode('utf-8').rsplit('.')[0]
return all([(node_ver > 'v5'), (npm_ver >= '4')]) | 7,613,313,706,571,738,000 | Node and npm version checker | python_vuejs/vuejs.py | node_check | Timtech4u/python-vuejs | python | @staticmethod
def node_check():
'\n \n '
node_ver = check_output('node -v'.split()).decode('utf-8').rsplit('.')[0]
npm_ver = check_output('npm -v'.split()).decode('utf-8').rsplit('.')[0]
return all([(node_ver > 'v5'), (npm_ver >= '4')]) |
@staticmethod
def vue_cli_check():
'\n vue-cli version checker\n '
try:
return check_output('vue -V'.split()).decode('utf-8').rsplit('.')[0]
except OSError:
return False | 8,401,622,990,614,264,000 | vue-cli version checker | python_vuejs/vuejs.py | vue_cli_check | Timtech4u/python-vuejs | python | @staticmethod
def vue_cli_check():
'\n \n '
try:
return check_output('vue -V'.split()).decode('utf-8').rsplit('.')[0]
except OSError:
return False |
def rot_matrix(theta):
'\n rot_matrix(theta)\n 2D rotation matrix for theta in radians\n returns numpy matrix\n '
(c, s) = (np.cos(theta), np.sin(theta))
return np.matrix([[c, (- s)], [s, c]]) | -2,153,688,713,178,053,600 | rot_matrix(theta)
2D rotation matrix for theta in radians
returns numpy matrix | schmidt_funcs.py | rot_matrix | johnarban/arban | python | def rot_matrix(theta):
'\n rot_matrix(theta)\n 2D rotation matrix for theta in radians\n returns numpy matrix\n '
(c, s) = (np.cos(theta), np.sin(theta))
return np.matrix([[c, (- s)], [s, c]]) |
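# illustrative check (not from the original source): rot_matrix(np.pi / 2) is approximately
# [[0, -1], [1, 0]], i.e. a 90-degree counter-clockwise rotation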
def rectangle(c, w, h, angle=0, center=True):
'\n create rotated rectangle\n for input into PIL ImageDraw.polygon\n to make a rectangle polygon mask\n\n Rectangle is created and rotated with center\n at zero, and then translated to center position\n\n accepted centers\n Default : center\n tl, tr, bl, br\n '
(cx, cy) = c
x = (((- w) / 2.0), ((+ w) / 2.0), ((+ w) / 2.0), ((- w) / 2.0))
y = (((+ h) / 2.0), ((+ h) / 2.0), ((- h) / 2.0), ((- h) / 2.0))
if (center is not True):
if (center[0] == 'b'):
cy = (cy + (h / 2.0))
else:
cy = (cy - (h / 2.0))
if (center[1] == 'l'):
cx = (cx + (w / 2.0))
else:
cx = (cx - (w / 2.0))
R = rot_matrix(((angle * np.pi) / 180.0))
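# rotate each corner about the origin, then translate it to the (possibly shifted) centre (cx, cy)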
c = []
for i in range(4):
(xr, yr) = np.dot(R, np.asarray([x[i], y[i]])).A.ravel()
c.append(((cx + xr), (cy + yr)))
return c | 7,472,526,589,179,137,000 | create rotated rectangle
for input into PIL ImageDraw.polygon
to make a rectangle polygon mask
Rectangle is created and rotated with center
at zero, and then translated to center position
accepted centers
Default : center
tl, tr, bl, br | schmidt_funcs.py | rectangle | johnarban/arban | python | def rectangle(c, w, h, angle=0, center=True):
'\n create rotated rectangle\n for input into PIL ImageDraw.polygon\n to make a rectangle polygon mask\n\n Rectangle is created and rotated with center\n at zero, and then translated to center position\n\n accepted centers\n Default : center\n tl, tr, bl, br\n '
(cx, cy) = c
x = (((- w) / 2.0), ((+ w) / 2.0), ((+ w) / 2.0), ((- w) / 2.0))
y = (((+ h) / 2.0), ((+ h) / 2.0), ((- h) / 2.0), ((- h) / 2.0))
if (center is not True):
if (center[0] == 'b'):
cy = (cy + (h / 2.0))
else:
cy = (cy - (h / 2.0))
if (center[1] == 'l'):
cx = (cx + (w / 2.0))
else:
cx = (cx - (w / 2.0))
R = rot_matrix(((angle * np.pi) / 180.0))
c = []
for i in range(4):
(xr, yr) = np.dot(R, np.asarray([x[i], y[i]])).A.ravel()
c.append(((cx + xr), (cy + yr)))
return c |
def comp(arr):
'\n returns the compressed version\n of the input array if it is a\n numpy MaskedArray\n '
try:
return arr.compressed()
except:
return arr | -4,376,504,528,940,478,000 | returns the compressed version
of the input array if it is a
numpy MaskedArray | schmidt_funcs.py | comp | johnarban/arban | python | def comp(arr):
'\n returns the compressed version\n of the input array if it is a\n numpy MaskedArray\n '
try:
return arr.compressed()
except:
return arr |
def mavg(arr, n=2, mode='valid'):
'\n returns the moving average of an array.\n returned array is shorter by (n-1)\n '
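# FFT-based convolution is faster for long arrays; fall back to direct convolution for short ones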
if (len(arr) > 400):
return signal.fftconvolve(arr, ([(1.0 / float(n))] * n), mode=mode)
else:
return signal.convolve(arr, ([(1.0 / float(n))] * n), mode=mode) | -4,092,778,539,070,365,700 | returns the moving average of an array.
returned array is shorter by (n-1) | schmidt_funcs.py | mavg | johnarban/arban | python | def mavg(arr, n=2, mode='valid'):
'\n returns the moving average of an array.\n returned array is shorter by (n-1)\n '
if (len(arr) > 400):
return signal.fftconvolve(arr, ([(1.0 / float(n))] * n), mode=mode)
else:
return signal.convolve(arr, ([(1.0 / float(n))] * n), mode=mode) |
def mgeo(arr, n=2):
"\n Returns array of lenth len(arr) - (n-1)\n\n # # written by me\n # # slower for short loops\n # # faster for n ~ len(arr) and large arr\n a = []\n for i in xrange(len(arr)-(n-1)):\n a.append(stats.gmean(arr[i:n+i]))\n\n # # Original method# #\n # # written by me ... ~10x faster for short arrays\n b = np.array([np.roll(np.pad(arr,(0,n),mode='constant',constant_values=1),i)\n for i in xrange(n)])\n return np.product(b,axis=0)[n-1:-n]**(1./float(n))\n "
a = []
for i in range((len(arr) - (n - 1))):
a.append(stats.gmean(arr[i:(n + i)]))
return np.asarray(a) | 2,791,907,089,048,375,000 | Returns array of length len(arr) - (n-1)
# # written by me
# # slower for short loops
# # faster for n ~ len(arr) and large arr
a = []
for i in xrange(len(arr)-(n-1)):
a.append(stats.gmean(arr[i:n+i]))
# # Original method# #
# # written by me ... ~10x faster for short arrays
b = np.array([np.roll(np.pad(arr,(0,n),mode='constant',constant_values=1),i)
for i in xrange(n)])
return np.product(b,axis=0)[n-1:-n]**(1./float(n)) | schmidt_funcs.py | mgeo | johnarban/arban | python | def mgeo(arr, n=2):
"\n Returns array of lenth len(arr) - (n-1)\n\n # # written by me\n # # slower for short loops\n # # faster for n ~ len(arr) and large arr\n a = []\n for i in xrange(len(arr)-(n-1)):\n a.append(stats.gmean(arr[i:n+i]))\n\n # # Original method# #\n # # written by me ... ~10x faster for short arrays\n b = np.array([np.roll(np.pad(arr,(0,n),mode='constant',constant_values=1),i)\n for i in xrange(n)])\n return np.product(b,axis=0)[n-1:-n]**(1./float(n))\n "
a = []
for i in range((len(arr) - (n - 1))):
a.append(stats.gmean(arr[i:(n + i)]))
return np.asarray(a) |
def avg(arr, n=2):
'\n NOT a general averaging function\n return bin centers (lin and log)\n '
diff = np.diff(arr)
if np.allclose(diff, diff[::(- 1)]):
return mavg(arr, n=n)
else:
return np.power(10.0, mavg(np.log10(arr), n=n)) | -197,540,613,873,403,330 | NOT a general averaging function
return bin centers (lin and log) | schmidt_funcs.py | avg | johnarban/arban | python | def avg(arr, n=2):
'\n NOT a general averaging function\n return bin centers (lin and log)\n '
diff = np.diff(arr)
if np.allclose(diff, diff[::(- 1)]):
return mavg(arr, n=n)
else:
return np.power(10.0, mavg(np.log10(arr), n=n)) |
def llspace(xmin, xmax, n=None, log=False, dx=None, dex=None):
'\n llspace(xmin, xmax, n = None, log = False, dx = None, dex = None)\n get values evenly spaced in linear or log spaced\n n [10] -- Optional -- number of steps\n log [false] : switch for log spacing\n dx : spacing for linear bins\n dex : spacing for log bins (in base 10)\n dx and dex override n\n '
(xmin, xmax) = (float(xmin), float(xmax))
nisNone = (n is None)
dxisNone = (dx is None)
dexisNone = (dex is None)
if ((nisNone & dxisNone) & dexisNone):
print('Error: Defaulting to 10 linears steps')
n = 10.0
nisNone = False
log = (log or (dxisNone and (not dexisNone)))
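# supplying dex (with no dx) implies log spacing even when log=False was passed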
if log:
if (xmin == 0):
print('log(0) is -inf. xmin must be > 0 for log spacing')
(xmin, xmax) = (np.log10(xmin), np.log10(xmax))
if (not nisNone):
if (log and dexisNone):
dex = ((xmax - xmin) / n)
elif ((not log) and dxisNone):
dx = ((xmax - xmin) / n)
if log:
return np.power(10, np.arange(xmin, (xmax + dex), dex))
else:
return np.arange(xmin, (xmax + dx), dx) | 1,981,684,264,025,986,300 | llspace(xmin, xmax, n = None, log = False, dx = None, dex = None)
get values evenly spaced in linear or log spaced
n [10] -- Optional -- number of steps
log [false] : switch for log spacing
dx : spacing for linear bins
dex : spacing for log bins (in base 10)
dx and dex override n | schmidt_funcs.py | llspace | johnarban/arban | python | def llspace(xmin, xmax, n=None, log=False, dx=None, dex=None):
'\n llspace(xmin, xmax, n = None, log = False, dx = None, dex = None)\n get values evenly spaced in linear or log spaced\n n [10] -- Optional -- number of steps\n log [false] : switch for log spacing\n dx : spacing for linear bins\n dex : spacing for log bins (in base 10)\n dx and dex override n\n '
(xmin, xmax) = (float(xmin), float(xmax))
nisNone = (n is None)
dxisNone = (dx is None)
dexisNone = (dex is None)
if ((nisNone & dxisNone) & dexisNone):
print('Error: Defaulting to 10 linears steps')
n = 10.0
nisNone = False
log = (log or (dxisNone and (not dexisNone)))
if log:
if (xmin == 0):
print('log(0) is -inf. xmin must be > 0 for log spacing')
(xmin, xmax) = (np.log10(xmin), np.log10(xmax))
if (not nisNone):
if (log and dexisNone):
dex = ((xmax - xmin) / n)
elif ((not log) and dxisNone):
dx = ((xmax - xmin) / n)
if log:
return np.power(10, np.arange(xmin, (xmax + dex), dex))
else:
return np.arange(xmin, (xmax + dx), dx) |
def nametoradec(name):
'\n Get names formatted as\n hhmmss.ss+ddmmss to Decimal Degree\n only works for dec > 0 (splits on +, not -)\n Will fix this eventually...\n '
if ('string' not in str(type(name))):
rightascen = []
declinatio = []
for n in name:
(ra, de) = n.split('+')
ra = ((((((ra[0:2] + ':') + ra[2:4]) + ':') + ra[4:6]) + '.') + ra[6:8])
de = ((((de[0:2] + ':') + de[2:4]) + ':') + de[4:6])
coord = SkyCoord(ra, de, frame='icrs', unit=('hourangle', 'degree'))
rightascen.append(coord.ra.value)
declinatio.append(coord.dec.value)
return (np.array(rightascen), np.array(declinatio))
else:
(ra, de) = name.split('+')
ra = ((((((ra[0:2] + ':') + ra[2:4]) + ':') + ra[4:6]) + '.') + ra[6:8])
de = ((((de[0:2] + ':') + de[2:4]) + ':') + de[4:6])
coord = SkyCoord(ra, de, frame='icrs', unit=('hourangle', 'degree'))
return (np.array(coord.ra.value), np.array(coord.dec.value)) | -6,515,821,912,911,989,000 | Get names formatted as
hhmmss.ss+ddmmss to Decimal Degree
only works for dec > 0 (splits on +, not -)
Will fix this eventually... | schmidt_funcs.py | nametoradec | johnarban/arban | python | def nametoradec(name):
'\n Get names formatted as\n hhmmss.ss+ddmmss to Decimal Degree\n only works for dec > 0 (splits on +, not -)\n Will fix this eventually...\n '
if ('string' not in str(type(name))):
rightascen = []
declinatio = []
for n in name:
(ra, de) = n.split('+')
ra = ((((((ra[0:2] + ':') + ra[2:4]) + ':') + ra[4:6]) + '.') + ra[6:8])
de = ((((de[0:2] + ':') + de[2:4]) + ':') + de[4:6])
coord = SkyCoord(ra, de, frame='icrs', unit=('hourangle', 'degree'))
rightascen.append(coord.ra.value)
declinatio.append(coord.dec.value)
return (np.array(rightascen), np.array(declinatio))
else:
(ra, de) = name.split('+')
ra = ((((((ra[0:2] + ':') + ra[2:4]) + ':') + ra[4:6]) + '.') + ra[6:8])
de = ((((de[0:2] + ':') + de[2:4]) + ':') + de[4:6])
coord = SkyCoord(ra, de, frame='icrs', unit=('hourangle', 'degree'))
return (np.array(coord.ra.value), np.array(coord.dec.value)) |
def get_ext(extmap, errmap, extwcs, ra, de):
'\n Get the extinction (errors) for a particular position or\n list of positions\n More generally get the value (error) for a particular\n position given a wcs and world coordinates\n '
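# convert world coordinates (ra, dec) to pixel indices; build a WCS object first if a plain header was passed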
try:
(xp, yp) = extwcs.all_world2pix(np.array([ra]).flatten(), np.array([de]).flatten(), 0)
except:
(xp, yp) = WCS(extwcs).all_world2pix(np.array([ra]).flatten(), np.array([de]).flatten(), 0)
ext = []
err = []
for i in range(len(np.array(xp))):
try:
ext.append(extmap[(yp[int(round(i))], xp[int(round(i))])])
if (errmap is not None):
err.append(errmap[(yp[int(round(i))], xp[int(round(i))])])
except IndexError:
ext.append(np.nan)
if (errmap is not None):
err.append(np.nan)
if (errmap is not None):
return (np.array(ext), np.array(err))
else:
return (np.array(ext), None) | -8,182,216,514,830,697,000 | Get the extinction (errors) for a particular position or
list of positions
More generally get the value (error) for a particular
position given a wcs and world coordinates | schmidt_funcs.py | get_ext | johnarban/arban | python | def get_ext(extmap, errmap, extwcs, ra, de):
'\n Get the extinction (errors) for a particular position or\n list of positions\n More generally get the value (error) for a particular\n position given a wcs and world coordinates\n '
try:
(xp, yp) = extwcs.all_world2pix(np.array([ra]).flatten(), np.array([de]).flatten(), 0)
except:
(xp, yp) = WCS(extwcs).all_world2pix(np.array([ra]).flatten(), np.array([de]).flatten(), 0)
ext = []
err = []
for i in range(len(np.array(xp))):
try:
ext.append(extmap[(yp[int(round(i))], xp[int(round(i))])])
if (errmap is not None):
err.append(errmap[(yp[int(round(i))], xp[int(round(i))])])
except IndexError:
ext.append(np.nan)
if (errmap is not None):
err.append(np.nan)
if (errmap is not None):
return (np.array(ext), np.array(err))
else:
return (np.array(ext), None) |
def pdf(values, bins):
'\n ** Normalized differential area function. **\n (statistical) probability density function\n normalized so that the integral is 1.\n The integral over a range is the\n probability that the value is within\n that range.\n\n Returns array of size len(bins)-1\n Plot versus bins[:-1]\n '
if hasattr(bins, '__getitem__'):
range = (np.nanmin(bins), np.nanmax(bins))
else:
range = None
(h, x) = np.histogram(values, bins=bins, range=range, density=False)
pdf = (h / (np.sum(h, dtype=float) * np.diff(x)))
return (pdf, avg(x)) | 1,778,812,167,562,346,800 | ** Normalized differential area function. **
(statistical) probability density function
normalized so that the integral is 1.
The integral over a range is the
probability that the value is within
that range.
Returns array of size len(bins)-1
Plot versus bins[:-1] | schmidt_funcs.py | pdf | johnarban/arban | python | def pdf(values, bins):
'\n ** Normalized differential area function. **\n (statistical) probability density function\n normalized so that the integral is 1.\n The integral over a range is the\n probability that the value is within\n that range.\n\n Returns array of size len(bins)-1\n Plot versus bins[:-1]\n '
if hasattr(bins, '__getitem__'):
range = (np.nanmin(bins), np.nanmax(bins))
else:
range = None
(h, x) = np.histogram(values, bins=bins, range=range, density=False)
pdf = (h / (np.sum(h, dtype=float) * np.diff(x)))
return (pdf, avg(x)) |
def pdf2(values, bins):
'\n The ~ PDF normalized so that\n the integral is equal to the\n total amount of a quantity.\n The integral over a range is the\n total amount within that range.\n\n Returns array of size len(bins)-1\n Plot versus bins[:-1]\n '
if hasattr(bins, '__getitem__'):
range = (np.nanmin(bins), np.nanmax(bins))
else:
range = None
(pdf, x) = np.histogram(values, bins=bins, range=range, density=False)
pdf = (pdf.astype(float) / np.diff(x))
return (pdf, avg(x)) | -9,115,071,158,060,257,000 | The ~ PDF normalized so that
the integral is equal to the
total amount of a quantity.
The integral over a range is the
total amount within that range.
Returns array of size len(bins)-1
Plot versus bins[:-1] | schmidt_funcs.py | pdf2 | johnarban/arban | python | def pdf2(values, bins):
'\n The ~ PDF normalized so that\n the integral is equal to the\n total amount of a quantity.\n The integral over a range is the\n total amount within that range.\n\n Returns array of size len(bins)-1\n Plot versus bins[:-1]\n '
if hasattr(bins, '__getitem__'):
range = (np.nanmin(bins), np.nanmax(bins))
else:
range = None
(pdf, x) = np.histogram(values, bins=bins, range=range, density=False)
pdf = (pdf.astype(float) / np.diff(x))
return (pdf, avg(x)) |
def cdf(values, bins):
'\n (statistical) cumulative distribution function\n Integral on [-inf, b] is the fraction below b.\n CDF is invariant to binning.\n This assumes you are using the entire range in the binning.\n Returns array of size len(bins)\n Plot versus bins[:-1]\n '
if hasattr(bins, '__getitem__'):
range = (np.nanmin(bins), np.nanmax(bins))
else:
range = None
(h, bins) = np.histogram(values, bins=bins, range=range, density=False)
c = np.cumsum((h / np.sum(h, dtype=float)))
return (np.append(0, c), bins) | -1,919,396,916,063,731,500 | (statistical) cumulative distribution function
Integral on [-inf, b] is the fraction below b.
CDF is invariant to binning.
This assumes you are using the entire range in the binning.
Returns array of size len(bins)
Plot versus bins[:-1] | schmidt_funcs.py | cdf | johnarban/arban | python | def cdf(values, bins):
'\n (statistical) cumulative distribution function\n Integral on [-inf, b] is the fraction below b.\n CDF is invariant to binning.\n This assumes you are using the entire range in the binning.\n Returns array of size len(bins)\n Plot versus bins[:-1]\n '
if hasattr(bins, '__getitem__'):
range = (np.nanmin(bins), np.nanmax(bins))
else:
range = None
(h, bins) = np.histogram(values, bins=bins, range=range, density=False)
c = np.cumsum((h / np.sum(h, dtype=float)))
return (np.append(0, c), bins) |
def cdf2(values, bins):
'\n # # Exclusively for area_function which needs to be unnormalized\n (statistical) cumulative distribution function\n Value at b is total amount below b.\n CDF is invariant to binning\n\n Plot versus bins[:-1]\n Not normalized to 1\n '
if hasattr(bins, '__getitem__'):
range = (np.nanmin(bins), np.nanmax(bins))
else:
range = None
(h, bins) = np.histogram(values, bins=bins, range=range, density=False)
c = np.cumsum(h).astype(float)
return (np.append(0.0, c), bins) | -3,582,090,697,934,871,000 | # # Exclusively for area_function which needs to be unnormalized
(statistical) cumulative distribution function
Value at b is total amount below b.
CDF is invariant to binning
Plot versus bins[:-1]
Not normalized to 1 | schmidt_funcs.py | cdf2 | johnarban/arban | python | def cdf2(values, bins):
'\n # # Exclusively for area_function which needs to be unnormalized\n (statistical) cumulative distribution function\n Value at b is total amount below b.\n CDF is invariant to binning\n\n Plot versus bins[:-1]\n Not normalized to 1\n '
if hasattr(bins, '__getitem__'):
range = (np.nanmin(bins), np.nanmax(bins))
else:
range = None
(h, bins) = np.histogram(values, bins=bins, range=range, density=False)
c = np.cumsum(h).astype(float)
return (np.append(0.0, c), bins) |
def area_function(extmap, bins):
'\n Complementary CDF for cdf2 (not normalized to 1)\n Value at b is total amount above b.\n '
(c, bins) = cdf2(extmap, bins)
return ((c.max() - c), bins) | 2,923,124,846,594,189,300 | Complementary CDF for cdf2 (not normalized to 1)
Value at b is total amount above b. | schmidt_funcs.py | area_function | johnarban/arban | python | def area_function(extmap, bins):
'\n Complementary CDF for cdf2 (not normalized to 1)\n Value at b is total amount above b.\n '
(c, bins) = cdf2(extmap, bins)
return ((c.max() - c), bins) |
def diff_area_function(extmap, bins, scale=1):
'\n See pdf2\n '
(s, bins) = area_function(extmap, bins)
dsdx = ((- np.diff(s)) / np.diff(bins))
return ((dsdx * scale), avg(bins)) | -843,582,300,145,384,000 | See pdf2 | schmidt_funcs.py | diff_area_function | johnarban/arban | python | def diff_area_function(extmap, bins, scale=1):
'\n \n '
(s, bins) = area_function(extmap, bins)
dsdx = ((- np.diff(s)) / np.diff(bins))
return ((dsdx * scale), avg(bins)) |
def log_diff_area_function(extmap, bins):
'\n See pdf2\n '
(s, bins) = diff_area_function(extmap, bins)
g = (s > 0)
dlnsdlnx = (np.diff(np.log(s[g])) / np.diff(np.log(bins[g])))
return (dlnsdlnx, avg(bins[g])) | 2,197,432,665,241,749,200 | See pdf2 | schmidt_funcs.py | log_diff_area_function | johnarban/arban | python | def log_diff_area_function(extmap, bins):
'\n \n '
(s, bins) = diff_area_function(extmap, bins)
g = (s > 0)
dlnsdlnx = (np.diff(np.log(s[g])) / np.diff(np.log(bins[g])))
return (dlnsdlnx, avg(bins[g])) |
def mass_function(values, bins, scale=1, aktomassd=183):
'\n M(>Ak), mass weighted complementary cdf\n '
if hasattr(bins, '__getitem__'):
range = (np.nanmin(bins), np.nanmax(bins))
else:
range = None
(h, bins) = np.histogram(values, bins=bins, range=range, density=False, weights=((values * aktomassd) * scale))
c = np.cumsum(h).astype(float)
return ((c.max() - c), bins) | -7,993,964,941,922,672,000 | M(>Ak), mass weighted complementary cdf | schmidt_funcs.py | mass_function | johnarban/arban | python | def mass_function(values, bins, scale=1, aktomassd=183):
'\n \n '
if hasattr(bins, '__getitem__'):
range = (np.nanmin(bins), np.nanmax(bins))
else:
range = None
(h, bins) = np.histogram(values, bins=bins, range=range, density=False, weights=((values * aktomassd) * scale))
c = np.cumsum(h).astype(float)
return ((c.max() - c), bins) |
def hist(values, bins, err=False, density=False, **kwargs):
'\n really just a wrapper for numpy.histogram\n '
if hasattr(bins, '__getitem__'):
range = (np.nanmin(bins), np.nanmax(bins))
else:
range = None
(hist, x) = np.histogram(values, bins=bins, range=range, density=density, **kwargs)
if ((err is None) or (err is False)):
return (hist.astype(np.float), avg(x))
else:
return (hist.astype(np.float), avg(x), np.sqrt(hist)) | -5,347,693,388,444,016,000 | really just a wrapper for numpy.histogram | schmidt_funcs.py | hist | johnarban/arban | python | def hist(values, bins, err=False, density=False, **kwargs):
'\n \n '
if hasattr(bins, '__getitem__'):
range = (np.nanmin(bins), np.nanmax(bins))
else:
range = None
(hist, x) = np.histogram(values, bins=bins, range=range, density=density, **kwargs)
if ((err is None) or (err is False)):
return (hist.astype(np.float), avg(x))
else:
return (hist.astype(np.float), avg(x), np.sqrt(hist)) |
def bootstrap(X, X_err=None, n=None, smooth=False):
'\n (smooth) bootstrap\n bootstrap(X,Xerr,n,smooth=True)\n X : array to be resampled\n X_err [optional]: errors to perturb data for smooth bootstrap\n only provide if doing smooth bootstrapping\n n : number of samples. Default - len(X)\n smooth: optionally use smooth bootstrapping.\n will be set to False if no X_err is provided\n '
if (X_err is None):
smooth = False
if (n is None):
n = len(X)
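# draw n indices with replacement; for the smooth bootstrap each draw is then perturbed by its own error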
resample_i = np.random.randint(0, len(X), size=(n,))
X_resample = np.asarray(X)[resample_i]
if smooth:
X_resample = np.random.normal(X_resample, np.asarray(X_err)[resample_i])
return X_resample | -7,011,435,233,662,548,000 | (smooth) bootstrap
bootstrap(X,Xerr,n,smooth=True)
X : array to be resampled
X_err [optional]: errors to perturb data for smooth bootstrap
only provide if doing smooth bootstrapping
n : number of samples. Default - len(X)
smooth: optionally use smooth bootstrapping.
will be set to False if no X_err is provided | schmidt_funcs.py | bootstrap | johnarban/arban | python | def bootstrap(X, X_err=None, n=None, smooth=False):
'\n (smooth) bootstrap\n bootstrap(X,Xerr,n,smooth=True)\n X : array to be resampled\n X_err [optional]: errors to perturb data for smooth bootstrap\n only provide if doing smooth bootstrapping\n n : number of samples. Default - len(X)\n smooth: optionally use smooth bootstrapping.\n will be set to False if no X_err is provided\n '
if (X_err is None):
smooth = False
if (n is None):
n = len(X)
resample_i = np.random.randint(0, len(X), size=(n,))
X_resample = np.asarray(X)[resample_i]
if smooth:
X_resample = np.random.normal(X_resample, np.asarray(X_err)[resample_i])
return X_resample |
def alpha_ML(data, xmin, xmax):
'\n uses maximum likelihood estimation\n to determine the power-law index and its error\n From Clauset et al. 2010\n '
data = data[np.isfinite(data)]
data = data[((data >= xmin) & (data <= xmax))]
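# maximum-likelihood estimator of the power-law index (Clauset et al. 2010): alpha = 1 + N / sum(ln(data / xmin))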
alpha = (1 + (len(data) * (np.sum(np.log((data / xmin))) ** (- 1))))
error = ((alpha - 1) / np.sqrt(len(data)))
N = len(data)
loglike = (((N * np.log((alpha - 1))) - (N * np.log(xmin))) - (alpha * np.sum(np.log((data / xmin)))))
return (alpha, error, loglike, xmin, xmax) | -285,406,338,309,995,040 | uses maximum likelihood estimation
to determine the power-law index and its error
From Clauset et al. 2010 | schmidt_funcs.py | alpha_ML | johnarban/arban | python | def alpha_ML(data, xmin, xmax):
'\n uses maximum likelihood estimation\n to determine the power-law index and its error\n From Clauset et al. 2010\n '
data = data[np.isfinite(data)]
data = data[((data >= xmin) & (data <= xmax))]
alpha = (1 + (len(data) * (np.sum(np.log((data / xmin))) ** (- 1))))
error = ((alpha - 1) / np.sqrt(len(data)))
N = len(data)
loglike = (((N * np.log((alpha - 1))) - (N * np.log(xmin))) - (alpha * np.sum(np.log((data / xmin)))))
return (alpha, error, loglike, xmin, xmax) |
def surfd(X, Xmap, bins, Xerr=None, Xmaperr=None, boot=False, scale=1.0, return_err=False, smooth=False):
'\n call: surfd(X, map, bins,\n xerr = None, merr = None, scale = 1.)\n calculates H(X)/H(M) = Nx pdf(x) dx / Nm pdf(m) dm ; dm = dx\n so it is independent of whether dx or dlog(x)\n '
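# surface density as a ratio of histograms: sample counts divided by the (scaled) counts of map values in the same bins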
if boot:
n = np.histogram(bootstrap(X, Xerr, smooth=True), bins=bins, range=(bins.min(), bins.max()))[0]
s = (np.histogram(bootstrap(Xmap, Xmaperr, smooth=True), bins=bins, range=(bins.min(), bins.max()))[0] * scale)
else:
n = np.histogram(X, bins=bins, range=(bins.min(), bins.max()))[0]
s = (np.histogram(Xmap, bins=bins, range=(bins.min(), bins.max()))[0] * scale)
if (not return_err):
return (n / s)
else:
return ((n / s), ((n / s) * np.sqrt(((1.0 / n) - (scale / s))))) | 8,136,631,929,769,016,000 | call: surfd(X, map, bins,
xerr = None, merr = None, scale = 1.)
calculates H(X)/H(M) = Nx pdf(x) dx / Nm pdf(m) dm ; dm = dx
so it is independent of whether dx or dlog(x) | schmidt_funcs.py | surfd | johnarban/arban | python | def surfd(X, Xmap, bins, Xerr=None, Xmaperr=None, boot=False, scale=1.0, return_err=False, smooth=False):
'\n call: surfd(X, map, bins,\n xerr = None, merr = None, scale = 1.)\n calculates H(X)/H(M) = Nx pdf(x) dx / Nm pdf(m) dm ; dm = dx\n so it is independent of whether dx or dlog(x)\n '
if boot:
n = np.histogram(bootstrap(X, Xerr, smooth=True), bins=bins, range=(bins.min(), bins.max()))[0]
s = (np.histogram(bootstrap(Xmap, Xmaperr, smooth=True), bins=bins, range=(bins.min(), bins.max()))[0] * scale)
else:
n = np.histogram(X, bins=bins, range=(bins.min(), bins.max()))[0]
s = (np.histogram(Xmap, bins=bins, range=(bins.min(), bins.max()))[0] * scale)
if (not return_err):
return (n / s)
else:
return ((n / s), ((n / s) * np.sqrt(((1.0 / n) - (scale / s))))) |
def alpha(y, x, err=None, return_kappa=False, cov=False):
'\n this returns -1*alpha, and optionally kappa and errors\n '
a1 = set(np.nonzero(np.multiply(x, y))[0])
a2 = set(np.where(np.isfinite(np.add(x, y, err)))[0])
a = np.asarray(list((a1 & a2)))
y = np.log(y[a])
x = np.log(x[a])
if (err is None):
(p, covar) = np.polyfit(x, y, 1, cov=True)
(m, b) = p
(me, be) = np.sqrt(np.sum((covar * [[1, 0], [0, 1]]), axis=1))
(me, be)
else:
err = err[a]
err = (err / y)
(p, covar) = np.polyfit(x, y, 1, w=(1.0 / (err ** 2)), cov=True)
(m, b) = p
(me, be) = np.sqrt(np.sum((covar * [[1, 0], [0, 1]]), axis=1))
(me, be)
if return_kappa:
if cov:
return (m, np.exp(b), me, be)
else:
return (m, np.exp(b))
elif cov:
return (m, me)
else:
return m | 732,717,894,788,148,200 | this returns -1*alpha, and optionally kappa and errors | schmidt_funcs.py | alpha | johnarban/arban | python | def alpha(y, x, err=None, return_kappa=False, cov=False):
'\n \n '
a1 = set(np.nonzero(np.multiply(x, y))[0])
a2 = set(np.where(np.isfinite(np.add(x, y, err)))[0])
a = np.asarray(list((a1 & a2)))
y = np.log(y[a])
x = np.log(x[a])
if (err is None):
(p, covar) = np.polyfit(x, y, 1, cov=True)
(m, b) = p
(me, be) = np.sqrt(np.sum((covar * [[1, 0], [0, 1]]), axis=1))
(me, be)
else:
err = err[a]
err = (err / y)
(p, covar) = np.polyfit(x, y, 1, w=(1.0 / (err ** 2)), cov=True)
(m, b) = p
(me, be) = np.sqrt(np.sum((covar * [[1, 0], [0, 1]]), axis=1))
(me, be)
if return_kappa:
if cov:
return (m, np.exp(b), me, be)
else:
return (m, np.exp(b))
elif cov:
return (m, me)
else:
return m |
def schmidt_law(Ak, theta):
'\n schmidt_law(Ak,(beta,kappa))\n beta is the power law index (same as alpha)\n '
if (len(theta) == 2):
(beta, kappa) = theta
return (kappa * (Ak ** beta))
elif (len(theta) == 3):
(beta, kappa, Ak0) = theta
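# three-parameter form: a power law with a step-function threshold at Ak0 (zero below it)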
sfr = ((Heaviside((Ak - Ak0)) * kappa) * (Ak ** beta))
sfr[(Ak < Ak0)] = 0
return sfr | 6,937,067,721,059,908,000 | schmidt_law(Ak,(beta,kappa))
beta is the power law index (same as alpha) | schmidt_funcs.py | schmidt_law | johnarban/arban | python | def schmidt_law(Ak, theta):
'\n schmidt_law(Ak,(beta,kappa))\n beta is the power law index (same as alpha)\n '
if (len(theta) == 2):
(beta, kappa) = theta
return (kappa * (Ak ** beta))
elif (len(theta) == 3):
(beta, kappa, Ak0) = theta
sfr = ((Heaviside((Ak - Ak0)) * kappa) * (Ak ** beta))
sfr[(Ak < Ak0)] = 0
return sfr |
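# illustrative use (not from the original source; parameter values are made up):
#   schmidt_law(ak, (2.0, 1.3e-3))       -> 1.3e-3 * ak**2.0 for every element of ak
#   schmidt_law(ak, (2.0, 1.3e-3, 0.8))  -> the same, but set to 0 wherever ak < 0.8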
def emcee_schmidt(x, y, yerr, pos=None, pose=None, nwalkers=None, nsteps=None, burnin=200, verbose=True):
'\n emcee_schmidt provides a convenient wrapper for fitting the schmidt law\n to binned x,log(y) data. Generally, it fits a normalization and a slope\n '
def model(x, theta):
'\n theta = (beta, kappa)\n '
return np.log(schmidt_law(x, theta))
def lnlike(theta, x, y, yerr):
mod = model(x, theta)
inv_sigma2 = (1 / (yerr ** 2))
return ((- 0.5) * np.sum((((y - mod) ** 2) * inv_sigma2)))
def lnprior(theta):
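# flat (uniform) priors: 0 <= beta <= 6, kappa >= 0, and 0 < Ak0 <= 5 when a threshold is fitted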
if (len(theta) == 3):
(beta, kappa, Ak0) = theta
c3 = (0.0 < Ak0 <= 5.0)
c4 = True
else:
(beta, kappa) = theta
c3 = True
c4 = True
c1 = (0 <= beta <= 6)
c2 = (0 <= kappa)
if (c1 and c2 and c3 and c4):
return 0.0
return (- np.inf)
def lnprob(theta, x, y, yerr):
lp = lnprior(theta)
if (not np.isfinite(lp)):
return (- np.inf)
return (lp + lnlike(theta, x, y, yerr))
(ndim, nwalkers) = (len(pos), nwalkers)
pos = [(np.array(pos) + ((np.array(pose) * 0.5) * (0.5 - np.random.rand(ndim)))) for i in range(nwalkers)]
sampler = emcee.EnsembleSampler(nwalkers, ndim, lnprob, args=(x, y, yerr))
sampler.run_mcmc(pos, nsteps)
samples = sampler.chain[:, burnin:, :].reshape(((- 1), sampler.ndim))
theta_mcmc = np.percentile(samples, [16, 50, 84], axis=0).T
if verbose:
print(sampler.acor)
if verbose:
for (i, item) in enumerate(theta_mcmc):
j = ['beta', 'kappa', 'A_{K,0}', 'A_{K,f}']
inserts = (j[i], item[1], (item[2] - item[1]), (item[1] - item[0]))
print(('%s = %0.2f (+%0.2f,-%0.2f)' % inserts))
return (sampler, np.median(samples, axis=0), np.std(samples, axis=0)) | 3,047,723,245,439,450,000 | emcee_schmidt provides a convenient wrapper for fitting the schmidt law
to binned x,log(y) data. Generally, it fits a normalization and a slope | schmidt_funcs.py | emcee_schmidt | johnarban/arban | python | def emcee_schmidt(x, y, yerr, pos=None, pose=None, nwalkers=None, nsteps=None, burnin=200, verbose=True):
'\n emcee_schmidt provides a convenient wrapper for fitting the schmidt law\n to binned x,log(y) data. Generally, it fits a normalization and a slope\n '
def model(x, theta):
'\n theta = (beta, kappa)\n '
return np.log(schmidt_law(x, theta))
def lnlike(theta, x, y, yerr):
mod = model(x, theta)
inv_sigma2 = (1 / (yerr ** 2))
return ((- 0.5) * np.sum((((y - mod) ** 2) * inv_sigma2)))
def lnprior(theta):
if (len(theta) == 3):
(beta, kappa, Ak0) = theta
c3 = (0.0 < Ak0 <= 5.0)
c4 = True
else:
(beta, kappa) = theta
c3 = True
c4 = True
c1 = (0 <= beta <= 6)
c2 = (0 <= kappa)
if (c1 and c2 and c3 and c4):
return 0.0
return (- np.inf)
def lnprob(theta, x, y, yerr):
lp = lnprior(theta)
if (not np.isfinite(lp)):
return (- np.inf)
return (lp + lnlike(theta, x, y, yerr))
(ndim, nwalkers) = (len(pos), nwalkers)
pos = [(np.array(pos) + ((np.array(pose) * 0.5) * (0.5 - np.random.rand(ndim)))) for i in range(nwalkers)]
sampler = emcee.EnsembleSampler(nwalkers, ndim, lnprob, args=(x, y, yerr))
sampler.run_mcmc(pos, nsteps)
samples = sampler.chain[:, burnin:, :].reshape(((- 1), sampler.ndim))
theta_mcmc = np.percentile(samples, [16, 50, 84], axis=0).T
if verbose:
print(sampler.acor)
if verbose:
for (i, item) in enumerate(theta_mcmc):
j = ['beta', 'kappa', 'A_{K,0}', 'A_{K,f}']
inserts = (j[i], item[1], (item[2] - item[1]), (item[1] - item[0]))
print(('%s = %0.2f (+%0.2f,-%0.2f)' % inserts))
return (sampler, np.median(samples, axis=0), np.std(samples, axis=0)) |
def fit(bins, samp, samperr, maps, mapserr, scale=1.0, sampler=None, log=False, pos=None, pose=None, nwalkers=100, nsteps=10000.0, boot=1000, burnin=200, threshold=False, threshold2=False, verbose=True):
"\n # # # A Schmidt Law fitting Function using EMCEE by D.F.M.\n fit(bins, samp, samperr, maps, mapserr, scale=1.,\n pos=None, pose=None, nwalkers=100, nsteps=1e4)\n bins: bin edges for binning data (I know it's bad to bin)\n samp : values for your sample\n samperr : errors on values for you sample\n maps: map of values from which you drew your sample\n mapserr: error on maps...\n pos : initial location of ball of walkers\n pose : initial spread of walkers\n "
x = avg(bins)
(y, yerr) = surfd(samp, maps, bins, scale=scale, return_err=True)
bins2 = shift_bins(bins, 0.5)
bin
x2 = avg(bins2)
(y2, yerr2) = surfd(samp, maps, bins2, scale=scale, return_err=True)
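# merge the original and half-bin-shifted binnings into a single sorted set of (x, y, yerr) points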
concatx = np.concatenate((x, x2))
concaty = np.concatenate((y, y2))
concatyerr = np.concatenate((yerr, yerr2))
srt = np.argsort(concatx)
x = concatx[srt]
y = concaty[srt]
yerr = concatyerr[srt]
nonzero = ((np.isfinite((1.0 / y)) & np.isfinite(yerr)) & np.isfinite((1.0 / yerr)))
y = y[nonzero]
yerr = yerr[nonzero]
x = x[nonzero]
init = alpha(y, x, return_kappa=True, cov=True)
if (pos is None):
pos = init[:2]
if (pose is None):
if np.isnan((init[2] + init[3])):
pose = (1, 1)
else:
pose = (init[2], init[3])
if (threshold | threshold2):
pos = (pos + (0.4,))
pose = (pose + (0.2,))
if threshold2:
pos = (pos + (8.0,))
pose = (pose + (0.5,))
pos = np.asarray(pos)
pose = (0.1 * pos)
if (sampler is None):
if verbose:
print('Sampler autocorrelation times . . .')
(sampler, theta, theta_std) = emcee_schmidt(x, np.log(y), (yerr / y), pos=pos, pose=pose, nwalkers=nwalkers, nsteps=nsteps, burnin=burnin, verbose=verbose)
else:
print((("Next time don't give me a " + str(type(sampler))) + '.'))
try:
return (sampler, x, y, yerr, theta, theta_std)
except:
return (sampler, x, y, yerr) | 1,029,633,922,976,774,800 | # # # A Schmidt Law fitting Function using EMCEE by D.F.M.
fit(bins, samp, samperr, maps, mapserr, scale=1.,
pos=None, pose=None, nwalkers=100, nsteps=1e4)
bins: bin edges for binning data (I know it's bad to bin)
samp : values for your sample
samperr : errors on values for your sample
maps: map of values from which you drew your sample
mapserr: error on maps...
pos : initial location of ball of walkers
pose : initial spread of walkers | schmidt_funcs.py | fit | johnarban/arban | python | def fit(bins, samp, samperr, maps, mapserr, scale=1.0, sampler=None, log=False, pos=None, pose=None, nwalkers=100, nsteps=10000.0, boot=1000, burnin=200, threshold=False, threshold2=False, verbose=True):
"\n # # # A Schmidt Law fitting Function using EMCEE by D.F.M.\n fit(bins, samp, samperr, maps, mapserr, scale=1.,\n pos=None, pose=None, nwalkers=100, nsteps=1e4)\n bins: bin edges for binning data (I know it's bad to bin)\n samp : values for your sample\n samperr : errors on values for you sample\n maps: map of values from which you drew your sample\n mapserr: error on maps...\n pos : initial location of ball of walkers\n pose : initial spread of walkers\n "
x = avg(bins)
(y, yerr) = surfd(samp, maps, bins, scale=scale, return_err=True)
bins2 = shift_bins(bins, 0.5)
bin
x2 = avg(bins2)
(y2, yerr2) = surfd(samp, maps, bins2, scale=scale, return_err=True)
concatx = np.concatenate((x, x2))
concaty = np.concatenate((y, y2))
concatyerr = np.concatenate((yerr, yerr2))
srt = np.argsort(concatx)
x = concatx[srt]
y = concaty[srt]
yerr = concatyerr[srt]
nonzero = ((np.isfinite((1.0 / y)) & np.isfinite(yerr)) & np.isfinite((1.0 / yerr)))
y = y[nonzero]
yerr = yerr[nonzero]
x = x[nonzero]
init = alpha(y, x, return_kappa=True, cov=True)
if (pos is None):
pos = init[:2]
if (pose is None):
if np.isnan((init[2] + init[3])):
pose = (1, 1)
else:
pose = (init[2], init[3])
if (threshold | threshold2):
pos = (pos + (0.4,))
pose = (pose + (0.2,))
if threshold2:
pos = (pos + (8.0,))
pose = (pose + (0.5,))
pos = np.asarray(pos)
pose = (0.1 * pos)
if (sampler is None):
if verbose:
print('Sampler autocorrelation times . . .')
(sampler, theta, theta_std) = emcee_schmidt(x, np.log(y), (yerr / y), pos=pos, pose=pose, nwalkers=nwalkers, nsteps=nsteps, burnin=burnin, verbose=verbose)
else:
print((("Next time don't give me a " + str(type(sampler))) + '.'))
try:
return (sampler, x, y, yerr, theta, theta_std)
except:
return (sampler, x, y, yerr) |
def schmidt_results_plots(sampler, model, x, y, yerr, burnin=200, akmap=None, bins=None, scale=None, triangle_plot=True):
'\n model: should pass schmidt_law()\n '
try:
mpl.style.use('john')
except:
None
if hasattr(sampler, '__getitem__'):
chain = sampler
dim = chain.shape[(- 1)]
else:
chain = sampler.chain
dim = sampler.dim
samples = chain[:, burnin:, :].reshape(((- 1), dim))
theta_mcmc = np.percentile(samples, [16, 50, 84], axis=0).T
n_params = len(theta_mcmc[:, 1])
for (i, item) in enumerate(theta_mcmc):
j = ['beta', 'kappa', 'A_{K,0}', 'A_{K,f}']
inserts = (j[i], item[1], (item[2] - item[1]), (item[1] - item[0]))
print(('%s = %0.2f (+%0.2f,-%0.2f)' % inserts))
if triangle_plot:
if (n_params == 3):
labels = ['beta', 'kappa', 'A_{K,0}']
elif (n_params == 4):
labels = ['beta', 'kappa', 'A_{K,0}', 'A_{K,f}']
else:
labels = ['beta', 'kappa']
_ = triangle.corner(samples, labels=labels, truths=theta_mcmc[:, 1], quantiles=[0.16, 0.84], verbose=False)
xln = np.logspace(np.log10((x.min() * 0.5)), np.log10((x.max() * 2.0)), 100)
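# evaluate the fitted Schmidt law for every posterior sample on a log-spaced grid to build the shaded confidence bands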
smlaw_samps = np.asarray([schmidt_law(xln, samp) for samp in samples])
percent = (lambda x: np.nanpercentile(smlaw_samps, x, interpolation='linear', axis=0))
fig = plt.figure()
plt.plot(xln, percent(50), 'k')
plt.errorbar(x, y, yerr, fmt='rs', alpha=0.7, mec='none')
plt.legend(['Median', 'Data'], loc='upper left', fontsize=12)
plt.fill_between(xln, percent(1), percent(99), color='0.9')
plt.fill_between(xln, percent(2), percent(98), color='0.75')
plt.fill_between(xln, percent(16), percent(84), color='0.5')
plt.loglog(nonposy='clip')
return plt.gca() | -3,195,698,405,562,711,600 | model: should pass schmidt_law() | schmidt_funcs.py | schmidt_results_plots | johnarban/arban | python | def schmidt_results_plots(sampler, model, x, y, yerr, burnin=200, akmap=None, bins=None, scale=None, triangle_plot=True):
'\n \n '
try:
mpl.style.use('john')
except:
None
if hasattr(sampler, '__getitem__'):
chain = sampler
dim = chain.shape[(- 1)]
else:
chain = sampler.chain
dim = sampler.dim
samples = chain[:, burnin:, :].reshape(((- 1), dim))
theta_mcmc = np.percentile(samples, [16, 50, 84], axis=0).T
n_params = len(theta_mcmc[:, 1])
for (i, item) in enumerate(theta_mcmc):
j = ['beta', 'kappa', 'A_{K,0}', 'A_{K,f}']
inserts = (j[i], item[1], (item[2] - item[1]), (item[1] - item[0]))
print(('%s = %0.2f (+%0.2f,-%0.2f)' % inserts))
if triangle_plot:
if (n_params == 3):
labels = ['beta', 'kappa', 'A_{K,0}']
elif (n_params == 4):
labels = ['beta', 'kappa', 'A_{K,0}', 'A_{K,f}']
else:
labels = ['beta', 'kappa']
_ = triangle.corner(samples, labels=labels, truths=theta_mcmc[:, 1], quantiles=[0.16, 0.84], verbose=False)
xln = np.logspace(np.log10((x.min() * 0.5)), np.log10((x.max() * 2.0)), 100)
smlaw_samps = np.asarray([schmidt_law(xln, samp) for samp in samples])
percent = (lambda x: np.nanpercentile(smlaw_samps, x, interpolation='linear', axis=0))
fig = plt.figure()
plt.plot(xln, percent(50), 'k')
plt.errorbar(x, y, yerr, fmt='rs', alpha=0.7, mec='none')
plt.legend(['Median', 'Data'], loc='upper left', fontsize=12)
plt.fill_between(xln, percent(1), percent(99), color='0.9')
plt.fill_between(xln, percent(2), percent(98), color='0.75')
plt.fill_between(xln, percent(16), percent(84), color='0.5')
plt.loglog(nonposy='clip')
return plt.gca() |
def plot_walkers(sampler, limits=None, bad=None):
'\n sampler : emcee Sampler class\n '
if hasattr(sampler, '__getitem__'):
chain = sampler
ndim = chain.shape[(- 1)]
else:
chain = sampler.chain
ndim = sampler.ndim
fig = plt.figure(figsize=((8 * ndim), (4 * ndim)))
if hasattr(limits, '__getitem__'):
limits += ([None] * (3 - len(limits)))
slices = slice(limits[0], limits[1], limits[2])
else:
slices = slice(None, limits, None)
for (w, walk) in enumerate(chain[:, slices, :]):
if (bad is None):
color = 'k'
elif bad[w]:
color = 'r'
else:
color = 'k'
for (p, param) in enumerate(walk.T):
ax = plt.subplot(ndim, 1, (p + 1))
ax.plot(param, color, alpha=0.75, lw=0.75)
plt.tight_layout()
return fig | -7,042,728,002,737,024,000 | sampler : emcee Sampler class | schmidt_funcs.py | plot_walkers | johnarban/arban | python | def plot_walkers(sampler, limits=None, bad=None):
'\n \n '
if hasattr(sampler, '__getitem__'):
chain = sampler
ndim = chain.shape[(- 1)]
else:
chain = sampler.chain
ndim = sampler.ndim
fig = plt.figure(figsize=((8 * ndim), (4 * ndim)))
if hasattr(limits, '__getitem__'):
limits += ([None] * (3 - len(limits)))
slices = slice(limits[0], limits[1], limits[2])
else:
slices = slice(None, limits, None)
for (w, walk) in enumerate(chain[:, slices, :]):
if (bad is None):
color = 'k'
elif bad[w]:
color = 'r'
else:
color = 'k'
for (p, param) in enumerate(walk.T):
ax = plt.subplot(ndim, 1, (p + 1))
ax.plot(param, color, alpha=0.75, lw=0.75)
plt.tight_layout()
return fig |
def model(x, theta):
'\n theta = (beta, kappa)\n '
return np.log(schmidt_law(x, theta)) | -4,696,528,238,367,610,000 | theta = (beta, kappa) | schmidt_funcs.py | model | johnarban/arban | python | def model(x, theta):
'\n \n '
return np.log(schmidt_law(x, theta)) |
def __init__(self):
'Initialize vocab dictionaries.'
self._words = []
self._counts = []
self._total_count = 0 | -7,167,729,922,947,978,000 | Initialize vocab dictionaries. | word2vec/estimators/word2vec.py | __init__ | akb89/word2vec | python | def __init__(self):
self._words = []
self._counts = []
self._total_count = 0 |
@property
def vocab_size(self):
'Return the number of items in vocabulary.\n\n Since we use len(word_freq_dict) as the default index for UKN in\n the index_table, we have to add 1 to the length\n '
return (len(self._words) + 1) | -396,653,238,297,316,860 | Return the number of items in vocabulary.
Since we use len(word_freq_dict) as the default index for UKN in
the index_table, we have to add 1 to the length | word2vec/estimators/word2vec.py | vocab_size | akb89/word2vec | python | @property
def vocab_size(self):
'Return the number of items in vocabulary.\n\n Since we use len(word_freq_dict) as the default index for UKN in\n the index_table, we have to add 1 to the length\n '
return (len(self._words) + 1) |
def build_vocab(self, data_filepath, vocab_filepath, min_count):
'Create vocabulary-related data.'
logger.info('Building vocabulary from file {}'.format(data_filepath))
logger.info('Loading word counts...')
if (self.vocab_size > 1):
logger.warning("This instance of W2V's vocabulary does not seem to be empty. Erasing previously stored vocab...")
(self._words, self._counts, self._total_count) = ([], [], 0)
word_count_dict = defaultdict(int)
with open(data_filepath, 'r') as data_stream:
for line in data_stream:
for word in line.strip().split():
word_count_dict[word] += 1
logger.info('Saving word frequencies to file: {}'.format(vocab_filepath))
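# every word count is written to the vocab file, but only words with count >= min_count are kept in memory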
with open(vocab_filepath, 'w') as vocab_stream:
for (word, count) in sorted(word_count_dict.items(), key=(lambda x: x[1]), reverse=True):
print('{}\t{}'.format(word, count), file=vocab_stream)
if (count >= min_count):
self._words.append(word)
self._counts.append(count)
self._total_count += count | -3,329,502,410,839,565,300 | Create vocabulary-related data. | word2vec/estimators/word2vec.py | build_vocab | akb89/word2vec | python | def build_vocab(self, data_filepath, vocab_filepath, min_count):
logger.info('Building vocabulary from file {}'.format(data_filepath))
logger.info('Loading word counts...')
if (self.vocab_size > 1):
logger.warning("This instance of W2V's vocabulary does not seem to be empty. Erasing previously stored vocab...")
(self._words, self._counts, self._total_count) = ([], [], 0)
word_count_dict = defaultdict(int)
with open(data_filepath, 'r') as data_stream:
for line in data_stream:
for word in line.strip().split():
word_count_dict[word] += 1
logger.info('Saving word frequencies to file: {}'.format(vocab_filepath))
with open(vocab_filepath, 'w') as vocab_stream:
for (word, count) in sorted(word_count_dict.items(), key=(lambda x: x[1]), reverse=True):
print('{}\t{}'.format(word, count), file=vocab_stream)
if (count >= min_count):
self._words.append(word)
self._counts.append(count)
self._total_count += count |
def load_vocab(self, vocab_filepath, min_count):
'Load a previously saved vocabulary file.'
logger.info('Loading word counts from file {}'.format(vocab_filepath))
(self._words, self._counts, self._total_count) = ([], [], 0)
with open(vocab_filepath, 'r', encoding='UTF-8') as vocab_stream:
for line in vocab_stream:
word_count = line.strip().split('\t', 1)
(word, count) = (word_count[0], int(word_count[1]))
if (count >= min_count):
self._words.append(word)
self._counts.append(count)
self._total_count += count
logger.info('Done loading word counts') | -8,126,846,170,437,619,000 | Load a previously saved vocabulary file. | word2vec/estimators/word2vec.py | load_vocab | akb89/word2vec | python | def load_vocab(self, vocab_filepath, min_count):
logger.info('Loading word counts from file {}'.format(vocab_filepath))
(self._words, self._counts, self._total_count) = ([], [], 0)
with open(vocab_filepath, 'r', encoding='UTF-8') as vocab_stream:
for line in vocab_stream:
word_count = line.strip().split('\t', 1)
(word, count) = (word_count[0], int(word_count[1]))
if (count >= min_count):
self._words.append(word)
self._counts.append(count)
self._total_count += count
logger.info('Done loading word counts') |
def train(self, train_mode, training_data_filepath, model_dirpath, batch_size, embedding_size, num_neg_samples, learning_rate, window_size, num_epochs, sampling_rate, p_num_threads, t_num_threads, shuffling_buffer_size, save_summary_steps, save_checkpoints_steps, keep_checkpoint_max, log_step_count_steps, debug, debug_port, xla):
'Train Word2Vec.'
if (self.vocab_size == 1):
raise Exception('You need to build or load a vocabulary before training word2vec')
if (train_mode not in ('cbow', 'skipgram')):
raise Exception("Unsupported train_mode '{}'".format(train_mode))
sess_config = tf.compat.v1.ConfigProto(log_device_placement=True)
sess_config.intra_op_parallelism_threads = t_num_threads
sess_config.inter_op_parallelism_threads = t_num_threads
run_config = tf.estimator.RunConfig(session_config=sess_config, save_summary_steps=save_summary_steps, save_checkpoints_steps=save_checkpoints_steps, keep_checkpoint_max=keep_checkpoint_max, log_step_count_steps=log_step_count_steps)
estimator = tf.estimator.Estimator(model_fn=w2v_model.model, model_dir=model_dirpath, config=run_config, params={'mode': train_mode, 'vocab_size': self.vocab_size, 'batch_size': batch_size, 'embedding_size': embedding_size, 'num_neg_samples': num_neg_samples, 'learning_rate': learning_rate, 'words': self._words, 'p_num_threads': p_num_threads, 'xla': xla, 'men': MEN(os.path.join(os.path.dirname(os.path.dirname(__file__)), 'resources', 'MEN_dataset_natural_form_full'))})
tf.compat.v1.disable_eager_execution()
if debug:
raise Exception('Unsupported parameter: waiting for the TF team to release v2 equivalents for TensorBoardDebugHook')
hooks = [tf.estimator.ProfilerHook(save_steps=save_summary_steps, show_dataflow=True, show_memory=True, output_dir=model_dirpath)]
estimator.train(input_fn=(lambda : datasets_utils.get_w2v_train_dataset(training_data_filepath, train_mode, self._words, self._counts, self._total_count, window_size, sampling_rate, batch_size, num_epochs, p_num_threads, shuffling_buffer_size)), hooks=hooks) | -3,904,962,833,964,570,600 | Train Word2Vec. | word2vec/estimators/word2vec.py | train | akb89/word2vec | python | def train(self, train_mode, training_data_filepath, model_dirpath, batch_size, embedding_size, num_neg_samples, learning_rate, window_size, num_epochs, sampling_rate, p_num_threads, t_num_threads, shuffling_buffer_size, save_summary_steps, save_checkpoints_steps, keep_checkpoint_max, log_step_count_steps, debug, debug_port, xla):
if (self.vocab_size == 1):
raise Exception('You need to build or load a vocabulary before training word2vec')
if (train_mode not in ('cbow', 'skipgram')):
raise Exception("Unsupported train_mode '{}'".format(train_mode))
sess_config = tf.compat.v1.ConfigProto(log_device_placement=True)
sess_config.intra_op_parallelism_threads = t_num_threads
sess_config.inter_op_parallelism_threads = t_num_threads
run_config = tf.estimator.RunConfig(session_config=sess_config, save_summary_steps=save_summary_steps, save_checkpoints_steps=save_checkpoints_steps, keep_checkpoint_max=keep_checkpoint_max, log_step_count_steps=log_step_count_steps)
estimator = tf.estimator.Estimator(model_fn=w2v_model.model, model_dir=model_dirpath, config=run_config, params={'mode': train_mode, 'vocab_size': self.vocab_size, 'batch_size': batch_size, 'embedding_size': embedding_size, 'num_neg_samples': num_neg_samples, 'learning_rate': learning_rate, 'words': self._words, 'p_num_threads': p_num_threads, 'xla': xla, 'men': MEN(os.path.join(os.path.dirname(os.path.dirname(__file__)), 'resources', 'MEN_dataset_natural_form_full'))})
tf.compat.v1.disable_eager_execution()
if debug:
raise Exception('Unsupported parameter: waiting for the TF team to release v2 equivalents for TensorBoardDebugHook')
hooks = [tf.estimator.ProfilerHook(save_steps=save_summary_steps, show_dataflow=True, show_memory=True, output_dir=model_dirpath)]
estimator.train(input_fn=(lambda : datasets_utils.get_w2v_train_dataset(training_data_filepath, train_mode, self._words, self._counts, self._total_count, window_size, sampling_rate, batch_size, num_epochs, p_num_threads, shuffling_buffer_size)), hooks=hooks) |
def get_cocktail_irradiation(self):
'\n example cocktail.json\n\n {\n "chronology": "2016-06-01 17:00:00",\n "j": 4e-4,\n "j_err": 4e-9\n }\n\n :return:\n '
p = os.path.join(paths.meta_root, 'cocktail.json')
ret = dvc_load(p)
nret = {}
if ret:
lines = ['1.0, {}, {}'.format(ret['chronology'], ret['chronology'])]
c = Chronology.from_lines(lines)
nret['chronology'] = c
nret['flux'] = ufloat(ret['j'], ret['j_err'])
return nret | -2,350,478,959,779,339,000 | example cocktail.json
{
"chronology": "2016-06-01 17:00:00",
"j": 4e-4,
"j_err": 4e-9
}
:return: | pychron/dvc/meta_repo.py | get_cocktail_irradiation | UManPychron/pychron | python | def get_cocktail_irradiation(self):
'\n example cocktail.json\n\n {\n "chronology": "2016-06-01 17:00:00",\n "j": 4e-4,\n "j_err": 4e-9\n }\n\n :return:\n '
p = os.path.join(paths.meta_root, 'cocktail.json')
ret = dvc_load(p)
nret = {}
if ret:
lines = ['1.0, {}, {}'.format(ret['chronology'], ret['chronology'])]
c = Chronology.from_lines(lines)
nret['chronology'] = c
nret['flux'] = ufloat(ret['j'], ret['j_err'])
return nret |
def test_column_generation_dtalite():
' validation using DTALite '
print('start column generation using DTALite')
st = time()
mode = 1
iter_num = 20
column_update_num = 20
pg.perform_network_assignment_DTALite(mode, iter_num, column_update_num)
print(f'processing time of column generation: {(time() - st):.2f} s for {iter_num} assignment iterations and {column_update_num} iterations in column generation')
print('\npath finding results can be found in agent.csv') | -1,906,266,710,231,534,000 | validation using DTALite | tests/demo.py | test_column_generation_dtalite | FangTang999/Path4GMNS | python | def test_column_generation_dtalite():
' '
print('start column generation using DTALite')
st = time()
mode = 1
iter_num = 20
column_update_num = 20
pg.perform_network_assignment_DTALite(mode, iter_num, column_update_num)
print(f'processing time of column generation: {(time() - st):.2f} s for {iter_num} assignment iterations and {column_update_num} iterations in column generation')
print('\npath finding results can be found in agent.csv') |
def api_request(self, method_name, params):
' Make api request and return single wrapped object\n\n :param method_name: name of API methods\n :param params: dict-wrapped params for specific API call\n '
data = self.client.api_request(method_name, params)
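# dict responses with more than one key are wrapped value by value; single-result responses are unwrapped below via the class or method name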
if (isinstance(data, dict) and (len(data) > 1)):
for x in data.keys():
data[x] = self._get_return_object_type(data.get(x))
return data
class_name = to_snake(self.__class__.__name__.replace('Broker', ''))
if (class_name in data):
result_name = class_name
else:
result_name = method_name.split('/')[(- 1)]
if (result_name not in data):
return data
return self._get_return_object_type(data.get(result_name)) | -6,943,514,198,024,048,000 | Make api request and return single wrapped object
:param method_name: name of API methods
:param params: dict-wrapped params for specific API call | infoblox_netmri/api/broker/broker.py | api_request | IngmarVG-IB/infoblox-netmri | python | def api_request(self, method_name, params):
' Make api request and return single wrapped object\n\n :param method_name: name of API methods\n :param params: dict-wrapped params for specific API call\n '
data = self.client.api_request(method_name, params)
if (isinstance(data, dict) and (len(data) > 1)):
for x in data.keys():
data[x] = self._get_return_object_type(data.get(x))
return data
class_name = to_snake(self.__class__.__name__.replace('Broker', ))
if (class_name in data):
result_name = class_name
else:
result_name = method_name.split('/')[(- 1)]
if (result_name not in data):
return data
return self._get_return_object_type(data.get(result_name)) |
def api_mixed_request(self, method_name, params):
' Make api request and download a file and return\n JSON response or request status dictionary\n\n :param method_name: name of API methods\n :param params: dict-wrapped params for specific API call\n '
data = self.client.api_request(method_name, params, downloadable=True)
class_name = to_snake(self.__class__.__name__.replace('Broker', ''))
if (class_name in data):
result_name = class_name
else:
result_name = method_name.split('/')[(- 1)]
if (result_name not in data):
return data
return self._get_return_object_type(data.get(result_name)) | -3,617,585,501,267,358,000 | Make api request and download a file and return
JSON response or request status dictionary
:param method_name: name of API methods
:param params: dict-wrapped params for specific API call | infoblox_netmri/api/broker/broker.py | api_mixed_request | IngmarVG-IB/infoblox-netmri | python | def api_mixed_request(self, method_name, params):
' Make api request and download a file and return\n JSON response or request status dictionary\n\n :param method_name: name of API methods\n :param params: dict-wrapped params for specific API call\n '
data = self.client.api_request(method_name, params, downloadable=True)
class_name = to_snake(self.__class__.__name__.replace('Broker', ''))
if (class_name in data):
result_name = class_name
else:
result_name = method_name.split('/')[(- 1)]
if (result_name not in data):
return data
return self._get_return_object_type(data.get(result_name)) |
def api_list_request(self, method_name, params):
' Make api request and return list of wrapped objects\n\n :param method_name: name of API methods\n :param params: dict-wrapped params for specific API call\n '
data = self.client.api_request(method_name, params)
if (not data):
return None
try:
return [self._get_return_object_type(x) for x in data[self.controller]]
except KeyError:
print('Sorry, this method will be implemented in the future versions of NetMRI')
raise NotImplementedException(self.controller, method_name) | 6,639,086,436,260,162,000 | Make api request and return list of wrapped objects
:param method_name: name of API methods
:param params: dict-wrapped params for specific API call | infoblox_netmri/api/broker/broker.py | api_list_request | IngmarVG-IB/infoblox-netmri | python | def api_list_request(self, method_name, params):
' Make api request and return list of wrapped objects\n\n :param method_name: name of API methods\n :param params: dict-wrapped params for specific API call\n '
data = self.client.api_request(method_name, params)
if (not data):
return None
try:
return [self._get_return_object_type(x) for x in data[self.controller]]
except KeyError:
print('Sorry, this method will be implemented in the future versions of NetMRI')
raise NotImplementedException(self.controller, method_name) |
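Continuing the same hypothetical broker, a list endpoint would go through `api_list_request`, which unwraps `data[self.controller]` and wraps every element; the `index` method name and the usage lines are assumptions.

```python
class DeviceBroker(Broker):
    controller = 'devices'  # hypothetical controller, as in the earlier sketch

    def index(self, **params):
        # the response is expected to carry a list under data['devices'];
        # each element comes back as a wrapped *Remote model, or None if the response is empty
        return self.api_list_request(self._get_method_fullname('index'), params)


# usage sketch:
# for device in broker.index(limit=10) or []:
#     print(device)  # printing the wrapped object; specific attributes are not assumed here
```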
def _get_method_fullname(self, method):
' Returns full API method name using controller name\n\n **Input**\n :param method: method name\n :return: full API path\n '
return '{}/{}'.format(self.controller, method) | 2,949,533,357,265,416,700 | Returns full API method name using controller name
**Input**
:param method: method name
:return: full API path | infoblox_netmri/api/broker/broker.py | _get_method_fullname | IngmarVG-IB/infoblox-netmri | python | def _get_method_fullname(self, method):
' Returns full API method name using controller name\n\n **Input**\n :param method: method name\n :return: full API path\n '
return '{}/{}'.format(self.controller, method) |
def _get_return_object_type(self, data):
' Returns wrapped response which inherits from RemoteModel class\n\n :param data: API response data\n :return: RemoteModel child class\n '
if ((not data) or (type(data) != dict)):
return data
class_name = data.get('_class')
obj_class = locate(self._get_remote_class_name(class_name))
return obj_class(data, self.client) | 5,006,086,119,267,887,000 | Returns wrapped response which inherits from RemoteModel class
:param data: API response data
:return: RemoteModel child class | infoblox_netmri/api/broker/broker.py | _get_return_object_type | IngmarVG-IB/infoblox-netmri | python | def _get_return_object_type(self, data):
' Returns wrapped response which inherits from RemoteModel class\n\n :param data: API response data\n :return: RemoteModel child class\n '
if ((not data) or (type(data) != dict)):
return data
class_name = data.get('_class')
obj_class = locate(self._get_remote_class_name(class_name))
return obj_class(data, self.client) |
def _get_remote_class_name(self, name):
' Generate full path to specific RemoteModel instance\n\n :param name: name of model\n :return: full path for model\n '
return 'infoblox_netmri.api.remote.models.{pckg}_remote.{name}Remote'.format(pckg=to_snake(name), name=name) | -4,085,173,532,365,311,000 | Generate full path to specific RemoteModel instance
:param name: name of model
:return: full path for model | infoblox_netmri/api/broker/broker.py | _get_remote_class_name | IngmarVG-IB/infoblox-netmri | python | def _get_remote_class_name(self, name):
' Generate full path to specific RemoteModel instance\n\n :param name: name of model\n :return: full path for model\n '
return 'infoblox_netmri.api.remote.models.{pckg}_remote.{name}Remote'.format(pckg=to_snake(name), name=name) |
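Taken together, the last two records resolve a response's `_class` field into a remote-model class by dotted-path lookup; below is a rough trace under the assumption that `locate` is `pydoc.locate`, using `Device` purely as an illustrative class name.

```python
from pydoc import locate  # assumption: the broker's locate() is pydoc.locate

# illustrative trace for a response carrying "_class": "Device"
# to_snake('Device') -> 'device' (to_snake is supplied by the package's utilities)
dotted = 'infoblox_netmri.api.remote.models.{pckg}_remote.{name}Remote'.format(
    pckg='device', name='Device')
# dotted == 'infoblox_netmri.api.remote.models.device_remote.DeviceRemote'

obj_class = locate(dotted)  # the class object if importable, otherwise None
# _get_return_object_type would then return obj_class(data, self.client)
```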