index (int64, 0-10k) | blob_id (string, length 40) | step-1 (string, length 13-984k) | step-2 (string, length 6-1.23M, ⌀) | step-3 (string, length 15-1.34M, ⌀) | step-4 (string, length 30-1.34M, ⌀) | step-5 (string, length 64-1.2M, ⌀) | step-ids (sequence, length 1-5)
---|---|---|---|---|---|---|---
1,400 | 62601eca767800f00b461ef46d72bddc5cf75de0 | <mask token>
class ResnetEncoder(nn.HybridBlock):
<mask token>
def __init__(self, backbone, pretrained, num_input_images=1, root=os.
path.join(os.path.expanduser('~'), '.mxnet/models'), ctx=cpu(), **
kwargs):
super(ResnetEncoder, self).__init__()
self.num_ch_enc = np.array([64, 64, 128, 256, 512])
resnets = {'resnet18': resnet18_v1b, 'resnet34': resnet34_v1b,
'resnet50': resnet50_v1s, 'resnet101': resnet101_v1s,
'resnet152': resnet152_v1s}
num_layers = {'resnet18': 18, 'resnet34': 34, 'resnet50': 50,
'resnet101': 101, 'resnet152': 152}
if backbone not in resnets:
raise ValueError('{} is not a valid resnet'.format(backbone))
if num_input_images > 1:
self.encoder = resnets[backbone](pretrained=False, ctx=ctx, **
kwargs)
if pretrained:
filename = os.path.join(root,
'resnet%d_v%db_multiple_inputs.params' % (num_layers[
backbone], 1))
if not os.path.isfile(filename):
from ..model_store import get_model_file
loaded = mx.nd.load(get_model_file('resnet%d_v%db' % (
num_layers[backbone], 1), tag=pretrained, root=root))
loaded['conv1.weight'] = mx.nd.concat(*([loaded[
'conv1.weight']] * num_input_images), dim=1
) / num_input_images
mx.nd.save(filename, loaded)
self.encoder.load_parameters(filename, ctx=ctx)
from ...data import ImageNet1kAttr
attrib = ImageNet1kAttr()
self.encoder.synset = attrib.synset
self.encoder.classes = attrib.classes
self.encoder.classes_long = attrib.classes_long
else:
self.encoder = resnets[backbone](pretrained=pretrained, ctx=ctx,
**kwargs)
if backbone not in ('resnet18', 'resnet34'):
self.num_ch_enc[1:] *= 4
def hybrid_forward(self, F, input_image):
self.features = []
x = (input_image - 0.45) / 0.225
x = self.encoder.conv1(x)
x = self.encoder.bn1(x)
self.features.append(self.encoder.relu(x))
self.features.append(self.encoder.layer1(self.encoder.maxpool(self.
features[-1])))
self.features.append(self.encoder.layer2(self.features[-1]))
self.features.append(self.encoder.layer3(self.features[-1]))
self.features.append(self.encoder.layer4(self.features[-1]))
return self.features
<mask token>
| <mask token>
class ResnetEncoder(nn.HybridBlock):
<mask token>
def __init__(self, backbone, pretrained, num_input_images=1, root=os.
path.join(os.path.expanduser('~'), '.mxnet/models'), ctx=cpu(), **
kwargs):
super(ResnetEncoder, self).__init__()
self.num_ch_enc = np.array([64, 64, 128, 256, 512])
resnets = {'resnet18': resnet18_v1b, 'resnet34': resnet34_v1b,
'resnet50': resnet50_v1s, 'resnet101': resnet101_v1s,
'resnet152': resnet152_v1s}
num_layers = {'resnet18': 18, 'resnet34': 34, 'resnet50': 50,
'resnet101': 101, 'resnet152': 152}
if backbone not in resnets:
raise ValueError('{} is not a valid resnet'.format(backbone))
if num_input_images > 1:
self.encoder = resnets[backbone](pretrained=False, ctx=ctx, **
kwargs)
if pretrained:
filename = os.path.join(root,
'resnet%d_v%db_multiple_inputs.params' % (num_layers[
backbone], 1))
if not os.path.isfile(filename):
from ..model_store import get_model_file
loaded = mx.nd.load(get_model_file('resnet%d_v%db' % (
num_layers[backbone], 1), tag=pretrained, root=root))
loaded['conv1.weight'] = mx.nd.concat(*([loaded[
'conv1.weight']] * num_input_images), dim=1
) / num_input_images
mx.nd.save(filename, loaded)
self.encoder.load_parameters(filename, ctx=ctx)
from ...data import ImageNet1kAttr
attrib = ImageNet1kAttr()
self.encoder.synset = attrib.synset
self.encoder.classes = attrib.classes
self.encoder.classes_long = attrib.classes_long
else:
self.encoder = resnets[backbone](pretrained=pretrained, ctx=ctx,
**kwargs)
if backbone not in ('resnet18', 'resnet34'):
self.num_ch_enc[1:] *= 4
def hybrid_forward(self, F, input_image):
self.features = []
x = (input_image - 0.45) / 0.225
x = self.encoder.conv1(x)
x = self.encoder.bn1(x)
self.features.append(self.encoder.relu(x))
self.features.append(self.encoder.layer1(self.encoder.maxpool(self.
features[-1])))
self.features.append(self.encoder.layer2(self.features[-1]))
self.features.append(self.encoder.layer3(self.features[-1]))
self.features.append(self.encoder.layer4(self.features[-1]))
return self.features
def predict(self, input_image):
self.features = []
x = (input_image - 0.45) / 0.225
x = self.encoder.conv1(x)
x = self.encoder.bn1(x)
self.features.append(self.encoder.relu(x))
self.features.append(self.encoder.layer1(self.encoder.maxpool(self.
features[-1])))
self.features.append(self.encoder.layer2(self.features[-1]))
self.features.append(self.encoder.layer3(self.features[-1]))
self.features.append(self.encoder.layer4(self.features[-1]))
return self.features
| <mask token>
class ResnetEncoder(nn.HybridBlock):
"""Encoder of Monodepth2
Parameters
----------
backbone : string
Pre-trained dilated backbone network type ('resnet18', 'resnet34', 'resnet50',
'resnet101' or 'resnet152').
pretrained : bool or str
        Whether the backbone is pretrained. If `True`,
        model weights of a model that was trained on ImageNet are loaded.
num_input_images : int
The number of input sequences. 1 for depth encoder, larger than 1 for pose encoder.
(Default: 1)
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
def __init__(self, backbone, pretrained, num_input_images=1, root=os.
path.join(os.path.expanduser('~'), '.mxnet/models'), ctx=cpu(), **
kwargs):
super(ResnetEncoder, self).__init__()
self.num_ch_enc = np.array([64, 64, 128, 256, 512])
resnets = {'resnet18': resnet18_v1b, 'resnet34': resnet34_v1b,
'resnet50': resnet50_v1s, 'resnet101': resnet101_v1s,
'resnet152': resnet152_v1s}
num_layers = {'resnet18': 18, 'resnet34': 34, 'resnet50': 50,
'resnet101': 101, 'resnet152': 152}
if backbone not in resnets:
raise ValueError('{} is not a valid resnet'.format(backbone))
if num_input_images > 1:
self.encoder = resnets[backbone](pretrained=False, ctx=ctx, **
kwargs)
if pretrained:
filename = os.path.join(root,
'resnet%d_v%db_multiple_inputs.params' % (num_layers[
backbone], 1))
if not os.path.isfile(filename):
from ..model_store import get_model_file
loaded = mx.nd.load(get_model_file('resnet%d_v%db' % (
num_layers[backbone], 1), tag=pretrained, root=root))
loaded['conv1.weight'] = mx.nd.concat(*([loaded[
'conv1.weight']] * num_input_images), dim=1
) / num_input_images
mx.nd.save(filename, loaded)
self.encoder.load_parameters(filename, ctx=ctx)
from ...data import ImageNet1kAttr
attrib = ImageNet1kAttr()
self.encoder.synset = attrib.synset
self.encoder.classes = attrib.classes
self.encoder.classes_long = attrib.classes_long
else:
self.encoder = resnets[backbone](pretrained=pretrained, ctx=ctx,
**kwargs)
if backbone not in ('resnet18', 'resnet34'):
self.num_ch_enc[1:] *= 4
def hybrid_forward(self, F, input_image):
self.features = []
x = (input_image - 0.45) / 0.225
x = self.encoder.conv1(x)
x = self.encoder.bn1(x)
self.features.append(self.encoder.relu(x))
self.features.append(self.encoder.layer1(self.encoder.maxpool(self.
features[-1])))
self.features.append(self.encoder.layer2(self.features[-1]))
self.features.append(self.encoder.layer3(self.features[-1]))
self.features.append(self.encoder.layer4(self.features[-1]))
return self.features
def predict(self, input_image):
self.features = []
x = (input_image - 0.45) / 0.225
x = self.encoder.conv1(x)
x = self.encoder.bn1(x)
self.features.append(self.encoder.relu(x))
self.features.append(self.encoder.layer1(self.encoder.maxpool(self.
features[-1])))
self.features.append(self.encoder.layer2(self.features[-1]))
self.features.append(self.encoder.layer3(self.features[-1]))
self.features.append(self.encoder.layer4(self.features[-1]))
return self.features
| <mask token>
from __future__ import absolute_import, division, print_function
import os
import numpy as np
import mxnet as mx
from mxnet.gluon import nn
from mxnet.context import cpu
from ...model_zoo.resnetv1b import resnet18_v1b, resnet34_v1b, resnet50_v1s, resnet101_v1s, resnet152_v1s
class ResnetEncoder(nn.HybridBlock):
"""Encoder of Monodepth2
Parameters
----------
backbone : string
Pre-trained dilated backbone network type ('resnet18', 'resnet34', 'resnet50',
'resnet101' or 'resnet152').
pretrained : bool or str
        Whether the backbone is pretrained. If `True`,
        model weights of a model that was trained on ImageNet are loaded.
num_input_images : int
The number of input sequences. 1 for depth encoder, larger than 1 for pose encoder.
(Default: 1)
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
def __init__(self, backbone, pretrained, num_input_images=1, root=os.
path.join(os.path.expanduser('~'), '.mxnet/models'), ctx=cpu(), **
kwargs):
super(ResnetEncoder, self).__init__()
self.num_ch_enc = np.array([64, 64, 128, 256, 512])
resnets = {'resnet18': resnet18_v1b, 'resnet34': resnet34_v1b,
'resnet50': resnet50_v1s, 'resnet101': resnet101_v1s,
'resnet152': resnet152_v1s}
num_layers = {'resnet18': 18, 'resnet34': 34, 'resnet50': 50,
'resnet101': 101, 'resnet152': 152}
if backbone not in resnets:
raise ValueError('{} is not a valid resnet'.format(backbone))
if num_input_images > 1:
self.encoder = resnets[backbone](pretrained=False, ctx=ctx, **
kwargs)
if pretrained:
filename = os.path.join(root,
'resnet%d_v%db_multiple_inputs.params' % (num_layers[
backbone], 1))
if not os.path.isfile(filename):
from ..model_store import get_model_file
loaded = mx.nd.load(get_model_file('resnet%d_v%db' % (
num_layers[backbone], 1), tag=pretrained, root=root))
loaded['conv1.weight'] = mx.nd.concat(*([loaded[
'conv1.weight']] * num_input_images), dim=1
) / num_input_images
mx.nd.save(filename, loaded)
self.encoder.load_parameters(filename, ctx=ctx)
from ...data import ImageNet1kAttr
attrib = ImageNet1kAttr()
self.encoder.synset = attrib.synset
self.encoder.classes = attrib.classes
self.encoder.classes_long = attrib.classes_long
else:
self.encoder = resnets[backbone](pretrained=pretrained, ctx=ctx,
**kwargs)
if backbone not in ('resnet18', 'resnet34'):
self.num_ch_enc[1:] *= 4
def hybrid_forward(self, F, input_image):
self.features = []
x = (input_image - 0.45) / 0.225
x = self.encoder.conv1(x)
x = self.encoder.bn1(x)
self.features.append(self.encoder.relu(x))
self.features.append(self.encoder.layer1(self.encoder.maxpool(self.
features[-1])))
self.features.append(self.encoder.layer2(self.features[-1]))
self.features.append(self.encoder.layer3(self.features[-1]))
self.features.append(self.encoder.layer4(self.features[-1]))
return self.features
def predict(self, input_image):
self.features = []
x = (input_image - 0.45) / 0.225
x = self.encoder.conv1(x)
x = self.encoder.bn1(x)
self.features.append(self.encoder.relu(x))
self.features.append(self.encoder.layer1(self.encoder.maxpool(self.
features[-1])))
self.features.append(self.encoder.layer2(self.features[-1]))
self.features.append(self.encoder.layer3(self.features[-1]))
self.features.append(self.encoder.layer4(self.features[-1]))
return self.features
| """Encoder module of Monodepth2
Code partially borrowed from
https://github.com/nianticlabs/monodepth2/blob/master/networks/resnet_encoder.py
"""
from __future__ import absolute_import, division, print_function
import os
import numpy as np
import mxnet as mx
from mxnet.gluon import nn
from mxnet.context import cpu
from ...model_zoo.resnetv1b import \
resnet18_v1b, resnet34_v1b, resnet50_v1s, resnet101_v1s, resnet152_v1s
class ResnetEncoder(nn.HybridBlock):
r"""Encoder of Monodepth2
Parameters
----------
backbone : string
Pre-trained dilated backbone network type ('resnet18', 'resnet34', 'resnet50',
'resnet101' or 'resnet152').
pretrained : bool or str
        Whether the backbone is pretrained. If `True`,
        model weights of a model that was trained on ImageNet are loaded.
num_input_images : int
The number of input sequences. 1 for depth encoder, larger than 1 for pose encoder.
(Default: 1)
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
def __init__(self, backbone, pretrained, num_input_images=1,
root=os.path.join(os.path.expanduser('~'), '.mxnet/models'),
ctx=cpu(), **kwargs):
super(ResnetEncoder, self).__init__()
self.num_ch_enc = np.array([64, 64, 128, 256, 512])
resnets = {'resnet18': resnet18_v1b,
'resnet34': resnet34_v1b,
'resnet50': resnet50_v1s,
'resnet101': resnet101_v1s,
'resnet152': resnet152_v1s}
num_layers = {'resnet18': 18,
'resnet34': 34,
'resnet50': 50,
'resnet101': 101,
'resnet152': 152}
if backbone not in resnets:
raise ValueError("{} is not a valid resnet".format(backbone))
if num_input_images > 1:
self.encoder = resnets[backbone](pretrained=False, ctx=ctx, **kwargs)
if pretrained:
filename = os.path.join(
root, 'resnet%d_v%db_multiple_inputs.params' % (num_layers[backbone], 1))
if not os.path.isfile(filename):
from ..model_store import get_model_file
loaded = mx.nd.load(get_model_file('resnet%d_v%db' % (num_layers[backbone], 1),
tag=pretrained, root=root))
loaded['conv1.weight'] = mx.nd.concat(
*([loaded['conv1.weight']] * num_input_images), dim=1) / num_input_images
mx.nd.save(filename, loaded)
self.encoder.load_parameters(filename, ctx=ctx)
from ...data import ImageNet1kAttr
attrib = ImageNet1kAttr()
self.encoder.synset = attrib.synset
self.encoder.classes = attrib.classes
self.encoder.classes_long = attrib.classes_long
else:
self.encoder = resnets[backbone](pretrained=pretrained, ctx=ctx, **kwargs)
if backbone not in ('resnet18', 'resnet34'):
self.num_ch_enc[1:] *= 4
def hybrid_forward(self, F, input_image):
# pylint: disable=unused-argument, missing-function-docstring
self.features = []
x = (input_image - 0.45) / 0.225
x = self.encoder.conv1(x)
x = self.encoder.bn1(x)
self.features.append(self.encoder.relu(x))
self.features.append(self.encoder.layer1(self.encoder.maxpool(self.features[-1])))
self.features.append(self.encoder.layer2(self.features[-1]))
self.features.append(self.encoder.layer3(self.features[-1]))
self.features.append(self.encoder.layer4(self.features[-1]))
return self.features
def predict(self, input_image):
# pylint: disable=unused-argument, missing-function-docstring
self.features = []
x = (input_image - 0.45) / 0.225
x = self.encoder.conv1(x)
x = self.encoder.bn1(x)
self.features.append(self.encoder.relu(x))
self.features.append(self.encoder.layer1(self.encoder.maxpool(self.features[-1])))
self.features.append(self.encoder.layer2(self.features[-1]))
self.features.append(self.encoder.layer3(self.features[-1]))
self.features.append(self.encoder.layer4(self.features[-1]))
return self.features
| [
3,
4,
5,
6,
7
] |
1,401 | 67be25e8fdf004515e18e1c20b8d0238222a2172 | <mask token>
class EuclideanLoss(nn.Module):
<mask token>
<mask token>
class CostFunction(nn.Module):
def __init__(self, c_p, c_h):
super().__init__()
self.c_p = c_p
self.c_h = c_h
def forward(self, y, d):
"""
y: prediction, size = (n_product, n_obs)
d: actual sales, size = (n_product, n_obs)
"""
cost = torch.add(y, -d)
cost = torch.add(torch.mul(torch.max(cost, torch.zeros(1)), self.
c_p), torch.mul(torch.max(-cost, torch.zeros(1)), self.c_h))
cost = torch.sum(cost)
return cost
| <mask token>
class EuclideanLoss(nn.Module):
def __init__(self, c_p, c_h):
super().__init__()
self.c_p = c_p
self.c_h = c_h
<mask token>
class CostFunction(nn.Module):
def __init__(self, c_p, c_h):
super().__init__()
self.c_p = c_p
self.c_h = c_h
def forward(self, y, d):
"""
y: prediction, size = (n_product, n_obs)
d: actual sales, size = (n_product, n_obs)
"""
cost = torch.add(y, -d)
cost = torch.add(torch.mul(torch.max(cost, torch.zeros(1)), self.
c_p), torch.mul(torch.max(-cost, torch.zeros(1)), self.c_h))
cost = torch.sum(cost)
return cost
| <mask token>
class EuclideanLoss(nn.Module):
def __init__(self, c_p, c_h):
super().__init__()
self.c_p = c_p
self.c_h = c_h
def forward(self, y, d):
"""
y: prediction, size = (n_product, n_obs)
d: actual sales, size = (n_product, n_obs)
"""
diff = torch.add(y, -d)
diff = torch.add(torch.mul(torch.max(diff, torch.zeros(1)), self.
c_p), torch.mul(torch.max(-diff, torch.zeros(1)), self.c_h))
diff = torch.norm(diff)
diff = torch.sum(diff)
return diff
class CostFunction(nn.Module):
def __init__(self, c_p, c_h):
super().__init__()
self.c_p = c_p
self.c_h = c_h
def forward(self, y, d):
"""
y: prediction, size = (n_product, n_obs)
d: actual sales, size = (n_product, n_obs)
"""
cost = torch.add(y, -d)
cost = torch.add(torch.mul(torch.max(cost, torch.zeros(1)), self.
c_p), torch.mul(torch.max(-cost, torch.zeros(1)), self.c_h))
cost = torch.sum(cost)
return cost
| import torch
import torch.nn as nn
import numpy as np
class EuclideanLoss(nn.Module):
def __init__(self, c_p, c_h):
super().__init__()
self.c_p = c_p
self.c_h = c_h
def forward(self, y, d):
"""
y: prediction, size = (n_product, n_obs)
d: actual sales, size = (n_product, n_obs)
"""
diff = torch.add(y, -d)
diff = torch.add(torch.mul(torch.max(diff, torch.zeros(1)), self.
c_p), torch.mul(torch.max(-diff, torch.zeros(1)), self.c_h))
diff = torch.norm(diff)
diff = torch.sum(diff)
return diff
class CostFunction(nn.Module):
def __init__(self, c_p, c_h):
super().__init__()
self.c_p = c_p
self.c_h = c_h
def forward(self, y, d):
"""
y: prediction, size = (n_product, n_obs)
d: actual sales, size = (n_product, n_obs)
"""
cost = torch.add(y, -d)
cost = torch.add(torch.mul(torch.max(cost, torch.zeros(1)), self.
c_p), torch.mul(torch.max(-cost, torch.zeros(1)), self.c_h))
cost = torch.sum(cost)
return cost
| import torch
import torch.nn as nn
import numpy as np
class EuclideanLoss(nn.Module):
def __init__(self, c_p, c_h):
super().__init__()
self.c_p = c_p
self.c_h = c_h
def forward(self, y, d):
'''
y: prediction, size = (n_product, n_obs)
d: actual sales, size = (n_product, n_obs)
'''
diff = torch.add(y, -d)
diff = torch.add(torch.mul(torch.max(diff, torch.zeros(1)), self.c_p), torch.mul(torch.max(-diff, torch.zeros(1)), self.c_h))
diff = torch.norm(diff)
diff = torch.sum(diff)
return diff
class CostFunction(nn.Module):
def __init__(self, c_p, c_h):
super().__init__()
self.c_p = c_p
self.c_h = c_h
def forward(self, y, d):
'''
y: prediction, size = (n_product, n_obs)
d: actual sales, size = (n_product, n_obs)
'''
cost = torch.add(y, -d)
cost = torch.add(torch.mul(torch.max(cost, torch.zeros(1)), self.c_p), torch.mul(torch.max(-cost, torch.zeros(1)), self.c_h))
cost = torch.sum(cost)
return cost | [
4,
5,
6,
7,
8
] |
1,402 | 22909e41e4f9ad0280c22ec11ecfbccff87efae1 | <mask token>
| <mask token>
if __name__ == '__main__':
cases = sys.stdin.readline()
for i in range(int(cases)):
sys.stdin.readline()
lineas, columnas = sys.stdin.readline().strip().split(' ')
lineas = int(lineas)
columnas = int(columnas)
list_lines = []
for linea in range(lineas):
list_lines.append(list(sys.stdin.readline().strip().lower()))
numWords = int(sys.stdin.readline().strip())
list_words = []
for word in range(numWords):
list_words.append(list(sys.stdin.readline().strip().lower()))
for word in list_words:
palEncont = False
for fila in range(lineas):
for colum in range(columnas):
if list_lines[fila][colum] == word[0]:
tamPalab = len(word)
cont = 0
punt = 0
while cont < tamPalab:
if colum + punt < columnas and list_lines[fila][
colum + punt] == word[cont]:
cont += 1
punt += 1
else:
break
if cont == tamPalab:
print('' + str(fila + 1) + ' ' + str(colum + 1))
palEncont = True
break
cont = 0
punt = 0
while cont < tamPalab:
if (colum + punt < columnas and fila + punt <
lineas and list_lines[fila + punt][colum +
punt] == word[cont]):
cont += 1
punt += 1
else:
break
if cont == tamPalab:
print('' + str(fila + 1) + ' ' + str(colum + 1))
palEncont = True
break
cont = 0
punt = 0
while cont < tamPalab:
if fila + punt < lineas and list_lines[fila + punt
][colum] == word[cont]:
cont += 1
punt += 1
else:
break
if cont == tamPalab:
print('' + str(fila + 1) + ' ' + str(colum + 1))
palEncont = True
break
cont = 0
punt = 0
while cont < tamPalab:
if (colum - punt >= 0 and fila + punt < lineas and
list_lines[fila + punt][colum - punt] ==
word[cont]):
cont += 1
punt += 1
else:
break
if cont == tamPalab:
print('' + str(fila + 1) + ' ' + str(colum + 1))
palEncont = True
break
cont = 0
punt = 0
while cont < tamPalab:
if colum - punt >= 0 and list_lines[fila][colum -
punt] == word[cont]:
cont += 1
punt += 1
else:
break
if cont == tamPalab:
print('' + str(fila + 1) + ' ' + str(colum + 1))
palEncont = True
break
cont = 0
punt = 0
while cont < tamPalab:
if (colum - punt >= 0 and fila - punt >= 0 and
list_lines[fila - punt][colum - punt] ==
word[cont]):
cont += 1
punt += 1
else:
break
if cont == tamPalab:
print('' + str(fila + 1) + ' ' + str(colum + 1))
palEncont = True
break
cont = 0
punt = 0
while cont < tamPalab:
if fila - punt >= 0 and list_lines[fila - punt][
colum] == word[cont]:
cont += 1
punt += 1
else:
break
if cont == tamPalab:
print('' + str(fila + 1) + ' ' + str(colum + 1))
palEncont = True
break
cont = 0
punt = 0
while cont < tamPalab:
if (colum + punt < columnas and fila - punt >=
0 and list_lines[fila - punt][colum + punt] ==
word[cont]):
cont += 1
punt += 1
else:
break
if cont == tamPalab:
print('' + str(fila + 1) + ' ' + str(colum + 1))
palEncont = True
break
if palEncont:
break
if palEncont:
break
if i < int(cases) - 1:
print()
| import sys
if __name__ == '__main__':
cases = sys.stdin.readline()
for i in range(int(cases)):
sys.stdin.readline()
lineas, columnas = sys.stdin.readline().strip().split(' ')
lineas = int(lineas)
columnas = int(columnas)
list_lines = []
for linea in range(lineas):
list_lines.append(list(sys.stdin.readline().strip().lower()))
numWords = int(sys.stdin.readline().strip())
list_words = []
for word in range(numWords):
list_words.append(list(sys.stdin.readline().strip().lower()))
for word in list_words:
palEncont = False
for fila in range(lineas):
for colum in range(columnas):
if list_lines[fila][colum] == word[0]:
tamPalab = len(word)
cont = 0
punt = 0
while cont < tamPalab:
if colum + punt < columnas and list_lines[fila][
colum + punt] == word[cont]:
cont += 1
punt += 1
else:
break
if cont == tamPalab:
print('' + str(fila + 1) + ' ' + str(colum + 1))
palEncont = True
break
cont = 0
punt = 0
while cont < tamPalab:
if (colum + punt < columnas and fila + punt <
lineas and list_lines[fila + punt][colum +
punt] == word[cont]):
cont += 1
punt += 1
else:
break
if cont == tamPalab:
print('' + str(fila + 1) + ' ' + str(colum + 1))
palEncont = True
break
cont = 0
punt = 0
while cont < tamPalab:
if fila + punt < lineas and list_lines[fila + punt
][colum] == word[cont]:
cont += 1
punt += 1
else:
break
if cont == tamPalab:
print('' + str(fila + 1) + ' ' + str(colum + 1))
palEncont = True
break
cont = 0
punt = 0
while cont < tamPalab:
if (colum - punt >= 0 and fila + punt < lineas and
list_lines[fila + punt][colum - punt] ==
word[cont]):
cont += 1
punt += 1
else:
break
if cont == tamPalab:
print('' + str(fila + 1) + ' ' + str(colum + 1))
palEncont = True
break
cont = 0
punt = 0
while cont < tamPalab:
if colum - punt >= 0 and list_lines[fila][colum -
punt] == word[cont]:
cont += 1
punt += 1
else:
break
if cont == tamPalab:
print('' + str(fila + 1) + ' ' + str(colum + 1))
palEncont = True
break
cont = 0
punt = 0
while cont < tamPalab:
if (colum - punt >= 0 and fila - punt >= 0 and
list_lines[fila - punt][colum - punt] ==
word[cont]):
cont += 1
punt += 1
else:
break
if cont == tamPalab:
print('' + str(fila + 1) + ' ' + str(colum + 1))
palEncont = True
break
cont = 0
punt = 0
while cont < tamPalab:
if fila - punt >= 0 and list_lines[fila - punt][
colum] == word[cont]:
cont += 1
punt += 1
else:
break
if cont == tamPalab:
print('' + str(fila + 1) + ' ' + str(colum + 1))
palEncont = True
break
cont = 0
punt = 0
while cont < tamPalab:
if (colum + punt < columnas and fila - punt >=
0 and list_lines[fila - punt][colum + punt] ==
word[cont]):
cont += 1
punt += 1
else:
break
if cont == tamPalab:
print('' + str(fila + 1) + ' ' + str(colum + 1))
palEncont = True
break
if palEncont:
break
if palEncont:
break
if i < int(cases) - 1:
print()
| import sys
if __name__ == '__main__':
cases = sys.stdin.readline()
for i in range(int(cases)):
sys.stdin.readline()
lineas, columnas = sys.stdin.readline().strip().split(" ")
lineas = int(lineas)
columnas = int(columnas)
list_lines = []
for linea in range(lineas):
list_lines.append(list(sys.stdin.readline().strip().lower()))
numWords = int(sys.stdin.readline().strip())
list_words = []
for word in range(numWords):
list_words.append(list(sys.stdin.readline().strip().lower()))
for word in list_words:
palEncont = False
for fila in range(lineas):
for colum in range(columnas):
if list_lines[fila][colum] == word[0]:
tamPalab = len(word)
                        #Center -> Right
cont = 0
punt = 0
while( cont < tamPalab ):
if colum+punt < columnas and list_lines[fila][colum+punt] == word[cont]:
cont += 1
punt += 1
else:
break
if( cont == tamPalab ):
print( "" + str(fila+1) + " " + str(colum+1) )
palEncont = True
break
                        #Center -> Down-Right
cont = 0
punt = 0
while( cont < tamPalab ):
if colum+punt < columnas and fila+punt < lineas and list_lines[fila+punt][colum+punt] == word[cont]:
cont += 1
punt += 1
else:
break
if( cont == tamPalab ):
print( "" + str(fila+1) + " " + str(colum+1) )
palEncont = True
break
                        #Center -> Down
cont = 0
punt = 0
while( cont < tamPalab ):
if fila+punt < lineas and list_lines[fila+punt][colum] == word[cont]:
cont += 1
punt += 1
else:
break
if( cont == tamPalab ):
print( "" + str(fila+1) + " " + str(colum+1) )
palEncont = True
break
                        #Center -> Down-Left
cont = 0
punt = 0
while( cont < tamPalab ):
if colum-punt >= 0 and fila+punt < lineas and list_lines[fila+punt][colum-punt] == word[cont]:
cont += 1
punt += 1
else:
break
if( cont == tamPalab ):
print( "" + str(fila+1) + " " + str(colum+1) )
palEncont = True
break
                        #Center -> Left
cont = 0
punt = 0
while( cont < tamPalab ):
if colum-punt >= 0 and list_lines[fila][colum-punt] == word[cont]:
cont += 1
punt += 1
else:
break
if( cont == tamPalab ):
print( "" + str(fila+1) + " " + str(colum+1) )
palEncont = True
break
                        #Center -> Up-Left
cont = 0
punt = 0
while( cont < tamPalab ):
if colum-punt >= 0 and fila-punt >= 0 and list_lines[fila-punt][colum-punt] == word[cont]:
cont += 1
punt += 1
else:
break
if( cont == tamPalab ):
print( "" + str(fila+1) + " " + str(colum+1) )
palEncont = True
break
                        #Center -> Up
cont = 0
punt = 0
while( cont < tamPalab ):
if fila-punt >= 0 and list_lines[fila-punt][colum] == word[cont]:
cont += 1
punt += 1
else:
break
if( cont == tamPalab ):
print( "" + str(fila+1) + " " + str(colum+1) )
palEncont = True
break
                        #Center -> Up-Right
cont = 0
punt = 0
while( cont < tamPalab ):
if colum+punt < columnas and fila-punt >= 0 and list_lines[fila-punt][colum+punt] == word[cont]:
cont += 1
punt += 1
else:
break
if( cont == tamPalab ):
print( "" + str(fila+1) + " " + str(colum+1) )
palEncont = True
break
if palEncont:
break
if palEncont:
break;
if i < int(cases)-1:
print()
| null | [
0,
1,
2,
3
] |
1,403 | a3301180e53da4a6970c082e72d8721b29dcae2e | #!/usr/local/bin/python
# -*- coding: utf-8 -*-
# importing regular stuff
import os
import sys
import thread
import threading
import time
import datetime
from datetime import datetime
import random
import filecmp
import ConfigParser
import socket
#my stuff will go here
import include.action as action
import include.logreader as logreader
import include.command as command
import include.logger as log
import include.database as database
import include.timetrack as timetrack
#### code start ####
legit = True
serverstop = False
#### version ####
v = "3.6-revival"
print "Starting up MineMon "+v
time.sleep(0.2)
print "Author: Oscar Carlberg"
#### Load settings ####
setting_file = sys.argv[1]
config = ConfigParser.RawConfigParser()
config.read(setting_file)
#### Connect to MC rcon ####
mchost = config.get('config', 'rhost')
mcport = config.get('config', 'rport')
mcpwd = config.get('config', 'rpass')
#### Connect to MySQL ####
myhost = config.get('config', 'mysqlhost')
myuser = config.get('config', 'mysqluser')
mypass = config.get('config', 'mysqlpass')
mydb = config.get('config', 'mysqldb')
database.settings(myhost, myuser, mypass, mydb)
#### some settings-var ####
mcpath = config.get('config', 'mcpath')
mapurl = config.get('config', 'mapurl')
helpurl = config.get('config', 'helpurl')
screen = config.get('config', 'screen')
mc_mem = config.get('config', 'mc_mem')
gmail = config.get('config', 'gmail')
gpw = config.get('config', 'gmailpw')
mailrcvr = config.get('config', 'sendto')
#### announce that i'm running ####
try:
action.connect(mchost, mcport, mcpwd)
except:
print "Coult not connect to Minecraft Rcon!"
sys.exit()
action.load(gmail, gpw, mailrcvr, screen, mc_mem)
action.say("§aMinecraft Monitor Version "+v+" now running!", 1)
action.say("§aType !help for available commands", 0)
ops = action.load_op(mcpath)
timetrk=timetrack.playtime()
#### check if enabled & op func ####
def enabled(onoroff):
#Check if regular command or feature
if "!" in onoroff:
setting = database.check_enabled_command(onoroff)
#If not enabled say so.
if not setting:
action.say("This command has been disabled for this world!", 0)
return setting
else:
try:
setting = config.get('config', onoroff)
except:
setting = "disabled"
print "NO setting entry for "+onoroff+", disabled."
if "enabled" in setting:
return True
else:
action.say("This command has been disabled for this world!", 0)
return False
def silent_enabled(onoroff):
try:
setting = config.get('config', onoroff)
except:
setting = "disabled"
print "NO setting entry for "+onoroff+", disabled."
if "enabled" in setting:
return True
else:
return False
def check_op(name, command):
op = database.check_command_op(command)
	#If command does not need op, return OK
if not op:
return True
else:
#else, check if user is op, and return true
if name.lower() in ops:
return True
#if not, deny.
else:
action.say("This command is not allowed for non-op's.", 0)
def nick_washer(nick):
while "§" in nick:
nick = nick.replace("§1", "")
nick = nick.replace("§2", "")
nick = nick.replace("§3", "")
nick = nick.replace("§4", "")
nick = nick.replace("§5", "")
nick = nick.replace("§6", "")
nick = nick.replace("§7", "")
nick = nick.replace("§8", "")
nick = nick.replace("§9", "")
nick = nick.replace("§a", "")
nick = nick.replace("§b", "")
nick = nick.replace("§c", "")
nick = nick.replace("§d", "")
nick = nick.replace("§e", "")
nick = nick.replace("§f", "")
nick = nick.replace("§r", "")
#print "Washed: "+nick
return nick
#### Trigger on chattlog stuff ####
def trigger(name):
global serverstop
if "!help" in chatlog:
if enabled("!help"):
if check_op(name, "!help"):
helpcmnd = command.help(helpurl, chatlog)
log.save2(timestamp, "SYSTEM", "!help", name, "] [", helpcmnd)
elif "!sheen" in chatlog:
if enabled("!sheen"):
if check_op(name, "!sheen"):
command.sheen()
log.save(timestamp, "TEXT", "!sheen", name)
elif "joined the game" in chatlog and not "[Rcon]" in chatlog:
if enabled("login_manner"):
player = command.login(chatlog, v, helpurl)
log.save(timestamp, "GREEN", "Login:", player)
elif "left the game" in chatlog and not "[Rcon]" in chatlog:
if enabled("logout_manner"):
player = command.logout(chatlog)
log.save(timestamp, "RED", "Logout:", player)
elif "!hax" in chatlog and not "[Rcon]" in chatlog:
if enabled("!hax"):
if check_op(name, "!hax"):
command.hax(name)
log.save(timestamp, "SYSTEM", "!hax", name)
elif "!unhax" in chatlog and not "[Rcon]" in chatlog:
if enabled("!unhax"):
if check_op(name, "!unhax"):
command.unhax(name)
log.save(timestamp, "SYSTEM", "!unhax", name)
elif "!adv" in chatlog and not "[Rcon]" in chatlog:
if enabled("!adv"):
if check_op(name, "!adv"):
command.adv(name)
log.save(timestamp, "SYSTEM", "!adv", name)
elif "!day" in chatlog:
if enabled("!day"):
if check_op(name, "!day"):
command.day()
log.save(timestamp, "SYSTEM", "!day", name)
elif "!night" in chatlog:
if enabled("!night"):
if check_op(name, "!night"):
command.night()
log.save(timestamp, "SYSTEM", "!night", name)
elif "!tp" in chatlog and not "[Rcon]" in chatlog:
if enabled("!tp"):
if check_op(name, "!tp"):
who = command.tp(name, chatlog)
log.save2(timestamp, "TEXT", "!tp", name, "] -> [", who)
elif "!pull" in chatlog and not "[Rcon]" in chatlog:
if enabled("!pull"):
if check_op(name, "!pull"):
who = command.pull(name, chatlog)
log.save2(timestamp, "TEXT", "!pull", name, "] <- [", who)
elif "!map" in chatlog:
if enabled("!map"):
if check_op(name, "!map"):
command.map(mapurl)
log.save(timestamp, "SYSTEM", "!map", name)
elif "!version" in chatlog and not "[Rcon]" in chatlog:
if enabled("!version"):
if check_op(name, "!version"):
command.version(v)
log.save(timestamp, "SYSTEM", "!version", name)
elif "!list" in chatlog:
action.say("Deprecated. Press Tab on your keyboard", 0)
elif "!roll" in chatlog and not "[Rcon]" in chatlog:
if enabled("!roll"):
if check_op(name, "!roll"):
roll = command.roll(name)
log.save2(timestamp, "TEXT", "!roll", name, "] [", roll)
elif "!rain" in chatlog and not "[Rcon]" in chatlog:
if enabled("!rain"):
if check_op(name, "!rain"):
command.rain()
log.save(timestamp, "SYSTEM", "!rain", name)
elif "!xp" in chatlog and not "[Rcon]" in chatlog:
if enabled("!xp"):
if check_op(name, "!xp"):
command.xp(name)
log.save(timestamp, "TEXT", "!xp", name)
elif "!kit" in chatlog and not "[Rcon]" in chatlog:
if enabled("!kit"):
if check_op(name, "!kit"):
command.kit(name)
log.save(timestamp, "TEXT", "!kit", name)
elif "!leatherset" in chatlog and not "[Rcon]" in chatlog:
if enabled("!leatherset"):
if check_op(name, "!leatherset"):
command.leatherset(name)
log.save(timestamp, "TEXT", "!leatherset", name)
elif "!diamondset" in chatlog and not "[Rcon]" in chatlog:
if enabled("!diamondset"):
if check_op(name, "!diamondset"):
command.diamondset(name)
log.save(timestamp, "TEXT", "!diamondset", name)
elif "!bow" in chatlog and not "[Rcon]" in chatlog:
if enabled("!bow"):
if check_op(name, "!bow"):
command.bow(name)
log.save(timestamp, "TEXT", "!bow", name)
elif "!train" in chatlog and not "[Rcon]" in chatlog:
if enabled("!train"):
if check_op(name, "!train"):
command.train(name)
log.save(timestamp, "TEXT", "!train", name)
elif "!sleep" in chatlog and not "[Rcon]" in chatlog:
if enabled("!sleep"):
if check_op(name, "!sleep"):
command.sleep(name)
log.save(timestamp, "TEXT", "!sleep", name)
elif "!rail" in chatlog and not "[Rcon]" in chatlog:
if enabled("!rail"):
if check_op(name, "!rail"):
command.rail(name)
log.save(timestamp, "TEXT", "!rail", name)
elif "!food" in chatlog and not "[Rcon]" in chatlog:
if enabled("!food"):
if check_op(name, "!food"):
command.food(name)
log.save(timestamp, "TEXT", "!food", name)
elif "!item" in chatlog and not "[Rcon]" in chatlog:
if enabled("!item"):
if check_op(name, "!item"):
item = command.item(name, chatlog)
log.save2(timestamp, "TEXT", "!item", name, "] [", item)
elif "!restart" in chatlog and not "[Rcon]" in chatlog:
if enabled("!restart"):
if check_op(name, "!restart"):
command.restart()
log.save(timestamp, "SYSTEM", "!restart", name)
elif "!monsters" in chatlog:
if enabled("!monsters"):
if check_op(name, "!monsters"):
onoff = command.monsters(mcpath)
log.save2(timestamp, "SYSTEM", "!monsters", name, "] [", onoff)
elif "!update" in chatlog:
if enabled("!update"):
if check_op(name, "!update") or "Banned Player" in chatlog:
status = command.update(mcpath, mcport)
log.save2(timestamp, "SYSTEM", "!update", name, "] [", status)
elif "!temphax" in chatlog and not "[Rcon]" in chatlog:
if enabled("!temphax"):
if check_op(name, "!temphax"):
who = command.temphax(chatlog)
log.save2(timestamp, "TEXT", "!temphax", name, "] -> [", who)
elif "!report" in chatlog and not "[Rcon]" in chatlog:
if enabled("!report"):
if check_op(name, "!report"):
command.mail(name, chatlog, False)
log.save(timestamp, "SYSTEM", "!report", name)
elif "!played" in chatlog and not "[Rcon]" in chatlog:
if enabled("!played"):
if check_op(name, "!played"):
print "Checking played with name:"+ str(name)
command.played(name)
log.save(timestamp, "TEXT", "!played", name)
elif "!world" in chatlog and not "[Rcon]" in chatlog:
if enabled("!world"):
if check_op(name, "!world"):
success = command.world(name, chatlog, mcpath)
if success:
log.save2(timestamp, "SYSTEM", "!world", name, "] [", success)
elif "!clear" in chatlog and not "[Rcon]" in chatlog:
if enabled("!clear"):
if check_op(name, "!clear"):
command.clear(name)
log.save(timestamp, "TEXT", "!clear", name)
elif "!spawn" in chatlog and not "[Rcon]" in chatlog:
if enabled("!spawn"):
if check_op(name, "!spawn"):
command.spawn(name)
log.save(timestamp, "TEXT", "!spawn", name)
elif "!gateway" in chatlog and not "[Rcon]" in chatlog:
if enabled("!gateway"):
if check_op(name, "!gateway"):
gw = command.gateway(name, chatlog)
log.save2(timestamp, "TEXT", "!gateway", name, gw[0], gw[1])
elif "!dial" in chatlog and not "[Rcon]" in chatlog:
if enabled("!dial"):
if check_op(name, "!dial"):
dest = command.dial(name, chatlog)
log.save2(timestamp, "TEXT", "!dial", name, "] -> [", dest)
elif "!warp" in chatlog and not "[Rcon]" in chatlog:
if enabled("!warp"):
if check_op(name, "!warp"):
dest = command.warp(name, chatlog)
log.save2(timestamp, "TEXT", "!warp", name, "] -> [", dest)
elif "Opped" in chatlog or "De-opped" in chatlog:
global ops
ops = action.load_op(mcpath)
action.say("Detecting change in OP's, refreshing list!", 0)
log.save(timestamp, "SYSTEM", "OP-refresh", "SYSTEM")
elif "[INFO] Done (" in chatlog or "[INFO] RCON running on" in chatlog:
print "< STARTING SERVER > - Reconnecting to rcon"
action.connect(mchost, mcport, mcpwd)
log.raw_log("< STARTING SERVER >")
serverstop = False
global timetrk
if silent_enabled("timetrack"):
timetrk=timetrack.playtime()
timetrk.start()
print "< Playtime-tracking started >"
elif "[INFO] Saving chunks" in chatlog and serverstop == False:
print "< STOPPING SERVER >"
log.raw_log("< STOPPING SERVER >")
serverstop = True
if silent_enabled("timetrack"):
try:
timetrk.stop()
while timetrk.isAlive():
time.sleep(1)
del timetrk
print "< Playtime-tracking stopped >"
except:
print "Could not stop timetrack!"
log.raw_log("Could not stop timetrack!")
#old non-supported commands
elif "!tnt" in chatlog or "!stone" in chatlog or "!wood" in chatlog or "!dirt" in chatlog:
action.say("Deprecated command. use !hax or !item", 0)
# Un-commented since mc console now logs severe @ disconnect
# elif "[SEVERE]" in chatlog or "(SourceFile:" in chatlog and not "<" in chatlog:
# command.mail("SYSTEM", "MINECRAFT SEVERE EXCEPTION - TRYING TO RESTART", True)
# action.say("§c[FATAL]: Minecraft Server encountered a serious error.", 4)
# action.say("§c[WARNING] MineMon will try to restart the server as a precaution", 3)
# time.sleep(2)
# command.restart()
elif "qwophidden" in chatlog:
command.late()
else:
if '<' in chatlog:
log.save_chat(name, chatlog)
#### Name extractor
def extract_name(player):
	# extract the name
player = player[34:]
bort = '>'
player = player.split(bort, 1)[0]
return player
#### Mainloop ####
def func_checkLastLine(lastLine):
global chatlog
global timestamp
chatlog = lastLine.replace("\n", "")
timestamp = datetime.now()
name = extract_name(lastLine)
name = nick_washer(name)
#print "running trigger on name: "+str(name)
trigger(name)
#### start of S3rR1 hax, i dont even what is this ####
class newLoopingThread (threading.Thread):
def __init__(self, threadID):
self.threadID = threadID
threading.Thread.__init__(self)
def run(self):
func_loop()
def func_loop():
tempList = fileList
while running:
time.sleep(0.5)
fileHandle = open(logfile, 'r')
newLines = fileHandle.readlines()
if newLines != tempList and tempList != None:
tempList = newLines
newList = [item for item in tempList if item != '\n']
if len(newList) > 0: func_checkLastLine(newList[len(newList) - 1])
def func_getLastLine():
fileHandle = open(logfile, 'r')
allLines = fileHandle.readlines()
allLines = [item for item in allLines if item != '\n']
return allLines[len(allLines) - 1]
#### Start application
running = True
logfile = mcpath + "logs/latest.log"
fileHandle = open(logfile, 'r')
fileList = fileHandle.readlines()
loopThread = newLoopingThread(1)
loopThread.start()
if silent_enabled("timetrack"):
print "Timetracking enabled, starting timer"
timetrk.start()
#log the start
log.raw_log("Minecraft Monitor Version "+v+" started!")
#### exit ####
print "press any key to exit"
raw_input()
running = False
print "Waiting for looping thread to stop..."
while loopThread.isAlive(): time.sleep(0.5)
if enabled("timetrack"):
try:
timetrk.stop()
time.sleep(1)
except:
print "Could not stop timetracking, although its enabled - perhaps MC is not running?"
action.say("§cMinecraft Monitor Version "+v+" stopped!", 0)
#log the shutdown
log.raw_log("Minecraft Monitor Version "+v+" stopped!")
| null | null | null | null | [
0
] |
1,404 | 8928c2ff49cbad2a54252d41665c10437a471eeb | <mask token>
def same_folders(src1, src2):
"""Assert if folder contains diffrent files"""
dcmp = dircmp(src1, src2)
if dcmp.left_only or dcmp.right_only:
return False
for sub_dcmp in dcmp.subdirs.values():
        if not same_folders(sub_dcmp.left, sub_dcmp.right):
            return False
return True
@contextmanager
def destroy_by_ext(suffix: str):
"""Modify file content to invalid by file extention - json/yaml.
Open:
- Choose file by file extention.
- Modify file content to not valid.
Close:
- Modify content to the original state.
"""
if suffix == 'json':
file = (TEST_CONTENT_REPO / 'Packs' / 'Sample01' / 'Classifiers' /
'classifier-sample_new.json')
else:
file = (TEST_CONTENT_REPO / 'Packs' / 'Sample01' / 'TestPlaybooks' /
'playbook-sample_test1.yml')
old_data = file.read_text()
file.write_text('{123dfdsf,}\nfdsfdsf')
try:
yield
finally:
file.write_text(old_data)
<mask token>
@pytest.fixture()
def mock_git(mocker):
"""Mock git Repo object"""
from demisto_sdk.commands.common.content import Content
mocker.patch.object(Content, 'git')
Content.git().working_tree_dir = TEST_CONTENT_REPO
yield
@pytest.fixture()
def private_repo():
"""Create Temp private repo structure from original content structure.
Open:
- Create a copy of regular content.
- Delete - content/TestPlaybooks dir.
Close:
- Delete private content folder.
"""
try:
copytree(TEST_CONTENT_REPO, TEST_PRIVATE_CONTENT_REPO)
test_playbook_dir = TEST_PRIVATE_CONTENT_REPO / TEST_PLAYBOOKS_DIR
rmtree(test_playbook_dir)
yield TEST_PRIVATE_CONTENT_REPO
finally:
rmtree(TEST_PRIVATE_CONTENT_REPO)
def test_modify_common_server_constants():
""" Modify global variables in CommonServerPython.py
When: CommonServerPython.py contains:
- Global variable - CONTENT_RELEASE_VERSION = '0.0.0'
- Global variable - CONTENT_BRANCH_NAME = ''
Given: Parameters:
- Content version x.x.x
- Active branch - xxxx
Then: CommonServerPython.py changes:
- Global variable - CONTENT_RELEASE_VERSION = 'x.x.x'
- Global variable - CONTENT_BRANCH_NAME = 'xxxx'
Notes:
        - After the test, clean up the changes.
"""
from demisto_sdk.commands.create_artifacts.content_artifacts_creator import modify_common_server_constants
path_before = COMMON_SERVER / 'CommonServerPython.py'
path_excepted = COMMON_SERVER / 'CommonServerPython_modified.py'
old_data = path_before.read_text()
modify_common_server_constants(path_before, '6.0.0', 'test')
assert cmp(path_before, path_excepted)
path_before.write_text(old_data)
def test_dump_pack(mock_git):
from demisto_sdk.commands.create_artifacts.content_artifacts_creator import ArtifactsManager, Pack, create_dirs, dump_pack
with temp_dir() as temp:
config = ArtifactsManager(artifacts_path=temp, content_version=
'6.0.0', zip=False, suffix='', cpus=1, packs=False)
create_dirs(artifact_manager=config)
dump_pack(artifact_manager=config, pack=Pack(TEST_CONTENT_REPO /
PACKS_DIR / 'Sample01'))
assert same_folders(src1=temp / 'content_packs' / 'Sample01', src2=
ARTIFACTS_EXPEXTED_RESULTS / 'content' / 'content_packs' /
'Sample01')
<mask token>
| <mask token>
def same_folders(src1, src2):
"""Assert if folder contains diffrent files"""
dcmp = dircmp(src1, src2)
if dcmp.left_only or dcmp.right_only:
return False
for sub_dcmp in dcmp.subdirs.values():
        if not same_folders(sub_dcmp.left, sub_dcmp.right):
            return False
return True
@contextmanager
def destroy_by_ext(suffix: str):
"""Modify file content to invalid by file extention - json/yaml.
Open:
- Choose file by file extention.
- Modify file content to not valid.
Close:
- Modify content to the original state.
"""
if suffix == 'json':
file = (TEST_CONTENT_REPO / 'Packs' / 'Sample01' / 'Classifiers' /
'classifier-sample_new.json')
else:
file = (TEST_CONTENT_REPO / 'Packs' / 'Sample01' / 'TestPlaybooks' /
'playbook-sample_test1.yml')
old_data = file.read_text()
file.write_text('{123dfdsf,}\nfdsfdsf')
try:
yield
finally:
file.write_text(old_data)
<mask token>
@pytest.fixture()
def mock_git(mocker):
"""Mock git Repo object"""
from demisto_sdk.commands.common.content import Content
mocker.patch.object(Content, 'git')
Content.git().working_tree_dir = TEST_CONTENT_REPO
yield
@pytest.fixture()
def private_repo():
"""Create Temp private repo structure from original content structure.
Open:
- Create a copy of regular content.
- Delete - content/TestPlaybooks dir.
Close:
- Delete private content folder.
"""
try:
copytree(TEST_CONTENT_REPO, TEST_PRIVATE_CONTENT_REPO)
test_playbook_dir = TEST_PRIVATE_CONTENT_REPO / TEST_PLAYBOOKS_DIR
rmtree(test_playbook_dir)
yield TEST_PRIVATE_CONTENT_REPO
finally:
rmtree(TEST_PRIVATE_CONTENT_REPO)
def test_modify_common_server_constants():
""" Modify global variables in CommonServerPython.py
When: CommonServerPython.py contains:
- Global variable - CONTENT_RELEASE_VERSION = '0.0.0'
- Global variable - CONTENT_BRANCH_NAME = ''
Given: Parameters:
- Content version x.x.x
- Active branch - xxxx
Then: CommonServerPython.py changes:
- Global variable - CONTENT_RELEASE_VERSION = 'x.x.x'
- Global variable - CONTENT_BRANCH_NAME = 'xxxx'
Notes:
        - After the test, clean up the changes.
"""
from demisto_sdk.commands.create_artifacts.content_artifacts_creator import modify_common_server_constants
path_before = COMMON_SERVER / 'CommonServerPython.py'
path_excepted = COMMON_SERVER / 'CommonServerPython_modified.py'
old_data = path_before.read_text()
modify_common_server_constants(path_before, '6.0.0', 'test')
assert cmp(path_before, path_excepted)
path_before.write_text(old_data)
def test_dump_pack(mock_git):
from demisto_sdk.commands.create_artifacts.content_artifacts_creator import ArtifactsManager, Pack, create_dirs, dump_pack
with temp_dir() as temp:
config = ArtifactsManager(artifacts_path=temp, content_version=
'6.0.0', zip=False, suffix='', cpus=1, packs=False)
create_dirs(artifact_manager=config)
dump_pack(artifact_manager=config, pack=Pack(TEST_CONTENT_REPO /
PACKS_DIR / 'Sample01'))
assert same_folders(src1=temp / 'content_packs' / 'Sample01', src2=
ARTIFACTS_EXPEXTED_RESULTS / 'content' / 'content_packs' /
'Sample01')
<mask token>
@pytest.mark.parametrize(argnames='suffix', argvalues=['yml', 'json'])
def test_malformed_file_failue(suffix: str, mock_git):
from demisto_sdk.commands.create_artifacts.content_artifacts_creator import ArtifactsManager, create_content_artifacts
with temp_dir() as temp:
config = ArtifactsManager(artifacts_path=temp, content_version=
'6.0.0', zip=False, suffix='', cpus=1, packs=False)
with destroy_by_ext(suffix):
exit_code = create_content_artifacts(artifact_manager=config)
assert exit_code == 1
def test_duplicate_file_failure(mock_git):
from demisto_sdk.commands.create_artifacts.content_artifacts_creator import ArtifactsManager, create_content_artifacts
with temp_dir() as temp:
config = ArtifactsManager(artifacts_path=temp, content_version=
'6.0.0', zip=False, suffix='', cpus=1, packs=False)
with duplicate_file():
exit_code = create_content_artifacts(artifact_manager=config)
assert exit_code == 1
| <mask token>
def same_folders(src1, src2):
"""Assert if folder contains diffrent files"""
dcmp = dircmp(src1, src2)
if dcmp.left_only or dcmp.right_only:
return False
for sub_dcmp in dcmp.subdirs.values():
        if not same_folders(sub_dcmp.left, sub_dcmp.right):
            return False
return True
@contextmanager
def destroy_by_ext(suffix: str):
"""Modify file content to invalid by file extention - json/yaml.
Open:
- Choose file by file extention.
- Modify file content to not valid.
Close:
- Modify content to the original state.
"""
if suffix == 'json':
file = (TEST_CONTENT_REPO / 'Packs' / 'Sample01' / 'Classifiers' /
'classifier-sample_new.json')
else:
file = (TEST_CONTENT_REPO / 'Packs' / 'Sample01' / 'TestPlaybooks' /
'playbook-sample_test1.yml')
old_data = file.read_text()
file.write_text('{123dfdsf,}\nfdsfdsf')
try:
yield
finally:
file.write_text(old_data)
<mask token>
@pytest.fixture()
def mock_git(mocker):
"""Mock git Repo object"""
from demisto_sdk.commands.common.content import Content
mocker.patch.object(Content, 'git')
Content.git().working_tree_dir = TEST_CONTENT_REPO
yield
@pytest.fixture()
def private_repo():
"""Create Temp private repo structure from original content structure.
Open:
- Create a copy of regular content.
- Delete - content/TestPlaybooks dir.
Close:
- Delete private content folder.
"""
try:
copytree(TEST_CONTENT_REPO, TEST_PRIVATE_CONTENT_REPO)
test_playbook_dir = TEST_PRIVATE_CONTENT_REPO / TEST_PLAYBOOKS_DIR
rmtree(test_playbook_dir)
yield TEST_PRIVATE_CONTENT_REPO
finally:
rmtree(TEST_PRIVATE_CONTENT_REPO)
def test_modify_common_server_constants():
""" Modify global variables in CommonServerPython.py
When: CommonServerPython.py contains:
- Global variable - CONTENT_RELEASE_VERSION = '0.0.0'
- Global variable - CONTENT_BRANCH_NAME = ''
Given: Parameters:
- Content version x.x.x
- Active branch - xxxx
Then: CommonServerPython.py changes:
- Global variable - CONTENT_RELEASE_VERSION = 'x.x.x'
- Global variable - CONTENT_BRANCH_NAME = 'xxxx'
Notes:
        - After the test, clean up the changes.
"""
from demisto_sdk.commands.create_artifacts.content_artifacts_creator import modify_common_server_constants
path_before = COMMON_SERVER / 'CommonServerPython.py'
path_excepted = COMMON_SERVER / 'CommonServerPython_modified.py'
old_data = path_before.read_text()
modify_common_server_constants(path_before, '6.0.0', 'test')
assert cmp(path_before, path_excepted)
path_before.write_text(old_data)
def test_dump_pack(mock_git):
from demisto_sdk.commands.create_artifacts.content_artifacts_creator import ArtifactsManager, Pack, create_dirs, dump_pack
with temp_dir() as temp:
config = ArtifactsManager(artifacts_path=temp, content_version=
'6.0.0', zip=False, suffix='', cpus=1, packs=False)
create_dirs(artifact_manager=config)
dump_pack(artifact_manager=config, pack=Pack(TEST_CONTENT_REPO /
PACKS_DIR / 'Sample01'))
assert same_folders(src1=temp / 'content_packs' / 'Sample01', src2=
ARTIFACTS_EXPEXTED_RESULTS / 'content' / 'content_packs' /
'Sample01')
def test_create_content_artifacts(mock_git):
from demisto_sdk.commands.create_artifacts.content_artifacts_creator import ArtifactsManager, create_content_artifacts
with temp_dir() as temp:
config = ArtifactsManager(artifacts_path=temp, content_version=
'6.0.0', zip=False, suffix='', cpus=1, packs=False)
exit_code = create_content_artifacts(artifact_manager=config)
assert exit_code == 0
assert same_folders(temp, ARTIFACTS_EXPEXTED_RESULTS / 'content')
<mask token>
@pytest.mark.parametrize(argnames='suffix', argvalues=['yml', 'json'])
def test_malformed_file_failue(suffix: str, mock_git):
from demisto_sdk.commands.create_artifacts.content_artifacts_creator import ArtifactsManager, create_content_artifacts
with temp_dir() as temp:
config = ArtifactsManager(artifacts_path=temp, content_version=
'6.0.0', zip=False, suffix='', cpus=1, packs=False)
with destroy_by_ext(suffix):
exit_code = create_content_artifacts(artifact_manager=config)
assert exit_code == 1
def test_duplicate_file_failure(mock_git):
from demisto_sdk.commands.create_artifacts.content_artifacts_creator import ArtifactsManager, create_content_artifacts
with temp_dir() as temp:
config = ArtifactsManager(artifacts_path=temp, content_version=
'6.0.0', zip=False, suffix='', cpus=1, packs=False)
with duplicate_file():
exit_code = create_content_artifacts(artifact_manager=config)
assert exit_code == 1
| <mask token>
def same_folders(src1, src2):
"""Assert if folder contains diffrent files"""
dcmp = dircmp(src1, src2)
if dcmp.left_only or dcmp.right_only:
return False
for sub_dcmp in dcmp.subdirs.values():
        if not same_folders(sub_dcmp.left, sub_dcmp.right):
            return False
return True
@contextmanager
def destroy_by_ext(suffix: str):
"""Modify file content to invalid by file extention - json/yaml.
Open:
- Choose file by file extention.
- Modify file content to not valid.
Close:
- Modify content to the original state.
"""
if suffix == 'json':
file = (TEST_CONTENT_REPO / 'Packs' / 'Sample01' / 'Classifiers' /
'classifier-sample_new.json')
else:
file = (TEST_CONTENT_REPO / 'Packs' / 'Sample01' / 'TestPlaybooks' /
'playbook-sample_test1.yml')
old_data = file.read_text()
file.write_text('{123dfdsf,}\nfdsfdsf')
try:
yield
finally:
file.write_text(old_data)
@contextmanager
def duplicate_file():
"""Create duplicate file name in content repository.
Open:
- Create copy of file in content.
Close:
- Delete copied file.
"""
file = (TEST_CONTENT_REPO / PACKS_DIR / 'Sample01' / TEST_PLAYBOOKS_DIR /
'playbook-sample_test1.yml')
new_file = (TEST_CONTENT_REPO / PACKS_DIR / 'Sample02' /
TEST_PLAYBOOKS_DIR / 'playbook-sample_test1.yml')
try:
copyfile(file, new_file)
yield
finally:
new_file.unlink()
@contextmanager
def temp_dir():
"""Create Temp direcotry for test.
Open:
- Create temp directory.
Close:
- Delete temp directory.
"""
temp = UNIT_TEST_DATA / 'temp'
try:
temp.mkdir(parents=True, exist_ok=True)
yield temp
finally:
rmtree(temp)
@pytest.fixture()
def mock_git(mocker):
"""Mock git Repo object"""
from demisto_sdk.commands.common.content import Content
mocker.patch.object(Content, 'git')
Content.git().working_tree_dir = TEST_CONTENT_REPO
yield
@pytest.fixture()
def private_repo():
"""Create Temp private repo structure from original content structure.
Open:
- Create a copy of regular content.
- Delete - content/TestPlaybooks dir.
Close:
- Delete private content folder.
"""
try:
copytree(TEST_CONTENT_REPO, TEST_PRIVATE_CONTENT_REPO)
test_playbook_dir = TEST_PRIVATE_CONTENT_REPO / TEST_PLAYBOOKS_DIR
rmtree(test_playbook_dir)
yield TEST_PRIVATE_CONTENT_REPO
finally:
rmtree(TEST_PRIVATE_CONTENT_REPO)
def test_modify_common_server_constants():
""" Modify global variables in CommonServerPython.py
When: CommonServerPython.py contains:
- Global variable - CONTENT_RELEASE_VERSION = '0.0.0'
- Global variable - CONTENT_BRANCH_NAME = ''
Given: Parameters:
- Content version x.x.x
- Active branch - xxxx
Then: CommonServerPython.py changes:
- Global variable - CONTENT_RELEASE_VERSION = 'x.x.x'
- Global variable - CONTENT_BRANCH_NAME = 'xxxx'
Notes:
        - After the test, clean up the changes.
"""
from demisto_sdk.commands.create_artifacts.content_artifacts_creator import modify_common_server_constants
path_before = COMMON_SERVER / 'CommonServerPython.py'
path_excepted = COMMON_SERVER / 'CommonServerPython_modified.py'
old_data = path_before.read_text()
modify_common_server_constants(path_before, '6.0.0', 'test')
assert cmp(path_before, path_excepted)
path_before.write_text(old_data)
def test_dump_pack(mock_git):
from demisto_sdk.commands.create_artifacts.content_artifacts_creator import ArtifactsManager, Pack, create_dirs, dump_pack
with temp_dir() as temp:
config = ArtifactsManager(artifacts_path=temp, content_version=
'6.0.0', zip=False, suffix='', cpus=1, packs=False)
create_dirs(artifact_manager=config)
dump_pack(artifact_manager=config, pack=Pack(TEST_CONTENT_REPO /
PACKS_DIR / 'Sample01'))
assert same_folders(src1=temp / 'content_packs' / 'Sample01', src2=
ARTIFACTS_EXPEXTED_RESULTS / 'content' / 'content_packs' /
'Sample01')
def test_create_content_artifacts(mock_git):
from demisto_sdk.commands.create_artifacts.content_artifacts_creator import ArtifactsManager, create_content_artifacts
with temp_dir() as temp:
config = ArtifactsManager(artifacts_path=temp, content_version=
'6.0.0', zip=False, suffix='', cpus=1, packs=False)
exit_code = create_content_artifacts(artifact_manager=config)
assert exit_code == 0
assert same_folders(temp, ARTIFACTS_EXPEXTED_RESULTS / 'content')
def test_create_private_content_artifacts(private_repo):
from demisto_sdk.commands.common.content import Content
from demisto_sdk.commands.create_artifacts.content_artifacts_creator import ArtifactsManager, create_content_artifacts
with temp_dir() as temp:
config = ArtifactsManager(artifacts_path=temp, content_version=
'6.0.0', zip=False, suffix='', cpus=1, packs=False)
config.content = Content(private_repo)
exit_code = create_content_artifacts(artifact_manager=config)
assert same_folders(temp, ARTIFACTS_EXPEXTED_RESULTS / 'private')
assert exit_code == 0
@pytest.mark.parametrize(argnames='suffix', argvalues=['yml', 'json'])
def test_malformed_file_failue(suffix: str, mock_git):
from demisto_sdk.commands.create_artifacts.content_artifacts_creator import ArtifactsManager, create_content_artifacts
with temp_dir() as temp:
config = ArtifactsManager(artifacts_path=temp, content_version=
'6.0.0', zip=False, suffix='', cpus=1, packs=False)
with destroy_by_ext(suffix):
exit_code = create_content_artifacts(artifact_manager=config)
assert exit_code == 1
def test_duplicate_file_failure(mock_git):
from demisto_sdk.commands.create_artifacts.content_artifacts_creator import ArtifactsManager, create_content_artifacts
with temp_dir() as temp:
config = ArtifactsManager(artifacts_path=temp, content_version=
'6.0.0', zip=False, suffix='', cpus=1, packs=False)
with duplicate_file():
exit_code = create_content_artifacts(artifact_manager=config)
assert exit_code == 1
| from contextlib import contextmanager
from filecmp import cmp, dircmp
from shutil import copyfile, copytree, rmtree
import pytest
from demisto_sdk.commands.common.constants import PACKS_DIR, TEST_PLAYBOOKS_DIR
from demisto_sdk.commands.common.tools import src_root
TEST_DATA = src_root() / 'tests' / 'test_files'
TEST_CONTENT_REPO = TEST_DATA / 'content_slim'
TEST_PRIVATE_CONTENT_REPO = TEST_DATA / 'private_content_slim'
UNIT_TEST_DATA = (src_root() / 'commands' / 'create_artifacts' / 'tests' / 'data')
COMMON_SERVER = UNIT_TEST_DATA / 'common_server'
ARTIFACTS_EXPEXTED_RESULTS = TEST_DATA / 'artifacts'
def same_folders(src1, src2):
"""Assert if folder contains diffrent files"""
dcmp = dircmp(src1, src2)
if dcmp.left_only or dcmp.right_only:
return False
for sub_dcmp in dcmp.subdirs.values():
same_folders(sub_dcmp.left, sub_dcmp.right)
return True
@contextmanager
def destroy_by_ext(suffix: str):
"""Modify file content to invalid by file extention - json/yaml.
Open:
        - Choose file by file extension.
- Modify file content to not valid.
Close:
- Modify content to the original state.
"""
if suffix == 'json':
file = TEST_CONTENT_REPO / "Packs" / "Sample01" / "Classifiers" / "classifier-sample_new.json"
else:
file = TEST_CONTENT_REPO / "Packs" / "Sample01" / "TestPlaybooks" / "playbook-sample_test1.yml"
old_data = file.read_text()
file.write_text("{123dfdsf,}\nfdsfdsf")
try:
yield
finally:
file.write_text(old_data)
@contextmanager
def duplicate_file():
"""Create duplicate file name in content repository.
Open:
- Create copy of file in content.
Close:
- Delete copied file.
"""
file = TEST_CONTENT_REPO / PACKS_DIR / "Sample01" / TEST_PLAYBOOKS_DIR / "playbook-sample_test1.yml"
new_file = TEST_CONTENT_REPO / PACKS_DIR / "Sample02" / TEST_PLAYBOOKS_DIR / "playbook-sample_test1.yml"
try:
copyfile(file, new_file)
yield
finally:
new_file.unlink()
@contextmanager
def temp_dir():
"""Create Temp direcotry for test.
Open:
- Create temp directory.
Close:
- Delete temp directory.
"""
temp = UNIT_TEST_DATA / 'temp'
try:
temp.mkdir(parents=True, exist_ok=True)
yield temp
finally:
rmtree(temp)
@pytest.fixture()
def mock_git(mocker):
"""Mock git Repo object"""
from demisto_sdk.commands.common.content import Content
# Mock git working directory
mocker.patch.object(Content, 'git')
Content.git().working_tree_dir = TEST_CONTENT_REPO
yield
@pytest.fixture()
def private_repo():
"""Create Temp private repo structure from original content structure.
Open:
- Create a copy of regular content.
- Delete - content/TestPlaybooks dir.
Close:
- Delete private content folder.
"""
try:
copytree(TEST_CONTENT_REPO, TEST_PRIVATE_CONTENT_REPO)
test_playbook_dir = TEST_PRIVATE_CONTENT_REPO / TEST_PLAYBOOKS_DIR
rmtree(test_playbook_dir)
yield TEST_PRIVATE_CONTENT_REPO
finally:
rmtree(TEST_PRIVATE_CONTENT_REPO)
def test_modify_common_server_constants():
""" Modify global variables in CommonServerPython.py
When: CommonServerPython.py contains:
- Global variable - CONTENT_RELEASE_VERSION = '0.0.0'
- Global variable - CONTENT_BRANCH_NAME = ''
Given: Parameters:
- Content version x.x.x
- Active branch - xxxx
Then: CommonServerPython.py changes:
- Global variable - CONTENT_RELEASE_VERSION = 'x.x.x'
- Global variable - CONTENT_BRANCH_NAME = 'xxxx'
Notes:
        - After the test, clean up the changes.
"""
from demisto_sdk.commands.create_artifacts.content_artifacts_creator import \
modify_common_server_constants
path_before = COMMON_SERVER / 'CommonServerPython.py'
path_excepted = COMMON_SERVER / 'CommonServerPython_modified.py'
old_data = path_before.read_text()
modify_common_server_constants(path_before, '6.0.0', 'test')
assert cmp(path_before, path_excepted)
path_before.write_text(old_data)
def test_dump_pack(mock_git):
from demisto_sdk.commands.create_artifacts.content_artifacts_creator import (
ArtifactsManager, Pack, create_dirs, dump_pack)
with temp_dir() as temp:
config = ArtifactsManager(artifacts_path=temp,
content_version='6.0.0',
zip=False,
suffix='',
cpus=1,
packs=False)
create_dirs(artifact_manager=config)
dump_pack(artifact_manager=config, pack=Pack(TEST_CONTENT_REPO / PACKS_DIR / 'Sample01'))
assert same_folders(src1=temp / 'content_packs' / 'Sample01',
src2=ARTIFACTS_EXPEXTED_RESULTS / 'content' / 'content_packs' / 'Sample01')
def test_create_content_artifacts(mock_git):
from demisto_sdk.commands.create_artifacts.content_artifacts_creator import (
ArtifactsManager, create_content_artifacts)
with temp_dir() as temp:
config = ArtifactsManager(artifacts_path=temp,
content_version='6.0.0',
zip=False,
suffix='',
cpus=1,
packs=False)
exit_code = create_content_artifacts(artifact_manager=config)
assert exit_code == 0
assert same_folders(temp, ARTIFACTS_EXPEXTED_RESULTS / 'content')
def test_create_private_content_artifacts(private_repo):
from demisto_sdk.commands.common.content import Content
from demisto_sdk.commands.create_artifacts.content_artifacts_creator import (
ArtifactsManager, create_content_artifacts)
with temp_dir() as temp:
config = ArtifactsManager(artifacts_path=temp,
content_version='6.0.0',
zip=False,
suffix='',
cpus=1,
packs=False)
config.content = Content(private_repo)
exit_code = create_content_artifacts(artifact_manager=config)
assert same_folders(temp, ARTIFACTS_EXPEXTED_RESULTS / 'private')
assert exit_code == 0
@pytest.mark.parametrize(argnames="suffix", argvalues=["yml", "json"])
def test_malformed_file_failue(suffix: str, mock_git):
from demisto_sdk.commands.create_artifacts.content_artifacts_creator import (
ArtifactsManager, create_content_artifacts)
with temp_dir() as temp:
config = ArtifactsManager(artifacts_path=temp,
content_version='6.0.0',
zip=False,
suffix='',
cpus=1,
packs=False)
with destroy_by_ext(suffix):
exit_code = create_content_artifacts(artifact_manager=config)
assert exit_code == 1
def test_duplicate_file_failure(mock_git):
from demisto_sdk.commands.create_artifacts.content_artifacts_creator import (
ArtifactsManager, create_content_artifacts)
with temp_dir() as temp:
config = ArtifactsManager(artifacts_path=temp,
content_version='6.0.0',
zip=False,
suffix='',
cpus=1,
packs=False)
with duplicate_file():
exit_code = create_content_artifacts(artifact_manager=config)
assert exit_code == 1
| [
6,
8,
9,
12,
15
] |
1,405 | 28233cb4a56ee805e66f34e6abd49137503d5f7b | <mask token>
| <mask token>
class Article(models.Model):
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
| <mask token>
class Article(models.Model):
created = models.DateTimeField(auto_now_add=True)
published = models.DateTimeField(null=True, blank=True)
title = models.CharField(max_length=100)
slug = models.SlugField(max_length=100)
content = models.TextField()
| from django.db import models
class Article(models.Model):
created = models.DateTimeField(auto_now_add=True)
published = models.DateTimeField(null=True, blank=True)
title = models.CharField(max_length=100)
slug = models.SlugField(max_length=100)
content = models.TextField()
| null | [
0,
1,
2,
3
] |
1,406 | e59763991974f4bfcd126879dd9aabd44bd89419 | <mask token>
def get_jwst_coords(outDir):
log.info('Creating and saving aperture')
jwst_pup = poppy.MultiHexagonAperture(rings=2, flattoflat=FLAT_TO_FLAT)
jwst_pup.display(colorbar=False)
plt.title('JWST telescope pupil')
for i in range(NB_SEG + 1):
ycen, xcen = jwst_pup._hex_center(i)
plt.annotate(str(i), size='x-large', xy=(xcen - 0.1, ycen - 0.1))
plt.savefig(os.path.join(outDir, 'JWST_aperture.pdf'))
plt.clf()
jwst_pup.display(colorbar=False)
plt.title('JWST telescope exit pupil')
for i in range(NB_SEG + 1):
ycen, xcen = jwst_pup._hex_center(i)
ycen *= -1
plt.annotate(str(i), size='x-large', xy=(xcen - 0.1, ycen - 0.1))
plt.savefig(os.path.join(outDir, 'JWST_exit_pupil.pdf'))
pupil_dir = jwst_pup.sample(wavelength=WVLN, npix=IM_SIZE_PUPIL,
grid_size=FLAT_DIAM, return_scale=True)
util.write_fits(pupil_dir[0], os.path.join(outDir, 'pupil.fits'))
seg_position = np.zeros((NB_SEG, 2))
for i in range(NB_SEG + 1):
if i == 0:
continue
else:
seg_position[i - 1, 1], seg_position[i - 1, 0
] = jwst_pup._hex_center(i)
seg_position[i - 1, 1] *= -1
return seg_position
def nircam_coro(filter, fpm, ppm, Aber_WSS):
"""-- Deprecated function still used in analytical PASTIS and some notebooks. --
Create NIRCam image with specified filter and coronagraph, and aberration input.
Parameters
----------
filter : string
Filter name
fpm : string
Name of focal-plane mask
ppm : string
Name of Lyot stop (ppm = "pupil-plane mask")
Aber_WSS : list or array
list of Zernike coefficients ordered in WSS convention and in METERS
Returns
-------
psf_webbpsf : ndarray
PSF image
"""
nc = webbpsf.NIRCam()
nc.filter = filter
nc.image_mask = fpm
nc.pupil_mask = ppm
nc, ote = webbpsf.enable_adjustable_ote(nc)
nc.include_si_wfe = False
ote.reset()
ote.zero()
for i in range(NB_SEG):
seg = WSS_SEGS[i].split('-')[0]
ote._apply_hexikes_to_seg(seg, Aber_WSS[i, :])
psf_nc = nc.calc_psf(oversample=1, fov_pixels=int(IM_SIZE_E2E), nlambda=1)
psf_webbpsf = psf_nc[1].data
return psf_webbpsf
def nircam_nocoro(filter, Aber_WSS):
"""-- Deprecated function still used in analytical PASTIS and some notebooks. --
Parameters
----------
filter : string
Filter name
Aber_WSS : list or array
list of Zernike coefficients ordered in WSS convention and in METERS
Returns
-------
psf_webbpsf : ndarray
PSF image
"""
nc = webbpsf.NIRCam()
nc.filter = filter
nc, ote = webbpsf.enable_adjustable_ote(nc)
nc.include_si_wfe = False
ote.reset()
ote.zero()
for i in range(NB_SEG):
seg = WSS_SEGS[i].split('-')[0]
ote._apply_hexikes_to_seg(seg, Aber_WSS[i, :])
psf_nc = nc.calc_psf(oversample=1, fov_pixels=int(IM_SIZE_E2E), nlambda=1)
psf_webbpsf = psf_nc[1].data
return psf_webbpsf
def set_up_nircam():
"""Return a configured instance of the NIRCam simulator on JWST.
    Sets up the Lyot stop and filter from the configfile, turns off science instrument (SI) internal WFE and zeros
the OTE.
Returns
-------
Tuple of NIRCam instance, and its OTE
"""
nircam = webbpsf.NIRCam()
nircam.include_si_wfe = False
nircam.filter = CONFIG_PASTIS.get('JWST', 'filter_name')
nircam.pupil_mask = CONFIG_PASTIS.get('JWST', 'pupil_plane_stop')
nircam, ote = webbpsf.enable_adjustable_ote(nircam)
ote.zero(zero_original=True)
return nircam, ote
def set_up_cgi():
"""Return a configured instance of the CGI simulator on RST.
Sets up the Lyot stop and filter from the configfile, turns off science instrument (SI) internal WFE, and reads
the FPM setting from the configfile.
Returns
-------
CGI instrument instance
"""
webbpsf.setup_logging('ERROR')
mode_in = CONFIG_PASTIS.get('RST', 'mode')
nbactuator = int(CONFIG_PASTIS.get('RST', 'nb_subapertures'))
nbactuator_in = int(np.sqrt(nbactuator))
if nbactuator_in ** 2 != nbactuator:
error_msg = (
f'The number of subapertures from config_pastis.ini is {nbactuator}, which is not the square of the actuators per row (={nbactuator_in})!'
)
log.error(error_msg)
raise ValueError(error_msg)
cgi = webbpsf.roman.CGI(mode=mode_in, nbactuator=int(nbactuator_in))
cgi.include_si_wfe = False
cgi.apodizer = CONFIG_PASTIS.get('RST', 'apodizer')
cgi.fpm = CONFIG_PASTIS.get('RST', 'fpm')
cgi.lyotstop = CONFIG_PASTIS.get('RST', 'lyotstop')
cgi.camera = CONFIG_PASTIS.get('RST', 'camera')
cgi.filter = CONFIG_PASTIS.get('RST', 'filter_name')
return cgi
def display_ote_and_psf(inst, ote, opd_vmax=500, psf_vmax=0.1, title=
'OPD and PSF', **kwargs):
"""Display OTE and PSF of a JWST instrument next to each other.
Adapted from:
https://github.com/spacetelescope/webbpsf/blob/develop/notebooks/Simulated%20OTE%20Mirror%20Move%20Demo.ipynb
Parameters
----------
inst : WebbPSF instrument instance
for example: webbpsf.NIRCam()
ote :
OTE of inst, usually obtained with: instrument, ote = webbpsf.enable_adjustable_ote(instrument)
opd_vmax : float
max display value for the OPD
psf_vmax : float
        max display value for the PSF
title : string
plot title
kwargs
"""
psf = inst.calc_psf(nlambda=1)
plt.figure(figsize=(12, 8))
ax1 = plt.subplot(121)
ote.display_opd(ax=ax1, vmax=opd_vmax, colorbar_orientation=
'horizontal', title='OPD with aberrated segments')
plt.subplot(122)
webbpsf.display_psf(psf, ext=2, vmax=psf_vmax, vmin=psf_vmax / 10000.0,
colorbar_orientation='horizontal', title='PSF simulation')
plt.suptitle(title, fontsize=16)
| <mask token>
try:
import webbpsf
os.environ['WEBBPSF_PATH'] = CONFIG_PASTIS.get('local', 'webbpsf_data_path'
)
WSS_SEGS = webbpsf.constants.SEGNAMES_WSS_ORDER
except ImportError:
log.info('WebbPSF was not imported.')
<mask token>
def get_jwst_coords(outDir):
log.info('Creating and saving aperture')
jwst_pup = poppy.MultiHexagonAperture(rings=2, flattoflat=FLAT_TO_FLAT)
jwst_pup.display(colorbar=False)
plt.title('JWST telescope pupil')
for i in range(NB_SEG + 1):
ycen, xcen = jwst_pup._hex_center(i)
plt.annotate(str(i), size='x-large', xy=(xcen - 0.1, ycen - 0.1))
plt.savefig(os.path.join(outDir, 'JWST_aperture.pdf'))
plt.clf()
jwst_pup.display(colorbar=False)
plt.title('JWST telescope exit pupil')
for i in range(NB_SEG + 1):
ycen, xcen = jwst_pup._hex_center(i)
ycen *= -1
plt.annotate(str(i), size='x-large', xy=(xcen - 0.1, ycen - 0.1))
plt.savefig(os.path.join(outDir, 'JWST_exit_pupil.pdf'))
pupil_dir = jwst_pup.sample(wavelength=WVLN, npix=IM_SIZE_PUPIL,
grid_size=FLAT_DIAM, return_scale=True)
util.write_fits(pupil_dir[0], os.path.join(outDir, 'pupil.fits'))
seg_position = np.zeros((NB_SEG, 2))
for i in range(NB_SEG + 1):
if i == 0:
continue
else:
seg_position[i - 1, 1], seg_position[i - 1, 0
] = jwst_pup._hex_center(i)
seg_position[i - 1, 1] *= -1
return seg_position
def nircam_coro(filter, fpm, ppm, Aber_WSS):
"""-- Deprecated function still used in analytical PASTIS and some notebooks. --
Create NIRCam image with specified filter and coronagraph, and aberration input.
Parameters
----------
filter : string
Filter name
fpm : string
Name of focal-plane mask
ppm : string
Name of Lyot stop (ppm = "pupil-plane mask")
Aber_WSS : list or array
list of Zernike coefficients ordered in WSS convention and in METERS
Returns
-------
psf_webbpsf : ndarray
PSF image
"""
nc = webbpsf.NIRCam()
nc.filter = filter
nc.image_mask = fpm
nc.pupil_mask = ppm
nc, ote = webbpsf.enable_adjustable_ote(nc)
nc.include_si_wfe = False
ote.reset()
ote.zero()
for i in range(NB_SEG):
seg = WSS_SEGS[i].split('-')[0]
ote._apply_hexikes_to_seg(seg, Aber_WSS[i, :])
psf_nc = nc.calc_psf(oversample=1, fov_pixels=int(IM_SIZE_E2E), nlambda=1)
psf_webbpsf = psf_nc[1].data
return psf_webbpsf
def nircam_nocoro(filter, Aber_WSS):
"""-- Deprecated function still used in analytical PASTIS and some notebooks. --
Parameters
----------
filter : string
Filter name
Aber_WSS : list or array
list of Zernike coefficients ordered in WSS convention and in METERS
Returns
-------
psf_webbpsf : ndarray
PSF image
"""
nc = webbpsf.NIRCam()
nc.filter = filter
nc, ote = webbpsf.enable_adjustable_ote(nc)
nc.include_si_wfe = False
ote.reset()
ote.zero()
for i in range(NB_SEG):
seg = WSS_SEGS[i].split('-')[0]
ote._apply_hexikes_to_seg(seg, Aber_WSS[i, :])
psf_nc = nc.calc_psf(oversample=1, fov_pixels=int(IM_SIZE_E2E), nlambda=1)
psf_webbpsf = psf_nc[1].data
return psf_webbpsf
def set_up_nircam():
"""Return a configured instance of the NIRCam simulator on JWST.
    Sets up the Lyot stop and filter from the configfile, turns off science instrument (SI) internal WFE and zeros
the OTE.
Returns
-------
Tuple of NIRCam instance, and its OTE
"""
nircam = webbpsf.NIRCam()
nircam.include_si_wfe = False
nircam.filter = CONFIG_PASTIS.get('JWST', 'filter_name')
nircam.pupil_mask = CONFIG_PASTIS.get('JWST', 'pupil_plane_stop')
nircam, ote = webbpsf.enable_adjustable_ote(nircam)
ote.zero(zero_original=True)
return nircam, ote
def set_up_cgi():
"""Return a configured instance of the CGI simulator on RST.
Sets up the Lyot stop and filter from the configfile, turns off science instrument (SI) internal WFE, and reads
the FPM setting from the configfile.
Returns
-------
CGI instrument instance
"""
webbpsf.setup_logging('ERROR')
mode_in = CONFIG_PASTIS.get('RST', 'mode')
nbactuator = int(CONFIG_PASTIS.get('RST', 'nb_subapertures'))
nbactuator_in = int(np.sqrt(nbactuator))
if nbactuator_in ** 2 != nbactuator:
error_msg = (
f'The number of subapertures from config_pastis.ini is {nbactuator}, which is not the square of the actuators per row (={nbactuator_in})!'
)
log.error(error_msg)
raise ValueError(error_msg)
cgi = webbpsf.roman.CGI(mode=mode_in, nbactuator=int(nbactuator_in))
cgi.include_si_wfe = False
cgi.apodizer = CONFIG_PASTIS.get('RST', 'apodizer')
cgi.fpm = CONFIG_PASTIS.get('RST', 'fpm')
cgi.lyotstop = CONFIG_PASTIS.get('RST', 'lyotstop')
cgi.camera = CONFIG_PASTIS.get('RST', 'camera')
cgi.filter = CONFIG_PASTIS.get('RST', 'filter_name')
return cgi
def display_ote_and_psf(inst, ote, opd_vmax=500, psf_vmax=0.1, title=
'OPD and PSF', **kwargs):
"""Display OTE and PSF of a JWST instrument next to each other.
Adapted from:
https://github.com/spacetelescope/webbpsf/blob/develop/notebooks/Simulated%20OTE%20Mirror%20Move%20Demo.ipynb
Parameters
----------
inst : WebbPSF instrument instance
for example: webbpsf.NIRCam()
ote :
OTE of inst, usually obtained with: instrument, ote = webbpsf.enable_adjustable_ote(instrument)
opd_vmax : float
max display value for the OPD
psf_vmax : float
        max display value for the PSF
title : string
plot title
kwargs
"""
psf = inst.calc_psf(nlambda=1)
plt.figure(figsize=(12, 8))
ax1 = plt.subplot(121)
ote.display_opd(ax=ax1, vmax=opd_vmax, colorbar_orientation=
'horizontal', title='OPD with aberrated segments')
plt.subplot(122)
webbpsf.display_psf(psf, ext=2, vmax=psf_vmax, vmin=psf_vmax / 10000.0,
colorbar_orientation='horizontal', title='PSF simulation')
plt.suptitle(title, fontsize=16)
| <mask token>
log = logging.getLogger()
try:
import webbpsf
os.environ['WEBBPSF_PATH'] = CONFIG_PASTIS.get('local', 'webbpsf_data_path'
)
WSS_SEGS = webbpsf.constants.SEGNAMES_WSS_ORDER
except ImportError:
log.info('WebbPSF was not imported.')
NB_SEG = CONFIG_PASTIS.getint('JWST', 'nb_subapertures')
FLAT_TO_FLAT = CONFIG_PASTIS.getfloat('JWST', 'flat_to_flat')
WVLN = CONFIG_PASTIS.getfloat('JWST', 'lambda') * u.nm
IM_SIZE_PUPIL = CONFIG_PASTIS.getint('numerical', 'tel_size_px')
FLAT_DIAM = CONFIG_PASTIS.getfloat('JWST', 'flat_diameter') * u.m
IM_SIZE_E2E = CONFIG_PASTIS.getint('numerical', 'im_size_px_webbpsf')
def get_jwst_coords(outDir):
log.info('Creating and saving aperture')
jwst_pup = poppy.MultiHexagonAperture(rings=2, flattoflat=FLAT_TO_FLAT)
jwst_pup.display(colorbar=False)
plt.title('JWST telescope pupil')
for i in range(NB_SEG + 1):
ycen, xcen = jwst_pup._hex_center(i)
plt.annotate(str(i), size='x-large', xy=(xcen - 0.1, ycen - 0.1))
plt.savefig(os.path.join(outDir, 'JWST_aperture.pdf'))
plt.clf()
jwst_pup.display(colorbar=False)
plt.title('JWST telescope exit pupil')
for i in range(NB_SEG + 1):
ycen, xcen = jwst_pup._hex_center(i)
ycen *= -1
plt.annotate(str(i), size='x-large', xy=(xcen - 0.1, ycen - 0.1))
plt.savefig(os.path.join(outDir, 'JWST_exit_pupil.pdf'))
pupil_dir = jwst_pup.sample(wavelength=WVLN, npix=IM_SIZE_PUPIL,
grid_size=FLAT_DIAM, return_scale=True)
util.write_fits(pupil_dir[0], os.path.join(outDir, 'pupil.fits'))
seg_position = np.zeros((NB_SEG, 2))
for i in range(NB_SEG + 1):
if i == 0:
continue
else:
seg_position[i - 1, 1], seg_position[i - 1, 0
] = jwst_pup._hex_center(i)
seg_position[i - 1, 1] *= -1
return seg_position
def nircam_coro(filter, fpm, ppm, Aber_WSS):
"""-- Deprecated function still used in analytical PASTIS and some notebooks. --
Create NIRCam image with specified filter and coronagraph, and aberration input.
Parameters
----------
filter : string
Filter name
fpm : string
Name of focal-plane mask
ppm : string
Name of Lyot stop (ppm = "pupil-plane mask")
Aber_WSS : list or array
list of Zernike coefficients ordered in WSS convention and in METERS
Returns
-------
psf_webbpsf : ndarray
PSF image
"""
nc = webbpsf.NIRCam()
nc.filter = filter
nc.image_mask = fpm
nc.pupil_mask = ppm
nc, ote = webbpsf.enable_adjustable_ote(nc)
nc.include_si_wfe = False
ote.reset()
ote.zero()
for i in range(NB_SEG):
seg = WSS_SEGS[i].split('-')[0]
ote._apply_hexikes_to_seg(seg, Aber_WSS[i, :])
psf_nc = nc.calc_psf(oversample=1, fov_pixels=int(IM_SIZE_E2E), nlambda=1)
psf_webbpsf = psf_nc[1].data
return psf_webbpsf
def nircam_nocoro(filter, Aber_WSS):
"""-- Deprecated function still used in analytical PASTIS and some notebooks. --
Parameters
----------
filter : string
Filter name
Aber_WSS : list or array
list of Zernike coefficients ordered in WSS convention and in METERS
Returns
-------
psf_webbpsf : ndarray
PSF image
"""
nc = webbpsf.NIRCam()
nc.filter = filter
nc, ote = webbpsf.enable_adjustable_ote(nc)
nc.include_si_wfe = False
ote.reset()
ote.zero()
for i in range(NB_SEG):
seg = WSS_SEGS[i].split('-')[0]
ote._apply_hexikes_to_seg(seg, Aber_WSS[i, :])
psf_nc = nc.calc_psf(oversample=1, fov_pixels=int(IM_SIZE_E2E), nlambda=1)
psf_webbpsf = psf_nc[1].data
return psf_webbpsf
def set_up_nircam():
"""Return a configured instance of the NIRCam simulator on JWST.
    Sets up the Lyot stop and filter from the configfile, turns off science instrument (SI) internal WFE and zeros
the OTE.
Returns
-------
Tuple of NIRCam instance, and its OTE
"""
nircam = webbpsf.NIRCam()
nircam.include_si_wfe = False
nircam.filter = CONFIG_PASTIS.get('JWST', 'filter_name')
nircam.pupil_mask = CONFIG_PASTIS.get('JWST', 'pupil_plane_stop')
nircam, ote = webbpsf.enable_adjustable_ote(nircam)
ote.zero(zero_original=True)
return nircam, ote
def set_up_cgi():
"""Return a configured instance of the CGI simulator on RST.
Sets up the Lyot stop and filter from the configfile, turns off science instrument (SI) internal WFE, and reads
the FPM setting from the configfile.
Returns
-------
CGI instrument instance
"""
webbpsf.setup_logging('ERROR')
mode_in = CONFIG_PASTIS.get('RST', 'mode')
nbactuator = int(CONFIG_PASTIS.get('RST', 'nb_subapertures'))
nbactuator_in = int(np.sqrt(nbactuator))
if nbactuator_in ** 2 != nbactuator:
error_msg = (
f'The number of subapertures from config_pastis.ini is {nbactuator}, which is not the square of the actuators per row (={nbactuator_in})!'
)
log.error(error_msg)
raise ValueError(error_msg)
cgi = webbpsf.roman.CGI(mode=mode_in, nbactuator=int(nbactuator_in))
cgi.include_si_wfe = False
cgi.apodizer = CONFIG_PASTIS.get('RST', 'apodizer')
cgi.fpm = CONFIG_PASTIS.get('RST', 'fpm')
cgi.lyotstop = CONFIG_PASTIS.get('RST', 'lyotstop')
cgi.camera = CONFIG_PASTIS.get('RST', 'camera')
cgi.filter = CONFIG_PASTIS.get('RST', 'filter_name')
return cgi
def display_ote_and_psf(inst, ote, opd_vmax=500, psf_vmax=0.1, title=
'OPD and PSF', **kwargs):
"""Display OTE and PSF of a JWST instrument next to each other.
Adapted from:
https://github.com/spacetelescope/webbpsf/blob/develop/notebooks/Simulated%20OTE%20Mirror%20Move%20Demo.ipynb
Parameters
----------
inst : WebbPSF instrument instance
for example: webbpsf.NIRCam()
ote :
OTE of inst, usually obtained with: instrument, ote = webbpsf.enable_adjustable_ote(instrument)
opd_vmax : float
max display value for the OPD
psf_vmax : float
        max display value for the PSF
title : string
plot title
kwargs
"""
psf = inst.calc_psf(nlambda=1)
plt.figure(figsize=(12, 8))
ax1 = plt.subplot(121)
ote.display_opd(ax=ax1, vmax=opd_vmax, colorbar_orientation=
'horizontal', title='OPD with aberrated segments')
plt.subplot(122)
webbpsf.display_psf(psf, ext=2, vmax=psf_vmax, vmin=psf_vmax / 10000.0,
colorbar_orientation='horizontal', title='PSF simulation')
plt.suptitle(title, fontsize=16)
| <mask token>
import os
import numpy as np
import matplotlib.pyplot as plt
import astropy.units as u
import logging
import poppy
from pastis.config import CONFIG_PASTIS
import pastis.util as util
log = logging.getLogger()
try:
import webbpsf
os.environ['WEBBPSF_PATH'] = CONFIG_PASTIS.get('local', 'webbpsf_data_path'
)
WSS_SEGS = webbpsf.constants.SEGNAMES_WSS_ORDER
except ImportError:
log.info('WebbPSF was not imported.')
NB_SEG = CONFIG_PASTIS.getint('JWST', 'nb_subapertures')
FLAT_TO_FLAT = CONFIG_PASTIS.getfloat('JWST', 'flat_to_flat')
WVLN = CONFIG_PASTIS.getfloat('JWST', 'lambda') * u.nm
IM_SIZE_PUPIL = CONFIG_PASTIS.getint('numerical', 'tel_size_px')
FLAT_DIAM = CONFIG_PASTIS.getfloat('JWST', 'flat_diameter') * u.m
IM_SIZE_E2E = CONFIG_PASTIS.getint('numerical', 'im_size_px_webbpsf')
def get_jwst_coords(outDir):
log.info('Creating and saving aperture')
jwst_pup = poppy.MultiHexagonAperture(rings=2, flattoflat=FLAT_TO_FLAT)
jwst_pup.display(colorbar=False)
plt.title('JWST telescope pupil')
for i in range(NB_SEG + 1):
ycen, xcen = jwst_pup._hex_center(i)
plt.annotate(str(i), size='x-large', xy=(xcen - 0.1, ycen - 0.1))
plt.savefig(os.path.join(outDir, 'JWST_aperture.pdf'))
plt.clf()
jwst_pup.display(colorbar=False)
plt.title('JWST telescope exit pupil')
for i in range(NB_SEG + 1):
ycen, xcen = jwst_pup._hex_center(i)
ycen *= -1
plt.annotate(str(i), size='x-large', xy=(xcen - 0.1, ycen - 0.1))
plt.savefig(os.path.join(outDir, 'JWST_exit_pupil.pdf'))
pupil_dir = jwst_pup.sample(wavelength=WVLN, npix=IM_SIZE_PUPIL,
grid_size=FLAT_DIAM, return_scale=True)
util.write_fits(pupil_dir[0], os.path.join(outDir, 'pupil.fits'))
seg_position = np.zeros((NB_SEG, 2))
for i in range(NB_SEG + 1):
if i == 0:
continue
else:
seg_position[i - 1, 1], seg_position[i - 1, 0
] = jwst_pup._hex_center(i)
seg_position[i - 1, 1] *= -1
return seg_position
def nircam_coro(filter, fpm, ppm, Aber_WSS):
"""-- Deprecated function still used in analytical PASTIS and some notebooks. --
Create NIRCam image with specified filter and coronagraph, and aberration input.
Parameters
----------
filter : string
Filter name
fpm : string
Name of focal-plane mask
ppm : string
Name of Lyot stop (ppm = "pupil-plane mask")
Aber_WSS : list or array
list of Zernike coefficients ordered in WSS convention and in METERS
Returns
-------
psf_webbpsf : ndarray
PSF image
"""
nc = webbpsf.NIRCam()
nc.filter = filter
nc.image_mask = fpm
nc.pupil_mask = ppm
nc, ote = webbpsf.enable_adjustable_ote(nc)
nc.include_si_wfe = False
ote.reset()
ote.zero()
for i in range(NB_SEG):
seg = WSS_SEGS[i].split('-')[0]
ote._apply_hexikes_to_seg(seg, Aber_WSS[i, :])
psf_nc = nc.calc_psf(oversample=1, fov_pixels=int(IM_SIZE_E2E), nlambda=1)
psf_webbpsf = psf_nc[1].data
return psf_webbpsf
def nircam_nocoro(filter, Aber_WSS):
"""-- Deprecated function still used in analytical PASTIS and some notebooks. --
Parameters
----------
filter : string
Filter name
Aber_WSS : list or array
list of Zernike coefficients ordered in WSS convention and in METERS
Returns
-------
psf_webbpsf : ndarray
PSF image
"""
nc = webbpsf.NIRCam()
nc.filter = filter
nc, ote = webbpsf.enable_adjustable_ote(nc)
nc.include_si_wfe = False
ote.reset()
ote.zero()
for i in range(NB_SEG):
seg = WSS_SEGS[i].split('-')[0]
ote._apply_hexikes_to_seg(seg, Aber_WSS[i, :])
psf_nc = nc.calc_psf(oversample=1, fov_pixels=int(IM_SIZE_E2E), nlambda=1)
psf_webbpsf = psf_nc[1].data
return psf_webbpsf
def set_up_nircam():
"""Return a configured instance of the NIRCam simulator on JWST.
    Sets up the Lyot stop and filter from the configfile, turns off science instrument (SI) internal WFE and zeros
the OTE.
Returns
-------
Tuple of NIRCam instance, and its OTE
"""
nircam = webbpsf.NIRCam()
nircam.include_si_wfe = False
nircam.filter = CONFIG_PASTIS.get('JWST', 'filter_name')
nircam.pupil_mask = CONFIG_PASTIS.get('JWST', 'pupil_plane_stop')
nircam, ote = webbpsf.enable_adjustable_ote(nircam)
ote.zero(zero_original=True)
return nircam, ote
def set_up_cgi():
"""Return a configured instance of the CGI simulator on RST.
Sets up the Lyot stop and filter from the configfile, turns off science instrument (SI) internal WFE, and reads
the FPM setting from the configfile.
Returns
-------
CGI instrument instance
"""
webbpsf.setup_logging('ERROR')
mode_in = CONFIG_PASTIS.get('RST', 'mode')
nbactuator = int(CONFIG_PASTIS.get('RST', 'nb_subapertures'))
nbactuator_in = int(np.sqrt(nbactuator))
if nbactuator_in ** 2 != nbactuator:
error_msg = (
f'The number of subapertures from config_pastis.ini is {nbactuator}, which is not the square of the actuators per row (={nbactuator_in})!'
)
log.error(error_msg)
raise ValueError(error_msg)
cgi = webbpsf.roman.CGI(mode=mode_in, nbactuator=int(nbactuator_in))
cgi.include_si_wfe = False
cgi.apodizer = CONFIG_PASTIS.get('RST', 'apodizer')
cgi.fpm = CONFIG_PASTIS.get('RST', 'fpm')
cgi.lyotstop = CONFIG_PASTIS.get('RST', 'lyotstop')
cgi.camera = CONFIG_PASTIS.get('RST', 'camera')
cgi.filter = CONFIG_PASTIS.get('RST', 'filter_name')
return cgi
def display_ote_and_psf(inst, ote, opd_vmax=500, psf_vmax=0.1, title=
'OPD and PSF', **kwargs):
"""Display OTE and PSF of a JWST instrument next to each other.
Adapted from:
https://github.com/spacetelescope/webbpsf/blob/develop/notebooks/Simulated%20OTE%20Mirror%20Move%20Demo.ipynb
Parameters
----------
inst : WebbPSF instrument instance
for example: webbpsf.NIRCam()
ote :
OTE of inst, usually obtained with: instrument, ote = webbpsf.enable_adjustable_ote(instrument)
opd_vmax : float
max display value for the OPD
psf_vmax : float
        max display value for the PSF
title : string
plot title
kwargs
"""
psf = inst.calc_psf(nlambda=1)
plt.figure(figsize=(12, 8))
ax1 = plt.subplot(121)
ote.display_opd(ax=ax1, vmax=opd_vmax, colorbar_orientation=
'horizontal', title='OPD with aberrated segments')
plt.subplot(122)
webbpsf.display_psf(psf, ext=2, vmax=psf_vmax, vmin=psf_vmax / 10000.0,
colorbar_orientation='horizontal', title='PSF simulation')
plt.suptitle(title, fontsize=16)
| """
This is a module containing convenience functions to create the JWST aperture and coronagraphic images with WebbPSF.
"""
import os
import numpy as np
import matplotlib.pyplot as plt
import astropy.units as u
import logging
import poppy
from pastis.config import CONFIG_PASTIS
import pastis.util as util
log = logging.getLogger()
try:
import webbpsf
# Setting to ensure that PyCharm finds the webbpsf-data folder. If you don't know where it is, find it with:
# webbpsf.utils.get_webbpsf_data_path()
# --> e.g.: >>source activate pastis >>ipython >>import webbpsf >>webbpsf.utils.get_webbpsf_data_path()
os.environ['WEBBPSF_PATH'] = CONFIG_PASTIS.get('local', 'webbpsf_data_path')
WSS_SEGS = webbpsf.constants.SEGNAMES_WSS_ORDER
except ImportError:
log.info('WebbPSF was not imported.')
NB_SEG = CONFIG_PASTIS.getint('JWST', 'nb_subapertures')
FLAT_TO_FLAT = CONFIG_PASTIS.getfloat('JWST', 'flat_to_flat')
WVLN = CONFIG_PASTIS.getfloat('JWST', 'lambda') * u.nm
IM_SIZE_PUPIL = CONFIG_PASTIS.getint('numerical', 'tel_size_px')
FLAT_DIAM = CONFIG_PASTIS.getfloat('JWST', 'flat_diameter') * u.m
IM_SIZE_E2E = CONFIG_PASTIS.getint('numerical', 'im_size_px_webbpsf')
def get_jwst_coords(outDir):
### Generate the pupil with segments and spiders
# Use poppy to create JWST aperture without spiders
log.info('Creating and saving aperture')
jwst_pup = poppy.MultiHexagonAperture(rings=2, flattoflat=FLAT_TO_FLAT) # Create JWST pupil without spiders
jwst_pup.display(colorbar=False) # Show pupil (will be saved to file)
plt.title('JWST telescope pupil')
# Number the segments
for i in range(NB_SEG + 1):
ycen, xcen = jwst_pup._hex_center(i)
plt.annotate(str(i), size='x-large', xy=(xcen - 0.1, ycen - 0.1)) # -0.1 is for shifting the numbers closer to the segment centers
# Save a PDF version of the pupil
plt.savefig(os.path.join(outDir, 'JWST_aperture.pdf'))
# Since WebbPSF creates images by controlling the exit pupil,
# let's also create the exit pupil instead of the entrance pupil.
# I do this by flipping the y-coordinates of the segments.
plt.clf()
jwst_pup.display(colorbar=False) # Show pupil
plt.title('JWST telescope exit pupil')
# Number the segments
for i in range(NB_SEG + 1):
ycen, xcen = jwst_pup._hex_center(i)
ycen *= -1
plt.annotate(str(i), size='x-large', xy=(xcen - 0.1, ycen - 0.1)) # -0.1 is for shifting the number labels closer to the segment centers
# Save a PDF version of the exit pupil
plt.savefig(os.path.join(outDir, 'JWST_exit_pupil.pdf'))
# Get pupil as fits image
pupil_dir = jwst_pup.sample(wavelength=WVLN, npix=IM_SIZE_PUPIL, grid_size=FLAT_DIAM, return_scale=True)
# If the image size is equivalent to the total diameter of the telescope, we don't have to worry about sampling later
# But for the JWST case with poppy it makes such a small difference that I am skipping it for now
util.write_fits(pupil_dir[0], os.path.join(outDir, 'pupil.fits'))
### Get the coordinates of the central pixel of each segment
seg_position = np.zeros((NB_SEG, 2)) # holds x and y position of each central pixel
for i in range(NB_SEG + 1): # our pupil is still counting the central segment as seg 0, so we need to include it
# in the loop, however, we will just discard the values for the center
if i == 0: # Segment 0 is the central segment, which we want to skip and not put into seg_position
continue # Continues with the next iteration of the loop
else:
seg_position[i - 1, 1], seg_position[i - 1, 0] = jwst_pup._hex_center(i) # y, x = center position
seg_position[i - 1, 1] *= -1 # inverting the y-axis because we want to work with the EXIT PUPIL!!!
# Units are meters!!!
return seg_position
def nircam_coro(filter, fpm, ppm, Aber_WSS):
"""-- Deprecated function still used in analytical PASTIS and some notebooks. --
Create NIRCam image with specified filter and coronagraph, and aberration input.
Parameters
----------
filter : string
Filter name
fpm : string
Name of focal-plane mask
ppm : string
Name of Lyot stop (ppm = "pupil-plane mask")
Aber_WSS : list or array
list of Zernike coefficients ordered in WSS convention and in METERS
Returns
-------
psf_webbpsf : ndarray
PSF image
"""
# Set up NIRCam and coronagraph
nc = webbpsf.NIRCam()
nc.filter = filter
nc.image_mask = fpm
nc.pupil_mask = ppm
# Adjust OTE with aberrations
nc, ote = webbpsf.enable_adjustable_ote(nc)
nc.include_si_wfe = False # set SI internal WFE to zero
ote.reset()
ote.zero()
for i in range(NB_SEG):
seg = WSS_SEGS[i].split('-')[0]
ote._apply_hexikes_to_seg(seg, Aber_WSS[i, :])
# Calculate PSF
psf_nc = nc.calc_psf(oversample=1, fov_pixels=int(IM_SIZE_E2E), nlambda=1)
psf_webbpsf = psf_nc[1].data
return psf_webbpsf
def nircam_nocoro(filter, Aber_WSS):
"""-- Deprecated function still used in analytical PASTIS and some notebooks. --
Parameters
----------
filter : string
Filter name
Aber_WSS : list or array
list of Zernike coefficients ordered in WSS convention and in METERS
Returns
-------
psf_webbpsf : ndarray
PSF image
"""
# Create NIRCam object
nc = webbpsf.NIRCam()
# Set filter
nc.filter = filter
# Adjust OTE with aberrations
nc, ote = webbpsf.enable_adjustable_ote(nc)
nc.include_si_wfe = False # set SI internal WFE to zero
ote.reset()
ote.zero()
for i in range(NB_SEG):
seg = WSS_SEGS[i].split('-')[0]
ote._apply_hexikes_to_seg(seg, Aber_WSS[i, :])
# Calculate PSF
psf_nc = nc.calc_psf(oversample=1, fov_pixels=int(IM_SIZE_E2E), nlambda=1)
psf_webbpsf = psf_nc[1].data
return psf_webbpsf
def set_up_nircam():
"""Return a configured instance of the NIRCam simulator on JWST.
    Sets up the Lyot stop and filter from the configfile, turns off science instrument (SI) internal WFE and zeros
the OTE.
Returns
-------
Tuple of NIRCam instance, and its OTE
"""
nircam = webbpsf.NIRCam()
nircam.include_si_wfe = False
nircam.filter = CONFIG_PASTIS.get('JWST', 'filter_name')
nircam.pupil_mask = CONFIG_PASTIS.get('JWST', 'pupil_plane_stop')
nircam, ote = webbpsf.enable_adjustable_ote(nircam)
ote.zero(zero_original=True) # https://github.com/spacetelescope/webbpsf/blob/96537c459996f682ac6e9af808809ca13fb85e87/webbpsf/opds.py#L1125
return nircam, ote
def set_up_cgi():
"""Return a configured instance of the CGI simulator on RST.
Sets up the Lyot stop and filter from the configfile, turns off science instrument (SI) internal WFE, and reads
the FPM setting from the configfile.
Returns
-------
CGI instrument instance
"""
webbpsf.setup_logging('ERROR')
    # Set actuator numbers
mode_in = CONFIG_PASTIS.get('RST', 'mode')
nbactuator = int(CONFIG_PASTIS.get('RST', 'nb_subapertures'))
nbactuator_in = int(np.sqrt(nbactuator))
if nbactuator_in**2 != nbactuator:
error_msg = f"The number of subapertures from config_pastis.ini is {nbactuator}, which is not the square of the actuators per row (={nbactuator_in})!"
log.error(error_msg)
raise ValueError(error_msg)
cgi = webbpsf.roman.CGI(mode=mode_in, nbactuator=int(nbactuator_in))
cgi.include_si_wfe = False
cgi.apodizer = CONFIG_PASTIS.get('RST', 'apodizer')
cgi.fpm = CONFIG_PASTIS.get('RST', 'fpm')
cgi.lyotstop = CONFIG_PASTIS.get('RST', 'lyotstop')
cgi.camera = CONFIG_PASTIS.get('RST', 'camera')
cgi.filter = CONFIG_PASTIS.get('RST', 'filter_name')
return cgi
def display_ote_and_psf(inst, ote, opd_vmax=500, psf_vmax=0.1, title="OPD and PSF", **kwargs):
"""Display OTE and PSF of a JWST instrument next to each other.
Adapted from:
https://github.com/spacetelescope/webbpsf/blob/develop/notebooks/Simulated%20OTE%20Mirror%20Move%20Demo.ipynb
Parameters
----------
inst : WebbPSF instrument instance
for example: webbpsf.NIRCam()
ote :
OTE of inst, usually obtained with: instrument, ote = webbpsf.enable_adjustable_ote(instrument)
opd_vmax : float
max display value for the OPD
psf_vmax : float
        max display value for the PSF
title : string
plot title
kwargs
"""
psf = inst.calc_psf(nlambda=1)
plt.figure(figsize=(12, 8))
ax1 = plt.subplot(121)
ote.display_opd(ax=ax1, vmax=opd_vmax, colorbar_orientation='horizontal', title='OPD with aberrated segments')
plt.subplot(122)
webbpsf.display_psf(psf, ext=2, vmax=psf_vmax, vmin=psf_vmax / 1e4, colorbar_orientation='horizontal', title="PSF simulation")
plt.suptitle(title, fontsize=16)
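# --- Illustrative usage sketch, not part of the original module ---
# Exercises the helpers defined above; it assumes WebbPSF and its data files are
# installed and that the 'JWST' entries referenced by set_up_nircam() exist in
# the pastis configfile.
if __name__ == '__main__':
    nircam, ote = set_up_nircam()
    display_ote_and_psf(nircam, ote, title='Unaberrated OTE and NIRCam PSF')
    plt.show()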
| [
6,
7,
8,
9,
10
] |
1,407 | 39b6ca21b8d4856e2b2edfcbd00b75fbce6dfff7 | <mask token>
class addUserForm(forms.Form):
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
| <mask token>
class addUserForm(forms.Form):
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
def clean(self):
cleaned_data = super(addUserForm, self).clean()
username = cleaned_data.get('username')
first_name = cleaned_data.get('first_name')
last_name = cleaned_data.get('last_name')
email = cleaned_data.get('email')
password = cleaned_data.get('password')
confirm_password = cleaned_data.get('confirm_password')
if (not username and not first_name and not last_name and not email and
not password and not confirm_password):
raise forms.ValidationError('There are errors in the fields...!')
| <mask token>
class addUserForm(forms.Form):
username = forms.CharField(label='User Name', required='required',
disabled='', min_length=6, max_length=128, help_text='', widget=
forms.TextInput(attrs={'style': '', 'placeholder': ''}))
first_name = forms.CharField(label='First Name', required='required',
disabled='', min_length=3, max_length=128, help_text='')
last_name = forms.CharField(label='Last Name', required='required',
disabled='', min_length=3, max_length=128, help_text='')
email = forms.EmailField(label='Email', required='required', disabled=
'', min_length=6, max_length=128, help_text='', validators=[
validate_domainonly_email])
password = forms.CharField(label='Password', required='required',
disabled='', min_length=6, max_length=128, help_text='', validators
=[RegexValidator('^(\\w+\\d+|\\d+\\w+)+$', message=
'Password should be a combination of Alphabets and Numbers')])
confirm_password = forms.CharField(label='Confirm Password', required=
'required', disabled='', min_length=6, max_length=128, help_text='')
def clean(self):
cleaned_data = super(addUserForm, self).clean()
username = cleaned_data.get('username')
first_name = cleaned_data.get('first_name')
last_name = cleaned_data.get('last_name')
email = cleaned_data.get('email')
password = cleaned_data.get('password')
confirm_password = cleaned_data.get('confirm_password')
if (not username and not first_name and not last_name and not email and
not password and not confirm_password):
raise forms.ValidationError('There are errors in the fields...!')
| from django import forms
from django.core.validators import RegexValidator
from dashboard.validators import validate_domainonly_email
class addUserForm(forms.Form):
username = forms.CharField(label='User Name', required='required',
disabled='', min_length=6, max_length=128, help_text='', widget=
forms.TextInput(attrs={'style': '', 'placeholder': ''}))
first_name = forms.CharField(label='First Name', required='required',
disabled='', min_length=3, max_length=128, help_text='')
last_name = forms.CharField(label='Last Name', required='required',
disabled='', min_length=3, max_length=128, help_text='')
email = forms.EmailField(label='Email', required='required', disabled=
'', min_length=6, max_length=128, help_text='', validators=[
validate_domainonly_email])
password = forms.CharField(label='Password', required='required',
disabled='', min_length=6, max_length=128, help_text='', validators
=[RegexValidator('^(\\w+\\d+|\\d+\\w+)+$', message=
'Password should be a combination of Alphabets and Numbers')])
confirm_password = forms.CharField(label='Confirm Password', required=
'required', disabled='', min_length=6, max_length=128, help_text='')
def clean(self):
cleaned_data = super(addUserForm, self).clean()
username = cleaned_data.get('username')
first_name = cleaned_data.get('first_name')
last_name = cleaned_data.get('last_name')
email = cleaned_data.get('email')
password = cleaned_data.get('password')
confirm_password = cleaned_data.get('confirm_password')
if (not username and not first_name and not last_name and not email and
not password and not confirm_password):
raise forms.ValidationError('There are errors in the fields...!')
| from django import forms
from django.core.validators import RegexValidator
from dashboard.validators import validate_domainonly_email
class addUserForm(forms.Form):
username = forms.CharField(label='User Name', required="required", disabled="", min_length=6, max_length=128,
help_text="",
widget=forms.TextInput(
attrs={
'style': '',
'placeholder': '',
}
))
first_name = forms.CharField(label='First Name', required="required", disabled="", min_length=3, max_length=128,
help_text="")
last_name = forms.CharField(label='Last Name', required="required", disabled="", min_length=3, max_length=128,
help_text="")
email = forms.EmailField(label='Email', required="required", disabled="", min_length=6, max_length=128,
help_text="", validators=[validate_domainonly_email])
password = forms.CharField(label='Password', required="required", disabled="", min_length=6, max_length=128,
help_text="", validators=[
RegexValidator('^(\w+\d+|\d+\w+)+$', message="Password should be a combination of Alphabets and Numbers")])
confirm_password = forms.CharField(label='Confirm Password', required="required", disabled="", min_length=6,
max_length=128,
help_text="")
def clean(self):
cleaned_data = super(addUserForm, self).clean()
username = cleaned_data.get('username')
first_name = cleaned_data.get('first_name')
last_name = cleaned_data.get('last_name')
email = cleaned_data.get('email')
password = cleaned_data.get('password')
confirm_password = cleaned_data.get('confirm_password')
if not username and not first_name and not last_name and not email and not password and not confirm_password:
raise forms.ValidationError('There are errors in the fields...!')
# class editUserForm(forms.Form):
# username = forms.CharField(label='User Name', required="required", disabled="disabled", min_length="6",
# max_length=128, help_text="")
# first_name = forms.CharField(label='First Name', max_length=254, help_text="")
# last_name = forms.CharField(label='Last Name', max_length=254, help_text="")
# email = forms.EmailField(label='Email', max_length=8, help_text="")
#
# def clean(self):
# cleaned_data = super(editUserForm, self).clean()
# username = cleaned_data.get('username')
# first_name = cleaned_data.get('first_name')
# last_name = cleaned_data.get('last_name')
# email = cleaned_data.get('email')
# if not username and not first_name and not last_name and not email:
# raise forms.ValidationError('There are errors in the fields...!')
| [
1,
2,
3,
4,
5
] |
1,408 | 0b0b22043dda94ea57344fb3bf47255ad85c7f5b | <mask token>
class SpotListView(APIView):
<mask token>
| <mask token>
def get_one_spot(region):
comments_data = get_comment_data()
data = {}
data['id'] = region.id
data['name'] = region.name
data['address'] = region.address
data['lng'] = region.lng
data['lat'] = region.lat
spot_comment_data = comments_data[comments_data['search_key'] == str(
region.search_key)]
data['commentNumber'] = spot_comment_data.iloc[:, 0].size
data['commentScore'] = get_score(spot_comment_data['comment_score'].mean())
return data
<mask token>
class SpotListView(APIView):
def get(self, request, *args, **kwargs):
try:
return get_spot_list(request)
except KeyError:
return json_error(error_string='请求错误', code=500)
| <mask token>
def get_one_spot(region):
comments_data = get_comment_data()
data = {}
data['id'] = region.id
data['name'] = region.name
data['address'] = region.address
data['lng'] = region.lng
data['lat'] = region.lat
spot_comment_data = comments_data[comments_data['search_key'] == str(
region.search_key)]
data['commentNumber'] = spot_comment_data.iloc[:, 0].size
data['commentScore'] = get_score(spot_comment_data['comment_score'].mean())
return data
def get_spot_list(request):
res = {}
try:
list = [get_one_spot(region) for region in regioninfo.objects]
res['list'] = list
return json_response(res)
except Exception:
return json_error(error_string='查询发生错误', code=12, api='spotlist')
class SpotListView(APIView):
def get(self, request, *args, **kwargs):
try:
return get_spot_list(request)
except KeyError:
return json_error(error_string='请求错误', code=500)
| from rest_framework.views import APIView
from ..Models.ConnectToDBModel import *
from ..Models.RegionInfoModel import *
from .CommonView import *
def get_one_spot(region):
comments_data = get_comment_data()
data = {}
data['id'] = region.id
data['name'] = region.name
data['address'] = region.address
data['lng'] = region.lng
data['lat'] = region.lat
spot_comment_data = comments_data[comments_data['search_key'] == str(
region.search_key)]
data['commentNumber'] = spot_comment_data.iloc[:, 0].size
data['commentScore'] = get_score(spot_comment_data['comment_score'].mean())
return data
def get_spot_list(request):
res = {}
try:
list = [get_one_spot(region) for region in regioninfo.objects]
res['list'] = list
return json_response(res)
except Exception:
return json_error(error_string='查询发生错误', code=12, api='spotlist')
class SpotListView(APIView):
def get(self, request, *args, **kwargs):
try:
return get_spot_list(request)
except KeyError:
return json_error(error_string='请求错误', code=500)
| # -*- coding: utf-8 -*-
from rest_framework.views import APIView
from ..Models.ConnectToDBModel import *
from ..Models.RegionInfoModel import *
from .CommonView import *
def get_one_spot(region):
comments_data = get_comment_data();
data = {};
data['id'] = region.id;
data['name'] = region.name;
data['address'] = region.address;
data['lng'] = region.lng;
data['lat'] = region.lat;
spot_comment_data = comments_data[(comments_data['search_key'] == str(region.search_key))]
data['commentNumber'] = spot_comment_data.iloc[:, 0].size;
data['commentScore'] = get_score(spot_comment_data['comment_score'].mean());
return data;
def get_spot_list(request):
#进行解码token
# username = decodeToken(request);
# print(username);
res = {};
try:
list = [get_one_spot(region) for region in regioninfo.objects];
# 返回所有的文档对象列表
res['list'] = list;
return json_response(res);
except Exception:
return json_error(error_string='查询发生错误',code = 12,api = "spotlist");
class SpotListView(APIView):
def get(self, request, *args, **kwargs):
try:
return get_spot_list(request);
except KeyError:
return json_error(error_string="请求错误", code=500);
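# --- Illustrative sketch, not part of the original file ---
# One way this view could be exposed through a Django URLconf (normally kept in
# a separate urls.py); the 'api/spots/' route is an assumption.
from django.urls import path

urlpatterns = [
    path('api/spots/', SpotListView.as_view()),
]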
| [
1,
3,
4,
5,
6
] |
1,409 | 11576597429e119cf4887a88139df4a9e6d7eb66 | <mask token>
| from tkinter import *
from tkinter import filedialog
from tkinter import scrolledtext
import tkinter as tk
import os
import sys
import subprocess
import shlex
from subprocess import check_output
import pathlib
| null | null | null | [
0,
1
] |
1,410 | 18bc8a8b1cbb544cfbe581e32ee5e509d67beafd | <mask token>
def main():
global targets
username, password = get_credentials('laozi')
remote_host = Machine(username, password)
for target in targets:
remote_host.connect(target)
stdin, stdout = remote_host.create_channel(target, input_file)
slb.send_cmd(stdin, stdout, input_file)
remote_dir = input('Which directory should I list?')
remote_host.list_content(remote_dir)
remote_file = input('Which file should I retrieve?')
for f in remote_file:
remote_host.retrieve(remote_dir, remote_file)
<mask token>
| <mask token>
def main():
global targets
username, password = get_credentials('laozi')
remote_host = Machine(username, password)
for target in targets:
remote_host.connect(target)
stdin, stdout = remote_host.create_channel(target, input_file)
slb.send_cmd(stdin, stdout, input_file)
remote_dir = input('Which directory should I list?')
remote_host.list_content(remote_dir)
remote_file = input('Which file should I retrieve?')
for f in remote_file:
remote_host.retrieve(remote_dir, remote_file)
if __name__ == '__main__':
main()
| <mask token>
targets = ['45.32.13.245']
input_file = 'cmd'
def main():
global targets
username, password = get_credentials('laozi')
remote_host = Machine(username, password)
for target in targets:
remote_host.connect(target)
stdin, stdout = remote_host.create_channel(target, input_file)
slb.send_cmd(stdin, stdout, input_file)
remote_dir = input('Which directory should I list?')
remote_host.list_content(remote_dir)
remote_file = input('Which file should I retrieve?')
for f in remote_file:
remote_host.retrieve(remote_dir, remote_file)
if __name__ == '__main__':
main()
| from connection import Machine
from credentials import get_credentials
targets = ['45.32.13.245']
input_file = 'cmd'
def main():
global targets
username, password = get_credentials('laozi')
remote_host = Machine(username, password)
for target in targets:
remote_host.connect(target)
stdin, stdout = remote_host.create_channel(target, input_file)
slb.send_cmd(stdin, stdout, input_file)
remote_dir = input('Which directory should I list?')
remote_host.list_content(remote_dir)
remote_file = input('Which file should I retrieve?')
for f in remote_file:
remote_host.retrieve(remote_dir, remote_file)
if __name__ == '__main__':
main()
| from connection import Machine
from credentials import get_credentials
targets = ['45.32.13.245']
#targets = ['localhost']
input_file = 'cmd'
def main():
global targets
username, password = get_credentials('laozi')
remote_host = Machine(username, password)
for target in targets:
remote_host.connect(target)
stdin, stdout = remote_host.create_channel(target, input_file)
slb.send_cmd(stdin, stdout, input_file)
remote_dir = input('Which directory should I list?')
remote_host.list_content(remote_dir)
remote_file = input('Which file should I retrieve?')
for f in remote_file:
remote_host.retrieve(remote_dir, remote_file)
if __name__ == '__main__':
main()
| [
1,
2,
3,
4,
5
] |
1,411 | 7701a98d836dc9551a4e2eb4b7d9c10307b3f665 | <mask token>
class NLPARM(object):
<mask token>
<mask token>
def __init__(self):
pass
def add(self, card=None, comment=''):
if comment:
self._comment = comment
self.nlparm_id = integer(card, 1, 'nlparm_id')
self.ninc = integer_or_blank(card, 2, 'ninc', 10)
self.dt = double_or_blank(card, 3, 'dt', 0.0)
self.kMethod = string_or_blank(card, 4, 'kMethod', 'AUTO')
self.kStep = integer_or_blank(card, 5, 'kStep', 5)
self.maxIter = integer_or_blank(card, 6, 'maxIter', 25)
self.conv = string_or_blank(card, 7, 'conv', 'PW')
self.intOut = string_or_blank(card, 8, 'intOut', 'NO')
self.epsU = double_or_blank(card, 9, 'epsU', 0.01)
self.epsP = double_or_blank(card, 10, 'epsP', 0.01)
self.epsW = double_or_blank(card, 11, 'epsW', 0.01)
self.maxDiv = integer_or_blank(card, 12, 'maxDiv', 3)
if self.kMethod == 'PFNT':
self.maxQn = integer_or_blank(card, 13, 'maxQn', 0)
else:
self.maxQn = integer_or_blank(card, 13, 'maxQn', self.maxIter)
self.maxLs = integer_or_blank(card, 14, 'maxLs', 4)
self.fStress = double_or_blank(card, 15, 'fStress', 0.2)
self.lsTol = double_or_blank(card, 16, 'lsTol', 0.5)
self.maxBisect = integer_or_blank(card, 17, '', 5)
self.maxR = double_or_blank(card, 21, 'maxR', 20.0)
self.rTolB = double_or_blank(card, 23, 'rTolB', 20.0)
assert len(card) <= 24, 'len(NLPARM card) = %i' % len(card)
def raw_fields(self):
list_fields = ['NLPARM', self.nlparm_id, self.ninc, self.dt, self.
kMethod, self.kStep, self.maxIter, self.conv, self.intOut, self
.epsU, self.epsP, self.epsW, self.maxDiv, self.maxQn, self.
maxLs, self.fStress, self.lsTol, self.maxBisect, None, None,
None, self.maxR, None, self.rTolB]
return list_fields
def repr_fields(self):
ninc = set_blank_if_default(self.ninc, 10)
dt = set_blank_if_default(self.dt, 0.0)
kMethod = set_blank_if_default(self.kMethod, 'AUTO')
kStep = set_blank_if_default(self.kStep, 5)
maxIter = set_blank_if_default(self.maxIter, 25)
conv = set_blank_if_default(self.conv, 'PW')
intOut = set_blank_if_default(self.intOut, 'NO')
epsU = set_blank_if_default(self.epsU, 0.01)
epsP = set_blank_if_default(self.epsP, 0.01)
epsW = set_blank_if_default(self.epsW, 0.01)
maxDiv = set_blank_if_default(self.maxDiv, 3)
maxQn = set_blank_if_default(self.maxQn, self.maxIter)
maxLs = set_blank_if_default(self.maxLs, 4)
fStress = set_blank_if_default(self.fStress, 0.2)
lsTol = set_blank_if_default(self.lsTol, 0.5)
maxBisect = set_blank_if_default(self.maxBisect, 5)
maxR = set_blank_if_default(self.maxR, 20.0)
rTolB = set_blank_if_default(self.rTolB, 20.0)
list_fields = ['NLPARM', self.nlparm_id, ninc, dt, kMethod, kStep,
maxIter, conv, intOut, epsU, epsP, epsW, maxDiv, maxQn, maxLs,
fStress, lsTol, maxBisect, None, None, None, maxR, None, rTolB]
return list_fields
def write_bdf(self, f, size=8):
card = self.raw_fields()
f.write(print_card(card, size=size))
| <mask token>
class NLPARM(object):
<mask token>
type = 'NLPARM'
def __init__(self):
pass
def add(self, card=None, comment=''):
if comment:
self._comment = comment
self.nlparm_id = integer(card, 1, 'nlparm_id')
self.ninc = integer_or_blank(card, 2, 'ninc', 10)
self.dt = double_or_blank(card, 3, 'dt', 0.0)
self.kMethod = string_or_blank(card, 4, 'kMethod', 'AUTO')
self.kStep = integer_or_blank(card, 5, 'kStep', 5)
self.maxIter = integer_or_blank(card, 6, 'maxIter', 25)
self.conv = string_or_blank(card, 7, 'conv', 'PW')
self.intOut = string_or_blank(card, 8, 'intOut', 'NO')
self.epsU = double_or_blank(card, 9, 'epsU', 0.01)
self.epsP = double_or_blank(card, 10, 'epsP', 0.01)
self.epsW = double_or_blank(card, 11, 'epsW', 0.01)
self.maxDiv = integer_or_blank(card, 12, 'maxDiv', 3)
if self.kMethod == 'PFNT':
self.maxQn = integer_or_blank(card, 13, 'maxQn', 0)
else:
self.maxQn = integer_or_blank(card, 13, 'maxQn', self.maxIter)
self.maxLs = integer_or_blank(card, 14, 'maxLs', 4)
self.fStress = double_or_blank(card, 15, 'fStress', 0.2)
self.lsTol = double_or_blank(card, 16, 'lsTol', 0.5)
self.maxBisect = integer_or_blank(card, 17, '', 5)
self.maxR = double_or_blank(card, 21, 'maxR', 20.0)
self.rTolB = double_or_blank(card, 23, 'rTolB', 20.0)
assert len(card) <= 24, 'len(NLPARM card) = %i' % len(card)
def raw_fields(self):
list_fields = ['NLPARM', self.nlparm_id, self.ninc, self.dt, self.
kMethod, self.kStep, self.maxIter, self.conv, self.intOut, self
.epsU, self.epsP, self.epsW, self.maxDiv, self.maxQn, self.
maxLs, self.fStress, self.lsTol, self.maxBisect, None, None,
None, self.maxR, None, self.rTolB]
return list_fields
def repr_fields(self):
ninc = set_blank_if_default(self.ninc, 10)
dt = set_blank_if_default(self.dt, 0.0)
kMethod = set_blank_if_default(self.kMethod, 'AUTO')
kStep = set_blank_if_default(self.kStep, 5)
maxIter = set_blank_if_default(self.maxIter, 25)
conv = set_blank_if_default(self.conv, 'PW')
intOut = set_blank_if_default(self.intOut, 'NO')
epsU = set_blank_if_default(self.epsU, 0.01)
epsP = set_blank_if_default(self.epsP, 0.01)
epsW = set_blank_if_default(self.epsW, 0.01)
maxDiv = set_blank_if_default(self.maxDiv, 3)
maxQn = set_blank_if_default(self.maxQn, self.maxIter)
maxLs = set_blank_if_default(self.maxLs, 4)
fStress = set_blank_if_default(self.fStress, 0.2)
lsTol = set_blank_if_default(self.lsTol, 0.5)
maxBisect = set_blank_if_default(self.maxBisect, 5)
maxR = set_blank_if_default(self.maxR, 20.0)
rTolB = set_blank_if_default(self.rTolB, 20.0)
list_fields = ['NLPARM', self.nlparm_id, ninc, dt, kMethod, kStep,
maxIter, conv, intOut, epsU, epsP, epsW, maxDiv, maxQn, maxLs,
fStress, lsTol, maxBisect, None, None, None, maxR, None, rTolB]
return list_fields
def write_bdf(self, f, size=8):
card = self.raw_fields()
f.write(print_card(card, size=size))
| <mask token>
class NLPARM(object):
"""
Defines a set of parameters for nonlinear static analysis iteration
strategy.
+--------+--------+------+------+---------+-------+---------+---------+--------+
| 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 |
+========+========+======+======+=========+=======+=========+=========+========+
| NLPARM | ID | NINC | DT | KMETHOD | KSTEP | MAXITER | CONV | INTOUT |
+--------+--------+------+------+---------+-------+---------+---------+--------+
| | ESPU | EPSP | EPSW | MAXDIV | MAXQN | MAXLS | FSTRESS | LSTOL |
+--------+--------+------+------+---------+-------+---------+---------+--------+
| | MAXBIS | | | | MAXR | | RTOLB | CONV |
+--------+--------+------+------+---------+-------+---------+---------+--------+
"""
type = 'NLPARM'
def __init__(self):
pass
def add(self, card=None, comment=''):
if comment:
self._comment = comment
self.nlparm_id = integer(card, 1, 'nlparm_id')
self.ninc = integer_or_blank(card, 2, 'ninc', 10)
self.dt = double_or_blank(card, 3, 'dt', 0.0)
self.kMethod = string_or_blank(card, 4, 'kMethod', 'AUTO')
self.kStep = integer_or_blank(card, 5, 'kStep', 5)
self.maxIter = integer_or_blank(card, 6, 'maxIter', 25)
self.conv = string_or_blank(card, 7, 'conv', 'PW')
self.intOut = string_or_blank(card, 8, 'intOut', 'NO')
self.epsU = double_or_blank(card, 9, 'epsU', 0.01)
self.epsP = double_or_blank(card, 10, 'epsP', 0.01)
self.epsW = double_or_blank(card, 11, 'epsW', 0.01)
self.maxDiv = integer_or_blank(card, 12, 'maxDiv', 3)
if self.kMethod == 'PFNT':
self.maxQn = integer_or_blank(card, 13, 'maxQn', 0)
else:
self.maxQn = integer_or_blank(card, 13, 'maxQn', self.maxIter)
self.maxLs = integer_or_blank(card, 14, 'maxLs', 4)
self.fStress = double_or_blank(card, 15, 'fStress', 0.2)
self.lsTol = double_or_blank(card, 16, 'lsTol', 0.5)
self.maxBisect = integer_or_blank(card, 17, '', 5)
self.maxR = double_or_blank(card, 21, 'maxR', 20.0)
self.rTolB = double_or_blank(card, 23, 'rTolB', 20.0)
assert len(card) <= 24, 'len(NLPARM card) = %i' % len(card)
def raw_fields(self):
list_fields = ['NLPARM', self.nlparm_id, self.ninc, self.dt, self.
kMethod, self.kStep, self.maxIter, self.conv, self.intOut, self
.epsU, self.epsP, self.epsW, self.maxDiv, self.maxQn, self.
maxLs, self.fStress, self.lsTol, self.maxBisect, None, None,
None, self.maxR, None, self.rTolB]
return list_fields
def repr_fields(self):
ninc = set_blank_if_default(self.ninc, 10)
dt = set_blank_if_default(self.dt, 0.0)
kMethod = set_blank_if_default(self.kMethod, 'AUTO')
kStep = set_blank_if_default(self.kStep, 5)
maxIter = set_blank_if_default(self.maxIter, 25)
conv = set_blank_if_default(self.conv, 'PW')
intOut = set_blank_if_default(self.intOut, 'NO')
epsU = set_blank_if_default(self.epsU, 0.01)
epsP = set_blank_if_default(self.epsP, 0.01)
epsW = set_blank_if_default(self.epsW, 0.01)
maxDiv = set_blank_if_default(self.maxDiv, 3)
maxQn = set_blank_if_default(self.maxQn, self.maxIter)
maxLs = set_blank_if_default(self.maxLs, 4)
fStress = set_blank_if_default(self.fStress, 0.2)
lsTol = set_blank_if_default(self.lsTol, 0.5)
maxBisect = set_blank_if_default(self.maxBisect, 5)
maxR = set_blank_if_default(self.maxR, 20.0)
rTolB = set_blank_if_default(self.rTolB, 20.0)
list_fields = ['NLPARM', self.nlparm_id, ninc, dt, kMethod, kStep,
maxIter, conv, intOut, epsU, epsP, epsW, maxDiv, maxQn, maxLs,
fStress, lsTol, maxBisect, None, None, None, maxR, None, rTolB]
return list_fields
def write_bdf(self, f, size=8):
card = self.raw_fields()
f.write(print_card(card, size=size))
| from pyNastran.bdf.fieldWriter import print_card
from pyNastran.bdf.bdfInterface.assign_type import integer, integer_or_blank, double_or_blank, string_or_blank
class NLPARM(object):
"""
Defines a set of parameters for nonlinear static analysis iteration
strategy.
+--------+--------+------+------+---------+-------+---------+---------+--------+
| 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 |
+========+========+======+======+=========+=======+=========+=========+========+
| NLPARM | ID | NINC | DT | KMETHOD | KSTEP | MAXITER | CONV | INTOUT |
+--------+--------+------+------+---------+-------+---------+---------+--------+
| | ESPU | EPSP | EPSW | MAXDIV | MAXQN | MAXLS | FSTRESS | LSTOL |
+--------+--------+------+------+---------+-------+---------+---------+--------+
| | MAXBIS | | | | MAXR | | RTOLB | CONV |
+--------+--------+------+------+---------+-------+---------+---------+--------+
"""
type = 'NLPARM'
def __init__(self):
pass
def add(self, card=None, comment=''):
if comment:
self._comment = comment
self.nlparm_id = integer(card, 1, 'nlparm_id')
self.ninc = integer_or_blank(card, 2, 'ninc', 10)
self.dt = double_or_blank(card, 3, 'dt', 0.0)
self.kMethod = string_or_blank(card, 4, 'kMethod', 'AUTO')
self.kStep = integer_or_blank(card, 5, 'kStep', 5)
self.maxIter = integer_or_blank(card, 6, 'maxIter', 25)
self.conv = string_or_blank(card, 7, 'conv', 'PW')
self.intOut = string_or_blank(card, 8, 'intOut', 'NO')
self.epsU = double_or_blank(card, 9, 'epsU', 0.01)
self.epsP = double_or_blank(card, 10, 'epsP', 0.01)
self.epsW = double_or_blank(card, 11, 'epsW', 0.01)
self.maxDiv = integer_or_blank(card, 12, 'maxDiv', 3)
if self.kMethod == 'PFNT':
self.maxQn = integer_or_blank(card, 13, 'maxQn', 0)
else:
self.maxQn = integer_or_blank(card, 13, 'maxQn', self.maxIter)
self.maxLs = integer_or_blank(card, 14, 'maxLs', 4)
self.fStress = double_or_blank(card, 15, 'fStress', 0.2)
self.lsTol = double_or_blank(card, 16, 'lsTol', 0.5)
self.maxBisect = integer_or_blank(card, 17, '', 5)
self.maxR = double_or_blank(card, 21, 'maxR', 20.0)
self.rTolB = double_or_blank(card, 23, 'rTolB', 20.0)
assert len(card) <= 24, 'len(NLPARM card) = %i' % len(card)
def raw_fields(self):
list_fields = ['NLPARM', self.nlparm_id, self.ninc, self.dt, self.
kMethod, self.kStep, self.maxIter, self.conv, self.intOut, self
.epsU, self.epsP, self.epsW, self.maxDiv, self.maxQn, self.
maxLs, self.fStress, self.lsTol, self.maxBisect, None, None,
None, self.maxR, None, self.rTolB]
return list_fields
def repr_fields(self):
ninc = set_blank_if_default(self.ninc, 10)
dt = set_blank_if_default(self.dt, 0.0)
kMethod = set_blank_if_default(self.kMethod, 'AUTO')
kStep = set_blank_if_default(self.kStep, 5)
maxIter = set_blank_if_default(self.maxIter, 25)
conv = set_blank_if_default(self.conv, 'PW')
intOut = set_blank_if_default(self.intOut, 'NO')
epsU = set_blank_if_default(self.epsU, 0.01)
epsP = set_blank_if_default(self.epsP, 0.01)
epsW = set_blank_if_default(self.epsW, 0.01)
maxDiv = set_blank_if_default(self.maxDiv, 3)
maxQn = set_blank_if_default(self.maxQn, self.maxIter)
maxLs = set_blank_if_default(self.maxLs, 4)
fStress = set_blank_if_default(self.fStress, 0.2)
lsTol = set_blank_if_default(self.lsTol, 0.5)
maxBisect = set_blank_if_default(self.maxBisect, 5)
maxR = set_blank_if_default(self.maxR, 20.0)
rTolB = set_blank_if_default(self.rTolB, 20.0)
list_fields = ['NLPARM', self.nlparm_id, ninc, dt, kMethod, kStep,
maxIter, conv, intOut, epsU, epsP, epsW, maxDiv, maxQn, maxLs,
fStress, lsTol, maxBisect, None, None, None, maxR, None, rTolB]
return list_fields
def write_bdf(self, f, size=8):
card = self.raw_fields()
f.write(print_card(card, size=size))
| from pyNastran.bdf.fieldWriter import print_card
from pyNastran.bdf.bdfInterface.assign_type import (integer, integer_or_blank,
double_or_blank, string_or_blank)
class NLPARM(object):
"""
Defines a set of parameters for nonlinear static analysis iteration
strategy.
+--------+--------+------+------+---------+-------+---------+---------+--------+
| 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 |
+========+========+======+======+=========+=======+=========+=========+========+
| NLPARM | ID | NINC | DT | KMETHOD | KSTEP | MAXITER | CONV | INTOUT |
+--------+--------+------+------+---------+-------+---------+---------+--------+
| | ESPU | EPSP | EPSW | MAXDIV | MAXQN | MAXLS | FSTRESS | LSTOL |
+--------+--------+------+------+---------+-------+---------+---------+--------+
| | MAXBIS | | | | MAXR | | RTOLB | CONV |
+--------+--------+------+------+---------+-------+---------+---------+--------+
"""
type = 'NLPARM'
def __init__(self):
pass
def add(self, card=None, comment=''):
if comment:
self._comment = comment
self.nlparm_id = integer(card, 1, 'nlparm_id')
self.ninc = integer_or_blank(card, 2, 'ninc', 10)
self.dt = double_or_blank(card, 3, 'dt', 0.0)
self.kMethod = string_or_blank(card, 4, 'kMethod', 'AUTO')
self.kStep = integer_or_blank(card, 5, 'kStep', 5)
self.maxIter = integer_or_blank(card, 6, 'maxIter', 25)
self.conv = string_or_blank(card, 7, 'conv', 'PW')
self.intOut = string_or_blank(card, 8, 'intOut', 'NO')
# line 2
self.epsU = double_or_blank(card, 9, 'epsU', 0.01)
self.epsP = double_or_blank(card, 10, 'epsP', 0.01)
self.epsW = double_or_blank(card, 11, 'epsW', 0.01)
self.maxDiv = integer_or_blank(card, 12, 'maxDiv', 3)
if self.kMethod == 'PFNT':
self.maxQn = integer_or_blank(card, 13, 'maxQn', 0)
else:
self.maxQn = integer_or_blank(card, 13, 'maxQn', self.maxIter)
self.maxLs = integer_or_blank(card, 14, 'maxLs', 4)
self.fStress = double_or_blank(card, 15, 'fStress', 0.2)
self.lsTol = double_or_blank(card, 16, 'lsTol', 0.5)
# line 3
self.maxBisect = integer_or_blank(card, 17, '', 5)
self.maxR = double_or_blank(card, 21, 'maxR', 20.)
self.rTolB = double_or_blank(card, 23, 'rTolB', 20.)
assert len(card) <= 24, 'len(NLPARM card) = %i' % len(card)
def raw_fields(self):
list_fields = ['NLPARM', self.nlparm_id, self.ninc, self.dt, self.kMethod,
self.kStep, self.maxIter, self.conv, self.intOut, self.epsU,
self.epsP, self.epsW, self.maxDiv, self.maxQn, self.maxLs,
self.fStress, self.lsTol, self.maxBisect, None, None, None,
self.maxR, None, self.rTolB]
return list_fields
def repr_fields(self):
ninc = set_blank_if_default(self.ninc, 10)
dt = set_blank_if_default(self.dt, 0.0)
kMethod = set_blank_if_default(self.kMethod, 'AUTO')
kStep = set_blank_if_default(self.kStep, 5)
maxIter = set_blank_if_default(self.maxIter, 25)
conv = set_blank_if_default(self.conv, 'PW')
intOut = set_blank_if_default(self.intOut, 'NO')
epsU = set_blank_if_default(self.epsU, 0.01)
epsP = set_blank_if_default(self.epsP, 0.01)
epsW = set_blank_if_default(self.epsW, 0.01)
maxDiv = set_blank_if_default(self.maxDiv, 3)
maxQn = set_blank_if_default(self.maxQn, self.maxIter)
maxLs = set_blank_if_default(self.maxLs, 4)
fStress = set_blank_if_default(self.fStress, 0.2)
lsTol = set_blank_if_default(self.lsTol, 0.5)
maxBisect = set_blank_if_default(self.maxBisect, 5)
maxR = set_blank_if_default(self.maxR, 20.)
rTolB = set_blank_if_default(self.rTolB, 20.)
list_fields = ['NLPARM', self.nlparm_id, ninc, dt, kMethod, kStep, maxIter,
conv, intOut, epsU, epsP, epsW, maxDiv, maxQn, maxLs,
fStress, lsTol, maxBisect, None, None, None, maxR, None,
rTolB]
return list_fields
def write_bdf(self, f, size=8):
card = self.raw_fields()
        f.write(print_card(card, size=size)) | [6, 7, 8, 9, 10] |
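A side note on the small-field layout documented in the NLPARM docstring above: each entry returned by raw_fields() occupies one 8-character column of the bulk-data deck. The sketch below only illustrates that padding idea with made-up values; it is not pyNastran's print_card and it ignores continuation fields.

# Hedged sketch: pad an NLPARM-style field list into 8-character columns,
# nine fields per printed row; None becomes a blank field.
fields = ['NLPARM', 1, 10, 0.0, 'AUTO', 5, 25, 'PW', 'NO',
          0.01, 0.01, 0.01, 3, 25, 4, 0.2, 0.5, 5]

def small_field_lines(fields, width=8, per_row=9):
    rows = (fields[i:i + per_row] for i in range(0, len(fields), per_row))
    return '\n'.join(
        ''.join(('' if f is None else str(f)).ljust(width) for f in row)
        for row in rows)

print(small_field_lines(fields))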
1,412 | 456d79a69c170a59af742648f16e0171cd5a2412 | <mask token>
def create_dir(directory):
if not os.path.exists(directory):
os.makedirs(directory)
def get_full_paths_to_files(files_dir, filenames):
return [os.path.join(files_dir, f) for f in filenames]
def process_images(im_filenames, cg, params):
finder, find_and_draw_lanes = lanefinder.create_objects(cg, params)
images = (mpimg.imread(fname) for fname in im_filenames)
return (find_and_draw_lanes(im) for im in images)
def save_images(images, destination_filenames):
for fname, im in zip(destination_filenames, images):
mpimg.imsave(fname, im)
<mask token>
def visualize_pipeline(fname_dst, cg=COMP_GRAPH, params=DEFAULT_PARAMS):
runner = CompGraphRunner(cg, frozen_tokens=params)
ag = to_agraph(runner.token_manager.to_networkx())
ag.layout('dot')
ag.draw(fname_dst)
<mask token>
| <mask token>
def create_dir(directory):
if not os.path.exists(directory):
os.makedirs(directory)
def get_full_paths_to_files(files_dir, filenames):
return [os.path.join(files_dir, f) for f in filenames]
def process_images(im_filenames, cg, params):
finder, find_and_draw_lanes = lanefinder.create_objects(cg, params)
images = (mpimg.imread(fname) for fname in im_filenames)
return (find_and_draw_lanes(im) for im in images)
def save_images(images, destination_filenames):
for fname, im in zip(destination_filenames, images):
mpimg.imsave(fname, im)
def process_and_save_video(video_fname_src, video_fname_dst, cg, params):
finder, find_and_draw_lanes = lanefinder.create_objects(cg, params)
video_src = VideoFileClip(video_fname_src)
video_dst = video_src.fl_image(find_and_draw_lanes)
video_dst.write_videofile(video_fname_dst, audio=False)
def visualize_pipeline(fname_dst, cg=COMP_GRAPH, params=DEFAULT_PARAMS):
runner = CompGraphRunner(cg, frozen_tokens=params)
ag = to_agraph(runner.token_manager.to_networkx())
ag.layout('dot')
ag.draw(fname_dst)
<mask token>
| <mask token>
def create_dir(directory):
if not os.path.exists(directory):
os.makedirs(directory)
def get_full_paths_to_files(files_dir, filenames):
return [os.path.join(files_dir, f) for f in filenames]
def process_images(im_filenames, cg, params):
finder, find_and_draw_lanes = lanefinder.create_objects(cg, params)
images = (mpimg.imread(fname) for fname in im_filenames)
return (find_and_draw_lanes(im) for im in images)
def save_images(images, destination_filenames):
for fname, im in zip(destination_filenames, images):
mpimg.imsave(fname, im)
def process_and_save_video(video_fname_src, video_fname_dst, cg, params):
finder, find_and_draw_lanes = lanefinder.create_objects(cg, params)
video_src = VideoFileClip(video_fname_src)
video_dst = video_src.fl_image(find_and_draw_lanes)
video_dst.write_videofile(video_fname_dst, audio=False)
def visualize_pipeline(fname_dst, cg=COMP_GRAPH, params=DEFAULT_PARAMS):
runner = CompGraphRunner(cg, frozen_tokens=params)
ag = to_agraph(runner.token_manager.to_networkx())
ag.layout('dot')
ag.draw(fname_dst)
if __name__ == '__main__':
""" INITIALIZATION """
im_dir_src = 'test_images'
im_dir_dst = 'test_images_output'
create_dir(im_dir_dst)
im_files_src = get_full_paths_to_files(im_dir_src, os.listdir(im_dir_src))
im_files_dst = get_full_paths_to_files(im_dir_dst, os.listdir(im_dir_src))
video_dir_src = 'test_videos'
video_dir_dst = 'test_videos_output'
create_dir(video_dir_dst)
video_files = 'solidWhiteRight.mp4', 'solidYellowLeft.mp4'
video_files_src = get_full_paths_to_files(video_dir_src, video_files)
video_files_dst = get_full_paths_to_files(video_dir_dst, video_files)
params_1 = DEFAULT_PARAMS.copy()
params_1['canny_lo'] = 50
params_1['canny_hi'] = 150
""" MEDIA GENERATION """
visualize_pipeline('pipeline.png')
images_dst = process_images(im_files_src, COMP_GRAPH, DEFAULT_PARAMS)
save_images(images_dst, im_files_dst)
process_and_save_video(video_files_src[0], video_files_dst[0],
COMP_GRAPH, DEFAULT_PARAMS)
process_and_save_video(video_files_src[1], video_files_dst[1],
COMP_GRAPH, params_1)
| <mask token>
COMP_GRAPH = lanespipeline.computational_graph
DEFAULT_PARAMS = lanespipeline.parameters
def create_dir(directory):
if not os.path.exists(directory):
os.makedirs(directory)
def get_full_paths_to_files(files_dir, filenames):
return [os.path.join(files_dir, f) for f in filenames]
def process_images(im_filenames, cg, params):
finder, find_and_draw_lanes = lanefinder.create_objects(cg, params)
images = (mpimg.imread(fname) for fname in im_filenames)
return (find_and_draw_lanes(im) for im in images)
def save_images(images, destination_filenames):
for fname, im in zip(destination_filenames, images):
mpimg.imsave(fname, im)
def process_and_save_video(video_fname_src, video_fname_dst, cg, params):
finder, find_and_draw_lanes = lanefinder.create_objects(cg, params)
video_src = VideoFileClip(video_fname_src)
video_dst = video_src.fl_image(find_and_draw_lanes)
video_dst.write_videofile(video_fname_dst, audio=False)
def visualize_pipeline(fname_dst, cg=COMP_GRAPH, params=DEFAULT_PARAMS):
runner = CompGraphRunner(cg, frozen_tokens=params)
ag = to_agraph(runner.token_manager.to_networkx())
ag.layout('dot')
ag.draw(fname_dst)
if __name__ == '__main__':
""" INITIALIZATION """
im_dir_src = 'test_images'
im_dir_dst = 'test_images_output'
create_dir(im_dir_dst)
im_files_src = get_full_paths_to_files(im_dir_src, os.listdir(im_dir_src))
im_files_dst = get_full_paths_to_files(im_dir_dst, os.listdir(im_dir_src))
video_dir_src = 'test_videos'
video_dir_dst = 'test_videos_output'
create_dir(video_dir_dst)
video_files = 'solidWhiteRight.mp4', 'solidYellowLeft.mp4'
video_files_src = get_full_paths_to_files(video_dir_src, video_files)
video_files_dst = get_full_paths_to_files(video_dir_dst, video_files)
params_1 = DEFAULT_PARAMS.copy()
params_1['canny_lo'] = 50
params_1['canny_hi'] = 150
""" MEDIA GENERATION """
visualize_pipeline('pipeline.png')
images_dst = process_images(im_files_src, COMP_GRAPH, DEFAULT_PARAMS)
save_images(images_dst, im_files_dst)
process_and_save_video(video_files_src[0], video_files_dst[0],
COMP_GRAPH, DEFAULT_PARAMS)
process_and_save_video(video_files_src[1], video_files_dst[1],
COMP_GRAPH, params_1)
| '''
Generate the output images and videos, including rendering of the pipeline
'''
import os
import matplotlib.image as mpimg
import cv2
from moviepy.editor import VideoFileClip
from networkx.drawing.nx_agraph import to_agraph
import lanespipeline
import lanefinder
from compgraph import CompGraph, CompGraphRunner
COMP_GRAPH = lanespipeline.computational_graph
DEFAULT_PARAMS = lanespipeline.parameters
def create_dir(directory):
if not os.path.exists(directory):
os.makedirs(directory)
def get_full_paths_to_files(files_dir, filenames):
return [os.path.join(files_dir, f) for f in filenames]
def process_images(im_filenames, cg, params):
finder, find_and_draw_lanes = lanefinder.create_objects(cg, params)
images = (mpimg.imread(fname) for fname in im_filenames)
return (find_and_draw_lanes(im) for im in images)
def save_images(images, destination_filenames):
for fname, im in zip(destination_filenames, images):
mpimg.imsave(fname, im)
def process_and_save_video(video_fname_src, video_fname_dst, cg, params):
finder, find_and_draw_lanes = lanefinder.create_objects(cg, params)
video_src = VideoFileClip(video_fname_src)
video_dst = video_src.fl_image(find_and_draw_lanes)
video_dst.write_videofile(video_fname_dst, audio=False)
def visualize_pipeline(fname_dst, cg=COMP_GRAPH, params=DEFAULT_PARAMS):
runner = CompGraphRunner(cg, frozen_tokens=params)
ag = to_agraph(runner.token_manager.to_networkx())
ag.layout('dot')
ag.draw(fname_dst)
if __name__ == '__main__':
''' INITIALIZATION '''
im_dir_src = 'test_images'
im_dir_dst = 'test_images_output'
create_dir(im_dir_dst)
im_files_src = get_full_paths_to_files(im_dir_src, os.listdir(im_dir_src))
im_files_dst = get_full_paths_to_files(im_dir_dst, os.listdir(im_dir_src))
video_dir_src = 'test_videos'
video_dir_dst = 'test_videos_output'
create_dir(video_dir_dst)
video_files = ('solidWhiteRight.mp4', 'solidYellowLeft.mp4')
video_files_src = get_full_paths_to_files(video_dir_src, video_files)
video_files_dst = get_full_paths_to_files(video_dir_dst, video_files)
params_1 = DEFAULT_PARAMS.copy()
params_1['canny_lo'] = 50
params_1['canny_hi'] = 150
''' MEDIA GENERATION '''
visualize_pipeline('pipeline.png')
images_dst = process_images(im_files_src, COMP_GRAPH, DEFAULT_PARAMS)
save_images(images_dst, im_files_dst)
process_and_save_video(video_files_src[0], video_files_dst[0], COMP_GRAPH, DEFAULT_PARAMS)
process_and_save_video(video_files_src[1], video_files_dst[1], COMP_GRAPH, params_1)
 | [5, 6, 7, 8, 10] |
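Worth noting about the helpers above: process_images() returns a generator expression, so no image is read or processed until save_images() iterates over it. A minimal stand-in illustration of that laziness, with a hypothetical fake_lane_drawer in place of the real find_and_draw_lanes closure:

# Hedged sketch of the lazy-generator style used by process_images()/save_images().
def fake_lane_drawer(image):
    return image[::-1]  # placeholder transform, just to show when work happens

def process(filenames):
    return (fake_lane_drawer(name) for name in filenames)  # generator: no work yet

results = process(['solidWhiteRight.jpg', 'solidYellowLeft.jpg'])
print(list(results))  # processing only happens here, when the generator is consumed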
1,413 | 86b24ddaae0d3477a3f82295224b7e84805eed91 | <mask token>
class AuthenticatorTest(absltest.TestCase):
<mask token>
def testGetGoogleSheetsServiceByCred_badFilePath_raisesFileNotFoundError(
self):
bad_file_path = './credential.json'
with self.assertRaises(FileNotFoundError):
authenticator.GetGoogleSheetsServiceByCredential(
gcp_credential_path=bad_file_path)
<mask token>
| <mask token>
class AuthenticatorTest(absltest.TestCase):
"""Tests GetGoogleSheetsService method in authenticator module."""
def testGetGoogleSheetsServiceByCred_badFilePath_raisesFileNotFoundError(
self):
bad_file_path = './credential.json'
with self.assertRaises(FileNotFoundError):
authenticator.GetGoogleSheetsServiceByCredential(
gcp_credential_path=bad_file_path)
<mask token>
| <mask token>
class AuthenticatorTest(absltest.TestCase):
"""Tests GetGoogleSheetsService method in authenticator module."""
def testGetGoogleSheetsServiceByCred_badFilePath_raisesFileNotFoundError(
self):
bad_file_path = './credential.json'
with self.assertRaises(FileNotFoundError):
authenticator.GetGoogleSheetsServiceByCredential(
gcp_credential_path=bad_file_path)
if __name__ == '__main__':
absltest.main()
| <mask token>
from absl.testing import absltest
from model import authenticator
class AuthenticatorTest(absltest.TestCase):
"""Tests GetGoogleSheetsService method in authenticator module."""
def testGetGoogleSheetsServiceByCred_badFilePath_raisesFileNotFoundError(
self):
bad_file_path = './credential.json'
with self.assertRaises(FileNotFoundError):
authenticator.GetGoogleSheetsServiceByCredential(
gcp_credential_path=bad_file_path)
if __name__ == '__main__':
absltest.main()
| # Copyright 2023 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the License);
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an AS IS BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for authenticator."""
from absl.testing import absltest
from model import authenticator
class AuthenticatorTest(absltest.TestCase):
"""Tests GetGoogleSheetsService method in authenticator module."""
def testGetGoogleSheetsServiceByCred_badFilePath_raisesFileNotFoundError(
self,
):
bad_file_path = './credential.json'
with self.assertRaises(FileNotFoundError):
authenticator.GetGoogleSheetsServiceByCredential(
gcp_credential_path=bad_file_path
)
if __name__ == '__main__':
absltest.main()
 | [2, 3, 4, 5, 6] |
1,414 | 8c05259ce577e6b6a6efdf778832e9bb817e47fd | <mask token>
| <mask token>
class Migration(migrations.Migration):
<mask token>
<mask token>
| <mask token>
class Migration(migrations.Migration):
dependencies = [migrations.swappable_dependency(settings.
AUTH_USER_MODEL), ('Assemblage', '0002_auto_20161014_1710')]
operations = [migrations.RemoveField(model_name='hotelingroup', name=
'negative_votes'), migrations.RemoveField(model_name='hotelingroup',
name='positive_votes'), migrations.RemoveField(model_name=
'hotelingroup', name='voters'), migrations.AddField(model_name=
'hotelingroup', name='negative_voters', field=models.
ManyToManyField(related_name='hotelingroup_negative_voters', to=
settings.AUTH_USER_MODEL)), migrations.AddField(model_name=
'hotelingroup', name='positive_voters', field=models.
ManyToManyField(related_name='hotelingroup_positive_voters', to=
settings.AUTH_USER_MODEL))]
| from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [migrations.swappable_dependency(settings.
AUTH_USER_MODEL), ('Assemblage', '0002_auto_20161014_1710')]
operations = [migrations.RemoveField(model_name='hotelingroup', name=
'negative_votes'), migrations.RemoveField(model_name='hotelingroup',
name='positive_votes'), migrations.RemoveField(model_name=
'hotelingroup', name='voters'), migrations.AddField(model_name=
'hotelingroup', name='negative_voters', field=models.
ManyToManyField(related_name='hotelingroup_negative_voters', to=
settings.AUTH_USER_MODEL)), migrations.AddField(model_name=
'hotelingroup', name='positive_voters', field=models.
ManyToManyField(related_name='hotelingroup_positive_voters', to=
settings.AUTH_USER_MODEL))]
| # -*- coding: utf-8 -*-
# Generated by Django 1.10.2 on 2016-10-14 19:37
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('Assemblage', '0002_auto_20161014_1710'),
]
operations = [
migrations.RemoveField(
model_name='hotelingroup',
name='negative_votes',
),
migrations.RemoveField(
model_name='hotelingroup',
name='positive_votes',
),
migrations.RemoveField(
model_name='hotelingroup',
name='voters',
),
migrations.AddField(
model_name='hotelingroup',
name='negative_voters',
field=models.ManyToManyField(related_name='hotelingroup_negative_voters', to=settings.AUTH_USER_MODEL),
),
migrations.AddField(
model_name='hotelingroup',
name='positive_voters',
field=models.ManyToManyField(related_name='hotelingroup_positive_voters', to=settings.AUTH_USER_MODEL),
),
]
 | [0, 1, 2, 3, 4] |
1,415 | 5fdcbccb99880da79eb0efbdecd328ca1cf73d7f | <mask token>
class SetupGuideLandingPageTests(WagtailPageTests):
<mask token>
<mask token>
class SetupGuidePageTests(WagtailPageTests):
def test_can_create_under_landing_page(self):
self.assertCanCreateAt(SetupGuideLandingPage, SetupGuidePage)
| <mask token>
class SetupGuideLandingPageTests(WagtailPageTests):
def test_can_create_under_homepage(self):
self.assertCanCreateAt(HomePage, SetupGuideLandingPage)
<mask token>
class SetupGuidePageTests(WagtailPageTests):
def test_can_create_under_landing_page(self):
self.assertCanCreateAt(SetupGuideLandingPage, SetupGuidePage)
| <mask token>
class SetupGuideLandingPageTests(WagtailPageTests):
def test_can_create_under_homepage(self):
self.assertCanCreateAt(HomePage, SetupGuideLandingPage)
def test_setup_guide_page_subpages(self):
self.assertAllowedSubpageTypes(SetupGuideLandingPage, {SetupGuidePage})
class SetupGuidePageTests(WagtailPageTests):
def test_can_create_under_landing_page(self):
self.assertCanCreateAt(SetupGuideLandingPage, SetupGuidePage)
| from wagtail.tests.utils import WagtailPageTests
from setup_guide.models import SetupGuideLandingPage, SetupGuidePage
from home.models import HomePage
class SetupGuideLandingPageTests(WagtailPageTests):
def test_can_create_under_homepage(self):
self.assertCanCreateAt(HomePage, SetupGuideLandingPage)
def test_setup_guide_page_subpages(self):
self.assertAllowedSubpageTypes(SetupGuideLandingPage, {SetupGuidePage})
class SetupGuidePageTests(WagtailPageTests):
def test_can_create_under_landing_page(self):
self.assertCanCreateAt(SetupGuideLandingPage, SetupGuidePage)
| from wagtail.tests.utils import WagtailPageTests
from setup_guide.models import SetupGuideLandingPage, SetupGuidePage
from home.models import HomePage
class SetupGuideLandingPageTests(WagtailPageTests):
def test_can_create_under_homepage(self):
self.assertCanCreateAt(HomePage, SetupGuideLandingPage)
def test_setup_guide_page_subpages(self):
# A SetupGuidePage can only have other SetupGuidePage children
self.assertAllowedSubpageTypes(
SetupGuideLandingPage, {SetupGuidePage})
class SetupGuidePageTests(WagtailPageTests):
def test_can_create_under_landing_page(self):
self.assertCanCreateAt(SetupGuideLandingPage, SetupGuidePage)
 | [3, 4, 5, 6, 7] |
1,416 | 678189ac5b0105c90178647843335f9d4402dc66 | <mask token>
def getOzoneData():
data = bus.read_i2c_block_data(80, 0, 2)
raw_adc = (data[0] & 15) * 256 + data[1]
ppm = 1.99 * raw_adc / 4096.0 + 0.01
return ppm
<mask token>
| <mask token>
def getOzoneData():
data = bus.read_i2c_block_data(80, 0, 2)
raw_adc = (data[0] & 15) * 256 + data[1]
ppm = 1.99 * raw_adc / 4096.0 + 0.01
return ppm
if __name__ == '__main__':
sampleTime = 1
while True:
data = bus.read_i2c_block_data(80, 0, 2)
raw_adc = (data[0] & 15) * 256 + data[1]
ppm = 1.99 * raw_adc / 4096.0 + 0.01
timestmp = str(datetime.datetime.utcnow()).split(' ')[1].split('.')[0]
time.sleep(sampleTime)
print(timestmp, 'UTC', 'Ozone Concentration : %.2f ppm' % ppm)
| <mask token>
bus = smbus.SMBus(1)
def getOzoneData():
data = bus.read_i2c_block_data(80, 0, 2)
raw_adc = (data[0] & 15) * 256 + data[1]
ppm = 1.99 * raw_adc / 4096.0 + 0.01
return ppm
if __name__ == '__main__':
sampleTime = 1
while True:
data = bus.read_i2c_block_data(80, 0, 2)
raw_adc = (data[0] & 15) * 256 + data[1]
ppm = 1.99 * raw_adc / 4096.0 + 0.01
timestmp = str(datetime.datetime.utcnow()).split(' ')[1].split('.')[0]
time.sleep(sampleTime)
print(timestmp, 'UTC', 'Ozone Concentration : %.2f ppm' % ppm)
| import smbus
import time, datetime
bus = smbus.SMBus(1)
def getOzoneData():
data = bus.read_i2c_block_data(80, 0, 2)
raw_adc = (data[0] & 15) * 256 + data[1]
ppm = 1.99 * raw_adc / 4096.0 + 0.01
return ppm
if __name__ == '__main__':
sampleTime = 1
while True:
data = bus.read_i2c_block_data(80, 0, 2)
raw_adc = (data[0] & 15) * 256 + data[1]
ppm = 1.99 * raw_adc / 4096.0 + 0.01
timestmp = str(datetime.datetime.utcnow()).split(' ')[1].split('.')[0]
time.sleep(sampleTime)
print(timestmp, 'UTC', 'Ozone Concentration : %.2f ppm' % ppm)
| #!/usr/bin/python3
# Distributed with a free-will license.
# Use it any way you want, profit or free, provided it fits in the licenses of its associated works.
# ADC121C_MQ131
# This code is designed to work with the ADC121C_I2CGAS_MQ131 I2C Mini Module available from ControlEverything.com.
# https://www.controleverything.com/content/Gas?sku=ADC121C_I2CGAS_MQ131#tabs-0-product_tabset-2
import smbus
import time, datetime
# Get I2C bus
bus = smbus.SMBus(1)
def getOzoneData():
data = bus.read_i2c_block_data(0x50, 0x00, 2)
# Convert the data to 12-bits
raw_adc = (data[0] & 0x0F) * 256 + data[1]
ppm = (1.99 * raw_adc) / 4096.0 + 0.01
return ppm
if __name__ == '__main__':
sampleTime = 1 # seconds
# ADC121C_MQ131 address, 0x50(80)
# Read data back from 0x00(00), 2 bytes
# raw_adc MSB, raw_adc LSB
while True:
data = bus.read_i2c_block_data(0x50, 0x00, 2)
# Convert the data to 12-bits
raw_adc = (data[0] & 0x0F) * 256 + data[1]
ppm = (1.99 * raw_adc) / 4096.0 + 0.01
timestmp = ((str(datetime.datetime.utcnow())).split(' ')[1]).split('.')[0]
time.sleep(sampleTime)
# Output data to screen
print(timestmp, "UTC", "Ozone Concentration : %.2f ppm" %ppm)
 | [1, 2, 3, 4, 5] |
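The byte-to-ppm conversion used above is easy to sanity-check by hand. A worked example with a hypothetical two-byte register read (no I2C bus required):

# Worked example of the ADC121C/MQ131 conversion shown above.
data = [0x07, 0xD0]                         # hypothetical bytes from the sensor
raw_adc = (data[0] & 0x0F) * 256 + data[1]  # 12-bit count: 7*256 + 208 = 2000
ppm = (1.99 * raw_adc) / 4096.0 + 0.01      # 1.99*2000/4096 + 0.01 ~= 0.98 ppm
print(raw_adc, round(ppm, 2))               # 2000 0.98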
1,417 | f2056ff46ce6e38c3b6ca553bbdec7f59d60b198 | <mask token>
| <mask token>
print("Usa el punto '.' para los decimales")
for contador in range(1, numalumnos + 1):
print(f'\nDatos del alumno número {contador} de {numalumnos}:')
teorica = float(input('- Introduce la nota de la parte teórica: '))
practica = float(input('- Introduce la nota de la parte practica: '))
nota = teorica * 60 / 100 + practica * 40 / 100
print(f'La nota final del alumno número {contador} es {nota:.2f}.\n')
print('Ya se han calculado todas las notas.')
| numalumnos = int(input('Introduce el número total de alumnos:\n'))
print("Usa el punto '.' para los decimales")
for contador in range(1, numalumnos + 1):
print(f'\nDatos del alumno número {contador} de {numalumnos}:')
teorica = float(input('- Introduce la nota de la parte teórica: '))
practica = float(input('- Introduce la nota de la parte practica: '))
nota = teorica * 60 / 100 + practica * 40 / 100
print(f'La nota final del alumno número {contador} es {nota:.2f}.\n')
print('Ya se han calculado todas las notas.')
 | # Exercise 28 - Sheet VI (5) - Report the weighted grade, using the given criterion
# (theory part 60%, practical part 40%), for each of a given number of students
numalumnos=int(input("Introduce el número total de alumnos:\n"))
print("Usa el punto '.' para los decimales")
for contador in range(1,numalumnos+1):
print(f"\nDatos del alumno número {contador} de {numalumnos}:")
teorica=float(input("- Introduce la nota de la parte teórica: "))
practica=float(input("- Introduce la nota de la parte practica: "))
nota=(teorica*60/100)+(practica*40/100)
print(f"La nota final del alumno número {contador} es {nota:.2f}.\n")
print("Ya se han calculado todas las notas.")
 | null | [0, 1, 2, 3] |
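For reference, the 60/40 weighting used above works out as follows for one hypothetical student:

# Worked example of the weighting above (60% theory, 40% practice).
teorica, practica = 7.0, 9.0
nota = teorica * 60 / 100 + practica * 40 / 100   # 4.2 + 3.6 = 7.8
print(f'{nota:.2f}')                              # 7.80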
1,418 | a52cbe6dbf4b4fc82d09e5f34e6e135933f3af38 | <mask token>
def main():
print('\n', sys.version_info)
try:
while True:
print('\n\nPress Ctrl+C to exit.')
usr = test()
out = binascii.hexlify(bytes(usr, encoding='utf8'))
print('\nHex:\t\t', out)
print('Base 10:\t', int(out, 16))
time.sleep(0.5)
except KeyboardInterrupt:
print('\tProgram Terminated\n\n')
sys.exit(0)
<mask token>
| <mask token>
def main():
print('\n', sys.version_info)
try:
while True:
print('\n\nPress Ctrl+C to exit.')
usr = test()
out = binascii.hexlify(bytes(usr, encoding='utf8'))
print('\nHex:\t\t', out)
print('Base 10:\t', int(out, 16))
time.sleep(0.5)
except KeyboardInterrupt:
print('\tProgram Terminated\n\n')
sys.exit(0)
def test():
while True:
usr = input('Enter the string to convert\n\n\t')
if usr != '':
return usr
else:
print('\nNo string entered.')
<mask token>
| <mask token>
def main():
print('\n', sys.version_info)
try:
while True:
print('\n\nPress Ctrl+C to exit.')
usr = test()
out = binascii.hexlify(bytes(usr, encoding='utf8'))
print('\nHex:\t\t', out)
print('Base 10:\t', int(out, 16))
time.sleep(0.5)
except KeyboardInterrupt:
print('\tProgram Terminated\n\n')
sys.exit(0)
def test():
while True:
usr = input('Enter the string to convert\n\n\t')
if usr != '':
return usr
else:
print('\nNo string entered.')
if __name__ == '__main__':
main()
| <mask token>
import sys
import time
import binascii
def main():
print('\n', sys.version_info)
try:
while True:
print('\n\nPress Ctrl+C to exit.')
usr = test()
out = binascii.hexlify(bytes(usr, encoding='utf8'))
print('\nHex:\t\t', out)
print('Base 10:\t', int(out, 16))
time.sleep(0.5)
except KeyboardInterrupt:
print('\tProgram Terminated\n\n')
sys.exit(0)
def test():
while True:
usr = input('Enter the string to convert\n\n\t')
if usr != '':
return usr
else:
print('\nNo string entered.')
if __name__ == '__main__':
main()
| #!/usr/bin/python3
'''
generator.py
This program inputs a string and outputs the corresponding hex
Creator: Ethan Knight
Email: [email protected]
Published: 20181116
'''
import sys
import time
import binascii
def main():
print("\n", sys.version_info)
try:
while True:
print("\n\nPress Ctrl+C to exit.")
usr=test()
out=binascii.hexlify(bytes(usr, encoding="utf8"))
print("\nHex:\t\t", out)
print("Base 10:\t", int(out,16))
time.sleep(.5)
except KeyboardInterrupt:
print("\tProgram Terminated\n\n")
sys.exit(0)
def test():
while True:
usr=input("Enter the string to convert\n\n\t")
if usr!="":
return usr
else:
print("\nNo string entered.")
if __name__=="__main__":
main()
 | [1, 2, 3, 4, 5] |
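The conversion performed in main() above can be checked in isolation; for example:

# Worked example of main()'s conversion: text -> hex digits -> base-10 integer.
import binascii
out = binascii.hexlify(bytes('Hi', encoding='utf8'))
print(out)            # b'4869'  ('H' is 0x48, 'i' is 0x69)
print(int(out, 16))   # 18537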
1,419 | 67516551b595c02e70a0ba4005df8a97ba71b17e | <mask token>
def fibonaci(n):
if n <= 1:
return n
F = np.empty(shape=n + 1)
F[0] = 0
F[1] = 1
for i in range(2, len(F)):
F[i] = F[i - 1] + F[i - 2]
return F[n]
<mask token>
| <mask token>
def fibonaci(n):
if n <= 1:
return n
F = np.empty(shape=n + 1)
F[0] = 0
F[1] = 1
for i in range(2, len(F)):
F[i] = F[i - 1] + F[i - 2]
return F[n]
<mask token>
print(int(fibonaci(n)))
| <mask token>
def fibonaci(n):
if n <= 1:
return n
F = np.empty(shape=n + 1)
F[0] = 0
F[1] = 1
for i in range(2, len(F)):
F[i] = F[i - 1] + F[i - 2]
return F[n]
n = int(input())
print(int(fibonaci(n)))
| import numpy as np
def fibonaci(n):
if n <= 1:
return n
F = np.empty(shape=n + 1)
F[0] = 0
F[1] = 1
for i in range(2, len(F)):
F[i] = F[i - 1] + F[i - 2]
return F[n]
n = int(input())
print(int(fibonaci(n)))
| # Uses python3
import numpy as np
def fibonaci(n):
if n <= 1:
return n
F = np.empty(shape=(n + 1))
F[0] = 0
F[1] = 1
for i in range(2, len(F)):
F[i] = F[i - 1] + F[i - 2]
return F[n]
n = int(input())
print(int(fibonaci(n)))
 | [1, 2, 3, 4, 5] |
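To make the bottom-up table concrete, this is what the loop above fills in for n = 7 (numpy is not needed for the trace itself):

# Worked example of the DP table for n = 7: fibonaci(7) returns 13.
F = [0, 1]
for i in range(2, 8):
    F.append(F[i - 1] + F[i - 2])
print(F, F[7])   # [0, 1, 1, 2, 3, 5, 8, 13] 13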
1,420 | 9c8a213fc8a7397662eebb74d6ee1ad34cb884d9 | <mask token>
def create_train_kmeans(data, number_of_clusters):
k = KMeans(n_clusters=number_of_clusters, n_jobs=-1, random_state=728)
start = time.time()
k.fit(data)
end = time.time()
print('Training took {} seconds'.format(end - start))
return k
<mask token>
| <mask token>
def load_imgs():
category_dir = os.listdir(DIR)
stats = []
result_imgs = []
result_labels = []
for thing in category_dir:
if thing != '.DS_Store':
label = thing
path = os.path.join(DIR, thing)
file_names = os.listdir(path)
for file in file_names:
result_labels.append(label)
image = cv2.imread(os.path.join(path, file))
image = cv2.resize(image, (224, 224))
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
image = np.asarray(image)
image = image / 255
result_imgs.append(image)
result_imgs = np.asarray(result_imgs)
result_labels = np.asarray(result_labels)
return result_imgs, result_labels
<mask token>
def create_train_kmeans(data, number_of_clusters):
k = KMeans(n_clusters=number_of_clusters, n_jobs=-1, random_state=728)
start = time.time()
k.fit(data)
end = time.time()
print('Training took {} seconds'.format(end - start))
return k
<mask token>
def print_scores(true, pred):
acc = accuracy_score(true, pred)
f1 = f1_score(true, pred, average='macro')
    return '\n\tF1 Score: {0:0.8f} | Accuracy: {1:0.8f}'.format(f1, acc)
| <mask token>
def load_imgs():
category_dir = os.listdir(DIR)
stats = []
result_imgs = []
result_labels = []
for thing in category_dir:
if thing != '.DS_Store':
label = thing
path = os.path.join(DIR, thing)
file_names = os.listdir(path)
for file in file_names:
result_labels.append(label)
image = cv2.imread(os.path.join(path, file))
image = cv2.resize(image, (224, 224))
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
image = np.asarray(image)
image = image / 255
result_imgs.append(image)
result_imgs = np.asarray(result_imgs)
result_labels = np.asarray(result_labels)
return result_imgs, result_labels
<mask token>
def create_train_kmeans(data, number_of_clusters):
k = KMeans(n_clusters=number_of_clusters, n_jobs=-1, random_state=728)
start = time.time()
k.fit(data)
end = time.time()
print('Training took {} seconds'.format(end - start))
return k
def cluster_label_count(clusters, labels):
count = {}
unique_clusters = list(set(clusters))
unique_labels = list(set(labels))
for cluster in unique_clusters:
count[cluster] = {}
for label in unique_labels:
count[cluster][label] = 0
for i in range(len(clusters)):
count[clusters[i]][labels[i]] += 1
cluster_df = pd.DataFrame(count)
return cluster_df
<mask token>
def print_scores(true, pred):
acc = accuracy_score(true, pred)
f1 = f1_score(true, pred, average='macro')
    return '\n\tF1 Score: {0:0.8f} | Accuracy: {1:0.8f}'.format(f1, acc)
| import time
import os, os.path
import random
import cv2
import glob
import keras
import matplotlib
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.cluster import KMeans
from sklearn.mixture import GaussianMixture
from sklearn.decomposition import PCA
import pandas as pd
import numpy as np
def load_imgs():
category_dir = os.listdir(DIR)
stats = []
result_imgs = []
result_labels = []
for thing in category_dir:
if thing != '.DS_Store':
label = thing
path = os.path.join(DIR, thing)
file_names = os.listdir(path)
for file in file_names:
result_labels.append(label)
image = cv2.imread(os.path.join(path, file))
image = cv2.resize(image, (224, 224))
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
image = np.asarray(image)
image = image / 255
result_imgs.append(image)
result_imgs = np.asarray(result_imgs)
result_labels = np.asarray(result_labels)
return result_imgs, result_labels
def covnet_transform(covnet_model, raw_images):
pred = covnet_model.predict(raw_images)
flat = pred.reshape(raw_images.shape[0], -1)
return flat
def create_train_kmeans(data, number_of_clusters):
k = KMeans(n_clusters=number_of_clusters, n_jobs=-1, random_state=728)
start = time.time()
k.fit(data)
end = time.time()
print('Training took {} seconds'.format(end - start))
return k
def cluster_label_count(clusters, labels):
count = {}
unique_clusters = list(set(clusters))
unique_labels = list(set(labels))
for cluster in unique_clusters:
count[cluster] = {}
for label in unique_labels:
count[cluster][label] = 0
for i in range(len(clusters)):
count[clusters[i]][labels[i]] += 1
cluster_df = pd.DataFrame(count)
return cluster_df
from sklearn.metrics import accuracy_score, f1_score
def print_scores(true, pred):
acc = accuracy_score(true, pred)
f1 = f1_score(true, pred, average='macro')
    return '\n\tF1 Score: {0:0.8f} | Accuracy: {1:0.8f}'.format(f1, acc)
| import time
import os, os.path
import random
import cv2
import glob
import keras
import matplotlib
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.cluster import KMeans
from sklearn.mixture import GaussianMixture
from sklearn.decomposition import PCA
import pandas as pd
import numpy as np
#DIR = "./Data_example_ph2"
#codes = os.listdir(DIR)
#codes.pop(0)
#codes.sort()
def load_imgs():
category_dir = os.listdir(DIR)
stats=[]
result_imgs = []
result_labels = []
for thing in category_dir:
if thing!='.DS_Store':
label= thing
path = os.path.join(DIR,thing)
file_names = os.listdir(path)
for file in file_names:
result_labels.append(label)
image = cv2.imread(os.path.join(path,file))
image = cv2.resize(image, (224,224))
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
image = np.asarray(image)
image =image/255
result_imgs.append(image)
result_imgs = np.asarray(result_imgs)
result_labels = np.asarray(result_labels)
return result_imgs,result_labels
#X_train,X_lables = load_imgs()
#vgg16_model = keras.applications.vgg16.VGG16(include_top=False, weights="imagenet", input_shape=(224,224,3))
def covnet_transform(covnet_model, raw_images):
# Pass our training data through the network
pred = covnet_model.predict(raw_images)
# Flatten the array
flat = pred.reshape(raw_images.shape[0], -1)
return flat
def create_train_kmeans(data, number_of_clusters):
# n_jobs is set to -1 to use all available CPU cores. This makes a big difference on an 8-core CPU
# especially when the data size gets much bigger. #perfMatters
k = KMeans(n_clusters=number_of_clusters, n_jobs=-1, random_state=728)
# Let's do some timings to see how long it takes to train.
start = time.time()
# Train it up
k.fit(data)
# Stop the timing
end = time.time()
# And see how long that took
print("Training took {} seconds".format(end-start))
return k
#vgg16_output = covnet_transform(vgg16_model, X_train)
#K_vgg16 = create_train_kmeans(vgg16_output)
#k_vgg16_pred = K_vgg16.predict(vgg16_output)
def cluster_label_count(clusters, labels):
count = {}
# Get unique clusters and labels
unique_clusters = list(set(clusters))
unique_labels = list(set(labels))
# Create counter for each cluster/label combination and set it to 0
for cluster in unique_clusters:
count[cluster] = {}
for label in unique_labels:
count[cluster][label] = 0
# Let's count
for i in range(len(clusters)):
count[clusters[i]][labels[i]] +=1
cluster_df = pd.DataFrame(count)
return cluster_df
#vgg16_pred_codes = [codes[x] for x in k_vgg16_pred]
from sklearn.metrics import accuracy_score, f1_score
def print_scores(true, pred):
acc = accuracy_score(true, pred)
f1 = f1_score(true, pred, average="macro")
return "\n\tF1 Score: {0:0.8f} | Accuracy: {0:0.8f}".format(f1,acc)
#print("KMeans VGG16:", print_scores(X_lables, vgg16_pred_codes))
 | [1, 3, 4, 6, 7] |
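A quick toy run of cluster_label_count() from the code above, to make the cluster-versus-label table concrete (no images or models needed); the animal labels are made up for illustration:

# Toy example of the counting done by cluster_label_count():
# columns are cluster ids, rows are class labels.
import pandas as pd

clusters = [0, 0, 1, 1, 1]
labels = ['cat', 'dog', 'dog', 'dog', 'cat']

count = {0: {'cat': 0, 'dog': 0}, 1: {'cat': 0, 'dog': 0}}
for c, l in zip(clusters, labels):
    count[c][l] += 1
print(pd.DataFrame(count))
#      0  1
# cat  1  1
# dog  1  2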
1,421 | e03dfa0e02313c5478d4e97dcaf3bc27915bd878 | <mask token>
class CalendarAppointmentSlot(models.Model):
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
@api.constrains('hour')
def check_hour(self):
if any(slot.hour < 0.0 or slot.hour >= 24.0 for slot in self):
raise ValidationError(_(
'Please enter a valid hour between 0:00 and 24:00 for your slots.'
))
def name_get(self):
weekdays = dict(self._fields['weekday'].selection)
return self.mapped(lambda slot: (slot.id, '%s, %02d:%02d' % (
weekdays.get(slot.weekday), int(slot.hour), int(round(slot.hour %
1 * 60)))))
class CalendarAppointmentQuestion(models.Model):
_name = 'calendar.appointment.question'
_description = 'Online Appointment : Questions'
_order = 'sequence'
sequence = fields.Integer('Sequence')
appointment_type_id = fields.Many2one('calendar.appointment.type',
'Appointment Type', ondelete='cascade')
name = fields.Char('Question', translate=True, required=True)
placeholder = fields.Char('Placeholder', translate=True)
question_required = fields.Boolean('Required Answer')
question_type = fields.Selection([('char', 'Single line text'), ('text',
'Multi-line text'), ('select', 'Dropdown (one answer)'), ('radio',
'Radio (one answer)'), ('checkbox', 'Checkboxes (multiple answers)'
)], 'Question Type', default='char')
answer_ids = fields.Many2many('calendar.appointment.answer',
'calendar_appointment_question_answer_rel', 'question_id',
'answer_id', string='Available Answers')
class CalendarAppointmentAnswer(models.Model):
_name = 'calendar.appointment.answer'
_description = 'Online Appointment : Answers'
question_id = fields.Many2many('calendar.appointment.question',
'calendar_appointment_question_answer_rel', 'answer_id',
'question_id', string='Questions')
name = fields.Char('Answer', translate=True, required=True)
| <mask token>
class CalendarAppointmentType(models.Model):
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
def _get_appointment_slots(self, timezone, employee=None):
""" Fetch available slots to book an appointment
:param timezone: timezone string e.g.: 'Europe/Brussels' or 'Etc/GMT+1'
:param employee: if set will only check available slots for this employee
:returns: list of dicts (1 per month) containing available slots per day per week.
complex structure used to simplify rendering of template
"""
self.ensure_one()
appt_tz = pytz.timezone(self.appointment_tz)
requested_tz = pytz.timezone(timezone)
first_day = requested_tz.fromutc(datetime.utcnow() + relativedelta(
hours=self.min_schedule_hours))
last_day = requested_tz.fromutc(datetime.utcnow() + relativedelta(
days=self.max_schedule_days))
slots = self._slots_generate(first_day.astimezone(appt_tz),
last_day.astimezone(appt_tz), timezone)
if not employee or employee in self.employee_ids:
self._slots_available(slots, first_day.astimezone(pytz.UTC),
last_day.astimezone(pytz.UTC), employee)
today = requested_tz.fromutc(datetime.utcnow())
start = today
month_dates_calendar = cal.Calendar(0).monthdatescalendar
months = []
while (start.year, start.month) <= (last_day.year, last_day.month):
dates = month_dates_calendar(start.year, start.month)
for week_index, week in enumerate(dates):
for day_index, day in enumerate(week):
mute_cls = weekend_cls = today_cls = None
today_slots = []
if day.weekday() in (cal.SUNDAY, cal.SATURDAY):
weekend_cls = 'o_weekend'
if day == today.date() and day.month == today.month:
today_cls = 'o_today'
if day.month != start.month:
mute_cls = 'text-muted o_mute_day'
else:
while slots and slots[0][timezone][0].date() <= day:
if slots[0][timezone][0].date(
) == day and 'employee_id' in slots[0]:
today_slots.append({'employee_id': slots[0]
['employee_id'].id, 'datetime': slots[0
][timezone][0].strftime(
'%Y-%m-%d %H:%M:%S'), 'hours': slots[0]
[timezone][0].strftime('%H:%M')})
slots.pop(0)
dates[week_index][day_index] = {'day': day, 'slots':
today_slots, 'mute_cls': mute_cls, 'weekend_cls':
weekend_cls, 'today_cls': today_cls}
months.append({'month': format_datetime(start, 'MMMM Y', locale
=get_lang(self.env).code), 'weeks': dates})
start = start + relativedelta(months=1)
return months
class CalendarAppointmentSlot(models.Model):
_name = 'calendar.appointment.slot'
_description = 'Online Appointment : Time Slot'
_rec_name = 'weekday'
_order = 'weekday, hour'
appointment_type_id = fields.Many2one('calendar.appointment.type',
'Appointment Type', ondelete='cascade')
weekday = fields.Selection([('1', 'Monday'), ('2', 'Tuesday'), ('3',
'Wednesday'), ('4', 'Thursday'), ('5', 'Friday'), ('6', 'Saturday'),
('7', 'Sunday')], string='Week Day', required=True)
hour = fields.Float('Starting Hour', required=True, default=8.0)
@api.constrains('hour')
def check_hour(self):
if any(slot.hour < 0.0 or slot.hour >= 24.0 for slot in self):
raise ValidationError(_(
'Please enter a valid hour between 0:00 and 24:00 for your slots.'
))
def name_get(self):
weekdays = dict(self._fields['weekday'].selection)
return self.mapped(lambda slot: (slot.id, '%s, %02d:%02d' % (
weekdays.get(slot.weekday), int(slot.hour), int(round(slot.hour %
1 * 60)))))
class CalendarAppointmentQuestion(models.Model):
_name = 'calendar.appointment.question'
_description = 'Online Appointment : Questions'
_order = 'sequence'
sequence = fields.Integer('Sequence')
appointment_type_id = fields.Many2one('calendar.appointment.type',
'Appointment Type', ondelete='cascade')
name = fields.Char('Question', translate=True, required=True)
placeholder = fields.Char('Placeholder', translate=True)
question_required = fields.Boolean('Required Answer')
question_type = fields.Selection([('char', 'Single line text'), ('text',
'Multi-line text'), ('select', 'Dropdown (one answer)'), ('radio',
'Radio (one answer)'), ('checkbox', 'Checkboxes (multiple answers)'
)], 'Question Type', default='char')
answer_ids = fields.Many2many('calendar.appointment.answer',
'calendar_appointment_question_answer_rel', 'question_id',
'answer_id', string='Available Answers')
class CalendarAppointmentAnswer(models.Model):
_name = 'calendar.appointment.answer'
_description = 'Online Appointment : Answers'
question_id = fields.Many2many('calendar.appointment.question',
'calendar_appointment_question_answer_rel', 'answer_id',
'question_id', string='Questions')
name = fields.Char('Answer', translate=True, required=True)
| <mask token>
class CalendarAppointmentType(models.Model):
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
def _compute_website_url(self):
super(CalendarAppointmentType, self)._compute_website_url()
for appointment_type in self:
if appointment_type.id:
appointment_type.website_url = '/calendar/%s/appointment' % (
slug(appointment_type),)
<mask token>
def action_calendar_meetings(self):
self.ensure_one()
action = self.env['ir.actions.actions']._for_xml_id(
'calendar.action_calendar_event')
action['context'] = {'default_appointment_type_id': self.id,
'search_default_appointment_type_id': self.id}
return action
<mask token>
<mask token>
def _get_appointment_slots(self, timezone, employee=None):
""" Fetch available slots to book an appointment
:param timezone: timezone string e.g.: 'Europe/Brussels' or 'Etc/GMT+1'
:param employee: if set will only check available slots for this employee
:returns: list of dicts (1 per month) containing available slots per day per week.
complex structure used to simplify rendering of template
"""
self.ensure_one()
appt_tz = pytz.timezone(self.appointment_tz)
requested_tz = pytz.timezone(timezone)
first_day = requested_tz.fromutc(datetime.utcnow() + relativedelta(
hours=self.min_schedule_hours))
last_day = requested_tz.fromutc(datetime.utcnow() + relativedelta(
days=self.max_schedule_days))
slots = self._slots_generate(first_day.astimezone(appt_tz),
last_day.astimezone(appt_tz), timezone)
if not employee or employee in self.employee_ids:
self._slots_available(slots, first_day.astimezone(pytz.UTC),
last_day.astimezone(pytz.UTC), employee)
today = requested_tz.fromutc(datetime.utcnow())
start = today
month_dates_calendar = cal.Calendar(0).monthdatescalendar
months = []
while (start.year, start.month) <= (last_day.year, last_day.month):
dates = month_dates_calendar(start.year, start.month)
for week_index, week in enumerate(dates):
for day_index, day in enumerate(week):
mute_cls = weekend_cls = today_cls = None
today_slots = []
if day.weekday() in (cal.SUNDAY, cal.SATURDAY):
weekend_cls = 'o_weekend'
if day == today.date() and day.month == today.month:
today_cls = 'o_today'
if day.month != start.month:
mute_cls = 'text-muted o_mute_day'
else:
while slots and slots[0][timezone][0].date() <= day:
if slots[0][timezone][0].date(
) == day and 'employee_id' in slots[0]:
today_slots.append({'employee_id': slots[0]
['employee_id'].id, 'datetime': slots[0
][timezone][0].strftime(
'%Y-%m-%d %H:%M:%S'), 'hours': slots[0]
[timezone][0].strftime('%H:%M')})
slots.pop(0)
dates[week_index][day_index] = {'day': day, 'slots':
today_slots, 'mute_cls': mute_cls, 'weekend_cls':
weekend_cls, 'today_cls': today_cls}
months.append({'month': format_datetime(start, 'MMMM Y', locale
=get_lang(self.env).code), 'weeks': dates})
start = start + relativedelta(months=1)
return months
class CalendarAppointmentSlot(models.Model):
_name = 'calendar.appointment.slot'
_description = 'Online Appointment : Time Slot'
_rec_name = 'weekday'
_order = 'weekday, hour'
appointment_type_id = fields.Many2one('calendar.appointment.type',
'Appointment Type', ondelete='cascade')
weekday = fields.Selection([('1', 'Monday'), ('2', 'Tuesday'), ('3',
'Wednesday'), ('4', 'Thursday'), ('5', 'Friday'), ('6', 'Saturday'),
('7', 'Sunday')], string='Week Day', required=True)
hour = fields.Float('Starting Hour', required=True, default=8.0)
@api.constrains('hour')
def check_hour(self):
if any(slot.hour < 0.0 or slot.hour >= 24.0 for slot in self):
raise ValidationError(_(
'Please enter a valid hour between 0:00 and 24:00 for your slots.'
))
def name_get(self):
weekdays = dict(self._fields['weekday'].selection)
return self.mapped(lambda slot: (slot.id, '%s, %02d:%02d' % (
weekdays.get(slot.weekday), int(slot.hour), int(round(slot.hour %
1 * 60)))))
class CalendarAppointmentQuestion(models.Model):
_name = 'calendar.appointment.question'
_description = 'Online Appointment : Questions'
_order = 'sequence'
sequence = fields.Integer('Sequence')
appointment_type_id = fields.Many2one('calendar.appointment.type',
'Appointment Type', ondelete='cascade')
name = fields.Char('Question', translate=True, required=True)
placeholder = fields.Char('Placeholder', translate=True)
question_required = fields.Boolean('Required Answer')
question_type = fields.Selection([('char', 'Single line text'), ('text',
'Multi-line text'), ('select', 'Dropdown (one answer)'), ('radio',
'Radio (one answer)'), ('checkbox', 'Checkboxes (multiple answers)'
)], 'Question Type', default='char')
answer_ids = fields.Many2many('calendar.appointment.answer',
'calendar_appointment_question_answer_rel', 'question_id',
'answer_id', string='Available Answers')
class CalendarAppointmentAnswer(models.Model):
_name = 'calendar.appointment.answer'
_description = 'Online Appointment : Answers'
question_id = fields.Many2many('calendar.appointment.question',
'calendar_appointment_question_answer_rel', 'answer_id',
'question_id', string='Questions')
name = fields.Char('Answer', translate=True, required=True)
| import calendar as cal
import random
import pytz
from datetime import datetime, timedelta, time
from dateutil import rrule
from dateutil.relativedelta import relativedelta
from babel.dates import format_datetime
from odoo import api, fields, models, _
from odoo.tools.misc import get_lang
from odoo.addons.base.models.res_partner import _tz_get
from odoo.addons.http_routing.models.ir_http import slug
from odoo.exceptions import ValidationError
class CalendarAppointmentType(models.Model):
_name = 'calendar.appointment.type'
_description = 'Online Appointment Type'
_inherit = ['mail.thread', 'website.seo.metadata',
'website.published.mixin']
_order = 'sequence'
sequence = fields.Integer('Sequence')
name = fields.Char('Appointment Type', required=True, translate=True)
min_schedule_hours = fields.Float('Schedule before (hours)', required=
True, default=1.0)
max_schedule_days = fields.Integer('Schedule not after (days)',
required=True, default=15)
min_cancellation_hours = fields.Float('Cancel Before (hours)', required
=True, default=1.0)
appointment_duration = fields.Float('Appointment Duration', required=
True, default=1.0)
reminder_ids = fields.Many2many('calendar.alarm', string='Reminders')
location = fields.Char('Location', help='Location of the appointments')
message_confirmation = fields.Html('Confirmation Message', translate=True)
message_intro = fields.Html('Introduction Message', translate=True)
country_ids = fields.Many2many('res.country',
'website_calendar_type_country_rel', string='Restrict Countries',
help=
'Keep empty to allow visitors from any country, otherwise you only allow visitors from selected countries'
)
question_ids = fields.One2many('calendar.appointment.question',
'appointment_type_id', string='Questions', copy=True)
slot_ids = fields.One2many('calendar.appointment.slot',
'appointment_type_id', 'Availabilities', copy=True)
appointment_tz = fields.Selection(_tz_get, string='Timezone', required=
True, default=lambda self: self.env.user.tz, help=
        'Timezone where appointments take place')
employee_ids = fields.Many2many('hr.employee',
'website_calendar_type_employee_rel', domain=[('user_id', '!=',
False)], string='Employees')
assignation_method = fields.Selection([('random', 'Random'), ('chosen',
'Chosen by the Customer')], string='Assignment Method', default=
'random', help=
'How employees will be assigned to meetings customers book on your website.'
)
appointment_count = fields.Integer('# Appointments', compute=
'_compute_appointment_count')
def _compute_appointment_count(self):
meeting_data = self.env['calendar.event'].read_group([(
'appointment_type_id', 'in', self.ids)], ['appointment_type_id'
], ['appointment_type_id'])
mapped_data = {m['appointment_type_id'][0]: m[
'appointment_type_id_count'] for m in meeting_data}
for appointment_type in self:
appointment_type.appointment_count = mapped_data.get(
appointment_type.id, 0)
def _compute_website_url(self):
super(CalendarAppointmentType, self)._compute_website_url()
for appointment_type in self:
if appointment_type.id:
appointment_type.website_url = '/calendar/%s/appointment' % (
slug(appointment_type),)
@api.returns('self', lambda value: value.id)
def copy(self, default=None):
default = default or {}
default['name'] = self.name + _(' (copy)')
return super(CalendarAppointmentType, self).copy(default=default)
def action_calendar_meetings(self):
self.ensure_one()
action = self.env['ir.actions.actions']._for_xml_id(
'calendar.action_calendar_event')
action['context'] = {'default_appointment_type_id': self.id,
'search_default_appointment_type_id': self.id}
return action
def _slots_generate(self, first_day, last_day, timezone):
""" Generate all appointment slots (in naive UTC, appointment timezone, and given (visitors) timezone)
between first_day and last_day (datetimes in appointment timezone)
:return: [ {'slot': slot_record, <timezone>: (date_start, date_end), ...},
... ]
"""
def append_slot(day, slot):
local_start = appt_tz.localize(datetime.combine(day, time(hour=
int(slot.hour), minute=int(round(slot.hour % 1 * 60)))))
local_end = appt_tz.localize(datetime.combine(day, time(hour=
int(slot.hour), minute=int(round(slot.hour % 1 * 60)))) +
relativedelta(hours=self.appointment_duration))
slots.append({self.appointment_tz: (local_start, local_end),
timezone: (local_start.astimezone(requested_tz), local_end.
astimezone(requested_tz)), 'UTC': (local_start.astimezone(
pytz.UTC).replace(tzinfo=None), local_end.astimezone(pytz.
UTC).replace(tzinfo=None)), 'slot': slot})
appt_tz = pytz.timezone(self.appointment_tz)
requested_tz = pytz.timezone(timezone)
slots = []
for slot in self.slot_ids.filtered(lambda x: int(x.weekday) ==
first_day.isoweekday()):
if slot.hour > first_day.hour + first_day.minute / 60.0:
append_slot(first_day.date(), slot)
slot_weekday = [(int(weekday) - 1) for weekday in self.slot_ids.
mapped('weekday')]
for day in rrule.rrule(rrule.DAILY, dtstart=first_day.date() +
timedelta(days=1), until=last_day.date(), byweekday=slot_weekday):
for slot in self.slot_ids.filtered(lambda x: int(x.weekday) ==
day.isoweekday()):
append_slot(day, slot)
return slots
def _slots_available(self, slots, first_day, last_day, employee=None):
""" Fills the slot stucture with an available employee
:param slots: slots structure generated by _slots_generate
:param first_day: start datetime in UTC
:param last_day: end datetime in UTC
:param employee: if set, only consider this employee
if not set, consider all employees assigned to this appointment type
"""
def is_work_available(start_dt, end_dt, intervals):
""" check if the slot is contained in the employee's work hours (defined by intervals)
"""
def find_start_index():
""" find the highest index of intervals for which the start_date (element [0]) is before (or at) start_dt
"""
def recursive_find_index(lower_bound, upper_bound):
if upper_bound - lower_bound <= 1:
if intervals[upper_bound][0] <= start_dt:
return upper_bound
return lower_bound
index = (upper_bound + lower_bound) // 2
if intervals[index][0] <= start_dt:
return recursive_find_index(index, upper_bound)
else:
return recursive_find_index(lower_bound, index)
if start_dt <= intervals[0][0] - tolerance:
return -1
if end_dt >= intervals[-1][1] + tolerance:
return -1
return recursive_find_index(0, len(intervals) - 1)
if not intervals:
return False
tolerance = timedelta(minutes=1)
start_index = find_start_index()
if start_index != -1:
for index in range(start_index, len(intervals)):
if intervals[index][1] >= end_dt - tolerance:
return True
if len(intervals) == index + 1 or intervals[index + 1][0
] - intervals[index][1] > tolerance:
return False
return False
def is_calendar_available(slot, events, employee):
""" Returns True if the given slot doesn't collide with given events for the employee
"""
start_dt = slot['UTC'][0]
end_dt = slot['UTC'][1]
event_in_scope = lambda ev: fields.Date.to_date(ev.start
) <= fields.Date.to_date(end_dt) and fields.Date.to_date(ev
.stop) >= fields.Date.to_date(start_dt)
for ev in events.filtered(event_in_scope):
if ev.allday:
event_tz = pytz.timezone(ev.event_tz or employee.
user_id.tz or self.env.user.tz or slot['slot'].
appointment_type_id.appointment_tz or 'UTC')
ev_start_dt = datetime.combine(fields.Date.from_string(
ev.start_date), time.min)
ev_stop_dt = datetime.combine(fields.Date.from_string(
ev.stop_date), time.max)
ev_start_dt = event_tz.localize(ev_start_dt).astimezone(
pytz.UTC).replace(tzinfo=None)
ev_stop_dt = event_tz.localize(ev_stop_dt).astimezone(pytz
.UTC).replace(tzinfo=None)
if ev_start_dt < end_dt and ev_stop_dt > start_dt:
return False
elif fields.Datetime.to_datetime(ev.start
) < end_dt and fields.Datetime.to_datetime(ev.stop
) > start_dt:
return False
return True
workhours = {}
meetings = {}
available_employees = [emp.with_context(tz=emp.user_id.tz) for emp in
employee or self.employee_ids]
random.shuffle(available_employees)
for slot in slots:
for emp_pos, emp in enumerate(available_employees):
if emp_pos not in workhours:
workhours[emp_pos] = [(interval[0].astimezone(pytz.UTC)
.replace(tzinfo=None), interval[1].astimezone(pytz.
UTC).replace(tzinfo=None)) for interval in emp.
resource_calendar_id._work_intervals_batch(
first_day, last_day, resources=emp.resource_id)[emp
.resource_id.id]]
if is_work_available(slot['UTC'][0], slot['UTC'][1],
workhours[emp_pos]):
if emp_pos not in meetings:
meetings[emp_pos] = self.env['calendar.event'].search([
('partner_ids.user_ids', '=', emp.user_id.id),
('start', '<', fields.Datetime.to_string(
last_day.replace(hour=23, minute=59, second=59)
)), ('stop', '>', fields.Datetime.to_string(
first_day.replace(hour=0, minute=0, second=0)))])
if is_calendar_available(slot, meetings[emp_pos], emp):
slot['employee_id'] = emp
break
def _get_appointment_slots(self, timezone, employee=None):
""" Fetch available slots to book an appointment
:param timezone: timezone string e.g.: 'Europe/Brussels' or 'Etc/GMT+1'
:param employee: if set will only check available slots for this employee
:returns: list of dicts (1 per month) containing available slots per day per week.
complex structure used to simplify rendering of template
"""
self.ensure_one()
appt_tz = pytz.timezone(self.appointment_tz)
requested_tz = pytz.timezone(timezone)
first_day = requested_tz.fromutc(datetime.utcnow() + relativedelta(
hours=self.min_schedule_hours))
last_day = requested_tz.fromutc(datetime.utcnow() + relativedelta(
days=self.max_schedule_days))
slots = self._slots_generate(first_day.astimezone(appt_tz),
last_day.astimezone(appt_tz), timezone)
if not employee or employee in self.employee_ids:
self._slots_available(slots, first_day.astimezone(pytz.UTC),
last_day.astimezone(pytz.UTC), employee)
today = requested_tz.fromutc(datetime.utcnow())
start = today
month_dates_calendar = cal.Calendar(0).monthdatescalendar
months = []
while (start.year, start.month) <= (last_day.year, last_day.month):
dates = month_dates_calendar(start.year, start.month)
for week_index, week in enumerate(dates):
for day_index, day in enumerate(week):
mute_cls = weekend_cls = today_cls = None
today_slots = []
if day.weekday() in (cal.SUNDAY, cal.SATURDAY):
weekend_cls = 'o_weekend'
if day == today.date() and day.month == today.month:
today_cls = 'o_today'
if day.month != start.month:
mute_cls = 'text-muted o_mute_day'
else:
while slots and slots[0][timezone][0].date() <= day:
if slots[0][timezone][0].date(
) == day and 'employee_id' in slots[0]:
today_slots.append({'employee_id': slots[0]
['employee_id'].id, 'datetime': slots[0
][timezone][0].strftime(
'%Y-%m-%d %H:%M:%S'), 'hours': slots[0]
[timezone][0].strftime('%H:%M')})
slots.pop(0)
dates[week_index][day_index] = {'day': day, 'slots':
today_slots, 'mute_cls': mute_cls, 'weekend_cls':
weekend_cls, 'today_cls': today_cls}
months.append({'month': format_datetime(start, 'MMMM Y', locale
=get_lang(self.env).code), 'weeks': dates})
start = start + relativedelta(months=1)
return months
class CalendarAppointmentSlot(models.Model):
_name = 'calendar.appointment.slot'
_description = 'Online Appointment : Time Slot'
_rec_name = 'weekday'
_order = 'weekday, hour'
appointment_type_id = fields.Many2one('calendar.appointment.type',
'Appointment Type', ondelete='cascade')
weekday = fields.Selection([('1', 'Monday'), ('2', 'Tuesday'), ('3',
'Wednesday'), ('4', 'Thursday'), ('5', 'Friday'), ('6', 'Saturday'),
('7', 'Sunday')], string='Week Day', required=True)
hour = fields.Float('Starting Hour', required=True, default=8.0)
@api.constrains('hour')
def check_hour(self):
if any(slot.hour < 0.0 or slot.hour >= 24.0 for slot in self):
raise ValidationError(_(
'Please enter a valid hour between 0:00 and 24:00 for your slots.'
))
def name_get(self):
weekdays = dict(self._fields['weekday'].selection)
return self.mapped(lambda slot: (slot.id, '%s, %02d:%02d' % (
weekdays.get(slot.weekday), int(slot.hour), int(round(slot.hour %
1 * 60)))))
class CalendarAppointmentQuestion(models.Model):
_name = 'calendar.appointment.question'
_description = 'Online Appointment : Questions'
_order = 'sequence'
sequence = fields.Integer('Sequence')
appointment_type_id = fields.Many2one('calendar.appointment.type',
'Appointment Type', ondelete='cascade')
name = fields.Char('Question', translate=True, required=True)
placeholder = fields.Char('Placeholder', translate=True)
question_required = fields.Boolean('Required Answer')
question_type = fields.Selection([('char', 'Single line text'), ('text',
'Multi-line text'), ('select', 'Dropdown (one answer)'), ('radio',
'Radio (one answer)'), ('checkbox', 'Checkboxes (multiple answers)'
)], 'Question Type', default='char')
answer_ids = fields.Many2many('calendar.appointment.answer',
'calendar_appointment_question_answer_rel', 'question_id',
'answer_id', string='Available Answers')
class CalendarAppointmentAnswer(models.Model):
_name = 'calendar.appointment.answer'
_description = 'Online Appointment : Answers'
question_id = fields.Many2many('calendar.appointment.question',
'calendar_appointment_question_answer_rel', 'answer_id',
'question_id', string='Questions')
name = fields.Char('Answer', translate=True, required=True)
| # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import calendar as cal
import random
import pytz
from datetime import datetime, timedelta, time
from dateutil import rrule
from dateutil.relativedelta import relativedelta
from babel.dates import format_datetime
from odoo import api, fields, models, _
from odoo.tools.misc import get_lang
from odoo.addons.base.models.res_partner import _tz_get
from odoo.addons.http_routing.models.ir_http import slug
from odoo.exceptions import ValidationError
class CalendarAppointmentType(models.Model):
_name = "calendar.appointment.type"
_description = "Online Appointment Type"
_inherit = ['mail.thread', "website.seo.metadata", 'website.published.mixin']
_order = "sequence"
sequence = fields.Integer('Sequence')
name = fields.Char('Appointment Type', required=True, translate=True)
min_schedule_hours = fields.Float('Schedule before (hours)', required=True, default=1.0)
max_schedule_days = fields.Integer('Schedule not after (days)', required=True, default=15)
min_cancellation_hours = fields.Float('Cancel Before (hours)', required=True, default=1.0)
appointment_duration = fields.Float('Appointment Duration', required=True, default=1.0)
reminder_ids = fields.Many2many('calendar.alarm', string="Reminders")
location = fields.Char('Location', help="Location of the appointments")
message_confirmation = fields.Html('Confirmation Message', translate=True)
message_intro = fields.Html('Introduction Message', translate=True)
country_ids = fields.Many2many(
'res.country', 'website_calendar_type_country_rel', string='Restrict Countries',
help="Keep empty to allow visitors from any country, otherwise you only allow visitors from selected countries")
question_ids = fields.One2many('calendar.appointment.question', 'appointment_type_id', string='Questions', copy=True)
slot_ids = fields.One2many('calendar.appointment.slot', 'appointment_type_id', 'Availabilities', copy=True)
appointment_tz = fields.Selection(
_tz_get, string='Timezone', required=True, default=lambda self: self.env.user.tz,
help="Timezone where appointment take place")
employee_ids = fields.Many2many('hr.employee', 'website_calendar_type_employee_rel', domain=[('user_id', '!=', False)], string='Employees')
assignation_method = fields.Selection([
('random', 'Random'),
('chosen', 'Chosen by the Customer')], string='Assignment Method', default='random',
help="How employees will be assigned to meetings customers book on your website.")
appointment_count = fields.Integer('# Appointments', compute='_compute_appointment_count')
def _compute_appointment_count(self):
meeting_data = self.env['calendar.event'].read_group([('appointment_type_id', 'in', self.ids)], ['appointment_type_id'], ['appointment_type_id'])
mapped_data = {m['appointment_type_id'][0]: m['appointment_type_id_count'] for m in meeting_data}
for appointment_type in self:
appointment_type.appointment_count = mapped_data.get(appointment_type.id, 0)
def _compute_website_url(self):
super(CalendarAppointmentType, self)._compute_website_url()
for appointment_type in self:
if appointment_type.id :
appointment_type.website_url = '/calendar/%s/appointment' % (slug(appointment_type),)
@api.returns('self', lambda value: value.id)
def copy(self, default=None):
default = default or {}
default['name'] = self.name + _(' (copy)')
return super(CalendarAppointmentType, self).copy(default=default)
def action_calendar_meetings(self):
self.ensure_one()
action = self.env["ir.actions.actions"]._for_xml_id("calendar.action_calendar_event")
action['context'] = {
'default_appointment_type_id': self.id,
'search_default_appointment_type_id': self.id
}
return action
# --------------------------------------
# Slots Generation
# --------------------------------------
def _slots_generate(self, first_day, last_day, timezone):
""" Generate all appointment slots (in naive UTC, appointment timezone, and given (visitors) timezone)
between first_day and last_day (datetimes in appointment timezone)
:return: [ {'slot': slot_record, <timezone>: (date_start, date_end), ...},
... ]
"""
def append_slot(day, slot):
local_start = appt_tz.localize(datetime.combine(day, time(hour=int(slot.hour), minute=int(round((slot.hour % 1) * 60)))))
local_end = appt_tz.localize(
datetime.combine(day, time(hour=int(slot.hour), minute=int(round((slot.hour % 1) * 60)))) + relativedelta(hours=self.appointment_duration))
slots.append({
self.appointment_tz: (
local_start,
local_end,
),
timezone: (
local_start.astimezone(requested_tz),
local_end.astimezone(requested_tz),
),
'UTC': (
local_start.astimezone(pytz.UTC).replace(tzinfo=None),
local_end.astimezone(pytz.UTC).replace(tzinfo=None),
),
'slot': slot,
})
appt_tz = pytz.timezone(self.appointment_tz)
requested_tz = pytz.timezone(timezone)
slots = []
for slot in self.slot_ids.filtered(lambda x: int(x.weekday) == first_day.isoweekday()):
if slot.hour > first_day.hour + first_day.minute / 60.0:
append_slot(first_day.date(), slot)
slot_weekday = [int(weekday) - 1 for weekday in self.slot_ids.mapped('weekday')]
for day in rrule.rrule(rrule.DAILY,
dtstart=first_day.date() + timedelta(days=1),
until=last_day.date(),
byweekday=slot_weekday):
for slot in self.slot_ids.filtered(lambda x: int(x.weekday) == day.isoweekday()):
append_slot(day, slot)
return slots
def _slots_available(self, slots, first_day, last_day, employee=None):
""" Fills the slot stucture with an available employee
:param slots: slots structure generated by _slots_generate
:param first_day: start datetime in UTC
:param last_day: end datetime in UTC
:param employee: if set, only consider this employee
if not set, consider all employees assigned to this appointment type
"""
def is_work_available(start_dt, end_dt, intervals):
""" check if the slot is contained in the employee's work hours (defined by intervals)
"""
def find_start_index():
""" find the highest index of intervals for which the start_date (element [0]) is before (or at) start_dt
"""
def recursive_find_index(lower_bound, upper_bound):
if upper_bound - lower_bound <= 1:
if intervals[upper_bound][0] <= start_dt:
return upper_bound
return lower_bound
index = (upper_bound + lower_bound) // 2
if intervals[index][0] <= start_dt:
return recursive_find_index(index, upper_bound)
else:
return recursive_find_index(lower_bound, index)
if start_dt <= intervals[0][0] - tolerance:
return -1
if end_dt >= intervals[-1][1] + tolerance:
return -1
return recursive_find_index(0, len(intervals) - 1)
if not intervals:
return False
tolerance = timedelta(minutes=1)
start_index = find_start_index()
if start_index != -1:
for index in range(start_index, len(intervals)):
if intervals[index][1] >= end_dt - tolerance:
return True
if len(intervals) == index + 1 or intervals[index + 1][0] - intervals[index][1] > tolerance:
return False
return False
def is_calendar_available(slot, events, employee):
""" Returns True if the given slot doesn't collide with given events for the employee
"""
start_dt = slot['UTC'][0]
end_dt = slot['UTC'][1]
event_in_scope = lambda ev: (
fields.Date.to_date(ev.start) <= fields.Date.to_date(end_dt)
and fields.Date.to_date(ev.stop) >= fields.Date.to_date(start_dt)
)
for ev in events.filtered(event_in_scope):
if ev.allday:
# allday events are considered to take the whole day in the related employee's timezone
event_tz = pytz.timezone(ev.event_tz or employee.user_id.tz or self.env.user.tz or slot['slot'].appointment_type_id.appointment_tz or 'UTC')
ev_start_dt = datetime.combine(fields.Date.from_string(ev.start_date), time.min)
ev_stop_dt = datetime.combine(fields.Date.from_string(ev.stop_date), time.max)
ev_start_dt = event_tz.localize(ev_start_dt).astimezone(pytz.UTC).replace(tzinfo=None)
ev_stop_dt = event_tz.localize(ev_stop_dt).astimezone(pytz.UTC).replace(tzinfo=None)
if ev_start_dt < end_dt and ev_stop_dt > start_dt:
return False
elif fields.Datetime.to_datetime(ev.start) < end_dt and fields.Datetime.to_datetime(ev.stop) > start_dt:
return False
return True
workhours = {}
meetings = {}
# With context will be used in resource.calendar to force the referential user
# for work interval computing to the *user linked to the employee*
available_employees = [emp.with_context(tz=emp.user_id.tz) for emp in (employee or self.employee_ids)]
random.shuffle(available_employees)
for slot in slots:
for emp_pos, emp in enumerate(available_employees):
if emp_pos not in workhours:
workhours[emp_pos] = [
(interval[0].astimezone(pytz.UTC).replace(tzinfo=None),
interval[1].astimezone(pytz.UTC).replace(tzinfo=None))
for interval in emp.resource_calendar_id._work_intervals_batch(
first_day, last_day, resources=emp.resource_id,
)[emp.resource_id.id]
]
if is_work_available(slot['UTC'][0], slot['UTC'][1], workhours[emp_pos]):
if emp_pos not in meetings:
# note: no check is made on the attendee's status (accepted/declined/...)
meetings[emp_pos] = self.env['calendar.event'].search([
('partner_ids.user_ids', '=', emp.user_id.id),
('start', '<', fields.Datetime.to_string(last_day.replace(hour=23, minute=59, second=59))),
('stop', '>', fields.Datetime.to_string(first_day.replace(hour=0, minute=0, second=0)))
])
if is_calendar_available(slot, meetings[emp_pos], emp):
slot['employee_id'] = emp
break
def _get_appointment_slots(self, timezone, employee=None):
""" Fetch available slots to book an appointment
:param timezone: timezone string e.g.: 'Europe/Brussels' or 'Etc/GMT+1'
:param employee: if set will only check available slots for this employee
:returns: list of dicts (1 per month) containing available slots per day per week.
complex structure used to simplify rendering of template
"""
self.ensure_one()
appt_tz = pytz.timezone(self.appointment_tz)
requested_tz = pytz.timezone(timezone)
first_day = requested_tz.fromutc(datetime.utcnow() + relativedelta(hours=self.min_schedule_hours))
last_day = requested_tz.fromutc(datetime.utcnow() + relativedelta(days=self.max_schedule_days))
# Compute available slots (ordered)
slots = self._slots_generate(first_day.astimezone(appt_tz), last_day.astimezone(appt_tz), timezone)
if not employee or employee in self.employee_ids:
self._slots_available(slots, first_day.astimezone(pytz.UTC), last_day.astimezone(pytz.UTC), employee)
# Compute calendar rendering and inject available slots
today = requested_tz.fromutc(datetime.utcnow())
start = today
month_dates_calendar = cal.Calendar(0).monthdatescalendar
months = []
while (start.year, start.month) <= (last_day.year, last_day.month):
dates = month_dates_calendar(start.year, start.month)
for week_index, week in enumerate(dates):
for day_index, day in enumerate(week):
mute_cls = weekend_cls = today_cls = None
today_slots = []
if day.weekday() in (cal.SUNDAY, cal.SATURDAY):
weekend_cls = 'o_weekend'
if day == today.date() and day.month == today.month:
today_cls = 'o_today'
if day.month != start.month:
mute_cls = 'text-muted o_mute_day'
else:
# slots are ordered, so check all unprocessed slots from until > day
while slots and (slots[0][timezone][0].date() <= day):
if (slots[0][timezone][0].date() == day) and ('employee_id' in slots[0]):
today_slots.append({
'employee_id': slots[0]['employee_id'].id,
'datetime': slots[0][timezone][0].strftime('%Y-%m-%d %H:%M:%S'),
'hours': slots[0][timezone][0].strftime('%H:%M')
})
slots.pop(0)
dates[week_index][day_index] = {
'day': day,
'slots': today_slots,
'mute_cls': mute_cls,
'weekend_cls': weekend_cls,
'today_cls': today_cls
}
months.append({
'month': format_datetime(start, 'MMMM Y', locale=get_lang(self.env).code),
'weeks': dates
})
start = start + relativedelta(months=1)
return months
class CalendarAppointmentSlot(models.Model):
_name = "calendar.appointment.slot"
_description = "Online Appointment : Time Slot"
_rec_name = "weekday"
_order = "weekday, hour"
appointment_type_id = fields.Many2one('calendar.appointment.type', 'Appointment Type', ondelete='cascade')
weekday = fields.Selection([
('1', 'Monday'),
('2', 'Tuesday'),
('3', 'Wednesday'),
('4', 'Thursday'),
('5', 'Friday'),
('6', 'Saturday'),
('7', 'Sunday'),
], string='Week Day', required=True)
hour = fields.Float('Starting Hour', required=True, default=8.0)
@api.constrains('hour')
def check_hour(self):
if any(slot.hour < 0.00 or slot.hour >= 24.00 for slot in self):
raise ValidationError(_("Please enter a valid hour between 0:00 and 24:00 for your slots."))
def name_get(self):
weekdays = dict(self._fields['weekday'].selection)
return self.mapped(lambda slot: (slot.id, "%s, %02d:%02d" % (weekdays.get(slot.weekday), int(slot.hour), int(round((slot.hour % 1) * 60)))))
class CalendarAppointmentQuestion(models.Model):
_name = "calendar.appointment.question"
_description = "Online Appointment : Questions"
_order = "sequence"
sequence = fields.Integer('Sequence')
appointment_type_id = fields.Many2one('calendar.appointment.type', 'Appointment Type', ondelete="cascade")
name = fields.Char('Question', translate=True, required=True)
placeholder = fields.Char('Placeholder', translate=True)
question_required = fields.Boolean('Required Answer')
question_type = fields.Selection([
('char', 'Single line text'),
('text', 'Multi-line text'),
('select', 'Dropdown (one answer)'),
('radio', 'Radio (one answer)'),
('checkbox', 'Checkboxes (multiple answers)')], 'Question Type', default='char')
answer_ids = fields.Many2many('calendar.appointment.answer', 'calendar_appointment_question_answer_rel', 'question_id', 'answer_id', string='Available Answers')
class CalendarAppointmentAnswer(models.Model):
_name = "calendar.appointment.answer"
_description = "Online Appointment : Answers"
question_id = fields.Many2many('calendar.appointment.question', 'calendar_appointment_question_answer_rel', 'answer_id', 'question_id', string='Questions')
name = fields.Char('Answer', translate=True, required=True)
| [
7,
10,
12,
18,
19
] |
1,422 | 1aaace83af0235341d10b8ac3b47d00a944dac37 | <mask token>
| <mask token>
class Migration(migrations.Migration):
<mask token>
<mask token>
| <mask token>
class Migration(migrations.Migration):
dependencies = [('story1', '0006_visitor')]
operations = [migrations.RenameField(model_name='visitor', old_name=
'identitiy_number', new_name='identity_number')]
| from django.db import migrations
class Migration(migrations.Migration):
dependencies = [('story1', '0006_visitor')]
operations = [migrations.RenameField(model_name='visitor', old_name=
'identitiy_number', new_name='identity_number')]
| # Generated by Django 3.1.2 on 2020-10-17 15:46
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('story1', '0006_visitor'),
]
operations = [
migrations.RenameField(
model_name='visitor',
old_name='identitiy_number',
new_name='identity_number',
),
]
| [
0,
1,
2,
3,
4
] |
1,423 | 6aff61ce5cef537e6b1b19e382d8bf80e3a61693 | <mask token>
| <mask token>
options.register('file', '', VarParsing.VarParsing.multiplicity.singleton,
VarParsing.VarParsing.varType.string, 'File path for storing output')
options.parseArguments()
<mask token>
process.load('FWCore.MessageService.MessageLogger_cfi')
<mask token>
| <mask token>
options = VarParsing.VarParsing()
options.register('file', '', VarParsing.VarParsing.multiplicity.singleton,
VarParsing.VarParsing.varType.string, 'File path for storing output')
options.parseArguments()
file_path = options.file
process = cms.Process('RawAnalyzer')
process.load('FWCore.MessageService.MessageLogger_cfi')
process.maxEvents = cms.untracked.PSet(input=cms.untracked.int32(-1))
process.source = cms.Source('PoolSource', fileNames=cms.untracked.vstring(
'root://eoscms//eos/cms/store/data/Run2015B/HcalNZS/RAW/v1/000/251/244/00000/46080143-C025-E511-9CB7-02163E014166.root'
))
process.analyzer = cms.EDAnalyzer('RawAnalyzer', debugit=cms.untracked.bool
(False), outputFile=cms.untracked.string(file_path), badevlist=cms.
vint32(153647285, 152905909, 153143477, 153217205, 151718625, 153024693,
150641153, 151460577, 152364043, 152889525, 153151669, 151148928,
153471157, 149944833, 151407329, 152529024, 150403585, 151124352,
152368139, 152451200, 152950965, 153135285, 154125042, 154268402,
152261643, 150718977, 152737973, 153409717, 153800866, 151321313,
152910005, 153348277, 154002162, 149846529, 150489601, 150526465,
151370465, 152959157, 153262261, 153916146, 150202881, 152750261,
153004213), modval=cms.untracked.int32(112))
process.TFileService = cms.Service('TFileService', fileName=cms.string(
'RawAnalyzer.root'))
process.MessageLogger.cerr.FwkReport.reportEvery = 2000
process.p = cms.Path(process.analyzer)
| import FWCore.ParameterSet.Config as cms
import FWCore.ParameterSet.VarParsing as VarParsing
options = VarParsing.VarParsing()
options.register('file', '', VarParsing.VarParsing.multiplicity.singleton,
VarParsing.VarParsing.varType.string, 'File path for storing output')
options.parseArguments()
file_path = options.file
process = cms.Process('RawAnalyzer')
process.load('FWCore.MessageService.MessageLogger_cfi')
process.maxEvents = cms.untracked.PSet(input=cms.untracked.int32(-1))
process.source = cms.Source('PoolSource', fileNames=cms.untracked.vstring(
'root://eoscms//eos/cms/store/data/Run2015B/HcalNZS/RAW/v1/000/251/244/00000/46080143-C025-E511-9CB7-02163E014166.root'
))
process.analyzer = cms.EDAnalyzer('RawAnalyzer', debugit=cms.untracked.bool
(False), outputFile=cms.untracked.string(file_path), badevlist=cms.
vint32(153647285, 152905909, 153143477, 153217205, 151718625, 153024693,
150641153, 151460577, 152364043, 152889525, 153151669, 151148928,
153471157, 149944833, 151407329, 152529024, 150403585, 151124352,
152368139, 152451200, 152950965, 153135285, 154125042, 154268402,
152261643, 150718977, 152737973, 153409717, 153800866, 151321313,
152910005, 153348277, 154002162, 149846529, 150489601, 150526465,
151370465, 152959157, 153262261, 153916146, 150202881, 152750261,
153004213), modval=cms.untracked.int32(112))
process.TFileService = cms.Service('TFileService', fileName=cms.string(
'RawAnalyzer.root'))
process.MessageLogger.cerr.FwkReport.reportEvery = 2000
process.p = cms.Path(process.analyzer)
| import FWCore.ParameterSet.Config as cms
import FWCore.ParameterSet.VarParsing as VarParsing
options = VarParsing.VarParsing()
options.register(
'file','',VarParsing.VarParsing.multiplicity.singleton,
VarParsing.VarParsing.varType.string,
'File path for storing output')
options.parseArguments()
file_path = options.file
#print file_path
process = cms.Process("RawAnalyzer")
process.load("FWCore.MessageService.MessageLogger_cfi")
process.maxEvents = cms.untracked.PSet( input = cms.untracked.int32(-1) ) # -1 means run on all events
#default is HcalTBSource but you can change to PoolSource if you like
#process.source = cms.Source("HcalTBSource",
process.source = cms.Source("PoolSource",
# replace 'myfile.root' with the source file you want to use
fileNames = cms.untracked.vstring(
#'root://eoscms//eos/cms/store/data/Run2015B/SingleMuon/RAW/v1/000/251/162/00000/0050EEC0-AD25-E511-9A32-02163E011962.root'
# 'file:/afs/cern.ch/user/d/drew/USC_223708.root'
# 'file:/afs/cern.ch/user/d/drew/USC_223495.root' #HO pedestal
# '/store/group/comm_hcal/LS1/USC_223495.root' #HO pedestal, local
# '/store/group/comm_hcal/LS1/USC_222759.root'
# '/store/group/comm_hcal/LS1/USC_223775.root'
# '/store/group/comm_hcal/LS1/USC_224285.root' #not sure, takend 31/7/2014
# '/store/group/comm_hcal/LS1/USC_224625.root'
'root://eoscms//eos/cms/store/data/Run2015B/HcalNZS/RAW/v1/000/251/244/00000/46080143-C025-E511-9CB7-02163E014166.root'
#'root://eoscms//eos/cms/store/data/Run2015B/HcalNZS/RAW/v1/000/251/883/00000/20C23681-852B-E511-9FBC-02163E01413E.root'
#'root://eoscms//eos/cms/store/data/Run2015B/HcalNZS/RAW/v1/000/251/883/00000/369E8A59-802B-E511-B85E-02163E01259F.root'
#'root://eoscms//eos/cms/store/data/Run2015B/HcalNZS/RAW/v1/000/251/883/00000/488F97C1-8F2B-E511-86B8-02163E0144D2.root'
#'root://eoscms//eos/cms/store/data/Run2015B/HcalNZS/RAW/v1/000/251/883/00000/FAE69354-7E2B-E511-80D7-02163E0125C8.root'
)
)
process.analyzer = cms.EDAnalyzer('RawAnalyzer',
debugit = cms.untracked.bool(False),
outputFile = cms.untracked.string(file_path),
badevlist = cms.vint32(
153647285, 152905909, 153143477, 153217205, 151718625, 153024693, 150641153, 151460577,
152364043, 152889525, 153151669, 151148928, 153471157, 149944833, 151407329, 152529024,
150403585, 151124352, 152368139, 152451200, 152950965, 153135285, 154125042, 154268402,
152261643, 150718977, 152737973, 153409717, 153800866, 151321313, 152910005, 153348277,
154002162, 149846529, 150489601, 150526465, 151370465, 152959157, 153262261, 153916146,
150202881, 152750261, 153004213),
modval = cms.untracked.int32(112)
)
process.TFileService = cms.Service("TFileService",fileName = cms.string("RawAnalyzer.root") )
process.MessageLogger.cerr.FwkReport.reportEvery = 2000 #type out every <n> events
process.p = cms.Path(process.analyzer)
| [
0,
1,
2,
3,
4
] |
1,424 | d0981d279f7090d5309aa564252dba731a34a66b | <mask token>
| <mask token>
with open('nodes_tags.csv', 'r') as f:
tags = csv.DictReader(f)
for row in tags:
if row['key'] == 'FIXME':
pp(row)
| import csv
from pprint import pprint as pp
with open('nodes_tags.csv', 'r') as f:
tags = csv.DictReader(f)
for row in tags:
if row['key'] == 'FIXME':
pp(row)
| null | null | [
0,
1,
2
] |
1,425 | f0a54feaa165a393c4e87cbac2a38347633acf5a | <mask token>
class HomePageView(TemplateView):
<mask token>
| <mask token>
class HomePageView(TemplateView):
template_name = 'base.html'
| <mask token>
def index(request):
context = 'Welcome home'
return render(request, 'base.html', {'context': context})
class HomePageView(TemplateView):
template_name = 'base.html'
| from django.shortcuts import render
from django.views.generic import TemplateView
def index(request):
context = 'Welcome home'
return render(request, 'base.html', {'context': context})
class HomePageView(TemplateView):
template_name = 'base.html'
| from django.shortcuts import render
from django.views.generic import TemplateView
# Create your views here.
def index(request):
context = 'Welcome home'
return render(request,'base.html',{'context':context})
class HomePageView(TemplateView):
template_name = 'base.html'
| [
1,
2,
3,
4,
5
] |
1,426 | 5000663e3cde9c1a1100c9022707ccae13db0034 | <mask token>
| class BaseService:
<mask token>
<mask token>
| class BaseService:
<mask token>
def post(self, path, body):
result = self._context.http.post(path, body)
return result.json()['Data']
| class BaseService:
def __init__(self, context):
self._context = context
def post(self, path, body):
result = self._context.http.post(path, body)
return result.json()['Data']
| class BaseService:
def __init__(self, context):
self._context = context
def post(self, path, body):
result = self._context.http.post(path, body)
return result.json()["Data"]
| [
0,
1,
2,
3,
4
] |
1,427 | da55d9a6534525e58b6c1d2db997e90a1c9b0f36 | <mask token>
def parse_arguments():
parser = argparse.ArgumentParser()
parser.add_argument('--data_dir', type=str, required=True, help=
'dir holding sequences as separate files')
parser.add_argument('--maxlen', type=int, default=500, help=
'maximum length of sequence')
parser.add_argument('--ext', type=str, default='tar.gz', help=
        'extension of files with sequences')
parser.add_argument('--datetime', type=bool, default=False, help=
'if time values in event sequences are represented in datetime format')
parser.add_argument('--save_dir', type=str, default='./', help=
'path to save results')
parser.add_argument('--maxsize', type=int, default=None, help=
'max number of sequences')
args = parser.parse_args()
return args
def tranform_data(args):
"""
Loads the sequences saved in the given directory.
Args:
data_dir (str, Path) - directory containing sequences
save_dir - directory for saving transform data
maxsize (int) - maximum number of sequences to load
maxlen (int) - maximum length of sequence, the sequences longer than maxlen will be truncated
ext (str) - extension of files in data_dir directory
datetime (bool) - variable meaning if time values in files are represented in datetime format
"""
data_dir = args.data_dir
save_dir = args.save_dir
os.makedirs(save_dir)
maxsize = args.maxsize
maxlen = args.maxlen
ext = args.ext
datetime = args.datetime
classes = set()
nb_files = 0
time_col = 'time'
event_col = 'event'
gt_ids = None
if args.ext == 'pkl':
with open(Path(args.data_dir, 'fx_labels'), 'rb') as fp:
gt_ids = pickle.load(fp)[:maxsize]
labels = np.unique(gt_ids)
gt_data = []
for i in range(len(gt_ids)):
gt_data.append(int(np.nonzero(gt_ids[i] == labels)[0]))
gt = {'cluster_id': gt_data}
print(gt_data)
gt_table = pd.DataFrame(data=gt)
gt_table.to_csv(Path(save_dir, 'clusters.csv'))
if Path(args.data_dir, 'clusters.csv').exists():
gt_ids = pd.read_csv(Path(args.data_dir, 'clusters.csv'))[:maxsize]
gt_ids.to_csv(Path(save_dir, 'clusters.csv'))
<mask token>
| <mask token>
sys.path.append('..')
def parse_arguments():
parser = argparse.ArgumentParser()
parser.add_argument('--data_dir', type=str, required=True, help=
'dir holding sequences as separate files')
parser.add_argument('--maxlen', type=int, default=500, help=
'maximum length of sequence')
parser.add_argument('--ext', type=str, default='tar.gz', help=
        'extension of files with sequences')
parser.add_argument('--datetime', type=bool, default=False, help=
'if time values in event sequences are represented in datetime format')
parser.add_argument('--save_dir', type=str, default='./', help=
'path to save results')
parser.add_argument('--maxsize', type=int, default=None, help=
'max number of sequences')
args = parser.parse_args()
return args
def tranform_data(args):
"""
Loads the sequences saved in the given directory.
Args:
data_dir (str, Path) - directory containing sequences
save_dir - directory for saving transform data
maxsize (int) - maximum number of sequences to load
maxlen (int) - maximum length of sequence, the sequences longer than maxlen will be truncated
ext (str) - extension of files in data_dir directory
datetime (bool) - variable meaning if time values in files are represented in datetime format
"""
data_dir = args.data_dir
save_dir = args.save_dir
os.makedirs(save_dir)
maxsize = args.maxsize
maxlen = args.maxlen
ext = args.ext
datetime = args.datetime
classes = set()
nb_files = 0
time_col = 'time'
event_col = 'event'
gt_ids = None
if args.ext == 'pkl':
with open(Path(args.data_dir, 'fx_labels'), 'rb') as fp:
gt_ids = pickle.load(fp)[:maxsize]
labels = np.unique(gt_ids)
gt_data = []
for i in range(len(gt_ids)):
gt_data.append(int(np.nonzero(gt_ids[i] == labels)[0]))
gt = {'cluster_id': gt_data}
print(gt_data)
gt_table = pd.DataFrame(data=gt)
gt_table.to_csv(Path(save_dir, 'clusters.csv'))
if Path(args.data_dir, 'clusters.csv').exists():
gt_ids = pd.read_csv(Path(args.data_dir, 'clusters.csv'))[:maxsize]
gt_ids.to_csv(Path(save_dir, 'clusters.csv'))
<mask token>
print(args)
tranform_data(args)
| <mask token>
sys.path.append('..')
def parse_arguments():
parser = argparse.ArgumentParser()
parser.add_argument('--data_dir', type=str, required=True, help=
'dir holding sequences as separate files')
parser.add_argument('--maxlen', type=int, default=500, help=
'maximum length of sequence')
parser.add_argument('--ext', type=str, default='tar.gz', help=
        'extension of files with sequences')
parser.add_argument('--datetime', type=bool, default=False, help=
'if time values in event sequences are represented in datetime format')
parser.add_argument('--save_dir', type=str, default='./', help=
'path to save results')
parser.add_argument('--maxsize', type=int, default=None, help=
'max number of sequences')
args = parser.parse_args()
return args
def tranform_data(args):
"""
Loads the sequences saved in the given directory.
Args:
data_dir (str, Path) - directory containing sequences
save_dir - directory for saving transform data
maxsize (int) - maximum number of sequences to load
maxlen (int) - maximum length of sequence, the sequences longer than maxlen will be truncated
ext (str) - extension of files in data_dir directory
datetime (bool) - variable meaning if time values in files are represented in datetime format
"""
data_dir = args.data_dir
save_dir = args.save_dir
os.makedirs(save_dir)
maxsize = args.maxsize
maxlen = args.maxlen
ext = args.ext
datetime = args.datetime
classes = set()
nb_files = 0
time_col = 'time'
event_col = 'event'
gt_ids = None
if args.ext == 'pkl':
with open(Path(args.data_dir, 'fx_labels'), 'rb') as fp:
gt_ids = pickle.load(fp)[:maxsize]
labels = np.unique(gt_ids)
gt_data = []
for i in range(len(gt_ids)):
gt_data.append(int(np.nonzero(gt_ids[i] == labels)[0]))
gt = {'cluster_id': gt_data}
print(gt_data)
gt_table = pd.DataFrame(data=gt)
gt_table.to_csv(Path(save_dir, 'clusters.csv'))
if Path(args.data_dir, 'clusters.csv').exists():
gt_ids = pd.read_csv(Path(args.data_dir, 'clusters.csv'))[:maxsize]
gt_ids.to_csv(Path(save_dir, 'clusters.csv'))
args = parse_arguments()
print(args)
tranform_data(args)
| import torch
import tarfile
import pickle
import pandas
import json
import argparse
from pathlib import Path
import numpy as np
import shutil
from shutil import copyfile
import os
import re
import pandas as pd
import sys
from numpy import asarray
from numpy import savetxt
sys.path.append('..')
def parse_arguments():
parser = argparse.ArgumentParser()
parser.add_argument('--data_dir', type=str, required=True, help=
'dir holding sequences as separate files')
parser.add_argument('--maxlen', type=int, default=500, help=
'maximum length of sequence')
parser.add_argument('--ext', type=str, default='tar.gz', help=
        'extension of files with sequences')
parser.add_argument('--datetime', type=bool, default=False, help=
'if time values in event sequences are represented in datetime format')
parser.add_argument('--save_dir', type=str, default='./', help=
'path to save results')
parser.add_argument('--maxsize', type=int, default=None, help=
'max number of sequences')
args = parser.parse_args()
return args
def tranform_data(args):
"""
Loads the sequences saved in the given directory.
Args:
data_dir (str, Path) - directory containing sequences
save_dir - directory for saving transform data
maxsize (int) - maximum number of sequences to load
maxlen (int) - maximum length of sequence, the sequences longer than maxlen will be truncated
ext (str) - extension of files in data_dir directory
datetime (bool) - variable meaning if time values in files are represented in datetime format
"""
data_dir = args.data_dir
save_dir = args.save_dir
os.makedirs(save_dir)
maxsize = args.maxsize
maxlen = args.maxlen
ext = args.ext
datetime = args.datetime
classes = set()
nb_files = 0
time_col = 'time'
event_col = 'event'
gt_ids = None
if args.ext == 'pkl':
with open(Path(args.data_dir, 'fx_labels'), 'rb') as fp:
gt_ids = pickle.load(fp)[:maxsize]
labels = np.unique(gt_ids)
gt_data = []
for i in range(len(gt_ids)):
gt_data.append(int(np.nonzero(gt_ids[i] == labels)[0]))
gt = {'cluster_id': gt_data}
print(gt_data)
gt_table = pd.DataFrame(data=gt)
gt_table.to_csv(Path(save_dir, 'clusters.csv'))
if Path(args.data_dir, 'clusters.csv').exists():
gt_ids = pd.read_csv(Path(args.data_dir, 'clusters.csv'))[:maxsize]
gt_ids.to_csv(Path(save_dir, 'clusters.csv'))
args = parse_arguments()
print(args)
tranform_data(args)
| import torch
import tarfile
import pickle
import pandas
import json
import argparse
from pathlib import Path
import numpy as np
import shutil
from shutil import copyfile
import os
import re
import pandas as pd
import sys
from numpy import asarray
from numpy import savetxt
sys.path.append("..")
def parse_arguments():
parser = argparse.ArgumentParser()
parser.add_argument('--data_dir', type=str, required=True, help='dir holding sequences as separate files')
parser.add_argument('--maxlen', type=int, default=500, help='maximum length of sequence')
    parser.add_argument('--ext', type=str, default='tar.gz', help='extension of files with sequences')
parser.add_argument('--datetime', type=bool, default=False, help='if time values in event sequences are represented in datetime format')
parser.add_argument('--save_dir', type=str, default = './', help='path to save results')
parser.add_argument('--maxsize', type=int, default=None, help='max number of sequences')
args = parser.parse_args()
return args
def tranform_data(args):
"""
Loads the sequences saved in the given directory.
Args:
data_dir (str, Path) - directory containing sequences
save_dir - directory for saving transform data
maxsize (int) - maximum number of sequences to load
maxlen (int) - maximum length of sequence, the sequences longer than maxlen will be truncated
ext (str) - extension of files in data_dir directory
datetime (bool) - variable meaning if time values in files are represented in datetime format
"""
data_dir = args.data_dir
save_dir = args.save_dir
os.makedirs(save_dir)
maxsize = args.maxsize
maxlen = args.maxlen
ext = args.ext
datetime = args.datetime
classes = set()
nb_files = 0
time_col = 'time'
event_col = 'event'
gt_ids = None
if args.ext == "pkl":
with open(Path(args.data_dir, "fx_labels"), "rb") as fp:
gt_ids = pickle.load(fp)[:maxsize]
labels = np.unique(gt_ids)
gt_data = []
for i in range (len(gt_ids)):
gt_data.append(int(np.nonzero(gt_ids[i] == labels)[0]))
gt = {'cluster_id': gt_data}
print(gt_data)
gt_table = pd.DataFrame(data=gt)
gt_table.to_csv(Path(save_dir, 'clusters.csv'))
if Path(args.data_dir, 'clusters.csv').exists():
gt_ids = pd.read_csv(Path(args.data_dir, 'clusters.csv'))[:(maxsize)]
gt_ids.to_csv(Path(save_dir, 'clusters.csv'))
args = parse_arguments()
print(args)
tranform_data(args) | [
2,
3,
4,
5,
6
] |
1,428 | 325cc2fd82c44d0b7e291384159bd48d068e60f1 | <mask token>
@njit
def hammingWeight(n):
res = 0
for i in range(32):
if n & 1 << i:
res += 1
return res
@njit
def getStates():
spinUpStates = [i for i in range(1 << L) if hammingWeight(i) == Nup]
spinDownStates = [i for i in range(1 << L) if hammingWeight(i) == Ndown]
states = [((spinUp << L) + spinDown) for spinUp in spinUpStates for
spinDown in spinDownStates]
return states
@njit
def doubleOccNum(state):
spinUpState = state >> L
spinDownState = state & (1 << L) - 1
return hammingWeight(spinUpState & spinDownState)
def Hamiltonian(states):
n = len(states)
H = sparse.lil_matrix((n, n))
stateIdxMap = {}
for i in range(n):
stateIdxMap[states[i]] = i
for i in range(n):
H[i, i] = U * doubleOccNum(states[i])
for a, b, t in hoppings:
for s in range(2):
if states[i] & 1 << s * L << b and not states[i
] & 1 << s * L << a:
state = states[i] ^ 1 << s * L << b ^ 1 << s * L << a
j = stateIdxMap[state]
H[j, i] = t
return H
<mask token>
| <mask token>
@njit
def hammingWeight(n):
res = 0
for i in range(32):
if n & 1 << i:
res += 1
return res
@njit
def getStates():
spinUpStates = [i for i in range(1 << L) if hammingWeight(i) == Nup]
spinDownStates = [i for i in range(1 << L) if hammingWeight(i) == Ndown]
states = [((spinUp << L) + spinDown) for spinUp in spinUpStates for
spinDown in spinDownStates]
return states
@njit
def doubleOccNum(state):
spinUpState = state >> L
spinDownState = state & (1 << L) - 1
return hammingWeight(spinUpState & spinDownState)
def Hamiltonian(states):
n = len(states)
H = sparse.lil_matrix((n, n))
stateIdxMap = {}
for i in range(n):
stateIdxMap[states[i]] = i
for i in range(n):
H[i, i] = U * doubleOccNum(states[i])
for a, b, t in hoppings:
for s in range(2):
if states[i] & 1 << s * L << b and not states[i
] & 1 << s * L << a:
state = states[i] ^ 1 << s * L << b ^ 1 << s * L << a
j = stateIdxMap[state]
H[j, i] = t
return H
if __name__ == '__main__':
start = time.time()
states = getStates()
end = time.time()
print('Constructing State Cost = %s' % (end - start))
start = time.time()
H = Hamiltonian(states)
end = time.time()
print('Constructing Hamiltonian Cost = %s' % (end - start))
start = time.time()
E, V = sparse_alg.eigsh(H.tocsr(), 1, which='SA')
end = time.time()
print('Solve EigenValue Cost = %s' % (end - start))
print(E)
| <mask token>
L = 12
Nup = 6
Ndown = 6
t = -2.0
U = 10.0
hoppings = [(i, j, t) for i in range(L) for j in range(L) if abs(i - j) == 1]
@njit
def hammingWeight(n):
res = 0
for i in range(32):
if n & 1 << i:
res += 1
return res
@njit
def getStates():
spinUpStates = [i for i in range(1 << L) if hammingWeight(i) == Nup]
spinDownStates = [i for i in range(1 << L) if hammingWeight(i) == Ndown]
states = [((spinUp << L) + spinDown) for spinUp in spinUpStates for
spinDown in spinDownStates]
return states
@njit
def doubleOccNum(state):
spinUpState = state >> L
spinDownState = state & (1 << L) - 1
return hammingWeight(spinUpState & spinDownState)
def Hamiltonian(states):
n = len(states)
H = sparse.lil_matrix((n, n))
stateIdxMap = {}
for i in range(n):
stateIdxMap[states[i]] = i
for i in range(n):
H[i, i] = U * doubleOccNum(states[i])
for a, b, t in hoppings:
for s in range(2):
if states[i] & 1 << s * L << b and not states[i
] & 1 << s * L << a:
state = states[i] ^ 1 << s * L << b ^ 1 << s * L << a
j = stateIdxMap[state]
H[j, i] = t
return H
if __name__ == '__main__':
start = time.time()
states = getStates()
end = time.time()
print('Constructing State Cost = %s' % (end - start))
start = time.time()
H = Hamiltonian(states)
end = time.time()
print('Constructing Hamiltonian Cost = %s' % (end - start))
start = time.time()
E, V = sparse_alg.eigsh(H.tocsr(), 1, which='SA')
end = time.time()
print('Solve EigenValue Cost = %s' % (end - start))
print(E)
| import time
from numba import njit
import scipy.sparse as sparse
import scipy.sparse.linalg as sparse_alg
L = 12
Nup = 6
Ndown = 6
t = -2.0
U = 10.0
hoppings = [(i, j, t) for i in range(L) for j in range(L) if abs(i - j) == 1]
@njit
def hammingWeight(n):
res = 0
for i in range(32):
if n & 1 << i:
res += 1
return res
@njit
def getStates():
spinUpStates = [i for i in range(1 << L) if hammingWeight(i) == Nup]
spinDownStates = [i for i in range(1 << L) if hammingWeight(i) == Ndown]
states = [((spinUp << L) + spinDown) for spinUp in spinUpStates for
spinDown in spinDownStates]
return states
@njit
def doubleOccNum(state):
spinUpState = state >> L
spinDownState = state & (1 << L) - 1
return hammingWeight(spinUpState & spinDownState)
def Hamiltonian(states):
n = len(states)
H = sparse.lil_matrix((n, n))
stateIdxMap = {}
for i in range(n):
stateIdxMap[states[i]] = i
for i in range(n):
H[i, i] = U * doubleOccNum(states[i])
for a, b, t in hoppings:
for s in range(2):
if states[i] & 1 << s * L << b and not states[i
] & 1 << s * L << a:
state = states[i] ^ 1 << s * L << b ^ 1 << s * L << a
j = stateIdxMap[state]
H[j, i] = t
return H
if __name__ == '__main__':
start = time.time()
states = getStates()
end = time.time()
print('Constructing State Cost = %s' % (end - start))
start = time.time()
H = Hamiltonian(states)
end = time.time()
print('Constructing Hamiltonian Cost = %s' % (end - start))
start = time.time()
E, V = sparse_alg.eigsh(H.tocsr(), 1, which='SA')
end = time.time()
print('Solve EigenValue Cost = %s' % (end - start))
print(E)
| import time
from numba import njit
import scipy.sparse as sparse
import scipy.sparse.linalg as sparse_alg
L = 12
Nup = 6
Ndown = 6
t = -2.0
U = 10.0
hoppings = [(i, j, t) for i in range(L) for j in range(L) if abs(i - j) == 1]
@njit
def hammingWeight(n):
res = 0
for i in range(32):
if n & (1 << i):
res += 1
return res
@njit
def getStates():
spinUpStates = [i for i in range(1 << L) if hammingWeight(i) == Nup]
spinDownStates = [i for i in range(1 << L) if hammingWeight(i) == Ndown]
states = [(spinUp << L) + (spinDown) for spinUp in spinUpStates for spinDown in spinDownStates]
return states
@njit
def doubleOccNum(state):
spinUpState = state >> L
spinDownState = state & ((1 << L) - 1)
return hammingWeight(spinUpState & spinDownState)
def Hamiltonian(states):
n = len(states)
H = sparse.lil_matrix((n,n))
stateIdxMap = {}
for i in range(n):
stateIdxMap[states[i]] = i
for i in range(n):
H[i, i] = U * doubleOccNum(states[i])
for a, b, t in hoppings:
for s in range(2):
if (states[i] & (1 << (s * L) << b)) and not (states[i] & (1 << (s * L) << a)):
state = states[i] ^ (1 << (s * L) << b) ^ (1 << (s * L) << a)
j = stateIdxMap[state]
H[j, i] = t
return H
if __name__ == '__main__':
start = time.time()
states = getStates()
end = time.time()
print("Constructing State Cost = %s" % (end - start))
start = time.time()
H = Hamiltonian(states)
end = time.time()
print("Constructing Hamiltonian Cost = %s" % (end - start))
start = time.time()
E,V=sparse_alg.eigsh(H.tocsr(),1,which='SA')
end = time.time()
print("Solve EigenValue Cost = %s" % (end - start))
print(E)
| [
4,
5,
6,
7,
8
] |
1,429 | d7dee3311e202ae50172077940fc625f1cc6836d | <mask token>
| <mask token>
def get_teacherData():
excelDir = '../data/松勤-教管系统接口测试用例-v1.4.xls'
workBook = xlrd.open_workbook(excelDir, formatting_info=True)
workSheet = workBook.sheet_by_name('3-老师模块')
dataList = []
for cnt in range(1, 2):
cellData = workSheet.cell_value(cnt, 6)
repsCellData = workSheet.cell_value(cnt, 8)
dataList.append((cellData, repsCellData))
return dataList
<mask token>
| <mask token>
def get_teacherData():
excelDir = '../data/松勤-教管系统接口测试用例-v1.4.xls'
workBook = xlrd.open_workbook(excelDir, formatting_info=True)
workSheet = workBook.sheet_by_name('3-老师模块')
dataList = []
for cnt in range(1, 2):
cellData = workSheet.cell_value(cnt, 6)
repsCellData = workSheet.cell_value(cnt, 8)
dataList.append((cellData, repsCellData))
return dataList
get_teacherData()
| import xlrd
def get_teacherData():
excelDir = '../data/松勤-教管系统接口测试用例-v1.4.xls'
workBook = xlrd.open_workbook(excelDir, formatting_info=True)
workSheet = workBook.sheet_by_name('3-老师模块')
dataList = []
for cnt in range(1, 2):
cellData = workSheet.cell_value(cnt, 6)
repsCellData = workSheet.cell_value(cnt, 8)
dataList.append((cellData, repsCellData))
return dataList
get_teacherData()
| #time:2020-11-28
import xlrd #读取库
def get_teacherData():
excelDir = r'../data/松勤-教管系统接口测试用例-v1.4.xls'
workBook = xlrd.open_workbook(excelDir, formatting_info=True) # 保存原样---样式
# 2-操作对应的用例表
workSheet = workBook.sheet_by_name('3-老师模块') # 通过表名获取
dataList = []
for cnt in range(1, 2): # 到第四行
cellData = workSheet.cell_value(cnt, 6) # 取第6列 字符串类型
repsCellData = workSheet.cell_value(cnt, 8) # 取第8列 字符串类型 预期结果
dataList.append((cellData, repsCellData))
return dataList # 返回列表
get_teacherData()
| [
0,
1,
2,
3,
4
] |
1,430 | 2d17229afe154937132c1e4f8c138896da34ab61 | <mask token>
| <mask token>
class FilebasedUniqueConfig(AppConfig):
<mask token>
<mask token>
| <mask token>
class FilebasedUniqueConfig(AppConfig):
name = 'papermerge.filebased_unique'
label = 'filebased_unique'
| from django.apps import AppConfig
class FilebasedUniqueConfig(AppConfig):
name = 'papermerge.filebased_unique'
label = 'filebased_unique'
| null | [
0,
1,
2,
3
] |
1,431 | cb469b69bf974d39609f79c4f3be686d8106f971 | <mask token>
| <mask token>
pub_socket.bind('tcp://*:%s' % port)
while True:
topic = 'test'
thisX = np.random.rand()
thisY = np.random.rand()
testDict = {'gaze': (thisX, thisY)}
pub_socket.send_string(topic, zmq.SNDMORE)
pub_socket.send(serializer.dumps(testDict, use_bin_type=True))
print(testDict)
time.sleep(0.02)
| <mask token>
port = '42000'
ipc_sub_url = 'tcp://*:*'
ipc_push_url = 'tcp://*:*'
zmq_ctx = zmq.Context()
pub_socket = zmq_ctx.socket(zmq.PUB)
pub_socket.bind('tcp://*:%s' % port)
while True:
topic = 'test'
thisX = np.random.rand()
thisY = np.random.rand()
testDict = {'gaze': (thisX, thisY)}
pub_socket.send_string(topic, zmq.SNDMORE)
pub_socket.send(serializer.dumps(testDict, use_bin_type=True))
print(testDict)
time.sleep(0.02)
| from __future__ import print_function
import zmq
import time
import random
import numpy as np
import msgpack as serializer
port = '42000'
ipc_sub_url = 'tcp://*:*'
ipc_push_url = 'tcp://*:*'
zmq_ctx = zmq.Context()
pub_socket = zmq_ctx.socket(zmq.PUB)
pub_socket.bind('tcp://*:%s' % port)
while True:
topic = 'test'
thisX = np.random.rand()
thisY = np.random.rand()
testDict = {'gaze': (thisX, thisY)}
pub_socket.send_string(topic, zmq.SNDMORE)
pub_socket.send(serializer.dumps(testDict, use_bin_type=True))
print(testDict)
time.sleep(0.02)
| from __future__ import print_function
import zmq
import time
import random
import numpy as np
import msgpack as serializer
port = '42000'
# let the OS choose the IP and PORT
ipc_sub_url = 'tcp://*:*'
ipc_push_url = 'tcp://*:*'
# starting communication threads
zmq_ctx = zmq.Context()
pub_socket = zmq_ctx.socket(zmq.PUB)
pub_socket.bind("tcp://*:%s" % port)
# send messages
while True:
topic = 'test'
thisX = np.random.rand()
thisY = np.random.rand()
testDict = {'gaze':(thisX, thisY)}
pub_socket.send_string(topic, zmq.SNDMORE)
pub_socket.send(serializer.dumps(testDict, use_bin_type=True))
print(testDict)
time.sleep(.02)
| [
0,
1,
2,
3,
4
] |
1,432 | 17b3f51779bda5a48c4d77c35d6bbdd2aadb13cd | <mask token>
def fully_connected(prev_layer, num_units, batch_norm, is_training=False):
layer = tf.layers.dense(prev_layer, num_units, use_bias=False,
activation=None)
if batch_norm:
layer = tf.layers.batch_normalization(layer, training=is_training)
layer = tf.nn.relu(layer)
return layer
<mask token>
| <mask token>
def fully_connected(prev_layer, num_units, batch_norm, is_training=False):
layer = tf.layers.dense(prev_layer, num_units, use_bias=False,
activation=None)
if batch_norm:
layer = tf.layers.batch_normalization(layer, training=is_training)
layer = tf.nn.relu(layer)
return layer
def conv_layer(prev_layer, layer_depth, batch_norm, is_training=False):
if layer_depth % 3 == 0:
strides = 2
else:
strides = 1
conv_layer = tf.layers.conv2d(prev_layer, layer_depth * 4, 3, strides,
'same', use_bias=False, activation=None)
if batch_norm:
conv_layer = tf.layers.batch_normalization(conv_layer, training=
is_training)
conv_layer = tf.nn.relu(conv_layer)
return conv_layer
<mask token>
for layer_i in range(1, 1 + layer_num):
layer = conv_layer(layer, layer_i, batch_norm, is_training)
<mask token>
tf.summary.scalar('conv_loss', model_loss)
if batch_norm:
with tf.control_dependencies(tf.get_collection(tf.GraphKeys.UPDATE_OPS)):
train_opt = tf.train.AdamOptimizer(learning_rate).minimize(model_loss)
else:
train_opt = tf.train.GradientDescentOptimizer(learning_rate).minimize(
model_loss)
<mask token>
with tf.Session() as sess:
merged = tf.summary.merge_all()
if batch_norm:
logdir = 'mnist/conv/SGD_batchnorm'
else:
logdir = 'mnist/conv/SGD_no_batchnorm'
writer = tf.summary.FileWriter(logdir, sess.graph)
sess.run(tf.global_variables_initializer())
for batch_i in range(num_batches):
batch_xs, batch_ys = mnist.train.next_batch(batch_size)
_, summary = sess.run([train_opt, merged], {inputs: batch_xs,
labels: batch_ys, is_training: True})
writer.add_summary(summary, batch_i)
if batch_i % 500 == 0:
loss, acc = sess.run([model_loss, accuracy], {inputs: mnist.
validation.images, labels: mnist.validation.labels,
is_training: False})
print(
'Batch: {:>2}: Validation loss: {:>3.5f}, Validation accuracy: {:>3.5f}'
.format(batch_i, loss, acc))
elif batch_i % 100 == 0:
loss, acc = sess.run([model_loss, accuracy], {inputs: batch_xs,
labels: batch_ys, is_training: False})
print(
'Batch: {:>2}: Training loss: {:>3.5f}, Training accuracy: {:>3.5f}'
.format(batch_i, loss, acc))
acc = sess.run(accuracy, {inputs: mnist.validation.images, labels:
mnist.validation.labels, is_training: False})
print('Final validation accuracy: {:>3.5f}'.format(acc))
acc = sess.run(accuracy, {inputs: mnist.test.images, labels: mnist.test
.labels, is_training: False})
print('Final test accuracy: {:>3.5f}'.format(acc))
| <mask token>
mnist = input_data.read_data_sets('MNIST_data/', one_hot=True, reshape=False)
def fully_connected(prev_layer, num_units, batch_norm, is_training=False):
layer = tf.layers.dense(prev_layer, num_units, use_bias=False,
activation=None)
if batch_norm:
layer = tf.layers.batch_normalization(layer, training=is_training)
layer = tf.nn.relu(layer)
return layer
def conv_layer(prev_layer, layer_depth, batch_norm, is_training=False):
if layer_depth % 3 == 0:
strides = 2
else:
strides = 1
conv_layer = tf.layers.conv2d(prev_layer, layer_depth * 4, 3, strides,
'same', use_bias=False, activation=None)
if batch_norm:
conv_layer = tf.layers.batch_normalization(conv_layer, training=
is_training)
conv_layer = tf.nn.relu(conv_layer)
return conv_layer
num_batches = 3000
batch_size = 128
learning_rate = 0.002
layer_num = 5
batch_norm = True
inputs = tf.placeholder(tf.float32, [None, 28, 28, 1])
labels = tf.placeholder(tf.float32, [None, 10])
is_training = tf.placeholder(tf.bool)
layer = inputs
for layer_i in range(1, 1 + layer_num):
layer = conv_layer(layer, layer_i, batch_norm, is_training)
orig_shape = layer.get_shape().as_list()
layer = tf.reshape(layer, shape=[-1, orig_shape[1] * orig_shape[2] *
orig_shape[3]])
layer = fully_connected(layer, 100, batch_norm, is_training)
logits = tf.layers.dense(layer, 10)
model_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=
logits, labels=labels))
tf.summary.scalar('conv_loss', model_loss)
if batch_norm:
with tf.control_dependencies(tf.get_collection(tf.GraphKeys.UPDATE_OPS)):
train_opt = tf.train.AdamOptimizer(learning_rate).minimize(model_loss)
else:
train_opt = tf.train.GradientDescentOptimizer(learning_rate).minimize(
model_loss)
correct_prediction = tf.equal(tf.argmax(logits, 1), tf.argmax(labels, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
with tf.Session() as sess:
merged = tf.summary.merge_all()
if batch_norm:
logdir = 'mnist/conv/SGD_batchnorm'
else:
logdir = 'mnist/conv/SGD_no_batchnorm'
writer = tf.summary.FileWriter(logdir, sess.graph)
sess.run(tf.global_variables_initializer())
for batch_i in range(num_batches):
batch_xs, batch_ys = mnist.train.next_batch(batch_size)
_, summary = sess.run([train_opt, merged], {inputs: batch_xs,
labels: batch_ys, is_training: True})
writer.add_summary(summary, batch_i)
if batch_i % 500 == 0:
loss, acc = sess.run([model_loss, accuracy], {inputs: mnist.
validation.images, labels: mnist.validation.labels,
is_training: False})
print(
'Batch: {:>2}: Validation loss: {:>3.5f}, Validation accuracy: {:>3.5f}'
.format(batch_i, loss, acc))
elif batch_i % 100 == 0:
loss, acc = sess.run([model_loss, accuracy], {inputs: batch_xs,
labels: batch_ys, is_training: False})
print(
'Batch: {:>2}: Training loss: {:>3.5f}, Training accuracy: {:>3.5f}'
.format(batch_i, loss, acc))
acc = sess.run(accuracy, {inputs: mnist.validation.images, labels:
mnist.validation.labels, is_training: False})
print('Final validation accuracy: {:>3.5f}'.format(acc))
acc = sess.run(accuracy, {inputs: mnist.test.images, labels: mnist.test
.labels, is_training: False})
print('Final test accuracy: {:>3.5f}'.format(acc))
| import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets('MNIST_data/', one_hot=True, reshape=False)
def fully_connected(prev_layer, num_units, batch_norm, is_training=False):
layer = tf.layers.dense(prev_layer, num_units, use_bias=False,
activation=None)
if batch_norm:
layer = tf.layers.batch_normalization(layer, training=is_training)
layer = tf.nn.relu(layer)
return layer
def conv_layer(prev_layer, layer_depth, batch_norm, is_training=False):
if layer_depth % 3 == 0:
strides = 2
else:
strides = 1
conv_layer = tf.layers.conv2d(prev_layer, layer_depth * 4, 3, strides,
'same', use_bias=False, activation=None)
if batch_norm:
conv_layer = tf.layers.batch_normalization(conv_layer, training=
is_training)
conv_layer = tf.nn.relu(conv_layer)
return conv_layer
num_batches = 3000
batch_size = 128
learning_rate = 0.002
layer_num = 5
batch_norm = True
inputs = tf.placeholder(tf.float32, [None, 28, 28, 1])
labels = tf.placeholder(tf.float32, [None, 10])
is_training = tf.placeholder(tf.bool)
layer = inputs
for layer_i in range(1, 1 + layer_num):
layer = conv_layer(layer, layer_i, batch_norm, is_training)
orig_shape = layer.get_shape().as_list()
layer = tf.reshape(layer, shape=[-1, orig_shape[1] * orig_shape[2] *
orig_shape[3]])
layer = fully_connected(layer, 100, batch_norm, is_training)
logits = tf.layers.dense(layer, 10)
model_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=
logits, labels=labels))
tf.summary.scalar('conv_loss', model_loss)
if batch_norm:
with tf.control_dependencies(tf.get_collection(tf.GraphKeys.UPDATE_OPS)):
train_opt = tf.train.AdamOptimizer(learning_rate).minimize(model_loss)
else:
train_opt = tf.train.GradientDescentOptimizer(learning_rate).minimize(
model_loss)
correct_prediction = tf.equal(tf.argmax(logits, 1), tf.argmax(labels, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
with tf.Session() as sess:
merged = tf.summary.merge_all()
if batch_norm:
logdir = 'mnist/conv/SGD_batchnorm'
else:
logdir = 'mnist/conv/SGD_no_batchnorm'
writer = tf.summary.FileWriter(logdir, sess.graph)
sess.run(tf.global_variables_initializer())
for batch_i in range(num_batches):
batch_xs, batch_ys = mnist.train.next_batch(batch_size)
_, summary = sess.run([train_opt, merged], {inputs: batch_xs,
labels: batch_ys, is_training: True})
writer.add_summary(summary, batch_i)
if batch_i % 500 == 0:
loss, acc = sess.run([model_loss, accuracy], {inputs: mnist.
validation.images, labels: mnist.validation.labels,
is_training: False})
print(
'Batch: {:>2}: Validation loss: {:>3.5f}, Validation accuracy: {:>3.5f}'
.format(batch_i, loss, acc))
elif batch_i % 100 == 0:
loss, acc = sess.run([model_loss, accuracy], {inputs: batch_xs,
labels: batch_ys, is_training: False})
print(
'Batch: {:>2}: Training loss: {:>3.5f}, Training accuracy: {:>3.5f}'
.format(batch_i, loss, acc))
acc = sess.run(accuracy, {inputs: mnist.validation.images, labels:
mnist.validation.labels, is_training: False})
print('Final validation accuracy: {:>3.5f}'.format(acc))
acc = sess.run(accuracy, {inputs: mnist.test.images, labels: mnist.test
.labels, is_training: False})
print('Final test accuracy: {:>3.5f}'.format(acc))
| import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("MNIST_data/", one_hot=True, reshape=False)
def fully_connected(prev_layer, num_units, batch_norm, is_training=False):
layer = tf.layers.dense(prev_layer, num_units, use_bias=False, activation=None)
if batch_norm:
layer = tf.layers.batch_normalization(layer, training=is_training)
layer = tf.nn.relu(layer)
return layer
def conv_layer(prev_layer, layer_depth, batch_norm, is_training=False):
if layer_depth % 3 == 0:
strides = 2
else:
strides = 1
conv_layer = tf.layers.conv2d(prev_layer, layer_depth*4, 3, strides, 'same', use_bias=False, activation=None)
if batch_norm:
conv_layer = tf.layers.batch_normalization(conv_layer, training=is_training)
conv_layer = tf.nn.relu(conv_layer)
return conv_layer
num_batches = 3000
batch_size = 128
learning_rate = 0.002
layer_num = 5
batch_norm = True
inputs = tf.placeholder(tf.float32, [None, 28, 28, 1])
labels = tf.placeholder(tf.float32, [None, 10])
is_training = tf.placeholder(tf.bool)
layer = inputs
for layer_i in range(1, 1+layer_num):
layer = conv_layer(layer, layer_i, batch_norm, is_training)
orig_shape = layer.get_shape().as_list()
layer = tf.reshape(layer, shape=[-1, orig_shape[1] * orig_shape[2] * orig_shape[3]])
layer = fully_connected(layer, 100, batch_norm, is_training)
logits = tf.layers.dense(layer, 10)
model_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=logits, labels=labels))
tf.summary.scalar('conv_loss',model_loss)
if batch_norm:
with tf.control_dependencies(tf.get_collection(tf.GraphKeys.UPDATE_OPS)):
#train_opt = tf.train.GradientDescentOptimizer(learning_rate).minimize(model_loss)
#train_opt = tf.train.RMSPropOptimize(learning_rate).minimize(model_loss)
train_opt = tf.train.AdamOptimizer(learning_rate).minimize(model_loss)
else:
train_opt = tf.train.GradientDescentOptimizer(learning_rate).minimize(model_loss)
#train_opt = tf.train.RMSPropOptimize(learning_rate).minimize(model_loss)
#train_opt = tf.train.AdamOptimizer(learning_rate).minimize(model_loss)
correct_prediction = tf.equal(tf.argmax(logits,1), tf.argmax(labels,1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
with tf.Session() as sess:
merged = tf.summary.merge_all()
if batch_norm:
logdir = "mnist/conv/SGD_batchnorm"
else:
logdir = "mnist/conv/SGD_no_batchnorm"
writer = tf.summary.FileWriter(logdir, sess.graph)
sess.run(tf.global_variables_initializer())
for batch_i in range(num_batches):
batch_xs, batch_ys = mnist.train.next_batch(batch_size)
_,summary = sess.run([train_opt,merged], {inputs: batch_xs, labels: batch_ys, is_training: True})
writer.add_summary(summary, batch_i)
if batch_i % 500 == 0:
loss, acc = sess.run([model_loss, accuracy], {inputs: mnist.validation.images, labels: mnist.validation.labels, is_training: False})
print('Batch: {:>2}: Validation loss: {:>3.5f}, Validation accuracy: {:>3.5f}'.format(batch_i, loss, acc))
elif batch_i % 100 == 0:
loss, acc = sess.run([model_loss, accuracy], {inputs: batch_xs, labels: batch_ys, is_training: False})
print('Batch: {:>2}: Training loss: {:>3.5f}, Training accuracy: {:>3.5f}'.format(batch_i, loss, acc))
acc = sess.run(accuracy, {inputs: mnist.validation.images, labels: mnist.validation.labels,is_training: False})
print('Final validation accuracy: {:>3.5f}'.format(acc))
acc = sess.run(accuracy, {inputs: mnist.test.images, labels: mnist.test.labels,is_training: False})
print('Final test accuracy: {:>3.5f}'.format(acc)) | [
1,
3,
4,
5,
6
] |
1,433 | 1ddc261cf174c109583fd0ead1f537673d29090a | <mask token>
| <mask token>
for match in matcher:
print(match.start())
print(match.group())
| <mask token>
x = '[a-zA-Z]'
matcher = re.finditer(x, 'abtABIkz')
for match in matcher:
print(match.start())
print(match.group())
| import re
x = '[a-zA-Z]'
matcher = re.finditer(x, 'abtABIkz')
for match in matcher:
print(match.start())
print(match.group())
| #rules used for pattern matching
# #1. x='[abc]' either a,b or c
#eg:
# import re
# x="[abc]"
# matcher=re.finditer(x,"abt cq5kz")
# for match in matcher:
# print(match.start())
# print(match.group())
#2. x='[^abc]' except abc
#eg:
# import re
# x="[^abc]"
# matcher=re.finditer(x,"abt cq5kz")
# for match in matcher:
# print(match.start())
# print(match.group())
#3. x='[a-z]' a to z ^ cap means that is not included
#eg
# import re
# x="[a-z]"
# matcher=re.finditer(x,"abt cq5kz")
# for match in matcher:
# print(match.start())
# print(match.group())
#eg with ^
# import re
# x="[^a-z]"
# matcher=re.finditer(x,"abt cq5kz")
# for match in matcher:
# print(match.start())
# print(match.group())
#4. x='[A-Z]' A TO Z
# import re
# x="[A-Z]"
# matcher=re.finditer(x,"abt SC5kZ")
# for match in matcher:
# print(match.start())
# print(match.group())
#5.X="[a-zA-Z]" BOTH LOWER AND UPPERCASE ARE CHECKED
import re
x="[a-zA-Z]"
matcher=re.finditer(x,"abtABIkz")
for match in matcher:
print(match.start())
print(match.group())
#6. X="[0-9]"
# import re
# x="[0-9]"
# matcher=re.finditer(x,"ab1z7")
# for match in matcher:
# print(match.start())
# print(match.group())
#7.x="[a-zA-Z0-9]"
# import re
# x="[a-zA-Z0-9]"
# matcher=re.finditer(x,"ab72ABIkz")
# for match in matcher:
# print(match.start())
# print(match.group())
#8.x='\s' check space
# import re
# x="\s"
# matcher=re.finditer(x,"ab tAB Ikz")
# for match in matcher:
# print(match.start())
# print(match.group())
#9.x='\d' check the digits
# import re
# x="\d"
# matcher=re.finditer(x,"ab7tAB12kz")
# for match in matcher:
# print(match.start())
# print(match.group())
# #10. x='\D' except digits
# import re
# x="\D"
# matcher=re.finditer(x,"ab001tAB5236Ikz")
# for match in matcher:
# print(match.start())
# print(match.group())
# #11. x='\w' all words except special characters
# import re
# x="\w"
# matcher=re.finditer(x,"ab %tAB @Ikz")
# for match in matcher:
# print(match.start())
# print(match.group())
# #12. x='\W' for special characters
# import re
# x="\W"
# matcher=re.finditer(x,"ab!!tAB@Ikz")
# for match in matcher:
# print(match.start())
# print(match.group())
| [
0,
1,
2,
3,
4
] |
1,434 | b220189d506737bf8cff9e600d1cfd4d7bc8435d | # -*- coding:utf-8 -*-
# Copyright 2015 NEC Corporation. #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); #
# you may not use this file except in compliance with the License. #
# You may obtain a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
import logging
from org.o3project.odenos.core.util.remote_object_interface import RemoteObjectInterface
from org.o3project.odenos.remoteobject.message.request import Request
from org.o3project.odenos.remoteobject.manager.system.component_connection import (
ComponentConnection
)
from org.o3project.odenos.remoteobject.manager.system.\
component_connection_logic_and_network import (
ComponentConnectionLogicAndNetwork)
from org.o3project.odenos.remoteobject.object_property import ObjectProperty
# pylint: disable=R0923
class SystemManagerInterface(RemoteObjectInterface):
COMP_MNGS_PATH = "component_managers"
COMP_MNG_PATH = "component_managers/%s"
EVENT_MNG_PATH = "event_manager"
COMP_TYPES_PATH = "component_types"
COMP_TYPE_PATH = "component_types/%s"
COMPS_PATH = "components"
COMP_PATH = "components/%s"
CONNECTIONS_PATH = "connections"
CONNECTION_PATH = "connections/%s"
OBJECT_PATH = "objects/%s"
def __init__(self, dispatcher, source_object_id=None):
'''
NOTE: source_object_id is required for the ODENOS monitor tool.
'''
logging.debug("Create SystemManagerInterface ID:"
+ dispatcher.system_manager_id)
super(SystemManagerInterface, self).__init__(
dispatcher,
dispatcher.system_manager_id,
source_object_id)
@property
def system_manager_id(self):
return self.object_id
###################################
# Basic request
###################################
# GET Component Managers.
def get_component_managers(self):
logging.debug("GET ComponentManagers")
resp = self._get_object_to_remote_object(self.COMP_MNGS_PATH)
if resp.is_error(Request.Method.GET):
return None
return resp.body
# GET Event Manager.
def get_event_manager(self):
logging.debug("GET EventManager")
resp = self._get_object_to_remote_object(self.EVENT_MNG_PATH)
if resp.is_error(Request.Method.GET):
return None
return resp.body
# GET ComponentTypes.
def get_component_types(self):
logging.debug("GET ComponentTypes")
resp = self._get_object_to_remote_object(self.COMP_TYPES_PATH)
if resp.is_error(Request.Method.GET):
return None
return resp.body
# GET Components.
def get_components(self):
logging.debug("GET Components")
resp = self._get_object_to_remote_object(self.COMPS_PATH)
if resp.is_error(Request.Method.GET):
return None
return resp.body
# GET Connections.
def get_connections(self):
logging.debug("GET Connections")
resp = self._get_object_to_remote_object(self.CONNECTIONS_PATH)
if resp.is_error(Request.Method.GET):
return None
connections = {}
try:
for conn_id, connection in resp.body.items():
if connection[ComponentConnection.OBJECT_TYPE] ==\
ComponentConnectionLogicAndNetwork.TYPE:
connections[conn_id] =\
ComponentConnectionLogicAndNetwork.create_from_packed(
connection)
else:
connections[conn_id] =\
ComponentConnection.create_from_packed(connection)
except KeyError, err:
logging.error("GET Connections Invalid Response Message"
+ " KeyError: " + str(err))
return None
return connections
# GET Component Manager.
def get_component_manager(self, comp_mgr_id):
logging.debug("GET ComponentManager ComponentMgrID:" + comp_mgr_id)
path = self.COMP_MNG_PATH % comp_mgr_id
resp = self._get_object_to_remote_object(path)
if resp.is_error(Request.Method.GET):
return None
return resp.body
def add_component_manager(self, compmgr):
logging.debug("object_property of ComponentManager %s is %s",
compmgr.object_id,
compmgr.object_property.packed_object)
path = "component_managers/%s" % compmgr.object_id
resp = self._put_object_to_remote_object(path, compmgr.object_property)
if resp.is_error(Request.Method.PUT):
logging.error("Failed registration to SystemManager.")
compmgr.set_state(ObjectProperty.State.ERROR)
return
logging.info("Complete ComponentManager registration to SystemManager.")
# GET ComponentType.
def get_component_type(self, comp_type):
logging.debug("GET ComponentType Type:" + comp_type)
path = self.COMP_TYPE_PATH % comp_type
resp = self._get_object_to_remote_object(path)
if resp.is_error(Request.Method.GET):
return None
return resp.body
# GET Component.
def get_component(self, comp_id):
logging.debug("GET Component ComponentID:" + comp_id)
path = self.COMP_PATH % comp_id
resp = self._get_object_to_remote_object(path)
if resp.is_error(Request.Method.GET):
return None
return resp.body
# GET Connection.
def get_connection(self, conn_id):
logging.debug("GET Connection ConnectionID:" + conn_id)
path = self.CONNECTION_PATH % conn_id
resp = self._get_object_to_remote_object(path)
if resp.is_error(Request.Method.GET):
return None
connection = None
try:
if resp.body[ComponentConnection.OBJECT_TYPE] ==\
ComponentConnectionLogicAndNetwork.TYPE:
connection =\
ComponentConnectionLogicAndNetwork.create_from_packed(
resp.body)
else:
connection =\
ComponentConnection.create_from_packed(resp.body)
except KeyError, err:
logging.error("GET Connection Invalid Response Message"
+ " KeyError: " + str(err))
return None
return connection
# GET Object.
def get_object(self, object_id):
logging.debug("GET Object ObjectID:" + object_id)
path = self.OBJECT_PATH % object_id
resp = self._get_object_to_remote_object(path)
if resp.is_error(Request.Method.GET):
return None
return resp.body
# PUT Connection.
def put_connection(self, connection):
logging.debug("PUT Connection ConnectionID:" + connection.id)
path = self.CONNECTION_PATH % connection.id
return self._put_object_to_remote_object(path,
connection)
# PUT ComponentManagers.
def put_component_managers(self, property_):
logging.debug("PUT ComponentManagers")
path = self.COMP_MNG_PATH % property_.object_id
return self._put_object_to_remote_object(path,
property_)
# PUT Components.
def put_components(self, property_):
logging.debug("PUT Components")
path = self.COMP_PATH % property_.object_id
return self._put_object_to_remote_object(path,
property_)
# POST Components.
def post_components(self, property_):
logging.debug("POST Components")
return self._post_object_to_remote_object(self.COMPS_PATH,
property_)
# POST Connections.
def post_connections(self, connection):
logging.debug("POST Connections")
return self._post_object_to_remote_object(self.CONNECTIONS_PATH,
connection)
# DELETE ComponentManagers.
def del_component_managers(self, comp_mgr_id):
logging.debug("DELETE ComponentManagers ComponentMgrID:" + comp_mgr_id)
path = self.COMP_MNG_PATH % comp_mgr_id
return self._del_object_to_remote_object(path)
# DELETE Components.
def del_components(self, comp_id):
logging.debug("DELETE Components ComponentID:" + comp_id)
path = self.COMP_PATH % comp_id
return self._del_object_to_remote_object(path)
# DELETE Components.
def del_connections(self, conn_id):
logging.debug("DELETE Connections ConnectionID:" + conn_id)
path = self.CONNECTION_PATH % conn_id
return self._del_object_to_remote_object(path)
| null | null | null | null | [
0
] |
1,435 | f94894e5d3e6a0ff367911c72f4d863ac32c8baa | <mask token>
| <mask token>
print(type(data))
<mask token>
print(data)
| <mask token>
url = 'http://icanhazdadjoke.com/'
response = requests.get(url, headers={'Accept': 'application/json'})
data = response.text
print(type(data))
data = response.json()
print(data)
| import requests
url = 'http://icanhazdadjoke.com/'
response = requests.get(url, headers={'Accept': 'application/json'})
data = response.text
print(type(data))
data = response.json()
print(data)
| import requests
# url="http://www.google.com"
# response=requests.get(url)
# print(response.status_code)
url = "http://icanhazdadjoke.com/"
response = requests.get(url, headers={"Accept": "application/json"})
data = response.text
print(type(data))
data = response.json()
print(data)
| [
0,
1,
2,
3,
4
] |
1,436 | 507251113d80eaa3684081f7814470053b04dda9 | <mask token>
| <mask token>
if __name__ == '__main__':
scale = 768
bitmap = Image.new('RGB', (scale, scale), 'white')
pix = bitmap.load()
c = complex(-0.585, 0.85)
move = 0.0
maxIter = 255
for x in range(scale):
for y in range(scale):
zx = 1.5 * (x - scale / 2) / (0.5 * scale) + move
zy = 1.0 * (y - scale / 2) / (0.5 * scale) + move
z = complex(zx, zy)
i = maxIter
while abs(z * z) < 4 and i > 1:
z = z ** 2 + c
i -= 1
pix[x, y] = (i << 21) + (i << 10) + i * 8
bitmap.show()
| from PIL import Image
if __name__ == '__main__':
scale = 768
bitmap = Image.new('RGB', (scale, scale), 'white')
pix = bitmap.load()
c = complex(-0.585, 0.85)
move = 0.0
maxIter = 255
for x in range(scale):
for y in range(scale):
zx = 1.5 * (x - scale / 2) / (0.5 * scale) + move
zy = 1.0 * (y - scale / 2) / (0.5 * scale) + move
z = complex(zx, zy)
i = maxIter
while abs(z * z) < 4 and i > 1:
z = z ** 2 + c
i -= 1
pix[x, y] = (i << 21) + (i << 10) + i * 8
bitmap.show()
| #
#River Sheppard
#
#
from PIL import Image
if __name__ == "__main__":
scale = 768
# creating the new image in RGB mode
bitmap = Image.new("RGB", (scale, scale), "white")
# Allocating the storage for the image and
# loading the pixel data.
pix = bitmap.load()
# setting up the variables according to
# the equation to create the fractal
c = complex(-0.585, 0.85)
move = 0.0
maxIter = 255
for x in range(scale):
for y in range(scale):
zx = 1.5*(x - scale/2)/(0.5*scale) + move
zy = 1.0*(y - scale/2)/(0.5*scale) + move
z = complex(zx,zy)
i = maxIter
while abs(z*z) < 4 and i > 1:
z = z**2 + c
i -= 1
# convert byte to RGB (3 bytes), kinda
# magic to get nice colors
pix[x,y] = (i << 21) + (i << 10) + i*8
# to display the created fractal
bitmap.show()
| null | [
0,
1,
2,
3
] |
1,437 | 086c74669b6762a6b35e8a46f816db2f4f172caa | <mask token>
def switch_y_z(inter, liq_cutoff, vap_cutoff, liq_in, vap_in, int_in):
triangles = inter.triangulated_surface[0][inter.triangulated_surface[1]]
interface1 = np.zeros_like(triangles)
interface2 = np.zeros_like(triangles)
xlim, zlim, ylim = inter.universe.dimensions[0], inter.universe.dimensions[
1], inter.universe.dimensions[2]
for i in range(len(triangles)):
tmp = np.array([triangles[i][:, 0], triangles[i][:, 2], triangles[i
][:, 1]]).T
if triangles[i][:, -1][0] < zlim:
interface1[i] = tmp + np.array([0, liq_cutoff, 0])
interface2[i] = tmp + np.array([0, vap_cutoff, 0])
else:
interface1[i] = tmp - np.array([0, liq_cutoff, 0])
interface2[i] = tmp - np.array([0, vap_cutoff, 0])
return xlim, zlim, ylim, interface1, interface2
<mask token>
def set_axes_equal(ax):
"""Make axes of 3D plot have equal scale so that spheres appear as spheres,
cubes as cubes, etc.. This is one possible solution to Matplotlib's
ax.set_aspect('equal') and ax.axis('equal') not working for 3D.
Input
ax: a matplotlib axis, e.g., as output from plt.gca().
"""
limits = np.array([ax.get_xlim3d(), ax.get_ylim3d(), ax.get_zlim3d()])
origin = np.mean(limits, axis=1)
radius = 0.5 * np.max(np.abs(limits[:, 1] - limits[:, 0]))
set_axes_radius(ax, origin, radius)
| <mask token>
def switch_y_z(inter, liq_cutoff, vap_cutoff, liq_in, vap_in, int_in):
triangles = inter.triangulated_surface[0][inter.triangulated_surface[1]]
interface1 = np.zeros_like(triangles)
interface2 = np.zeros_like(triangles)
xlim, zlim, ylim = inter.universe.dimensions[0], inter.universe.dimensions[
1], inter.universe.dimensions[2]
for i in range(len(triangles)):
tmp = np.array([triangles[i][:, 0], triangles[i][:, 2], triangles[i
][:, 1]]).T
if triangles[i][:, -1][0] < zlim:
interface1[i] = tmp + np.array([0, liq_cutoff, 0])
interface2[i] = tmp + np.array([0, vap_cutoff, 0])
else:
interface1[i] = tmp - np.array([0, liq_cutoff, 0])
interface2[i] = tmp - np.array([0, vap_cutoff, 0])
return xlim, zlim, ylim, interface1, interface2
<mask token>
def set_axes_radius(ax, origin, radius):
ax.set_xlim3d([origin[0] - radius, origin[0] + radius])
ax.set_ylim3d([origin[1] - radius, origin[1] + radius])
ax.set_zlim3d([origin[2] - radius, origin[2] + radius])
def set_axes_equal(ax):
"""Make axes of 3D plot have equal scale so that spheres appear as spheres,
cubes as cubes, etc.. This is one possible solution to Matplotlib's
ax.set_aspect('equal') and ax.axis('equal') not working for 3D.
Input
ax: a matplotlib axis, e.g., as output from plt.gca().
"""
limits = np.array([ax.get_xlim3d(), ax.get_ylim3d(), ax.get_zlim3d()])
origin = np.mean(limits, axis=1)
radius = 0.5 * np.max(np.abs(limits[:, 1] - limits[:, 0]))
set_axes_radius(ax, origin, radius)
| <mask token>
def switch_y_z(inter, liq_cutoff, vap_cutoff, liq_in, vap_in, int_in):
triangles = inter.triangulated_surface[0][inter.triangulated_surface[1]]
interface1 = np.zeros_like(triangles)
interface2 = np.zeros_like(triangles)
xlim, zlim, ylim = inter.universe.dimensions[0], inter.universe.dimensions[
1], inter.universe.dimensions[2]
for i in range(len(triangles)):
tmp = np.array([triangles[i][:, 0], triangles[i][:, 2], triangles[i
][:, 1]]).T
if triangles[i][:, -1][0] < zlim:
interface1[i] = tmp + np.array([0, liq_cutoff, 0])
interface2[i] = tmp + np.array([0, vap_cutoff, 0])
else:
interface1[i] = tmp - np.array([0, liq_cutoff, 0])
interface2[i] = tmp - np.array([0, vap_cutoff, 0])
return xlim, zlim, ylim, interface1, interface2
def plot_interfaces(inter, liq_cutoff, vap_cutoff, liq_in, vap_in, int_in,
box_color='k'):
xlim, zlim, ylim, interface1, interface2 = switch_y_z(inter, liq_cutoff,
vap_cutoff, liq_in, vap_in, int_in)
fig = plt.figure(figsize=(12, 12))
ax1 = fig.add_subplot(111, projection='3d')
ax1.xaxis.set_pane_color((1.0, 1.0, 1.0, 0.0))
ax1.yaxis.set_pane_color((1.0, 1.0, 1.0, 0.0))
ax1.zaxis.set_pane_color((1.0, 1.0, 1.0, 0.0))
ax1.grid(False)
mesh2 = Poly3DCollection(interface1)
mesh2.set_edgecolor('none')
mesh2.set_alpha(0.3)
ax1.add_collection3d(mesh2)
mesh3 = Poly3DCollection(interface2)
mesh3.set_edgecolor('none')
mesh3.set_alpha(0.3)
ax1.add_collection3d(mesh3)
pos = inter.universe.atoms.positions
pos = np.array([pos[:, 0], pos[:, 2], pos[:, 1]]).T
pos_liq = pos[liq_in]
xyz_liq = np.vstack([pos_liq[:, 0], pos_liq[:, 1], pos_liq[:, 2]])
ax1.scatter(xyz_liq[0], xyz_liq[1], xyz_liq[2], color='r')
pos_vap = pos[vap_in]
xyz_vap = np.vstack([pos_vap[:, 0], pos_vap[:, 1], pos_vap[:, 2]])
ax1.scatter(xyz_vap[0], xyz_vap[1], xyz_vap[2], color='c')
pos_int = pos[int_in]
xyz_int = np.vstack([pos_int[:, 0], pos_int[:, 1], pos_int[:, 2]])
ax1.scatter(xyz_int[0], xyz_int[1], xyz_int[2], color='k')
pts = np.array(list(product([0, xlim], [0, ylim], [0, zlim])))
for s, e in combinations(pts, 2):
if np.sum(np.abs(s - e)) in (xlim, ylim, zlim):
ax1.plot3D(*zip(s, e), 'k-', color=box_color, linewidth=1)
ax1.set_xlabel('x')
ax1.set_ylabel('z')
ax1.set_zlabel('y')
plt.xlim([0, xlim])
plt.ylim([0, ylim])
ax1.set_xlim([0, xlim])
ax1.set_ylim([0, ylim])
ax1.set_zlim([0, zlim])
set_axes_equal(ax1)
ax1.view_init(0, 10)
plt.tight_layout()
plt.show()
def set_axes_radius(ax, origin, radius):
ax.set_xlim3d([origin[0] - radius, origin[0] + radius])
ax.set_ylim3d([origin[1] - radius, origin[1] + radius])
ax.set_zlim3d([origin[2] - radius, origin[2] + radius])
def set_axes_equal(ax):
"""Make axes of 3D plot have equal scale so that spheres appear as spheres,
cubes as cubes, etc.. This is one possible solution to Matplotlib's
ax.set_aspect('equal') and ax.axis('equal') not working for 3D.
Input
ax: a matplotlib axis, e.g., as output from plt.gca().
"""
limits = np.array([ax.get_xlim3d(), ax.get_ylim3d(), ax.get_zlim3d()])
origin = np.mean(limits, axis=1)
radius = 0.5 * np.max(np.abs(limits[:, 1] - limits[:, 0]))
set_axes_radius(ax, origin, radius)
| <mask token>
import numpy as np
from itertools import product, combinations
from mpl_toolkits.mplot3d.art3d import Poly3DCollection
import matplotlib.pyplot as plt
def switch_y_z(inter, liq_cutoff, vap_cutoff, liq_in, vap_in, int_in):
triangles = inter.triangulated_surface[0][inter.triangulated_surface[1]]
interface1 = np.zeros_like(triangles)
interface2 = np.zeros_like(triangles)
xlim, zlim, ylim = inter.universe.dimensions[0], inter.universe.dimensions[
1], inter.universe.dimensions[2]
for i in range(len(triangles)):
tmp = np.array([triangles[i][:, 0], triangles[i][:, 2], triangles[i
][:, 1]]).T
if triangles[i][:, -1][0] < zlim:
interface1[i] = tmp + np.array([0, liq_cutoff, 0])
interface2[i] = tmp + np.array([0, vap_cutoff, 0])
else:
interface1[i] = tmp - np.array([0, liq_cutoff, 0])
interface2[i] = tmp - np.array([0, vap_cutoff, 0])
return xlim, zlim, ylim, interface1, interface2
def plot_interfaces(inter, liq_cutoff, vap_cutoff, liq_in, vap_in, int_in,
box_color='k'):
xlim, zlim, ylim, interface1, interface2 = switch_y_z(inter, liq_cutoff,
vap_cutoff, liq_in, vap_in, int_in)
fig = plt.figure(figsize=(12, 12))
ax1 = fig.add_subplot(111, projection='3d')
ax1.xaxis.set_pane_color((1.0, 1.0, 1.0, 0.0))
ax1.yaxis.set_pane_color((1.0, 1.0, 1.0, 0.0))
ax1.zaxis.set_pane_color((1.0, 1.0, 1.0, 0.0))
ax1.grid(False)
mesh2 = Poly3DCollection(interface1)
mesh2.set_edgecolor('none')
mesh2.set_alpha(0.3)
ax1.add_collection3d(mesh2)
mesh3 = Poly3DCollection(interface2)
mesh3.set_edgecolor('none')
mesh3.set_alpha(0.3)
ax1.add_collection3d(mesh3)
pos = inter.universe.atoms.positions
pos = np.array([pos[:, 0], pos[:, 2], pos[:, 1]]).T
pos_liq = pos[liq_in]
xyz_liq = np.vstack([pos_liq[:, 0], pos_liq[:, 1], pos_liq[:, 2]])
ax1.scatter(xyz_liq[0], xyz_liq[1], xyz_liq[2], color='r')
pos_vap = pos[vap_in]
xyz_vap = np.vstack([pos_vap[:, 0], pos_vap[:, 1], pos_vap[:, 2]])
ax1.scatter(xyz_vap[0], xyz_vap[1], xyz_vap[2], color='c')
pos_int = pos[int_in]
xyz_int = np.vstack([pos_int[:, 0], pos_int[:, 1], pos_int[:, 2]])
ax1.scatter(xyz_int[0], xyz_int[1], xyz_int[2], color='k')
pts = np.array(list(product([0, xlim], [0, ylim], [0, zlim])))
for s, e in combinations(pts, 2):
if np.sum(np.abs(s - e)) in (xlim, ylim, zlim):
ax1.plot3D(*zip(s, e), 'k-', color=box_color, linewidth=1)
ax1.set_xlabel('x')
ax1.set_ylabel('z')
ax1.set_zlabel('y')
plt.xlim([0, xlim])
plt.ylim([0, ylim])
ax1.set_xlim([0, xlim])
ax1.set_ylim([0, ylim])
ax1.set_zlim([0, zlim])
set_axes_equal(ax1)
ax1.view_init(0, 10)
plt.tight_layout()
plt.show()
def set_axes_radius(ax, origin, radius):
ax.set_xlim3d([origin[0] - radius, origin[0] + radius])
ax.set_ylim3d([origin[1] - radius, origin[1] + radius])
ax.set_zlim3d([origin[2] - radius, origin[2] + radius])
def set_axes_equal(ax):
"""Make axes of 3D plot have equal scale so that spheres appear as spheres,
cubes as cubes, etc.. This is one possible solution to Matplotlib's
ax.set_aspect('equal') and ax.axis('equal') not working for 3D.
Input
ax: a matplotlib axis, e.g., as output from plt.gca().
"""
limits = np.array([ax.get_xlim3d(), ax.get_ylim3d(), ax.get_zlim3d()])
origin = np.mean(limits, axis=1)
radius = 0.5 * np.max(np.abs(limits[:, 1] - limits[:, 0]))
set_axes_radius(ax, origin, radius)
| #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Dec 30 14:34:56 2019
ref :
https://stackoverflow.com/questions/11140163/plotting-a-3d-cube-a-sphere-and-a-vector-in-matplotlib
@author: jiedeng
"""
import numpy as np
from itertools import product, combinations
from mpl_toolkits.mplot3d.art3d import Poly3DCollection
import matplotlib.pyplot as plt
def switch_y_z(inter,liq_cutoff,vap_cutoff,liq_in,vap_in,int_in):
triangles = inter.triangulated_surface[0][inter.triangulated_surface[1]]
interface1 = np.zeros_like(triangles)
interface2 = np.zeros_like(triangles)
xlim, zlim, ylim = inter.universe.dimensions[0],inter.universe.dimensions[1],inter.universe.dimensions[2]
for i in range(len(triangles)):
## swap y and z
tmp = np.array([triangles[i][:,0],triangles[i][:,2],triangles[i][:,1]]).T
if triangles[i][:,-1][0] < zlim:
interface1[i] = tmp + np.array([0,liq_cutoff,0])
interface2[i] = tmp + np.array([0,vap_cutoff,0])
else:
interface1[i] = tmp - np.array([0,liq_cutoff,0])
interface2[i] = tmp - np.array([0,vap_cutoff,0])
return xlim,zlim,ylim,interface1,interface2
def plot_interfaces(inter,liq_cutoff,vap_cutoff,liq_in,vap_in,int_in,box_color='k'):
xlim,zlim,ylim,interface1,interface2 = switch_y_z(inter,liq_cutoff,vap_cutoff,liq_in,vap_in,int_in)
fig = plt.figure(figsize=(12, 12))
ax1 = fig.add_subplot(111, projection='3d')
ax1.xaxis.set_pane_color((1.0, 1.0, 1.0, 0.0))
ax1.yaxis.set_pane_color((1.0, 1.0, 1.0, 0.0))
ax1.zaxis.set_pane_color((1.0, 1.0, 1.0, 0.0))
ax1.grid(False)
# Fancy indexing: `verts[faces]` to generate a collection of triangles
# mesh1 = Poly3DCollection(triangles)
# mesh1.set_edgecolor('none')
# mesh1.set_alpha(0.3)
# ax1.add_collection3d(mesh1)
mesh2 = Poly3DCollection(interface1)
mesh2.set_edgecolor('none')
mesh2.set_alpha(0.3)
ax1.add_collection3d(mesh2)
mesh3 = Poly3DCollection(interface2)
mesh3.set_edgecolor('none')
mesh3.set_alpha(0.3);
# mesh3.set_facecolor('b')
ax1.add_collection3d(mesh3)
pos = inter.universe.atoms.positions
pos = np.array([pos[:,0],pos[:,2],pos[:,1]]).T
pos_liq = pos[liq_in]
xyz_liq = np.vstack([pos_liq[::, 0], pos_liq[::, 1], pos_liq[::, 2]])
ax1.scatter(xyz_liq[0],xyz_liq[1],xyz_liq[2],color='r')
pos_vap = pos[vap_in]
xyz_vap = np.vstack([pos_vap[::, 0], pos_vap[::, 1], pos_vap[::, 2]])
ax1.scatter(xyz_vap[0],xyz_vap[1],xyz_vap[2],color='c')
pos_int = pos[int_in]
xyz_int = np.vstack([pos_int[::, 0], pos_int[::, 1], pos_int[::, 2]])
ax1.scatter(xyz_int[0],xyz_int[1],xyz_int[2],color='k')
pts = np.array(list(product([0,xlim], [0,ylim], [0,zlim])))
for s, e in combinations(pts, 2):
if np.sum(np.abs(s-e)) in (xlim,ylim,zlim):
ax1.plot3D(*zip(s, e), 'k-',color=box_color,linewidth=1)
ax1.set_xlabel("x")
ax1.set_ylabel("z")
ax1.set_zlabel("y")
plt.xlim([0,xlim])
plt.ylim([0,ylim])
# plt.ylim([0,ylim])
ax1.set_xlim([0,xlim])
ax1.set_ylim([0,ylim])
ax1.set_zlim([0,zlim])
# ax1.set_aspect('equal')
set_axes_equal(ax1)
ax1.view_init(0, 10)
plt.tight_layout()
plt.show()
def set_axes_radius(ax, origin, radius):
ax.set_xlim3d([origin[0] - radius, origin[0] + radius])
ax.set_ylim3d([origin[1] - radius, origin[1] + radius])
ax.set_zlim3d([origin[2] - radius, origin[2] + radius])
def set_axes_equal(ax):
'''Make axes of 3D plot have equal scale so that spheres appear as spheres,
cubes as cubes, etc.. This is one possible solution to Matplotlib's
ax.set_aspect('equal') and ax.axis('equal') not working for 3D.
Input
ax: a matplotlib axis, e.g., as output from plt.gca().
'''
limits = np.array([
ax.get_xlim3d(),
ax.get_ylim3d(),
ax.get_zlim3d(),])
# print(limits)
origin = np.mean(limits, axis=1)
radius = 0.5 * np.max(np.abs(limits[:, 1] - limits[:, 0]))
set_axes_radius(ax, origin, radius)
#plot_interfaces(inter,liq_cutoff,vap_cutoff,liq_in,vap_in,int_in) | [
2,
3,
4,
5,
6
] |
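A brief, hypothetical usage sketch of the set_axes_equal() helper documented in the row above; the wireframe sphere is made-up example data, and the helper itself is assumed to be in scope (e.g. taken from the module above). Without the call, matplotlib scales each 3D axis independently and the sphere renders as an ellipsoid.

import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D  # registers the '3d' projection on older matplotlib

fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
u, v = np.mgrid[0:2 * np.pi:30j, 0:np.pi:20j]
# Plot a unit sphere; it should look spherical only after equalizing the axes.
ax.plot_wireframe(np.cos(u) * np.sin(v), np.sin(u) * np.sin(v), np.cos(v))
set_axes_equal(ax)  # assumed to be available from the code above
plt.show()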
1,438 | 36e350e0d578e169efaafb9e311566d71d6bc59e | <mask token>
def eval_genome(genome, config):
net = neat.nn.FeedForwardNetwork.create(genome, config)
env.reset()
ob, _, _, _ = env.step(env.action_space.sample())
inx = int(ob.shape[0] / 8)
iny = int(ob.shape[1] / 8)
fitnesses = []
score1 = 0
score2 = 0
fitness = 0.0
done = False
start_time = time.time()
series_of_keys = []
series_of_nnOut = []
while not done:
env.render()
ob = cv2.resize(ob, (inx, iny))
ob = cv2.cvtColor(ob, cv2.COLOR_BGR2GRAY)
ob = np.reshape(ob, (inx, iny))
imgarray = np.ndarray.flatten(ob)
imgarray = np.interp(imgarray, (0, 254), (-1, +1))
nnOut = net.activate(imgarray)
for o in nnOut:
if o > 0.0:
keys = [1, 0]
else:
keys = [0, 1]
actions = [0] * 4 + keys + [0] * 2
series_of_keys.append(keys)
series_of_nnOut.append(nnOut)
ob, rew, done, info = env.step(actions)
score1 = info['score1']
score2 = info['score2']
if score1 > 19 or score2 > 19:
done = True
print(series_of_keys)
run_time = time.time() - start_time
fitness = score2 - score1 / (run_time - 2)
return fitness
def eval_genomes(genomes, config):
for genome_id, genome in genomes:
genome.fitness = eval_genome(genome, config)
def run():
local_dir = os.path.dirname(__file__)
config_path = os.path.join(local_dir, 'pong_config')
config = neat.Config(neat.DefaultGenome, neat.DefaultReproduction, neat
.DefaultSpeciesSet, neat.DefaultStagnation, config_path)
pop = neat.Population(config)
stats = neat.StatisticsReporter()
pop.add_reporter(stats)
pop.add_reporter(neat.StdOutReporter(True))
pe = neat.ParallelEvaluator(10, eval_genome)
winner = pop.run(pe.evaluate)
with open('winner-feedforward', 'wb') as f:
pickle.dump(winner, f)
print(winner)
visualize.plot_stats(stats, ylog=True, view=True, filename=
'feedforward-fitness.svg')
visualize.plot_species(stats, view=True, filename=
'feedforward-speciation.svg')
node_names = {(-1): 'x', (-2): 'dx', (-3): 'theta', (-4): 'dtheta', (0):
'control'}
visualize.draw_net(config, winner, True, node_names=node_names)
visualize.draw_net(config, winner, view=True, node_names=node_names,
filename='winner-feedforward.gv')
visualize.draw_net(config, winner, view=True, node_names=node_names,
filename='winner-feedforward-enabled.gv', show_disabled=False)
visualize.draw_net(config, winner, view=True, node_names=node_names,
filename='winner-feedforward-enabled-pruned.gv', show_disabled=
False, prune_unused=True)
<mask token>
| <mask token>
def eval_genome(genome, config):
net = neat.nn.FeedForwardNetwork.create(genome, config)
env.reset()
ob, _, _, _ = env.step(env.action_space.sample())
inx = int(ob.shape[0] / 8)
iny = int(ob.shape[1] / 8)
fitnesses = []
score1 = 0
score2 = 0
fitness = 0.0
done = False
start_time = time.time()
series_of_keys = []
series_of_nnOut = []
while not done:
env.render()
ob = cv2.resize(ob, (inx, iny))
ob = cv2.cvtColor(ob, cv2.COLOR_BGR2GRAY)
ob = np.reshape(ob, (inx, iny))
imgarray = np.ndarray.flatten(ob)
imgarray = np.interp(imgarray, (0, 254), (-1, +1))
nnOut = net.activate(imgarray)
for o in nnOut:
if o > 0.0:
keys = [1, 0]
else:
keys = [0, 1]
actions = [0] * 4 + keys + [0] * 2
series_of_keys.append(keys)
series_of_nnOut.append(nnOut)
ob, rew, done, info = env.step(actions)
score1 = info['score1']
score2 = info['score2']
if score1 > 19 or score2 > 19:
done = True
print(series_of_keys)
run_time = time.time() - start_time
fitness = score2 - score1 / (run_time - 2)
return fitness
def eval_genomes(genomes, config):
for genome_id, genome in genomes:
genome.fitness = eval_genome(genome, config)
def run():
local_dir = os.path.dirname(__file__)
config_path = os.path.join(local_dir, 'pong_config')
config = neat.Config(neat.DefaultGenome, neat.DefaultReproduction, neat
.DefaultSpeciesSet, neat.DefaultStagnation, config_path)
pop = neat.Population(config)
stats = neat.StatisticsReporter()
pop.add_reporter(stats)
pop.add_reporter(neat.StdOutReporter(True))
pe = neat.ParallelEvaluator(10, eval_genome)
winner = pop.run(pe.evaluate)
with open('winner-feedforward', 'wb') as f:
pickle.dump(winner, f)
print(winner)
visualize.plot_stats(stats, ylog=True, view=True, filename=
'feedforward-fitness.svg')
visualize.plot_species(stats, view=True, filename=
'feedforward-speciation.svg')
node_names = {(-1): 'x', (-2): 'dx', (-3): 'theta', (-4): 'dtheta', (0):
'control'}
visualize.draw_net(config, winner, True, node_names=node_names)
visualize.draw_net(config, winner, view=True, node_names=node_names,
filename='winner-feedforward.gv')
visualize.draw_net(config, winner, view=True, node_names=node_names,
filename='winner-feedforward-enabled.gv', show_disabled=False)
visualize.draw_net(config, winner, view=True, node_names=node_names,
filename='winner-feedforward-enabled-pruned.gv', show_disabled=
False, prune_unused=True)
if __name__ == '__main__':
run()
| <mask token>
env = retro.make(game='Pong-Atari2600')
def eval_genome(genome, config):
net = neat.nn.FeedForwardNetwork.create(genome, config)
env.reset()
ob, _, _, _ = env.step(env.action_space.sample())
inx = int(ob.shape[0] / 8)
iny = int(ob.shape[1] / 8)
fitnesses = []
score1 = 0
score2 = 0
fitness = 0.0
done = False
start_time = time.time()
series_of_keys = []
series_of_nnOut = []
while not done:
env.render()
ob = cv2.resize(ob, (inx, iny))
ob = cv2.cvtColor(ob, cv2.COLOR_BGR2GRAY)
ob = np.reshape(ob, (inx, iny))
imgarray = np.ndarray.flatten(ob)
imgarray = np.interp(imgarray, (0, 254), (-1, +1))
nnOut = net.activate(imgarray)
for o in nnOut:
if o > 0.0:
keys = [1, 0]
else:
keys = [0, 1]
actions = [0] * 4 + keys + [0] * 2
series_of_keys.append(keys)
series_of_nnOut.append(nnOut)
ob, rew, done, info = env.step(actions)
score1 = info['score1']
score2 = info['score2']
if score1 > 19 or score2 > 19:
done = True
print(series_of_keys)
run_time = time.time() - start_time
fitness = score2 - score1 / (run_time - 2)
return fitness
def eval_genomes(genomes, config):
for genome_id, genome in genomes:
genome.fitness = eval_genome(genome, config)
def run():
local_dir = os.path.dirname(__file__)
config_path = os.path.join(local_dir, 'pong_config')
config = neat.Config(neat.DefaultGenome, neat.DefaultReproduction, neat
.DefaultSpeciesSet, neat.DefaultStagnation, config_path)
pop = neat.Population(config)
stats = neat.StatisticsReporter()
pop.add_reporter(stats)
pop.add_reporter(neat.StdOutReporter(True))
pe = neat.ParallelEvaluator(10, eval_genome)
winner = pop.run(pe.evaluate)
with open('winner-feedforward', 'wb') as f:
pickle.dump(winner, f)
print(winner)
visualize.plot_stats(stats, ylog=True, view=True, filename=
'feedforward-fitness.svg')
visualize.plot_species(stats, view=True, filename=
'feedforward-speciation.svg')
node_names = {(-1): 'x', (-2): 'dx', (-3): 'theta', (-4): 'dtheta', (0):
'control'}
visualize.draw_net(config, winner, True, node_names=node_names)
visualize.draw_net(config, winner, view=True, node_names=node_names,
filename='winner-feedforward.gv')
visualize.draw_net(config, winner, view=True, node_names=node_names,
filename='winner-feedforward-enabled.gv', show_disabled=False)
visualize.draw_net(config, winner, view=True, node_names=node_names,
filename='winner-feedforward-enabled-pruned.gv', show_disabled=
False, prune_unused=True)
if __name__ == '__main__':
run()
| import retro
import numpy as np
import neat
import pickle
import os
import multiprocessing
import cv2
import time
env = retro.make(game='Pong-Atari2600')
def eval_genome(genome, config):
net = neat.nn.FeedForwardNetwork.create(genome, config)
env.reset()
ob, _, _, _ = env.step(env.action_space.sample())
inx = int(ob.shape[0] / 8)
iny = int(ob.shape[1] / 8)
fitnesses = []
score1 = 0
score2 = 0
fitness = 0.0
done = False
start_time = time.time()
series_of_keys = []
series_of_nnOut = []
while not done:
env.render()
ob = cv2.resize(ob, (inx, iny))
ob = cv2.cvtColor(ob, cv2.COLOR_BGR2GRAY)
ob = np.reshape(ob, (inx, iny))
imgarray = np.ndarray.flatten(ob)
imgarray = np.interp(imgarray, (0, 254), (-1, +1))
nnOut = net.activate(imgarray)
for o in nnOut:
if o > 0.0:
keys = [1, 0]
else:
keys = [0, 1]
actions = [0] * 4 + keys + [0] * 2
series_of_keys.append(keys)
series_of_nnOut.append(nnOut)
ob, rew, done, info = env.step(actions)
score1 = info['score1']
score2 = info['score2']
if score1 > 19 or score2 > 19:
done = True
print(series_of_keys)
run_time = time.time() - start_time
fitness = score2 - score1 / (run_time - 2)
return fitness
def eval_genomes(genomes, config):
for genome_id, genome in genomes:
genome.fitness = eval_genome(genome, config)
def run():
local_dir = os.path.dirname(__file__)
config_path = os.path.join(local_dir, 'pong_config')
config = neat.Config(neat.DefaultGenome, neat.DefaultReproduction, neat
.DefaultSpeciesSet, neat.DefaultStagnation, config_path)
pop = neat.Population(config)
stats = neat.StatisticsReporter()
pop.add_reporter(stats)
pop.add_reporter(neat.StdOutReporter(True))
pe = neat.ParallelEvaluator(10, eval_genome)
winner = pop.run(pe.evaluate)
with open('winner-feedforward', 'wb') as f:
pickle.dump(winner, f)
print(winner)
visualize.plot_stats(stats, ylog=True, view=True, filename=
'feedforward-fitness.svg')
visualize.plot_species(stats, view=True, filename=
'feedforward-speciation.svg')
node_names = {(-1): 'x', (-2): 'dx', (-3): 'theta', (-4): 'dtheta', (0):
'control'}
visualize.draw_net(config, winner, True, node_names=node_names)
visualize.draw_net(config, winner, view=True, node_names=node_names,
filename='winner-feedforward.gv')
visualize.draw_net(config, winner, view=True, node_names=node_names,
filename='winner-feedforward-enabled.gv', show_disabled=False)
visualize.draw_net(config, winner, view=True, node_names=node_names,
filename='winner-feedforward-enabled-pruned.gv', show_disabled=
False, prune_unused=True)
if __name__ == '__main__':
run()
| import retro # pip install gym-retro
import numpy as np # pip install numpy
#import cv2 # pip install opencv-python
import neat # pip install neat-python
import pickle # pip install cloudpickle
import os
import multiprocessing
import cv2
import time
env = retro.make(game='Pong-Atari2600')
def eval_genome(genome, config):
net = neat.nn.FeedForwardNetwork.create(genome, config)
env.reset()
ob, _, _, _ = env.step(env.action_space.sample())
inx = int(ob.shape[0]/8)
iny = int(ob.shape[1]/8)
fitnesses = []
score1=0
score2=0
# Run the given simulation for up to num_steps time steps.
fitness = 0.0
done = False
start_time=time.time()
series_of_keys=[]
series_of_nnOut=[]
while not done:
env.render()
ob = cv2.resize(ob, (inx, iny))
ob = cv2.cvtColor(ob, cv2.COLOR_BGR2GRAY)
ob = np.reshape(ob, (inx, iny))
imgarray = np.ndarray.flatten(ob)
imgarray = np.interp(imgarray, (0, 254), (-1, +1))
nnOut = net.activate(imgarray)
for o in nnOut:
if o > 0.:
keys = [1, 0]
else:
keys = [0, 1]
actions=[0]*4+keys+[0]*2
series_of_keys.append(keys)
series_of_nnOut.append(nnOut)
ob, rew, done, info = env.step(actions)
score1=info['score1']
score2=info['score2']
if score1 >19 or score2 >19:
done = True
print(series_of_keys)
# print(series_of_actions)
run_time=time.time()-start_time
fitness=score2-score1/(run_time-2)
return fitness
def eval_genomes(genomes, config):
for genome_id, genome in genomes:
genome.fitness = eval_genome(genome, config)
def run():
# Load the config file, which is assumed to live in
# the same directory as this script.
local_dir = os.path.dirname(__file__)
config_path = os.path.join(local_dir, 'pong_config')
config = neat.Config(neat.DefaultGenome, neat.DefaultReproduction,
neat.DefaultSpeciesSet, neat.DefaultStagnation,
config_path)
pop = neat.Population(config)
stats = neat.StatisticsReporter()
pop.add_reporter(stats)
pop.add_reporter(neat.StdOutReporter(True))
pe = neat.ParallelEvaluator(10, eval_genome)
winner = pop.run(pe.evaluate)
# Save the winner.
with open('winner-feedforward', 'wb') as f:
pickle.dump(winner, f)
print(winner)
visualize.plot_stats(stats, ylog=True, view=True, filename="feedforward-fitness.svg")
visualize.plot_species(stats, view=True, filename="feedforward-speciation.svg")
node_names = {-1: 'x', -2: 'dx', -3: 'theta', -4: 'dtheta', 0: 'control'}
visualize.draw_net(config, winner, True, node_names=node_names)
visualize.draw_net(config, winner, view=True, node_names=node_names,
filename="winner-feedforward.gv")
visualize.draw_net(config, winner, view=True, node_names=node_names,
filename="winner-feedforward-enabled.gv", show_disabled=False)
visualize.draw_net(config, winner, view=True, node_names=node_names,
filename="winner-feedforward-enabled-pruned.gv", show_disabled=False, prune_unused=True)
if __name__ == '__main__':
run()
| [
3,
4,
5,
6,
7
] |
1,439 | fd96bf5595ce6ec1f95d0f7a9d1c4ff582826ac0 | <mask token>
class ContentCategory(BaseModel):
<mask token>
<mask token>
<mask token>
class Meta:
db_table = 'tb_content_category'
verbose_name = '广告内容类别'
verbose_name_plural = verbose_name
<mask token>
class Content(BaseModel):
    """Ad content"""
category = models.ForeignKey(ContentCategory, on_delete=models.PROTECT,
verbose_name='类别')
title = models.CharField(verbose_name='标题', max_length=100)
url = models.CharField(verbose_name='内容链接', max_length=300)
image = models.ImageField(verbose_name='图片', null=True, blank=True)
text = models.TextField(verbose_name='内容', null=True, blank=True)
sequence = models.IntegerField(verbose_name='排序')
status = models.BooleanField(verbose_name='是否展示', default=True)
class Meta:
db_table = 'tb_content'
verbose_name = '广告内容'
verbose_name_plural = verbose_name
def __str__(self):
return self.category.name + ':' + self.title
| <mask token>
class ContentCategory(BaseModel):
<mask token>
<mask token>
<mask token>
class Meta:
db_table = 'tb_content_category'
verbose_name = '广告内容类别'
verbose_name_plural = verbose_name
def __str__(self):
return self.name
class Content(BaseModel):
    """Ad content"""
category = models.ForeignKey(ContentCategory, on_delete=models.PROTECT,
verbose_name='类别')
title = models.CharField(verbose_name='标题', max_length=100)
url = models.CharField(verbose_name='内容链接', max_length=300)
image = models.ImageField(verbose_name='图片', null=True, blank=True)
text = models.TextField(verbose_name='内容', null=True, blank=True)
sequence = models.IntegerField(verbose_name='排序')
status = models.BooleanField(verbose_name='是否展示', default=True)
class Meta:
db_table = 'tb_content'
verbose_name = '广告内容'
verbose_name_plural = verbose_name
def __str__(self):
return self.category.name + ':' + self.title
| <mask token>
class ContentCategory(BaseModel):
<mask token>
name = models.CharField(verbose_name='名称', max_length=50)
key = models.CharField(verbose_name='类别键名', max_length=50)
class Meta:
db_table = 'tb_content_category'
verbose_name = '广告内容类别'
verbose_name_plural = verbose_name
def __str__(self):
return self.name
class Content(BaseModel):
    """Ad content"""
category = models.ForeignKey(ContentCategory, on_delete=models.PROTECT,
verbose_name='类别')
title = models.CharField(verbose_name='标题', max_length=100)
url = models.CharField(verbose_name='内容链接', max_length=300)
image = models.ImageField(verbose_name='图片', null=True, blank=True)
text = models.TextField(verbose_name='内容', null=True, blank=True)
sequence = models.IntegerField(verbose_name='排序')
status = models.BooleanField(verbose_name='是否展示', default=True)
class Meta:
db_table = 'tb_content'
verbose_name = '广告内容'
verbose_name_plural = verbose_name
def __str__(self):
return self.category.name + ':' + self.title
| <mask token>
class ContentCategory(BaseModel):
    """Ad content category"""
name = models.CharField(verbose_name='名称', max_length=50)
key = models.CharField(verbose_name='类别键名', max_length=50)
class Meta:
db_table = 'tb_content_category'
verbose_name = '广告内容类别'
verbose_name_plural = verbose_name
def __str__(self):
return self.name
class Content(BaseModel):
    """Ad content"""
category = models.ForeignKey(ContentCategory, on_delete=models.PROTECT,
verbose_name='类别')
title = models.CharField(verbose_name='标题', max_length=100)
url = models.CharField(verbose_name='内容链接', max_length=300)
image = models.ImageField(verbose_name='图片', null=True, blank=True)
text = models.TextField(verbose_name='内容', null=True, blank=True)
sequence = models.IntegerField(verbose_name='排序')
status = models.BooleanField(verbose_name='是否展示', default=True)
class Meta:
db_table = 'tb_content'
verbose_name = '广告内容'
verbose_name_plural = verbose_name
def __str__(self):
return self.category.name + ':' + self.title
| from django.db import models
from utils.models import BaseModel
# Create your models here.
class ContentCategory(BaseModel):
    '''Ad content category'''
name = models.CharField(verbose_name='名称',max_length=50)
key = models.CharField(verbose_name='类别键名',max_length=50)
class Meta:
db_table = 'tb_content_category'
verbose_name = '广告内容类别'
verbose_name_plural = verbose_name
def __str__(self):
return self.name
class Content(BaseModel):
    '''Ad content'''
category = models.ForeignKey(ContentCategory,on_delete=models.PROTECT,verbose_name='类别')
title = models.CharField(verbose_name='标题',max_length=100)
url = models.CharField(verbose_name='内容链接',max_length=300)
image = models.ImageField(verbose_name='图片',null=True,blank=True)
text = models.TextField(verbose_name='内容',null=True,blank=True)
sequence = models.IntegerField(verbose_name='排序')
status = models.BooleanField(verbose_name='是否展示',default=True)
class Meta:
db_table = 'tb_content'
verbose_name = '广告内容'
verbose_name_plural = verbose_name
def __str__(self):
return self.category.name + ':' + self.title | [
5,
6,
7,
8,
10
] |
1,440 | c1e649b73c207ea08235d295c28daad8c91398a7 | """
Python 3.3 introduces the new "yield from" statement to provide a straightforward way for
a generator to call out to other generators.
"""
def gen1():
yield 'foo'
yield 'bar'
def gen2():
yield 'spam'
yield 'eggs'
"""
For python versions < 3.3
"""
def full_gen():
for word in gen1():
yield word
for word in gen2():
yield word
def full_gen_with_itertools():
import itertools
for word in itertools.chain(gen1(), gen2()):
yield word
fg = full_gen()
print(next(fg))
print("--------")
fgi = full_gen_with_itertools()
print(next(fgi))
"""
For Python versions >= 3.3
"""
def full_gen():
yield from gen1()
yield from gen2()
"""
Use of the "yield from" syntax is referred to as generator delegation.
This implementation differs from the first two because those implementations
discard any value sent to the generator using "send".
"yield from", by contrast, forwards sent values, since the outer generator is simply
delegating to the inner generator, avoiding the need for the developer to handle this.
""" | null | null | null | null | [
0
] |
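A minimal, hypothetical sketch (not part of the original file above) illustrating the send() forwarding behaviour the closing docstring describes: values passed to send() reach the inner generator under "yield from", whereas the manual for-loop delegation would discard them. The echo/delegating generator names are invented for illustration.

def echo():
    # Inner generator: collects every value sent to it and yields the list back.
    received = []
    while True:
        value = yield received
        received.append(value)

def delegating():
    # "yield from" forwards next(), send() and throw() to echo() transparently.
    yield from echo()

gen = delegating()
print(gen.send(None))    # prime the generator -> []
print(gen.send('spam'))  # -> ['spam'], the sent value reached echo()
print(gen.send('eggs'))  # -> ['spam', 'eggs']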
1,441 | 263a853f33eb9724101ca87f12b914282dea9981 | <mask token>
class BannerAdmin(object):
list_display = 'title', 'url', 'index'
class BaseSetting(object):
enable_themes = True
user_bootswatch = True
class GlobalSetting(object):
site_title = '西游记'
site_footer = '咨询在线'
<mask token>
| <mask token>
class EmailVerifyRecordAdmin(object):
pass
class BannerAdmin(object):
list_display = 'title', 'url', 'index'
class BaseSetting(object):
enable_themes = True
user_bootswatch = True
class GlobalSetting(object):
site_title = '西游记'
site_footer = '咨询在线'
<mask token>
| <mask token>
class EmailVerifyRecordAdmin(object):
pass
class BannerAdmin(object):
list_display = 'title', 'url', 'index'
class BaseSetting(object):
enable_themes = True
user_bootswatch = True
class GlobalSetting(object):
site_title = '西游记'
site_footer = '咨询在线'
xadmin.site.register(EmailVerifyRecord, EmailVerifyRecordAdmin)
xadmin.site.register(Banner, BannerAdmin)
xadmin.site.register(views.BaseAdminView, BaseSetting)
xadmin.site.register(views.CommAdminView, GlobalSetting)
| import xadmin
from .models import EmailVerifyRecord, Banner
from xadmin import views
class EmailVerifyRecordAdmin(object):
pass
class BannerAdmin(object):
list_display = 'title', 'url', 'index'
class BaseSetting(object):
enable_themes = True
user_bootswatch = True
class GlobalSetting(object):
site_title = '西游记'
site_footer = '咨询在线'
xadmin.site.register(EmailVerifyRecord, EmailVerifyRecordAdmin)
xadmin.site.register(Banner, BannerAdmin)
xadmin.site.register(views.BaseAdminView, BaseSetting)
xadmin.site.register(views.CommAdminView, GlobalSetting)
| import xadmin
from .models import EmailVerifyRecord,Banner
from xadmin import views
class EmailVerifyRecordAdmin(object):
pass
class BannerAdmin(object):
list_display=('title','url','index')
class BaseSetting(object):
enable_themes=True
user_bootswatch=True
# set the xadmin page title and footer
class GlobalSetting(object):
site_title='西游记'
site_footer='咨询在线'
xadmin.site.register(EmailVerifyRecord,EmailVerifyRecordAdmin)
xadmin.site.register(Banner,BannerAdmin)
xadmin.site.register(views.BaseAdminView,BaseSetting)
xadmin.site.register(views.CommAdminView,GlobalSetting)
| [
6,
7,
8,
9,
10
] |
1,442 | ae38995d153deed2e6049b7b65fb5f28dfcef470 | <mask token>
class BaseConnection(object):
<mask token>
<mask token>
<mask token>
def __init__(self, hostname, logger=None, sudo=False, threads=1, eager=
True, detect_sudo=False, use_ssh=False, interpreter=None,
ssh_options=None):
self.sudo = sudo
self.hostname = hostname
self.ssh_options = ssh_options
self.logger = logger or basic_remote_logger()
self.remote_module = None
self.channel = None
self.use_ssh = use_ssh
self.global_timeout = None
self.interpreter = interpreter or 'python%s' % sys.version_info[0]
if eager:
try:
if detect_sudo:
self.sudo = self._detect_sudo()
self.gateway = self._make_gateway(hostname)
except OSError:
self.logger.error(
"Can't communicate with remote host, possibly because %s is not installed there"
% self.interpreter)
raise
<mask token>
def _detect_sudo(self, _execnet=None):
"""
``sudo`` detection has to create a different connection to the remote
host so that we can reliably ensure that ``getuser()`` will return the
right information.
After getting the user info it closes the connection and returns
a boolean
"""
exc = _execnet or execnet
gw = exc.makegateway(self._make_connection_string(self.hostname,
use_sudo=False))
channel = gw.remote_exec(
'import getpass; channel.send(getpass.getuser())')
result = channel.receive()
gw.exit()
if result == 'root':
return False
self.logger.debug('connection detected need for sudo')
return True
<mask token>
def __enter__(self):
return self
<mask token>
<mask token>
def execute(self, function, **kw):
return self.gateway.remote_exec(function, **kw)
def exit(self):
self.group.terminate(timeout=1.0)
def import_module(self, module, python_executable=None):
"""
Allows remote execution of a local module. Depending on the
``remote_import_system`` attribute it may use execnet's implementation
or remoto's own based on JSON.
.. note:: It is not possible to use execnet's remote execution model on
connections that aren't SSH or Local.
"""
if self.remote_import_system is not None:
if self.remote_import_system == 'json':
self.remote_module = JsonModuleExecute(self, module, self.
logger, python_executable=python_executable)
else:
self.remote_module = LegacyModuleExecute(self.gateway,
module, self.logger)
else:
self.remote_module = LegacyModuleExecute(self.gateway, module,
self.logger)
return self.remote_module
def has_connection(self):
if self.gateway:
return self.gateway.hasreceiver()
return False
class LegacyModuleExecute(object):
"""
This (now legacy) class, is the way ``execnet`` does its remote module
execution: it sends it over a channel, and does a send/receive for
exchanging information. This only works when there is native support in
execnet for a given connection. This currently means it would only work for
ssh and local (Popen) connections, and will not work for anything like
kubernetes or containers.
"""
def __init__(self, gateway, module, logger=None):
self.channel = gateway.remote_exec(module)
self.module = module
self.logger = logger
def __getattr__(self, name):
if not hasattr(self.module, name):
msg = 'module %s does not have attribute %s' % (str(self.module
), name)
raise AttributeError(msg)
docstring = self._get_func_doc(getattr(self.module, name))
def wrapper(*args):
arguments = self._convert_args(args)
if docstring:
self.logger.debug(docstring)
self.channel.send('%s(%s)' % (name, arguments))
try:
return self.channel.receive()
except Exception as error:
exc_line = str(error)
for tb_line in reversed(str(error).split('\n')):
if tb_line:
exc_line = tb_line
break
raise RuntimeError(exc_line)
return wrapper
def _get_func_doc(self, func):
try:
return getattr(func, 'func_doc').strip()
except AttributeError:
return ''
def _convert_args(self, args):
if args:
if len(args) > 1:
arguments = str(args).rstrip(')').lstrip('(')
else:
arguments = str(args).rstrip(',)').lstrip('(')
else:
arguments = ''
return arguments
<mask token>
class JsonModuleExecute(object):
"""
This remote execution class allows to ship Python code over to the remote
node, load it via ``stdin`` and call any function with arguments. The
resulting response is dumped over JSON so that it can get printed to
``stdout``, then captured locally, loaded into regular Python and returned.
If the remote end generates an exception with a traceback, that is captured
as well and raised accordingly.
"""
def __init__(self, conn, module, logger=None, python_executable=None):
self.conn = conn
self.module = module
self._module_source = inspect.getsource(module)
self.logger = logger
self.python_executable = python_executable
def __getattr__(self, name):
if not hasattr(self.module, name):
msg = 'module %s does not have attribute %s' % (str(self.module
), name)
raise AttributeError(msg)
docstring = self._get_func_doc(getattr(self.module, name))
def wrapper(*args):
if docstring:
self.logger.debug(docstring)
if len(args):
source = self._module_source + dump_template % (name, repr(
args))
else:
source = self._module_source + dump_template % (name, '()')
if self.python_executable is None:
self.python_executable = get_python_executable(self.conn)
out, err, code = check(self.conn, [self.python_executable],
stdin=source.encode('utf-8'))
if not out:
if not err:
err = ['Traceback (most recent call last):',
' File "<stdin>", in <module>',
'Exception: error calling "%s"' % name]
if code:
raise Exception('Unexpected remote exception: \n%s\n%s' %
('\n'.join(out), '\n'.join(err)))
return
response = json.loads(out[0])
if response['exception']:
raise Exception(response['exception'])
return response['return']
return wrapper
def _get_func_doc(self, func):
try:
return getattr(func, 'func_doc').strip()
except AttributeError:
return ''
<mask token>
| <mask token>
class BaseConnection(object):
<mask token>
<mask token>
<mask token>
def __init__(self, hostname, logger=None, sudo=False, threads=1, eager=
True, detect_sudo=False, use_ssh=False, interpreter=None,
ssh_options=None):
self.sudo = sudo
self.hostname = hostname
self.ssh_options = ssh_options
self.logger = logger or basic_remote_logger()
self.remote_module = None
self.channel = None
self.use_ssh = use_ssh
self.global_timeout = None
self.interpreter = interpreter or 'python%s' % sys.version_info[0]
if eager:
try:
if detect_sudo:
self.sudo = self._detect_sudo()
self.gateway = self._make_gateway(hostname)
except OSError:
self.logger.error(
"Can't communicate with remote host, possibly because %s is not installed there"
% self.interpreter)
raise
def _make_gateway(self, hostname):
self.group = execnet.Group()
gateway = self.group.makegateway(self._make_connection_string(hostname)
)
gateway.reconfigure(py2str_as_py3str=False, py3str_as_py2str=False)
return gateway
def _detect_sudo(self, _execnet=None):
"""
``sudo`` detection has to create a different connection to the remote
host so that we can reliably ensure that ``getuser()`` will return the
right information.
After getting the user info it closes the connection and returns
a boolean
"""
exc = _execnet or execnet
gw = exc.makegateway(self._make_connection_string(self.hostname,
use_sudo=False))
channel = gw.remote_exec(
'import getpass; channel.send(getpass.getuser())')
result = channel.receive()
gw.exit()
if result == 'root':
return False
self.logger.debug('connection detected need for sudo')
return True
def _make_connection_string(self, hostname, _needs_ssh=None, use_sudo=None
):
_needs_ssh = _needs_ssh or needs_ssh
interpreter = self.interpreter
if use_sudo is not None:
if use_sudo:
interpreter = 'sudo ' + interpreter
elif self.sudo:
interpreter = 'sudo ' + interpreter
if _needs_ssh(hostname) or self.use_ssh:
if self.ssh_options:
return 'ssh=%s %s//python=%s' % (self.ssh_options, hostname,
interpreter)
else:
return 'ssh=%s//python=%s' % (hostname, interpreter)
return 'popen//python=%s' % interpreter
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.group.terminate(timeout=1.0)
return False
def cmd(self, cmd):
"""
In the base connection class, this method just returns the ``cmd``
as-is. Other implementations will end up doing transformations to the
command by prefixing it with other flags needed. See
:class:`KubernetesConnection` for an example
"""
return cmd
def execute(self, function, **kw):
return self.gateway.remote_exec(function, **kw)
def exit(self):
self.group.terminate(timeout=1.0)
def import_module(self, module, python_executable=None):
"""
Allows remote execution of a local module. Depending on the
``remote_import_system`` attribute it may use execnet's implementation
or remoto's own based on JSON.
.. note:: It is not possible to use execnet's remote execution model on
connections that aren't SSH or Local.
"""
if self.remote_import_system is not None:
if self.remote_import_system == 'json':
self.remote_module = JsonModuleExecute(self, module, self.
logger, python_executable=python_executable)
else:
self.remote_module = LegacyModuleExecute(self.gateway,
module, self.logger)
else:
self.remote_module = LegacyModuleExecute(self.gateway, module,
self.logger)
return self.remote_module
def has_connection(self):
if self.gateway:
return self.gateway.hasreceiver()
return False
class LegacyModuleExecute(object):
"""
This (now legacy) class, is the way ``execnet`` does its remote module
execution: it sends it over a channel, and does a send/receive for
exchanging information. This only works when there is native support in
execnet for a given connection. This currently means it would only work for
ssh and local (Popen) connections, and will not work for anything like
kubernetes or containers.
"""
def __init__(self, gateway, module, logger=None):
self.channel = gateway.remote_exec(module)
self.module = module
self.logger = logger
def __getattr__(self, name):
if not hasattr(self.module, name):
msg = 'module %s does not have attribute %s' % (str(self.module
), name)
raise AttributeError(msg)
docstring = self._get_func_doc(getattr(self.module, name))
def wrapper(*args):
arguments = self._convert_args(args)
if docstring:
self.logger.debug(docstring)
self.channel.send('%s(%s)' % (name, arguments))
try:
return self.channel.receive()
except Exception as error:
exc_line = str(error)
for tb_line in reversed(str(error).split('\n')):
if tb_line:
exc_line = tb_line
break
raise RuntimeError(exc_line)
return wrapper
def _get_func_doc(self, func):
try:
return getattr(func, 'func_doc').strip()
except AttributeError:
return ''
def _convert_args(self, args):
if args:
if len(args) > 1:
arguments = str(args).rstrip(')').lstrip('(')
else:
arguments = str(args).rstrip(',)').lstrip('(')
else:
arguments = ''
return arguments
<mask token>
class JsonModuleExecute(object):
"""
This remote execution class allows to ship Python code over to the remote
node, load it via ``stdin`` and call any function with arguments. The
resulting response is dumped over JSON so that it can get printed to
``stdout``, then captured locally, loaded into regular Python and returned.
If the remote end generates an exception with a traceback, that is captured
as well and raised accordingly.
"""
def __init__(self, conn, module, logger=None, python_executable=None):
self.conn = conn
self.module = module
self._module_source = inspect.getsource(module)
self.logger = logger
self.python_executable = python_executable
def __getattr__(self, name):
if not hasattr(self.module, name):
msg = 'module %s does not have attribute %s' % (str(self.module
), name)
raise AttributeError(msg)
docstring = self._get_func_doc(getattr(self.module, name))
def wrapper(*args):
if docstring:
self.logger.debug(docstring)
if len(args):
source = self._module_source + dump_template % (name, repr(
args))
else:
source = self._module_source + dump_template % (name, '()')
if self.python_executable is None:
self.python_executable = get_python_executable(self.conn)
out, err, code = check(self.conn, [self.python_executable],
stdin=source.encode('utf-8'))
if not out:
if not err:
err = ['Traceback (most recent call last):',
' File "<stdin>", in <module>',
'Exception: error calling "%s"' % name]
if code:
raise Exception('Unexpected remote exception: \n%s\n%s' %
('\n'.join(out), '\n'.join(err)))
return
response = json.loads(out[0])
if response['exception']:
raise Exception(response['exception'])
return response['return']
return wrapper
def _get_func_doc(self, func):
try:
return getattr(func, 'func_doc').strip()
except AttributeError:
return ''
<mask token>
| <mask token>
class BaseConnection(object):
"""
Base class for Connection objects. Provides a generic interface to execnet
for setting up the connection
"""
executable = ''
remote_import_system = 'legacy'
def __init__(self, hostname, logger=None, sudo=False, threads=1, eager=
True, detect_sudo=False, use_ssh=False, interpreter=None,
ssh_options=None):
self.sudo = sudo
self.hostname = hostname
self.ssh_options = ssh_options
self.logger = logger or basic_remote_logger()
self.remote_module = None
self.channel = None
self.use_ssh = use_ssh
self.global_timeout = None
self.interpreter = interpreter or 'python%s' % sys.version_info[0]
if eager:
try:
if detect_sudo:
self.sudo = self._detect_sudo()
self.gateway = self._make_gateway(hostname)
except OSError:
self.logger.error(
"Can't communicate with remote host, possibly because %s is not installed there"
% self.interpreter)
raise
def _make_gateway(self, hostname):
self.group = execnet.Group()
gateway = self.group.makegateway(self._make_connection_string(hostname)
)
gateway.reconfigure(py2str_as_py3str=False, py3str_as_py2str=False)
return gateway
def _detect_sudo(self, _execnet=None):
"""
``sudo`` detection has to create a different connection to the remote
host so that we can reliably ensure that ``getuser()`` will return the
right information.
After getting the user info it closes the connection and returns
a boolean
"""
exc = _execnet or execnet
gw = exc.makegateway(self._make_connection_string(self.hostname,
use_sudo=False))
channel = gw.remote_exec(
'import getpass; channel.send(getpass.getuser())')
result = channel.receive()
gw.exit()
if result == 'root':
return False
self.logger.debug('connection detected need for sudo')
return True
def _make_connection_string(self, hostname, _needs_ssh=None, use_sudo=None
):
_needs_ssh = _needs_ssh or needs_ssh
interpreter = self.interpreter
if use_sudo is not None:
if use_sudo:
interpreter = 'sudo ' + interpreter
elif self.sudo:
interpreter = 'sudo ' + interpreter
if _needs_ssh(hostname) or self.use_ssh:
if self.ssh_options:
return 'ssh=%s %s//python=%s' % (self.ssh_options, hostname,
interpreter)
else:
return 'ssh=%s//python=%s' % (hostname, interpreter)
return 'popen//python=%s' % interpreter
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.group.terminate(timeout=1.0)
return False
def cmd(self, cmd):
"""
In the base connection class, this method just returns the ``cmd``
as-is. Other implementations will end up doing transformations to the
command by prefixing it with other flags needed. See
:class:`KubernetesConnection` for an example
"""
return cmd
def execute(self, function, **kw):
return self.gateway.remote_exec(function, **kw)
def exit(self):
self.group.terminate(timeout=1.0)
def import_module(self, module, python_executable=None):
"""
Allows remote execution of a local module. Depending on the
``remote_import_system`` attribute it may use execnet's implementation
or remoto's own based on JSON.
.. note:: It is not possible to use execnet's remote execution model on
connections that aren't SSH or Local.
"""
if self.remote_import_system is not None:
if self.remote_import_system == 'json':
self.remote_module = JsonModuleExecute(self, module, self.
logger, python_executable=python_executable)
else:
self.remote_module = LegacyModuleExecute(self.gateway,
module, self.logger)
else:
self.remote_module = LegacyModuleExecute(self.gateway, module,
self.logger)
return self.remote_module
def has_connection(self):
if self.gateway:
return self.gateway.hasreceiver()
return False
class LegacyModuleExecute(object):
"""
This (now legacy) class, is the way ``execnet`` does its remote module
execution: it sends it over a channel, and does a send/receive for
exchanging information. This only works when there is native support in
execnet for a given connection. This currently means it would only work for
ssh and local (Popen) connections, and will not work for anything like
kubernetes or containers.
"""
def __init__(self, gateway, module, logger=None):
self.channel = gateway.remote_exec(module)
self.module = module
self.logger = logger
def __getattr__(self, name):
if not hasattr(self.module, name):
msg = 'module %s does not have attribute %s' % (str(self.module
), name)
raise AttributeError(msg)
docstring = self._get_func_doc(getattr(self.module, name))
def wrapper(*args):
arguments = self._convert_args(args)
if docstring:
self.logger.debug(docstring)
self.channel.send('%s(%s)' % (name, arguments))
try:
return self.channel.receive()
except Exception as error:
exc_line = str(error)
for tb_line in reversed(str(error).split('\n')):
if tb_line:
exc_line = tb_line
break
raise RuntimeError(exc_line)
return wrapper
def _get_func_doc(self, func):
try:
return getattr(func, 'func_doc').strip()
except AttributeError:
return ''
def _convert_args(self, args):
if args:
if len(args) > 1:
arguments = str(args).rstrip(')').lstrip('(')
else:
arguments = str(args).rstrip(',)').lstrip('(')
else:
arguments = ''
return arguments
<mask token>
class JsonModuleExecute(object):
"""
This remote execution class allows to ship Python code over to the remote
node, load it via ``stdin`` and call any function with arguments. The
resulting response is dumped over JSON so that it can get printed to
``stdout``, then captured locally, loaded into regular Python and returned.
If the remote end generates an exception with a traceback, that is captured
as well and raised accordingly.
"""
def __init__(self, conn, module, logger=None, python_executable=None):
self.conn = conn
self.module = module
self._module_source = inspect.getsource(module)
self.logger = logger
self.python_executable = python_executable
def __getattr__(self, name):
if not hasattr(self.module, name):
msg = 'module %s does not have attribute %s' % (str(self.module
), name)
raise AttributeError(msg)
docstring = self._get_func_doc(getattr(self.module, name))
def wrapper(*args):
if docstring:
self.logger.debug(docstring)
if len(args):
source = self._module_source + dump_template % (name, repr(
args))
else:
source = self._module_source + dump_template % (name, '()')
if self.python_executable is None:
self.python_executable = get_python_executable(self.conn)
out, err, code = check(self.conn, [self.python_executable],
stdin=source.encode('utf-8'))
if not out:
if not err:
err = ['Traceback (most recent call last):',
' File "<stdin>", in <module>',
'Exception: error calling "%s"' % name]
if code:
raise Exception('Unexpected remote exception: \n%s\n%s' %
('\n'.join(out), '\n'.join(err)))
return
response = json.loads(out[0])
if response['exception']:
raise Exception(response['exception'])
return response['return']
return wrapper
def _get_func_doc(self, func):
try:
return getattr(func, 'func_doc').strip()
except AttributeError:
return ''
def basic_remote_logger():
logging.basicConfig()
logger = logging.getLogger(socket.gethostname())
logger.setLevel(logging.DEBUG)
return logger
def needs_ssh(hostname, _socket=None):
"""
Obtains remote hostname of the socket and cuts off the domain part
of its FQDN.
"""
if hostname.lower() in ['localhost', '127.0.0.1', '127.0.1.1']:
return False
_socket = _socket or socket
fqdn = _socket.getfqdn()
if hostname == fqdn:
return False
local_hostname = _socket.gethostname()
local_short_hostname = local_hostname.split('.')[0]
if local_hostname == hostname or local_short_hostname == hostname:
return False
return True
<mask token>
| import inspect
import json
import socket
import sys
import execnet
import logging
from remoto.process import check
class BaseConnection(object):
"""
Base class for Connection objects. Provides a generic interface to execnet
for setting up the connection
"""
executable = ''
remote_import_system = 'legacy'
def __init__(self, hostname, logger=None, sudo=False, threads=1, eager=
True, detect_sudo=False, use_ssh=False, interpreter=None,
ssh_options=None):
self.sudo = sudo
self.hostname = hostname
self.ssh_options = ssh_options
self.logger = logger or basic_remote_logger()
self.remote_module = None
self.channel = None
self.use_ssh = use_ssh
self.global_timeout = None
self.interpreter = interpreter or 'python%s' % sys.version_info[0]
if eager:
try:
if detect_sudo:
self.sudo = self._detect_sudo()
self.gateway = self._make_gateway(hostname)
except OSError:
self.logger.error(
"Can't communicate with remote host, possibly because %s is not installed there"
% self.interpreter)
raise
def _make_gateway(self, hostname):
self.group = execnet.Group()
gateway = self.group.makegateway(self._make_connection_string(hostname)
)
gateway.reconfigure(py2str_as_py3str=False, py3str_as_py2str=False)
return gateway
def _detect_sudo(self, _execnet=None):
"""
``sudo`` detection has to create a different connection to the remote
host so that we can reliably ensure that ``getuser()`` will return the
right information.
After getting the user info it closes the connection and returns
a boolean
"""
exc = _execnet or execnet
gw = exc.makegateway(self._make_connection_string(self.hostname,
use_sudo=False))
channel = gw.remote_exec(
'import getpass; channel.send(getpass.getuser())')
result = channel.receive()
gw.exit()
if result == 'root':
return False
self.logger.debug('connection detected need for sudo')
return True
def _make_connection_string(self, hostname, _needs_ssh=None, use_sudo=None
):
_needs_ssh = _needs_ssh or needs_ssh
interpreter = self.interpreter
if use_sudo is not None:
if use_sudo:
interpreter = 'sudo ' + interpreter
elif self.sudo:
interpreter = 'sudo ' + interpreter
if _needs_ssh(hostname) or self.use_ssh:
if self.ssh_options:
return 'ssh=%s %s//python=%s' % (self.ssh_options, hostname,
interpreter)
else:
return 'ssh=%s//python=%s' % (hostname, interpreter)
return 'popen//python=%s' % interpreter
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.group.terminate(timeout=1.0)
return False
def cmd(self, cmd):
"""
In the base connection class, this method just returns the ``cmd``
as-is. Other implementations will end up doing transformations to the
command by prefixing it with other flags needed. See
:class:`KubernetesConnection` for an example
"""
return cmd
def execute(self, function, **kw):
return self.gateway.remote_exec(function, **kw)
def exit(self):
self.group.terminate(timeout=1.0)
def import_module(self, module, python_executable=None):
"""
Allows remote execution of a local module. Depending on the
``remote_import_system`` attribute it may use execnet's implementation
or remoto's own based on JSON.
.. note:: It is not possible to use execnet's remote execution model on
connections that aren't SSH or Local.
"""
if self.remote_import_system is not None:
if self.remote_import_system == 'json':
self.remote_module = JsonModuleExecute(self, module, self.
logger, python_executable=python_executable)
else:
self.remote_module = LegacyModuleExecute(self.gateway,
module, self.logger)
else:
self.remote_module = LegacyModuleExecute(self.gateway, module,
self.logger)
return self.remote_module
def has_connection(self):
if self.gateway:
return self.gateway.hasreceiver()
return False
class LegacyModuleExecute(object):
"""
This (now legacy) class, is the way ``execnet`` does its remote module
execution: it sends it over a channel, and does a send/receive for
exchanging information. This only works when there is native support in
execnet for a given connection. This currently means it would only work for
ssh and local (Popen) connections, and will not work for anything like
kubernetes or containers.
"""
def __init__(self, gateway, module, logger=None):
self.channel = gateway.remote_exec(module)
self.module = module
self.logger = logger
def __getattr__(self, name):
if not hasattr(self.module, name):
msg = 'module %s does not have attribute %s' % (str(self.module
), name)
raise AttributeError(msg)
docstring = self._get_func_doc(getattr(self.module, name))
def wrapper(*args):
arguments = self._convert_args(args)
if docstring:
self.logger.debug(docstring)
self.channel.send('%s(%s)' % (name, arguments))
try:
return self.channel.receive()
except Exception as error:
exc_line = str(error)
for tb_line in reversed(str(error).split('\n')):
if tb_line:
exc_line = tb_line
break
raise RuntimeError(exc_line)
return wrapper
def _get_func_doc(self, func):
try:
return getattr(func, 'func_doc').strip()
except AttributeError:
return ''
def _convert_args(self, args):
if args:
if len(args) > 1:
arguments = str(args).rstrip(')').lstrip('(')
else:
arguments = str(args).rstrip(',)').lstrip('(')
else:
arguments = ''
return arguments
dump_template = """
if __name__ == '__main__':
import json, traceback
obj = {'return': None, 'exception': None}
try:
obj['return'] = %s%s
except Exception:
obj['exception'] = traceback.format_exc()
try:
print(json.dumps(obj).decode('utf-8'))
except AttributeError:
print(json.dumps(obj))
"""
class JsonModuleExecute(object):
"""
This remote execution class allows to ship Python code over to the remote
node, load it via ``stdin`` and call any function with arguments. The
resulting response is dumped over JSON so that it can get printed to
``stdout``, then captured locally, loaded into regular Python and returned.
If the remote end generates an exception with a traceback, that is captured
as well and raised accordingly.
"""
def __init__(self, conn, module, logger=None, python_executable=None):
self.conn = conn
self.module = module
self._module_source = inspect.getsource(module)
self.logger = logger
self.python_executable = python_executable
def __getattr__(self, name):
if not hasattr(self.module, name):
msg = 'module %s does not have attribute %s' % (str(self.module
), name)
raise AttributeError(msg)
docstring = self._get_func_doc(getattr(self.module, name))
def wrapper(*args):
if docstring:
self.logger.debug(docstring)
if len(args):
source = self._module_source + dump_template % (name, repr(
args))
else:
source = self._module_source + dump_template % (name, '()')
if self.python_executable is None:
self.python_executable = get_python_executable(self.conn)
out, err, code = check(self.conn, [self.python_executable],
stdin=source.encode('utf-8'))
if not out:
if not err:
err = ['Traceback (most recent call last):',
' File "<stdin>", in <module>',
'Exception: error calling "%s"' % name]
if code:
raise Exception('Unexpected remote exception: \n%s\n%s' %
('\n'.join(out), '\n'.join(err)))
return
response = json.loads(out[0])
if response['exception']:
raise Exception(response['exception'])
return response['return']
return wrapper
def _get_func_doc(self, func):
try:
return getattr(func, 'func_doc').strip()
except AttributeError:
return ''
def basic_remote_logger():
logging.basicConfig()
logger = logging.getLogger(socket.gethostname())
logger.setLevel(logging.DEBUG)
return logger
def needs_ssh(hostname, _socket=None):
"""
Obtains remote hostname of the socket and cuts off the domain part
of its FQDN.
"""
if hostname.lower() in ['localhost', '127.0.0.1', '127.0.1.1']:
return False
_socket = _socket or socket
fqdn = _socket.getfqdn()
if hostname == fqdn:
return False
local_hostname = _socket.gethostname()
local_short_hostname = local_hostname.split('.')[0]
if local_hostname == hostname or local_short_hostname == hostname:
return False
return True
def get_python_executable(conn):
"""
Try to determine the remote Python version so that it can be used
when executing. Avoids the problem of different Python versions, or distros
that do not use ``python`` but do ``python3``
"""
executables = ['python3', 'python', 'python2.7']
for executable in executables:
conn.logger.debug(
'trying to determine remote python executable with %s' % executable
)
out, err, code = check(conn, ['which', executable])
if code:
conn.logger.warning('skipping %s, was not found in path' %
executable)
else:
try:
return out[0].strip()
except IndexError:
conn.logger.warning('could not parse stdout: %s' % out)
conn.logger.info('Falling back to using interpreter: %s' % conn.interpreter
)
return conn.interpreter
| import inspect
import json
import socket
import sys
import execnet
import logging
from remoto.process import check
class BaseConnection(object):
"""
Base class for Connection objects. Provides a generic interface to execnet
for setting up the connection
"""
executable = ''
remote_import_system = 'legacy'
def __init__(self, hostname, logger=None, sudo=False, threads=1, eager=True,
detect_sudo=False, use_ssh=False, interpreter=None, ssh_options=None):
self.sudo = sudo
self.hostname = hostname
self.ssh_options = ssh_options
self.logger = logger or basic_remote_logger()
self.remote_module = None
self.channel = None
self.use_ssh = use_ssh
self.global_timeout = None # wait for ever
self.interpreter = interpreter or 'python%s' % sys.version_info[0]
if eager:
try:
if detect_sudo:
self.sudo = self._detect_sudo()
self.gateway = self._make_gateway(hostname)
except OSError:
self.logger.error(
"Can't communicate with remote host, possibly because "
"%s is not installed there" % self.interpreter
)
raise
def _make_gateway(self, hostname):
self.group = execnet.Group()
gateway = self.group.makegateway(
self._make_connection_string(hostname)
)
gateway.reconfigure(py2str_as_py3str=False, py3str_as_py2str=False)
return gateway
def _detect_sudo(self, _execnet=None):
"""
``sudo`` detection has to create a different connection to the remote
host so that we can reliably ensure that ``getuser()`` will return the
right information.
After getting the user info it closes the connection and returns
a boolean
"""
exc = _execnet or execnet
gw = exc.makegateway(
self._make_connection_string(self.hostname, use_sudo=False)
)
channel = gw.remote_exec(
'import getpass; channel.send(getpass.getuser())'
)
result = channel.receive()
gw.exit()
if result == 'root':
return False
self.logger.debug('connection detected need for sudo')
return True
def _make_connection_string(self, hostname, _needs_ssh=None, use_sudo=None):
_needs_ssh = _needs_ssh or needs_ssh
interpreter = self.interpreter
if use_sudo is not None:
if use_sudo:
interpreter = 'sudo ' + interpreter
elif self.sudo:
interpreter = 'sudo ' + interpreter
if _needs_ssh(hostname) or self.use_ssh:
if self.ssh_options:
return 'ssh=%s %s//python=%s' % (
self.ssh_options, hostname, interpreter
)
else:
return 'ssh=%s//python=%s' % (hostname, interpreter)
return 'popen//python=%s' % interpreter
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.group.terminate(timeout=1.0)
return False
def cmd(self, cmd):
"""
In the base connection class, this method just returns the ``cmd``
as-is. Other implementations will end up doing transformations to the
command by prefixing it with other flags needed. See
:class:`KubernetesConnection` for an example
"""
return cmd
def execute(self, function, **kw):
return self.gateway.remote_exec(function, **kw)
def exit(self):
self.group.terminate(timeout=1.0)
def import_module(self, module, python_executable=None):
"""
Allows remote execution of a local module. Depending on the
``remote_import_system`` attribute it may use execnet's implementation
or remoto's own based on JSON.
.. note:: It is not possible to use execnet's remote execution model on
connections that aren't SSH or Local.
"""
if self.remote_import_system is not None:
if self.remote_import_system == 'json':
self.remote_module = JsonModuleExecute(self, module, self.logger,
python_executable=python_executable)
else:
self.remote_module = LegacyModuleExecute(self.gateway, module, self.logger)
else:
self.remote_module = LegacyModuleExecute(self.gateway, module, self.logger)
return self.remote_module
def has_connection(self):
if self.gateway:
return self.gateway.hasreceiver()
return False
class LegacyModuleExecute(object):
"""
This (now legacy) class, is the way ``execnet`` does its remote module
execution: it sends it over a channel, and does a send/receive for
exchanging information. This only works when there is native support in
execnet for a given connection. This currently means it would only work for
ssh and local (Popen) connections, and will not work for anything like
kubernetes or containers.
"""
def __init__(self, gateway, module, logger=None):
self.channel = gateway.remote_exec(module)
self.module = module
self.logger = logger
def __getattr__(self, name):
if not hasattr(self.module, name):
msg = "module %s does not have attribute %s" % (str(self.module), name)
raise AttributeError(msg)
docstring = self._get_func_doc(getattr(self.module, name))
def wrapper(*args):
arguments = self._convert_args(args)
if docstring:
self.logger.debug(docstring)
self.channel.send("%s(%s)" % (name, arguments))
try:
return self.channel.receive()
except Exception as error:
# Error will come as a string of a traceback, remove everything
# up to the actual exception since we do get garbage otherwise
# that points to non-existent lines in the compiled code
exc_line = str(error)
for tb_line in reversed(str(error).split('\n')):
if tb_line:
exc_line = tb_line
break
raise RuntimeError(exc_line)
return wrapper
def _get_func_doc(self, func):
try:
return getattr(func, 'func_doc').strip()
except AttributeError:
return ''
def _convert_args(self, args):
if args:
if len(args) > 1:
arguments = str(args).rstrip(')').lstrip('(')
else:
arguments = str(args).rstrip(',)').lstrip('(')
else:
arguments = ''
return arguments
dump_template = """
if __name__ == '__main__':
import json, traceback
obj = {'return': None, 'exception': None}
try:
obj['return'] = %s%s
except Exception:
obj['exception'] = traceback.format_exc()
try:
print(json.dumps(obj).decode('utf-8'))
except AttributeError:
print(json.dumps(obj))
"""
class JsonModuleExecute(object):
"""
This remote execution class allows to ship Python code over to the remote
node, load it via ``stdin`` and call any function with arguments. The
resulting response is dumped over JSON so that it can get printed to
``stdout``, then captured locally, loaded into regular Python and returned.
If the remote end generates an exception with a traceback, that is captured
as well and raised accordingly.
"""
def __init__(self, conn, module, logger=None, python_executable=None):
self.conn = conn
self.module = module
self._module_source = inspect.getsource(module)
self.logger = logger
self.python_executable = python_executable
def __getattr__(self, name):
if not hasattr(self.module, name):
msg = "module %s does not have attribute %s" % (str(self.module), name)
raise AttributeError(msg)
docstring = self._get_func_doc(getattr(self.module, name))
def wrapper(*args):
if docstring:
self.logger.debug(docstring)
if len(args):
source = self._module_source + dump_template % (name, repr(args))
else:
source = self._module_source + dump_template % (name, '()')
# check python interpreter
if self.python_executable is None:
self.python_executable = get_python_executable(self.conn)
out, err, code = check(self.conn, [self.python_executable], stdin=source.encode('utf-8'))
if not out:
if not err:
err = [
'Traceback (most recent call last):',
' File "<stdin>", in <module>',
'Exception: error calling "%s"' % name
]
if code:
raise Exception('Unexpected remote exception: \n%s\n%s' % ('\n'.join(out), '\n'.join(err)))
# at this point, there was no stdout, and the exit code was 0,
# we must return so that we don't fail trying to serialize back
# the JSON
return
response = json.loads(out[0])
if response['exception']:
raise Exception(response['exception'])
return response['return']
return wrapper
def _get_func_doc(self, func):
try:
return getattr(func, 'func_doc').strip()
except AttributeError:
return ''
def basic_remote_logger():
logging.basicConfig()
logger = logging.getLogger(socket.gethostname())
logger.setLevel(logging.DEBUG)
return logger
def needs_ssh(hostname, _socket=None):
"""
Obtains remote hostname of the socket and cuts off the domain part
of its FQDN.
"""
if hostname.lower() in ['localhost', '127.0.0.1', '127.0.1.1']:
return False
_socket = _socket or socket
fqdn = _socket.getfqdn()
if hostname == fqdn:
return False
local_hostname = _socket.gethostname()
local_short_hostname = local_hostname.split('.')[0]
if local_hostname == hostname or local_short_hostname == hostname:
return False
return True
def get_python_executable(conn):
"""
Try to determine the remote Python version so that it can be used
when executing. Avoids the problem of different Python versions, or distros
that do not use ``python`` but do ``python3``
"""
# executables in order of preference:
executables = ['python3', 'python', 'python2.7']
for executable in executables:
conn.logger.debug('trying to determine remote python executable with %s' % executable)
out, err, code = check(conn, ['which', executable])
if code:
conn.logger.warning('skipping %s, was not found in path' % executable)
else:
try:
return out[0].strip()
except IndexError:
conn.logger.warning('could not parse stdout: %s' % out)
# if all fails, we just return whatever the main connection had
conn.logger.info('Falling back to using interpreter: %s' % conn.interpreter)
return conn.interpreter
| [
19,
23,
27,
30,
31
] |
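# A self-contained sketch of the JsonModuleExecute mechanism shown in row
# 1,442 above: module source plus the dump_template wrapper are fed to an
# interpreter on stdin and the JSON result is read back from stdout. The
# module body and the add(2, 3) call are invented for the demo; the real class
# routes the source through remoto's check() over the execnet connection.
import json
import subprocess
import sys

module_source = """
def add(a, b):
    return a + b
"""

wrapper = """
if __name__ == '__main__':
    import json, traceback
    obj = {'return': None, 'exception': None}
    try:
        obj['return'] = add(2, 3)
    except Exception:
        obj['exception'] = traceback.format_exc()
    print(json.dumps(obj))
"""

proc = subprocess.run(
    [sys.executable],
    input=(module_source + wrapper).encode('utf-8'),
    capture_output=True,
)
response = json.loads(proc.stdout.decode('utf-8'))
print(response['return'])     # 5
print(response['exception'])  # None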
1,443 | 55c9fe8caf1983f22d5a752574f590fa129e8017 | def pin():
print('wqeqwwqe')
<mask token>
| def pin():
print('wqeqwwqe')
<mask token>
window.title('爱你吆')
window.geometry('400x400+800+200')
window.protocol('WM_DELETE_WINDOW')
<mask token>
label.grid(row=10, column=10)
<mask token>
| def pin():
print('wqeqwwqe')
<mask token>
window = Tk()
window.title('爱你吆')
window.geometry('400x400+800+200')
window.protocol('WM_DELETE_WINDOW')
label = Label(window, text='hey,小姐姐', font=('微软雅黑', 15))
label.grid(row=10, column=10)
window = mainloop()
| def pin():
print('wqeqwwqe')
from tkinter import *
from tkinter import messagebox
from PIL import Image
from PIL import ImageTk
window = Tk()
window.title('爱你吆')
window.geometry('400x400+800+200')
window.protocol('WM_DELETE_WINDOW')
label = Label(window, text='hey,小姐姐', font=('微软雅黑', 15))
label.grid(row=10, column=10)
window = mainloop()
| def pin():
print('wqeqwwqe')
from tkinter import *
from tkinter import messagebox
from PIL import Image
from PIL import ImageTk
window = Tk()  # create a window
window.title('爱你吆')  # set the window title
window.geometry('400x400+800+200')  # window size and on-screen position
# window.protocol('WM_DELETE_WINDOW', pin)  # intercept window close and hand it to another function
window.protocol('WM_DELETE_WINDOW')
##############
label = Label(window, text='hey,小姐姐', font=("微软雅黑", 15))
# text: the label text; font: font family and size; fg: font colour
label.grid(row=10, column=10)  # grid layout, display position
#################
window=mainloop() | [
1,
2,
3,
4,
5
] |
1,444 | 11045cffc6d47902be7236e1d684422317f2c5f9 | <mask token>
@juju.requires_login
def list_models(user='user-admin'):
""" Lists Juju Models
Arguments:
user: Name of user to list models for.
Returns:
Dictionary of known Juju Models (default: user-admin)
"""
models = juju.CLIENT.ModelManager(request='ListModels', params={'Tag':
user})
return models['UserModels']
<mask token>
| <mask token>
@juju.requires_login
def list_models(user='user-admin'):
""" Lists Juju Models
Arguments:
user: Name of user to list models for.
Returns:
Dictionary of known Juju Models (default: user-admin)
"""
models = juju.CLIENT.ModelManager(request='ListModels', params={'Tag':
user})
return models['UserModels']
@juju.requires_login
def model_info(model):
""" Returns information on select model
Arguments:
model: name of model to inspect
Returns:
Dictionary of model attributes
"""
return juju.CLIENT.Client(request='ModelInfo', params={'Name': model})
<mask token>
| <mask token>
@juju.requires_login
def list_models(user='user-admin'):
""" Lists Juju Models
Arguments:
user: Name of user to list models for.
Returns:
Dictionary of known Juju Models (default: user-admin)
"""
models = juju.CLIENT.ModelManager(request='ListModels', params={'Tag':
user})
return models['UserModels']
@juju.requires_login
def model_info(model):
""" Returns information on select model
Arguments:
model: name of model to inspect
Returns:
Dictionary of model attributes
"""
return juju.CLIENT.Client(request='ModelInfo', params={'Name': model})
@juju.requires_login
def model_status():
""" Returns the FullStatus output of a model
Returns:
Dictionary of model status
"""
return juju.CLIENT.Client(request='FullStatus')
| <mask token>
from conjureup import juju
@juju.requires_login
def list_models(user='user-admin'):
""" Lists Juju Models
Arguments:
user: Name of user to list models for.
Returns:
Dictionary of known Juju Models (default: user-admin)
"""
models = juju.CLIENT.ModelManager(request='ListModels', params={'Tag':
user})
return models['UserModels']
@juju.requires_login
def model_info(model):
""" Returns information on select model
Arguments:
model: name of model to inspect
Returns:
Dictionary of model attributes
"""
return juju.CLIENT.Client(request='ModelInfo', params={'Name': model})
@juju.requires_login
def model_status():
""" Returns the FullStatus output of a model
Returns:
Dictionary of model status
"""
return juju.CLIENT.Client(request='FullStatus')
| """ Interfaces to Juju API ModelManager """
from conjureup import juju
@juju.requires_login
def list_models(user='user-admin'):
""" Lists Juju Models
Arguments:
user: Name of user to list models for.
Returns:
Dictionary of known Juju Models (default: user-admin)
"""
models = juju.CLIENT.ModelManager(request="ListModels",
params={'Tag': user})
return models['UserModels']
@juju.requires_login
def model_info(model):
""" Returns information on select model
Arguments:
model: name of model to inspect
Returns:
Dictionary of model attributes
"""
return juju.CLIENT.Client(request="ModelInfo",
params={"Name": model})
@juju.requires_login
def model_status():
""" Returns the FullStatus output of a model
Returns:
Dictionary of model status
"""
return juju.CLIENT.Client(request="FullStatus")
| [
1,
2,
3,
4,
5
] |
1,445 | 7af0566161c909457d40d3856434f1fb1e800aab | import math
import datetime as dt
import cv2
import os
from face import Face
class Video:
def __init__(self, vidSource, variableList=[], showWindow=True):
self.vidcap = cv2.VideoCapture(vidSource)
self.cascade = cv2.CascadeClassifier("face_cascade2.xml")
self.visibleFaceList = [] # contains all Face objects within the frame
self.notVisibleFaceList = []
self.inactiveFaceList = []
self.totalFaceCount = 0 # number of total faces seen so far
self.frameCount = 0 # counter to determine when to detect
self.cleanThresh = 0
# PERHAPS SUBCLASS?
self.frameImage = None # this is whatever kind of image returned by openCV
self.showWindow = showWindow
if self.showWindow:
cv2.namedWindow("show")
#####TWEAKABLE VARIABLES#####
if variableList == []:
# Always between 0 and 1
self.velocityWeight = 0
self.scoreWeight = 1
self.minRemovalScore = 0.1
# Maybe larger than one
self.radiusSize = 0.5
# Probably always larger than one
self.timeOut = 15
self.frameGap = 0
self.cleanThresh = 5
self.usingTime = True
# add a catch statement for if variable list isn't of length 6
else:
# Always between 0 and 1
self.velocityWeight = variableList[0]
self.scoreWeight = variableList[1]
self.minRemovalScore = variableList[2]
# Maybe larger than one
self.radiusSize = variableList[3]
# Probably always larger than one
self.timeOut = variableList[4]
self.frameGap = variableList[5]
self.cleanThresh = variableList[6]
self.usingTime = variableList[7]
def getFaces(self):
return self.visibleFaceList
def getCurrentFrame(self):
return self.frameImage
def pruneFaceList(self):
# for i in range(len(self.notVisibleFaceList)):
i = 0
while i < len(self.notVisibleFaceList):
pos = self.notVisibleFaceList[i].getPosition()
timeSinceDetection = dt.datetime.now()-pos[2]
if timeSinceDetection.total_seconds() > self.timeOut:
print timeSinceDetection.total_seconds()
print self.notVisibleFaceList[i].id
self.inactiveFaceList.append(self.notVisibleFaceList.pop(i))
i += 1
def addNewFace(self, location):
fc = Face()
fc.id = self.totalFaceCount
self.totalFaceCount += 1
fc.setPosition(location)
self.visibleFaceList.append(fc)
def listHelper(self, listChoice, rects):
megaList = []
for i in range(len(rects)):
tempList = []
for j in range(len(listChoice)):
tempList.append(self.scoreForBeingHere(listChoice[j],rects[i]))
# if there are issues, it's with copying
megaList.append(list(tempList))
return megaList
def dualListHelper(self, list1, list2, rects):
megaList = []
breakPoint = 0
for i in range(len(list1)):
tempList = []
for j in range(len(rects)):
tempList.append(self.scoreForBeingHere(list1[i],rects[j]))
breakPoint = i
megaList.append(list(tempList))
for i in range(len(list2)):
tempList = []
for j in range(len(rects)):
tempList.append(self.scoreForBeingHere(list2[i],rects[j]))
# if there are issues, it's with copying
megaList.append(list(tempList))
return megaList, breakPoint
def analyzeFrame(self, rects):
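        """Greedily match detected rectangles to tracked faces.

        Every (face, rect) pair across the visible and not-visible lists is
        scored with scoreForBeingHere; the highest remaining score above
        minRemovalScore is assigned first, not-visible faces that win an
        assignment are revived, detections that nothing claims become new Face
        objects, and visible faces left unmatched are moved to the not-visible
        list. When no faces are tracked yet, every detection becomes a new Face.
        """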
self.pruneFaceList()
#Case 1
# if len(rects)>len(self.visibleFaceList):
# print "case1"
if len(self.visibleFaceList)>0:
megaList, breakPoint = self.dualListHelper(self.visibleFaceList, self.notVisibleFaceList, rects)
assignmentList = [-1]*(len(self.visibleFaceList)+len(self.notVisibleFaceList))
totalAssigned=0
visibleFaces = len(self.visibleFaceList)
totalFaces = len(self.visibleFaceList)+len(self.notVisibleFaceList)
indices = []
while totalAssigned < len(rects):
# print "WHILE"
# print len(rects)
index = 0
highest = 0
highFaceIndex = 0
for i in range(len(megaList)):
if assignmentList[i] == -1:
currentVal = max(megaList[i])
# print currentVal
currentIndex = megaList[i].index(currentVal)
# print currentIndex not in assignmentList
if currentVal > highest and currentIndex not in assignmentList and currentVal > self.minRemovalScore:
highest = currentVal
index = currentIndex
highFaceIndex = i
if highest != 0:
if highFaceIndex > breakPoint:
face = self.notVisibleFaceList.pop(highFaceIndex-breakPoint-1)
self.visibleFaceList.append(face)
index = len(self.visibleFaceList)-1
assignmentList[highFaceIndex] = currentIndex
indices.append(highFaceIndex)
totalAssigned +=1
else:
print "HIGHEST = 0"
for j in range(len(rects)):
# print rects
if j not in assignmentList:
# print "here"
face = Face()
face.id = self.totalFaceCount
self.totalFaceCount += 1
self.visibleFaceList.append(face)
assignmentList.append(j)
indices.append(len(assignmentList)-1)
totalAssigned += 1
# print assignmentList
self.makeAssignments(assignmentList,rects, indices, visibleFaces)
for i in range(visibleFaces-1):
if assignmentList[i] == -1:
face = self.visibleFaceList.pop(i)
self.notVisibleFaceList.append(face)
else:
for i in range(len(rects)):
self.addNewFace(rects[i])
def analyzeFrame2(self, rects):
self.pruneFaceList()
#Case 1
if len(rects)>len(self.visibleFaceList):
# print "case1"
if len(self.visibleFaceList)>0:
megaList, breakPoint = self.dualListHelper(self.visibleFaceList, self.notVisibleFaceList, rects)
assignmentList = []
for i in range(len(megaList)):
highest = 0
index = 0
for j in range(len(megaList[i])):
# ensure that face hasn't been used already
if megaList[i][j] >= highest and j not in assignmentList:
index = j
highest = megaList[i][j]
if highest > self.minRemovalScore:
if i > breakPoint:
face = self.notVisibleFaceList.pop(i-breakPoint-1)
self.visibleFaceList.append(face)
index = len(self.visibleFaceList)-1
assignmentList.append(index)
else:
face = Face()
face.id = self.totalFaceCount
self.totalFaceCount += 1
self.visibleFaceList.append(face)
assignmentList.append(len(self.visibleFaceList)-1)
self.makeAssignments(assignmentList, rects)
k = 0
while k < breakPoint:
if k not in assignmentList:
face = self.visibleFaceList.pop(k)
self.notVisibleFaceList.append(face)
k+=1
else:
for i in range(len(rects)):
self.addNewFace(rects[i])
#Case 2
elif len(rects)==len(self.visibleFaceList):
# print "case2"
megaList, breakPoint = self.dualListHelper(self.visibleFaceList, self.notVisibleFaceList, rects)
assignmentList = []
# print "list"
# print megaList
for i in range(len(megaList)):
highest = 0
index = 0
for j in range(len(megaList[i])):
# ensure that face hasn't been used already
if megaList[i][j] >= highest and j not in assignmentList:
index = j
highest = megaList[i][j]
if highest > self.minRemovalScore:
# print "problem case?"
if i > breakPoint:
face = self.notVisibleFaceList.pop(i-breakPoint-1)
self.visibleFaceList.append(face)
index = len(self.visibleFaceList)-1
assignmentList.append(index)
else:
face = Face()
face.id = self.totalFaceCount
self.totalFaceCount += 1
self.visibleFaceList.append(face)
assignmentList.append(len(self.visibleFaceList)-1)
self.makeAssignments(assignmentList, rects)
k = 0
while k < breakPoint:
if k not in assignmentList:
face = self.visibleFaceList.pop(k)
self.notVisibleFaceList.append(face)
k+=1
#Case 3 (less rects than faces)
else:
# print "case3"
megaList, breakPoint = self.dualListHelper(self.visibleFaceList, self.notVisibleFaceList, rects)
assignmentList = []
probabilityList = []
for i in range(len(megaList)):
highest = 0
index = 0
for j in range(len(megaList[i])):
# ensure that face hasn't been used already
if megaList[i][j] >= highest and j not in assignmentList:
index = j
highest = megaList[i][j]
probabilityList.append(highest)
if highest > self.minRemovalScore:
if i > breakPoint:
face = self.notVisibleFaceList.pop(i-breakPoint-1)
self.visibleFaceList.append(face)
index = len(self.visibleFaceList)-1
assignmentList.append(index)
else:
face = Face()
face.id = self.totalFaceCount
self.totalFaceCount += 1
self.visibleFaceList.append(face)
assignmentList.append(len(self.visibleFaceList)-1)
self.makeAssignments(assignmentList, rects)
k = 0
while k < breakPoint:
if k not in assignmentList:
face = self.visibleFaceList.pop(k)
self.notVisibleFaceList.append(face)
k+=1
l = 0
# while len(assignmentList) > len(rects):
def analyzeFrame3(self,rects):
self.pruneFaceList()
if len(rects)>len(self.visibleFaceList):
if len(self.visibleFaceList)>0:
megaList = self.listHelper(self.visibleFaceList,rects)
# print megaList
assignmentList = []
for i in range(len(megaList)):
highest = 0
index = 0
for j in range(len(megaList[i])):
# ensure that face hasn't been used already
if megaList[i][j] >= highest and j not in assignmentList:
index = j
highest = megaList[i][j]
assignmentList.append(index)
self.makeAssignments(assignmentList, rects)
notList = self.listHelper(self.notVisibleFaceList,rects)
if notList != []:
for i in range(len(rects)):
index = -1
highest = 0
for j in range(len(self.notVisibleFaceList)):
if j not in assignmentList:
# print notList
if notList[i][j] > highest:
index = j
highest = notList[j][i]
if index != -1:
if notList[index][i] > self.minRemovalScore:
face = self.notVisibleFaceList.pop(index)
face.setPosition(rects[i])
self.visibleFaceList.append(face)
else:
fc = Face()
fc.id = self.totalFaceCount
# print fc.id
self.totalFaceCount += 1
fc.setPosition(rects[i])
self.visibleFaceList.append(fc)
else:
for i in range(len(rects)):
fc = Face()
fc.id = self.totalFaceCount
# print fc.id
self.totalFaceCount += 1
fc.setPosition(rects[i])
self.visibleFaceList.append(fc)
else:
for i in range(len(rects)):
fc = Face()
fc.id = self.totalFaceCount
# print fc.id
self.totalFaceCount += 1
fc.setPosition(rects[i])
self.visibleFaceList.append(fc)
elif len(rects)==len(self.visibleFaceList):
megaList = self.listHelper(self.visibleFaceList,rects)
# print megaList
assignmentList = []
for i in range(len(megaList)):
highest = 0
index = 0
for j in range(len(megaList[i])):
if megaList[i][j] >= highest and j not in assignmentList:
index = j
highest = megaList[i][j]
assignmentList.append(index)
self.makeAssignments(assignmentList, rects)
else:
# less rects than faces
megaList = self.listHelper(self.visibleFaceList,rects)
# print megaList
assignmentList = []
probabilityList = []
for i in range(len(megaList)):
highest = 0
index = 0
for j in range(len(megaList[i])):
if megaList[i][j] >= highest and j not in assignmentList:
index = j
highest = megaList[i][j]
assignmentList.append(index)
probabilityList.append(highest)
if len(probabilityList)!=0:
lowIndex = probabilityList.index(min(probabilityList))
self.notVisibleFaceList.append(self.visibleFaceList.pop(lowIndex))
assignmentList.pop(lowIndex)
self.makeAssignments(assignmentList, rects)
def makeAssignments(self, assignmentList, rects, indices, visibleFaces):
# print "assign"
# print assignmentList
# print rects
counter = 0
# print len(self.visibleFaceList)
for i in range(len(assignmentList)):
if rects != []:
if assignmentList[i] != -1:
if i < visibleFaces:
self.visibleFaceList[i].setPosition(rects[assignmentList[i]])
else:
print "HERE"
print assignmentList
print rects
print assignmentList[indices[counter]]
print rects[assignmentList[indices[counter]]]
self.visibleFaceList[counter+visibleFaces].setPosition(rects[assignmentList[indices[counter]]])
counter += 1
def scoreForBeingHere(self, face1, rect):
"""compares face and rect to sees what the chances are that they are the same
returns float between 0 and 1"""
time = dt.datetime.now()
recentPosition = face1.getPosition()
if not (recentPosition==[]):
deltaTime = (time - recentPosition[2]).total_seconds()
velocity = face1.getVelocity()
area = math.pow(face1.getArea(),0.5)
if self.usingTime:
radius = deltaTime*area*self.radiusSize
else:
radius = area*self.radiusSize
middleOfRect = ((rect[2]+rect[0])/2,(rect[3]+rect[1])/2)
middleOfFace = ((recentPosition[1][0]+recentPosition[0][0])/2,(recentPosition[1][1]+recentPosition[0][1])/2)
if velocity != 0:
middleOfFace = (middleOfFace[0] + velocity[0]/velocity[2]*deltaTime*self.velocityWeight, middleOfFace[1] + velocity[1]/velocity[2]*deltaTime*self.velocityWeight)
diffMiddles = math.pow(math.pow(middleOfFace[0]-middleOfRect[0], 2) + math.pow(middleOfFace[1]-middleOfRect[1], 2), 0.5)
# asymptote equation such that after the difference in middles is more than 1 radius away,
# prob will be down to 0.25 but after that it slowly goes to 0 never quite reaching it
x = math.pow(diffMiddles/radius,3)
# decays with increase in time
if self.usingTime:
score = self.scoreWeight/(deltaTime*(3*x+1))
else:
score = self.scoreWeight/((3*x+1))
return score
else:
return 0
def readFrame(self):
"""read frame from openCV info"""
success, self.frameImage = self.vidcap.read()
return success, self.frameImage
def detectAll(self):
"""Run face detection algorithm on the whole picture and make adjustments
to the faces based on where the are and where they should be"""
rects = self.cascade.detectMultiScale(self.frameImage, 1.3, 4, cv2.cv.CV_HAAR_SCALE_IMAGE, (20,20))
return rects
# # won't really ever use
# def estimateAll(self):
# """Step forward one frame, update all (visible?) faces based on estimation
# from velocities; don't run face detection algorithm
# Should be run every frame except where detectAll() is run."""
# pass
# # for face in self.visibleFaceList[]:
# # face.estimateNextPosition()
def findFaces(self):
"""detects all faces with the frame then analyzes the frame to determine
which face belongs to which face object"""
rects = self.detectAll()
if len(rects)==0:
rects = []
else:
rects[:, 2:] += rects[:, :2]
self.analyzeFrame(rects)
def display(self):
""" Displays current frame with rectangles and boxes"""
# print len(self.visibleFaceList)
# print "not visible: "
# for face in self.notVisibleFaceList:
# print face.id
# print "visibel: "
for i in range(len(self.visibleFaceList)):
# print self.visibleFaceList[i].id
self.showRectangle(self.visibleFaceList[i].getPosition(),self.visibleFaceList[i].id)
cv2.imshow("show", self.frameImage)
def clean(self):
i = 0
while i < len(self.notVisibleFaceList):
if len(self.notVisibleFaceList[i].prevPositions) < self.cleanThresh:
self.notVisibleFaceList.pop(i)
self.totalFaceCount -= 1
i += 1
def showRectangle(self, pos, IDnum):
cv2.rectangle(self.frameImage, pos[0], pos[1], (255,0,0), 2)
cv2.putText(self.frameImage, str(IDnum), pos[0], cv2.FONT_HERSHEY_SIMPLEX, 2, [0,255,0], 3)
def endWindow(self):
"""stops using webcam (or whatever source is) and removes display window"""
self.vidcap.release()
cv2.destroyWindow("show") | null | null | null | null | [
0
] |
1,446 | aea196566bbbe9d37bf03b9b17a4062659a27bb6 | <mask token>
class UserinfoTest(TestInterfaceCase):
def setUp(self):
login = req.reqData(req)
self.infoma = {}
self.response = ''
self.infoma['id'] = x['testinfo'][0]['id']
self.infoma['module'] = x['testinfo'][0]['module']
self.infoma['intr'] = x['testinfo'][0]['intr']
def base_check(self):
baseCheck = x['basecheck']
if self.response['c'] == baseCheck['c'] and self.response['m'
] == baseCheck['m']:
return True
else:
util.DATA['fail'] = util.DATA['fail'] + 1
self.infoma['result'] = '失败'
self.infoma['reason'] = '接口未正确返回'
return False
def detailCkeck_list(self, case):
if self.base_check() is True:
if 'list' in self.response:
util.DATA['pass'] = util.DATA['pass'] + 1
self.infoma['result'] = '通过'
else:
util.DATA['fail'] = util.DATA['fail'] + 1
self.infoma['result'] = '失败'
self.infoma['reason'] = self.response['c']
self.infoma['casename'] = case['casename']
util.DATA['sum'] = util.DATA['sum'] + 1
util.INFO.append(self.infoma)
def detailCheck_id(self, case):
if self.base_check() is True:
if self.response['r']['id'] == case['data']['id']:
util.DATA['pass'] = util.DATA['pass'] + 1
self.infoma['result'] = '通过'
else:
util.DATA['fail'] = util.DATA['fail'] + 1
self.infoma['result'] = '失败'
self.infoma['reason'] = '断言预期与实际不符'
self.infoma['casename'] = case['casename']
util.DATA['sum'] = util.DATA['sum'] + 1
util.INFO.append(self.infoma)
<mask token>
def test_user_info_conrrect(self):
case1 = x['userinfo']['case1']
self.response = Login.req(Login, case1['api'], case1['data'])
self.detailCheck_id(case1)
def test_user_item_conrrect(self):
case1 = x['useritems']['case1']
self.response = Login.req(Login, case1['api'], case1['data'])
self.detailCkeck_list(case1)
def test_user_projectboards(self):
case1 = x['userprojectboards']['case1']
self.response = Login.req(Login, case1['api'], case1['data'])
self.detailCkeck_list(case1)
<mask token>
def test_me_orders(self):
case1 = x['me']['case2']
self.response = Login.req(Login, case1['api'], case1['data'])
self.detailCkeck_list(case1)
def tearDown(self):
quit = Login.req(Login,
'http://192.168.4.15:8001/api/0.2/account/signout', datas='')
<mask token>
| <mask token>
class UserinfoTest(TestInterfaceCase):
def setUp(self):
login = req.reqData(req)
self.infoma = {}
self.response = ''
self.infoma['id'] = x['testinfo'][0]['id']
self.infoma['module'] = x['testinfo'][0]['module']
self.infoma['intr'] = x['testinfo'][0]['intr']
def base_check(self):
baseCheck = x['basecheck']
if self.response['c'] == baseCheck['c'] and self.response['m'
] == baseCheck['m']:
return True
else:
util.DATA['fail'] = util.DATA['fail'] + 1
self.infoma['result'] = '失败'
self.infoma['reason'] = '接口未正确返回'
return False
def detailCkeck_list(self, case):
if self.base_check() is True:
if 'list' in self.response:
util.DATA['pass'] = util.DATA['pass'] + 1
self.infoma['result'] = '通过'
else:
util.DATA['fail'] = util.DATA['fail'] + 1
self.infoma['result'] = '失败'
self.infoma['reason'] = self.response['c']
self.infoma['casename'] = case['casename']
util.DATA['sum'] = util.DATA['sum'] + 1
util.INFO.append(self.infoma)
def detailCheck_id(self, case):
if self.base_check() is True:
if self.response['r']['id'] == case['data']['id']:
util.DATA['pass'] = util.DATA['pass'] + 1
self.infoma['result'] = '通过'
else:
util.DATA['fail'] = util.DATA['fail'] + 1
self.infoma['result'] = '失败'
self.infoma['reason'] = '断言预期与实际不符'
self.infoma['casename'] = case['casename']
util.DATA['sum'] = util.DATA['sum'] + 1
util.INFO.append(self.infoma)
<mask token>
def test_user_info_conrrect(self):
case1 = x['userinfo']['case1']
self.response = Login.req(Login, case1['api'], case1['data'])
self.detailCheck_id(case1)
def test_user_item_conrrect(self):
case1 = x['useritems']['case1']
self.response = Login.req(Login, case1['api'], case1['data'])
self.detailCkeck_list(case1)
def test_user_projectboards(self):
case1 = x['userprojectboards']['case1']
self.response = Login.req(Login, case1['api'], case1['data'])
self.detailCkeck_list(case1)
def test_me_info(self):
case1 = x['me']['case1']
self.response = Login.req(Login, case1['api'], case1['data'])
self.base_check(case1)
def test_me_orders(self):
case1 = x['me']['case2']
self.response = Login.req(Login, case1['api'], case1['data'])
self.detailCkeck_list(case1)
def tearDown(self):
quit = Login.req(Login,
'http://192.168.4.15:8001/api/0.2/account/signout', datas='')
<mask token>
| <mask token>
def getYam(homeyaml):
try:
with open(homeyaml, encoding='utf-8') as f:
x = yaml.load(f)
return x
except FileNotFoundError:
print(u'找不到文件')
<mask token>
class UserinfoTest(TestInterfaceCase):
def setUp(self):
login = req.reqData(req)
self.infoma = {}
self.response = ''
self.infoma['id'] = x['testinfo'][0]['id']
self.infoma['module'] = x['testinfo'][0]['module']
self.infoma['intr'] = x['testinfo'][0]['intr']
def base_check(self):
baseCheck = x['basecheck']
if self.response['c'] == baseCheck['c'] and self.response['m'
] == baseCheck['m']:
return True
else:
util.DATA['fail'] = util.DATA['fail'] + 1
self.infoma['result'] = '失败'
self.infoma['reason'] = '接口未正确返回'
return False
def detailCkeck_list(self, case):
if self.base_check() is True:
if 'list' in self.response:
util.DATA['pass'] = util.DATA['pass'] + 1
self.infoma['result'] = '通过'
else:
util.DATA['fail'] = util.DATA['fail'] + 1
self.infoma['result'] = '失败'
self.infoma['reason'] = self.response['c']
self.infoma['casename'] = case['casename']
util.DATA['sum'] = util.DATA['sum'] + 1
util.INFO.append(self.infoma)
def detailCheck_id(self, case):
if self.base_check() is True:
if self.response['r']['id'] == case['data']['id']:
util.DATA['pass'] = util.DATA['pass'] + 1
self.infoma['result'] = '通过'
else:
util.DATA['fail'] = util.DATA['fail'] + 1
self.infoma['result'] = '失败'
self.infoma['reason'] = '断言预期与实际不符'
self.infoma['casename'] = case['casename']
util.DATA['sum'] = util.DATA['sum'] + 1
util.INFO.append(self.infoma)
"""正常测试"""
def test_user_info_conrrect(self):
case1 = x['userinfo']['case1']
self.response = Login.req(Login, case1['api'], case1['data'])
self.detailCheck_id(case1)
def test_user_item_conrrect(self):
case1 = x['useritems']['case1']
self.response = Login.req(Login, case1['api'], case1['data'])
self.detailCkeck_list(case1)
def test_user_projectboards(self):
case1 = x['userprojectboards']['case1']
self.response = Login.req(Login, case1['api'], case1['data'])
self.detailCkeck_list(case1)
def test_me_info(self):
case1 = x['me']['case1']
self.response = Login.req(Login, case1['api'], case1['data'])
self.base_check(case1)
def test_me_orders(self):
case1 = x['me']['case2']
self.response = Login.req(Login, case1['api'], case1['data'])
self.detailCkeck_list(case1)
def tearDown(self):
quit = Login.req(Login,
'http://192.168.4.15:8001/api/0.2/account/signout', datas='')
<mask token>
| <mask token>
PATH = lambda p: os.path.abspath(os.path.join(os.path.dirname(__file__), p))
def getYam(homeyaml):
try:
with open(homeyaml, encoding='utf-8') as f:
x = yaml.load(f)
return x
except FileNotFoundError:
print(u'找不到文件')
x = getYam(PATH('./case_user_api.yml'))
class UserinfoTest(TestInterfaceCase):
def setUp(self):
login = req.reqData(req)
self.infoma = {}
self.response = ''
self.infoma['id'] = x['testinfo'][0]['id']
self.infoma['module'] = x['testinfo'][0]['module']
self.infoma['intr'] = x['testinfo'][0]['intr']
def base_check(self):
baseCheck = x['basecheck']
if self.response['c'] == baseCheck['c'] and self.response['m'
] == baseCheck['m']:
return True
else:
util.DATA['fail'] = util.DATA['fail'] + 1
self.infoma['result'] = '失败'
self.infoma['reason'] = '接口未正确返回'
return False
def detailCkeck_list(self, case):
if self.base_check() is True:
if 'list' in self.response:
util.DATA['pass'] = util.DATA['pass'] + 1
self.infoma['result'] = '通过'
else:
util.DATA['fail'] = util.DATA['fail'] + 1
self.infoma['result'] = '失败'
self.infoma['reason'] = self.response['c']
self.infoma['casename'] = case['casename']
util.DATA['sum'] = util.DATA['sum'] + 1
util.INFO.append(self.infoma)
def detailCheck_id(self, case):
if self.base_check() is True:
if self.response['r']['id'] == case['data']['id']:
util.DATA['pass'] = util.DATA['pass'] + 1
self.infoma['result'] = '通过'
else:
util.DATA['fail'] = util.DATA['fail'] + 1
self.infoma['result'] = '失败'
self.infoma['reason'] = '断言预期与实际不符'
self.infoma['casename'] = case['casename']
util.DATA['sum'] = util.DATA['sum'] + 1
util.INFO.append(self.infoma)
"""正常测试"""
def test_user_info_conrrect(self):
case1 = x['userinfo']['case1']
self.response = Login.req(Login, case1['api'], case1['data'])
self.detailCheck_id(case1)
def test_user_item_conrrect(self):
case1 = x['useritems']['case1']
self.response = Login.req(Login, case1['api'], case1['data'])
self.detailCkeck_list(case1)
def test_user_projectboards(self):
case1 = x['userprojectboards']['case1']
self.response = Login.req(Login, case1['api'], case1['data'])
self.detailCkeck_list(case1)
def test_me_info(self):
case1 = x['me']['case1']
self.response = Login.req(Login, case1['api'], case1['data'])
self.base_check(case1)
def test_me_orders(self):
case1 = x['me']['case2']
self.response = Login.req(Login, case1['api'], case1['data'])
self.detailCkeck_list(case1)
def tearDown(self):
quit = Login.req(Login,
'http://192.168.4.15:8001/api/0.2/account/signout', datas='')
if __name__ == '__main__':
suite = unittest.TestSuite()
filename = 'C:\\Users\\xp\\Desktop\\result.html'
fp = open(filename, 'wb')
runner = HTMLTestRunner.HTMLTestRunner(stream=fp, title=u'自动化测试报告',
description=u'注册- -自动化测试报告')
runner.run(suite)
| import unittest
from common.ReqLogin import req
import os
import yaml
from common import util
from TestCase.runnerBase import TestInterfaceCase
import paramunittest
PATH = lambda p: os.path.abspath(
os.path.join(os.path.dirname(__file__), p)
)
def getYam(homeyaml):
try:
with open(homeyaml, encoding='utf-8') as f:
x = yaml.load(f)
return x
except FileNotFoundError:
print(u"找不到文件")
x = getYam(PATH("./case_user_api.yml"))
class UserinfoTest(TestInterfaceCase):
def setUp(self):
login = req.reqData(req)
self.infoma = {}
self.response = ""
self.infoma["id"] = x["testinfo"][0]["id"]
self.infoma["module"] = x["testinfo"][0]["module"]
self.infoma["intr"] = x["testinfo"][0]["intr"]
def base_check(self):
baseCheck = x["basecheck"]
if self.response["c"] == baseCheck["c"] and self.response["m"] == baseCheck["m"]:
return True
else:
util.DATA["fail"] = util.DATA["fail"] + 1
self.infoma["result"] = "失败"
self.infoma["reason"] = "接口未正确返回"
return False
def detailCkeck_list(self,case):
if self.base_check() is True:
if "list" in self.response:
util.DATA["pass"] = util.DATA["pass"] + 1
self.infoma["result"] = "通过"
else:
util.DATA["fail"] = util.DATA["fail"] + 1
self.infoma["result"] = "失败"
self.infoma["reason"] = self.response["c"]
self.infoma["casename"] = case["casename"]
util.DATA["sum"] = util.DATA["sum"] + 1
util.INFO.append(self.infoma)
def detailCheck_id(self,case):
if self.base_check() is True:
if self.response["r"]["id"] == case["data"]["id"]:
util.DATA["pass"] = util.DATA["pass"] + 1
self.infoma["result"] = "通过"
else:
util.DATA["fail"] = util.DATA["fail"] + 1
self.infoma["result"] = "失败"
self.infoma["reason"] = "断言预期与实际不符"
self.infoma["casename"] = case["casename"]
util.DATA["sum"] = util.DATA["sum"] + 1
util.INFO.append(self.infoma)
'''正常测试'''
def test_user_info_conrrect(self):
case1 = x["userinfo"]["case1"]
self.response = Login.req(Login,case1["api"],case1["data"])
self.detailCheck_id(case1)
#
# '''异常测试--value字段长度不够'''
# def test_user_info_poorvalue(self):
# case2 = x["userinfo"]["case2"]
# self.response = Login.req(Login, case2["api"], case2["data"])
# if self.check1() is True:
# if self.response["r"]["id"] != case2["data"]["id"]:
# util.DATA["pass"] = util.DATA["pass"] + 1
# self.infoma["result"] = "通过"
# else:
# util.DATA["fail"] = util.DATA["fail"] + 1
# self.infoma["result"] = "失败"
# self.infoma["reason"] = "断言预期与实际不符"
# self.infoma["casename"] = case2["casename"]
# util.DATA["sum"] = util.DATA["sum"] + 1
# util.INFO.append(self.infoma)
# '''异常测试--接口所需参数为空'''
# def test_user_info_poorkey(self):
# case3 = x["userinfo"]["case3"]
# self.response = Login.req(Login,case3["api"],case3["data"])
# if self.check1() is False:
# if self.response["massage"] == case3["massage"]:
# util.DATA["pass"] = util.DATA["pass"] + 1
# self.infoma["result"] = "通过"
# else:
# util.DATA["fail"] = util.DATA["fail"] + 1
# self.infoma["result"] = "失败"
# self.infoma["reason"] = "断言预期与实际不符"
# self.infoma["casename"] = case3["casename"]
# util.DATA["sum"] = util.DATA["sum"] + 1
# util.INFO.append(self.infoma)
def test_user_item_conrrect(self):
case1 = x["useritems"]["case1"]
self.response = Login.req(Login, case1["api"], case1["data"])
self.detailCkeck_list(case1)
def test_user_projectboards(self):
case1 = x["userprojectboards"]["case1"]
self.response = Login.req(Login, case1["api"], case1["data"])
self.detailCkeck_list(case1)
def test_me_info(self):
case1 = x["me"]["case1"]
self.response = Login.req(Login, case1["api"], case1["data"])
self.base_check(case1)
def test_me_orders(self):
case1 = x["me"]["case2"]
self.response = Login.req(Login, case1["api"], case1["data"])
self.detailCkeck_list(case1)
def tearDown(self):
quit = Login.req(Login,'http://192.168.4.15:8001/api/0.2/account/signout',datas='')
if __name__ =='__main__':
suite = unittest.TestSuite()
# tests = ['test_user_info_conrrect','test_user_info_poorvalue','test_user_info_poorkey']
# suite.addTests(map(UserinfoTest,tests))
# suite.addTest(UserItemsTest("test_user_item_conrrect"))
filename = r'C:\Users\xp\Desktop\result.html'
fp = open(filename, 'wb')
runner = HTMLTestRunner.HTMLTestRunner(
stream=fp,
title=u'自动化测试报告',
description=u'注册- -自动化测试报告')
runner.run(suite)
| [
10,
11,
13,
15,
17
] |
1,447 | c2cf74893c7f7515a95141bb10be6a446b45a0cc | <mask token>
| <mask token>
def train_once():
os.system('xterm -e "pwd ; cd ~ ; torcs -r ~/quickrace.xml " &')
os.system('xterm -e "pwd ; ./start.sh " &')
return True
| import os
import time
def train_once():
os.system('xterm -e "pwd ; cd ~ ; torcs -r ~/quickrace.xml " &')
os.system('xterm -e "pwd ; ./start.sh " &')
return True
| import os
import time
#if __name__ == "__main__":
# os.system('xterm -e "pwd ; cd ~ ; torcs -r ~/quickrace.xml ; echo press RETURN to close this window ; read" &') # delete the echo and the read to don't stop the process and make it run quickly
# os.system('xterm -e "pwd ; ./start.sh ; echo press RETURN to close this window ; read" &')
def train_once():
os.system('xterm -e "pwd ; cd ~ ; torcs -r ~/quickrace.xml " &') # delete the echo and the read to don't stop the process and make it run quickly
os.system('xterm -e "pwd ; ./start.sh " &')
return True
| null | [
0,
1,
2,
3
] |
1,448 | 65da68d33aa382ed6deeff3c66a063ee299c2567 | a=[1,2,3,4,5]
max=a[0]
min=a[0]
for i in a:
if i>=max:
max=i
elif i<=min:
min=i
print max
print min
| null | null | null | null | [
0
] |
1,449 | d6f8ec0fd8be0fa7019a84af47d08ab8b5b32d92 | <mask token>
class BaseCollectionSerializer(ResolweBaseSerializer):
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
def get_status(self, collection):
"""Return status of the collection based on the status of data objects.
When collection contains no data objects None is returned.
"""
status_order = [Data.STATUS_ERROR, Data.STATUS_UPLOADING, Data.
STATUS_PROCESSING, Data.STATUS_PREPARING, Data.STATUS_WAITING,
Data.STATUS_RESOLVING, Data.STATUS_DONE]
status_set = set(collection.data_statuses) if hasattr(collection,
'data_statuses') else collection.data.values_list('status',
flat=True).distinct()
if not status_set:
return None
for status in status_order:
if status in status_set:
return status
logger.warning('Could not determine the status of a collection.',
extra={'collection': collection.__dict__})
return None
class Meta:
"""CollectionSerializer Meta options."""
model = Collection
read_only_fields = ('created', 'descriptor_dirty', 'duplicated',
'id', 'modified', 'data_count', 'status')
update_protected_fields = 'contributor',
fields = read_only_fields + update_protected_fields + ('description',
'descriptor', 'descriptor_schema', 'name', 'settings', 'slug',
'tags')
class CollectionSerializer(BaseCollectionSerializer):
"""Serializer for Collection objects."""
entity_count = serializers.SerializerMethodField(required=False)
def get_entity_count(self, collection):
"""Return number of entities on the collection."""
return collection.entity_count if hasattr(collection, 'entity_count'
) else collection.entity_set.count()
class Meta(BaseCollectionSerializer.Meta):
"""CollectionSerializer Meta options."""
read_only_fields = BaseCollectionSerializer.Meta.read_only_fields + (
'entity_count',)
fields = BaseCollectionSerializer.Meta.fields + ('entity_count',)
| <mask token>
class BaseCollectionSerializer(ResolweBaseSerializer):
<mask token>
settings = ProjectableJSONField(required=False)
descriptor = ProjectableJSONField(required=False)
descriptor_schema = DictRelatedField(queryset=DescriptorSchema.objects.
all(), serializer=DescriptorSchemaSerializer, allow_null=True,
required=False)
data_count = serializers.SerializerMethodField(required=False)
status = serializers.SerializerMethodField(required=False)
def get_data_count(self, collection):
"""Return number of data objects on the collection."""
return collection.data_count if hasattr(collection, 'data_count'
) else collection.data.count()
def get_status(self, collection):
"""Return status of the collection based on the status of data objects.
When collection contains no data objects None is returned.
"""
status_order = [Data.STATUS_ERROR, Data.STATUS_UPLOADING, Data.
STATUS_PROCESSING, Data.STATUS_PREPARING, Data.STATUS_WAITING,
Data.STATUS_RESOLVING, Data.STATUS_DONE]
status_set = set(collection.data_statuses) if hasattr(collection,
'data_statuses') else collection.data.values_list('status',
flat=True).distinct()
if not status_set:
return None
for status in status_order:
if status in status_set:
return status
logger.warning('Could not determine the status of a collection.',
extra={'collection': collection.__dict__})
return None
class Meta:
"""CollectionSerializer Meta options."""
model = Collection
read_only_fields = ('created', 'descriptor_dirty', 'duplicated',
'id', 'modified', 'data_count', 'status')
update_protected_fields = 'contributor',
fields = read_only_fields + update_protected_fields + ('description',
'descriptor', 'descriptor_schema', 'name', 'settings', 'slug',
'tags')
class CollectionSerializer(BaseCollectionSerializer):
"""Serializer for Collection objects."""
entity_count = serializers.SerializerMethodField(required=False)
def get_entity_count(self, collection):
"""Return number of entities on the collection."""
return collection.entity_count if hasattr(collection, 'entity_count'
) else collection.entity_set.count()
class Meta(BaseCollectionSerializer.Meta):
"""CollectionSerializer Meta options."""
read_only_fields = BaseCollectionSerializer.Meta.read_only_fields + (
'entity_count',)
fields = BaseCollectionSerializer.Meta.fields + ('entity_count',)
| <mask token>
class BaseCollectionSerializer(ResolweBaseSerializer):
"""Base serializer for Collection objects."""
settings = ProjectableJSONField(required=False)
descriptor = ProjectableJSONField(required=False)
descriptor_schema = DictRelatedField(queryset=DescriptorSchema.objects.
all(), serializer=DescriptorSchemaSerializer, allow_null=True,
required=False)
data_count = serializers.SerializerMethodField(required=False)
status = serializers.SerializerMethodField(required=False)
def get_data_count(self, collection):
"""Return number of data objects on the collection."""
return collection.data_count if hasattr(collection, 'data_count'
) else collection.data.count()
def get_status(self, collection):
"""Return status of the collection based on the status of data objects.
When collection contains no data objects None is returned.
"""
status_order = [Data.STATUS_ERROR, Data.STATUS_UPLOADING, Data.
STATUS_PROCESSING, Data.STATUS_PREPARING, Data.STATUS_WAITING,
Data.STATUS_RESOLVING, Data.STATUS_DONE]
status_set = set(collection.data_statuses) if hasattr(collection,
'data_statuses') else collection.data.values_list('status',
flat=True).distinct()
if not status_set:
return None
for status in status_order:
if status in status_set:
return status
logger.warning('Could not determine the status of a collection.',
extra={'collection': collection.__dict__})
return None
class Meta:
"""CollectionSerializer Meta options."""
model = Collection
read_only_fields = ('created', 'descriptor_dirty', 'duplicated',
'id', 'modified', 'data_count', 'status')
update_protected_fields = 'contributor',
fields = read_only_fields + update_protected_fields + ('description',
'descriptor', 'descriptor_schema', 'name', 'settings', 'slug',
'tags')
class CollectionSerializer(BaseCollectionSerializer):
"""Serializer for Collection objects."""
entity_count = serializers.SerializerMethodField(required=False)
def get_entity_count(self, collection):
"""Return number of entities on the collection."""
return collection.entity_count if hasattr(collection, 'entity_count'
) else collection.entity_set.count()
class Meta(BaseCollectionSerializer.Meta):
"""CollectionSerializer Meta options."""
read_only_fields = BaseCollectionSerializer.Meta.read_only_fields + (
'entity_count',)
fields = BaseCollectionSerializer.Meta.fields + ('entity_count',)
| <mask token>
import logging
from rest_framework import serializers
from resolwe.flow.models import Collection, Data, DescriptorSchema
from resolwe.rest.fields import ProjectableJSONField
from .base import ResolweBaseSerializer
from .descriptor import DescriptorSchemaSerializer
from .fields import DictRelatedField
logger = logging.getLogger(__name__)
class BaseCollectionSerializer(ResolweBaseSerializer):
"""Base serializer for Collection objects."""
settings = ProjectableJSONField(required=False)
descriptor = ProjectableJSONField(required=False)
descriptor_schema = DictRelatedField(queryset=DescriptorSchema.objects.
all(), serializer=DescriptorSchemaSerializer, allow_null=True,
required=False)
data_count = serializers.SerializerMethodField(required=False)
status = serializers.SerializerMethodField(required=False)
def get_data_count(self, collection):
"""Return number of data objects on the collection."""
return collection.data_count if hasattr(collection, 'data_count'
) else collection.data.count()
def get_status(self, collection):
"""Return status of the collection based on the status of data objects.
When collection contains no data objects None is returned.
"""
status_order = [Data.STATUS_ERROR, Data.STATUS_UPLOADING, Data.
STATUS_PROCESSING, Data.STATUS_PREPARING, Data.STATUS_WAITING,
Data.STATUS_RESOLVING, Data.STATUS_DONE]
status_set = set(collection.data_statuses) if hasattr(collection,
'data_statuses') else collection.data.values_list('status',
flat=True).distinct()
if not status_set:
return None
for status in status_order:
if status in status_set:
return status
logger.warning('Could not determine the status of a collection.',
extra={'collection': collection.__dict__})
return None
class Meta:
"""CollectionSerializer Meta options."""
model = Collection
read_only_fields = ('created', 'descriptor_dirty', 'duplicated',
'id', 'modified', 'data_count', 'status')
update_protected_fields = 'contributor',
fields = read_only_fields + update_protected_fields + ('description',
'descriptor', 'descriptor_schema', 'name', 'settings', 'slug',
'tags')
class CollectionSerializer(BaseCollectionSerializer):
"""Serializer for Collection objects."""
entity_count = serializers.SerializerMethodField(required=False)
def get_entity_count(self, collection):
"""Return number of entities on the collection."""
return collection.entity_count if hasattr(collection, 'entity_count'
) else collection.entity_set.count()
class Meta(BaseCollectionSerializer.Meta):
"""CollectionSerializer Meta options."""
read_only_fields = BaseCollectionSerializer.Meta.read_only_fields + (
'entity_count',)
fields = BaseCollectionSerializer.Meta.fields + ('entity_count',)
| """Resolwe collection serializer."""
import logging
from rest_framework import serializers
from resolwe.flow.models import Collection, Data, DescriptorSchema
from resolwe.rest.fields import ProjectableJSONField
from .base import ResolweBaseSerializer
from .descriptor import DescriptorSchemaSerializer
from .fields import DictRelatedField
logger = logging.getLogger(__name__)
class BaseCollectionSerializer(ResolweBaseSerializer):
"""Base serializer for Collection objects."""
settings = ProjectableJSONField(required=False)
descriptor = ProjectableJSONField(required=False)
descriptor_schema = DictRelatedField(
queryset=DescriptorSchema.objects.all(),
serializer=DescriptorSchemaSerializer,
allow_null=True,
required=False,
)
data_count = serializers.SerializerMethodField(required=False)
status = serializers.SerializerMethodField(required=False)
def get_data_count(self, collection):
"""Return number of data objects on the collection."""
# Use 'data_count' attribute when available. It is created in the
# BaseCollectionViewSet class.
return (
collection.data_count
if hasattr(collection, "data_count")
else collection.data.count()
)
def get_status(self, collection):
"""Return status of the collection based on the status of data objects.
When collection contains no data objects None is returned.
"""
status_order = [
Data.STATUS_ERROR,
Data.STATUS_UPLOADING,
Data.STATUS_PROCESSING,
Data.STATUS_PREPARING,
Data.STATUS_WAITING,
Data.STATUS_RESOLVING,
Data.STATUS_DONE,
]
# Use 'data_statuses' attribute when available. It is created in the
# BaseCollectionViewSet class. It contains all the distinct statuses of the
# data objects in the collection.
status_set = (
set(collection.data_statuses)
if hasattr(collection, "data_statuses")
else collection.data.values_list("status", flat=True).distinct()
)
if not status_set:
return None
for status in status_order:
if status in status_set:
return status
logger.warning(
"Could not determine the status of a collection.",
extra={"collection": collection.__dict__},
)
return None
class Meta:
"""CollectionSerializer Meta options."""
model = Collection
read_only_fields = (
"created",
"descriptor_dirty",
"duplicated",
"id",
"modified",
"data_count",
"status",
)
update_protected_fields = ("contributor",)
fields = (
read_only_fields
+ update_protected_fields
+ (
"description",
"descriptor",
"descriptor_schema",
"name",
"settings",
"slug",
"tags",
)
)
class CollectionSerializer(BaseCollectionSerializer):
"""Serializer for Collection objects."""
entity_count = serializers.SerializerMethodField(required=False)
def get_entity_count(self, collection):
"""Return number of entities on the collection."""
# Use 'entity_count' attribute when available. It is created in the
# BaseCollectionViewSet class.
return (
collection.entity_count
if hasattr(collection, "entity_count")
else collection.entity_set.count()
)
class Meta(BaseCollectionSerializer.Meta):
"""CollectionSerializer Meta options."""
read_only_fields = BaseCollectionSerializer.Meta.read_only_fields + (
"entity_count",
)
fields = BaseCollectionSerializer.Meta.fields + ("entity_count",)
| [
6,
8,
9,
11,
12
] |
1,450 | 80c3d9165c1b592122fabf6382e265465604989c | <mask token>
class HelloApiHandler(Resource):
<mask token>
<mask token>
| <mask token>
class HelloApiHandler(Resource):
def get(self):
return {'resultStatus': 'SUCCESS', 'message': 'Hello Api Handler'}
<mask token>
| <mask token>
class HelloApiHandler(Resource):
def get(self):
return {'resultStatus': 'SUCCESS', 'message': 'Hello Api Handler'}
def post(self):
print(self)
parser = reqparse.RequestParser()
parser.add_argument('type', type=str)
parser.add_argument('message', type=str)
args = parser.parse_args()
print(args)
request_type = args['type']
request_json = args['message']
ret_status = request_type
ret_msg = request_json
if ret_msg:
message = 'Your Message Requested: {}'.format(ret_msg)
else:
message = 'No Msg'
final_ret = {'status': 'Success', 'message': message}
return final_ret
| from flask_restful import Api, Resource, reqparse
class HelloApiHandler(Resource):
def get(self):
return {'resultStatus': 'SUCCESS', 'message': 'Hello Api Handler'}
def post(self):
print(self)
parser = reqparse.RequestParser()
parser.add_argument('type', type=str)
parser.add_argument('message', type=str)
args = parser.parse_args()
print(args)
request_type = args['type']
request_json = args['message']
ret_status = request_type
ret_msg = request_json
if ret_msg:
message = 'Your Message Requested: {}'.format(ret_msg)
else:
message = 'No Msg'
final_ret = {'status': 'Success', 'message': message}
return final_ret
| from flask_restful import Api, Resource, reqparse
class HelloApiHandler(Resource):
def get(self):
return {
'resultStatus': 'SUCCESS',
'message': "Hello Api Handler"
}
def post(self):
print(self)
parser = reqparse.RequestParser()
parser.add_argument('type', type=str)
parser.add_argument('message', type=str)
args = parser.parse_args()
print(args)
# note, the post req from frontend needs to match the strings here (e.g. 'type and 'message')
request_type = args['type']
request_json = args['message']
# ret_status, ret_msg = ReturnData(request_type, request_json)
# currently just returning the req straight
ret_status = request_type
ret_msg = request_json
if ret_msg:
message = "Your Message Requested: {}".format(ret_msg)
else:
message = "No Msg"
final_ret = {"status": "Success", "message": message}
return final_ret | [
1,
2,
3,
4,
5
] |
1,451 | ed3fbae19c88100690dd5c558c0dc6d36a4849c8 | <mask token>
class NewScrape:
def scrape_main(self):
"""
Top-level function.
Use links from below, scrape a page, sleep for 5s, and restart on the next link.
"""
for i in self.gen_links():
index = str(self.gen_links().index(i))
link = i
self.get_weibo(link, index)
time.sleep(5)
self.retrieve_posts(OUTPUT_FILE_NAME)
print('=' * 10)
print('Congratulations! Your data is stored')
return None
<mask token>
def get_weibo(self, link, index):
"""
        Scrape a certain weibo search result page on 'zhongsou' and store it locally.
"""
html_doc = open('{}Temp/weibo.txt'.format(WORKING_DIR), 'w',
encoding='utf8')
r = requests.get(link)
print('accessing web data.')
html_doc.write(r.text)
html_doc.close()
outfile_name = 'zhongsou_results_page_' + index + '.csv'
outfile = open('{}Temp/'.format(WORKING_DIR) + outfile_name, 'w',
encoding='utf8')
html_doc = open('{}Temp/weibo.txt'.format(WORKING_DIR), 'r',
encoding='utf8')
soup = BeautifulSoup(html_doc)
user_link = []
post_txt = []
post_link = []
post_time = []
weibo_items = soup.find_all('div', class_='weibo_item')
for item in weibo_items:
for link in item.find_all('a', target='_blank', class_='sina_weibo'
):
url = link.get('href')
post_link.append(url)
for post in item.find_all('h3', class_='weibo_title'):
for a in post.find_all('a'):
url = a.get('href')
user_link.append(url)
for time in item.find_all('div', class_='weibo_time'):
txt = time.get_text()
post_time.append(txt)
for post in item.find_all('p', class_='weibo_txt'):
txt = post.get_text()
post_txt.append(txt)
data = {'post_text': post_txt, 'post_link': post_link, 'user':
user_link, 'time': post_time}
frame = DataFrame(data)
frame.to_csv(outfile, encoding='utf-8')
print(outfile_name, 'processed complete.')
outfile.close()
html_doc.close()
return None
<mask token>
def retrieve_posts(self, outfile_name):
"""(str)->a file
"""
post_text = []
for i in range(50):
frame_2 = pandas.read_csv('{}Temp/zhongsou_results_page_{}.csv'
.format(WORKING_DIR, str(i)))
df2 = DataFrame(frame_2)
for i in df2.post_text:
post_text.append(i)
data = {'post_text': post_text}
frame = DataFrame(data)
frame.to_csv('{}Text_data/{}.txt'.format(WORKING_DIR, outfile_name),
encoding='utf-8')
frame.to_excel('{}Text_data/{}.xlsx'.format(WORKING_DIR,
outfile_name), encoding='utf-8')
print('Done')
return None
class ContinueScrape:
def scrape_main(self):
"""
Top-level function.
Use links from below, scrape a page, sleep for 5s, and restart on the next link.
"""
for i in self.gen_links():
index = str(self.gen_links().index(i))
link = i
cmd = self.get_weibo(link, index)
if cmd == 'STOP':
break
else:
time.sleep(10)
continue
print('=' * 10)
print('Scrape is now complete. Help me to organize them.')
print(
'View your temp folder, what is the biggest number of the files? \n'
)
fn = int(input())
self.retrieve_posts(fn)
print('=' * 10)
print('Congratulations! Your data is stored')
return
def gen_links(self):
links = []
for i in range(1, 51):
i = str(i)
links.append('{}&b={}'.format(QUERY_LINK, i))
return links
def get_weibo(self, link, index):
"""
        Scrape a certain weibo search result page on 'zhongsou' and store it locally.
"""
html_doc = open('{}Temp/weibo.txt'.format(WORKING_DIR), 'w',
encoding='utf8')
r = requests.get(link)
print('Accessing web data.')
html_doc.write(r.text)
html_doc.close()
h_post_text = []
h_frame = pandas.read_csv(OLD_MASTER_FILE)
h_df = DataFrame(h_frame)
for i in h_df.post_text:
h_post_text.append(i)
outfile_name = 'zhongsou_results_page_' + index + '.csv'
outfile = open('{}Temp/'.format(WORKING_DIR) + outfile_name, 'w',
encoding='utf8')
html_doc = open('{}Temp/weibo.txt'.format(WORKING_DIR), 'r',
encoding='utf8')
soup = BeautifulSoup(html_doc)
user_link = []
post_txt = []
post_link = []
post_time = []
cmd = None
weibo_items = soup.find_all('div', class_='weibo_item')
for item in weibo_items:
for link in item.find_all('a', target='_blank', class_='sina_weibo'
):
url = link.get('href')
post_link.append(url)
for post in item.find_all('h3', class_='weibo_title'):
for a in post.find_all('a'):
url = a.get('href')
user_link.append(url)
for time in item.find_all('div', class_='weibo_time'):
txt = time.get_text()
post_time.append(txt)
for post in item.find_all('p', class_='weibo_txt'):
txt = post.get_text()
post_txt.append(txt)
if txt == h_post_text[0]:
print(txt)
print(' ___ exists')
print('End of new data.')
del post_link[-1]
del user_link[-1]
del post_time[-1]
del post_txt[-1]
cmd = 'STOP'
break
data = {'post_text': post_txt, 'post_link': post_link, 'user':
user_link, 'time': post_time}
frame = DataFrame(data)
frame.to_csv(outfile, encoding='utf-8')
print(outfile_name, 'processed complete.')
outfile.close()
html_doc.close()
return cmd
def retrieve_posts(self, file_number_total):
"""(int)->a file
"""
post_text = []
for i in range(file_number_total + 1):
frame_2 = pandas.read_csv('{}Temp/zhongsou_results_page_{}.csv'
.format(WORKING_DIR, str(i)))
df2 = DataFrame(frame_2)
for i in df2.post_text:
post_text.append(i)
frame_1 = pandas.read_csv(OLD_MASTER_FILE)
df1 = DataFrame(frame_1)
for i in df1.post_text:
post_text.append(i)
data = {'post_text': post_text}
frame = DataFrame(data)
frame.to_csv('{}Text_data/{}_2.txt'.format(WORKING_DIR,
OUTPUT_FILE_NAME), encoding='utf-8')
frame.to_excel('{}Text_data/{}_2.xlsx'.format(WORKING_DIR,
OUTPUT_FILE_NAME), encoding='utf-8')
print('Data gathered.')
print('Temp files removed')
return None
<mask token>
| <mask token>
global QUERY_LINK
<mask token>
global OUTPUT_FILE_NAME
<mask token>
global WORKING_DIR
<mask token>
global OLD_MASTER_FILE
<mask token>
class NewScrape:
def scrape_main(self):
"""
Top-level function.
Use links from below, scrape a page, sleep for 5s, and restart on the next link.
"""
for i in self.gen_links():
index = str(self.gen_links().index(i))
link = i
self.get_weibo(link, index)
time.sleep(5)
self.retrieve_posts(OUTPUT_FILE_NAME)
print('=' * 10)
print('Congratulations! Your data is stored')
return None
def gen_links(self):
links = []
for i in range(1, 51):
i = str(i)
links.append('{}&b={}'.format(QUERY_LINK, i))
return links
def get_weibo(self, link, index):
"""
        Scrape a certain weibo search result page on 'zhongsou' and store it locally.
"""
html_doc = open('{}Temp/weibo.txt'.format(WORKING_DIR), 'w',
encoding='utf8')
r = requests.get(link)
print('accessing web data.')
html_doc.write(r.text)
html_doc.close()
outfile_name = 'zhongsou_results_page_' + index + '.csv'
outfile = open('{}Temp/'.format(WORKING_DIR) + outfile_name, 'w',
encoding='utf8')
html_doc = open('{}Temp/weibo.txt'.format(WORKING_DIR), 'r',
encoding='utf8')
soup = BeautifulSoup(html_doc)
user_link = []
post_txt = []
post_link = []
post_time = []
weibo_items = soup.find_all('div', class_='weibo_item')
for item in weibo_items:
for link in item.find_all('a', target='_blank', class_='sina_weibo'
):
url = link.get('href')
post_link.append(url)
for post in item.find_all('h3', class_='weibo_title'):
for a in post.find_all('a'):
url = a.get('href')
user_link.append(url)
for time in item.find_all('div', class_='weibo_time'):
txt = time.get_text()
post_time.append(txt)
for post in item.find_all('p', class_='weibo_txt'):
txt = post.get_text()
post_txt.append(txt)
data = {'post_text': post_txt, 'post_link': post_link, 'user':
user_link, 'time': post_time}
frame = DataFrame(data)
frame.to_csv(outfile, encoding='utf-8')
print(outfile_name, 'processed complete.')
outfile.close()
html_doc.close()
return None
def clean_temp(self):
filelist = glob.glob('{}Temp/*'.format(WORKING_DIR))
for f in filelist:
os.remove(f)
print('Temp files removed')
return None
def retrieve_posts(self, outfile_name):
"""(str)->a file
"""
post_text = []
for i in range(50):
frame_2 = pandas.read_csv('{}Temp/zhongsou_results_page_{}.csv'
.format(WORKING_DIR, str(i)))
df2 = DataFrame(frame_2)
for i in df2.post_text:
post_text.append(i)
data = {'post_text': post_text}
frame = DataFrame(data)
frame.to_csv('{}Text_data/{}.txt'.format(WORKING_DIR, outfile_name),
encoding='utf-8')
frame.to_excel('{}Text_data/{}.xlsx'.format(WORKING_DIR,
outfile_name), encoding='utf-8')
print('Done')
return None
class ContinueScrape:
def scrape_main(self):
"""
Top-level function.
Use links from below, scrape a page, sleep for 5s, and restart on the next link.
"""
for i in self.gen_links():
index = str(self.gen_links().index(i))
link = i
cmd = self.get_weibo(link, index)
if cmd == 'STOP':
break
else:
time.sleep(10)
continue
print('=' * 10)
print('Scrape is now complete. Help me to organize them.')
print(
'View your temp folder, what is the biggest number of the files? \n'
)
fn = int(input())
self.retrieve_posts(fn)
print('=' * 10)
print('Congratulations! Your data is stored')
return
def gen_links(self):
links = []
for i in range(1, 51):
i = str(i)
links.append('{}&b={}'.format(QUERY_LINK, i))
return links
def get_weibo(self, link, index):
"""
        Scrape a certain weibo search result page on 'zhongsou' and store it locally.
"""
html_doc = open('{}Temp/weibo.txt'.format(WORKING_DIR), 'w',
encoding='utf8')
r = requests.get(link)
print('Accessing web data.')
html_doc.write(r.text)
html_doc.close()
h_post_text = []
h_frame = pandas.read_csv(OLD_MASTER_FILE)
h_df = DataFrame(h_frame)
for i in h_df.post_text:
h_post_text.append(i)
outfile_name = 'zhongsou_results_page_' + index + '.csv'
outfile = open('{}Temp/'.format(WORKING_DIR) + outfile_name, 'w',
encoding='utf8')
html_doc = open('{}Temp/weibo.txt'.format(WORKING_DIR), 'r',
encoding='utf8')
soup = BeautifulSoup(html_doc)
user_link = []
post_txt = []
post_link = []
post_time = []
cmd = None
weibo_items = soup.find_all('div', class_='weibo_item')
for item in weibo_items:
for link in item.find_all('a', target='_blank', class_='sina_weibo'
):
url = link.get('href')
post_link.append(url)
for post in item.find_all('h3', class_='weibo_title'):
for a in post.find_all('a'):
url = a.get('href')
user_link.append(url)
for time in item.find_all('div', class_='weibo_time'):
txt = time.get_text()
post_time.append(txt)
for post in item.find_all('p', class_='weibo_txt'):
txt = post.get_text()
post_txt.append(txt)
if txt == h_post_text[0]:
print(txt)
print(' ___ exists')
print('End of new data.')
del post_link[-1]
del user_link[-1]
del post_time[-1]
del post_txt[-1]
cmd = 'STOP'
break
data = {'post_text': post_txt, 'post_link': post_link, 'user':
user_link, 'time': post_time}
frame = DataFrame(data)
frame.to_csv(outfile, encoding='utf-8')
print(outfile_name, 'processed complete.')
outfile.close()
html_doc.close()
return cmd
def retrieve_posts(self, file_number_total):
"""(int)->a file
"""
post_text = []
for i in range(file_number_total + 1):
frame_2 = pandas.read_csv('{}Temp/zhongsou_results_page_{}.csv'
.format(WORKING_DIR, str(i)))
df2 = DataFrame(frame_2)
for i in df2.post_text:
post_text.append(i)
frame_1 = pandas.read_csv(OLD_MASTER_FILE)
df1 = DataFrame(frame_1)
for i in df1.post_text:
post_text.append(i)
data = {'post_text': post_text}
frame = DataFrame(data)
frame.to_csv('{}Text_data/{}_2.txt'.format(WORKING_DIR,
OUTPUT_FILE_NAME), encoding='utf-8')
frame.to_excel('{}Text_data/{}_2.xlsx'.format(WORKING_DIR,
OUTPUT_FILE_NAME), encoding='utf-8')
print('Data gathered.')
print('Temp files removed')
return None
print('=' * 10)
print(
"""This program will help you collect Weibo language data as generated by the 中搜 search results.
"""
)
print(
"""Use this page to generate a link for your query item:
http://t.zhongsou.com/wb?form_id=1&org=1&sel=0&so=1&v=%D6%D0%CB%D1&w=%CD%F8%D3%EF"""
)
<mask token>
if resp == 'Y':
print()
print('=' * 10)
print('Initialize scraping now.')
print('=' * 10)
NewScrape().scrape_main()
elif resp == 'N':
OLD_MASTER_FILE = input(
"""
Where is the old txt file you want to merge later? Please paste full path.
> """
)
print()
print('=' * 10)
print('WARNING: FURTHER ACTIONS NEEDED AT THE END OF SCRAPING.')
print('Initialize scraping now.')
print('=' * 10)
ContinueScrape().scrape_main()
else:
print('Invalid command. Try again.')
| <mask token>
global QUERY_LINK
QUERY_LINK = (
'http://t.zhongsou.com/wb?form_id=1&org=1&sel=0&so=1&v=%D6%D0%CB%D1&w=%B1%C6'
)
global OUTPUT_FILE_NAME
OUTPUT_FILE_NAME = 'scrape'
global WORKING_DIR
WORKING_DIR = '~/Corpora/'
global OLD_MASTER_FILE
OLD_MASTER_FILE = '{}Text_data/'.format(WORKING_DIR) + 'yeshizuile.txt'
class NewScrape:
def scrape_main(self):
"""
Top-level function.
Use links from below, scrape a page, sleep for 5s, and restart on the next link.
"""
for i in self.gen_links():
index = str(self.gen_links().index(i))
link = i
self.get_weibo(link, index)
time.sleep(5)
self.retrieve_posts(OUTPUT_FILE_NAME)
print('=' * 10)
print('Congratulations! Your data is stored')
return None
def gen_links(self):
links = []
for i in range(1, 51):
i = str(i)
links.append('{}&b={}'.format(QUERY_LINK, i))
return links
def get_weibo(self, link, index):
"""
        Scrape a certain weibo search result page on 'zhongsou' and store it locally.
"""
html_doc = open('{}Temp/weibo.txt'.format(WORKING_DIR), 'w',
encoding='utf8')
r = requests.get(link)
print('accessing web data.')
html_doc.write(r.text)
html_doc.close()
outfile_name = 'zhongsou_results_page_' + index + '.csv'
outfile = open('{}Temp/'.format(WORKING_DIR) + outfile_name, 'w',
encoding='utf8')
html_doc = open('{}Temp/weibo.txt'.format(WORKING_DIR), 'r',
encoding='utf8')
soup = BeautifulSoup(html_doc)
user_link = []
post_txt = []
post_link = []
post_time = []
weibo_items = soup.find_all('div', class_='weibo_item')
for item in weibo_items:
for link in item.find_all('a', target='_blank', class_='sina_weibo'
):
url = link.get('href')
post_link.append(url)
for post in item.find_all('h3', class_='weibo_title'):
for a in post.find_all('a'):
url = a.get('href')
user_link.append(url)
for time in item.find_all('div', class_='weibo_time'):
txt = time.get_text()
post_time.append(txt)
for post in item.find_all('p', class_='weibo_txt'):
txt = post.get_text()
post_txt.append(txt)
data = {'post_text': post_txt, 'post_link': post_link, 'user':
user_link, 'time': post_time}
frame = DataFrame(data)
frame.to_csv(outfile, encoding='utf-8')
print(outfile_name, 'processed complete.')
outfile.close()
html_doc.close()
return None
def clean_temp(self):
filelist = glob.glob('{}Temp/*'.format(WORKING_DIR))
for f in filelist:
os.remove(f)
print('Temp files removed')
return None
def retrieve_posts(self, outfile_name):
"""(str)->a file
"""
post_text = []
for i in range(50):
frame_2 = pandas.read_csv('{}Temp/zhongsou_results_page_{}.csv'
.format(WORKING_DIR, str(i)))
df2 = DataFrame(frame_2)
for i in df2.post_text:
post_text.append(i)
data = {'post_text': post_text}
frame = DataFrame(data)
frame.to_csv('{}Text_data/{}.txt'.format(WORKING_DIR, outfile_name),
encoding='utf-8')
frame.to_excel('{}Text_data/{}.xlsx'.format(WORKING_DIR,
outfile_name), encoding='utf-8')
print('Done')
return None
class ContinueScrape:
def scrape_main(self):
"""
Top-level function.
Use links from below, scrape a page, sleep for 5s, and restart on the next link.
"""
for i in self.gen_links():
index = str(self.gen_links().index(i))
link = i
cmd = self.get_weibo(link, index)
if cmd == 'STOP':
break
else:
time.sleep(10)
continue
print('=' * 10)
print('Scrape is now complete. Help me to organize them.')
print(
'View your temp folder, what is the biggest number of the files? \n'
)
fn = int(input())
self.retrieve_posts(fn)
print('=' * 10)
print('Congratulations! Your data is stored')
return
def gen_links(self):
links = []
for i in range(1, 51):
i = str(i)
links.append('{}&b={}'.format(QUERY_LINK, i))
return links
def get_weibo(self, link, index):
"""
        Scrape a certain weibo search result page on 'zhongsou' and store it locally.
"""
html_doc = open('{}Temp/weibo.txt'.format(WORKING_DIR), 'w',
encoding='utf8')
r = requests.get(link)
print('Accessing web data.')
html_doc.write(r.text)
html_doc.close()
h_post_text = []
h_frame = pandas.read_csv(OLD_MASTER_FILE)
h_df = DataFrame(h_frame)
for i in h_df.post_text:
h_post_text.append(i)
outfile_name = 'zhongsou_results_page_' + index + '.csv'
outfile = open('{}Temp/'.format(WORKING_DIR) + outfile_name, 'w',
encoding='utf8')
html_doc = open('{}Temp/weibo.txt'.format(WORKING_DIR), 'r',
encoding='utf8')
soup = BeautifulSoup(html_doc)
user_link = []
post_txt = []
post_link = []
post_time = []
cmd = None
weibo_items = soup.find_all('div', class_='weibo_item')
for item in weibo_items:
for link in item.find_all('a', target='_blank', class_='sina_weibo'
):
url = link.get('href')
post_link.append(url)
for post in item.find_all('h3', class_='weibo_title'):
for a in post.find_all('a'):
url = a.get('href')
user_link.append(url)
for time in item.find_all('div', class_='weibo_time'):
txt = time.get_text()
post_time.append(txt)
for post in item.find_all('p', class_='weibo_txt'):
txt = post.get_text()
post_txt.append(txt)
if txt == h_post_text[0]:
print(txt)
print(' ___ exists')
print('End of new data.')
del post_link[-1]
del user_link[-1]
del post_time[-1]
del post_txt[-1]
cmd = 'STOP'
break
data = {'post_text': post_txt, 'post_link': post_link, 'user':
user_link, 'time': post_time}
frame = DataFrame(data)
frame.to_csv(outfile, encoding='utf-8')
print(outfile_name, 'processed complete.')
outfile.close()
html_doc.close()
return cmd
def retrieve_posts(self, file_number_total):
"""(int)->a file
"""
post_text = []
for i in range(file_number_total + 1):
frame_2 = pandas.read_csv('{}Temp/zhongsou_results_page_{}.csv'
.format(WORKING_DIR, str(i)))
df2 = DataFrame(frame_2)
for i in df2.post_text:
post_text.append(i)
frame_1 = pandas.read_csv(OLD_MASTER_FILE)
df1 = DataFrame(frame_1)
for i in df1.post_text:
post_text.append(i)
data = {'post_text': post_text}
frame = DataFrame(data)
frame.to_csv('{}Text_data/{}_2.txt'.format(WORKING_DIR,
OUTPUT_FILE_NAME), encoding='utf-8')
frame.to_excel('{}Text_data/{}_2.xlsx'.format(WORKING_DIR,
OUTPUT_FILE_NAME), encoding='utf-8')
print('Data gathered.')
print('Temp files removed')
return None
print('=' * 10)
print(
"""This program will help you collect Weibo language data as generated by the 中搜 search results.
"""
)
print(
"""Use this page to generate a link for your query item:
http://t.zhongsou.com/wb?form_id=1&org=1&sel=0&so=1&v=%D6%D0%CB%D1&w=%CD%F8%D3%EF"""
)
QUERY_LINK = input("""
Paste your query link
> """)
OUTPUT_FILE_NAME = input(
"""
What's your query term? (This will be used as file name)
> """)
resp = input("""
Is this your first time running this query? Y/N
> """).upper()
if resp == 'Y':
print()
print('=' * 10)
print('Initialize scraping now.')
print('=' * 10)
NewScrape().scrape_main()
elif resp == 'N':
OLD_MASTER_FILE = input(
"""
Where is the old txt file you want to merge later? Please paste full path.
> """
)
print()
print('=' * 10)
print('WARNING: FURTHER ACTIONS NEEDED AT THE END OF SCRAPING.')
print('Initialize scraping now.')
print('=' * 10)
ContinueScrape().scrape_main()
else:
print('Invalid command. Try again.')
| <mask token>
import requests
from bs4 import BeautifulSoup
from pandas import DataFrame
import time
import pandas
import glob, os
global QUERY_LINK
QUERY_LINK = (
'http://t.zhongsou.com/wb?form_id=1&org=1&sel=0&so=1&v=%D6%D0%CB%D1&w=%B1%C6'
)
global OUTPUT_FILE_NAME
OUTPUT_FILE_NAME = 'scrape'
global WORKING_DIR
WORKING_DIR = '~/Corpora/'
global OLD_MASTER_FILE
OLD_MASTER_FILE = '{}Text_data/'.format(WORKING_DIR) + 'yeshizuile.txt'
class NewScrape:
def scrape_main(self):
"""
Top-level function.
Use links from below, scrape a page, sleep for 5s, and restart on the next link.
"""
for i in self.gen_links():
index = str(self.gen_links().index(i))
link = i
self.get_weibo(link, index)
time.sleep(5)
self.retrieve_posts(OUTPUT_FILE_NAME)
print('=' * 10)
print('Congratulations! Your data is stored')
return None
def gen_links(self):
links = []
for i in range(1, 51):
i = str(i)
links.append('{}&b={}'.format(QUERY_LINK, i))
return links
def get_weibo(self, link, index):
"""
        Scrape a certain weibo search result page on 'zhongsou' and store it locally.
"""
html_doc = open('{}Temp/weibo.txt'.format(WORKING_DIR), 'w',
encoding='utf8')
r = requests.get(link)
print('accessing web data.')
html_doc.write(r.text)
html_doc.close()
outfile_name = 'zhongsou_results_page_' + index + '.csv'
outfile = open('{}Temp/'.format(WORKING_DIR) + outfile_name, 'w',
encoding='utf8')
html_doc = open('{}Temp/weibo.txt'.format(WORKING_DIR), 'r',
encoding='utf8')
soup = BeautifulSoup(html_doc)
user_link = []
post_txt = []
post_link = []
post_time = []
weibo_items = soup.find_all('div', class_='weibo_item')
for item in weibo_items:
for link in item.find_all('a', target='_blank', class_='sina_weibo'
):
url = link.get('href')
post_link.append(url)
for post in item.find_all('h3', class_='weibo_title'):
for a in post.find_all('a'):
url = a.get('href')
user_link.append(url)
for time in item.find_all('div', class_='weibo_time'):
txt = time.get_text()
post_time.append(txt)
for post in item.find_all('p', class_='weibo_txt'):
txt = post.get_text()
post_txt.append(txt)
data = {'post_text': post_txt, 'post_link': post_link, 'user':
user_link, 'time': post_time}
frame = DataFrame(data)
frame.to_csv(outfile, encoding='utf-8')
print(outfile_name, 'processed complete.')
outfile.close()
html_doc.close()
return None
def clean_temp(self):
filelist = glob.glob('{}Temp/*'.format(WORKING_DIR))
for f in filelist:
os.remove(f)
print('Temp files removed')
return None
def retrieve_posts(self, outfile_name):
"""(str)->a file
"""
post_text = []
for i in range(50):
frame_2 = pandas.read_csv('{}Temp/zhongsou_results_page_{}.csv'
.format(WORKING_DIR, str(i)))
df2 = DataFrame(frame_2)
for i in df2.post_text:
post_text.append(i)
data = {'post_text': post_text}
frame = DataFrame(data)
frame.to_csv('{}Text_data/{}.txt'.format(WORKING_DIR, outfile_name),
encoding='utf-8')
frame.to_excel('{}Text_data/{}.xlsx'.format(WORKING_DIR,
outfile_name), encoding='utf-8')
print('Done')
return None
class ContinueScrape:
def scrape_main(self):
"""
Top-level function.
Use links from below, scrape a page, sleep for 5s, and restart on the next link.
"""
for i in self.gen_links():
index = str(self.gen_links().index(i))
link = i
cmd = self.get_weibo(link, index)
if cmd == 'STOP':
break
else:
time.sleep(10)
continue
print('=' * 10)
print('Scrape is now complete. Help me to organize them.')
print(
'View your temp folder, what is the biggest number of the files? \n'
)
fn = int(input())
self.retrieve_posts(fn)
print('=' * 10)
print('Congratulations! Your data is stored')
return
def gen_links(self):
links = []
for i in range(1, 51):
i = str(i)
links.append('{}&b={}'.format(QUERY_LINK, i))
return links
def get_weibo(self, link, index):
"""
        Scrape a certain weibo search result page on 'zhongsou' and store it locally.
"""
html_doc = open('{}Temp/weibo.txt'.format(WORKING_DIR), 'w',
encoding='utf8')
r = requests.get(link)
print('Accessing web data.')
html_doc.write(r.text)
html_doc.close()
h_post_text = []
h_frame = pandas.read_csv(OLD_MASTER_FILE)
h_df = DataFrame(h_frame)
for i in h_df.post_text:
h_post_text.append(i)
outfile_name = 'zhongsou_results_page_' + index + '.csv'
outfile = open('{}Temp/'.format(WORKING_DIR) + outfile_name, 'w',
encoding='utf8')
html_doc = open('{}Temp/weibo.txt'.format(WORKING_DIR), 'r',
encoding='utf8')
soup = BeautifulSoup(html_doc)
user_link = []
post_txt = []
post_link = []
post_time = []
cmd = None
weibo_items = soup.find_all('div', class_='weibo_item')
for item in weibo_items:
for link in item.find_all('a', target='_blank', class_='sina_weibo'
):
url = link.get('href')
post_link.append(url)
for post in item.find_all('h3', class_='weibo_title'):
for a in post.find_all('a'):
url = a.get('href')
user_link.append(url)
for time in item.find_all('div', class_='weibo_time'):
txt = time.get_text()
post_time.append(txt)
for post in item.find_all('p', class_='weibo_txt'):
txt = post.get_text()
post_txt.append(txt)
if txt == h_post_text[0]:
print(txt)
print(' ___ exists')
print('End of new data.')
del post_link[-1]
del user_link[-1]
del post_time[-1]
del post_txt[-1]
cmd = 'STOP'
break
data = {'post_text': post_txt, 'post_link': post_link, 'user':
user_link, 'time': post_time}
frame = DataFrame(data)
frame.to_csv(outfile, encoding='utf-8')
print(outfile_name, 'processed complete.')
outfile.close()
html_doc.close()
return cmd
def retrieve_posts(self, file_number_total):
"""(int)->a file
"""
post_text = []
for i in range(file_number_total + 1):
frame_2 = pandas.read_csv('{}Temp/zhongsou_results_page_{}.csv'
.format(WORKING_DIR, str(i)))
df2 = DataFrame(frame_2)
for i in df2.post_text:
post_text.append(i)
frame_1 = pandas.read_csv(OLD_MASTER_FILE)
df1 = DataFrame(frame_1)
for i in df1.post_text:
post_text.append(i)
data = {'post_text': post_text}
frame = DataFrame(data)
frame.to_csv('{}Text_data/{}_2.txt'.format(WORKING_DIR,
OUTPUT_FILE_NAME), encoding='utf-8')
frame.to_excel('{}Text_data/{}_2.xlsx'.format(WORKING_DIR,
OUTPUT_FILE_NAME), encoding='utf-8')
print('Data gathered.')
print('Temp files removed')
return None
print('=' * 10)
print(
"""This program will help you collect Weibo language data as generated by the 中搜 search results.
"""
)
print(
"""Use this page to generate a link for your query item:
http://t.zhongsou.com/wb?form_id=1&org=1&sel=0&so=1&v=%D6%D0%CB%D1&w=%CD%F8%D3%EF"""
)
QUERY_LINK = input("""
Paste your query link
> """)
OUTPUT_FILE_NAME = input(
"""
What's your query term? (This will be used as file name)
> """)
resp = input("""
Is this your first time running this query? Y/N
> """).upper()
if resp == 'Y':
print()
print('=' * 10)
print('Initialize scraping now.')
print('=' * 10)
NewScrape().scrape_main()
elif resp == 'N':
OLD_MASTER_FILE = input(
"""
Where is the old txt file you want to merge later? Please paste full path.
> """
)
print()
print('=' * 10)
print('WARNING: FURTHER ACTIONS NEEDED AT THE END OF SCRAPING.')
print('Initialize scraping now.')
print('=' * 10)
ContinueScrape().scrape_main()
else:
print('Invalid command. Try again.')
| '''
Author: Iris Peng. Date: Feb 21, 2016
Usage: Scrape Weibo posts from Zhongsou for the first time for a query
In the terminal, type
$ python3 scrape_weibo.py
and follow the prompts
'''
import requests
from bs4 import BeautifulSoup
from pandas import DataFrame
import time
import pandas
import glob, os
global QUERY_LINK
QUERY_LINK = 'http://t.zhongsou.com/wb?form_id=1&org=1&sel=0&so=1&v=%D6%D0%CB%D1&w=%B1%C6'#link
global OUTPUT_FILE_NAME
OUTPUT_FILE_NAME = 'scrape' # Name of your output file
global WORKING_DIR
WORKING_DIR = '~/Corpora/'
global OLD_MASTER_FILE
OLD_MASTER_FILE = '{}Text_data/'.format(WORKING_DIR) + 'yeshizuile.txt' #Feed the new output
class NewScrape():
def scrape_main(self):
'''
Top-level function.
Use links from below, scrape a page, sleep for 5s, and restart on the next link.
'''
for i in self.gen_links():
index = str(self.gen_links().index(i))
link = i
self.get_weibo(link,index)
time.sleep(5)
self.retrieve_posts(OUTPUT_FILE_NAME)
#self.clean_temp()
print('='*10)
print('Congratulations! Your data is stored')
return None
def gen_links(self):
links = []
for i in range(1,51):
i = str(i)
links.append('{}&b={}'.format(QUERY_LINK,i))
return links
def get_weibo(self,link,index):
'''
        Scrape a certain weibo search result page on 'zhongsou' and store it locally.
'''
html_doc = open('{}Temp/weibo.txt'.format(WORKING_DIR),'w', encoding = 'utf8')
r = requests.get(link)
print ('accessing web data.')
html_doc.write(r.text)
html_doc.close()
# Write into a csv file
outfile_name = 'zhongsou_results_page_' + index + '.csv'
outfile = open('{}Temp/'.format(WORKING_DIR) + outfile_name,'w', encoding = 'utf8') #change path
# Turn the text into a BeautifulSoup object and strip down the text.
html_doc = open('{}Temp/weibo.txt'.format(WORKING_DIR),'r', encoding = 'utf8')#change path
soup = BeautifulSoup(html_doc)
user_link = []
post_txt = []
post_link = []
post_time = []
weibo_items = soup.find_all('div', class_='weibo_item')
for item in weibo_items:
for link in item.find_all('a', target='_blank', class_='sina_weibo'):
url = link.get('href')
post_link.append(url)
for post in item.find_all('h3', class_='weibo_title'):
for a in post.find_all('a'):
url = a.get('href')
user_link.append(url)
for time in item.find_all('div', class_='weibo_time'):
txt = time.get_text()
post_time.append(txt)
for post in item.find_all('p', class_='weibo_txt'):
txt = post.get_text()
post_txt.append(txt)
data = {'post_text':post_txt,'post_link':post_link,'user':user_link, 'time':post_time}
frame = DataFrame(data)
frame.to_csv(outfile, encoding='utf-8')
print (outfile_name,'processed complete.')
outfile.close()
html_doc.close()
return None
def clean_temp(self):
filelist = glob.glob('{}Temp/*'.format(WORKING_DIR))
for f in filelist:
os.remove(f)
print('Temp files removed')
return None
def retrieve_posts(self,outfile_name):
'''(str)->a file
'''
post_text = []
for i in range(50):
frame_2 = pandas.read_csv('{}Temp/zhongsou_results_page_{}.csv'.format(WORKING_DIR, str(i)))#change directory
df2 = DataFrame(frame_2)
for i in df2.post_text:#the column'post_text'
post_text.append(i)
data = {'post_text':post_text}
frame = DataFrame(data)
frame.to_csv('{}Text_data/{}.txt'.format(WORKING_DIR, outfile_name), encoding = 'utf-8')#change saved path
frame.to_excel('{}Text_data/{}.xlsx'.format(WORKING_DIR, outfile_name), encoding = 'utf-8')#change saved path
print("Done")
return None
class ContinueScrape():
def scrape_main(self):
'''
Top-level function.
Use links from below, scrape a page, sleep for 5s, and restart on the next link.
'''
for i in self.gen_links():
index = str(self.gen_links().index(i))
link = i
cmd = self.get_weibo(link,index)
if cmd == 'STOP':
break
else:
time.sleep(10)
continue
print('='*10)
print('Scrape is now complete. Help me to organize them.')
print ('View your temp folder, what is the biggest number of the files? \n')
fn = int(input())
self.retrieve_posts(fn)
print('='*10)
print('Congratulations! Your data is stored')
return
def gen_links(self):
links = []
for i in range(1,51):
i = str(i)
links.append('{}&b={}'.format(QUERY_LINK,i))
return links
def get_weibo(self,link,index):
'''
        Scrape a certain weibo search result page on 'zhongsou' and store it locally.
'''
html_doc = open('{}Temp/weibo.txt'.format(WORKING_DIR), 'w', encoding='utf8')
r = requests.get(link)
print ('Accessing web data.')
html_doc.write(r.text)
html_doc.close()
# Retrieve scrape history
h_post_text = []
h_frame = pandas.read_csv(OLD_MASTER_FILE)
h_df = DataFrame(h_frame)
for i in h_df.post_text:
h_post_text.append(i)
# Write into a csv file
outfile_name = 'zhongsou_results_page_' + index + '.csv'
outfile = open('{}Temp/'.format(WORKING_DIR)+ outfile_name,'w', encoding = 'utf8') #change path
# Turn the text into a BeautifulSoup object and strip down the text.
html_doc = open('{}Temp/weibo.txt'.format(WORKING_DIR), 'r', encoding='utf8')
soup = BeautifulSoup(html_doc)
user_link = []
post_txt = []
post_link = []
post_time = []
cmd = None
weibo_items = soup.find_all('div', class_='weibo_item')
for item in weibo_items:
for link in item.find_all('a', target='_blank', class_='sina_weibo'):
url = link.get('href')
post_link.append(url)
for post in item.find_all('h3', class_='weibo_title'):
for a in post.find_all('a'):
url = a.get('href')
user_link.append(url)
for time in item.find_all('div', class_='weibo_time'):
txt = time.get_text()
post_time.append(txt)
for post in item.find_all('p', class_='weibo_txt'):
txt = post.get_text()
post_txt.append(txt)
#has bugs!
#if txt in h_post_text:
if txt == h_post_text[0]:
print (txt)
print(' ___ exists')
print ('End of new data.') #Doesn't affect main function, break should be in main function
del post_link[-1]
del user_link[-1]
del post_time[-1]
del post_txt[-1]
cmd = 'STOP'
break
data = {'post_text':post_txt,'post_link':post_link,'user':user_link, 'time':post_time}
frame = DataFrame(data)
frame.to_csv(outfile, encoding='utf-8')
print (outfile_name,'processed complete.')
outfile.close()
html_doc.close()
return cmd
def retrieve_posts(self,file_number_total):
'''(int)->a file
'''
post_text = []
for i in range(file_number_total+1):
frame_2 = pandas.read_csv('{}Temp/zhongsou_results_page_{}.csv'.format(WORKING_DIR, str(i)))
df2 = DataFrame(frame_2)
for i in df2.post_text:#the column'post_text'
post_text.append(i)
frame_1 = pandas.read_csv(OLD_MASTER_FILE)
df1 = DataFrame(frame_1)
for i in df1.post_text:
post_text.append(i)
data = {'post_text':post_text}
frame = DataFrame(data)
frame.to_csv('{}Text_data/{}_2.txt'.format(WORKING_DIR, OUTPUT_FILE_NAME), encoding = 'utf-8')#saved path
frame.to_excel('{}Text_data/{}_2.xlsx'.format(WORKING_DIR, OUTPUT_FILE_NAME), encoding = 'utf-8')#saved path
print("Data gathered.")
## filelist = glob.glob('{}Temp/*'.format(WORKING_DIR))
## for f in filelist:
## os.remove(f)
#os.remove(OLD_MASTER_FILE)
print('Temp files removed')
return None
print('='*10)
print('This program will help you collect Weibo language data as generated by the 中搜 search results.\n')
print('Use this page to generate a link for your query item:\n\nhttp://t.zhongsou.com/wb?form_id=1&org=1&sel=0&so=1&v=%D6%D0%CB%D1&w=%CD%F8%D3%EF')
QUERY_LINK = input('\nPaste your query link \n> ')
OUTPUT_FILE_NAME = input('\nWhat\'s your query term? (This will be used as file name)\n> ')
resp = input('\nIs this your first time running this query? Y/N\n> ').upper()
if resp == 'Y':
print()
print('='*10)
print('Initialize scraping now.')
print('='*10)
NewScrape().scrape_main()
elif resp == 'N':
OLD_MASTER_FILE = input('\nWhere is the old txt file you want to merge later? Please paste full path. \n> ')
print()
print('='*10)
print('WARNING: FURTHER ACTIONS NEEDED AT THE END OF SCRAPING.')
print('Initialize scraping now.')
print('='*10)
ContinueScrape().scrape_main()
else:
print('Invalid command. Try again.')
| [
9,
12,
13,
14,
15
] |
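The scraper above collects post text, post links, user links and timestamps from each 中搜 result page with BeautifulSoup and writes them out through pandas. A minimal, self-contained sketch of the same extraction pattern follows; the HTML snippet and its class names are hypothetical stand-ins shaped like the markup the script expects, not the live page.

from bs4 import BeautifulSoup
from pandas import DataFrame

# Hypothetical fragment shaped like the markup the script looks for.
sample_html = '''
<div class="weibo_item">
  <h3 class="weibo_title"><a href="http://weibo.com/u/1">user one</a></h3>
  <p class="weibo_txt">example post text</p>
  <div class="weibo_time">2016-02-21</div>
  <a target="_blank" class="sina_weibo" href="http://weibo.com/p/1">post</a>
</div>
'''

soup = BeautifulSoup(sample_html, 'html.parser')
rows = []
for item in soup.find_all('div', class_='weibo_item'):
    rows.append({
        'post_text': item.find('p', class_='weibo_txt').get_text(),
        'post_link': item.find('a', class_='sina_weibo').get('href'),
        'user': item.find('h3', class_='weibo_title').a.get('href'),
        'time': item.find('div', class_='weibo_time').get_text(),
    })
print(DataFrame(rows))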
1,452 | feb912ac899208618f00c894458c1fda7a402652 | <mask token>
def possi(y, x):
global n
if y < 0 or y >= n or x < 0 or x >= n or B[y][x]:
return False
return True
def move(d, ay, ax, by, bx):
ay += D[d][0]
by += D[d][0]
ax += D[d][1]
bx += D[d][1]
if possi(ay, ax) and possi(by, bx):
return True
return False
def rotate(y, x, tail, dest):
if possi(y + D[dest][0], x + D[dest][1]) and possi(y + D[tail][0] + D[
dest][0], x + D[tail][1] + D[dest][1]):
return True
return False
<mask token>
def solution(b):
global n, B
B = [el[:] for el in b]
n = len(b)
dp = [[([1000000000.0] * 4) for _ in range(n)] for _ in range(n)]
dp[0][1][3] = 0
answer = bfs(dp)
return answer
| <mask token>
def possi(y, x):
global n
if y < 0 or y >= n or x < 0 or x >= n or B[y][x]:
return False
return True
def move(d, ay, ax, by, bx):
ay += D[d][0]
by += D[d][0]
ax += D[d][1]
bx += D[d][1]
if possi(ay, ax) and possi(by, bx):
return True
return False
def rotate(y, x, tail, dest):
if possi(y + D[dest][0], x + D[dest][1]) and possi(y + D[tail][0] + D[
dest][0], x + D[tail][1] + D[dest][1]):
return True
return False
def bfs(dp):
q = deque()
q.append((0, 1, 3, 0))
while q:
y, x, tail, step = q.popleft()
ty, tx = y + D[tail][0], x + D[tail][1]
for d in range(4):
if move(d, y, x, ty, tx):
ny = y + D[d][0]
nx = x + D[d][1]
if step + 1 < dp[ny][nx][tail]:
dp[ny][nx][tail] = step + 1
q.append((ny, nx, tail, step + 1))
if d % 2 == tail % 2:
continue
if rotate(y, x, tail, d):
if step + 1 < dp[y][x][d]:
dp[y][x][d] = step + 1
q.append((y, x, d, step + 1))
y, x, ty, tx = ty, tx, y, x
if rotate(y, x, (tail + 2) % 4, d):
if step + 1 < dp[y][x][d]:
dp[y][x][d] = step + 1
q.append((y, x, d, step + 1))
y, x, ty, tx = ty, tx, y, x
global up, down, right, left
cs = [1000000000.0] * 4
cs[0] = dp[n - 1][n - 1][up]
cs[1] = dp[n - 1][n - 1][left]
cs[2] = dp[n - 2][n - 1][down]
cs[3] = dp[n - 1][n - 2][right]
return min(cs)
def solution(b):
global n, B
B = [el[:] for el in b]
n = len(b)
dp = [[([1000000000.0] * 4) for _ in range(n)] for _ in range(n)]
dp[0][1][3] = 0
answer = bfs(dp)
return answer
| <mask token>
n = -1
D = [(-1, 0), (0, 1), (1, 0), (0, -1)]
B = -1
up = 0
right = 1
down = 2
left = 3
dic = {}
dic[0] = 'up'
dic[1] = 'right'
dic[2] = 'down'
dic[3] = 'left'
def possi(y, x):
global n
if y < 0 or y >= n or x < 0 or x >= n or B[y][x]:
return False
return True
def move(d, ay, ax, by, bx):
ay += D[d][0]
by += D[d][0]
ax += D[d][1]
bx += D[d][1]
if possi(ay, ax) and possi(by, bx):
return True
return False
def rotate(y, x, tail, dest):
if possi(y + D[dest][0], x + D[dest][1]) and possi(y + D[tail][0] + D[
dest][0], x + D[tail][1] + D[dest][1]):
return True
return False
def bfs(dp):
q = deque()
q.append((0, 1, 3, 0))
while q:
y, x, tail, step = q.popleft()
ty, tx = y + D[tail][0], x + D[tail][1]
for d in range(4):
if move(d, y, x, ty, tx):
ny = y + D[d][0]
nx = x + D[d][1]
if step + 1 < dp[ny][nx][tail]:
dp[ny][nx][tail] = step + 1
q.append((ny, nx, tail, step + 1))
if d % 2 == tail % 2:
continue
if rotate(y, x, tail, d):
if step + 1 < dp[y][x][d]:
dp[y][x][d] = step + 1
q.append((y, x, d, step + 1))
y, x, ty, tx = ty, tx, y, x
if rotate(y, x, (tail + 2) % 4, d):
if step + 1 < dp[y][x][d]:
dp[y][x][d] = step + 1
q.append((y, x, d, step + 1))
y, x, ty, tx = ty, tx, y, x
global up, down, right, left
cs = [1000000000.0] * 4
cs[0] = dp[n - 1][n - 1][up]
cs[1] = dp[n - 1][n - 1][left]
cs[2] = dp[n - 2][n - 1][down]
cs[3] = dp[n - 1][n - 2][right]
return min(cs)
def solution(b):
global n, B
B = [el[:] for el in b]
n = len(b)
dp = [[([1000000000.0] * 4) for _ in range(n)] for _ in range(n)]
dp[0][1][3] = 0
answer = bfs(dp)
return answer
| from collections import deque
n = -1
D = [(-1, 0), (0, 1), (1, 0), (0, -1)]
B = -1
up = 0
right = 1
down = 2
left = 3
dic = {}
dic[0] = 'up'
dic[1] = 'right'
dic[2] = 'down'
dic[3] = 'left'
def possi(y, x):
global n
if y < 0 or y >= n or x < 0 or x >= n or B[y][x]:
return False
return True
def move(d, ay, ax, by, bx):
ay += D[d][0]
by += D[d][0]
ax += D[d][1]
bx += D[d][1]
if possi(ay, ax) and possi(by, bx):
return True
return False
def rotate(y, x, tail, dest):
if possi(y + D[dest][0], x + D[dest][1]) and possi(y + D[tail][0] + D[
dest][0], x + D[tail][1] + D[dest][1]):
return True
return False
def bfs(dp):
q = deque()
q.append((0, 1, 3, 0))
while q:
y, x, tail, step = q.popleft()
ty, tx = y + D[tail][0], x + D[tail][1]
for d in range(4):
if move(d, y, x, ty, tx):
ny = y + D[d][0]
nx = x + D[d][1]
if step + 1 < dp[ny][nx][tail]:
dp[ny][nx][tail] = step + 1
q.append((ny, nx, tail, step + 1))
if d % 2 == tail % 2:
continue
if rotate(y, x, tail, d):
if step + 1 < dp[y][x][d]:
dp[y][x][d] = step + 1
q.append((y, x, d, step + 1))
y, x, ty, tx = ty, tx, y, x
if rotate(y, x, (tail + 2) % 4, d):
if step + 1 < dp[y][x][d]:
dp[y][x][d] = step + 1
q.append((y, x, d, step + 1))
y, x, ty, tx = ty, tx, y, x
global up, down, right, left
cs = [1000000000.0] * 4
cs[0] = dp[n - 1][n - 1][up]
cs[1] = dp[n - 1][n - 1][left]
cs[2] = dp[n - 2][n - 1][down]
cs[3] = dp[n - 1][n - 2][right]
return min(cs)
def solution(b):
global n, B
B = [el[:] for el in b]
n = len(b)
dp = [[([1000000000.0] * 4) for _ in range(n)] for _ in range(n)]
dp[0][1][3] = 0
answer = bfs(dp)
return answer
| from collections import deque
n = -1
D = [(-1 , 0) , (0 , 1) , (1 , 0) , (0 , -1)]
B = -1
up = 0
right = 1
down = 2
left = 3
dic = {}
dic[0] = 'up'
dic[1] = 'right'
dic[2] = 'down'
dic[3] = 'left'
def possi(y , x):
global n
if y < 0 or y >= n or x < 0 or x >= n or B[y][x]:
return False
return True
def move(d , ay , ax , by , bx):
ay += D[d][0]
by += D[d][0]
ax += D[d][1]
bx += D[d][1]
if possi(ay , ax) and possi(by , bx):
return True
return False
def rotate(y , x , tail , dest):
    # check that the destination cell and the diagonal cell are free
if possi(y + D[dest][0] , x + D[dest][1]) and possi(y + D[tail][0] + D[dest][0] , x + D[tail][1] + D[dest][1] ):
return True
return False
def bfs(dp):
q = deque()
# q.append((y , x , tail , step))
q.append( (0 , 1 , 3 , 0) )
while q:
y , x , tail , step = q.popleft()
ty , tx = y + D[tail][0] , x + D[tail][1]
for d in range(4):
if move(d , y , x , ty , tx):
ny = y + D[d][0]
nx = x + D[d][1]
                if step + 1 < dp[ny][nx][tail]: # position changes, tail direction stays the same
dp[ny][nx][tail] = step + 1
q.append( (ny , nx , tail , step + 1) )
            if d % 2 == tail % 2: # same axis as the tail (itself or a 180-degree turn), skip
continue
#rotate(ori_tail , new_tail)
            if rotate(y , x , tail , d): # if possi(ry , rx) and possi( (ry + ty) // 2, (ry + tx) // 2): # direction the tail rotates into , plus the diagonal ??
                if step + 1 < dp[y][x][d]:
                    dp[y][x][d] = step + 1
                    q.append( (y , x , d , step + 1) ) # same position , tail direction changes
            # swap head and tail
            y , x , ty , tx = ty , tx , y , x
            if rotate(y , x , (tail + 2) % 4 , d): # bug note: forgot the swap when changing this helper
                if step + 1 < dp[y][x][d]:
                    dp[y][x][d] = step + 1
                    q.append( (y , x , d , step + 1) ) # same position , tail direction changes
            y , x , ty , tx = ty , tx , y , x
global up , down , right , left
cs = [1e9] * 4
cs[0] = dp[n-1][n-1][up]
cs[1] = dp[n-1][n-1][left]
cs[2] = dp[n-2][n-1][down]
cs[3] = dp[n-1][n-2][right]
#print(cs)
return min(cs)
def solution(b):
global n , B
B = [el[:] for el in b]
n = len(b)
dp = [ [ [1e9] * 4 for _ in range (n)] for _ in range(n)]
    # dir: tail points up, right, down, left
dp[0][1][3] = 0
answer = bfs(dp)
return answer
#print(solution([[0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [0, 0, 1, 1, 0]]))
#print(solution([[0, 0, 0, 0, 0, 0, 1], [1, 1, 1, 1, 0, 0, 1], [0, 0, 0, 0, 0, 0, 0], [0, 0, 1, 1, 1, 1, 0], [0, 1, 1, 1, 1, 1, 0], [0, 0, 0, 0, 0, 1, 1], [0, 0, 1, 0, 0, 0, 0]]))
#print(solution( [[0, 0, 0, 0, 0, 0, 1], [1, 1, 1, 1, 0, 0, 1], [0, 0, 0, 0, 0, 0, 0], [0, 0, 1, 1, 1, 0, 0], [0, 1, 1, 1, 1, 1, 0], [0, 0, 0, 0, 0, 1, 0], [0, 0, 1, 0, 0, 0, 0]]))
#print(solution([[0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 1, 1, 1, 1, 1, 0, 0], [0, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 1, 1, 1, 1, 1, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 0]])) | [
4,
5,
6,
7,
8
] |
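The solution above searches over states (head row, head column, tail direction): D maps directions 0 to 3 to up, right, down and left, and dp[y][x][dir] holds the fewest moves needed to reach that pose, so the answer is the best of the four poses that place the robot on cell (n-1, n-1). A quick smoke test, simply re-using the first commented-out sample grid with the solution function defined above (no expected value is asserted here):

grid = [[0, 0, 0, 0, 0],
        [0, 0, 0, 0, 0],
        [0, 0, 0, 0, 0],
        [0, 0, 0, 0, 0],
        [0, 0, 1, 1, 0]]
print(solution(grid))  # prints the minimum number of moves for this board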
1,453 | efc0b8f1c4887810a9c85e34957d664b01c1e92e | <mask token>
| <mask token>
for i in range(1, N + 1, 1):
NUM = int(input('ingrese un numero entero '))
if NUM > 0:
SP += NUM
CP += 1
else:
SO += NUM
<mask token>
print(
f'hay {CP} numeros positivos, el promedio general es de {PG} y el promedio de los numeros positivos es de {PP}'
)
| N = int(input('ingrese el numero de datos a ingresar '))
SP = 0
SO = 0
CP = 0
for i in range(1, N + 1, 1):
NUM = int(input('ingrese un numero entero '))
if NUM > 0:
SP += NUM
CP += 1
else:
SO += NUM
PG = (SP + SO) / N
PP = SP / CP
print(
f'hay {CP} numeros positivos, el promedio general es de {PG} y el promedio de los numeros positivos es de {PP}'
)
| N = int(input("ingrese el numero de datos a ingresar "))
SP = 0
SO = 0
CP = 0
for i in range(1,N+1,1):
NUM = int(input("ingrese un numero entero "))
if NUM > 0:
SP += NUM
CP += 1
else:
SO += NUM
PG = (SP+SO)/N
PP = SP/CP
print(f"hay { CP } numeros positivos, el promedio general es de { PG } y el promedio de los numeros positivos es de { PP }")
| null | [
0,
1,
2,
3
] |
1,454 | 3240a7fb9fbd5cd84165e68f8406e0a146c2b6b6 | #!/usr/bin/python
# coding:utf-8
#
#This script analyzes an apache access log file and filters out the information we need
#Example of the processed output: host IP:192.168.14.44  traffic:814 K
#Usage: python <script-name> <log-file>; eg: python python.analysis.apachelog.py access.log
#
# by wangdd 2016/02/02
#
import os
import re
import sys
import shelve
#re module: use re to parse the apache log
#a match object is returned by re.match(...) or re.compile(...).match(...)
# the match object has the following methods and attributes:
# methods:
# group( [group1, ...])
# groups( [default])
# groupdict( [default])
# start( [group])
# end( [group])
#
# apache log format: 192.168.47.82 - - [17/Dec/2014:16:41:03 +0800] "GET /application/account/loginIndex.htm HTTP/1.1" 200 56273
#
#Basic idea: use re to match each line, extract the bytes sent per client IP, save the result into the apache_log.db database, and finally format the output
log_line_re = re.compile(r'''(?P<remote_host>^\d{1,3}\.(\d{1,3}\.){2}\d{1,3})
\s+
(?P<log_name>\S+)
\s+
(?P<login_user>\S+)
\s+
(?P<time>\[.*\])
\s+
".*"
\s+
(?P<status>\d+)
\s+
(?P<bytes_sent>-|\d+)
''',re.X)
#Use the regex above to extract the needed fields and return them as a dict
def logline(line):
m = log_line_re.search(line)
if m:
groupdict = m.groupdict()
if groupdict['bytes_sent'] == '-':
groupdict['bytes_sent'] = '0'
return groupdict
else:
return {'remote_host':None,'status':None,'bytes_sent':"0",}
#Pull the needed values out of the parsed dicts
def log_report(logfile):
report_dict ={}
for line in logfile:
line_dict = logline(line)
try:
bytes_sent = int(line_dict['bytes_sent'])
except ValueError:
continue
report_dict.setdefault(line_dict['remote_host'],[]).append(bytes_sent)
for k,v in report_dict.iteritems():
sum = 0
if k != None:
for data in v:
sum = sum +data
print '主机IP:%s\t 访问流量:%s K' % (k,sum/1024)
#This function saves the processed data into the apache_log.db file using the shelve module
def store_data(file):
shelv_file = shelve.open('apache_log.db')
if not os.path.isfile('shelv_file'):
for line in file:
d_line = logline(line)
shelv_file[d_line['remote_host']] = \
shelv_file.setdefault(d_line['remote_host'],0) + \
int (d_line['bytes_sent'])
data_file.close()
shelv_file.close()
if __name__ == '__main__':
if not len(sys.argv) >1:
print __doc__
sys.exit(1)
infile_name = sys.argv[1]
try:
infile = open(infile_name,'r')
except IOError:
print "please input some file"
print __doc__
sys.exit(1)
log_report(infile)
store_data(infile)
infile.close()
#--------------------------------------------------------------------
| null | null | null | null | [
0
] |
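The verbose regular expression above does the real work: it names the remote host, identity, user, timestamp, status and bytes-sent fields of a combined-format log line. A small Python 3 check of that exact pattern against the sample line quoted in the header comment (only the test harness is new; the pattern itself is copied from the script):

import re

log_line_re = re.compile(r'''(?P<remote_host>^\d{1,3}\.(\d{1,3}\.){2}\d{1,3})
                             \s+
                             (?P<log_name>\S+)
                             \s+
                             (?P<login_user>\S+)
                             \s+
                             (?P<time>\[.*\])
                             \s+
                             ".*"
                             \s+
                             (?P<status>\d+)
                             \s+
                             (?P<bytes_sent>-|\d+)
                          ''', re.X)

line = ('192.168.47.82 - - [17/Dec/2014:16:41:03 +0800] '
        '"GET /application/account/loginIndex.htm HTTP/1.1" 200 56273')
m = log_line_re.search(line)
print(m.group('remote_host'), m.group('status'), m.group('bytes_sent'))
# 192.168.47.82 200 56273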
1,455 | 3ffbef142d8fb53b734567ebea874f9c59ff9a9e | <mask token>
| <mask token>
print(num1)
print(num2)
print(add)
print(sub)
print(mul)
print(div)
print(mod)
print(exp)
print(fd)
| num1 = 101
num2 = 20
add = num1 + num2
sub = num1 - num2
mul = num1 * num2
div = num1 / num2
mod = num1 % num2
exp = num1 ** num2
fd = num1 // num2
print(num1)
print(num2)
print(add)
print(sub)
print(mul)
print(div)
print(mod)
print(exp)
print(fd)
| null | null | [
0,
1,
2
] |
1,456 | 4d4f7db6d5b4ed7eac3ced73aca76d3c952c84f4 | <mask token>
| <mask token>
def parse_rule(rules: dict, rule_str: str=None) ->Rule:
if rule_str is None:
rule_str: str = rules[0]
if '"' in rule_str:
return CharacterMatch(rule_str.strip('"'))
elif '|' in rule_str:
or_rules = [parse_rule(rules, part.strip()) for part in rule_str.
split('|')]
return OrRule(*or_rules)
elif ' ' in rule_str:
and_rules = [parse_rule(rules, part.strip()) for part in rule_str.
split(' ')]
return ListRule(*and_rules)
elif rule_str.strip().isnumeric():
return parse_rule(rules, rules.get(int(rule_str)))
else:
print(f'WUT? {rule_str}')
<mask token>
| <mask token>
def parse_rule(rules: dict, rule_str: str=None) ->Rule:
if rule_str is None:
rule_str: str = rules[0]
if '"' in rule_str:
return CharacterMatch(rule_str.strip('"'))
elif '|' in rule_str:
or_rules = [parse_rule(rules, part.strip()) for part in rule_str.
split('|')]
return OrRule(*or_rules)
elif ' ' in rule_str:
and_rules = [parse_rule(rules, part.strip()) for part in rule_str.
split(' ')]
return ListRule(*and_rules)
elif rule_str.strip().isnumeric():
return parse_rule(rules, rules.get(int(rule_str)))
else:
print(f'WUT? {rule_str}')
if __name__ == '__main__':
with open('input.txt', 'rt') as puzzle:
rules = dict()
while True:
line = puzzle.readline().strip()
if not line:
break
number, rule = line.split(': ')
rules[int(number)] = rule
inputs = []
while True:
line = puzzle.readline().strip()
if not line:
break
inputs.append(line)
rule = parse_rule(rules)
matches = sum([is_match for is_match, left in [rule.match(_input) for
_input in inputs] if not left])
print(f'number of matching messages: {matches}')
| from day19.rules import Rule, CharacterMatch, OrRule, ListRule
def parse_rule(rules: dict, rule_str: str=None) ->Rule:
if rule_str is None:
rule_str: str = rules[0]
if '"' in rule_str:
return CharacterMatch(rule_str.strip('"'))
elif '|' in rule_str:
or_rules = [parse_rule(rules, part.strip()) for part in rule_str.
split('|')]
return OrRule(*or_rules)
elif ' ' in rule_str:
and_rules = [parse_rule(rules, part.strip()) for part in rule_str.
split(' ')]
return ListRule(*and_rules)
elif rule_str.strip().isnumeric():
return parse_rule(rules, rules.get(int(rule_str)))
else:
print(f'WUT? {rule_str}')
if __name__ == '__main__':
with open('input.txt', 'rt') as puzzle:
rules = dict()
while True:
line = puzzle.readline().strip()
if not line:
break
number, rule = line.split(': ')
rules[int(number)] = rule
inputs = []
while True:
line = puzzle.readline().strip()
if not line:
break
inputs.append(line)
rule = parse_rule(rules)
matches = sum([is_match for is_match, left in [rule.match(_input) for
_input in inputs] if not left])
print(f'number of matching messages: {matches}')
| null | [
0,
1,
2,
3
] |
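parse_rule builds the grammar recursively, but the Rule, CharacterMatch, OrRule and ListRule classes live in day19.rules, which is not included here. Judging only from how they are used (each rule's match(text) returns an (is_match, leftover) pair), minimal stand-ins might look like the sketch below. These are assumptions about that module's interface, not its actual code, and they deliberately skip the backtracking a fully general matcher would need.

class CharacterMatch:
    def __init__(self, char):
        self.char = char

    def match(self, text):
        # consume one literal character
        if text.startswith(self.char):
            return True, text[len(self.char):]
        return False, text


class ListRule:
    def __init__(self, *rules):
        self.rules = rules

    def match(self, text):
        # every sub-rule must match, in order
        rest = text
        for rule in self.rules:
            ok, rest = rule.match(rest)
            if not ok:
                return False, text
        return True, rest


class OrRule:
    def __init__(self, *rules):
        self.rules = rules

    def match(self, text):
        # the first alternative that matches wins
        for rule in self.rules:
            ok, rest = rule.match(text)
            if ok:
                return True, rest
        return False, text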
1,457 | 92dea316889192824c353002670cdcf03dfbcd4c | <mask token>
| <mask token>
print(effort)
| <mask token>
size, k = map(int, input().split())
parcel = list(map(int, input().split()))
effort = 2 * parcel[k - 1] * min(parcel) + max(parcel) * min(parcel)
print(effort)
| #Question:
"""
The parcel section of the Head Post Office is in a mess. The parcels that need to be loaded to the vans have been lined up in a row in an arbitrary order of weights. The Head Post Master wants them to be sorted in the increasing order of the weights of the parcels, with one exception. He wants the heaviest (and presumably the most valuable) parcel kept nearest his office.
You and your friend try to sort these boxes and you decide to sort them by interchanging two boxes at a time. Such an interchange needs effort equal to the product of the weights of the two boxes.
The objective is to reposition the boxes as required with minimum effort.
Input Format:
The first line consists of two space-separated positive integers giving the number of boxes (N) and the position of the Head Post Masters office (k) where the heaviest box must be.
The second line consists of N space-separated positive integers giving the weights of the boxes. You may assume that no two weights are equal
Output Format:
The output is one line giving the total effort taken to get the boxes in sorted order, and the heaviest in position k.
Constraints:
N<=50 and Weights <= 1000
Sample Input 1:
5 2
20 50 30 80 70
Sample Output 1:
3600
"""
#Solution:
size,k = map(int,input().split())
parcel = list(map(int,input().split()))
effort = 2*parcel[k-1]*min(parcel) + max(parcel)*min(parcel)
print(effort) | null | [
0,
1,
2,
3
] |
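The one-line formula above charges the box originally at position k two swaps with the lightest box, plus one swap between the heaviest and the lightest box, which is why min(parcel) appears in both terms. Checking it against the sample case from the problem statement (this only verifies the given example, not the formula in general):

parcel = [20, 50, 30, 80, 70]
k = 2
effort = 2 * parcel[k - 1] * min(parcel) + max(parcel) * min(parcel)
print(effort)  # 2*50*20 + 80*20 = 2000 + 1600 = 3600, matching the sample output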
1,458 | e9c81be79d9107433e00182c27488e64f1ca779f | <mask token>
| <mask token>
class Command(BaseCommand):
<mask token>
def handle(self, *args, **options):
task_scheduler.start()
| <mask token>
class Command(BaseCommand):
"""
    Start the scheduled tasks defined in BanBanTong.tasks
"""
def handle(self, *args, **options):
task_scheduler.start()
| from django.core.management.base import BaseCommand
from BanBanTong.utils import task_scheduler
class Command(BaseCommand):
"""
    Start the scheduled tasks defined in BanBanTong.tasks
"""
def handle(self, *args, **options):
task_scheduler.start()
| #!/usr/bin/env python
# coding=utf-8
from django.core.management.base import BaseCommand
from BanBanTong.utils import task_scheduler
class Command(BaseCommand):
'''
    Start the scheduled tasks defined in BanBanTong.tasks
'''
def handle(self, *args, **options):
task_scheduler.start()
| [
0,
2,
3,
4,
5
] |
1,459 | d437d77d5a57a6f2f4a2d530be05c3845dce93bc | <mask token>
class Detailedreservation(RetrieveUpdateDestroyAPIView):
<mask token>
<mask token>
| <mask token>
class ListReservation(ListCreateAPIView):
<mask token>
<mask token>
class DetailedFlight(RetrieveUpdateDestroyAPIView):
queryset = Flight.objects.all()
serializer_class = FlightSerializer
permission_classes = [IsAuthenticated]
class DetailedPassenger(RetrieveUpdateDestroyAPIView):
queryset = Passenger.objects.all()
serializer_class = PassengerSerializer
class Detailedreservation(RetrieveUpdateDestroyAPIView):
queryset = Reservation.objects.all()
serializer_class = ReservationSerializer
| <mask token>
@api_view(['POST'])
def save_reservation(request):
bodyData = request.data
req_flight = Flight.objects.get(id=bodyData['flightID'])
req_passenger = Passenger()
req_passenger.firstName = bodyData['firstName']
req_passenger.lastName = bodyData['lastName']
req_passenger.middlename = bodyData['middleName']
req_passenger.email = bodyData['email']
req_passenger.phone = bodyData['phone']
req_passenger.save()
req_reservation = Reservation()
req_reservation.flight = req_flight
req_reservation.passenger = req_passenger
req_reservation.save()
return Response(status=status.HTTP_201_CREATED)
class ListFlight(ListCreateAPIView):
queryset = Flight.objects.all()
serializer_class = FlightSerializer
permission_classes = [IsAuthenticated]
class ListPassengers(ListCreateAPIView):
queryset = Passenger.objects.all()
serializer_class = PassengerSerializer
class ListReservation(ListCreateAPIView):
queryset = Reservation.objects.all()
serializer_class = ReservationSerializer
class DetailedFlight(RetrieveUpdateDestroyAPIView):
queryset = Flight.objects.all()
serializer_class = FlightSerializer
permission_classes = [IsAuthenticated]
class DetailedPassenger(RetrieveUpdateDestroyAPIView):
queryset = Passenger.objects.all()
serializer_class = PassengerSerializer
class Detailedreservation(RetrieveUpdateDestroyAPIView):
queryset = Reservation.objects.all()
serializer_class = ReservationSerializer
| <mask token>
@api_view(['GET'])
def find_flight(request):
bodyData = request.data
req_flight = Flight.objects.filter(departureCity=bodyData[
'departureCity'], arrivalCity=bodyData['arrivalCity'],
dateOfDeparture=bodyData['dateOfDeparture'])
serialized_flight = FlightSerializer(req_flight, many=True)
return Response(serialized_flight.data)
@api_view(['POST'])
def save_reservation(request):
bodyData = request.data
req_flight = Flight.objects.get(id=bodyData['flightID'])
req_passenger = Passenger()
req_passenger.firstName = bodyData['firstName']
req_passenger.lastName = bodyData['lastName']
req_passenger.middlename = bodyData['middleName']
req_passenger.email = bodyData['email']
req_passenger.phone = bodyData['phone']
req_passenger.save()
req_reservation = Reservation()
req_reservation.flight = req_flight
req_reservation.passenger = req_passenger
req_reservation.save()
return Response(status=status.HTTP_201_CREATED)
class ListFlight(ListCreateAPIView):
queryset = Flight.objects.all()
serializer_class = FlightSerializer
permission_classes = [IsAuthenticated]
class ListPassengers(ListCreateAPIView):
queryset = Passenger.objects.all()
serializer_class = PassengerSerializer
class ListReservation(ListCreateAPIView):
queryset = Reservation.objects.all()
serializer_class = ReservationSerializer
class DetailedFlight(RetrieveUpdateDestroyAPIView):
queryset = Flight.objects.all()
serializer_class = FlightSerializer
permission_classes = [IsAuthenticated]
class DetailedPassenger(RetrieveUpdateDestroyAPIView):
queryset = Passenger.objects.all()
serializer_class = PassengerSerializer
class Detailedreservation(RetrieveUpdateDestroyAPIView):
queryset = Reservation.objects.all()
serializer_class = ReservationSerializer
| from django.shortcuts import render
from rest_framework.response import Response
from rest_framework import status
from rest_framework.decorators import api_view
from rest_framework.permissions import IsAuthenticated
from .models import Flight, Passenger, Reservation
from .serializers import FlightSerializer, PassengerSerializer, ReservationSerializer
from rest_framework.generics import ListCreateAPIView, RetrieveUpdateDestroyAPIView
# Function Based Views Below
@api_view(['GET'])
def find_flight(request):
bodyData = request.data
req_flight = Flight.objects.filter(
departureCity = bodyData['departureCity'],
arrivalCity = bodyData['arrivalCity'],
dateOfDeparture = bodyData['dateOfDeparture']
)
serialized_flight = FlightSerializer(req_flight, many=True)
return Response(serialized_flight.data)
@api_view(['POST'])
def save_reservation(request):
bodyData = request.data
req_flight = Flight.objects.get(id= bodyData['flightID'])
req_passenger = Passenger()
req_passenger.firstName = bodyData['firstName']
req_passenger.lastName = bodyData['lastName']
req_passenger.middlename = bodyData['middleName']
req_passenger.email = bodyData['email']
req_passenger.phone = bodyData['phone']
req_passenger.save()
req_reservation = Reservation()
req_reservation.flight = req_flight
req_reservation.passenger = req_passenger
req_reservation.save()
return Response(status=status.HTTP_201_CREATED)
# Non Primary based Operations Below
class ListFlight(ListCreateAPIView):
queryset = Flight.objects.all()
serializer_class = FlightSerializer
permission_classes = [IsAuthenticated]
class ListPassengers(ListCreateAPIView):
queryset = Passenger.objects.all()
serializer_class = PassengerSerializer
class ListReservation(ListCreateAPIView):
queryset = Reservation.objects.all()
serializer_class = ReservationSerializer
# Primary Key based Operation Below
class DetailedFlight(RetrieveUpdateDestroyAPIView):
queryset = Flight.objects.all()
serializer_class = FlightSerializer
permission_classes = [IsAuthenticated]
class DetailedPassenger(RetrieveUpdateDestroyAPIView):
queryset = Passenger.objects.all()
serializer_class = PassengerSerializer
class Detailedreservation(RetrieveUpdateDestroyAPIView):
queryset = Reservation.objects.all()
serializer_class = ReservationSerializer | [
1,
7,
13,
14,
16
] |
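The views above import FlightSerializer, PassengerSerializer and ReservationSerializer from a local serializers module that is not shown. A plausible minimal version, assuming plain ModelSerializer classes exposing all model fields (the field choice is a guess, not taken from the project):

from rest_framework import serializers
from .models import Flight, Passenger, Reservation


class FlightSerializer(serializers.ModelSerializer):
    class Meta:
        model = Flight
        fields = '__all__'


class PassengerSerializer(serializers.ModelSerializer):
    class Meta:
        model = Passenger
        fields = '__all__'


class ReservationSerializer(serializers.ModelSerializer):
    class Meta:
        model = Reservation
        fields = '__all__'

The generic views and the two function views would then be wired into urls.py with as_view() and path() in the usual DRF way.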
1,460 | 2cc9f8c476026311456857d3395a14a45e2f4b80 | <mask token>
| <mask token>
p.add_argument('--foo', action='store_true')
<mask token>
print(args.foo)
| <mask token>
p = argparse.ArgumentParser()
p.add_argument('--foo', action='store_true')
args = p.parse_args()
print(args.foo)
| import argparse
p = argparse.ArgumentParser()
p.add_argument('--foo', action='store_true')
args = p.parse_args()
print(args.foo)
| import argparse
p = argparse.ArgumentParser()
p.add_argument("--foo", action="store_true")
args = p.parse_args()
print(args.foo)
| [
0,
1,
2,
3,
4
] |
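For a quick check without touching sys.argv, parse_args also accepts an explicit argument list, so the same parser can be exercised directly:

import argparse

p = argparse.ArgumentParser()
p.add_argument('--foo', action='store_true')
print(p.parse_args(['--foo']).foo)  # True
print(p.parse_args([]).foo)         # False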
1,461 | 0d9c50e55df5aa5614bd5a9679729cf7fa69c5df | <mask token>
| <mask token>
if __name__ == '__main__':
url_arg = sys.argv[1]
email = sys.argv[2]
params = {'email': email}
response = requests.post(url_arg, data=params)
print(response.text)
| <mask token>
import requests
import sys
if __name__ == '__main__':
url_arg = sys.argv[1]
email = sys.argv[2]
params = {'email': email}
response = requests.post(url_arg, data=params)
print(response.text)
| #!/usr/bin/python3
"""takes in a URL and an email address, sends a POST request to the passed
URL with the email as a parameter, and finally
displays the body of the response.
"""
import requests
import sys
if __name__ == "__main__":
url_arg = sys.argv[1]
email = sys.argv[2]
params = {'email': email}
response = requests.post(url_arg, data=params)
print(response.text)
| null | [
0,
1,
2,
3
] |
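The script above takes the target URL and the email address from the command line. The same request can be tried in isolation against a public echo service; httpbin.org is used here purely as an example target and is not part of the original script:

import requests

resp = requests.post('https://httpbin.org/post', data={'email': 'user@example.com'})
print(resp.text)  # httpbin echoes the submitted form field back in its JSON body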
1,462 | 38c21fb959d8b98b616006ea48bd720cc6f9995c | <mask token>
| <mask token>
class Migration(migrations.Migration):
<mask token>
<mask token>
| <mask token>
class Migration(migrations.Migration):
dependencies = [('digressions', '0004_auto_20180303_1158')]
operations = [migrations.RemoveField(model_name='extraits', name=
'extraits_livre_id'), migrations.AddField(model_name='extraits',
name='extraits_livre_id', field=models.ForeignKey(default=
'du coté de chez Swann', on_delete=django.db.models.deletion.
CASCADE, to='digressions.Livre'), preserve_default=False)]
| from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [('digressions', '0004_auto_20180303_1158')]
operations = [migrations.RemoveField(model_name='extraits', name=
'extraits_livre_id'), migrations.AddField(model_name='extraits',
name='extraits_livre_id', field=models.ForeignKey(default=
'du coté de chez Swann', on_delete=django.db.models.deletion.
CASCADE, to='digressions.Livre'), preserve_default=False)]
| # Generated by Django 2.0 on 2018-03-06 16:21
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('digressions', '0004_auto_20180303_1158'),
]
operations = [
migrations.RemoveField(
model_name='extraits',
name='extraits_livre_id',
),
migrations.AddField(
model_name='extraits',
name='extraits_livre_id',
field=models.ForeignKey(default='du coté de chez Swann', on_delete=django.db.models.deletion.CASCADE, to='digressions.Livre'),
preserve_default=False,
),
]
| [
0,
1,
2,
3,
4
] |
1,463 | 760a62a94347171eb9e40015c0c43d72df8f4fc8 | <mask token>
| <mask token>
def setup(app: Application):
app.register_run_task(multiply)
| <mask token>
def multiply():
print('multiply', 2 * 2)
def setup(app: Application):
app.register_run_task(multiply)
| from unv.app.base import Application
def multiply():
print('multiply', 2 * 2)
def setup(app: Application):
app.register_run_task(multiply)
| null | [
0,
1,
2,
3
] |
1,464 | d0a73385db0dd6f729d267095ef83b9fec72e40c | <mask token>
def upgrade():
op.create_unique_constraint('_unique_name_unit', 'ingredient', ['name',
'unit'])
op.create_unique_constraint(None, 'product', ['nappi_code'])
op.add_column('product_ingredient', sa.Column('strength', sa.String(),
nullable=True))
op.create_unique_constraint('_unique_product_ingredient_strength',
'product_ingredient', ['product_id', 'ingredient_id', 'strength'])
op.drop_column('product_ingredient', 'stength')
<mask token>
| <mask token>
def upgrade():
op.create_unique_constraint('_unique_name_unit', 'ingredient', ['name',
'unit'])
op.create_unique_constraint(None, 'product', ['nappi_code'])
op.add_column('product_ingredient', sa.Column('strength', sa.String(),
nullable=True))
op.create_unique_constraint('_unique_product_ingredient_strength',
'product_ingredient', ['product_id', 'ingredient_id', 'strength'])
op.drop_column('product_ingredient', 'stength')
def downgrade():
op.add_column('product_ingredient', sa.Column('stength', sa.VARCHAR(),
autoincrement=False, nullable=True))
op.drop_constraint('_unique_product_ingredient_strength',
'product_ingredient', type_='unique')
op.drop_column('product_ingredient', 'strength')
op.drop_constraint(None, 'product', type_='unique')
op.drop_constraint('_unique_name_unit', 'ingredient', type_='unique')
| <mask token>
revision = 'a07768b0d4c0'
down_revision = 'a80cd9a35e58'
branch_labels = None
depends_on = None
def upgrade():
op.create_unique_constraint('_unique_name_unit', 'ingredient', ['name',
'unit'])
op.create_unique_constraint(None, 'product', ['nappi_code'])
op.add_column('product_ingredient', sa.Column('strength', sa.String(),
nullable=True))
op.create_unique_constraint('_unique_product_ingredient_strength',
'product_ingredient', ['product_id', 'ingredient_id', 'strength'])
op.drop_column('product_ingredient', 'stength')
def downgrade():
op.add_column('product_ingredient', sa.Column('stength', sa.VARCHAR(),
autoincrement=False, nullable=True))
op.drop_constraint('_unique_product_ingredient_strength',
'product_ingredient', type_='unique')
op.drop_column('product_ingredient', 'strength')
op.drop_constraint(None, 'product', type_='unique')
op.drop_constraint('_unique_name_unit', 'ingredient', type_='unique')
| <mask token>
from alembic import op
import sqlalchemy as sa
revision = 'a07768b0d4c0'
down_revision = 'a80cd9a35e58'
branch_labels = None
depends_on = None
def upgrade():
op.create_unique_constraint('_unique_name_unit', 'ingredient', ['name',
'unit'])
op.create_unique_constraint(None, 'product', ['nappi_code'])
op.add_column('product_ingredient', sa.Column('strength', sa.String(),
nullable=True))
op.create_unique_constraint('_unique_product_ingredient_strength',
'product_ingredient', ['product_id', 'ingredient_id', 'strength'])
op.drop_column('product_ingredient', 'stength')
def downgrade():
op.add_column('product_ingredient', sa.Column('stength', sa.VARCHAR(),
autoincrement=False, nullable=True))
op.drop_constraint('_unique_product_ingredient_strength',
'product_ingredient', type_='unique')
op.drop_column('product_ingredient', 'strength')
op.drop_constraint(None, 'product', type_='unique')
op.drop_constraint('_unique_name_unit', 'ingredient', type_='unique')
| """product_ingredient unique constraint
Revision ID: a07768b0d4c0
Revises: a80cd9a35e58
Create Date: 2017-05-18 11:39:52.258266
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'a07768b0d4c0'
down_revision = 'a80cd9a35e58'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_unique_constraint('_unique_name_unit', 'ingredient', ['name', 'unit'])
op.create_unique_constraint(None, 'product', ['nappi_code'])
op.add_column('product_ingredient', sa.Column('strength', sa.String(), nullable=True))
op.create_unique_constraint('_unique_product_ingredient_strength', 'product_ingredient', ['product_id', 'ingredient_id', 'strength'])
op.drop_column('product_ingredient', 'stength')
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('product_ingredient', sa.Column('stength', sa.VARCHAR(), autoincrement=False, nullable=True))
op.drop_constraint('_unique_product_ingredient_strength', 'product_ingredient', type_='unique')
op.drop_column('product_ingredient', 'strength')
op.drop_constraint(None, 'product', type_='unique')
op.drop_constraint('_unique_name_unit', 'ingredient', type_='unique')
# ### end Alembic commands ###
| [
1,
2,
3,
4,
5
] |
1,465 | ddabceb223f4e457a0f69af5abf793ae72e5f432 | <mask token>
def getCollapsedFastqDataframe(file):
df = pd.read_table(file, header=None, delim_whitespace=True)
df = df.dropna(axis=1, how='all')
sample = file.split('/')
sample = sample[len(sample) - 1]
sample = sample.split('.')[0]
df.columns = ['READS', 'SEQUENCE']
return df
def getManifestID(name, tumor):
id = manifest_data.loc[(manifest_data['DISEASE.ABBV'] == tumor) & (
manifest_data['NAME'] == name)]['ID']
id = id.tolist()[0]
id = str(id)
return str(id)
<mask token>
| <mask token>
def getCollapsedFastqDataframe(file):
df = pd.read_table(file, header=None, delim_whitespace=True)
df = df.dropna(axis=1, how='all')
sample = file.split('/')
sample = sample[len(sample) - 1]
sample = sample.split('.')[0]
df.columns = ['READS', 'SEQUENCE']
return df
def getManifestID(name, tumor):
id = manifest_data.loc[(manifest_data['DISEASE.ABBV'] == tumor) & (
manifest_data['NAME'] == name)]['ID']
id = id.tolist()[0]
id = str(id)
return str(id)
<mask token>
for subdir, dirs, files in os.walk(base_path):
if '/collapsed_fastq' in subdir:
folders = subdir.split('/')
tumor = folders[len(folders) - 2]
if tumor in ['THCA', 'STAD', 'SKCM', 'PCPG']:
continue
print(tumor)
summary_file = (base_path + 'motif_reads/' + mirna + '/' + motif +
'.' + tumor + '.common.reads.fastq.collapsed.summary.tsv')
if Path(summary_file).exists():
summary_data = pd.read_table(summary_file, header='infer', sep='\t'
)
else:
print('SUMMARY FILE NOT FOUND')
summary_data = pd.DataFrame({'SEQUENCE': []})
matched_ids = list(summary_data)
common_seqs = list(summary_data['SEQUENCE'])
total_time_start = time.time()
for f in os.listdir(subdir):
time_start = time.time()
if f[0] == '.':
break
patient = f.split('.')[0]
id = getManifestID(patient, tumor)
if id not in matched_ids:
matched_ids.append(id)
if Path(summary_file).exists():
summary_data = pd.read_table(summary_file, header=
'infer', sep='\t')
else:
print('SUMMARY FILE NOT FOUND')
summary_data = pd.DataFrame({'SEQUENCE': []})
matched_ids = list(summary_data)
common_seqs = list(summary_data['SEQUENCE'])
summary_data = None
collapsed_file = subdir + '/' + f
collapsed_data = getCollapsedFastqDataframe(collapsed_file)
if len(common_seqs) > 0:
collapsed_data = collapsed_data[collapsed_data.SEQUENCE
.isin(common_seqs)]
num_rows = collapsed_data.shape[0]
collapsed_data.columns = [str(id), 'SEQUENCE']
match_collapsed_data = collapsed_data
match_collapsed_data.columns = [str(id), 'SEQUENCE']
if Path(summary_file).exists():
summary_data = pd.read_table(summary_file, header=
'infer', sep='\t')
summary_data = pd.merge(summary_data,
match_collapsed_data, on='SEQUENCE', sort=False,
how='inner')
else:
summary_data = match_collapsed_data
summary_data.to_csv(summary_file, sep='\t', index=False)
summary_data = pd.DataFrame({'SEQUENCE': []})
time_end = time.time()
summary_data = pd.read_table(summary_file, header='infer', sep='\t')
match_summary_data = summary_data.copy()
for index, row in summary_data.iterrows():
sequence = str(row['SEQUENCE'])
if motif not in sequence:
match_summary_data = match_summary_data[match_summary_data.
SEQUENCE != sequence]
match_summary_data.to_csv(summary_file, sep='\t', index=False)
total_time_end = time.time()
print('TOTAl TUMOR TIME: ' + str(total_time_end - total_time_start))
| <mask token>
base_path = '/media/user/2TB (MAC)/Susanna/'
collapsed_ext = '.converted.unpaired.fastq.collapsed'
manifest_file = base_path + 'all-tumor-manifest.csv'
manifest_data = pd.read_csv(manifest_file, header='infer', sep=',')
<mask token>
def getCollapsedFastqDataframe(file):
df = pd.read_table(file, header=None, delim_whitespace=True)
df = df.dropna(axis=1, how='all')
sample = file.split('/')
sample = sample[len(sample) - 1]
sample = sample.split('.')[0]
df.columns = ['READS', 'SEQUENCE']
return df
def getManifestID(name, tumor):
id = manifest_data.loc[(manifest_data['DISEASE.ABBV'] == tumor) & (
manifest_data['NAME'] == name)]['ID']
id = id.tolist()[0]
id = str(id)
return str(id)
motifs = ['TGGTTATCTAGCT', 'TTATCAGACTGAT']
mirnas = ['hsa-miR-9-5p-1-2-3', 'hsa-miR-21-5p']
i = 1
motif = motifs[i]
mirna = mirnas[i]
for subdir, dirs, files in os.walk(base_path):
if '/collapsed_fastq' in subdir:
folders = subdir.split('/')
tumor = folders[len(folders) - 2]
if tumor in ['THCA', 'STAD', 'SKCM', 'PCPG']:
continue
print(tumor)
summary_file = (base_path + 'motif_reads/' + mirna + '/' + motif +
'.' + tumor + '.common.reads.fastq.collapsed.summary.tsv')
if Path(summary_file).exists():
summary_data = pd.read_table(summary_file, header='infer', sep='\t'
)
else:
print('SUMMARY FILE NOT FOUND')
summary_data = pd.DataFrame({'SEQUENCE': []})
matched_ids = list(summary_data)
common_seqs = list(summary_data['SEQUENCE'])
total_time_start = time.time()
for f in os.listdir(subdir):
time_start = time.time()
if f[0] == '.':
break
patient = f.split('.')[0]
id = getManifestID(patient, tumor)
if id not in matched_ids:
matched_ids.append(id)
if Path(summary_file).exists():
summary_data = pd.read_table(summary_file, header=
'infer', sep='\t')
else:
print('SUMMARY FILE NOT FOUND')
summary_data = pd.DataFrame({'SEQUENCE': []})
matched_ids = list(summary_data)
common_seqs = list(summary_data['SEQUENCE'])
summary_data = None
collapsed_file = subdir + '/' + f
collapsed_data = getCollapsedFastqDataframe(collapsed_file)
if len(common_seqs) > 0:
collapsed_data = collapsed_data[collapsed_data.SEQUENCE
.isin(common_seqs)]
num_rows = collapsed_data.shape[0]
collapsed_data.columns = [str(id), 'SEQUENCE']
match_collapsed_data = collapsed_data
match_collapsed_data.columns = [str(id), 'SEQUENCE']
if Path(summary_file).exists():
summary_data = pd.read_table(summary_file, header=
'infer', sep='\t')
summary_data = pd.merge(summary_data,
match_collapsed_data, on='SEQUENCE', sort=False,
how='inner')
else:
summary_data = match_collapsed_data
summary_data.to_csv(summary_file, sep='\t', index=False)
summary_data = pd.DataFrame({'SEQUENCE': []})
time_end = time.time()
summary_data = pd.read_table(summary_file, header='infer', sep='\t')
match_summary_data = summary_data.copy()
for index, row in summary_data.iterrows():
sequence = str(row['SEQUENCE'])
if motif not in sequence:
match_summary_data = match_summary_data[match_summary_data.
SEQUENCE != sequence]
match_summary_data.to_csv(summary_file, sep='\t', index=False)
total_time_end = time.time()
print('TOTAl TUMOR TIME: ' + str(total_time_end - total_time_start))
| import os
import os.path
import numpy as np
import pandas as pd
import collections
import subprocess
from pathlib import Path
import time
base_path = '/media/user/2TB (MAC)/Susanna/'
collapsed_ext = '.converted.unpaired.fastq.collapsed'
manifest_file = base_path + 'all-tumor-manifest.csv'
manifest_data = pd.read_csv(manifest_file, header='infer', sep=',')
<mask token>
def getCollapsedFastqDataframe(file):
df = pd.read_table(file, header=None, delim_whitespace=True)
df = df.dropna(axis=1, how='all')
sample = file.split('/')
sample = sample[len(sample) - 1]
sample = sample.split('.')[0]
df.columns = ['READS', 'SEQUENCE']
return df
def getManifestID(name, tumor):
id = manifest_data.loc[(manifest_data['DISEASE.ABBV'] == tumor) & (
manifest_data['NAME'] == name)]['ID']
id = id.tolist()[0]
id = str(id)
return str(id)
motifs = ['TGGTTATCTAGCT', 'TTATCAGACTGAT']
mirnas = ['hsa-miR-9-5p-1-2-3', 'hsa-miR-21-5p']
i = 1
motif = motifs[i]
mirna = mirnas[i]
for subdir, dirs, files in os.walk(base_path):
if '/collapsed_fastq' in subdir:
folders = subdir.split('/')
tumor = folders[len(folders) - 2]
if tumor in ['THCA', 'STAD', 'SKCM', 'PCPG']:
continue
print(tumor)
summary_file = (base_path + 'motif_reads/' + mirna + '/' + motif +
'.' + tumor + '.common.reads.fastq.collapsed.summary.tsv')
if Path(summary_file).exists():
summary_data = pd.read_table(summary_file, header='infer', sep='\t'
)
else:
print('SUMMARY FILE NOT FOUND')
summary_data = pd.DataFrame({'SEQUENCE': []})
matched_ids = list(summary_data)
common_seqs = list(summary_data['SEQUENCE'])
total_time_start = time.time()
for f in os.listdir(subdir):
time_start = time.time()
if f[0] == '.':
break
patient = f.split('.')[0]
id = getManifestID(patient, tumor)
if id not in matched_ids:
matched_ids.append(id)
if Path(summary_file).exists():
summary_data = pd.read_table(summary_file, header=
'infer', sep='\t')
else:
print('SUMMARY FILE NOT FOUND')
summary_data = pd.DataFrame({'SEQUENCE': []})
matched_ids = list(summary_data)
common_seqs = list(summary_data['SEQUENCE'])
summary_data = None
collapsed_file = subdir + '/' + f
collapsed_data = getCollapsedFastqDataframe(collapsed_file)
if len(common_seqs) > 0:
collapsed_data = collapsed_data[collapsed_data.SEQUENCE
.isin(common_seqs)]
num_rows = collapsed_data.shape[0]
collapsed_data.columns = [str(id), 'SEQUENCE']
match_collapsed_data = collapsed_data
match_collapsed_data.columns = [str(id), 'SEQUENCE']
if Path(summary_file).exists():
summary_data = pd.read_table(summary_file, header=
'infer', sep='\t')
summary_data = pd.merge(summary_data,
match_collapsed_data, on='SEQUENCE', sort=False,
how='inner')
else:
summary_data = match_collapsed_data
summary_data.to_csv(summary_file, sep='\t', index=False)
summary_data = pd.DataFrame({'SEQUENCE': []})
time_end = time.time()
summary_data = pd.read_table(summary_file, header='infer', sep='\t')
match_summary_data = summary_data.copy()
for index, row in summary_data.iterrows():
sequence = str(row['SEQUENCE'])
if motif not in sequence:
match_summary_data = match_summary_data[match_summary_data.
SEQUENCE != sequence]
match_summary_data.to_csv(summary_file, sep='\t', index=False)
total_time_end = time.time()
print('TOTAl TUMOR TIME: ' + str(total_time_end - total_time_start))
| ## PURPOSE: get reads for certain motifs across certain tumors
## INPUT: manifest data all-tumor-manifest.csv
## collapsed fastq files sample.converted.unpaired.fastq.collapsed
## OUTPUT: table containing reads for specific motif across samples motif.tumor.common.reads.fastq.collapsed.summary.tsv
import os
import os.path
import numpy as np
import pandas as pd
import collections
import subprocess
from pathlib import Path
import time
base_path = '/media/user/2TB (MAC)/Susanna/'
collapsed_ext = '.converted.unpaired.fastq.collapsed'
manifest_file = base_path + 'all-tumor-manifest.csv'
manifest_data =pd.read_csv(manifest_file, header='infer', sep=',')
'''
file = base_path + 'TARGET/TARGET-manifest.csv'
data = pd.read_csv(file, header='infer', sep=',')
data['DISEASE.ABBV'] = 'TARGET'
manifest_data = pd.concat([manifest_data, data])
print(manifest_data.shape)
manifest_data.to_csv(manifest_file, sep=',', index=False)
'''
def getCollapsedFastqDataframe(file):
df = pd.read_table(file, header=None, delim_whitespace=True)
df = df.dropna(axis=1, how='all')
sample = file.split('/')
sample = sample[len(sample)-1]
sample = sample.split('.')[0]
df.columns = ['READS', 'SEQUENCE']
return df
def getManifestID(name, tumor):
id = manifest_data.loc[(manifest_data['DISEASE.ABBV']==tumor) & (manifest_data['NAME']==name)]['ID']
id = id.tolist()[0]
id = str(id)
return str(id)
motifs = ['TGGTTATCTAGCT', 'TTATCAGACTGAT']
mirnas = ['hsa-miR-9-5p-1-2-3', 'hsa-miR-21-5p']
i = 1
motif = motifs[i]
mirna = mirnas[i]
for subdir, dirs, files in os.walk(base_path):
if '/collapsed_fastq' in subdir:
folders = subdir.split('/')
tumor = folders[len(folders)-2]
if tumor in ['THCA', 'STAD', 'SKCM', 'PCPG']:
continue
print(tumor)
summary_file = base_path + 'motif_reads/' + mirna + '/' + motif + '.' + tumor + '.common.reads.fastq.collapsed.summary.tsv'
if Path(summary_file).exists():
summary_data = pd.read_table(summary_file, header='infer', sep='\t')
else:
print('SUMMARY FILE NOT FOUND')
summary_data = pd.DataFrame({'SEQUENCE':[]})
matched_ids = list(summary_data)
common_seqs = list(summary_data['SEQUENCE'])
total_time_start = time.time()
for f in os.listdir(subdir):
time_start = time.time()
if f[0] == '.':
break
patient = f.split('.')[0]
id = getManifestID(patient, tumor)
if id not in matched_ids:
matched_ids.append(id)
if Path(summary_file).exists():
summary_data = pd.read_table(summary_file, header='infer', sep='\t')
else:
print('SUMMARY FILE NOT FOUND')
summary_data = pd.DataFrame({'SEQUENCE':[]})
matched_ids = list(summary_data)
common_seqs = list(summary_data['SEQUENCE'])
#matched_seq = list(summary_data['SEQUENCE'])
summary_data = None
collapsed_file = subdir+'/'+f
collapsed_data = getCollapsedFastqDataframe(collapsed_file)
#print(collapsed_data.shape[0])
if len(common_seqs) > 0:
collapsed_data = collapsed_data[collapsed_data.SEQUENCE.isin(common_seqs)]
num_rows = collapsed_data.shape[0]
#print(collapsed_data.shape[0])
collapsed_data.columns = [str(id), 'SEQUENCE']
match_collapsed_data = collapsed_data #pd.DataFrame(columns = ['READS', 'SEQUENCE'])
match_collapsed_data.columns = [str(id), 'SEQUENCE']
if Path(summary_file).exists():
summary_data = pd.read_table(summary_file, header='infer', sep='\t')
summary_data = pd.merge(summary_data, match_collapsed_data, on='SEQUENCE', sort=False, how='inner')
else:
summary_data = match_collapsed_data
summary_data.to_csv(summary_file, sep='\t', index=False)
summary_data = pd.DataFrame({'SEQUENCE':[]})
time_end = time.time()
#print('TUMOR: ' + tumor + ' SAMPLE: ' + str(patient) + ' TOTAL TIME: ' + str((time_end-time_start)/60) + ' ROWS: ' + str(num_rows))
summary_data = pd.read_table(summary_file, header='infer', sep='\t')
match_summary_data = summary_data.copy()
for index, row in summary_data.iterrows():
sequence = str(row['SEQUENCE'])
if motif not in sequence:
match_summary_data = match_summary_data[match_summary_data.SEQUENCE != sequence]
match_summary_data.to_csv(summary_file, sep='\t', index=False)
total_time_end = time.time()
print('TOTAl TUMOR TIME: ' + str(total_time_end-total_time_start))
| [
2,
3,
4,
5,
6
] |
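The merging loop above repeatedly inner-joins each sample's collapsed read counts onto the running summary keyed by SEQUENCE, so only sequences seen in every processed sample survive, each gaining one count column per sample. A toy illustration of that core step with made-up sequences and counts (the column names S1 and S2 are invented for the example):

import pandas as pd

summary = pd.DataFrame({'SEQUENCE': ['AAA', 'CCC', 'GGG'], 'S1': [10, 5, 2]})
sample2 = pd.DataFrame({'S2': [7, 3], 'SEQUENCE': ['AAA', 'GGG']})

summary = pd.merge(summary, sample2, on='SEQUENCE', sort=False, how='inner')
print(summary)
# only AAA and GGG remain, each now carrying counts from both samples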
1,466 | e2feb12b88babbbfa4cc8447c91e8a5b6c30f78b | <mask token>
@utility.init()
def init():
if utility.is_test():
return
api.init()
time.sleep(3)
def wait():
global g_threads
for t in g_threads:
t.join()
g_threads.clear()
@utility.fini()
def fini():
if utility.is_test():
return
api.fini()
wait()
<mask token>
def getStockA(loc):
if loc[0:6] != 'stockA':
return None
m = re.search('stockA_row(\\d+)_col(\\d+).*', loc)
if m is None:
return None
row = int(m.group(1))
col = int(m.group(2))
if row is None:
return
if row % 2 != 1:
row -= 1
return row * 1000 + col
@lock.lock(g_lock)
def checkTimeout(index, agvId, loc):
global g_stockLock
if index in g_stockLock:
if utility.ticks() - g_stockLock[index] > 10 * 60 * 1000:
unlockStockA(agvId, loc)
log.warning('delete timeout locked', index)
def lockStockA(agvId, loc):
global g_stockLock
index = getStockA(loc)
if index is None:
return
if index in g_stockLock:
checkTimeout(index, agvId, loc)
log.warning(agvId, loc, 'is locked, wait for unlock')
for i in range(60 * 5):
if index not in g_stockLock:
break
time.sleep(1)
log.info(agvId, loc, 'wait for unlock success')
global g_lock
log.debug(agvId, 'lock', loc, index)
g_lock.acquire()
g_stockLock[index] = utility.ticks()
g_lock.release()
@lock.lock(g_lock)
def unlockStockA(agvId, loc):
global g_stockLock
index = getStockA(loc)
if index in g_stockLock:
log.debug(agvId, 'unlock', loc, index)
del g_stockLock[index]
@lock.lock(g_lock)
def getPoint(originPoint):
global g_point
loadPoint()
if g_point[originPoint] is not None:
return g_point[originPoint]
return originPoint
@lock.lock(g_lock)
def getOriginPoint(point):
global g_point
loadPoint()
for itemIndex in g_point:
if g_point[itemIndex] == point:
return itemIndex
return point
@lock.lock(g_lock)
def loadPoint():
global g_point
filePath = os.path.dirname(__file__)
fileName = 'point.cfg'
if filePath:
fileName = filePath + '/' + fileName
g_point = json_codec.load_file(fileName)
@lock.lock(g_lock)
def checkCart(cartId, scanId):
scanId = scanId.strip()
def loadCart():
global g_carts
p = os.path.dirname(__file__)
pp = 'cart.cfg'
if p:
pp = p + '/' + pp
g_carts = json_codec.load_file(pp)
def saveCart():
global g_carts
p = os.path.dirname(__file__)
pp = 'cart.cfg'
if p:
pp = p + '/' + pp
json_codec.dump_file(pp, g_carts)
def findCart(scanId):
global g_carts
for c in g_carts:
if g_carts[c] == scanId:
return c
return 'unknown'
global g_carts
if g_carts is None:
loadCart()
if cartId in g_carts:
if scanId != g_carts[cartId]:
log.error('货架ID不正确,期望货架:' + cartId + ', 实际货架:' + findCart(scanId))
raise Exception('货架ID不正确,期望货架:' + cartId + ', 实际货架:' + findCart
(scanId))
else:
g_carts[cartId] = scanId
saveCart()
def _run(func, args, callback, obj):
def threadFunc(func, args, callback, obj):
hasCallback = False
try:
func(*args)
if utility.is_exited():
return
hasCallback = True
callback(obj)
except Exception as e:
obj['result'] = -1
obj['resultDesc'] = str(e)
log.exception('agvCtrl:', e)
if 'agv' in obj:
agvId = obj['agv']
log.debug('小车:' + agvId + ',出现未经处理的异常,正在返航 ')
restAgv(agvId)
freeAgv(agvId)
if not hasCallback:
callback(obj)
t = threading.Thread(target=threadFunc, args=(func, args, callback, obj))
global g_threads
t.start()
g_threads.append(t)
def _initObj(obj, agvId):
obj['agv'] = agvId
obj['result'] = 0
obj['resultDesc'] = 'success'
<mask token>
def apply(locId):
locId = getOriginPoint(locId)
return api.apply(locId + '.1')
def call(agvId, locId, finishCallback, obj):
_initObj(obj, agvId)
locId = getOriginPoint(locId)
try:
_run(func=_call, args=(agvId, locId), callback=finishCallback, obj=obj)
except Exception as e:
restAgv(agvId)
freeAgv(agvId)
raise e
return agvId
<mask token>
def moveCart(agvId, cartId, srcLoc, locId, finishCallback, obj):
_initObj(obj, agvId)
assert api.isCartLoc(cartId)
srcLoc = getOriginPoint(srcLoc)
locId = getOriginPoint(locId)
try:
_run(func=_moveCart, args=(agvId, srcLoc, locId, cartId), callback=
finishCallback, obj=obj)
except Exception as e:
restAgv(agvId)
freeAgv(agvId)
raise e
<mask token>
def freeAgv(agvId):
try:
api.unlock(agvId)
except Exception as e:
log.exception('freeAgv', e)
def restAgv(agvId):
agvId2 = api.getAgvId(agvId)
api.reset(agvId2)
<mask token>
def testgetPoint():
resulta = getPoint('StockA_row7_col4')
assert resulta == 'begin_1'
resultb = getPoint('StockA_row8_col4')
assert resultb == 'begin_2'
def testgetOrginPoint():
resulta = getOriginPoint('begin_1')
assert resulta == 'StockA_row7_col4'
resultb = getOriginPoint('begin_2')
assert resultb == 'StockA_row8_col4'
resultc = getOriginPoint('hhahahaa')
assert resultc == 'hhahahaa'
<mask token>
def testcheckCart():
global g_carts
g_carts = None
checkCart('CART9001', '591')
checkCart('CART9002', '592')
gg = json_codec.load_file('cart.cfg')
assert 'CART9001' in gg
assert 'CART9002' in gg
assert gg['CART9001'] == '591'
assert gg['CART9002'] == '592'
checkCart('CART9002', '592')
checkCart('CART9001', '591')
try:
checkCart('CART9002', '591')
assert 0
except Exception as e:
s = str(e)
assert s.find('货架ID不正确,期望货架:CART9002, 实际货架:CART9001') != -1
<mask token>
def func2(stock1, stock2):
print('-------------------- start thread ------------------------',
stock1, stock2)
time.sleep(1)
cartId = 'CART9009'
for i in range(20):
print('current loop is - ', i.__str__())
move_cart(cartId, stock1, stock2)
move_cart(cartId, stock2, stock1)
print('current loop end - ', i.__str__())
print('=======================================')
print('finish func2')
print('=======================================')
def func3(times, starts, seats):
current = starts
cartId = 'CART9009'
time.sleep(1)
for loop in range(0, times - 1):
tip1 = 'currentLoop is ' + loop.__str__(
) + ' currentStart is ' + current
print(tip1)
for i in range(0, len(seats)):
next = str(seats[i])
tip2 = ('currentLoop is ' + loop.__str__() +
'currentOrigin is ' + current + 'currentNext is ' + next +
' seatIndex is ' + i.__str__())
print(tip2)
print('excuting')
move_cart(cartId, current, next)
current = next
def testPageAgvControl(jsonstr):
jsonData = json.loads(jsonstr)
result = False
if len(jsonData) == 0:
result = False
else:
for currentJson in jsonData:
start = currentJson['start']
seat = currentJson['seat']
loop = int(currentJson['loop'])
seats = str.split(seat, ',')
durabilityTestTask1 = threading.Thread(target=func3, args=[loop,
start, seats])
durabilityTestTask1.start()
result = True
return result
<mask token>
def testPageUnloockAll():
api.unlockAll()
<mask token>
def test1():
Init()
durabilityTestTask1 = threading.Thread(target=func3, args=[20,
'stockA_row1_col3', ['stockA_row1_col2', 'stockA_row1_col4']])
durabilityTestTask1.start()
durabilityTestTask2 = threading.Thread(target=func3, args=[20,
'stockA_row1_col2', ['seat2_1', 'stockA_row4_col2']])
durabilityTestTask3 = threading.Thread(target=func3, args=[20,
'stockA_row5_col3', ['seat16_1', 'stockA_row5_col2']])
durabilityTestTask4 = threading.Thread(target=func3, args=[20,
'stockA_row6_col3', ['seat12_1', 'stockA_row6_col2']])
durabilityTestTask1.join()
print('===============ALL FINISH ========================')
<mask token>
| <mask token>
@utility.init()
def init():
if utility.is_test():
return
api.init()
time.sleep(3)
def wait():
global g_threads
for t in g_threads:
t.join()
g_threads.clear()
@utility.fini()
def fini():
if utility.is_test():
return
api.fini()
wait()
<mask token>
def getStockA(loc):
if loc[0:6] != 'stockA':
return None
m = re.search('stockA_row(\\d+)_col(\\d+).*', loc)
if m is None:
return None
row = int(m.group(1))
col = int(m.group(2))
if row is None:
return
if row % 2 != 1:
row -= 1
return row * 1000 + col
@lock.lock(g_lock)
def checkTimeout(index, agvId, loc):
global g_stockLock
if index in g_stockLock:
if utility.ticks() - g_stockLock[index] > 10 * 60 * 1000:
unlockStockA(agvId, loc)
log.warning('delete timeout locked', index)
def lockStockA(agvId, loc):
global g_stockLock
index = getStockA(loc)
if index is None:
return
if index in g_stockLock:
checkTimeout(index, agvId, loc)
log.warning(agvId, loc, 'is locked, wait for unlock')
for i in range(60 * 5):
if index not in g_stockLock:
break
time.sleep(1)
log.info(agvId, loc, 'wait for unlock success')
global g_lock
log.debug(agvId, 'lock', loc, index)
g_lock.acquire()
g_stockLock[index] = utility.ticks()
g_lock.release()
@lock.lock(g_lock)
def unlockStockA(agvId, loc):
global g_stockLock
index = getStockA(loc)
if index in g_stockLock:
log.debug(agvId, 'unlock', loc, index)
del g_stockLock[index]
@lock.lock(g_lock)
def getPoint(originPoint):
global g_point
loadPoint()
if g_point[originPoint] is not None:
return g_point[originPoint]
return originPoint
@lock.lock(g_lock)
def getOriginPoint(point):
global g_point
loadPoint()
for itemIndex in g_point:
if g_point[itemIndex] == point:
return itemIndex
return point
@lock.lock(g_lock)
def loadPoint():
global g_point
filePath = os.path.dirname(__file__)
fileName = 'point.cfg'
if filePath:
fileName = filePath + '/' + fileName
g_point = json_codec.load_file(fileName)
@lock.lock(g_lock)
def checkCart(cartId, scanId):
scanId = scanId.strip()
def loadCart():
global g_carts
p = os.path.dirname(__file__)
pp = 'cart.cfg'
if p:
pp = p + '/' + pp
g_carts = json_codec.load_file(pp)
def saveCart():
global g_carts
p = os.path.dirname(__file__)
pp = 'cart.cfg'
if p:
pp = p + '/' + pp
json_codec.dump_file(pp, g_carts)
def findCart(scanId):
global g_carts
for c in g_carts:
if g_carts[c] == scanId:
return c
return 'unknown'
global g_carts
if g_carts is None:
loadCart()
if cartId in g_carts:
if scanId != g_carts[cartId]:
log.error('货架ID不正确,期望货架:' + cartId + ', 实际货架:' + findCart(scanId))
raise Exception('货架ID不正确,期望货架:' + cartId + ', 实际货架:' + findCart
(scanId))
else:
g_carts[cartId] = scanId
saveCart()
def _run(func, args, callback, obj):
def threadFunc(func, args, callback, obj):
hasCallback = False
try:
func(*args)
if utility.is_exited():
return
hasCallback = True
callback(obj)
except Exception as e:
obj['result'] = -1
obj['resultDesc'] = str(e)
log.exception('agvCtrl:', e)
if 'agv' in obj:
agvId = obj['agv']
log.debug('小车:' + agvId + ',出现未经处理的异常,正在返航 ')
restAgv(agvId)
freeAgv(agvId)
if not hasCallback:
callback(obj)
t = threading.Thread(target=threadFunc, args=(func, args, callback, obj))
global g_threads
t.start()
g_threads.append(t)
def _initObj(obj, agvId):
obj['agv'] = agvId
obj['result'] = 0
obj['resultDesc'] = 'success'
<mask token>
def apply(locId):
locId = getOriginPoint(locId)
return api.apply(locId + '.1')
def call(agvId, locId, finishCallback, obj):
_initObj(obj, agvId)
locId = getOriginPoint(locId)
try:
_run(func=_call, args=(agvId, locId), callback=finishCallback, obj=obj)
except Exception as e:
restAgv(agvId)
freeAgv(agvId)
raise e
return agvId
def _moveCart(agvId, srcLoc, locId, cartId):
try:
c = api.mission(agvId, 2)
if c:
checkCart(cartId, c)
api.move(agvId, srcLoc + '.2')
except Exception as e:
pass
finally:
unlockStockA(agvId, srcLoc)
loc, type = api.getMissionType('get', '', srcLoc)
api.mission(agvId, type)
loc, type = api.getMissionType('put', srcLoc, locId)
api.move(agvId, loc + '.3')
api.mission(agvId, type)
lockStockA(agvId, locId)
try:
api.move(agvId, locId + '.4')
api.mission(agvId, 5)
api.move(agvId, locId + '.5')
finally:
unlockStockA(agvId, locId)
freeAgv(agvId)
def moveCart(agvId, cartId, srcLoc, locId, finishCallback, obj):
_initObj(obj, agvId)
assert api.isCartLoc(cartId)
srcLoc = getOriginPoint(srcLoc)
locId = getOriginPoint(locId)
try:
_run(func=_moveCart, args=(agvId, srcLoc, locId, cartId), callback=
finishCallback, obj=obj)
except Exception as e:
restAgv(agvId)
freeAgv(agvId)
raise e
def move(agvId, locId, finishCallback, obj):
_initObj(obj, agvId)
try:
locId = getOriginPoint(locId)
_run(func=api.move, args=(agvId, locId), callback=finishCallback,
obj=obj)
except Exception as e:
freeAgv(agvId)
raise e
def freeAgv(agvId):
try:
api.unlock(agvId)
except Exception as e:
log.exception('freeAgv', e)
def restAgv(agvId):
agvId2 = api.getAgvId(agvId)
api.reset(agvId2)
<mask token>
def testgetPoint():
resulta = getPoint('StockA_row7_col4')
assert resulta == 'begin_1'
resultb = getPoint('StockA_row8_col4')
assert resultb == 'begin_2'
def testgetOrginPoint():
resulta = getOriginPoint('begin_1')
assert resulta == 'StockA_row7_col4'
resultb = getOriginPoint('begin_2')
assert resultb == 'StockA_row8_col4'
resultc = getOriginPoint('hhahahaa')
assert resultc == 'hhahahaa'
def testgetStockA():
assert getStockA('stockA_row10_col3') == 9003
assert getStockA('stockA_row10_col4') == 9004
assert getStockA('stockA_row1_col1') == 1001
assert getStockA('stockA_row2_col2') == 1002
assert getStockA('stockA_row3_col2') == 3002
assert getStockA('stockA_row4_col2') == 3002
assert getStockA('stockA_row4_col2.1') == 3002
assert getStockA('stockB_row4_col2.1') == None
assert getStockA('begin_1') == None
assert getStockA('seat_1') == None
def testcheckCart():
global g_carts
g_carts = None
checkCart('CART9001', '591')
checkCart('CART9002', '592')
gg = json_codec.load_file('cart.cfg')
assert 'CART9001' in gg
assert 'CART9002' in gg
assert gg['CART9001'] == '591'
assert gg['CART9002'] == '592'
checkCart('CART9002', '592')
checkCart('CART9001', '591')
try:
checkCart('CART9002', '591')
assert 0
except Exception as e:
s = str(e)
assert s.find('货架ID不正确,期望货架:CART9002, 实际货架:CART9001') != -1
<mask token>
def func2(stock1, stock2):
print('-------------------- start thread ------------------------',
stock1, stock2)
time.sleep(1)
cartId = 'CART9009'
for i in range(20):
print('current loop is - ', i.__str__())
move_cart(cartId, stock1, stock2)
move_cart(cartId, stock2, stock1)
print('current loop end - ', i.__str__())
print('=======================================')
print('finish func2')
print('=======================================')
def func3(times, starts, seats):
current = starts
cartId = 'CART9009'
time.sleep(1)
for loop in range(0, times - 1):
tip1 = 'currentLoop is ' + loop.__str__(
) + ' currentStart is ' + current
print(tip1)
for i in range(0, len(seats)):
next = str(seats[i])
tip2 = ('currentLoop is ' + loop.__str__() +
'currentOrigin is ' + current + 'currentNext is ' + next +
' seatIndex is ' + i.__str__())
print(tip2)
print('excuting')
move_cart(cartId, current, next)
current = next
def testPageAgvControl(jsonstr):
jsonData = json.loads(jsonstr)
result = False
if len(jsonData) == 0:
result = False
else:
for currentJson in jsonData:
start = currentJson['start']
seat = currentJson['seat']
loop = int(currentJson['loop'])
seats = str.split(seat, ',')
durabilityTestTask1 = threading.Thread(target=func3, args=[loop,
start, seats])
durabilityTestTask1.start()
result = True
return result
<mask token>
def testPageUnloockAll():
api.unlockAll()
<mask token>
def test1():
Init()
durabilityTestTask1 = threading.Thread(target=func3, args=[20,
'stockA_row1_col3', ['stockA_row1_col2', 'stockA_row1_col4']])
durabilityTestTask1.start()
durabilityTestTask2 = threading.Thread(target=func3, args=[20,
'stockA_row1_col2', ['seat2_1', 'stockA_row4_col2']])
durabilityTestTask3 = threading.Thread(target=func3, args=[20,
'stockA_row5_col3', ['seat16_1', 'stockA_row5_col2']])
durabilityTestTask4 = threading.Thread(target=func3, args=[20,
'stockA_row6_col3', ['seat12_1', 'stockA_row6_col2']])
durabilityTestTask1.join()
print('===============ALL FINISH ========================')
<mask token>
| <mask token>
@utility.init()
def init():
if utility.is_test():
return
api.init()
time.sleep(3)
def wait():
global g_threads
for t in g_threads:
t.join()
g_threads.clear()
@utility.fini()
def fini():
if utility.is_test():
return
api.fini()
wait()
<mask token>
def getStockA(loc):
if loc[0:6] != 'stockA':
return None
m = re.search('stockA_row(\\d+)_col(\\d+).*', loc)
if m is None:
return None
row = int(m.group(1))
col = int(m.group(2))
if row is None:
return
if row % 2 != 1:
row -= 1
return row * 1000 + col
@lock.lock(g_lock)
def checkTimeout(index, agvId, loc):
global g_stockLock
if index in g_stockLock:
if utility.ticks() - g_stockLock[index] > 10 * 60 * 1000:
unlockStockA(agvId, loc)
log.warning('delete timeout locked', index)
def lockStockA(agvId, loc):
global g_stockLock
index = getStockA(loc)
if index is None:
return
if index in g_stockLock:
checkTimeout(index, agvId, loc)
log.warning(agvId, loc, 'is locked, wait for unlock')
for i in range(60 * 5):
if index not in g_stockLock:
break
time.sleep(1)
log.info(agvId, loc, 'wait for unlock success')
global g_lock
log.debug(agvId, 'lock', loc, index)
g_lock.acquire()
g_stockLock[index] = utility.ticks()
g_lock.release()
@lock.lock(g_lock)
def unlockStockA(agvId, loc):
global g_stockLock
index = getStockA(loc)
if index in g_stockLock:
log.debug(agvId, 'unlock', loc, index)
del g_stockLock[index]
@lock.lock(g_lock)
def getPoint(originPoint):
global g_point
loadPoint()
if g_point[originPoint] is not None:
return g_point[originPoint]
return originPoint
@lock.lock(g_lock)
def getOriginPoint(point):
global g_point
loadPoint()
for itemIndex in g_point:
if g_point[itemIndex] == point:
return itemIndex
return point
@lock.lock(g_lock)
def loadPoint():
global g_point
filePath = os.path.dirname(__file__)
fileName = 'point.cfg'
if filePath:
fileName = filePath + '/' + fileName
g_point = json_codec.load_file(fileName)
@lock.lock(g_lock)
def checkCart(cartId, scanId):
scanId = scanId.strip()
def loadCart():
global g_carts
p = os.path.dirname(__file__)
pp = 'cart.cfg'
if p:
pp = p + '/' + pp
g_carts = json_codec.load_file(pp)
def saveCart():
global g_carts
p = os.path.dirname(__file__)
pp = 'cart.cfg'
if p:
pp = p + '/' + pp
json_codec.dump_file(pp, g_carts)
def findCart(scanId):
global g_carts
for c in g_carts:
if g_carts[c] == scanId:
return c
return 'unknown'
global g_carts
if g_carts is None:
loadCart()
if cartId in g_carts:
if scanId != g_carts[cartId]:
log.error('货架ID不正确,期望货架:' + cartId + ', 实际货架:' + findCart(scanId))
raise Exception('货架ID不正确,期望货架:' + cartId + ', 实际货架:' + findCart
(scanId))
else:
g_carts[cartId] = scanId
saveCart()
def _run(func, args, callback, obj):
def threadFunc(func, args, callback, obj):
hasCallback = False
try:
func(*args)
if utility.is_exited():
return
hasCallback = True
callback(obj)
except Exception as e:
obj['result'] = -1
obj['resultDesc'] = str(e)
log.exception('agvCtrl:', e)
if 'agv' in obj:
agvId = obj['agv']
log.debug('小车:' + agvId + ',出现未经处理的异常,正在返航 ')
restAgv(agvId)
freeAgv(agvId)
if not hasCallback:
callback(obj)
t = threading.Thread(target=threadFunc, args=(func, args, callback, obj))
global g_threads
t.start()
g_threads.append(t)
def _initObj(obj, agvId):
obj['agv'] = agvId
obj['result'] = 0
obj['resultDesc'] = 'success'
def _call(agvId, locId):
if api.isCartLoc(locId):
api.move(agvId, locId + '.1')
lockStockA(agvId, locId)
try:
api.mission(agvId, 1)
except Exception as e:
unlockStockA(agvId, locId)
raise e
else:
api.move(agvId, locId)
def apply(locId):
locId = getOriginPoint(locId)
return api.apply(locId + '.1')
def call(agvId, locId, finishCallback, obj):
_initObj(obj, agvId)
locId = getOriginPoint(locId)
try:
_run(func=_call, args=(agvId, locId), callback=finishCallback, obj=obj)
except Exception as e:
restAgv(agvId)
freeAgv(agvId)
raise e
return agvId
def _moveCart(agvId, srcLoc, locId, cartId):
try:
c = api.mission(agvId, 2)
if c:
checkCart(cartId, c)
api.move(agvId, srcLoc + '.2')
except Exception as e:
pass
finally:
unlockStockA(agvId, srcLoc)
loc, type = api.getMissionType('get', '', srcLoc)
api.mission(agvId, type)
loc, type = api.getMissionType('put', srcLoc, locId)
api.move(agvId, loc + '.3')
api.mission(agvId, type)
lockStockA(agvId, locId)
try:
api.move(agvId, locId + '.4')
api.mission(agvId, 5)
api.move(agvId, locId + '.5')
finally:
unlockStockA(agvId, locId)
freeAgv(agvId)
def moveCart(agvId, cartId, srcLoc, locId, finishCallback, obj):
_initObj(obj, agvId)
assert api.isCartLoc(cartId)
srcLoc = getOriginPoint(srcLoc)
locId = getOriginPoint(locId)
try:
_run(func=_moveCart, args=(agvId, srcLoc, locId, cartId), callback=
finishCallback, obj=obj)
except Exception as e:
restAgv(agvId)
freeAgv(agvId)
raise e
def move(agvId, locId, finishCallback, obj):
_initObj(obj, agvId)
try:
locId = getOriginPoint(locId)
_run(func=api.move, args=(agvId, locId), callback=finishCallback,
obj=obj)
except Exception as e:
freeAgv(agvId)
raise e
def freeAgv(agvId):
try:
api.unlock(agvId)
except Exception as e:
log.exception('freeAgv', e)
def restAgv(agvId):
agvId2 = api.getAgvId(agvId)
api.reset(agvId2)
<mask token>
def testgetPoint():
resulta = getPoint('StockA_row7_col4')
assert resulta == 'begin_1'
resultb = getPoint('StockA_row8_col4')
assert resultb == 'begin_2'
def testgetOrginPoint():
resulta = getOriginPoint('begin_1')
assert resulta == 'StockA_row7_col4'
resultb = getOriginPoint('begin_2')
assert resultb == 'StockA_row8_col4'
resultc = getOriginPoint('hhahahaa')
assert resultc == 'hhahahaa'
def testgetStockA():
assert getStockA('stockA_row10_col3') == 9003
assert getStockA('stockA_row10_col4') == 9004
assert getStockA('stockA_row1_col1') == 1001
assert getStockA('stockA_row2_col2') == 1002
assert getStockA('stockA_row3_col2') == 3002
assert getStockA('stockA_row4_col2') == 3002
assert getStockA('stockA_row4_col2.1') == 3002
assert getStockA('stockB_row4_col2.1') == None
assert getStockA('begin_1') == None
assert getStockA('seat_1') == None
def testcheckCart():
global g_carts
g_carts = None
checkCart('CART9001', '591')
checkCart('CART9002', '592')
gg = json_codec.load_file('cart.cfg')
assert 'CART9001' in gg
assert 'CART9002' in gg
assert gg['CART9001'] == '591'
assert gg['CART9002'] == '592'
checkCart('CART9002', '592')
checkCart('CART9001', '591')
try:
checkCart('CART9002', '591')
assert 0
except Exception as e:
s = str(e)
assert s.find('货架ID不正确,期望货架:CART9002, 实际货架:CART9001') != -1
<mask token>
def func2(stock1, stock2):
print('-------------------- start thread ------------------------',
stock1, stock2)
time.sleep(1)
cartId = 'CART9009'
for i in range(20):
print('current loop is - ', i.__str__())
move_cart(cartId, stock1, stock2)
move_cart(cartId, stock2, stock1)
print('current loop end - ', i.__str__())
print('=======================================')
print('finish func2')
print('=======================================')
def func3(times, starts, seats):
current = starts
cartId = 'CART9009'
time.sleep(1)
for loop in range(0, times - 1):
tip1 = 'currentLoop is ' + loop.__str__(
) + ' currentStart is ' + current
print(tip1)
for i in range(0, len(seats)):
next = str(seats[i])
tip2 = ('currentLoop is ' + loop.__str__() +
'currentOrigin is ' + current + 'currentNext is ' + next +
' seatIndex is ' + i.__str__())
print(tip2)
print('excuting')
move_cart(cartId, current, next)
current = next
def testPageAgvControl(jsonstr):
jsonData = json.loads(jsonstr)
result = False
if len(jsonData) == 0:
result = False
else:
for currentJson in jsonData:
start = currentJson['start']
seat = currentJson['seat']
loop = int(currentJson['loop'])
seats = str.split(seat, ',')
durabilityTestTask1 = threading.Thread(target=func3, args=[loop,
start, seats])
durabilityTestTask1.start()
result = True
return result
<mask token>
def testPageUnloockAll():
api.unlockAll()
<mask token>
def test1():
Init()
durabilityTestTask1 = threading.Thread(target=func3, args=[20,
'stockA_row1_col3', ['stockA_row1_col2', 'stockA_row1_col4']])
durabilityTestTask1.start()
durabilityTestTask2 = threading.Thread(target=func3, args=[20,
'stockA_row1_col2', ['seat2_1', 'stockA_row4_col2']])
durabilityTestTask3 = threading.Thread(target=func3, args=[20,
'stockA_row5_col3', ['seat16_1', 'stockA_row5_col2']])
durabilityTestTask4 = threading.Thread(target=func3, args=[20,
'stockA_row6_col3', ['seat12_1', 'stockA_row6_col2']])
durabilityTestTask1.join()
print('===============ALL FINISH ========================')
<mask token>
| <mask token>
@utility.init()
def init():
if utility.is_test():
return
api.init()
time.sleep(3)
def wait():
global g_threads
for t in g_threads:
t.join()
g_threads.clear()
@utility.fini()
def fini():
if utility.is_test():
return
api.fini()
wait()
<mask token>
def getStockA(loc):
if loc[0:6] != 'stockA':
return None
m = re.search('stockA_row(\\d+)_col(\\d+).*', loc)
if m is None:
return None
row = int(m.group(1))
col = int(m.group(2))
if row is None:
return
if row % 2 != 1:
row -= 1
return row * 1000 + col
@lock.lock(g_lock)
def checkTimeout(index, agvId, loc):
global g_stockLock
if index in g_stockLock:
if utility.ticks() - g_stockLock[index] > 10 * 60 * 1000:
unlockStockA(agvId, loc)
log.warning('delete timeout locked', index)
def lockStockA(agvId, loc):
global g_stockLock
index = getStockA(loc)
if index is None:
return
if index in g_stockLock:
checkTimeout(index, agvId, loc)
log.warning(agvId, loc, 'is locked, wait for unlock')
for i in range(60 * 5):
if index not in g_stockLock:
break
time.sleep(1)
log.info(agvId, loc, 'wait for unlock success')
global g_lock
log.debug(agvId, 'lock', loc, index)
g_lock.acquire()
g_stockLock[index] = utility.ticks()
g_lock.release()
@lock.lock(g_lock)
def unlockStockA(agvId, loc):
global g_stockLock
index = getStockA(loc)
if index in g_stockLock:
log.debug(agvId, 'unlock', loc, index)
del g_stockLock[index]
@lock.lock(g_lock)
def getPoint(originPoint):
global g_point
loadPoint()
if g_point[originPoint] is not None:
return g_point[originPoint]
return originPoint
@lock.lock(g_lock)
def getOriginPoint(point):
global g_point
loadPoint()
for itemIndex in g_point:
if g_point[itemIndex] == point:
return itemIndex
return point
@lock.lock(g_lock)
def loadPoint():
global g_point
filePath = os.path.dirname(__file__)
fileName = 'point.cfg'
if filePath:
fileName = filePath + '/' + fileName
g_point = json_codec.load_file(fileName)
@lock.lock(g_lock)
def checkCart(cartId, scanId):
scanId = scanId.strip()
def loadCart():
global g_carts
p = os.path.dirname(__file__)
pp = 'cart.cfg'
if p:
pp = p + '/' + pp
g_carts = json_codec.load_file(pp)
def saveCart():
global g_carts
p = os.path.dirname(__file__)
pp = 'cart.cfg'
if p:
pp = p + '/' + pp
json_codec.dump_file(pp, g_carts)
def findCart(scanId):
global g_carts
for c in g_carts:
if g_carts[c] == scanId:
return c
return 'unknown'
global g_carts
if g_carts is None:
loadCart()
if cartId in g_carts:
if scanId != g_carts[cartId]:
log.error('货架ID不正确,期望货架:' + cartId + ', 实际货架:' + findCart(scanId))
raise Exception('货架ID不正确,期望货架:' + cartId + ', 实际货架:' + findCart
(scanId))
else:
g_carts[cartId] = scanId
saveCart()
def _run(func, args, callback, obj):
def threadFunc(func, args, callback, obj):
hasCallback = False
try:
func(*args)
if utility.is_exited():
return
hasCallback = True
callback(obj)
except Exception as e:
obj['result'] = -1
obj['resultDesc'] = str(e)
log.exception('agvCtrl:', e)
if 'agv' in obj:
agvId = obj['agv']
log.debug('小车:' + agvId + ',出现未经处理的异常,正在返航 ')
restAgv(agvId)
freeAgv(agvId)
if not hasCallback:
callback(obj)
t = threading.Thread(target=threadFunc, args=(func, args, callback, obj))
global g_threads
t.start()
g_threads.append(t)
def _initObj(obj, agvId):
obj['agv'] = agvId
obj['result'] = 0
obj['resultDesc'] = 'success'
def _call(agvId, locId):
if api.isCartLoc(locId):
api.move(agvId, locId + '.1')
lockStockA(agvId, locId)
try:
api.mission(agvId, 1)
except Exception as e:
unlockStockA(agvId, locId)
raise e
else:
api.move(agvId, locId)
def apply(locId):
locId = getOriginPoint(locId)
return api.apply(locId + '.1')
def call(agvId, locId, finishCallback, obj):
_initObj(obj, agvId)
locId = getOriginPoint(locId)
try:
_run(func=_call, args=(agvId, locId), callback=finishCallback, obj=obj)
except Exception as e:
restAgv(agvId)
freeAgv(agvId)
raise e
return agvId
def _moveCart(agvId, srcLoc, locId, cartId):
try:
c = api.mission(agvId, 2)
if c:
checkCart(cartId, c)
api.move(agvId, srcLoc + '.2')
except Exception as e:
pass
finally:
unlockStockA(agvId, srcLoc)
loc, type = api.getMissionType('get', '', srcLoc)
api.mission(agvId, type)
loc, type = api.getMissionType('put', srcLoc, locId)
api.move(agvId, loc + '.3')
api.mission(agvId, type)
lockStockA(agvId, locId)
try:
api.move(agvId, locId + '.4')
api.mission(agvId, 5)
api.move(agvId, locId + '.5')
finally:
unlockStockA(agvId, locId)
freeAgv(agvId)
def moveCart(agvId, cartId, srcLoc, locId, finishCallback, obj):
_initObj(obj, agvId)
assert api.isCartLoc(cartId)
srcLoc = getOriginPoint(srcLoc)
locId = getOriginPoint(locId)
try:
_run(func=_moveCart, args=(agvId, srcLoc, locId, cartId), callback=
finishCallback, obj=obj)
except Exception as e:
restAgv(agvId)
freeAgv(agvId)
raise e
def move(agvId, locId, finishCallback, obj):
_initObj(obj, agvId)
try:
locId = getOriginPoint(locId)
_run(func=api.move, args=(agvId, locId), callback=finishCallback,
obj=obj)
except Exception as e:
freeAgv(agvId)
raise e
def freeAgv(agvId):
try:
api.unlock(agvId)
except Exception as e:
log.exception('freeAgv', e)
def restAgv(agvId):
agvId2 = api.getAgvId(agvId)
api.reset(agvId2)
def Init():
import interface.dashboard.dashboardApi
locationEvent.connect(interface.dashboard.dashboardApi.reportAgvLoc)
time.sleep(3)
def testgetPoint():
resulta = getPoint('StockA_row7_col4')
assert resulta == 'begin_1'
resultb = getPoint('StockA_row8_col4')
assert resultb == 'begin_2'
def testgetOrginPoint():
resulta = getOriginPoint('begin_1')
assert resulta == 'StockA_row7_col4'
resultb = getOriginPoint('begin_2')
assert resultb == 'StockA_row8_col4'
resultc = getOriginPoint('hhahahaa')
assert resultc == 'hhahahaa'
def testgetStockA():
assert getStockA('stockA_row10_col3') == 9003
assert getStockA('stockA_row10_col4') == 9004
assert getStockA('stockA_row1_col1') == 1001
assert getStockA('stockA_row2_col2') == 1002
assert getStockA('stockA_row3_col2') == 3002
assert getStockA('stockA_row4_col2') == 3002
assert getStockA('stockA_row4_col2.1') == 3002
assert getStockA('stockB_row4_col2.1') == None
assert getStockA('begin_1') == None
assert getStockA('seat_1') == None
def testcheckCart():
global g_carts
g_carts = None
checkCart('CART9001', '591')
checkCart('CART9002', '592')
gg = json_codec.load_file('cart.cfg')
assert 'CART9001' in gg
assert 'CART9002' in gg
assert gg['CART9001'] == '591'
assert gg['CART9002'] == '592'
checkCart('CART9002', '592')
checkCart('CART9001', '591')
try:
checkCart('CART9002', '591')
assert 0
except Exception as e:
s = str(e)
assert s.find('货架ID不正确,期望货架:CART9002, 实际货架:CART9001') != -1
<mask token>
@counter.count
def move_cart(cartId, srcLoc, destLoc, agvId=None):
print(cartId, srcLoc, destLoc)
counter.setPrint(True)
def callback1(obj):
if obj['result'] == -1:
print('error, system exit')
obj['finish'] = True
sys.exit(-1)
else:
log.warning(obj['agv'], 'start move from', obj['loc1'], 'to',
obj['loc2'])
moveCart(obj['agv'], obj['cart'], obj['loc1'], obj['loc2'],
callback2, obj)
def callback2(obj):
if obj['result'] == -1:
print('error, system exit')
obj['finish'] = True
sys.exit(-1)
else:
log.warning(obj['agv'], 'arrived', obj['loc2'])
obj['finish'] = True
obj = {}
obj['loc1'] = srcLoc
obj['loc2'] = destLoc
obj['cart'] = cartId
print('call ', srcLoc)
if agvId is None:
agvId = apply(srcLoc)
call(agvId, srcLoc, callback1, obj)
while not utility.is_exited():
if 'finish' in obj:
break
time.sleep(0.2)
print('------ move ', srcLoc, ' to ', destLoc, ' finish ------')
def func2(stock1, stock2):
print('-------------------- start thread ------------------------',
stock1, stock2)
time.sleep(1)
cartId = 'CART9009'
for i in range(20):
print('current loop is - ', i.__str__())
move_cart(cartId, stock1, stock2)
move_cart(cartId, stock2, stock1)
print('current loop end - ', i.__str__())
print('=======================================')
print('finish func2')
print('=======================================')
def func3(times, starts, seats):
current = starts
cartId = 'CART9009'
time.sleep(1)
for loop in range(0, times - 1):
tip1 = 'currentLoop is ' + loop.__str__(
) + ' currentStart is ' + current
print(tip1)
for i in range(0, len(seats)):
next = str(seats[i])
tip2 = ('currentLoop is ' + loop.__str__() +
'currentOrigin is ' + current + 'currentNext is ' + next +
' seatIndex is ' + i.__str__())
print(tip2)
print('excuting')
move_cart(cartId, current, next)
current = next
def testPageAgvControl(jsonstr):
jsonData = json.loads(jsonstr)
result = False
if len(jsonData) == 0:
result = False
else:
for currentJson in jsonData:
start = currentJson['start']
seat = currentJson['seat']
loop = int(currentJson['loop'])
seats = str.split(seat, ',')
durabilityTestTask1 = threading.Thread(target=func3, args=[loop,
start, seats])
durabilityTestTask1.start()
result = True
return result
def testtestPageAgvControl(jsonstr):
jsonData = json.loads(jsonstr)
result = False
if len(jsonData) == 0:
result = False
else:
for currentJson in jsonData:
start = currentJson['start']
print(start)
time.sleep(3)
seat = currentJson['seat']
seats = str.split(seat, ',')
print(seat)
time.sleep(3)
for currentseat in seats:
print(currentseat)
time.sleep(3)
time.sleep(10)
result = True
return result
def testPageUnloockAll():
api.unlockAll()
def testProcess(jsonData):
utility.start()
testPageAgvControl(jsonData)
utility.finish()
def test1():
Init()
durabilityTestTask1 = threading.Thread(target=func3, args=[20,
'stockA_row1_col3', ['stockA_row1_col2', 'stockA_row1_col4']])
durabilityTestTask1.start()
durabilityTestTask2 = threading.Thread(target=func3, args=[20,
'stockA_row1_col2', ['seat2_1', 'stockA_row4_col2']])
durabilityTestTask3 = threading.Thread(target=func3, args=[20,
'stockA_row5_col3', ['seat16_1', 'stockA_row5_col2']])
durabilityTestTask4 = threading.Thread(target=func3, args=[20,
'stockA_row6_col3', ['seat12_1', 'stockA_row6_col2']])
durabilityTestTask1.join()
print('===============ALL FINISH ========================')
<mask token>
| #coding=utf-8
# ycat 2017-10-20 create
# AGV control
import sys,os
import json
import setup
if __name__ == '__main__':
setup.setCurPath(__file__)
import utility
import enhance
import threading
import time
import log
import re
import lock
import json_codec
import driver.agv.hdcAgvApi as api
g_threads =[]
g_carts = None
g_point = None
g_lock = threading.RLock()
locationEvent = enhance.event()
api.locationEvent.connect(locationEvent.emit)
@utility.init()
def init():
if utility.is_test():
return
api.init()
time.sleep(3)
def wait():
global g_threads
for t in g_threads:
t.join()
g_threads.clear()
@utility.fini()
def fini():
if utility.is_test():
return
api.fini()
wait()
g_stockLock = {}
def getStockA(loc):
if loc[0:6] != "stockA":
return None
m = re.search("stockA_row(\d+)_col(\d+).*",loc)
if m is None:
return None
row = int(m.group(1))
col = int(m.group(2))
if row is None:
return
if row%2 != 1:
row -= 1
return row*1000+col
@lock.lock(g_lock)
def checkTimeout(index,agvId,loc):
global g_stockLock
if index in g_stockLock:
if utility.ticks() - g_stockLock[index] > 10*60*1000:
unlockStockA(agvId,loc)
log.warning("delete timeout locked",index)
#avoid head-on collisions between two AGVs in stockA
def lockStockA(agvId,loc):
global g_stockLock
index = getStockA(loc)
if index is None:
return
if index in g_stockLock:
checkTimeout(index,agvId,loc)
log.warning(agvId,loc,"is locked, wait for unlock")
for i in range(60*5):
if index not in g_stockLock:
break
time.sleep(1)
log.info(agvId,loc,"wait for unlock success")
global g_lock
log.debug(agvId,"lock",loc,index)
g_lock.acquire()
g_stockLock[index] = utility.ticks()
g_lock.release()
@lock.lock(g_lock)
def unlockStockA(agvId,loc):
global g_stockLock
index = getStockA(loc)
if index in g_stockLock:
log.debug(agvId,"unlock",loc,index)
del g_stockLock[index]
@lock.lock(g_lock)
def getPoint(originPoint):
global g_point
loadPoint()
if g_point[originPoint] is not None:
return g_point[originPoint]
return originPoint
@lock.lock(g_lock)
def getOriginPoint(point):
global g_point
loadPoint()
for itemIndex in g_point:
if g_point[itemIndex] == point:
return itemIndex
return point
@lock.lock(g_lock)
def loadPoint():
global g_point
filePath = os.path.dirname(__file__)
fileName = "point.cfg"
if filePath:
fileName = filePath + "/" + fileName
g_point = json_codec.load_file(fileName)
@lock.lock(g_lock)
def checkCart(cartId,scanId):
scanId = scanId.strip()
def loadCart():
global g_carts
p = os.path.dirname(__file__)
pp = "cart.cfg"
if p:
pp = p+"/"+pp
g_carts = json_codec.load_file(pp)
def saveCart():
global g_carts
p = os.path.dirname(__file__)
pp = "cart.cfg"
if p:
pp = p+"/"+pp
json_codec.dump_file(pp,g_carts)
def findCart(scanId):
global g_carts
for c in g_carts:
if g_carts[c] == scanId:
return c
return "unknown"
global g_carts
if g_carts is None:
loadCart()
if cartId in g_carts:
if scanId != g_carts[cartId]:
log.error("货架ID不正确,期望货架:"+cartId+", 实际货架:"+findCart(scanId))
raise Exception("货架ID不正确,期望货架:"+cartId+", 实际货架:"+findCart(scanId))
else:
g_carts[cartId] = scanId
saveCart()
#finishCallback signature: finishCallback(obj)
#obj automatically carries the three fields below
#obj["agv"] = agvId
#obj["result"] = 0
#obj["resultDesc"] = "success"
def _run(func,args,callback,obj):
def threadFunc(func,args,callback,obj):
hasCallback = False
try:
func(*args)
if utility.is_exited():
return
hasCallback = True
callback(obj)
except Exception as e:
obj["result"] = -1
obj["resultDesc"] = str(e)
log.exception("agvCtrl:",e)
if "agv" in obj:
agvId= obj["agv"]
log.debug("小车:"+agvId+",出现未经处理的异常,正在返航 ")
restAgv(agvId)
freeAgv(agvId)
if not hasCallback:
callback(obj)
t = threading.Thread(target=threadFunc,args=(func,args,callback,obj))
global g_threads
t.start()
g_threads.append(t)
def _initObj(obj,agvId):
obj["agv"] = agvId
obj["result"] = 0
obj["resultDesc"] = "success"
def _call(agvId,locId):
if api.isCartLoc(locId):
api.move(agvId,locId+".1")
lockStockA(agvId,locId)
try:
api.mission(agvId,1) #旋转——》钻入货架——》扫码——》返回货架id号码
except Exception as e:
unlockStockA(agvId,locId)
raise e
else:
api.move(agvId,locId)
def apply(locId):
locId=getOriginPoint(locId)
return api.apply(locId+'.1')
def call(agvId,locId,finishCallback,obj):
_initObj(obj,agvId)
locId=getOriginPoint(locId)
try:
_run(func=_call,args=(agvId,locId),callback=finishCallback,obj=obj)
except Exception as e:
restAgv(agvId)
freeAgv(agvId)
raise e
return agvId
def _moveCart(agvId,srcLoc,locId,cartId):
try:
c = api.mission(agvId,2) #顶升任务,这个也会返回货架ID
if c:
checkCart(cartId,c)
api.move(agvId,srcLoc+".2")
except Exception as e:
#TODO:ycat api.move(agvId,srcLoc+".2")
#TODO:ycat raise e
pass
finally:
unlockStockA(agvId,srcLoc)
loc,type = api.getMissionType("get","",srcLoc)
api.mission(agvId,type) #3随动使小车和货架向右随动,4随动使小车和货架向左随动
loc,type = api.getMissionType("put",srcLoc,locId)
api.move(agvId,loc+".3")
api.mission(agvId,type) #3随动使小车和货架向右随动,4随动使小车和货架向左随动
lockStockA(agvId,locId)
try:
api.move(agvId,locId+".4")
api.mission(agvId,5) #放下货架
api.move(agvId,locId+".5") #返航
finally:
unlockStockA(agvId,locId)
freeAgv(agvId)
#transport carrying a rack (cart)
def moveCart(agvId,cartId,srcLoc,locId,finishCallback,obj):
_initObj(obj,agvId)
assert api.isCartLoc(cartId)
#移动货架前,一定是locked状态
#assert api.isLocked(agvId)
srcLoc = getOriginPoint(srcLoc)
locId = getOriginPoint(locId)
try:
_run(func=_moveCart,args=(agvId,srcLoc,locId,cartId),callback=finishCallback,obj=obj)
except Exception as e:
restAgv(agvId)
freeAgv(agvId)
raise e
#transport without a rack
def move(agvId,locId,finishCallback,obj):
_initObj(obj,agvId)
#移动前,一定是locked状态
#assert api.isLocked(agvId)
try:
locId=getOriginPoint(locId)
_run(func=api.move,args=(agvId,locId),callback=finishCallback,obj=obj)
except Exception as e:
freeAgv(agvId)
raise e
#release the lock on the AGV
def freeAgv(agvId):
try:
api.unlock(agvId)
except Exception as e:
log.exception("freeAgv",e)
#return to the turntable
def restAgv(agvId):
agvId2 = api.getAgvId(agvId)
api.reset(agvId2)
def Init():
import interface.dashboard.dashboardApi
locationEvent.connect(interface.dashboard.dashboardApi.reportAgvLoc)
time.sleep(3)
################# unit test #################
def testgetPoint():
resulta= getPoint("StockA_row7_col4")
assert resulta== "begin_1"
resultb= getPoint("StockA_row8_col4")
assert resultb == "begin_2"
def testgetOrginPoint():
resulta= getOriginPoint("begin_1")
assert resulta== "StockA_row7_col4"
resultb= getOriginPoint("begin_2")
assert resultb == "StockA_row8_col4"
resultc = getOriginPoint("hhahahaa")
assert resultc == "hhahahaa"
def testgetStockA():
assert getStockA("stockA_row10_col3") == 9003
assert getStockA("stockA_row10_col4") == 9004
assert getStockA("stockA_row1_col1") == 1001
assert getStockA("stockA_row2_col2") == 1002
assert getStockA("stockA_row3_col2") == 3002
assert getStockA("stockA_row4_col2") == 3002
assert getStockA("stockA_row4_col2.1") == 3002
assert getStockA("stockB_row4_col2.1") == None
assert getStockA("begin_1") == None
assert getStockA("seat_1") == None
def testcheckCart():
global g_carts
g_carts = None
checkCart("CART9001","591")
checkCart("CART9002","592")
gg = json_codec.load_file("cart.cfg")
assert "CART9001" in gg
assert "CART9002" in gg
assert gg["CART9001"] == "591"
assert gg["CART9002"] == "592"
checkCart("CART9002","592")
checkCart("CART9001","591")
try:
checkCart("CART9002","591")
assert 0
except Exception as e:
s = str(e)
assert s.find("货架ID不正确,期望货架:CART9002, 实际货架:CART9001") != -1
import counter
@counter.count
def move_cart(cartId,srcLoc,destLoc,agvId=None):
print(cartId,srcLoc,destLoc)
counter.setPrint(True)
def callback1(obj):
if obj["result"] == -1:
print("error, system exit")
obj["finish"] = True
sys.exit(-1)
else:
log.warning(obj["agv"],"start move from",obj["loc1"],"to",obj["loc2"])
moveCart(obj["agv"],obj["cart"],obj["loc1"],obj["loc2"],callback2,obj)
def callback2(obj):
if obj["result"] == -1:
print("error, system exit")
obj["finish"] = True
sys.exit(-1)
else:
log.warning(obj["agv"],"arrived",obj["loc2"])
obj["finish"] = True
obj = {}
obj["loc1"] = srcLoc
obj["loc2"] = destLoc
obj["cart"] = cartId
print("call ",srcLoc)
if agvId is None:
agvId = apply(srcLoc)
call(agvId,srcLoc,callback1,obj)
while not utility.is_exited():
if "finish" in obj:
break
time.sleep(0.2)
print("------ move ",srcLoc," to ",destLoc," finish ------")
#def func1(start,stock1,stock2):
# print("-------------------- start thread ------------------------")
# time.sleep(1)
# cartId = "CART9009"
# move_cart(cartId,start,stock1)
# next = stock1
# for s in seats:
# move_cart(cartId,next,"seat"+str(s)+"_1")
# if next == stock1:
# next = stock2
# else:
# next = stock1
# move_cart(cartId,"seat"+str(s)+"_1",next)
# # move_cart(cartId, s, next)
# print("=======================================")
# print("finish func1")
# print("=======================================")
def func2(stock1,stock2):
print("-------------------- start thread ------------------------",stock1,stock2)
time.sleep(1)
cartId = "CART9009"
for i in range(20):
print("current loop is - ",i.__str__())
move_cart(cartId,stock1,stock2)
move_cart(cartId,stock2,stock1)
print("current loop end - ",i.__str__())
print("=======================================")
print("finish func2")
print("=======================================")
def func3(times,starts,seats):
current=starts
cartId = "CART9009"
time.sleep(1)
for loop in range(0,times-1):
# current=starts
tip1="currentLoop is "+loop.__str__()+" currentStart is "+current
print(tip1)
for i in range(0,len(seats)):
next = str(seats[i])
tip2= "currentLoop is "+loop.__str__()+"currentOrigin is "+ current + "currentNext is " + next +" seatIndex is "+i.__str__()
print(tip2)
print("excuting")
move_cart(cartId,current,next)
current = next
def testPageAgvControl(jsonstr):
jsonData = json.loads(jsonstr)
result = False
if len(jsonData)==0:
result=False
else:
for currentJson in jsonData:
start = currentJson["start"]
seat = currentJson["seat"]
loop=int(currentJson["loop"])
seats = str.split(seat, ',')
durabilityTestTask1 = threading.Thread(target=func3, args=[loop, start, seats])
durabilityTestTask1.start()
result=True
return result
def testtestPageAgvControl(jsonstr):
jsonData = json.loads(jsonstr)
result = False
if len(jsonData) == 0:
result = False
else:
for currentJson in jsonData:
start = currentJson["start"]
print(start)
time.sleep(3)
seat = currentJson["seat"]
seats = str.split(seat, ',')
print(seat)
time.sleep(3)
for currentseat in seats:
print(currentseat)
time.sleep(3)
time.sleep(10)
result = True
return result
def testPageUnloockAll():
api.unlockAll();
def testProcess(jsonData):
utility.start()
testPageAgvControl(jsonData)
utility.finish()
def test1():
Init()
durabilityTestTask1= threading.Thread(target=func3,args=[20,"stockA_row1_col3",["stockA_row1_col2","stockA_row1_col4"]])
durabilityTestTask1.start()
durabilityTestTask2= threading.Thread(target=func3,args=[20,"stockA_row1_col2",["seat2_1","stockA_row4_col2"]])
# durabilityTestTask2.start()
durabilityTestTask3= threading.Thread(target=func3,args=[20,"stockA_row5_col3",["seat16_1","stockA_row5_col2"]])
# durabilityTestTask3.start()
durabilityTestTask4= threading.Thread(target=func3,args=[20,"stockA_row6_col3",["seat12_1","stockA_row6_col2"]])
# durabilityTestTask4.start()
durabilityTestTask1.join()
#t1.join()
print("===============ALL FINISH ========================")
if __name__ == '__main__':
# utility.run_tests()
if sys.argv is not None and len(sys.argv)>0:
if "process" in sys.argv:
log.info("run at testPage mode")
args=""
with open('/agvscada/driver/args.txt', 'r', encoding='utf-8') as f:
args=f.read()
api.init()
time.sleep(3)
testPageAgvControl(args)
elif "unlock" in sys.argv:
testPageUnloockAll()
elif "test" in sys.argv:
utility.start()
test1()
utility.finish()
else:
utility.start()
testgetPoint()
utility.finish()
# test3()
| [
26,
29,
30,
34,
38
] |
1,467 | fcca845b60b050fa5dd0a3c50b3c36c154022f07 | <mask token>
class Solution:
<mask token>
<mask token>
| <mask token>
class Solution:
def FindGreatestSumOfSubArray(self, array):
dp = [array[0]]
res = array[0]
for i in range(1, len(array)):
temp = max(dp[i - 1] + array[i], array[i])
dp.append(temp)
if temp > res:
res = temp
return res
<mask token>
| <mask token>
class Solution:
def FindGreatestSumOfSubArray(self, array):
dp = [array[0]]
res = array[0]
for i in range(1, len(array)):
temp = max(dp[i - 1] + array[i], array[i])
dp.append(temp)
if temp > res:
res = temp
return res
<mask token>
print(s.FindGreatestSumOfSubArray([6, -3, -2, 7, -15, 1, 2, 2]))
| <mask token>
class Solution:
def FindGreatestSumOfSubArray(self, array):
dp = [array[0]]
res = array[0]
for i in range(1, len(array)):
temp = max(dp[i - 1] + array[i], array[i])
dp.append(temp)
if temp > res:
res = temp
return res
s = Solution()
print(s.FindGreatestSumOfSubArray([6, -3, -2, 7, -15, 1, 2, 2]))
| """
Problem description
HZ occasionally uses professional questions to trip up students from non-CS majors.
After today's test-team meeting he posed another one: in classic one-dimensional
pattern recognition we often need the maximum sum of a contiguous subvector. When all
the numbers are positive the problem is easy, but if the vector contains negative
numbers, should a negative number be included in the hope that the positive numbers
beside it make up for it?
For example, for {6,-3,-2,7,-15,1,2,2} the maximum contiguous-subvector sum is 8
(from element 0 through element 3).
Given an array, return the sum of its maximum contiguous subsequence — will you be
fooled? (The subvector has length at least 1.)
"""
# -*- coding:utf-8 -*-
class Solution:
def FindGreatestSumOfSubArray(self, array):
# write code here
        # dp[i]: max subarray sum ending at index i = max(array[i], dp[i-1] + array[i])
dp = [array[0]]
res = array[0]
for i in range(1, len(array)):
temp = max(dp[i-1]+array[i], array[i])
dp.append(temp)
if temp > res:
res = temp
return res
s = Solution()
print(s.FindGreatestSumOfSubArray([6,-3,-2,7,-15,1,2,2])) | [
1,
2,
3,
4,
5
] |
1,468 | 997b68e42547b8f8a1059776c55c3ad16df494da | <mask token>
| ii = [('LeakWTI2.py', 6)]
| null | null | null | [
0,
1
] |
1,469 | e675283f14a3d29fba878e7f6d9592130611c2be | <mask token>
class UserModel:
<mask token>
def __init__(self, name, password, birth, sex, phone, email, id=0):
if id == 0:
self.id = self.id + 1
else:
self.id = id
self.name = name
self.email = email
s = hashlib.sha256()
s.update(password.encode('utf-8'))
self.password = s.hexdigest()
self.birth = birth
self.sex = sex
self.phone = phone
def add_user(self):
conn = sqlite3.connect('main.db')
cursor = conn.cursor()
insert_query = (
'INSERT INTO users (name, password, sex, birth, phone, email) VALUES(?, ?, ?, ?, ?, ?)'
)
cursor.execute(insert_query, (self.name, self.password, self.sex,
self.birth, self.phone, self.email))
conn.commit()
conn.close()
@staticmethod
def get_user(self, id):
user = None
conn = sqlite3.connect('main.db')
cursor = conn.cursor()
query_one_query = 'SELECT * FROM users WHERE id=?'
print(query_one_query)
result = cursor.execute(query_one_query, (str(id),)).fetchone()
if result is None:
return None
print(result)
user = UserModel(id=result[0], name=result[1], password=result[2],
sex=result[3], birth=result[4], phone=result[5], email=result[6])
user.id = result[0]
conn.close()
return user
@staticmethod
def delete_user(self, id):
conn = sqlite3.connect('main.db')
cursor = conn.cursor()
delete_query = 'DELETE FROM users WHERE id=?'
cursor.execute(delete_query, (id,))
conn.commit()
conn.close()
def update_user(self):
conn = sqlite3.connect('main.db')
cursor = conn.cursor()
update_query = (
'UPDATE users SET name=?, password=?, sex=?, birth=?, phone=?, email=? WHERE id=?'
)
cursor.execute(update_query, (self.name, self.password, self.sex,
self.birth, self.phone, self.email, self.id))
conn.commit()
conn.close()
<mask token>
<mask token>
| <mask token>
class UserModel:
<mask token>
def __init__(self, name, password, birth, sex, phone, email, id=0):
if id == 0:
self.id = self.id + 1
else:
self.id = id
self.name = name
self.email = email
s = hashlib.sha256()
s.update(password.encode('utf-8'))
self.password = s.hexdigest()
self.birth = birth
self.sex = sex
self.phone = phone
def add_user(self):
conn = sqlite3.connect('main.db')
cursor = conn.cursor()
insert_query = (
'INSERT INTO users (name, password, sex, birth, phone, email) VALUES(?, ?, ?, ?, ?, ?)'
)
cursor.execute(insert_query, (self.name, self.password, self.sex,
self.birth, self.phone, self.email))
conn.commit()
conn.close()
@staticmethod
def get_user(self, id):
user = None
conn = sqlite3.connect('main.db')
cursor = conn.cursor()
query_one_query = 'SELECT * FROM users WHERE id=?'
print(query_one_query)
result = cursor.execute(query_one_query, (str(id),)).fetchone()
if result is None:
return None
print(result)
user = UserModel(id=result[0], name=result[1], password=result[2],
sex=result[3], birth=result[4], phone=result[5], email=result[6])
user.id = result[0]
conn.close()
return user
@staticmethod
def delete_user(self, id):
conn = sqlite3.connect('main.db')
cursor = conn.cursor()
delete_query = 'DELETE FROM users WHERE id=?'
cursor.execute(delete_query, (id,))
conn.commit()
conn.close()
def update_user(self):
conn = sqlite3.connect('main.db')
cursor = conn.cursor()
update_query = (
'UPDATE users SET name=?, password=?, sex=?, birth=?, phone=?, email=? WHERE id=?'
)
cursor.execute(update_query, (self.name, self.password, self.sex,
self.birth, self.phone, self.email, self.id))
conn.commit()
conn.close()
@staticmethod
def get_all_user():
users = []
conn = sqlite3.connect('main.db')
cursor = conn.cursor()
query_one_query = 'SELECT * FROM users'
for item in cursor.execute(query_one_query):
user = UserModel(id=item[0], name=item[1], password=item[2],
sex=item[3], birth=item[4], phone=item[5], email=item[6])
users.append(user)
conn.close()
return users
<mask token>
| <mask token>
class UserModel:
id = 0
def __init__(self, name, password, birth, sex, phone, email, id=0):
if id == 0:
self.id = self.id + 1
else:
self.id = id
self.name = name
self.email = email
s = hashlib.sha256()
s.update(password.encode('utf-8'))
self.password = s.hexdigest()
self.birth = birth
self.sex = sex
self.phone = phone
def add_user(self):
conn = sqlite3.connect('main.db')
cursor = conn.cursor()
insert_query = (
'INSERT INTO users (name, password, sex, birth, phone, email) VALUES(?, ?, ?, ?, ?, ?)'
)
cursor.execute(insert_query, (self.name, self.password, self.sex,
self.birth, self.phone, self.email))
conn.commit()
conn.close()
@staticmethod
def get_user(self, id):
user = None
conn = sqlite3.connect('main.db')
cursor = conn.cursor()
query_one_query = 'SELECT * FROM users WHERE id=?'
print(query_one_query)
result = cursor.execute(query_one_query, (str(id),)).fetchone()
if result is None:
return None
print(result)
user = UserModel(id=result[0], name=result[1], password=result[2],
sex=result[3], birth=result[4], phone=result[5], email=result[6])
user.id = result[0]
conn.close()
return user
@staticmethod
def delete_user(self, id):
conn = sqlite3.connect('main.db')
cursor = conn.cursor()
delete_query = 'DELETE FROM users WHERE id=?'
cursor.execute(delete_query, (id,))
conn.commit()
conn.close()
def update_user(self):
conn = sqlite3.connect('main.db')
cursor = conn.cursor()
update_query = (
'UPDATE users SET name=?, password=?, sex=?, birth=?, phone=?, email=? WHERE id=?'
)
cursor.execute(update_query, (self.name, self.password, self.sex,
self.birth, self.phone, self.email, self.id))
conn.commit()
conn.close()
@staticmethod
def get_all_user():
users = []
conn = sqlite3.connect('main.db')
cursor = conn.cursor()
query_one_query = 'SELECT * FROM users'
for item in cursor.execute(query_one_query):
user = UserModel(id=item[0], name=item[1], password=item[2],
sex=item[3], birth=item[4], phone=item[5], email=item[6])
users.append(user)
conn.close()
return users
<mask token>
| <mask token>
class UserModel:
id = 0
def __init__(self, name, password, birth, sex, phone, email, id=0):
if id == 0:
self.id = self.id + 1
else:
self.id = id
self.name = name
self.email = email
s = hashlib.sha256()
s.update(password.encode('utf-8'))
self.password = s.hexdigest()
self.birth = birth
self.sex = sex
self.phone = phone
def add_user(self):
conn = sqlite3.connect('main.db')
cursor = conn.cursor()
insert_query = (
'INSERT INTO users (name, password, sex, birth, phone, email) VALUES(?, ?, ?, ?, ?, ?)'
)
cursor.execute(insert_query, (self.name, self.password, self.sex,
self.birth, self.phone, self.email))
conn.commit()
conn.close()
@staticmethod
def get_user(self, id):
user = None
conn = sqlite3.connect('main.db')
cursor = conn.cursor()
query_one_query = 'SELECT * FROM users WHERE id=?'
print(query_one_query)
result = cursor.execute(query_one_query, (str(id),)).fetchone()
if result is None:
return None
print(result)
user = UserModel(id=result[0], name=result[1], password=result[2],
sex=result[3], birth=result[4], phone=result[5], email=result[6])
user.id = result[0]
conn.close()
return user
@staticmethod
def delete_user(self, id):
conn = sqlite3.connect('main.db')
cursor = conn.cursor()
delete_query = 'DELETE FROM users WHERE id=?'
cursor.execute(delete_query, (id,))
conn.commit()
conn.close()
def update_user(self):
conn = sqlite3.connect('main.db')
cursor = conn.cursor()
update_query = (
'UPDATE users SET name=?, password=?, sex=?, birth=?, phone=?, email=? WHERE id=?'
)
cursor.execute(update_query, (self.name, self.password, self.sex,
self.birth, self.phone, self.email, self.id))
conn.commit()
conn.close()
@staticmethod
def get_all_user():
users = []
conn = sqlite3.connect('main.db')
cursor = conn.cursor()
query_one_query = 'SELECT * FROM users'
for item in cursor.execute(query_one_query):
user = UserModel(id=item[0], name=item[1], password=item[2],
sex=item[3], birth=item[4], phone=item[5], email=item[6])
users.append(user)
conn.close()
return users
if __name__ == '__main__':
print(UserModel.get_all_user())
| import sqlite3
import hashlib
users = []
class UserModel:
id = 0
def __init__(self, name, password, birth, sex, phone, email, id=0):
if(id == 0):
self.id = self.id + 1
else:
self.id = id
self.name = name
self.email = email
        #hash the password
s = hashlib.sha256()
s.update(password.encode('utf-8'))
self.password = s.hexdigest()
self.birth = birth
self.sex = sex
self.phone = phone
def add_user(self):
conn = sqlite3.connect('main.db')
cursor = conn.cursor()
insert_query = 'INSERT INTO users (name, password, sex, birth, phone, email) \
VALUES(?, ?, ?, ?, ?, ?)'
cursor.execute(insert_query, (self.name, self.password, self.sex,
self.birth, self.phone, self.email))
conn.commit()
conn.close()
@staticmethod
def get_user(self, id):
user = None
conn = sqlite3.connect('main.db')
cursor = conn.cursor()
query_one_query = 'SELECT * FROM users WHERE id=?'
print(query_one_query)
result = cursor.execute(query_one_query, (str(id),)).fetchone()
if result is None:
return None
print(result)
user = UserModel(id=result[0], name=result[1], password=result[2], sex = result[3], \
birth=result[4], phone=result[5], email=result[6])
user.id = result[0]
conn.close()
return user
@staticmethod
def delete_user(self, id):
conn = sqlite3.connect('main.db')
cursor = conn.cursor()
delete_query = 'DELETE FROM users WHERE id=?'
cursor.execute(delete_query, (id,))
conn.commit()
conn.close()
def update_user(self):
conn = sqlite3.connect('main.db')
cursor = conn.cursor()
update_query = 'UPDATE users SET name=?, password=?, sex=?, birth=?, phone=?, email=? WHERE id=?'
cursor.execute(update_query, (self.name, self.password, self.sex,
self.birth, self.phone, self.email, self.id))
conn.commit()
conn.close()
@staticmethod
def get_all_user():
users = []
conn = sqlite3.connect('main.db')
cursor = conn.cursor()
query_one_query = 'SELECT * FROM users'
for item in cursor.execute(query_one_query):
user = UserModel(id=item[0], name=item[1], password=item[2], sex = item[3], \
birth=item[4], phone=item[5], email=item[6])
users.append(user)
conn.close()
return users
if __name__ == "__main__":
print(UserModel.get_all_user()) | [
6,
7,
8,
9,
12
] |
1,470 | e056a1600b620519e729c597dcec57793284019a | <mask token>
@mod.route('/shutdown')
def shutdown():
flash(
'Shutting down.<br>When the LEDs on the board stop flashing, it should be safe to unplug your Raspberry Pi.'
)
subprocess.call(['sudo', 'halt'])
return redirect(url_for('system.index'))
<mask token>
| <mask token>
def check_output(*args):
return subprocess.Popen(*args, stdout=subprocess.PIPE).communicate()[0]
<mask token>
@mod.route('/')
def index():
uptime = check_output(['uptime'])
return render_template('system/system.html', uptime=uptime)
@mod.route('/shutdown')
def shutdown():
flash(
'Shutting down.<br>When the LEDs on the board stop flashing, it should be safe to unplug your Raspberry Pi.'
)
subprocess.call(['sudo', 'halt'])
return redirect(url_for('system.index'))
@mod.route('/reboot')
def reboot():
flash('Rebooting... please wait.<br>This will take approx. one minute.')
subprocess.call(['sudo', 'reboot'])
return redirect(url_for('system.index'))
| <mask token>
def check_output(*args):
return subprocess.Popen(*args, stdout=subprocess.PIPE).communicate()[0]
mod = Blueprint('system', __name__)
@mod.route('/')
def index():
uptime = check_output(['uptime'])
return render_template('system/system.html', uptime=uptime)
@mod.route('/shutdown')
def shutdown():
flash(
'Shutting down.<br>When the LEDs on the board stop flashing, it should be safe to unplug your Raspberry Pi.'
)
subprocess.call(['sudo', 'halt'])
return redirect(url_for('system.index'))
@mod.route('/reboot')
def reboot():
flash('Rebooting... please wait.<br>This will take approx. one minute.')
subprocess.call(['sudo', 'reboot'])
return redirect(url_for('system.index'))
| from flask import Blueprint, render_template, redirect, url_for, flash
import subprocess
def check_output(*args):
return subprocess.Popen(*args, stdout=subprocess.PIPE).communicate()[0]
mod = Blueprint('system', __name__)
@mod.route('/')
def index():
uptime = check_output(['uptime'])
return render_template('system/system.html', uptime=uptime)
@mod.route('/shutdown')
def shutdown():
flash(
'Shutting down.<br>When the LEDs on the board stop flashing, it should be safe to unplug your Raspberry Pi.'
)
subprocess.call(['sudo', 'halt'])
return redirect(url_for('system.index'))
@mod.route('/reboot')
def reboot():
flash('Rebooting... please wait.<br>This will take approx. one minute.')
subprocess.call(['sudo', 'reboot'])
return redirect(url_for('system.index'))
| # -*- coding: utf-8 -*-
from flask import Blueprint, render_template, redirect, url_for, flash
import subprocess
def check_output(*args):
return subprocess.Popen(*args, stdout=subprocess.PIPE).communicate()[0]
mod = Blueprint('system', __name__)
@mod.route('/')
def index():
uptime = check_output(["uptime"])
return render_template('system/system.html', uptime=uptime)
@mod.route('/shutdown')
def shutdown():
flash("Shutting down.<br>When the LEDs on the board stop flashing, \
it should be safe to unplug your Raspberry Pi.")
subprocess.call(["sudo", "halt"])
return redirect(url_for('system.index'))
@mod.route('/reboot')
def reboot():
flash("Rebooting... please wait.<br>This will take approx. one minute.")
subprocess.call(["sudo", "reboot"])
return redirect(url_for('system.index'))
| [
1,
4,
5,
6,
7
] |
1,471 | 4d30f4294a9f3aab8cae20dca9d280c53b37ed25 | <mask token>
| <mask token>
for i in range(len(bull_list)):
bull_list.append(int(bull_str[i]))
<mask token>
while True:
flag += 1
for i in range(len(bull_list)):
if bull_list[i] == 1:
for j in range(bull_list.index(bull_list[i]), len(bull_list)):
if bull_list[j] == 1:
bull_list[j] = 0
else:
bull_list = 1
break
if i == len(bull_list):
flag += 1
print('Alex' if flag % 2 == 0 else 'Bob')
break
| num = int(input())
bull_str = input().split(' ')
bull_list = []
for i in range(len(bull_list)):
bull_list.append(int(bull_str[i]))
flag = 0
while True:
flag += 1
for i in range(len(bull_list)):
if bull_list[i] == 1:
for j in range(bull_list.index(bull_list[i]), len(bull_list)):
if bull_list[j] == 1:
bull_list[j] = 0
else:
bull_list = 1
break
if i == len(bull_list):
flag += 1
print('Alex' if flag % 2 == 0 else 'Bob')
break
| null | null | [
0,
1,
2
] |
1,472 | d4d47f7abc5c8224188430546a65bfb8f358802f | <mask token>
| <mask token>
if __name__ == '__main__':
dq = Deque()
dq.palindrom()
| <mask token>
from com.bridgelabz.utility.Data_structure_utility import *
if __name__ == '__main__':
dq = Deque()
dq.palindrom()
| """
purpose : Take a string of characters as input and construct an
          algorithm to check whether it is a palindrome.
@Author : Reshma Y. Kale
"""
from com.bridgelabz.utility.Data_structure_utility import *
if __name__=="__main__":
dq = Deque()
dq.palindrom() | null | [
0,
1,
2,
3
] |
1,473 | ae1aab7563443db3a31fe98b5b26b32944d57c9d | <mask token>
class TestTestHelper(TestCase):
<mask token>
<mask token>
def test_assertAnyIn_suceeds(self):
"""
Make sure assertInAny succeeds
:return:
"""
test_case = AuthHelperTestCase('assertAnyIn')
self.assertIsNone(test_case.assertAnyIn(['1', '2', '3'], ['1', 'b',
'c', 'd']))
| <mask token>
class TestTestHelper(TestCase):
<mask token>
def test_assertAnyIn_fails(self):
"""
Make sure assertInAny fails correctly
:return:
"""
test_case = AuthHelperTestCase('assertAnyIn')
with self.assertRaises(AssertionError):
test_case.assertAnyIn(['1', '2', '3'], ['a', 'b', 'c', 'd'])
def test_assertAnyIn_suceeds(self):
"""
Make sure assertInAny succeeds
:return:
"""
test_case = AuthHelperTestCase('assertAnyIn')
self.assertIsNone(test_case.assertAnyIn(['1', '2', '3'], ['1', 'b',
'c', 'd']))
| <mask token>
class TestTestHelper(TestCase):
"""
Test our helper functions
"""
def test_assertAnyIn_fails(self):
"""
Make sure assertInAny fails correctly
:return:
"""
test_case = AuthHelperTestCase('assertAnyIn')
with self.assertRaises(AssertionError):
test_case.assertAnyIn(['1', '2', '3'], ['a', 'b', 'c', 'd'])
def test_assertAnyIn_suceeds(self):
"""
Make sure assertInAny succeeds
:return:
"""
test_case = AuthHelperTestCase('assertAnyIn')
self.assertIsNone(test_case.assertAnyIn(['1', '2', '3'], ['1', 'b',
'c', 'd']))
| from unittest import TestCase
from tests import AuthHelperTestCase
class TestTestHelper(TestCase):
"""
Test our helper functions
"""
def test_assertAnyIn_fails(self):
"""
Make sure assertInAny fails correctly
:return:
"""
test_case = AuthHelperTestCase('assertAnyIn')
with self.assertRaises(AssertionError):
test_case.assertAnyIn(['1', '2', '3'], ['a', 'b', 'c', 'd'])
def test_assertAnyIn_suceeds(self):
"""
Make sure assertInAny succeeds
:return:
"""
test_case = AuthHelperTestCase('assertAnyIn')
self.assertIsNone(test_case.assertAnyIn(['1', '2', '3'], ['1', 'b',
'c', 'd']))
| null | [
2,
3,
4,
5
] |
1,474 | f9a255a464b5f48a1a8be2e2887db721a92e7f4e | <mask token>
class TestNestedDict(unittest.TestCase):
<mask token>
<mask token>
def test_dfood(self):
self.assertEqual(self.dfood.keys(), [u'0001', u'0002', u'0003'])
<mask token>
<mask token>
<mask token>
def test_update(self):
d_original = {'hello1': 1}
dup = {'hello2': 2}
d = self.nd.update(dchg=dup, dnow=d_original)
self.assertEqual(d, {'hello1': 1, 'hello2': 2})
self.assertEqual(set(d.keys()), set(['hello1', 'hello2']))
value = self.nd.get(keys=['hello2'], dnow=d)
self.assertEqual(value, 2)
d_original = {'hello': 'to_override'}
dup = {'hello': 'over'}
d = self.nd.update(dchg=dup, dnow=d_original)
self.assertEqual(d, {'hello': 'over'})
d_original = {'hello': {'value': 'to_override', 'no_change': 1}}
dup = {'hello': {'value': 'over'}}
d = self.nd.update(dchg=dup, dnow=d_original)
self.assertEqual(d, {'hello': {'value': 'over', 'no_change': 1}})
value = self.nd.get(keys=['hello', 'no_change'], dnow=d_original)
self.assertEqual(value, 1)
d_original = {'hello': {'value': 'to_override', 'no_change': 1}}
dup = {'hello': {'value': {}}}
dchg = self.nd.update(dchg=dup, dnow=d_original)
self.assertEqual(dchg, {'hello': {'value': {}, 'no_change': 1}})
d_original = {'hello': {'value': {}, 'no_change': 1}}
dup = {'hello': {'value': 2}}
dchg = self.nd.update(dchg=dup, dnow=d_original)
self.assertEqual(dchg, {'hello': {'value': 2, 'no_change': 1}})
<mask token>
<mask token>
| <mask token>
class TestNestedDict(unittest.TestCase):
@classmethod
def setUpClass(cls):
path = os.path.dirname(__file__)
cls.afile = os.path.join(path, '../nested/data/food_nested_dict.json')
cls.nd = NestedDict()
cls.d = {'a': {'b': {'c': 'C'}}}
with open(cls.afile, 'r') as fp:
cls.dfood = json.load(fp)
<mask token>
def test_dfood(self):
self.assertEqual(self.dfood.keys(), [u'0001', u'0002', u'0003'])
def test_get(self):
v = self.nd.get(keys=['a', 'b', 'c'], dnow=self.d)
self.assertEqual(v, 'C')
dc = copy.deepcopy(self.d)
items = ['x', 'y', 'z']
dchg = self.nd.set(value='E', keys=items, dnow=dc)
v = self.nd.get(keys=['x', 'y', 'z'], dnow=dchg)
self.assertEqual(v, 'E')
dc = copy.deepcopy(self.d)
items = ['a', 'y', 'z']
dchg = self.nd.set(value='E', keys=items, dnow=dc)
v = self.nd.get(keys=['a', 'y', 'z'], dnow=dchg)
self.assertEqual(v, 'E')
dc = copy.deepcopy(self.d)
items = ['a', 'b', 'e']
dchg = self.nd.set(value='E', keys=items, dnow=dc)
v = self.nd.get(keys=['a', 'b', 'e'], dnow=dchg)
self.assertEqual(v, 'E')
dc = copy.deepcopy(self.d)
items = ['a', 'b', 'c']
dchg = self.nd.set(value='E', keys=items, dnow=dc)
v = self.nd.get(keys=['a', 'b', 'c'], dnow=dchg)
self.assertEqual(v, 'E')
<mask token>
<mask token>
def test_update(self):
d_original = {'hello1': 1}
dup = {'hello2': 2}
d = self.nd.update(dchg=dup, dnow=d_original)
self.assertEqual(d, {'hello1': 1, 'hello2': 2})
self.assertEqual(set(d.keys()), set(['hello1', 'hello2']))
value = self.nd.get(keys=['hello2'], dnow=d)
self.assertEqual(value, 2)
d_original = {'hello': 'to_override'}
dup = {'hello': 'over'}
d = self.nd.update(dchg=dup, dnow=d_original)
self.assertEqual(d, {'hello': 'over'})
d_original = {'hello': {'value': 'to_override', 'no_change': 1}}
dup = {'hello': {'value': 'over'}}
d = self.nd.update(dchg=dup, dnow=d_original)
self.assertEqual(d, {'hello': {'value': 'over', 'no_change': 1}})
value = self.nd.get(keys=['hello', 'no_change'], dnow=d_original)
self.assertEqual(value, 1)
d_original = {'hello': {'value': 'to_override', 'no_change': 1}}
dup = {'hello': {'value': {}}}
dchg = self.nd.update(dchg=dup, dnow=d_original)
self.assertEqual(dchg, {'hello': {'value': {}, 'no_change': 1}})
d_original = {'hello': {'value': {}, 'no_change': 1}}
dup = {'hello': {'value': 2}}
dchg = self.nd.update(dchg=dup, dnow=d_original)
self.assertEqual(dchg, {'hello': {'value': 2, 'no_change': 1}})
<mask token>
def test_update2(self):
d_original = {'hello1': 1}
dup = {'hello2': 2}
d = self.nd.update2(dchg=dup, dnow=d_original)
self.assertEqual(d, {'hello1': 1, 'hello2': 2})
self.assertEqual(set(d.keys()), set(['hello1', 'hello2']))
value = self.nd.get(keys=['hello2'], dnow=d)
self.assertEqual(value, 2)
d_original = {'hello': 'to_override'}
dup = {'hello': 'over'}
d = self.nd.update2(dchg=dup, dnow=d_original)
self.assertEqual(d, {'hello': 'over'})
d_original = {'hello': {'value': 'to_override', 'no_change': 1}}
dup = {'hello': {'value': 'over'}}
d = self.nd.update2(dchg=dup, dnow=d_original)
self.assertEqual(d, {'hello': {'value': 'over', 'no_change': 1}})
value = self.nd.get(keys=['hello', 'no_change'], dnow=d_original)
self.assertEqual(value, 1)
d_original = {'hello': {'value': 'to_override', 'no_change': 1}}
dup = {'hello': {'value': {}}}
dchg = self.nd.update2(dchg=dup, dnow=d_original)
self.assertEqual(dchg, {'hello': {'value': {}, 'no_change': 1}})
d_original = {'hello': {'value': {}, 'no_change': 1}}
dup = {'hello': {'value': 2}}
dchg = self.nd.update2(dchg=dup, dnow=d_original)
self.assertEqual(dchg, {'hello': {'value': 2, 'no_change': 1}})
| <mask token>
class TestNestedDict(unittest.TestCase):
@classmethod
def setUpClass(cls):
path = os.path.dirname(__file__)
cls.afile = os.path.join(path, '../nested/data/food_nested_dict.json')
cls.nd = NestedDict()
cls.d = {'a': {'b': {'c': 'C'}}}
with open(cls.afile, 'r') as fp:
cls.dfood = json.load(fp)
<mask token>
def test_dfood(self):
self.assertEqual(self.dfood.keys(), [u'0001', u'0002', u'0003'])
def test_get(self):
v = self.nd.get(keys=['a', 'b', 'c'], dnow=self.d)
self.assertEqual(v, 'C')
dc = copy.deepcopy(self.d)
items = ['x', 'y', 'z']
dchg = self.nd.set(value='E', keys=items, dnow=dc)
v = self.nd.get(keys=['x', 'y', 'z'], dnow=dchg)
self.assertEqual(v, 'E')
dc = copy.deepcopy(self.d)
items = ['a', 'y', 'z']
dchg = self.nd.set(value='E', keys=items, dnow=dc)
v = self.nd.get(keys=['a', 'y', 'z'], dnow=dchg)
self.assertEqual(v, 'E')
dc = copy.deepcopy(self.d)
items = ['a', 'b', 'e']
dchg = self.nd.set(value='E', keys=items, dnow=dc)
v = self.nd.get(keys=['a', 'b', 'e'], dnow=dchg)
self.assertEqual(v, 'E')
dc = copy.deepcopy(self.d)
items = ['a', 'b', 'c']
dchg = self.nd.set(value='E', keys=items, dnow=dc)
v = self.nd.get(keys=['a', 'b', 'c'], dnow=dchg)
self.assertEqual(v, 'E')
<mask token>
def test_create(self):
keys = ['a', 'b', 'c']
value = {u'd': 1}
d = self.nd.create(value=value, keys=keys)
dchg = {'a': {'b': {'c': {u'd': 1}}}}
self.assertEqual(d, dchg)
def test_update(self):
d_original = {'hello1': 1}
dup = {'hello2': 2}
d = self.nd.update(dchg=dup, dnow=d_original)
self.assertEqual(d, {'hello1': 1, 'hello2': 2})
self.assertEqual(set(d.keys()), set(['hello1', 'hello2']))
value = self.nd.get(keys=['hello2'], dnow=d)
self.assertEqual(value, 2)
d_original = {'hello': 'to_override'}
dup = {'hello': 'over'}
d = self.nd.update(dchg=dup, dnow=d_original)
self.assertEqual(d, {'hello': 'over'})
d_original = {'hello': {'value': 'to_override', 'no_change': 1}}
dup = {'hello': {'value': 'over'}}
d = self.nd.update(dchg=dup, dnow=d_original)
self.assertEqual(d, {'hello': {'value': 'over', 'no_change': 1}})
value = self.nd.get(keys=['hello', 'no_change'], dnow=d_original)
self.assertEqual(value, 1)
d_original = {'hello': {'value': 'to_override', 'no_change': 1}}
dup = {'hello': {'value': {}}}
dchg = self.nd.update(dchg=dup, dnow=d_original)
self.assertEqual(dchg, {'hello': {'value': {}, 'no_change': 1}})
d_original = {'hello': {'value': {}, 'no_change': 1}}
dup = {'hello': {'value': 2}}
dchg = self.nd.update(dchg=dup, dnow=d_original)
self.assertEqual(dchg, {'hello': {'value': 2, 'no_change': 1}})
def test_merge_shallow(self):
d = {}
dchg = {}
du = self.nd.merge_shallow(dchg=dchg, dnow=d)
self.assertEqual(du, d)
d_original = {'hello1': 1}
dup = {'hello2': 2}
du = self.nd.merge_shallow(dchg=dup, dnow=d_original)
self.assertEqual(du, {'hello1': 1, 'hello2': 2})
d_original = {'hello': {'value': 'to_override', 'no_change': 1}}
dup = {'hello': {'value': 'over'}}
d = self.nd.merge_shallow(dchg=dup, dnow=d_original)
self.assertEqual(d, {'hello': {'value': 'over'}})
def test_update2(self):
d_original = {'hello1': 1}
dup = {'hello2': 2}
d = self.nd.update2(dchg=dup, dnow=d_original)
self.assertEqual(d, {'hello1': 1, 'hello2': 2})
self.assertEqual(set(d.keys()), set(['hello1', 'hello2']))
value = self.nd.get(keys=['hello2'], dnow=d)
self.assertEqual(value, 2)
d_original = {'hello': 'to_override'}
dup = {'hello': 'over'}
d = self.nd.update2(dchg=dup, dnow=d_original)
self.assertEqual(d, {'hello': 'over'})
d_original = {'hello': {'value': 'to_override', 'no_change': 1}}
dup = {'hello': {'value': 'over'}}
d = self.nd.update2(dchg=dup, dnow=d_original)
self.assertEqual(d, {'hello': {'value': 'over', 'no_change': 1}})
value = self.nd.get(keys=['hello', 'no_change'], dnow=d_original)
self.assertEqual(value, 1)
d_original = {'hello': {'value': 'to_override', 'no_change': 1}}
dup = {'hello': {'value': {}}}
dchg = self.nd.update2(dchg=dup, dnow=d_original)
self.assertEqual(dchg, {'hello': {'value': {}, 'no_change': 1}})
d_original = {'hello': {'value': {}, 'no_change': 1}}
dup = {'hello': {'value': 2}}
dchg = self.nd.update2(dchg=dup, dnow=d_original)
self.assertEqual(dchg, {'hello': {'value': 2, 'no_change': 1}})
| <mask token>
class TestNestedDict(unittest.TestCase):
@classmethod
def setUpClass(cls):
path = os.path.dirname(__file__)
cls.afile = os.path.join(path, '../nested/data/food_nested_dict.json')
cls.nd = NestedDict()
cls.d = {'a': {'b': {'c': 'C'}}}
with open(cls.afile, 'r') as fp:
cls.dfood = json.load(fp)
def test_file(self):
self.assertTrue(os.path.isfile(self.afile))
def test_dfood(self):
self.assertEqual(self.dfood.keys(), [u'0001', u'0002', u'0003'])
def test_get(self):
v = self.nd.get(keys=['a', 'b', 'c'], dnow=self.d)
self.assertEqual(v, 'C')
dc = copy.deepcopy(self.d)
items = ['x', 'y', 'z']
dchg = self.nd.set(value='E', keys=items, dnow=dc)
v = self.nd.get(keys=['x', 'y', 'z'], dnow=dchg)
self.assertEqual(v, 'E')
dc = copy.deepcopy(self.d)
items = ['a', 'y', 'z']
dchg = self.nd.set(value='E', keys=items, dnow=dc)
v = self.nd.get(keys=['a', 'y', 'z'], dnow=dchg)
self.assertEqual(v, 'E')
dc = copy.deepcopy(self.d)
items = ['a', 'b', 'e']
dchg = self.nd.set(value='E', keys=items, dnow=dc)
v = self.nd.get(keys=['a', 'b', 'e'], dnow=dchg)
self.assertEqual(v, 'E')
dc = copy.deepcopy(self.d)
items = ['a', 'b', 'c']
dchg = self.nd.set(value='E', keys=items, dnow=dc)
v = self.nd.get(keys=['a', 'b', 'c'], dnow=dchg)
self.assertEqual(v, 'E')
def test_set(self):
dcopy = copy.deepcopy(self.dfood)
dchg = self.nd.set(value='topless', keys=[u'0002', u'topping',
u'5001', u'type'], dnow=dcopy)
value = self.nd.get(keys=[u'0002', u'topping', u'5001'], dnow=dchg)
self.assertEqual(value, {'id': '5001', 'type': 'topless'})
dcopy = copy.deepcopy(self.dfood)
dchg = self.nd.set(value='5.01', keys=['0002', 'topping', '5001',
'price'], dnow=dcopy)
value = self.nd.get(keys=['0002', 'topping', '5001'], dnow=dchg)
self.assertEqual(value, {'id': '5001', 'type': u'None', 'price':
'5.01'})
dcopy = copy.deepcopy(self.dfood)
dchg = self.nd.set(value='topless', keys=[35, 'topping', '5001',
'type'], dnow=dcopy)
pprint(dchg)
argv = [35, 'topping', '5001']
value = self.nd.get(keys=argv, dnow=dchg)
self.assertEqual(value, {'type': 'topless'})
dcopy = copy.deepcopy(self.dfood)
dnew = {'id': 555, 'type': 'berry', 'price': 0.99}
dchg = self.nd.set(value=dnew, keys=['0002', 'topping', '5001'],
dnow=dcopy)
value = self.nd.get(keys=['0002', 'topping', '5001'], dnow=dchg)
pprint(value)
self.assertEqual(value, dnew)
dcopy = copy.deepcopy(self.dfood)
dnew = {'Type': 'berry', 'price': 0.99}
dchg = self.nd.set(value=dnew, keys=['0002', 'topping', '5001'],
dnow=dcopy)
value = self.nd.get(keys=['0002', 'topping', '5001'], dnow=dchg)
self.assertEqual(value, {u'id': u'5001', 'Type': 'berry', 'price':
0.99, u'type': u'None'})
def test_create(self):
keys = ['a', 'b', 'c']
value = {u'd': 1}
d = self.nd.create(value=value, keys=keys)
dchg = {'a': {'b': {'c': {u'd': 1}}}}
self.assertEqual(d, dchg)
def test_update(self):
d_original = {'hello1': 1}
dup = {'hello2': 2}
d = self.nd.update(dchg=dup, dnow=d_original)
self.assertEqual(d, {'hello1': 1, 'hello2': 2})
self.assertEqual(set(d.keys()), set(['hello1', 'hello2']))
value = self.nd.get(keys=['hello2'], dnow=d)
self.assertEqual(value, 2)
d_original = {'hello': 'to_override'}
dup = {'hello': 'over'}
d = self.nd.update(dchg=dup, dnow=d_original)
self.assertEqual(d, {'hello': 'over'})
d_original = {'hello': {'value': 'to_override', 'no_change': 1}}
dup = {'hello': {'value': 'over'}}
d = self.nd.update(dchg=dup, dnow=d_original)
self.assertEqual(d, {'hello': {'value': 'over', 'no_change': 1}})
value = self.nd.get(keys=['hello', 'no_change'], dnow=d_original)
self.assertEqual(value, 1)
d_original = {'hello': {'value': 'to_override', 'no_change': 1}}
dup = {'hello': {'value': {}}}
dchg = self.nd.update(dchg=dup, dnow=d_original)
self.assertEqual(dchg, {'hello': {'value': {}, 'no_change': 1}})
d_original = {'hello': {'value': {}, 'no_change': 1}}
dup = {'hello': {'value': 2}}
dchg = self.nd.update(dchg=dup, dnow=d_original)
self.assertEqual(dchg, {'hello': {'value': 2, 'no_change': 1}})
def test_merge_shallow(self):
d = {}
dchg = {}
du = self.nd.merge_shallow(dchg=dchg, dnow=d)
self.assertEqual(du, d)
d_original = {'hello1': 1}
dup = {'hello2': 2}
du = self.nd.merge_shallow(dchg=dup, dnow=d_original)
self.assertEqual(du, {'hello1': 1, 'hello2': 2})
d_original = {'hello': {'value': 'to_override', 'no_change': 1}}
dup = {'hello': {'value': 'over'}}
d = self.nd.merge_shallow(dchg=dup, dnow=d_original)
self.assertEqual(d, {'hello': {'value': 'over'}})
def test_update2(self):
d_original = {'hello1': 1}
dup = {'hello2': 2}
d = self.nd.update2(dchg=dup, dnow=d_original)
self.assertEqual(d, {'hello1': 1, 'hello2': 2})
self.assertEqual(set(d.keys()), set(['hello1', 'hello2']))
value = self.nd.get(keys=['hello2'], dnow=d)
self.assertEqual(value, 2)
d_original = {'hello': 'to_override'}
dup = {'hello': 'over'}
d = self.nd.update2(dchg=dup, dnow=d_original)
self.assertEqual(d, {'hello': 'over'})
d_original = {'hello': {'value': 'to_override', 'no_change': 1}}
dup = {'hello': {'value': 'over'}}
d = self.nd.update2(dchg=dup, dnow=d_original)
self.assertEqual(d, {'hello': {'value': 'over', 'no_change': 1}})
value = self.nd.get(keys=['hello', 'no_change'], dnow=d_original)
self.assertEqual(value, 1)
d_original = {'hello': {'value': 'to_override', 'no_change': 1}}
dup = {'hello': {'value': {}}}
dchg = self.nd.update2(dchg=dup, dnow=d_original)
self.assertEqual(dchg, {'hello': {'value': {}, 'no_change': 1}})
d_original = {'hello': {'value': {}, 'no_change': 1}}
dup = {'hello': {'value': 2}}
dchg = self.nd.update2(dchg=dup, dnow=d_original)
self.assertEqual(dchg, {'hello': {'value': 2, 'no_change': 1}})
| import unittest
import json
import os
import copy
from nested.nested_dict import NestedDict
from pprint import pprint
class TestNestedDict(unittest.TestCase):
@classmethod
def setUpClass(cls):
path = os.path.dirname(__file__)
cls.afile = os.path.join(path, '../nested/data/food_nested_dict.json')
cls.nd = NestedDict()
cls.d = {'a': {'b': {'c': 'C'}}}
with open(cls.afile, 'r') as fp:
cls.dfood = json.load(fp)
def test_file(self):
self.assertTrue(os.path.isfile(self.afile))
def test_dfood(self):
self.assertEqual(self.dfood.keys(), [u'0001', u'0002', u'0003'])
def test_get(self):
v = self.nd.get(keys=['a', 'b', 'c'], dnow=self.d)
self.assertEqual(v, 'C')
# depth 0
dc = copy.deepcopy(self.d)
items = ['x', 'y', 'z']
dchg = self.nd.set(value='E', keys=items, dnow=dc)
v = self.nd.get(keys=['x', 'y', 'z'], dnow=dchg)
self.assertEqual(v, 'E')
# depth 1
dc = copy.deepcopy(self.d)
items = ['a', 'y', 'z']
dchg = self.nd.set(value='E', keys=items, dnow=dc)
v = self.nd.get(keys=['a', 'y', 'z'], dnow=dchg)
self.assertEqual(v, 'E')
# depth 2
dc = copy.deepcopy(self.d)
items = ['a', 'b', 'e']
dchg = self.nd.set(value='E', keys=items, dnow=dc)
v = self.nd.get(keys=['a', 'b', 'e'], dnow=dchg)
self.assertEqual(v, 'E')
# depth 3
dc = copy.deepcopy(self.d)
items = ['a', 'b', 'c']
dchg = self.nd.set(value='E', keys=items, dnow=dc)
v = self.nd.get(keys=['a', 'b', 'c'], dnow=dchg)
self.assertEqual(v, 'E')
def test_set(self):
# update the lastdict with new value of the same key
dcopy = copy.deepcopy(self.dfood)
dchg = self.nd.set(value='topless', keys=[u'0002', u'topping', u'5001', u'type'], dnow=dcopy)
value = self.nd.get(keys=[u'0002', u'topping', u'5001'], dnow=dchg)
self.assertEqual(value, {'id': '5001', 'type': 'topless'})
# update the lastdict with new key: value, but not new dict
dcopy = copy.deepcopy(self.dfood)
dchg = self.nd.set(value='5.01', keys=['0002', 'topping', '5001', 'price'], dnow=dcopy)
value = self.nd.get(keys=['0002', 'topping', '5001'], dnow=dchg)
self.assertEqual(value, {'id': '5001', 'type': u'None', 'price': '5.01'})
# int key
dcopy = copy.deepcopy(self.dfood)
dchg = self.nd.set(value='topless', keys=[35, 'topping', '5001', 'type'], dnow=dcopy)
pprint(dchg)
argv = [35, 'topping', '5001']
value = self.nd.get(keys=argv, dnow=dchg)
self.assertEqual(value, {'type': 'topless'})
# special condition value to be dict
dcopy = copy.deepcopy(self.dfood)
dnew = {'id': 555, 'type': 'berry', 'price': 0.99}
dchg = self.nd.set(value=dnew, keys=['0002', 'topping', '5001'], dnow=dcopy)
value = self.nd.get(keys=['0002', 'topping', '5001'], dnow=dchg)
pprint(value)
self.assertEqual(value, dnew)
# without id
dcopy = copy.deepcopy(self.dfood)
dnew = {'Type': 'berry', 'price': 0.99}
dchg = self.nd.set(value=dnew, keys=['0002', 'topping', '5001'], dnow=dcopy)
value = self.nd.get(keys=['0002', 'topping', '5001'], dnow=dchg)
self.assertEqual(value, {u'id': u'5001', 'Type': 'berry', 'price': 0.99, u'type': u'None'})
def test_create(self):
keys = ['a', 'b', 'c']
value = {u'd': 1}
d = self.nd.create(value=value, keys=keys)
dchg = {'a': {'b': {'c': {u'd': 1}}}}
self.assertEqual(d, dchg)
def test_update(self):
d_original = {'hello1': 1}
dup = {'hello2': 2}
d = self.nd.update(dchg=dup, dnow=d_original)
self.assertEqual(d, {'hello1': 1, 'hello2': 2})
# d_original did not change
self.assertEqual(set(d.keys()), set(['hello1', 'hello2']))
# dnow in parameters will be updated(!)
# self.assertEqual(d_original.keys(), ['hello1'])
value = self.nd.get(keys=['hello2'], dnow=d)
self.assertEqual(value, 2)
d_original = {'hello': 'to_override'}
dup = {'hello': 'over'}
d = self.nd.update(dchg=dup, dnow=d_original)
self.assertEqual(d, {'hello': 'over'})
d_original = {'hello': {'value': 'to_override', 'no_change': 1}}
dup = {'hello': {'value': 'over'}}
d = self.nd.update(dchg=dup, dnow=d_original)
self.assertEqual(d, {'hello': {'value': 'over', 'no_change': 1}})
value = self.nd.get(keys=['hello', 'no_change'], dnow=d_original)
self.assertEqual(value, 1)
d_original = {'hello': {'value': 'to_override', 'no_change': 1}}
dup = {'hello': {'value': {}}}
dchg = self.nd.update(dchg=dup, dnow=d_original)
self.assertEqual(dchg, {'hello': {'value': {}, 'no_change': 1}})
d_original = {'hello': {'value': {}, 'no_change': 1}}
dup = {'hello': {'value': 2}}
dchg = self.nd.update(dchg=dup, dnow=d_original)
self.assertEqual(dchg, {'hello': {'value': 2, 'no_change': 1}})
def test_merge_shallow(self):
d = {}
dchg = {}
du = self.nd.merge_shallow(dchg=dchg, dnow=d)
self.assertEqual(du, d)
d_original = {'hello1': 1}
dup = {'hello2': 2}
du = self.nd.merge_shallow(dchg=dup, dnow=d_original)
self.assertEqual(du, {'hello1': 1, 'hello2': 2})
# this is not shallow
d_original = {'hello': {'value': 'to_override', 'no_change': 1}}
dup = {'hello': {'value': 'over'}}
d = self.nd.merge_shallow(dchg=dup, dnow=d_original)
self.assertEqual(d, {'hello': {'value': 'over'}})
def test_update2(self):
d_original = {'hello1': 1}
dup = {'hello2': 2}
d = self.nd.update2(dchg=dup, dnow=d_original)
self.assertEqual(d, {'hello1': 1, 'hello2': 2})
# d_original did not change
self.assertEqual(set(d.keys()), set(['hello1', 'hello2']))
# self.assertEqual(d_original.keys(), ['hello1'])
value = self.nd.get(keys=['hello2'], dnow=d)
self.assertEqual(value, 2)
d_original = {'hello': 'to_override'}
dup = {'hello': 'over'}
d = self.nd.update2(dchg=dup, dnow=d_original)
self.assertEqual(d, {'hello': 'over'})
d_original = {'hello': {'value': 'to_override', 'no_change': 1}}
dup = {'hello': {'value': 'over'}}
d = self.nd.update2(dchg=dup, dnow=d_original)
self.assertEqual(d, {'hello': {'value': 'over', 'no_change': 1}})
value = self.nd.get(keys=['hello', 'no_change'], dnow=d_original)
self.assertEqual(value, 1)
d_original = {'hello': {'value': 'to_override', 'no_change': 1}}
dup = {'hello': {'value': {}}}
dchg = self.nd.update2(dchg=dup, dnow=d_original)
self.assertEqual(dchg, {'hello': {'value': {}, 'no_change': 1}})
d_original = {'hello': {'value': {}, 'no_change': 1}}
dup = {'hello': {'value': 2}}
dchg = self.nd.update2(dchg=dup, dnow=d_original)
self.assertEqual(dchg, {'hello': {'value': 2, 'no_change': 1}})
| [
3,
6,
8,
10,
12
] |
1,475 | 021efe01c21db4d3bd936ba4eb75dc03dde91cc6 | <mask token>
def test_verify_home_page(eyes, driver):
eyes.open(driver, 'FlyDubai IBE', 'Verify Home Page')
eyes.check('FZ IBE Home page', Target.window())
eyes.close(False)
<mask token>
def test_verify_manage_booking_widget(eyes, driver):
eyes.open(driver, 'FlyDubai IBE', 'Verify Manage Booking Widget')
driver.find_element_by_css_selector(
'div.widgetBoxWrapper > ul li:nth-of-type(2)').click()
eyes.check('FZ Manage Booking Widget', Target.region(
'div.widgetBoxWrapper'))
eyes.close(False)
def test_verify_checkin_widget(eyes, driver):
eyes.open(driver, 'FlyDubai IBE', 'Verify Checkin Widget')
driver.find_element_by_css_selector(
'div.widgetBoxWrapper > ul li:nth-of-type(3)').click()
eyes.check('FZ Manage Booking Widget', Target.region(
'div.widgetBoxWrapper'))
eyes.close(False)
def test_verify_flight_status_widget(eyes, driver):
eyes.open(driver, 'FlyDubai IBE', 'Verify Flight Status Widget')
driver.find_element_by_css_selector(
'div.widgetBoxWrapper > ul li:nth-of-type(4)').click()
eyes.check('FZ Manage Booking Widget', Target.region(
'div.widgetBoxWrapper'))
eyes.close(False)
<mask token>
| <mask token>
def test_verify_home_page(eyes, driver):
eyes.open(driver, 'FlyDubai IBE', 'Verify Home Page')
eyes.check('FZ IBE Home page', Target.window())
eyes.close(False)
<mask token>
def test_verify_manage_booking_widget(eyes, driver):
eyes.open(driver, 'FlyDubai IBE', 'Verify Manage Booking Widget')
driver.find_element_by_css_selector(
'div.widgetBoxWrapper > ul li:nth-of-type(2)').click()
eyes.check('FZ Manage Booking Widget', Target.region(
'div.widgetBoxWrapper'))
eyes.close(False)
def test_verify_checkin_widget(eyes, driver):
eyes.open(driver, 'FlyDubai IBE', 'Verify Checkin Widget')
driver.find_element_by_css_selector(
'div.widgetBoxWrapper > ul li:nth-of-type(3)').click()
eyes.check('FZ Manage Booking Widget', Target.region(
'div.widgetBoxWrapper'))
eyes.close(False)
def test_verify_flight_status_widget(eyes, driver):
eyes.open(driver, 'FlyDubai IBE', 'Verify Flight Status Widget')
driver.find_element_by_css_selector(
'div.widgetBoxWrapper > ul li:nth-of-type(4)').click()
eyes.check('FZ Manage Booking Widget', Target.region(
'div.widgetBoxWrapper'))
eyes.close(False)
def test_verify_search_ow_results_page(eyes, driver):
eyes.open(driver, 'FlyDubai IBE', 'Verify Flight listing page')
driver.find_element_by_css_selector(
'div.widgetBoxWrapper .radio-oneway-div').click()
driver.find_element_by_css_selector(
'div.widgetBoxWrapper #airport-destination').click()
driver.find_element_by_css_selector(
".DestinationAirlist li[data-value='MCT']").click()
driver.execute_script(
'date = new Date();date.setDate(date.getDate()+20);document.getElementById("FormModel_DepartureDate").value=""+date'
)
driver.find_element_by_css_selector(
"div.widgetBoxWrapper input[value='Search']").click()
WebDriverWait(driver, 30).until(EC.presence_of_all_elements_located((By
.CSS_SELECTOR, '.tripSummaryBox')))
eyes.check('FZ Manage Booking Widget', Target.window())
eyes.close(False)
| <mask token>
def test_verify_home_page(eyes, driver):
eyes.open(driver, 'FlyDubai IBE', 'Verify Home Page')
eyes.check('FZ IBE Home page', Target.window())
eyes.close(False)
def test_verify_search_widget(eyes, driver):
eyes.open(driver, 'FlyDubai IBE', 'Verify Search Widget')
eyes.check('FZ Search Widget', Target.region('div.widgetBoxWrapper'))
eyes.close(False)
def test_verify_manage_booking_widget(eyes, driver):
eyes.open(driver, 'FlyDubai IBE', 'Verify Manage Booking Widget')
driver.find_element_by_css_selector(
'div.widgetBoxWrapper > ul li:nth-of-type(2)').click()
eyes.check('FZ Manage Booking Widget', Target.region(
'div.widgetBoxWrapper'))
eyes.close(False)
def test_verify_checkin_widget(eyes, driver):
eyes.open(driver, 'FlyDubai IBE', 'Verify Checkin Widget')
driver.find_element_by_css_selector(
'div.widgetBoxWrapper > ul li:nth-of-type(3)').click()
eyes.check('FZ Manage Booking Widget', Target.region(
'div.widgetBoxWrapper'))
eyes.close(False)
def test_verify_flight_status_widget(eyes, driver):
eyes.open(driver, 'FlyDubai IBE', 'Verify Flight Status Widget')
driver.find_element_by_css_selector(
'div.widgetBoxWrapper > ul li:nth-of-type(4)').click()
eyes.check('FZ Manage Booking Widget', Target.region(
'div.widgetBoxWrapper'))
eyes.close(False)
def test_verify_search_ow_results_page(eyes, driver):
eyes.open(driver, 'FlyDubai IBE', 'Verify Flight listing page')
driver.find_element_by_css_selector(
'div.widgetBoxWrapper .radio-oneway-div').click()
driver.find_element_by_css_selector(
'div.widgetBoxWrapper #airport-destination').click()
driver.find_element_by_css_selector(
".DestinationAirlist li[data-value='MCT']").click()
driver.execute_script(
'date = new Date();date.setDate(date.getDate()+20);document.getElementById("FormModel_DepartureDate").value=""+date'
)
driver.find_element_by_css_selector(
"div.widgetBoxWrapper input[value='Search']").click()
WebDriverWait(driver, 30).until(EC.presence_of_all_elements_located((By
.CSS_SELECTOR, '.tripSummaryBox')))
eyes.check('FZ Manage Booking Widget', Target.window())
eyes.close(False)
| from applitools.selenium import Target, eyes
from selenium.webdriver.support.wait import WebDriverWait
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
def test_verify_home_page(eyes, driver):
eyes.open(driver, 'FlyDubai IBE', 'Verify Home Page')
eyes.check('FZ IBE Home page', Target.window())
eyes.close(False)
def test_verify_search_widget(eyes, driver):
eyes.open(driver, 'FlyDubai IBE', 'Verify Search Widget')
eyes.check('FZ Search Widget', Target.region('div.widgetBoxWrapper'))
eyes.close(False)
def test_verify_manage_booking_widget(eyes, driver):
eyes.open(driver, 'FlyDubai IBE', 'Verify Manage Booking Widget')
driver.find_element_by_css_selector(
'div.widgetBoxWrapper > ul li:nth-of-type(2)').click()
eyes.check('FZ Manage Booking Widget', Target.region(
'div.widgetBoxWrapper'))
eyes.close(False)
def test_verify_checkin_widget(eyes, driver):
eyes.open(driver, 'FlyDubai IBE', 'Verify Checkin Widget')
driver.find_element_by_css_selector(
'div.widgetBoxWrapper > ul li:nth-of-type(3)').click()
eyes.check('FZ Manage Booking Widget', Target.region(
'div.widgetBoxWrapper'))
eyes.close(False)
def test_verify_flight_status_widget(eyes, driver):
eyes.open(driver, 'FlyDubai IBE', 'Verify Flight Status Widget')
driver.find_element_by_css_selector(
'div.widgetBoxWrapper > ul li:nth-of-type(4)').click()
eyes.check('FZ Manage Booking Widget', Target.region(
'div.widgetBoxWrapper'))
eyes.close(False)
def test_verify_search_ow_results_page(eyes, driver):
eyes.open(driver, 'FlyDubai IBE', 'Verify Flight listing page')
driver.find_element_by_css_selector(
'div.widgetBoxWrapper .radio-oneway-div').click()
driver.find_element_by_css_selector(
'div.widgetBoxWrapper #airport-destination').click()
driver.find_element_by_css_selector(
".DestinationAirlist li[data-value='MCT']").click()
driver.execute_script(
'date = new Date();date.setDate(date.getDate()+20);document.getElementById("FormModel_DepartureDate").value=""+date'
)
driver.find_element_by_css_selector(
"div.widgetBoxWrapper input[value='Search']").click()
WebDriverWait(driver, 30).until(EC.presence_of_all_elements_located((By
.CSS_SELECTOR, '.tripSummaryBox')))
eyes.check('FZ Manage Booking Widget', Target.window())
eyes.close(False)
| from applitools.selenium import Target, eyes
from selenium.webdriver.support.wait import WebDriverWait
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
def test_verify_home_page(eyes, driver):
eyes.open(driver, "FlyDubai IBE", "Verify Home Page")
eyes.check("FZ IBE Home page", Target.window())
eyes.close(False)
def test_verify_search_widget(eyes, driver):
eyes.open(driver, "FlyDubai IBE", "Verify Search Widget")
eyes.check("FZ Search Widget", Target.region('div.widgetBoxWrapper'))
eyes.close(False)
def test_verify_manage_booking_widget(eyes, driver):
eyes.open(driver, "FlyDubai IBE", "Verify Manage Booking Widget")
driver.find_element_by_css_selector('div.widgetBoxWrapper > ul li:nth-of-type(2)').click()
eyes.check("FZ Manage Booking Widget", Target.region('div.widgetBoxWrapper'))
eyes.close(False)
def test_verify_checkin_widget(eyes, driver):
eyes.open(driver, "FlyDubai IBE", "Verify Checkin Widget")
driver.find_element_by_css_selector('div.widgetBoxWrapper > ul li:nth-of-type(3)').click()
eyes.check("FZ Manage Booking Widget", Target.region('div.widgetBoxWrapper'))
eyes.close(False)
def test_verify_flight_status_widget(eyes, driver):
eyes.open(driver, "FlyDubai IBE", "Verify Flight Status Widget")
driver.find_element_by_css_selector('div.widgetBoxWrapper > ul li:nth-of-type(4)').click()
eyes.check("FZ Manage Booking Widget", Target.region('div.widgetBoxWrapper'))
eyes.close(False)
def test_verify_search_ow_results_page(eyes, driver):
eyes.open(driver, "FlyDubai IBE", "Verify Flight listing page")
driver.find_element_by_css_selector('div.widgetBoxWrapper .radio-oneway-div').click()
driver.find_element_by_css_selector('div.widgetBoxWrapper #airport-destination').click()
driver.find_element_by_css_selector('.DestinationAirlist li[data-value=\'MCT\']').click()
driver.execute_script('date = new Date();date.setDate(date.getDate()+20);document.getElementById("FormModel_DepartureDate").value=""+date')
driver.find_element_by_css_selector("div.widgetBoxWrapper input[value='Search']").click()
WebDriverWait(driver,30).until(EC.presence_of_all_elements_located((By.CSS_SELECTOR,'.tripSummaryBox')))
eyes.check("FZ Manage Booking Widget", Target.window())
eyes.close(False) | [
4,
5,
6,
7,
8
] |
1,476 | 22aa6042b77c3cfd1f102a0ea22a43223e366d2f | <mask token>
def rename(dir, pattern, titlePattern):
for pathAndFilename in glob.iglob(os.path.join(dir, pattern)):
title, ext = os.path.splitext(os.path.basename(pathAndFilename))
hexa = title[:2]
hexb = title[2:4]
title = title[4:] + '_' + str(int(hexa, 16)) + '_' + str(int(hexb, 16))
os.rename(pathAndFilename, os.path.join(dir, titlePattern % title +
ext))
def renamer(files, pattern, replacement):
for pathname in glob.glob(files):
basename = os.path.basename(pathname)
new_filename = re.sub(pattern, replacement, basename)
if new_filename != basename:
os.rename(pathname, os.path.join(os.path.dirname(pathname),
new_filename))
<mask token>
| <mask token>
def rename(dir, pattern, titlePattern):
for pathAndFilename in glob.iglob(os.path.join(dir, pattern)):
title, ext = os.path.splitext(os.path.basename(pathAndFilename))
hexa = title[:2]
hexb = title[2:4]
title = title[4:] + '_' + str(int(hexa, 16)) + '_' + str(int(hexb, 16))
os.rename(pathAndFilename, os.path.join(dir, titlePattern % title +
ext))
def renamer(files, pattern, replacement):
for pathname in glob.glob(files):
basename = os.path.basename(pathname)
new_filename = re.sub(pattern, replacement, basename)
if new_filename != basename:
os.rename(pathname, os.path.join(os.path.dirname(pathname),
new_filename))
rename('C:\\test', '*.jpeg', '%s')
| <mask token>
lst = []
def rename(dir, pattern, titlePattern):
for pathAndFilename in glob.iglob(os.path.join(dir, pattern)):
title, ext = os.path.splitext(os.path.basename(pathAndFilename))
hexa = title[:2]
hexb = title[2:4]
title = title[4:] + '_' + str(int(hexa, 16)) + '_' + str(int(hexb, 16))
os.rename(pathAndFilename, os.path.join(dir, titlePattern % title +
ext))
def renamer(files, pattern, replacement):
for pathname in glob.glob(files):
basename = os.path.basename(pathname)
new_filename = re.sub(pattern, replacement, basename)
if new_filename != basename:
os.rename(pathname, os.path.join(os.path.dirname(pathname),
new_filename))
rename('C:\\test', '*.jpeg', '%s')
| import re, glob, os
lst = []
def rename(dir, pattern, titlePattern):
for pathAndFilename in glob.iglob(os.path.join(dir, pattern)):
title, ext = os.path.splitext(os.path.basename(pathAndFilename))
hexa = title[:2]
hexb = title[2:4]
title = title[4:] + '_' + str(int(hexa, 16)) + '_' + str(int(hexb, 16))
os.rename(pathAndFilename, os.path.join(dir, titlePattern % title +
ext))
def renamer(files, pattern, replacement):
for pathname in glob.glob(files):
basename = os.path.basename(pathname)
new_filename = re.sub(pattern, replacement, basename)
if new_filename != basename:
os.rename(pathname, os.path.join(os.path.dirname(pathname),
new_filename))
rename('C:\\test', '*.jpeg', '%s')
| import re, glob, os
lst = []
def rename(dir, pattern, titlePattern):
for pathAndFilename in glob.iglob(os.path.join(dir, pattern)):
title, ext = os.path.splitext(os.path.basename(pathAndFilename))
#title = title[22:]
#hexa = []
#hexb = []
hexa = title[:2]
hexb = title[2:4]
#title = title[4:]
title = (title[4:] + '_' + str(int(hexa,16)) + '_' + str(int(hexb, 16)))
#print(title)
#lst.append(title)
os.rename(pathAndFilename,
os.path.join(dir, titlePattern % title + ext))
def renamer(files, pattern, replacement):
for pathname in glob.glob(files):
basename= os.path.basename(pathname)
new_filename= re.sub(pattern, replacement, basename)
if new_filename != basename:
os.rename(
pathname,
os.path.join(os.path.dirname(pathname), new_filename))
rename(r'C:\test', r'*.jpeg', r'%s')
#print(lst)
| [
2,
3,
4,
5,
6
] |
1,477 | e4b6304be10ee5a741c8a193dcf65950299ef11a | <mask token>
@ask.launch
def new_game():
pygame.mixer.init()
pygame.mixer.music.load('haha2.mp3')
pygame.mixer.music.play()
welcome_msg = render_template('welcome')
return question(welcome_msg)
@ask.intent('ChooseIntent', convert={'first': int})
def ask_choose(first):
session.attributes['first'] = first
ask = render_template('ask')
return question(ask)
@ask.intent('StopIntent')
def stop():
pygame.mixer.music.stop()
return question('')
@ask.intent('AgainIntent')
def again():
pygame.mixer.music.load('britney.mp3')
pygame.mixer.music.play()
return question('')
@ask.intent('AMAZON.YesIntent')
def yes_ans():
first = session.attributes['first']
if first % 3 == 0:
msg = render_template('win')
else:
msg = render_template('lose')
return statement(msg)
@ask.intent('AMAZON.NoIntent')
def no_ans():
first = session.attributes['first']
if first % 3 != 0:
msg = render_template('win')
else:
msg = render_template('lose')
return statement(msg)
<mask token>
| <mask token>
logging.getLogger('flask_ask').setLevel(logging.DEBUG)
@ask.launch
def new_game():
pygame.mixer.init()
pygame.mixer.music.load('haha2.mp3')
pygame.mixer.music.play()
welcome_msg = render_template('welcome')
return question(welcome_msg)
@ask.intent('ChooseIntent', convert={'first': int})
def ask_choose(first):
session.attributes['first'] = first
ask = render_template('ask')
return question(ask)
@ask.intent('StopIntent')
def stop():
pygame.mixer.music.stop()
return question('')
@ask.intent('AgainIntent')
def again():
pygame.mixer.music.load('britney.mp3')
pygame.mixer.music.play()
return question('')
@ask.intent('AMAZON.YesIntent')
def yes_ans():
first = session.attributes['first']
if first % 3 == 0:
msg = render_template('win')
else:
msg = render_template('lose')
return statement(msg)
@ask.intent('AMAZON.NoIntent')
def no_ans():
first = session.attributes['first']
if first % 3 != 0:
msg = render_template('win')
else:
msg = render_template('lose')
return statement(msg)
if __name__ == '__main__':
app.run(debug=True)
| <mask token>
app = Flask(__name__)
ask = Ask(app, '/')
logging.getLogger('flask_ask').setLevel(logging.DEBUG)
@ask.launch
def new_game():
pygame.mixer.init()
pygame.mixer.music.load('haha2.mp3')
pygame.mixer.music.play()
welcome_msg = render_template('welcome')
return question(welcome_msg)
@ask.intent('ChooseIntent', convert={'first': int})
def ask_choose(first):
session.attributes['first'] = first
ask = render_template('ask')
return question(ask)
@ask.intent('StopIntent')
def stop():
pygame.mixer.music.stop()
return question('')
@ask.intent('AgainIntent')
def again():
pygame.mixer.music.load('britney.mp3')
pygame.mixer.music.play()
return question('')
@ask.intent('AMAZON.YesIntent')
def yes_ans():
first = session.attributes['first']
if first % 3 == 0:
msg = render_template('win')
else:
msg = render_template('lose')
return statement(msg)
@ask.intent('AMAZON.NoIntent')
def no_ans():
first = session.attributes['first']
if first % 3 != 0:
msg = render_template('win')
else:
msg = render_template('lose')
return statement(msg)
if __name__ == '__main__':
app.run(debug=True)
| import logging
from random import randint
import pygame
from flask import Flask, render_template
from flask_ask import Ask, statement, question, session
from playsound import playsound
app = Flask(__name__)
ask = Ask(app, '/')
logging.getLogger('flask_ask').setLevel(logging.DEBUG)
@ask.launch
def new_game():
pygame.mixer.init()
pygame.mixer.music.load('haha2.mp3')
pygame.mixer.music.play()
welcome_msg = render_template('welcome')
return question(welcome_msg)
@ask.intent('ChooseIntent', convert={'first': int})
def ask_choose(first):
session.attributes['first'] = first
ask = render_template('ask')
return question(ask)
@ask.intent('StopIntent')
def stop():
pygame.mixer.music.stop()
return question('')
@ask.intent('AgainIntent')
def again():
pygame.mixer.music.load('britney.mp3')
pygame.mixer.music.play()
return question('')
@ask.intent('AMAZON.YesIntent')
def yes_ans():
first = session.attributes['first']
if first % 3 == 0:
msg = render_template('win')
else:
msg = render_template('lose')
return statement(msg)
@ask.intent('AMAZON.NoIntent')
def no_ans():
first = session.attributes['first']
if first % 3 != 0:
msg = render_template('win')
else:
msg = render_template('lose')
return statement(msg)
if __name__ == '__main__':
app.run(debug=True)
| import logging
from random import randint
import pygame
from flask import Flask, render_template
from flask_ask import Ask, statement, question, session
from playsound import playsound
app = Flask(__name__)
ask = Ask(app, "/")
logging.getLogger("flask_ask").setLevel(logging.DEBUG)
@ask.launch
def new_game():
pygame.mixer.init()
pygame.mixer.music.load("haha2.mp3")
pygame.mixer.music.play()
welcome_msg = render_template('welcome')
return question(welcome_msg)
@ask.intent("ChooseIntent", convert={'first': int})
def ask_choose(first):
session.attributes['first'] = first
ask = render_template('ask')
return question(ask)
@ask.intent("StopIntent")
def stop():
pygame.mixer.music.stop()
return question('')
@ask.intent("AgainIntent")
def again():
pygame.mixer.music.load("britney.mp3")
pygame.mixer.music.play()
return question('')
@ask.intent("AMAZON.YesIntent")
def yes_ans():
first = session.attributes['first']
if first % 3 == 0:
msg = render_template('win')
else:
msg = render_template('lose')
return statement(msg)
@ask.intent("AMAZON.NoIntent")
def no_ans():
first = session.attributes['first']
if first % 3 != 0:
msg = render_template('win')
else:
msg = render_template('lose')
return statement(msg)
if __name__ == '__main__':
app.run(debug=True)
| [
6,
7,
8,
9,
10
] |
1,478 | 5ac489a2d30155bb92767184ad546247817e28ea | <mask token>
| <mask token>
print(word_dict)
<mask token>
print(multiple_list)
<mask token>
print(final_list)
| <mask token>
word_list = ['Tree', 'Apple', 'Snake', 'flowers']
word_dict = {word: word[::-1] for word in word_list}
print(word_dict)
<mask token>
use_range = range(1, 101)
multiple_list = [i for i in use_range if i % 2 == 0]
print(multiple_list)
<mask token>
list_above = [[1, 2, 3, 4], [5, 6, 7, 8]]
final_list = [[(bottom * 2) for bottom in top] for top in list_above]
print(final_list)
| """
Create a list of words and with it, create a new dictionary
in which the key is the word and the value is the same word
reversed.
"""
word_list = ['Tree','Apple','Snake','flowers']
word_dict = {word:word[::-1] for word in word_list}
print(word_dict)
#Output: {'Tree': 'eerT', 'Apple': 'elppA', 'Snake': 'ekanS', 'flowers': 'srewolf'}
"""
Let's try this one again:
Using the range function, create a sequence of numbers
from 1 to 100, and use a comprehension to return only
those that are multiples of 2.
"""
use_range = range(1,101)
multiple_list = [i for i in use_range if i%2==0]
print(multiple_list)
"""
[[1, 2, 3, 4], [5, 6, 7, 8]]
Use the list above and create nested comprehensions so that
the final value is a new list like the following:
[[2, 4, 6, 8], [10, 12, 14, 16]] (each number multiplied by 2)
"""
list_above = [[1, 2, 3, 4], [5, 6, 7, 8]]
final_list = [[bottom*2 for bottom in top] for top in list_above]
print(final_list) | null | [
0,
1,
2,
3
] |
1,479 | a5a7cd112faad1096ce4c6f04b2179fbdf732702 | <mask token>
| <mask token>
setup(packages=find_packages(), setup_requires=['flask'], name='mith1')
| from setuptools import setup, find_packages
setup(packages=find_packages(), setup_requires=['flask'], name='mith1')
| from setuptools import setup, find_packages
setup(
packages=find_packages(),
setup_requires=["flask"],
name="mith1",
) | null | [
0,
1,
2,
3
] |
1,480 | 594479c22cada665dcdc76737085ce342d7d5faf | <mask token>
def convert_type(data_value):
try:
return int(data_value)
except ValueError:
try:
return float(data_value)
except ValueError:
return data_value
<mask token>
def get_delim(sourcefile1):
print('> executing get_delim function')
data = open(sourcefile1, 'r')
my_read_data = data.read()
if my_read_data.find(',') > 0:
print(' delimiter: comma')
return ','
else:
print(' delimiter: space')
return ' '
print(' ')
def lines_to_dict(lines, header=False):
print('> executing lines_to_dict')
column_titles = ['class', 'alc', 'ma', 'ash', 'alkash', 'mg', 'tphen',
'flav', 'nfphen', 'pac', 'ci', 'hue', 'od', 'proline']
data_dict = {}
for idx, column in enumerate(column_titles):
data_dict[column] = []
for row in lines:
data_dict[column] += [row[idx]]
return data_dict
<mask token>
def plot_data3(dd, col1, label1, col2a, col2b, label2a, label2b, n, debug=False
):
df = pd.DataFrame.from_dict(dd)
x = np.fromiter(dd[col1], dtype=float)
y1 = np.fromiter(dd[col2a], dtype=float)
y2 = np.fromiter(dd[col2b], dtype=float)
fig, ax1 = plt.subplots()
plt.title(label1 + ' by ' + label2a + ' and ' + label2b)
clra = 'indigo'
ax1.set_xlabel(label1)
ax1.set_ylabel(label2a, color=clra)
ax1.scatter(df[col1], df[col2a], color=clra, marker='^')
xp = np.linspace(np.amin(x), np.amax(x), 100)
weights = np.polyfit(x, y1, 1)
model = np.poly1d(weights)
plt.plot(xp, model(xp), '-', c=clra)
ax1.tick_params(axis='y', labelcolor=clra)
ax2 = ax1.twinx()
clrb = 'darkgreen'
ax2.set_ylabel(label2b, color=clrb)
ax2.scatter(df[col1], df[col2b], color=clrb)
ax2.tick_params(axis='y', labelcolor=clrb)
xp = np.linspace(np.amin(x), np.amax(x), 100)
weights = np.polyfit(x, y2, 1)
model = np.poly1d(weights)
plt.plot(xp, model(xp), '-', c=clrb)
ax1.tick_params(axis='y', labelcolor=clra)
fig.tight_layout()
plt.savefig('an2_colour' + n + '.png')
plt.show()
def main():
data_file = 'wine.data'
dlm = get_delim(data_file)
my_data = parse_file(data_file, dlm)
data_dictionary = lines_to_dict(my_data)
plot_data3(data_dictionary, 'ci', 'Colour Intensity', 'alc', 'flav',
'Alcohol', 'Flavonoids', '1')
plot_data3(data_dictionary, 'hue', 'Hue', 'pac', 'od',
'Proanthocyanidins', 'OD280/OD315 of Diluted Wines', '2')
<mask token>
| <mask token>
def convert_type(data_value):
try:
return int(data_value)
except ValueError:
try:
return float(data_value)
except ValueError:
return data_value
<mask token>
def get_delim(sourcefile1):
print('> executing get_delim function')
data = open(sourcefile1, 'r')
my_read_data = data.read()
if my_read_data.find(',') > 0:
print(' delimiter: comma')
return ','
else:
print(' delimiter: space')
return ' '
print(' ')
def lines_to_dict(lines, header=False):
print('> executing lines_to_dict')
column_titles = ['class', 'alc', 'ma', 'ash', 'alkash', 'mg', 'tphen',
'flav', 'nfphen', 'pac', 'ci', 'hue', 'od', 'proline']
data_dict = {}
for idx, column in enumerate(column_titles):
data_dict[column] = []
for row in lines:
data_dict[column] += [row[idx]]
return data_dict
def parse_file(data_file, dlm, debug=False):
print('> executing parse_file')
assert op.isfile(data_file)
with open(data_file, 'r') as fhandle:
csv_reader = csv.reader(fhandle, delimiter=dlm)
lines = []
if debug:
count = 0
for line in csv_reader:
if debug:
if count > 2:
break
count += 1
newline = []
for value in line:
newline += [convert_type(value)]
if len(newline) > 0:
lines += [newline]
print('> view a few lines')
print(' ')
for line in lines[0:2]:
print(line)
print(' ')
return lines
def plot_data3(dd, col1, label1, col2a, col2b, label2a, label2b, n, debug=False
):
df = pd.DataFrame.from_dict(dd)
x = np.fromiter(dd[col1], dtype=float)
y1 = np.fromiter(dd[col2a], dtype=float)
y2 = np.fromiter(dd[col2b], dtype=float)
fig, ax1 = plt.subplots()
plt.title(label1 + ' by ' + label2a + ' and ' + label2b)
clra = 'indigo'
ax1.set_xlabel(label1)
ax1.set_ylabel(label2a, color=clra)
ax1.scatter(df[col1], df[col2a], color=clra, marker='^')
xp = np.linspace(np.amin(x), np.amax(x), 100)
weights = np.polyfit(x, y1, 1)
model = np.poly1d(weights)
plt.plot(xp, model(xp), '-', c=clra)
ax1.tick_params(axis='y', labelcolor=clra)
ax2 = ax1.twinx()
clrb = 'darkgreen'
ax2.set_ylabel(label2b, color=clrb)
ax2.scatter(df[col1], df[col2b], color=clrb)
ax2.tick_params(axis='y', labelcolor=clrb)
xp = np.linspace(np.amin(x), np.amax(x), 100)
weights = np.polyfit(x, y2, 1)
model = np.poly1d(weights)
plt.plot(xp, model(xp), '-', c=clrb)
ax1.tick_params(axis='y', labelcolor=clra)
fig.tight_layout()
plt.savefig('an2_colour' + n + '.png')
plt.show()
def main():
data_file = 'wine.data'
dlm = get_delim(data_file)
my_data = parse_file(data_file, dlm)
data_dictionary = lines_to_dict(my_data)
plot_data3(data_dictionary, 'ci', 'Colour Intensity', 'alc', 'flav',
'Alcohol', 'Flavonoids', '1')
plot_data3(data_dictionary, 'hue', 'Hue', 'pac', 'od',
'Proanthocyanidins', 'OD280/OD315 of Diluted Wines', '2')
<mask token>
| print(
'========================================================================================'
)
print(
'========================================================================================'
)
print('> start of program an2_colour.py')
print('> import libraries')
<mask token>
print('> define convert_type function')
def convert_type(data_value):
try:
return int(data_value)
except ValueError:
try:
return float(data_value)
except ValueError:
return data_value
print('> define get_delim function')
def get_delim(sourcefile1):
print('> executing get_delim function')
data = open(sourcefile1, 'r')
my_read_data = data.read()
if my_read_data.find(',') > 0:
print(' delimiter: comma')
return ','
else:
print(' delimiter: space')
return ' '
print(' ')
def lines_to_dict(lines, header=False):
print('> executing lines_to_dict')
column_titles = ['class', 'alc', 'ma', 'ash', 'alkash', 'mg', 'tphen',
'flav', 'nfphen', 'pac', 'ci', 'hue', 'od', 'proline']
data_dict = {}
for idx, column in enumerate(column_titles):
data_dict[column] = []
for row in lines:
data_dict[column] += [row[idx]]
return data_dict
def parse_file(data_file, dlm, debug=False):
print('> executing parse_file')
assert op.isfile(data_file)
with open(data_file, 'r') as fhandle:
csv_reader = csv.reader(fhandle, delimiter=dlm)
lines = []
if debug:
count = 0
for line in csv_reader:
if debug:
if count > 2:
break
count += 1
newline = []
for value in line:
newline += [convert_type(value)]
if len(newline) > 0:
lines += [newline]
print('> view a few lines')
print(' ')
for line in lines[0:2]:
print(line)
print(' ')
return lines
def plot_data3(dd, col1, label1, col2a, col2b, label2a, label2b, n, debug=False
):
df = pd.DataFrame.from_dict(dd)
x = np.fromiter(dd[col1], dtype=float)
y1 = np.fromiter(dd[col2a], dtype=float)
y2 = np.fromiter(dd[col2b], dtype=float)
fig, ax1 = plt.subplots()
plt.title(label1 + ' by ' + label2a + ' and ' + label2b)
clra = 'indigo'
ax1.set_xlabel(label1)
ax1.set_ylabel(label2a, color=clra)
ax1.scatter(df[col1], df[col2a], color=clra, marker='^')
xp = np.linspace(np.amin(x), np.amax(x), 100)
weights = np.polyfit(x, y1, 1)
model = np.poly1d(weights)
plt.plot(xp, model(xp), '-', c=clra)
ax1.tick_params(axis='y', labelcolor=clra)
ax2 = ax1.twinx()
clrb = 'darkgreen'
ax2.set_ylabel(label2b, color=clrb)
ax2.scatter(df[col1], df[col2b], color=clrb)
ax2.tick_params(axis='y', labelcolor=clrb)
xp = np.linspace(np.amin(x), np.amax(x), 100)
weights = np.polyfit(x, y2, 1)
model = np.poly1d(weights)
plt.plot(xp, model(xp), '-', c=clrb)
ax1.tick_params(axis='y', labelcolor=clra)
fig.tight_layout()
plt.savefig('an2_colour' + n + '.png')
plt.show()
def main():
data_file = 'wine.data'
dlm = get_delim(data_file)
my_data = parse_file(data_file, dlm)
data_dictionary = lines_to_dict(my_data)
plot_data3(data_dictionary, 'ci', 'Colour Intensity', 'alc', 'flav',
'Alcohol', 'Flavonoids', '1')
plot_data3(data_dictionary, 'hue', 'Hue', 'pac', 'od',
'Proanthocyanidins', 'OD280/OD315 of Diluted Wines', '2')
if __name__ == '__main__':
main()
| print(
'========================================================================================'
)
print(
'========================================================================================'
)
print('> start of program an2_colour.py')
print('> import libraries')
import argparse
import os.path as op
import csv
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
from numpy.polynomial.polynomial import polyfit
print('> define convert_type function')
def convert_type(data_value):
try:
return int(data_value)
except ValueError:
try:
return float(data_value)
except ValueError:
return data_value
print('> define get_delim function')
def get_delim(sourcefile1):
print('> executing get_delim function')
data = open(sourcefile1, 'r')
my_read_data = data.read()
if my_read_data.find(',') > 0:
print(' delimiter: comma')
return ','
else:
print(' delimiter: space')
return ' '
print(' ')
def lines_to_dict(lines, header=False):
print('> executing lines_to_dict')
column_titles = ['class', 'alc', 'ma', 'ash', 'alkash', 'mg', 'tphen',
'flav', 'nfphen', 'pac', 'ci', 'hue', 'od', 'proline']
data_dict = {}
for idx, column in enumerate(column_titles):
data_dict[column] = []
for row in lines:
data_dict[column] += [row[idx]]
return data_dict
def parse_file(data_file, dlm, debug=False):
print('> executing parse_file')
assert op.isfile(data_file)
with open(data_file, 'r') as fhandle:
csv_reader = csv.reader(fhandle, delimiter=dlm)
lines = []
if debug:
count = 0
for line in csv_reader:
if debug:
if count > 2:
break
count += 1
newline = []
for value in line:
newline += [convert_type(value)]
if len(newline) > 0:
lines += [newline]
print('> view a few lines')
print(' ')
for line in lines[0:2]:
print(line)
print(' ')
return lines
def plot_data3(dd, col1, label1, col2a, col2b, label2a, label2b, n, debug=False
):
df = pd.DataFrame.from_dict(dd)
x = np.fromiter(dd[col1], dtype=float)
y1 = np.fromiter(dd[col2a], dtype=float)
y2 = np.fromiter(dd[col2b], dtype=float)
fig, ax1 = plt.subplots()
plt.title(label1 + ' by ' + label2a + ' and ' + label2b)
clra = 'indigo'
ax1.set_xlabel(label1)
ax1.set_ylabel(label2a, color=clra)
ax1.scatter(df[col1], df[col2a], color=clra, marker='^')
xp = np.linspace(np.amin(x), np.amax(x), 100)
weights = np.polyfit(x, y1, 1)
model = np.poly1d(weights)
plt.plot(xp, model(xp), '-', c=clra)
ax1.tick_params(axis='y', labelcolor=clra)
ax2 = ax1.twinx()
clrb = 'darkgreen'
ax2.set_ylabel(label2b, color=clrb)
ax2.scatter(df[col1], df[col2b], color=clrb)
ax2.tick_params(axis='y', labelcolor=clrb)
xp = np.linspace(np.amin(x), np.amax(x), 100)
weights = np.polyfit(x, y2, 1)
model = np.poly1d(weights)
plt.plot(xp, model(xp), '-', c=clrb)
ax1.tick_params(axis='y', labelcolor=clra)
fig.tight_layout()
plt.savefig('an2_colour' + n + '.png')
plt.show()
def main():
data_file = 'wine.data'
dlm = get_delim(data_file)
my_data = parse_file(data_file, dlm)
data_dictionary = lines_to_dict(my_data)
plot_data3(data_dictionary, 'ci', 'Colour Intensity', 'alc', 'flav',
'Alcohol', 'Flavonoids', '1')
plot_data3(data_dictionary, 'hue', 'Hue', 'pac', 'od',
'Proanthocyanidins', 'OD280/OD315 of Diluted Wines', '2')
if __name__ == '__main__':
main()
| # program name: an2_colour.py
# no optional arguments: Uses Wine data to display information about the relationship of
# various attributes with colour and hue
print('========================================================================================')
print('========================================================================================')
print('> start of program an2_colour.py')
print('> import libraries')
import argparse
import os.path as op
import csv
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
from numpy.polynomial.polynomial import polyfit
print('> define convert_type function')
def convert_type(data_value):
try:
return int(data_value)
except ValueError:
try:
return float(data_value)
except ValueError:
return data_value
print("> define get_delim function")
def get_delim(sourcefile1):
print('> executing get_delim function')
data = open(sourcefile1, 'r')
my_read_data = data.read()
if my_read_data.find(',') > 0:
print(' delimiter: comma')
return ','
else:
print(' delimiter: space')
return ' '
print(' ')
def lines_to_dict(lines, header=False):
print('> executing lines_to_dict')
# column_titles = ['Class','Alcohol','Malic acid','Ash','Alcalinity of ash','Magnesium','Total phenols','Flavanoids','Nonflavanoid phenols','Proanthocyanins','Color intensity','Hue',
# 'OD280/OD315 of diluted wines','Proline']
column_titles = ['class','alc','ma','ash','alkash','mg','tphen','flav','nfphen','pac','ci','hue',
'od','proline']
data_dict = {}
for idx, column in enumerate(column_titles):
data_dict[column] = []
for row in lines:
data_dict[column] += [row[idx]]
return data_dict
def parse_file(data_file, dlm, debug=False): # took delimiter out
print('> executing parse_file')
# Verify the file exists
assert(op.isfile(data_file))
# open it as a csv
with open(data_file, 'r') as fhandle:
csv_reader = csv.reader(fhandle, delimiter=dlm)
# Add each line in the file to a list
lines = []
if debug:
count = 0
for line in csv_reader:
if debug:
if count > 2:
break
count += 1
newline = []
for value in line:
newline += [convert_type(value)]
if len(newline) > 0:
lines += [newline]
print('> view a few lines')
print(' ')
for line in lines[0:2]:
print(line)
print(' ')
# Return all the contents of our file
return lines
# class','alc','ma','ash','alkash','mg','tphen','flav','nfphen','pac','ci','hue',
# 'od','proline
def plot_data3(dd, col1, label1,
col2a, col2b,
label2a, label2b, n,
debug=False):
df = pd.DataFrame.from_dict(dd)
x = np.fromiter(dd[col1], dtype=float) # need these for the lines below
y1 = np.fromiter(dd[col2a], dtype=float)
y2 = np.fromiter(dd[col2b], dtype=float)
# print(df)
fig, ax1 = plt.subplots()
plt.title(label1 + ' by ' + label2a + ' and ' + label2b)
clra = 'indigo'
ax1.set_xlabel(label1)
ax1.set_ylabel(label2a, color=clra) # left side
ax1.scatter(df[col1], df[col2a], color=clra, marker = '^')
xp = np.linspace(np.amin(x), np.amax(x), 100) #only works for numpy arrays
weights = np.polyfit(x, y1, 1)
model = np.poly1d(weights)
plt.plot(xp, model(xp), '-', c=clra)
ax1.tick_params(axis='y', labelcolor=clra)
ax2 = ax1.twinx() # instantiate a second axes that shares the same x-axis
clrb = 'darkgreen'
ax2.set_ylabel(label2b, color=clrb) # we already handled the x-label with ax1
# ax2.plot(df[col1], df[col2b], color=color)
ax2.scatter(df[col1], df[col2b], color= clrb)
ax2.tick_params(axis='y', labelcolor=clrb)
xp = np.linspace(np.amin(x), np.amax(x), 100) #only works for numpy arrays
weights = np.polyfit(x, y2, 1)
model = np.poly1d(weights)
plt.plot(xp, model(xp), '-', c=clrb)
ax1.tick_params(axis='y', labelcolor=clra)
fig.tight_layout() # otherwise the right y-label is slightly clipped
plt.savefig('an2_colour' + n + '.png')
plt.show()
# Cases where there is a possible correlation with colour intensity or hue.
# color intensity:
# check against : alc, flav, od, proline
# hue:
# check against: ma, tphen, flav, pac, od
def main():
data_file = "wine.data"
dlm = get_delim(data_file)
my_data = parse_file(data_file, dlm)
data_dictionary = lines_to_dict(my_data)
#print(data_dictionary)
plot_data3(data_dictionary, 'ci', 'Colour Intensity', 'alc', 'flav', 'Alcohol', 'Flavonoids', '1')
plot_data3(data_dictionary, 'hue', 'Hue', 'pac', 'od', 'Proanthocyanidins', 'OD280/OD315 of Diluted Wines', '2')
if __name__ == "__main__":
main()
| [
5,
6,
7,
8,
9
] |
1,481 | b2cfd397e48213a540608fc232db2eab282935bb | <mask token>
class PG_Agent(object):
def __init__(self, env, policy: torch.nn.modules.container.Sequential,
learning_rate: float, n_policy: int, n_episode: int, max_timesteps: int
) ->None:
super().__init__()
self.env = env
self.policy = policy
self.learning_rate = learning_rate
self.n_policy = n_policy
self.n_episode = n_episode
self.max_timesteps = max_timesteps
self.optimizer = optim.Adam(self.policy.parameters(), lr=self.
learning_rate)
def get_acs(self, obs):
"""
obs is shape (batch_size, n_dim)
"""
logits = self.policy(obs)
acs = torch.argmax(logits, dim=1)
return acs
<mask token>
<mask token>
def train(self):
"""
        for _ in number_of_policy_updates:
            Since it is unclear how to handle episodes whose numbers of timesteps differ, batch_size is set to 1 and one episode is processed at a time.
            # so a replay buffer is not needed either
            Generate rollouts with the current policy.
            Fetch the rollout data.
            Use it to compute the loss J tilde.
            Take the gradient.
            Update with the loss.
"""
for i_policy in range(self.n_policy):
J = 0
q = 0
for i_episode in range(self.n_episode):
obs, acs, next_obs, res, terminals = self.generate_episode()
assert len(obs) == len(next_obs) == len(res) == len(acs
) == len(terminals)
r_tau = sum(res)
logits = self.policy(obs)
criterion = nn.CrossEntropyLoss(reduction='sum')
negative_likelihoods = criterion(logits, acs)
negative_likelihoods = negative_likelihoods.sum()
J += negative_likelihoods * r_tau
q += res.sum().item()
J /= self.n_episode
self.optimizer.zero_grad()
print(
                f'policy {i_policy}: loss J tilde = {J.item()}, avg return >= {q / self.n_episode}'
)
J.backward()
self.optimizer.step()
def save_policy(self, path='policy.pth'):
torch.save(self.policy, path)
def load_policy(self, path='policy.pth'):
self.policy = torch.load(path)
| <mask token>
class PG_Agent(object):
def __init__(self, env, policy: torch.nn.modules.container.Sequential,
learning_rate: float, n_policy: int, n_episode: int, max_timesteps: int
) ->None:
super().__init__()
self.env = env
self.policy = policy
self.learning_rate = learning_rate
self.n_policy = n_policy
self.n_episode = n_episode
self.max_timesteps = max_timesteps
self.optimizer = optim.Adam(self.policy.parameters(), lr=self.
learning_rate)
def get_acs(self, obs):
"""
obs is shape (batch_size, n_dim)
"""
logits = self.policy(obs)
acs = torch.argmax(logits, dim=1)
return acs
def get_ac(self, ob):
"""
ob is shape (n_dim,)
"""
if isinstance(ob, np.ndarray):
ob = torch.from_numpy(ob.astype(gv.np_default_type))
logits = self.policy(ob.view(1, -1))
distri = distributions.Categorical(logits=logits)
return distri.sample().item()
<mask token>
def train(self):
"""
        for _ in number_of_policy_updates:
            Since it is unclear how to handle episodes whose numbers of timesteps differ, batch_size is set to 1 and one episode is processed at a time.
            # so a replay buffer is not needed either
            Generate rollouts with the current policy.
            Fetch the rollout data.
            Use it to compute the loss J tilde.
            Take the gradient.
            Update with the loss.
"""
for i_policy in range(self.n_policy):
J = 0
q = 0
for i_episode in range(self.n_episode):
obs, acs, next_obs, res, terminals = self.generate_episode()
assert len(obs) == len(next_obs) == len(res) == len(acs
) == len(terminals)
r_tau = sum(res)
logits = self.policy(obs)
criterion = nn.CrossEntropyLoss(reduction='sum')
negative_likelihoods = criterion(logits, acs)
negative_likelihoods = negative_likelihoods.sum()
J += negative_likelihoods * r_tau
q += res.sum().item()
J /= self.n_episode
self.optimizer.zero_grad()
print(
                f'policy {i_policy}: loss J tilde = {J.item()}, avg return >= {q / self.n_episode}'
)
J.backward()
self.optimizer.step()
def save_policy(self, path='policy.pth'):
torch.save(self.policy, path)
def load_policy(self, path='policy.pth'):
self.policy = torch.load(path)
| <mask token>
class PG_Agent(object):
def __init__(self, env, policy: torch.nn.modules.container.Sequential,
learning_rate: float, n_policy: int, n_episode: int, max_timesteps: int
) ->None:
super().__init__()
self.env = env
self.policy = policy
self.learning_rate = learning_rate
self.n_policy = n_policy
self.n_episode = n_episode
self.max_timesteps = max_timesteps
self.optimizer = optim.Adam(self.policy.parameters(), lr=self.
learning_rate)
def get_acs(self, obs):
"""
obs is shape (batch_size, n_dim)
"""
logits = self.policy(obs)
acs = torch.argmax(logits, dim=1)
return acs
def get_ac(self, ob):
"""
ob is shape (n_dim,)
"""
if isinstance(ob, np.ndarray):
ob = torch.from_numpy(ob.astype(gv.np_default_type))
logits = self.policy(ob.view(1, -1))
distri = distributions.Categorical(logits=logits)
return distri.sample().item()
def generate_episode(self, render=False):
next_ob = self.env.reset().reshape(1, -1)
if render:
self.env.render()
timesteps = 0
obs = []
acs = []
next_obs = []
res = []
terminals = []
while True:
ob = next_ob
ac = self.get_ac(ob)
next_ob, re, done, info = self.env.step(ac)
if render:
self.env.render()
next_ob = next_ob.reshape(1, -1)
obs.append(ob)
acs.append(ac)
next_obs.append(next_ob)
res.append(re)
terminals.append(done)
if done or timesteps > self.max_timesteps:
break
return torch.from_numpy(np.concatenate(obs).astype(gv.np_default_type)
), torch.tensor(acs), torch.from_numpy(np.concatenate(next_obs)
), torch.tensor(res), torch.tensor(terminals)
def train(self):
"""
        for _ in number_of_policy_updates:
            Since it is unclear how to handle episodes whose numbers of timesteps differ, batch_size is set to 1 and one episode is processed at a time.
            # so a replay buffer is not needed either
            Generate rollouts with the current policy.
            Fetch the rollout data.
            Use it to compute the loss J tilde.
            Take the gradient.
            Update with the loss.
"""
for i_policy in range(self.n_policy):
J = 0
q = 0
for i_episode in range(self.n_episode):
obs, acs, next_obs, res, terminals = self.generate_episode()
assert len(obs) == len(next_obs) == len(res) == len(acs
) == len(terminals)
r_tau = sum(res)
logits = self.policy(obs)
criterion = nn.CrossEntropyLoss(reduction='sum')
negative_likelihoods = criterion(logits, acs)
negative_likelihoods = negative_likelihoods.sum()
J += negative_likelihoods * r_tau
q += res.sum().item()
J /= self.n_episode
self.optimizer.zero_grad()
print(
                f'policy {i_policy}: loss J tilde = {J.item()}, avg return >= {q / self.n_episode}'
)
J.backward()
self.optimizer.step()
def save_policy(self, path='policy.pth'):
torch.save(self.policy, path)
def load_policy(self, path='policy.pth'):
self.policy = torch.load(path)
| from os import path
import torch
import torch.nn as nn
import torch.optim as optim
import torch.distributions as distributions
import numpy as np
from torch.serialization import load
import global_var as gv
torch.set_default_dtype(gv.torch_default_type)
class PG_Agent(object):
def __init__(self, env, policy: torch.nn.modules.container.Sequential,
learning_rate: float, n_policy: int, n_episode: int, max_timesteps: int
) ->None:
super().__init__()
self.env = env
self.policy = policy
self.learning_rate = learning_rate
self.n_policy = n_policy
self.n_episode = n_episode
self.max_timesteps = max_timesteps
self.optimizer = optim.Adam(self.policy.parameters(), lr=self.
learning_rate)
def get_acs(self, obs):
"""
obs is shape (batch_size, n_dim)
"""
logits = self.policy(obs)
acs = torch.argmax(logits, dim=1)
return acs
def get_ac(self, ob):
"""
ob is shape (n_dim,)
"""
if isinstance(ob, np.ndarray):
ob = torch.from_numpy(ob.astype(gv.np_default_type))
logits = self.policy(ob.view(1, -1))
distri = distributions.Categorical(logits=logits)
return distri.sample().item()
def generate_episode(self, render=False):
next_ob = self.env.reset().reshape(1, -1)
if render:
self.env.render()
timesteps = 0
obs = []
acs = []
next_obs = []
res = []
terminals = []
while True:
ob = next_ob
ac = self.get_ac(ob)
next_ob, re, done, info = self.env.step(ac)
if render:
self.env.render()
next_ob = next_ob.reshape(1, -1)
obs.append(ob)
acs.append(ac)
next_obs.append(next_ob)
res.append(re)
terminals.append(done)
if done or timesteps > self.max_timesteps:
break
return torch.from_numpy(np.concatenate(obs).astype(gv.np_default_type)
), torch.tensor(acs), torch.from_numpy(np.concatenate(next_obs)
), torch.tensor(res), torch.tensor(terminals)
def train(self):
"""
        for _ in number_of_policy_updates:
            Since it is unclear how to handle episodes whose numbers of timesteps differ, batch_size is set to 1 and one episode is processed at a time.
            # so a replay buffer is not needed either
            Generate rollouts with the current policy.
            Fetch the rollout data.
            Use it to compute the loss J tilde.
            Take the gradient.
            Update with the loss.
"""
for i_policy in range(self.n_policy):
J = 0
q = 0
for i_episode in range(self.n_episode):
obs, acs, next_obs, res, terminals = self.generate_episode()
assert len(obs) == len(next_obs) == len(res) == len(acs
) == len(terminals)
r_tau = sum(res)
logits = self.policy(obs)
criterion = nn.CrossEntropyLoss(reduction='sum')
negative_likelihoods = criterion(logits, acs)
negative_likelihoods = negative_likelihoods.sum()
J += negative_likelihoods * r_tau
q += res.sum().item()
J /= self.n_episode
self.optimizer.zero_grad()
print(
                f'policy {i_policy}: loss J tilde = {J.item()}, avg return >= {q / self.n_episode}'
)
J.backward()
self.optimizer.step()
def save_policy(self, path='policy.pth'):
torch.save(self.policy, path)
def load_policy(self, path='policy.pth'):
self.policy = torch.load(path)
# Manages the buffer and the policy
from os import path
import torch
import torch.nn as nn
import torch.optim as optim
import torch.distributions as distributions
import numpy as np
from torch.serialization import load
import global_var as gv
torch.set_default_dtype(gv.torch_default_type)
class PG_Agent(object):
def __init__(
self,
env,
policy: torch.nn.modules.container.Sequential,
learning_rate: float,
        n_policy: int, # how many policies to iterate over
        n_episode: int, # how many episodes to roll out under each policy when updating it
        max_timesteps: int # cap the steps in one episode, so that once a strong policy emerges the episode still terminates
) -> None:
super().__init__()
self.env = env
self.policy = policy
self.learning_rate = learning_rate
# self.buffer = buffer
self.n_policy = n_policy
self.n_episode = n_episode
self.max_timesteps = max_timesteps
self.optimizer = optim.Adam(self.policy.parameters(), lr=self.learning_rate)
def get_acs(self, obs):
'''
obs is shape (batch_size, n_dim)
'''
logits = self.policy(obs)
acs = torch.argmax(logits, dim=1)
return acs # shape (batch_size,)
def get_ac(self, ob):
'''
ob is shape (n_dim,)
'''
if isinstance(ob, np.ndarray):
ob = torch.from_numpy(ob.astype(gv.np_default_type))
logits = self.policy(ob.view(1,-1))
        # sample the action from the categorical distribution instead of taking the argmax of the logits; this took a long time to debug
# ac = torch.argmax(logits)
distri = distributions.Categorical(logits=logits)
return distri.sample().item()
def generate_episode(self, render = False):
next_ob = self.env.reset().reshape(1,-1)
if render:
self.env.render()
timesteps = 0
obs = []
acs = []
next_obs = []
res = []
terminals = []
while True:
ob = next_ob
ac = self.get_ac(ob)
next_ob, re, done, info = self.env.step(ac)
if render:
self.env.render()
next_ob = next_ob.reshape(1,-1)
obs.append(ob)
acs.append(ac)
next_obs.append(next_ob)
res.append(re)
terminals.append(done)
# break
if done or timesteps > self.max_timesteps:
break
# print(acs, type(acs), 'acs')
return torch.from_numpy(np.concatenate(obs).astype(gv.np_default_type)), torch.tensor(acs), torch.from_numpy(np.concatenate(next_obs)), torch.tensor(res), torch.tensor(terminals)
def train(self):
'''
        for _ in number_of_policy_updates:
            Since it is unclear how to handle episodes whose numbers of timesteps differ, batch_size is set to 1 and one episode is processed at a time.
            # so a replay buffer is not needed either
            Generate rollouts with the current policy.
            Fetch the rollout data.
            Use it to compute the loss J tilde.
            Take the gradient.
            Update with the loss.
'''
# print(self.policy.state_dict(), 'p1')
for i_policy in range(self.n_policy):
            J = 0 # J tilde, i.e. the loss
q = 0
for i_episode in range(self.n_episode):
                # roll out one episode
obs, acs, next_obs, res, terminals = self.generate_episode()
# print(acs, acs.shape, 'acs')
assert(len(obs)==len(next_obs)==len(res)==len(acs)==len(terminals))
r_tau = sum(res)
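                # REINFORCE surrogate: weight the summed negative log-likelihood of the taken actions by the episode return r_tau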
logits = self.policy(obs)
# print(logits, logits.shape, 'logits')
# print(acs, type(acs))
                criterion = nn.CrossEntropyLoss(reduction='sum') # note: reduction must be 'sum' here, otherwise this does not match the policy gradient formula and training makes no progress; this bug took a long time to find
negative_likelihoods = criterion(logits, acs)
# print(negative_likelihoods, negative_likelihoods.shape, 'negative_likelihoods')
negative_likelihoods = negative_likelihoods.sum()
# print(negative_likelihoods, negative_likelihoods.shape, 'negative_likelihoods')
# print(r_tau, 'r_tau')
J += negative_likelihoods*r_tau
q += res.sum().item()
J /= self.n_episode
self.optimizer.zero_grad()
print(f"第{i_policy}个策略的loss J tilda 为 {J.item()}, avg return >= {q/self.n_episode}") # 这里的loss估计不对,要用平均每次的
J.backward()
self.optimizer.step()
# print(self.policy.state_dict(), 'p2')
def save_policy(self, path='policy.pth'):
torch.save(self.policy, path)
def load_policy(self, path='policy.pth'):
self.policy = torch.load(path)
| [
6,
7,
8,
10,
11
] |
1,482 | f644ff322d1268092dbdcbfc1a3c76006424184b | <mask token>
class BoardTests(unittest.TestCase):
def test_get_neighbours(self):
board = create_test_board(3)
self.assertListEqual(board.get_neighbour_states(1, 0), [None, None,
ALIVE, ALIVE, DEAD, ALIVE, DEAD, None])
def test_simple_update(self):
alive_cells = [(0, 0), (1, 1), (0, 1)]
board = Board(3)
board.set_alive_cells(alive_cells)
board.update()
states = board.list_of_values
self.assertListEqual(states, [[1, 1, 0], [1, 1, 0], [0, 0, 0]])
<mask token>
<mask token>
class CellTest(unittest.TestCase):
def test_is_alive(self):
alive_cell = Cell(ALIVE)
self.assertTrue(alive_cell.is_alive)
dead_cell = Cell(DEAD)
self.assertFalse(dead_cell.is_alive)
def test_create_life(self):
cell = Cell(DEAD)
neighbours = [1, 1, 1, 0, 0, 0, 0, None, None]
self.assertTrue(cell.will_survive(neighbours))
def test_will_not_be_born(self):
cell = Cell(DEAD)
neighbours = [1, 1, 0, 0, 0, 0, 0]
self.assertFalse(cell.will_survive(neighbours))
neighbours = [1, 1, 1, 1, 0, 0, 0]
self.assertFalse(cell.will_survive(neighbours))
def test_stay_alive(self):
cell = Cell(ALIVE)
neighbours2 = [1, 1, 0, 0, 0, 0, None, None]
self.assertTrue(cell.will_survive(neighbours2))
neighbours3 = [1, 1, 1, 0, 0, 0, None, None]
self.assertTrue(cell.will_survive(neighbours3))
def test_will_not_survive_overpopulation(self):
cell = Cell(ALIVE)
neighbours = [1, 1, 1, 1, 0, 0, 0, 0]
self.assertFalse(cell.will_survive(neighbours))
def test_will_not_survive_underpopulation(self):
cell = Cell(ALIVE)
neighbours = [1, 0, 0, 0, 0, 0, 0]
self.assertFalse(cell.will_survive(neighbours))
<mask token>
| <mask token>
class BoardTests(unittest.TestCase):
def test_get_neighbours(self):
board = create_test_board(3)
self.assertListEqual(board.get_neighbour_states(1, 0), [None, None,
ALIVE, ALIVE, DEAD, ALIVE, DEAD, None])
def test_simple_update(self):
alive_cells = [(0, 0), (1, 1), (0, 1)]
board = Board(3)
board.set_alive_cells(alive_cells)
board.update()
states = board.list_of_values
self.assertListEqual(states, [[1, 1, 0], [1, 1, 0], [0, 0, 0]])
def test_simple_update2(self):
init_config = [(0, 0), (0, 1), (0, 2)]
board = Board(3)
board.set_alive_cells(init_config)
board.update()
states = board.list_of_values
self.assertListEqual(states, [[0, 1, 0], [0, 1, 0], [0, 0, 0]])
<mask token>
class CellTest(unittest.TestCase):
def test_is_alive(self):
alive_cell = Cell(ALIVE)
self.assertTrue(alive_cell.is_alive)
dead_cell = Cell(DEAD)
self.assertFalse(dead_cell.is_alive)
def test_create_life(self):
cell = Cell(DEAD)
neighbours = [1, 1, 1, 0, 0, 0, 0, None, None]
self.assertTrue(cell.will_survive(neighbours))
def test_will_not_be_born(self):
cell = Cell(DEAD)
neighbours = [1, 1, 0, 0, 0, 0, 0]
self.assertFalse(cell.will_survive(neighbours))
neighbours = [1, 1, 1, 1, 0, 0, 0]
self.assertFalse(cell.will_survive(neighbours))
def test_stay_alive(self):
cell = Cell(ALIVE)
neighbours2 = [1, 1, 0, 0, 0, 0, None, None]
self.assertTrue(cell.will_survive(neighbours2))
neighbours3 = [1, 1, 1, 0, 0, 0, None, None]
self.assertTrue(cell.will_survive(neighbours3))
def test_will_not_survive_overpopulation(self):
cell = Cell(ALIVE)
neighbours = [1, 1, 1, 1, 0, 0, 0, 0]
self.assertFalse(cell.will_survive(neighbours))
def test_will_not_survive_underpopulation(self):
cell = Cell(ALIVE)
neighbours = [1, 0, 0, 0, 0, 0, 0]
self.assertFalse(cell.will_survive(neighbours))
<mask token>
| <mask token>
def create_test_board(size):
board = Board(size)
board[0, 0].state = ALIVE
board[0, 1].state = ALIVE
board[2, 1].state = ALIVE
return board
class BoardTests(unittest.TestCase):
def test_get_neighbours(self):
board = create_test_board(3)
self.assertListEqual(board.get_neighbour_states(1, 0), [None, None,
ALIVE, ALIVE, DEAD, ALIVE, DEAD, None])
def test_simple_update(self):
alive_cells = [(0, 0), (1, 1), (0, 1)]
board = Board(3)
board.set_alive_cells(alive_cells)
board.update()
states = board.list_of_values
self.assertListEqual(states, [[1, 1, 0], [1, 1, 0], [0, 0, 0]])
def test_simple_update2(self):
init_config = [(0, 0), (0, 1), (0, 2)]
board = Board(3)
board.set_alive_cells(init_config)
board.update()
states = board.list_of_values
self.assertListEqual(states, [[0, 1, 0], [0, 1, 0], [0, 0, 0]])
def test_overpopulation(self):
init_config = [(0, 1), (1, 0), (1, 1), (1, 2), (2, 1)]
board = Board(3)
board.set_alive_cells(init_config)
board.update()
states = board.list_of_values
self.assertListEqual(states, [[1, 1, 1], [1, 0, 1], [1, 1, 1]])
class CellTest(unittest.TestCase):
def test_is_alive(self):
alive_cell = Cell(ALIVE)
self.assertTrue(alive_cell.is_alive)
dead_cell = Cell(DEAD)
self.assertFalse(dead_cell.is_alive)
def test_create_life(self):
cell = Cell(DEAD)
neighbours = [1, 1, 1, 0, 0, 0, 0, None, None]
self.assertTrue(cell.will_survive(neighbours))
def test_will_not_be_born(self):
cell = Cell(DEAD)
neighbours = [1, 1, 0, 0, 0, 0, 0]
self.assertFalse(cell.will_survive(neighbours))
neighbours = [1, 1, 1, 1, 0, 0, 0]
self.assertFalse(cell.will_survive(neighbours))
def test_stay_alive(self):
cell = Cell(ALIVE)
neighbours2 = [1, 1, 0, 0, 0, 0, None, None]
self.assertTrue(cell.will_survive(neighbours2))
neighbours3 = [1, 1, 1, 0, 0, 0, None, None]
self.assertTrue(cell.will_survive(neighbours3))
def test_will_not_survive_overpopulation(self):
cell = Cell(ALIVE)
neighbours = [1, 1, 1, 1, 0, 0, 0, 0]
self.assertFalse(cell.will_survive(neighbours))
def test_will_not_survive_underpopulation(self):
cell = Cell(ALIVE)
neighbours = [1, 0, 0, 0, 0, 0, 0]
self.assertFalse(cell.will_survive(neighbours))
if __name__ == '__main__':
unittest.main()
| import unittest
from game_of_life.board import Board
from game_of_life.cell import Cell, ALIVE, DEAD
def create_test_board(size):
board = Board(size)
board[0, 0].state = ALIVE
board[0, 1].state = ALIVE
board[2, 1].state = ALIVE
return board
class BoardTests(unittest.TestCase):
def test_get_neighbours(self):
board = create_test_board(3)
self.assertListEqual(board.get_neighbour_states(1, 0), [None, None,
ALIVE, ALIVE, DEAD, ALIVE, DEAD, None])
def test_simple_update(self):
alive_cells = [(0, 0), (1, 1), (0, 1)]
board = Board(3)
board.set_alive_cells(alive_cells)
board.update()
states = board.list_of_values
self.assertListEqual(states, [[1, 1, 0], [1, 1, 0], [0, 0, 0]])
def test_simple_update2(self):
init_config = [(0, 0), (0, 1), (0, 2)]
board = Board(3)
board.set_alive_cells(init_config)
board.update()
states = board.list_of_values
self.assertListEqual(states, [[0, 1, 0], [0, 1, 0], [0, 0, 0]])
def test_overpopulation(self):
init_config = [(0, 1), (1, 0), (1, 1), (1, 2), (2, 1)]
board = Board(3)
board.set_alive_cells(init_config)
board.update()
states = board.list_of_values
self.assertListEqual(states, [[1, 1, 1], [1, 0, 1], [1, 1, 1]])
class CellTest(unittest.TestCase):
def test_is_alive(self):
alive_cell = Cell(ALIVE)
self.assertTrue(alive_cell.is_alive)
dead_cell = Cell(DEAD)
self.assertFalse(dead_cell.is_alive)
def test_create_life(self):
cell = Cell(DEAD)
neighbours = [1, 1, 1, 0, 0, 0, 0, None, None]
self.assertTrue(cell.will_survive(neighbours))
def test_will_not_be_born(self):
cell = Cell(DEAD)
neighbours = [1, 1, 0, 0, 0, 0, 0]
self.assertFalse(cell.will_survive(neighbours))
neighbours = [1, 1, 1, 1, 0, 0, 0]
self.assertFalse(cell.will_survive(neighbours))
def test_stay_alive(self):
cell = Cell(ALIVE)
neighbours2 = [1, 1, 0, 0, 0, 0, None, None]
self.assertTrue(cell.will_survive(neighbours2))
neighbours3 = [1, 1, 1, 0, 0, 0, None, None]
self.assertTrue(cell.will_survive(neighbours3))
def test_will_not_survive_overpopulation(self):
cell = Cell(ALIVE)
neighbours = [1, 1, 1, 1, 0, 0, 0, 0]
self.assertFalse(cell.will_survive(neighbours))
def test_will_not_survive_underpopulation(self):
cell = Cell(ALIVE)
neighbours = [1, 0, 0, 0, 0, 0, 0]
self.assertFalse(cell.will_survive(neighbours))
if __name__ == '__main__':
unittest.main()
| import unittest
from game_of_life.board import Board
from game_of_life.cell import Cell, ALIVE, DEAD
def create_test_board(size):
board = Board(size)
board[0, 0].state = ALIVE
board[0, 1].state = ALIVE
board[2, 1].state = ALIVE
return board
class BoardTests(unittest.TestCase):
def test_get_neighbours(self):
board = create_test_board(3)
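        # None entries mark neighbour positions that fall outside the board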
self.assertListEqual(board.get_neighbour_states(1, 0), [
None, None, ALIVE,
ALIVE, DEAD,
ALIVE, DEAD, None
])
def test_simple_update(self):
alive_cells = [(0, 0), (1, 1), (0, 1)]
board = Board(3)
board.set_alive_cells(alive_cells)
board.update()
states = board.list_of_values
self.assertListEqual(states, [
[1, 1, 0],
[1, 1, 0],
[0, 0, 0]
])
def test_simple_update2(self):
init_config = [(0, 0), (0, 1), (0, 2)]
board = Board(3)
board.set_alive_cells(init_config)
board.update()
states = board.list_of_values
self.assertListEqual(states, [
[0, 1, 0],
[0, 1, 0],
[0, 0, 0]
])
def test_overpopulation(self):
init_config = [(0, 1), (1, 0), (1, 1), (1, 2), (2, 1)]
board = Board(3)
board.set_alive_cells(init_config)
board.update()
states = board.list_of_values
self.assertListEqual(states, [
[1, 1, 1],
[1, 0, 1],
[1, 1, 1]
])
class CellTest(unittest.TestCase):
def test_is_alive(self):
alive_cell = Cell(ALIVE)
self.assertTrue(alive_cell.is_alive)
dead_cell = Cell(DEAD)
self.assertFalse(dead_cell.is_alive)
def test_create_life(self):
cell = Cell(DEAD)
neighbours = [1, 1, 1, 0, 0, 0, 0, None, None]
self.assertTrue(cell.will_survive(neighbours))
def test_will_not_be_born(self):
cell = Cell(DEAD)
neighbours = [1, 1, 0, 0, 0, 0, 0,]
self.assertFalse(cell.will_survive(neighbours))
neighbours = [1, 1, 1, 1, 0, 0, 0]
self.assertFalse(cell.will_survive(neighbours))
def test_stay_alive(self):
cell = Cell(ALIVE)
neighbours2 = [1, 1, 0, 0, 0, 0, None, None]
self.assertTrue(cell.will_survive(neighbours2))
neighbours3 = [1, 1, 1, 0, 0, 0, None, None]
self.assertTrue(cell.will_survive(neighbours3))
def test_will_not_survive_overpopulation(self):
cell = Cell(ALIVE)
neighbours = [1, 1, 1, 1, 0, 0, 0, 0]
self.assertFalse(cell.will_survive(neighbours))
def test_will_not_survive_underpopulation(self):
cell = Cell(ALIVE)
neighbours = [1, 0, 0, 0, 0, 0, 0]
self.assertFalse(cell.will_survive(neighbours))
if __name__ == '__main__':
unittest.main()
| [
10,
11,
14,
15,
16
] |
1,483 | 3461e9dceb2c0bfc49002809154f8be4cd8c66e2 | import dataset
import json
import gc
import os
jsonDir = "/home/jr/share/python/music-visualizer/merged"
db = dataset.connect('sqlite:///test.db')
table = db['Songs']
for root, subFolders, files in os.walk(jsonDir):
for f in files:
print("file:{}".format(f))
gc.collect()
tmpJson = json.load(open(os.path.join(root, f)))
for Artist in tmpJson:
for song in tmpJson[Artist]["Songs"]:
table.insert(song)
import urllib2
import json
import re
#in_artist
def byteify(input):
if isinstance(input, dict):
return {byteify(key): byteify(value) for key, value in input.iteritems()}
elif isinstance(input, list):
return [byteify(element) for element in input]
elif isinstance(input, unicode):
return input.encode('utf-8')
else:
return input
nodes = []
edges = []
anchor = "Rihanna"
q = [anchor]
while len(q) > 0:
art = q.pop(0)
#get song list
url = "http://10.104.246.185:5000/artist/"+art.replace(" ", "%20")
response = urllib2.urlopen(url)
dictionary = byteify(json.loads(response.read()))
songlist = []
if (dictionary):
lst = dictionary["Songs"]
for song in lst:
songlist.append(song["Title"])
for song in songlist:
#get string of featured artists
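        # the regex captures everything after "feat."/"Feat." up to a parenthesis or slash as the featured-artist list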
m = re.match('.+[fF]eat. ([^)(/]+)', song)
if m:
s = m.group(1)
#split into artists
lst = s.split(",")
lstend = (lst.pop()).split("&")
lst.extend(lstend)
for a in lst:
a = a.strip()
edges.append((art.strip(),a))
if nodes.count(a) == 0:
q.append(a)
for b in lst:
b = b.strip()
if a != b:
edges.append((a,b))
if nodes.count(art) == 0:
nodes.append(art.strip())
i = 0
j = 0
while i < len(edges)-1:
j = i+1
t1 = edges[i]
while j < len(edges):
t2 = edges[j]
if t1[0] == t2[0] and t1[1] == t2[1]:
edges.pop(j)
elif t1[1] == t2[0] and t1[0] == t2[1]:
edges.pop(j)
elif t2[0] == t2[1]:
edges.pop(j)
else:
j = j + 1
i = i + 1
print nodes
print edges
| null | null | null | null | [
0
] |
1,484 | af02cd0778e19df7b11145c4863776a1afd1cca6 | """ Implements BCFW for DIFFRAC objectives. """
import numpy as np
import os
from tqdm import tqdm
from numpy.linalg import norm as matrix_norm
import time
def get_feat_block(feats, block_idx, memory_mode, bias_value=-1.0):
"""Get feature for a given block."""
if memory_mode == 'RAM':
feat = feats[block_idx]
elif memory_mode == 'disk':
feat = np.load(feats[block_idx])
else:
raise ValueError(
'Memory mode {} is not supported.'.format(memory_mode))
if bias_value > 0.0:
feat = np.append(
feat, bias_value * np.ones([feat.shape[0], 1]), axis=1)
return feat
def get_p_block(p_matrix, block_idx, memory_mode):
if memory_mode == 'RAM':
return p_matrix[block_idx]
elif memory_mode == 'disk':
return np.load(p_matrix[block_idx])
else:
raise ValueError(
'Memory mode {} is not supported.'.format(memory_mode))
def compute_p_matrix(feats, alpha, memory_mode, bias_value=-1.0):
"""Precompute the P dictionnary matrix."""
_, d = np.shape(
get_feat_block(feats, 0, memory_mode, bias_value=bias_value))
# Compute X^TX
print('Computing xtx...')
x_t_x = np.zeros([d, d])
N = 0
for i in tqdm(range(len(feats))):
x = get_feat_block(feats, i, memory_mode, bias_value=bias_value)
x_t_x += np.dot(np.transpose(x), x)
N += x.shape[0]
# Compute P
p_matrix = []
print('Inverting big matrix...')
inv_mat = np.linalg.inv(x_t_x + N * alpha * np.eye(d))
print('Computing P matrix by block...')
for i in tqdm(range(len(feats))):
x = get_feat_block(feats, i, memory_mode, bias_value=bias_value)
sol = np.dot(inv_mat, np.transpose(x))
if memory_mode == 'RAM':
p_matrix.append(np.array(sol))
else:
path_x = feats[i]
base_path, filename = os.path.split(path_x)
np.save(os.path.join(base_path, 'P_{}'.format(filename)), sol)
p_matrix.append(path_x)
return p_matrix, N
def compute_weights(p_matrix, asgn, memory_mode):
d, _ = np.shape(get_p_block(p_matrix, 0, memory_mode))
_, k = np.shape(asgn[0])
weights = np.zeros([d, k])
print('Computing weights from scratch...')
for i in tqdm(range(len(p_matrix))):
weights += np.dot(get_p_block(p_matrix, i, memory_mode), asgn[i])
return weights
def compute_obj(x, y, weights, n_feats):
return 1.0 / n_feats * matrix_norm(np.dot(x, weights) - y, ord='fro')**2
def compute_grad(x, y, weights, n_feats):
return 1.0 / n_feats * (y - np.dot(x, weights))
def compute_gap(x,
y,
weights,
n_feats,
cstr,
cstr_solver,
opt_y=None,
grad_y=None):
# Check if we need to call the oracle.
if opt_y is None:
grad_y = compute_grad(x, y, weights, n_feats)
opt_y = cstr_solver.solve(cstr, grad_y)
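    # Frank-Wolfe duality gap for this block: <y - opt_y, grad_y>.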
gap = -np.multiply(opt_y - y, grad_y).sum()
return gap
def sample_block(gaps, block_sampling):
if block_sampling == 'uniform':
return np.random.randint(0, len(gaps), 1)[0]
elif block_sampling == 'gap_sampling':
if not np.all(gaps >= 0):
print('Warning: some gaps are negative block {}, value :{}'.format(
gaps.argmin(), gaps.min()))
gaps[gaps < 0] = 0.00000001
gap_prob = gaps / gaps.sum()
return np.random.choice(len(gaps), 1, p=gap_prob)[0]
def display_information(iter,
max_iter,
gaps,
eval_metric,
objective_value=None,
verbose='silent',
prev_time=-1,
prev_global_time=-1):
"""Display information about the training."""
if objective_value is None:
objective_value = []
if verbose in ['normal', 'heavy']:
string_display = 'Iteration {0:05d}/{1:05d}, Gap sum: {2:.4E}'.format(
iter, max_iter, gaps.sum())
new_time = time.time()
if prev_time > 0:
diff_time = int(round(new_time - prev_time))
string_display += ' ({:d} s)'.format(diff_time)
if prev_global_time > 0:
diff_time = int(round(new_time - prev_global_time))
string_display += ' (Glob. {:d} s)'.format(diff_time)
if eval_metric >= 0:
string_display += ', Eval metric: {:.2f}'.format(eval_metric)
if objective_value:
string_display += ', Objective: '
string_display += ','.join([
'{}: {:.4E}'.format(key, value)
for key, value in objective_value.items()
])
print(string_display)
def save_asgn_block(path_save_asgn, block_idx, asgn, t):
np.save(
os.path.join(path_save_asgn, '{0}_{1:05d}.npy'.format(block_idx, t)),
asgn[block_idx])
def save_xw_block(path_save_asgn, block_idx, x, weights, t):
np.save(
os.path.join(path_save_asgn, 'xw_{0}_{1:05d}.npy'.format(block_idx,
t)),
np.dot(x, weights))
def save_gt_block(path_save_asgn, block_idx, gts):
np.save(
os.path.join(path_save_asgn, '{}_gt.npy'.format(block_idx)),
gts[block_idx])
def solver(feats,
asgn,
cstrs,
cstrs_solver,
gts=None,
eval_function=None,
rounding_function=None,
alpha=1e-4,
memory_mode='RAM',
bias_value=-1.0,
n_iterations=10000,
block_sampling='uniform',
verbose='silent',
gap_frequency=2000,
eval_frequency=500,
verbose_frequency=250,
objective_frequency=250,
path_save_asgn=None,
validation_info=None):
"""Main solver for DiffracBCFW.
Args:
feats: Input features as a list (one entry per block).
asgn: Assignment variables as a list (one entry per block). This provides
the initialization of the system.
cstrs: Input constraints as a dictionary (one entry per block).
cstrs_solver: Method that takes as input a gradient for a block and a cstrs and then
returns the LP solution.
gts: A ground truth can be specified if you wish to evaluate your solution.
eval_function: an eval function method can be provided.
rounding_function: rounding function.
alpha: Value of the regularization parameter (lambda in the paper).
memory_mode: `disk` (features are stored in disk) or `RAM` (features are in RAM).
bias_value: Value to add for the bias (if negative no bias is added to the features).
n_iterations: Number of iterations of the solver.
block_sampling: Method for sampling block.
verbose: `silent`, `normal`, `heavy`.
gap_frequency: frequency to recompute all the gaps.
eval_frequency: frequency to perform evaluation.
verbose_frequency: frequency to print info.
objective_frequency: frequency to compute objective (only used if positive).
path_save_asgn: If not None save asgn at path_save_asgn. None by default.
        validation_info: If not None, perform evaluation on the validation set.
"""
compute_objective = False
objective_value = None
if objective_frequency > 0:
compute_objective = True
save_asgn = False
save_ids = []
if path_save_asgn is not None:
if not os.path.exists(path_save_asgn):
os.makedirs(path_save_asgn)
# Monitor evolution of asgn during optim on a subset of samples.
save_asgn = True
n_save_asgn = min(20, len(asgn))
save_ids = np.random.choice(len(asgn), n_save_asgn, replace=False)
# Pre-compute the P matrix.
p_matrix, n_feats = compute_p_matrix(
feats, alpha, memory_mode, bias_value=bias_value)
# Compute W.
weights = compute_weights(p_matrix, asgn, memory_mode=memory_mode)
# Init the gaps.
gaps = np.zeros(len(feats))
print('Computing init gaps...')
for block_idx in tqdm(range(len(feats))):
x = get_feat_block(
feats, block_idx, memory_mode, bias_value=bias_value)
gaps[block_idx] = compute_gap(x, asgn[block_idx], weights, n_feats,
cstrs[block_idx], cstrs_solver)
if save_asgn and block_idx in save_ids:
save_asgn_block(path_save_asgn, block_idx, asgn, 0)
save_xw_block(path_save_asgn, block_idx, x, weights, 0)
save_gt_block(path_save_asgn, block_idx, gts)
print('Init gap: {0:4E}, starting the optimization...'.format(gaps.sum()))
eval_metric = -1.0
prev_time = time.time() # init time of iterations
prev_global_time = prev_time
for t in range(n_iterations):
if eval_frequency > 0 and t % eval_frequency == 0:
# Evaluation.
if eval_function is not None and gts is not None:
print('Performing evaluation...')
eval_metric = eval_function.evaluate(asgn, gts, weights, feats,
rounding_function, cstrs)
if validation_info is not None:
gts_val = validation_info['gts']
feats_val = validation_info['feats']
eval_function.evaluate(None, gts_val, weights, feats_val,
rounding_function, None)
else:
eval_metric = -1.0
if compute_objective and t % objective_frequency == 0:
print('Computing objective...')
objective_value = {}
# Compute the diffrac objective.
dfrac_obj = 0.0
# Data dependent term: 1.0 / N * ||X * W - Y||_2^2
for block_idx in range(len(feats)):
x = get_feat_block(
feats, block_idx, memory_mode, bias_value=bias_value)
dfrac_obj += compute_obj(x, asgn[block_idx], weights, n_feats)
# Regularization term: \alpha * || W ||_2^2
dfrac_obj += alpha * matrix_norm(weights, ord='fro')**2
objective_value['dfrac'] = dfrac_obj
# Print information.
if t % verbose_frequency == 0:
display_information(t, n_iterations, gaps, eval_metric,
objective_value, verbose, prev_time, prev_global_time)
prev_time = time.time()
# Sample a block.
block_idx = sample_block(gaps, block_sampling)
# Compute gradient.
x = get_feat_block(
feats, block_idx, memory_mode, bias_value=bias_value)
y = asgn[block_idx]
grad_y = compute_grad(x, y, weights, n_feats)
opt_y = cstrs_solver.solve(cstrs[block_idx], grad_y)
gaps[block_idx] = compute_gap(x, y, weights, n_feats,
cstrs[block_idx], cstrs_solver,
opt_y, grad_y)
# Step size computation.
p = get_p_block(p_matrix, block_idx, memory_mode)
dir_y = opt_y - y
gamma_n = gaps[block_idx]
gamma_d = 1.0 / n_feats * np.multiply(
dir_y, dir_y - np.linalg.multi_dot([x, p, dir_y])).sum()
gamma = min(1.0, gamma_n / gamma_d)
# gamma should always be positive.
if gamma < 0:
            print('Warning: gamma = {}, gap_i = {}'.format(
                gamma, gaps[block_idx]))
gamma = 0.0
# Update variables.
asgn[block_idx] += gamma * dir_y
weights += gamma * np.dot(p, dir_y)
if save_asgn and block_idx in save_ids:
save_asgn_block(path_save_asgn, block_idx, asgn, t)
save_xw_block(path_save_asgn, block_idx, x, weights, t)
# Update gaps if needed.
if (t + 1) % gap_frequency == 0:
print('Recomputing gaps...')
for block_idx in tqdm(range(len(feats))):
x = get_feat_block(
feats, block_idx, memory_mode, bias_value=bias_value)
gaps[block_idx] = compute_gap(x, asgn[block_idx], weights,
n_feats, cstrs[block_idx],
cstrs_solver)
display_information(t, n_iterations, gaps, eval_metric,
objective_value, verbose)
return asgn, weights
| null | null | null | null | [
0
] |
1,485 | ca93f49fbdc1d64e0616bca035a6043b3cc80ddc | <mask token>
| <mask token>
def inception_2d_fields(img, fields, num_classes=30, is_training=True,
dropout_keep_prob=0.6, prediction_fn=layers_lib.softmax,
spatial_squeeze=True, reuse=None, scope='InceptionV1_Fields'):
with arg_scope([layers.conv2d, layers_lib.fully_connected],
weights_initializer=tf.contrib.layers.xavier_initializer(),
biases_initializer=tf.constant_initializer(0.2),
weights_regularizer=regularizers.l2_regularizer(0.0002),
biases_regularizer=regularizers.l2_regularizer(0.0002)):
net, end_points = inception_2d.inception_v1_base(img, scope=scope,
final_endpoint='Mixed_4b')
with variable_scope.variable_scope('Logits'):
net = layers_lib.avg_pool2d(net, [5, 5], stride=3, scope=
'AvgPool_0a_5x5')
net = layers.conv2d(inputs=net, num_outputs=128, kernel_size=1)
net = tf.reshape(net, [-1, 1, 1, 4 * 4 * 128])
net = array_ops.squeeze(net, [1, 2], name='Squeeze4Fields')
net = tf.concat([net, fields], axis=1)
net = layers.fully_connected(inputs=net, num_outputs=1024)
net = layers_lib.dropout(net, dropout_keep_prob, scope='Dropout_0b'
)
logits = layers.fully_connected(inputs=net, num_outputs=
num_classes, activation_fn=None, weights_initializer=tf.
contrib.layers.xavier_initializer(), biases_initializer=tf.
constant_initializer(0.0), weights_regularizer=regularizers
.l2_regularizer(0.0002), biases_regularizer=regularizers.
l2_regularizer(0.0002), scope='InnerProduct')
if spatial_squeeze:
logits = array_ops.squeeze(logits, [1, 2], name=
'SpatialSqueeze')
end_points['Logits'] = logits
end_points['Predictions'] = prediction_fn(logits, scope=
'Predictions')
return logits, end_points
| import tensorflow as tf
from tensorflow.contrib import layers
from tensorflow.contrib.framework.python.ops import arg_scope
from tensorflow.contrib.layers.python.layers import initializers
from tensorflow.contrib.layers.python.layers import layers as layers_lib
from tensorflow.contrib.layers.python.layers import regularizers
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import variable_scope
import inception_2d
def inception_2d_fields(img, fields, num_classes=30, is_training=True,
dropout_keep_prob=0.6, prediction_fn=layers_lib.softmax,
spatial_squeeze=True, reuse=None, scope='InceptionV1_Fields'):
with arg_scope([layers.conv2d, layers_lib.fully_connected],
weights_initializer=tf.contrib.layers.xavier_initializer(),
biases_initializer=tf.constant_initializer(0.2),
weights_regularizer=regularizers.l2_regularizer(0.0002),
biases_regularizer=regularizers.l2_regularizer(0.0002)):
net, end_points = inception_2d.inception_v1_base(img, scope=scope,
final_endpoint='Mixed_4b')
with variable_scope.variable_scope('Logits'):
net = layers_lib.avg_pool2d(net, [5, 5], stride=3, scope=
'AvgPool_0a_5x5')
net = layers.conv2d(inputs=net, num_outputs=128, kernel_size=1)
net = tf.reshape(net, [-1, 1, 1, 4 * 4 * 128])
net = array_ops.squeeze(net, [1, 2], name='Squeeze4Fields')
net = tf.concat([net, fields], axis=1)
net = layers.fully_connected(inputs=net, num_outputs=1024)
net = layers_lib.dropout(net, dropout_keep_prob, scope='Dropout_0b'
)
logits = layers.fully_connected(inputs=net, num_outputs=
num_classes, activation_fn=None, weights_initializer=tf.
contrib.layers.xavier_initializer(), biases_initializer=tf.
constant_initializer(0.0), weights_regularizer=regularizers
.l2_regularizer(0.0002), biases_regularizer=regularizers.
l2_regularizer(0.0002), scope='InnerProduct')
if spatial_squeeze:
logits = array_ops.squeeze(logits, [1, 2], name=
'SpatialSqueeze')
end_points['Logits'] = logits
end_points['Predictions'] = prediction_fn(logits, scope=
'Predictions')
return logits, end_points
| import tensorflow as tf
from tensorflow.contrib import layers
from tensorflow.contrib.framework.python.ops import arg_scope
from tensorflow.contrib.layers.python.layers import initializers
from tensorflow.contrib.layers.python.layers import layers as layers_lib
from tensorflow.contrib.layers.python.layers import regularizers
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import variable_scope
import inception_2d
def inception_2d_fields(img,
fields,
num_classes=30,
is_training=True,
dropout_keep_prob=0.6,
prediction_fn=layers_lib.softmax,
spatial_squeeze=True,
reuse=None,
scope='InceptionV1_Fields'
):
with arg_scope([layers.conv2d, layers_lib.fully_connected],
weights_initializer=tf.contrib.layers.xavier_initializer(),
biases_initializer=tf.constant_initializer(0.2),
weights_regularizer=regularizers.l2_regularizer(0.0002),
biases_regularizer=regularizers.l2_regularizer(0.0002)):
net, end_points = inception_2d.inception_v1_base(img, scope=scope, final_endpoint='Mixed_4b')
with variable_scope.variable_scope('Logits'):
net = layers_lib.avg_pool2d(net, [5, 5], stride=3, scope='AvgPool_0a_5x5')
net = layers.conv2d(inputs=net, num_outputs=128, kernel_size=1)
net = tf.reshape(net, [-1, 1, 1, 4 * 4 * 128])
net = array_ops.squeeze(net,[1,2],name='Squeeze4Fields')
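            # append the auxiliary field features to the flattened 4x4x128 pooled activations before the fully connected layers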
net = tf.concat([net,fields],axis=1)
net = layers.fully_connected(inputs=net, num_outputs=1024)
net = layers_lib.dropout(net, dropout_keep_prob, scope='Dropout_0b')
logits = layers.fully_connected(inputs=net,
num_outputs=num_classes,
activation_fn=None,
weights_initializer=tf.contrib.layers.xavier_initializer(),
biases_initializer=tf.constant_initializer(0.0),
weights_regularizer=regularizers.l2_regularizer(0.0002),
biases_regularizer=regularizers.l2_regularizer(0.0002),
scope='InnerProduct')
# logits = layers.conv2d(
# net,
# num_classes, [1, 1],
# activation_fn=None,
# normalizer_fn=None,
# scope='Conv2d_0c_1x1')
if spatial_squeeze:
logits = array_ops.squeeze(logits, [1, 2], name='SpatialSqueeze')
end_points['Logits'] = logits
end_points['Predictions'] = prediction_fn(logits, scope='Predictions')
return logits, end_points
| null | [
0,
1,
2,
3
] |
1,486 | 5c174dd514d0a7d9aa932fcb436f22d9a44d2327 | <mask token>
| <mask token>
def raiz(numero):
casas_decimais = 18
if numero == 0 or numero == 1:
return 'O resultado eh: ' + str(numero)
elif numero < 0:
return 'A raiz nao existe no conjunto real'
else:
posicao = 0
casa_decimal = 10 ** posicao
resultado_parcial = 0.0
while -posicao != casas_decimais + 1:
if (resultado_parcial + casa_decimal + casa_decimal * 1
) ** 2 < numero:
casa_decimal += 10 ** posicao * 1
resultado_parcial += casa_decimal
else:
posicao -= 1
casa_decimal = 10 ** posicao
return resultado_parcial
| '''
Student (Aluno): Lucas Airam Castro de Souza
Summary (Resumo): Program to compute the square root with a precision of n decimal places
def raiz(numero, casas_decimais=0):
if ((numero == 0) or (numero == 1)):
return "O resultado eh: " + str(numero)
elif (numero<0):
return "A raiz nao existe no conjunto real"
else:
resultado = [0]
if (casas_decimais == 0):
while True:
if ((resultado[0]+1)**2<numero):
resultado[0]+=1
else:
return resultado[0]
else:
for cont in range(casas_decimais):
resultado+=[0]
primeiro_numero=0
while True:
if ((primeiro_numero+1)**2<numero):
primeiro_numero+=1
else:
resultado[0]=str(primeiro_numero)
casas_corretas = 1
while (casas_corretas < len(resultado)):
cont1=0
while ((cont1 < 10) and (cont1 < len(resultado))):
numero_parcial = ""
print resultado
for cont2 in range(casas_corretas):
numero_parcial+=str(resultado[cont2])
print resultado
print resultado [1]
print resultado[cont2]
if ((int(numero_parcial)+1)**2<numero):
cont3 = int(resultado[casas_corretas])+1
resultado[casas_corretas]=str(cont3)
else:
resultado[casas_corretas]=str(cont1)
casas_corretas+=1
cont1+=1
resultado_final = ""
for cont4 in range(casas_corretas):
resultado_final+=resultado[cont4]
return int(resultado_final)
'''
def raiz(numero):
casas_decimais=18
if ((numero == 0) or (numero == 1)):
return "O resultado eh: " + str(numero)
elif (numero<0):
return "A raiz nao existe no conjunto real"
else:
posicao = 0
casa_decimal = 10**posicao
resultado_parcial = 0.0
while (-posicao != casas_decimais+1):
# print 'resultado: ' +str(resultado_parcial)
# print 'casa decimal: ' +str(casa_decimal)
# print 'posicao: ' +str(posicao)
if ((resultado_parcial+casa_decimal+(casa_decimal*1))**2 < numero):
casa_decimal+=(10**posicao)*1
resultado_parcial+=casa_decimal
else:
posicao-=1
casa_decimal=10**posicao
return resultado_parcial
| null | null | [
0,
1,
2
] |
1,487 | dbc3e51fed63fe0fadea67d05c4b4efc693938a3 | <mask token>
| <mask token>
while test_case != 0:
test_case -= 1
n, m = map(int, input().split())
ans = n * m
A = []
for i in range(n):
t = list(map(int, input().split()))
A.append(t)
for i in range(1, n - 1):
for j in range(1, m - 1):
k = 1
while j - k >= 0 and i - k >= 0 and j + k < m and i + k < n:
l = A[i][j - k]
r = A[i][j + k]
u = A[i - k][j]
d = A[i + k][j]
if l == r and u == d:
ans += 1
else:
break
k += 1
print(ans)
| test_case = int(input())
while test_case != 0:
test_case -= 1
n, m = map(int, input().split())
ans = n * m
A = []
for i in range(n):
t = list(map(int, input().split()))
A.append(t)
for i in range(1, n - 1):
for j in range(1, m - 1):
k = 1
while j - k >= 0 and i - k >= 0 and j + k < m and i + k < n:
l = A[i][j - k]
r = A[i][j + k]
u = A[i - k][j]
d = A[i + k][j]
if l == r and u == d:
ans += 1
else:
break
k += 1
print(ans)
| test_case = int(input())
while test_case != 0:
test_case -= 1
(n, m) = map(int, input().split())
ans = n * m
A = []
for i in range(n):
t = list(map(int, input().split()))
A.append(t)
for i in range(1, n - 1):
for j in range(1, m - 1):
k = 1
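            # grow the arms of a plus-shaped pattern centred at (i, j) while the left/right and up/down values match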
while j - k >= 0 and i - k >= 0 and j + k < m and i + k < n:
l = A[i][j - k]
r = A[i][j + k]
u = A[i - k][j]
d = A[i + k][j]
if l == r and u == d:
ans += 1
else:
break
k += 1
print(ans)
| null | [
0,
1,
2,
3
] |
1,488 | c9f3e956d4016846c8efe0382b79882559d6ce64 | <mask token>
| <mask token>
print(max((y - x + 9) // 10, 0))
| x, y = map(int, input().split())
print(max((y - x + 9) // 10, 0))
| null | null | [
0,
1,
2
] |
1,489 | ba54b3a148a34ced74a337665ddd5f2d9084553b | <mask token>
| <mask token>
with open('yelp_review.csv', encoding='utf8') as csvfile:
wordFrequencies = defaultdict(int)
def beautifyDate(res):
dt = time.strptime(res, '%Y-%m-%d')
return calendar.timegm(dt)
def getAsciiFriendlyString(text, wordFrequencies):
"""
        Things to note about the code: this code includes punctuation and immediately adds words that are
        not ASCII friendly to the <unk> pile
"""
strings = text.lower()
strings = strings.split(' ')
for wrd in strings:
try:
wrd = re.sub(pattern, '', wrd)
wrd.encode('ascii')
wordFrequencies[wrd] += 1
except UnicodeEncodeError:
wordFrequencies['<unk>'] += 1
toyTrain = open('100k_numDate_train.csv', 'w')
toyWriter = csv.writer(toyTrain, delimiter=',', quotechar='|', quoting=
csv.QUOTE_MINIMAL, lineterminator='\n')
print('Creating List....')
readCSV = list(csv.reader(csvfile, delimiter=','))
print('Finished creating list....')
print('Number of examples:', len(readCSV))
excludeSet = {REVIEW_ID_COL}
fieldNames = readCSV[0]
print(fieldNames)
readForOneHot = readCSV[1:]
print('Going through the words for the frequencies.')
for row in readForOneHot:
getAsciiFriendlyString(row[TEXT_COL], wordFrequencies)
print(len(readForOneHot))
print('creating file with word frequencies')
wrdFrq = open('yelp_word_frequencies.csv', 'w')
wrdFrqWriter = csv.writer(wrdFrq, delimiter=',', quotechar='|', quoting
=csv.QUOTE_MINIMAL)
wrdFrqWriter.writerow(['word', 'frequency'])
for wrd in wordFrequencies:
wrdFrqWriter.writerow([wrd, wordFrequencies[wrd]])
| <mask token>
REVIEW_ID_COL = 0
USER_ID_COL = 1
BUSINESS_ID_COL = 2
STARS_COL = 3
DATE_COL = 4
TEXT_COL = 5
USEFUL_COL = 6
FUNNY_COL = 7
COOL_COL = 8
pattern = re.compile('\\W')
with open('yelp_review.csv', encoding='utf8') as csvfile:
wordFrequencies = defaultdict(int)
def beautifyDate(res):
dt = time.strptime(res, '%Y-%m-%d')
return calendar.timegm(dt)
def getAsciiFriendlyString(text, wordFrequencies):
"""
        Things to note about the code: this code includes punctuation and immediately adds words that are
        not ASCII friendly to the <unk> pile
"""
strings = text.lower()
strings = strings.split(' ')
for wrd in strings:
try:
wrd = re.sub(pattern, '', wrd)
wrd.encode('ascii')
wordFrequencies[wrd] += 1
except UnicodeEncodeError:
wordFrequencies['<unk>'] += 1
toyTrain = open('100k_numDate_train.csv', 'w')
toyWriter = csv.writer(toyTrain, delimiter=',', quotechar='|', quoting=
csv.QUOTE_MINIMAL, lineterminator='\n')
print('Creating List....')
readCSV = list(csv.reader(csvfile, delimiter=','))
print('Finished creating list....')
print('Number of examples:', len(readCSV))
excludeSet = {REVIEW_ID_COL}
fieldNames = readCSV[0]
print(fieldNames)
readForOneHot = readCSV[1:]
print('Going through the words for the frequencies.')
for row in readForOneHot:
getAsciiFriendlyString(row[TEXT_COL], wordFrequencies)
print(len(readForOneHot))
print('creating file with word frequencies')
wrdFrq = open('yelp_word_frequencies.csv', 'w')
wrdFrqWriter = csv.writer(wrdFrq, delimiter=',', quotechar='|', quoting
=csv.QUOTE_MINIMAL)
wrdFrqWriter.writerow(['word', 'frequency'])
for wrd in wordFrequencies:
wrdFrqWriter.writerow([wrd, wordFrequencies[wrd]])
| <mask token>
import csv
import time, datetime
import calendar
from collections import defaultdict
import chardet
import re
REVIEW_ID_COL = 0
USER_ID_COL = 1
BUSINESS_ID_COL = 2
STARS_COL = 3
DATE_COL = 4
TEXT_COL = 5
USEFUL_COL = 6
FUNNY_COL = 7
COOL_COL = 8
pattern = re.compile('\\W')
with open('yelp_review.csv', encoding='utf8') as csvfile:
wordFrequencies = defaultdict(int)
def beautifyDate(res):
dt = time.strptime(res, '%Y-%m-%d')
return calendar.timegm(dt)
def getAsciiFriendlyString(text, wordFrequencies):
"""
        Things to note about the code: this code includes punctuation and immediately adds words that are
        not ASCII friendly to the <unk> pile
"""
strings = text.lower()
strings = strings.split(' ')
for wrd in strings:
try:
wrd = re.sub(pattern, '', wrd)
wrd.encode('ascii')
wordFrequencies[wrd] += 1
except UnicodeEncodeError:
wordFrequencies['<unk>'] += 1
toyTrain = open('100k_numDate_train.csv', 'w')
toyWriter = csv.writer(toyTrain, delimiter=',', quotechar='|', quoting=
csv.QUOTE_MINIMAL, lineterminator='\n')
print('Creating List....')
readCSV = list(csv.reader(csvfile, delimiter=','))
print('Finished creating list....')
print('Number of examples:', len(readCSV))
excludeSet = {REVIEW_ID_COL}
fieldNames = readCSV[0]
print(fieldNames)
readForOneHot = readCSV[1:]
print('Going through the words for the frequencies.')
for row in readForOneHot:
getAsciiFriendlyString(row[TEXT_COL], wordFrequencies)
print(len(readForOneHot))
print('creating file with word frequencies')
wrdFrq = open('yelp_word_frequencies.csv', 'w')
wrdFrqWriter = csv.writer(wrdFrq, delimiter=',', quotechar='|', quoting
=csv.QUOTE_MINIMAL)
wrdFrqWriter.writerow(['word', 'frequency'])
for wrd in wordFrequencies:
wrdFrqWriter.writerow([wrd, wordFrequencies[wrd]])
| """
This file goes through the data to find the frequencies of words in the corpus
"""
import csv
import time, datetime
import calendar
from collections import defaultdict
import chardet
import re
REVIEW_ID_COL = 0;
USER_ID_COL = 1
BUSINESS_ID_COL = 2
STARS_COL = 3
DATE_COL = 4
TEXT_COL = 5
USEFUL_COL = 6
FUNNY_COL = 7
COOL_COL = 8
pattern = re.compile('\W')
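# \W matches any non-word character; re.sub with this pattern strips punctuation from each token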
with open("yelp_review.csv", encoding="utf8") as csvfile:
wordFrequencies = defaultdict(int)
def beautifyDate(res):
# This function returns a floating point that gives the UTC
# print (res)
dt = time.strptime(res, '%Y-%m-%d')
return calendar.timegm(dt)
def getAsciiFriendlyString(text, wordFrequencies):
"""
        Things to note about the code: this code includes punctuation and immediately adds words that are
        not ASCII friendly to the <unk> pile
"""
strings = text.lower()
strings = strings.split(" ")
for wrd in strings:
try:
wrd = re.sub(pattern, '', wrd)
#print (wrd)
wrd.encode('ascii')
wordFrequencies[wrd] += 1
except UnicodeEncodeError:
#print (":( ", wrd)
wordFrequencies["<unk>"] += 1
#getAsciiFriendlyString("mooing!@ cows are the best", wordFrequencies)
#print (len(wordFrequencies))
#for wrd in wordFrequencies:
#print (wrd, wordFrequencies[wrd])
#wrdFrqWriter.writerow([wrd])
toyTrain = open("100k_numDate_train.csv", 'w')
toyWriter = csv.writer(toyTrain, delimiter=',',
quotechar='|', quoting=csv.QUOTE_MINIMAL, lineterminator = '\n')
print ("Creating List....")
readCSV = list(csv.reader(csvfile, delimiter=','))
print ("Finished creating list....")
print ("Number of examples:", len(readCSV))
excludeSet = {REVIEW_ID_COL};
fieldNames = readCSV[0]
print(fieldNames)
readForOneHot = readCSV[1:]
print ("Going through the words for the frequencies.")
# Go through the set, finding the frequencies
for row in readForOneHot:
getAsciiFriendlyString(row[TEXT_COL], wordFrequencies)
print (len(readForOneHot))
# Write the frequencies to a file (so we don't have to do this again.....)
print ("creating file with word frequencies")
wrdFrq = open("yelp_word_frequencies.csv", 'w')
wrdFrqWriter = csv.writer(wrdFrq, delimiter=',',
quotechar='|', quoting=csv.QUOTE_MINIMAL)
wrdFrqWriter.writerow(["word", "frequency"])
for wrd in wordFrequencies:
wrdFrqWriter.writerow([wrd, wordFrequencies[wrd]])
| [
0,
1,
2,
3,
4
] |
1,490 | 46d85a3babab4b18f4e0e0384f254f6105cf691d | <mask token>
def upgrade():
op.drop_column('lis_result_sourcedid',
'tool_consumer_info_product_family_code')
<mask token>
| <mask token>
def upgrade():
op.drop_column('lis_result_sourcedid',
'tool_consumer_info_product_family_code')
def downgrade():
op.add_column('lis_result_sourcedid', sa.Column(
'tool_consumer_info_product_family_code', sa.TEXT(), autoincrement=
False, nullable=True))
| <mask token>
revision = '106d94be7705'
down_revision = '973c9358b616'
def upgrade():
op.drop_column('lis_result_sourcedid',
'tool_consumer_info_product_family_code')
def downgrade():
op.add_column('lis_result_sourcedid', sa.Column(
'tool_consumer_info_product_family_code', sa.TEXT(), autoincrement=
False, nullable=True))
| <mask token>
import sqlalchemy as sa
from alembic import op
revision = '106d94be7705'
down_revision = '973c9358b616'
def upgrade():
op.drop_column('lis_result_sourcedid',
'tool_consumer_info_product_family_code')
def downgrade():
op.add_column('lis_result_sourcedid', sa.Column(
'tool_consumer_info_product_family_code', sa.TEXT(), autoincrement=
False, nullable=True))
| """
Remove tool_consumer_info_product_family_code from GradingInfo.
Revision ID: 106d94be7705
Revises: 973c9358b616
Create Date: 2023-07-06 11:23:10.850486
"""
import sqlalchemy as sa
from alembic import op
# revision identifiers, used by Alembic.
revision = "106d94be7705"
down_revision = "973c9358b616"
def upgrade():
op.drop_column("lis_result_sourcedid", "tool_consumer_info_product_family_code")
def downgrade():
op.add_column(
"lis_result_sourcedid",
sa.Column(
"tool_consumer_info_product_family_code",
sa.TEXT(),
autoincrement=False,
nullable=True,
),
)
| [
1,
2,
3,
4,
5
] |
1,491 | 51642dbb210600f9ca4e035fb884fbdda030fd04 | <mask token>
| <mask token>
def registry_names():
return iter(_registry)
| <mask token>
def registry(name):
_registry.append(name)
def registry_names():
return iter(_registry)
| _registry = []
def registry(name):
_registry.append(name)
def registry_names():
return iter(_registry)
| null | [
0,
1,
2,
3
] |
1,492 | 5b919bde9f4fe1da867695ece58f151abb9b70fb | import numpy as np
import matplotlib.pyplot as plt
from sklearn.model_selection import KFold
import keras
from keras.datasets import mnist
from keras.models import Sequential
from keras.layers.core import Dense,Activation,Dropout
from keras.optimizers import SGD,Adam,RMSprop
from keras.utils import np_utils
x_train=np.loadtxt('Xtrain.txt',delimiter=' ')
x_test=np.loadtxt('Xtest.txt',delimiter=' ')
sigmoid_output=np.loadtxt('sigmoid_MSE.txt',delimiter=' ')
softplus_output=np.loadtxt('softplus_MSE.txt',delimiter=' ')
sigmoid_mean=np.mean(sigmoid_output,axis=0)
softplus_mean=np.mean(softplus_output,axis=0)
plt.plot(np.arange(1,11), sigmoid_mean,'r')
plt.plot(np.arange(1,11), softplus_mean,'g')
plt.title('Model Select')
plt.xlabel('Number of Perceptrons')
plt.ylabel('Mean Squared Error')
plt.legend(['Sigmoid', 'Softplus'])
plt.show()
if np.amin(sigmoid_mean)>np.amin(softplus_mean):
activation_function="softplus"
n_perceptron=np.argmin(softplus_mean)+1
else:
activation_function="sigmoid"
n_perceptron=np.argmin(sigmoid_mean)+1
print "the number of perceptron of best perform model: ",n_perceptron
print "the activation function of best perform model: ",activation_function
model = Sequential([Dense(n_perceptron,input_dim=1,activation=activation_function),
Dense(1,input_dim=n_perceptron,activation = None)
])
model.compile(optimizer='adam',loss='mean_squared_error')
#Begin training with the best-performing model
converged=0
tolerance=0.001
last_score=0
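# keep training in 20-epoch chunks until the test MSE changes by less than tolerance between evaluations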
while converged==0:
model.fit(x_train[:,0], x_train[:,1], epochs = 20, batch_size=64, verbose = 0)
score = model.evaluate(x_test[:,0], x_test[:,1])
print np.abs(score-last_score)
print score
if np.abs(score-last_score)<tolerance:
converged=1
last_score=score
print "MSE of test dataset with best model: ",last_score
y_predict=model.predict(x_test[:,0])
plt.subplot(121)
plt.plot(x_test[:,0], y_predict,'.r')
plt.title('Predict Distribution')
plt.xlabel('x1 value')
plt.ylabel('predict x2 value')
plt.subplot(122)
plt.plot(x_test[:,0], x_test[:,1],'.r')
plt.title('True Distribution')
plt.xlabel('x1 value')
plt.ylabel('x2 value')
plt.show()
| null | null | null | null | [
0
] |
1,493 | ac5c4edda8a5df7abc030fd637866fa4c8fc4bfc | <mask token>
class UtilTestCase(TestCase):
<mask token>
def test_get_tree_queryset(self):
qs = get_tree_queryset(Country)
self.assertEqual(len(qs), 257)
self.assertEqual(qs[0].name, 'root')
qs = get_tree_queryset(Country, node_id=Country.objects.get(name=
'Europe').id)
self.assertEqual(len(qs), 50)
self.assertEqual(qs[0].name, u'Åland Islands')
qs = get_tree_queryset(Country, max_level=1)
self.assertEqual(len(qs), 8)
self.assertEqual(qs[0].name, 'root')
qs = get_tree_queryset(Country, max_level=True)
self.assertEqual(len(qs), 8)
qs = get_tree_queryset(Country, include_root=False)
self.assertEqual(len(qs), 256)
self.assertEqual(qs[0].name, 'Africa')
<mask token>
| <mask token>
class UtilTestCase(TestCase):
<mask token>
def test_get_tree_queryset(self):
qs = get_tree_queryset(Country)
self.assertEqual(len(qs), 257)
self.assertEqual(qs[0].name, 'root')
qs = get_tree_queryset(Country, node_id=Country.objects.get(name=
'Europe').id)
self.assertEqual(len(qs), 50)
self.assertEqual(qs[0].name, u'Åland Islands')
qs = get_tree_queryset(Country, max_level=1)
self.assertEqual(len(qs), 8)
self.assertEqual(qs[0].name, 'root')
qs = get_tree_queryset(Country, max_level=True)
self.assertEqual(len(qs), 8)
qs = get_tree_queryset(Country, include_root=False)
self.assertEqual(len(qs), 256)
self.assertEqual(qs[0].name, 'Africa')
def test_get_javascript_value(self):
self.assertEqual(get_javascript_value(True), 'true')
self.assertEqual(get_javascript_value(False), 'false')
self.assertEqual(get_javascript_value(10), '10')
| <mask token>
class UtilTestCase(TestCase):
def setUp(self):
super(UtilTestCase, self).setUp()
read_testdata()
def test_get_tree_queryset(self):
qs = get_tree_queryset(Country)
self.assertEqual(len(qs), 257)
self.assertEqual(qs[0].name, 'root')
qs = get_tree_queryset(Country, node_id=Country.objects.get(name=
'Europe').id)
self.assertEqual(len(qs), 50)
self.assertEqual(qs[0].name, u'Åland Islands')
qs = get_tree_queryset(Country, max_level=1)
self.assertEqual(len(qs), 8)
self.assertEqual(qs[0].name, 'root')
qs = get_tree_queryset(Country, max_level=True)
self.assertEqual(len(qs), 8)
qs = get_tree_queryset(Country, include_root=False)
self.assertEqual(len(qs), 256)
self.assertEqual(qs[0].name, 'Africa')
def test_get_javascript_value(self):
self.assertEqual(get_javascript_value(True), 'true')
self.assertEqual(get_javascript_value(False), 'false')
self.assertEqual(get_javascript_value(10), '10')
| from django.test import TestCase
from django_mptt_admin.util import get_tree_queryset, get_javascript_value
from ..models import Country
from .utils import read_testdata
class UtilTestCase(TestCase):
def setUp(self):
super(UtilTestCase, self).setUp()
read_testdata()
def test_get_tree_queryset(self):
qs = get_tree_queryset(Country)
self.assertEqual(len(qs), 257)
self.assertEqual(qs[0].name, 'root')
qs = get_tree_queryset(Country, node_id=Country.objects.get(name=
'Europe').id)
self.assertEqual(len(qs), 50)
self.assertEqual(qs[0].name, u'Åland Islands')
qs = get_tree_queryset(Country, max_level=1)
self.assertEqual(len(qs), 8)
self.assertEqual(qs[0].name, 'root')
qs = get_tree_queryset(Country, max_level=True)
self.assertEqual(len(qs), 8)
qs = get_tree_queryset(Country, include_root=False)
self.assertEqual(len(qs), 256)
self.assertEqual(qs[0].name, 'Africa')
def test_get_javascript_value(self):
self.assertEqual(get_javascript_value(True), 'true')
self.assertEqual(get_javascript_value(False), 'false')
self.assertEqual(get_javascript_value(10), '10')
| # coding=utf-8
from django.test import TestCase
from django_mptt_admin.util import get_tree_queryset, get_javascript_value
from ..models import Country
from .utils import read_testdata
class UtilTestCase(TestCase):
def setUp(self):
super(UtilTestCase, self).setUp()
read_testdata()
def test_get_tree_queryset(self):
# get default queryset
qs = get_tree_queryset(Country)
self.assertEqual(len(qs), 257)
self.assertEqual(qs[0].name, 'root')
# subtree
qs = get_tree_queryset(Country, node_id=Country.objects.get(name='Europe').id)
self.assertEqual(len(qs), 50)
self.assertEqual(qs[0].name, u'Åland Islands')
# max_level 1
qs = get_tree_queryset(Country, max_level=1)
self.assertEqual(len(qs), 8)
self.assertEqual(qs[0].name, 'root')
# max_level True
qs = get_tree_queryset(Country, max_level=True)
self.assertEqual(len(qs), 8)
# exclude root
qs = get_tree_queryset(Country, include_root=False)
self.assertEqual(len(qs), 256)
self.assertEqual(qs[0].name, 'Africa')
def test_get_javascript_value(self):
self.assertEqual(get_javascript_value(True), 'true')
self.assertEqual(get_javascript_value(False), 'false')
self.assertEqual(get_javascript_value(10), '10')
| [
2,
3,
4,
5,
6
] |
1,494 | 08420d31713859946b2f19cebf68c333331cb80e | <mask token>
| <mask token>
def glInitYuvTargetEXT():
"""Return boolean indicating whether this extension is available"""
from OpenGL import extensions
return extensions.hasGLExtension(_EXTENSION_NAME)
| <mask token>
from OpenGL import platform, constant, arrays
from OpenGL import extensions, wrapper
import ctypes
from OpenGL.raw.GLES2 import _types, _glgets
from OpenGL.raw.GLES2.EXT.YUV_target import *
from OpenGL.raw.GLES2.EXT.YUV_target import _EXTENSION_NAME
def glInitYuvTargetEXT():
"""Return boolean indicating whether this extension is available"""
from OpenGL import extensions
return extensions.hasGLExtension(_EXTENSION_NAME)
| '''OpenGL extension EXT.YUV_target
This module customises the behaviour of the
OpenGL.raw.GLES2.EXT.YUV_target to provide a more
Python-friendly API
Overview (from the spec)
This extension adds support for three new YUV related items: first
rendering to YUV images, second sampling from YUV images while keeping the
data in YUV space, third it defines a new built in function that does
conversion from RGB to YUV with controls to choose ITU-R BT.601-7,
ITU-R BT.601-7 Full range (JFIF images), or ITU-R BT.709-5 standard.
This new functionality is layered on top of the OES_EGL_image_external
extension.
To perform the YUV rendering capability in this extension an application
will attach a texture to the framebuffer object as the color attachment.
If the texture has a target type of TEXTURE_EXTERNAL_OES with YUV color
format then the GL driver can use this framebuffer object as the render
target, TEXTURE_EXTERNAL_OES target with RGB color format are not allowed
with this extension.
The official definition of this extension is available here:
http://www.opengl.org/registry/specs/EXT/YUV_target.txt
'''
from OpenGL import platform, constant, arrays
from OpenGL import extensions, wrapper
import ctypes
from OpenGL.raw.GLES2 import _types, _glgets
from OpenGL.raw.GLES2.EXT.YUV_target import *
from OpenGL.raw.GLES2.EXT.YUV_target import _EXTENSION_NAME
def glInitYuvTargetEXT():
'''Return boolean indicating whether this extension is available'''
from OpenGL import extensions
return extensions.hasGLExtension( _EXTENSION_NAME )
### END AUTOGENERATED SECTION | null | [
0,
1,
2,
3
] |
1,495 | e4422010337eade12226d84c79532cdbcae68d67 | <mask token>
class IssueCreateSerializer(serializers.ModelSerializer):
<mask token>
class Meta:
model = Issue
fields = ['issueName', 'website', 'issueBody', 'impact', 'project',
'email']
class IssueStatusSerializer(serializers.ModelSerializer):
"""DRF Serializer For Listing Published Issue"""
class Meta:
model = Issue
fields = ['impact', 'angle', 'name']
| <mask token>
class IssueSerializer(serializers.ModelSerializer):
<mask token>
class Meta:
model = Issue
fields = ['issueName', 'website', 'issueBody', 'impact', 'published_on'
]
class IssueCreateSerializer(serializers.ModelSerializer):
"""DRF Serializer Fpr Creating Issues By The User"""
class Meta:
model = Issue
fields = ['issueName', 'website', 'issueBody', 'impact', 'project',
'email']
class IssueStatusSerializer(serializers.ModelSerializer):
"""DRF Serializer For Listing Published Issue"""
class Meta:
model = Issue
fields = ['impact', 'angle', 'name']
| <mask token>
class IssueSerializer(serializers.ModelSerializer):
"""DRF Serializer For Listing Published Issue"""
class Meta:
model = Issue
fields = ['issueName', 'website', 'issueBody', 'impact', 'published_on'
]
class IssueCreateSerializer(serializers.ModelSerializer):
"""DRF Serializer Fpr Creating Issues By The User"""
class Meta:
model = Issue
fields = ['issueName', 'website', 'issueBody', 'impact', 'project',
'email']
class IssueStatusSerializer(serializers.ModelSerializer):
"""DRF Serializer For Listing Published Issue"""
class Meta:
model = Issue
fields = ['impact', 'angle', 'name']
| from rest_framework import serializers
from issue.models import Issue
class IssueSerializer(serializers.ModelSerializer):
"""DRF Serializer For Listing Published Issue"""
class Meta:
model = Issue
fields = ['issueName', 'website', 'issueBody', 'impact', 'published_on'
]
class IssueCreateSerializer(serializers.ModelSerializer):
"""DRF Serializer Fpr Creating Issues By The User"""
class Meta:
model = Issue
fields = ['issueName', 'website', 'issueBody', 'impact', 'project',
'email']
class IssueStatusSerializer(serializers.ModelSerializer):
"""DRF Serializer For Listing Published Issue"""
class Meta:
model = Issue
fields = ['impact', 'angle', 'name']
| null | [
3,
5,
6,
7
] |
1,496 | 7159b447ed6fcb2005f63c7b7359970defbc9d43 | <mask token>
class Message(models.Model):
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
| <mask token>
class Message(models.Model):
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
def __unicode__(self):
return self.text + ' : ' + str(self.votes) + ' : ' + str(self.
date_added) + ' : ' + str(self.score) + ' : ' + str(self.next_vote
) + '\n'
| <mask token>
class Message(models.Model):
text = models.CharField(max_length=200)
votes = models.IntegerField()
date_added = models.DateTimeField(default=datetime.now)
score = models.BigIntegerField()
next_vote = models.IntegerField(default=3600)
def __unicode__(self):
return self.text + ' : ' + str(self.votes) + ' : ' + str(self.
date_added) + ' : ' + str(self.score) + ' : ' + str(self.next_vote
) + '\n'
| from django.db import models
from datetime import datetime
class Message(models.Model):
text = models.CharField(max_length=200)
votes = models.IntegerField()
date_added = models.DateTimeField(default=datetime.now)
score = models.BigIntegerField()
next_vote = models.IntegerField(default=3600)
def __unicode__(self):
return self.text + ' : ' + str(self.votes) + ' : ' + str(self.
date_added) + ' : ' + str(self.score) + ' : ' + str(self.next_vote
) + '\n'
| from django.db import models
from datetime import datetime
class Message(models.Model):
text = models.CharField(max_length=200)
votes = models.IntegerField()
date_added = models.DateTimeField(default=datetime.now)
score = models.BigIntegerField()
next_vote = models.IntegerField(default=3600) # 86400 seconds in a day
def __unicode__(self):
return self.text + ' : '+ str(self.votes) + ' : '+str(self.date_added) + ' : ' + str(self.score) + ' : '+str(self.next_vote) + '\n'
| [
1,
2,
3,
4,
5
] |
1,497 | 8d9f4bce998857bcc7bc2fda0b519f370bf957fe | <mask token>
class Vowel(object):
<mask token>
<mask token>
| <mask token>
class Vowel(object):
def __init__(self, vowels):
self.vowels = vowels
self.list = ['a', 'e', 'i', 'o', 'u', 'A', 'E', 'I', 'O', 'U']
for j in range(len(self.list)):
if self.vowels == self.list[j]:
print('The vowel is ', self.list[j])
else:
continue
<mask token>
| class MainInit(object):
<mask token>
class Vowel(object):
def __init__(self, vowels):
self.vowels = vowels
self.list = ['a', 'e', 'i', 'o', 'u', 'A', 'E', 'I', 'O', 'U']
for j in range(len(self.list)):
if self.vowels == self.list[j]:
print('The vowel is ', self.list[j])
else:
continue
<mask token>
| class MainInit(object):
def __init__(self):
self.vowel = str(input('Please type the character: \n'))
if len(self.vowel) > 1:
print('Invalid number of character')
else:
Vowel(self.vowel)
class Vowel(object):
def __init__(self, vowels):
self.vowels = vowels
self.list = ['a', 'e', 'i', 'o', 'u', 'A', 'E', 'I', 'O', 'U']
for j in range(len(self.list)):
if self.vowels == self.list[j]:
print('The vowel is ', self.list[j])
else:
continue
MainInit()
| # Identify a vowel
class MainInit(object):
def __init__(self):
self.vowel = str(input("Please type the character: \n"))
if len(self.vowel) > 1:
print("Invalid number of character")
else:
Vowel(self.vowel)
class Vowel(object):
def __init__(self, vowels):
self.vowels = vowels
self.list = ['a', 'e', 'i', 'o', 'u', 'A', 'E', 'I', 'O', 'U']
for j in range(len(self.list)):
if self.vowels == self.list[j]:
print("The vowel is ", self.list[j])
else:
continue
MainInit()
#
#
# class MainVowel(object):
# def __init__(self):
# string = str(input("Please type the character: \n"))
# if len(string) > 1:
# print("Invalid number of character")
# else:
# VerifyVowel(string)
#
#
# class VerifyVowel(object):
# def __init__(self, string):
# self.string = string
# if len(string) > 1:
# print("Invalid number of character")
# else:
# if string == 'A' or string == 'a':
# print("The vowel is: ", string)
# elif string == 'E' or string == 'e':
# print("The vowel is: ", string)
# elif string == 'I' or string == 'i':
# print("The vowel is: ", string)
# elif string == 'O' or string == 'o':
# print("The vowel is: ", string)
# elif string == 'U' or string == 'u':
# print("The vowel is: ", string)
# else:
# print("No valid")
#
#
# MainVowel()
| [
1,
2,
3,
5,
6
] |
1,498 | 9fea76b1612bd02f512072692090f8ef60e8a0fe | <mask token>
| from .exenv import *
| null | null | null | [
0,
1
] |
1,499 | bacaaf5c91232d85f451c2c17a42cd2ec6966684 | <mask token>
| <mask token>
for i in range(0, len(data[0, :]) - 3):
for j in range(0, len(data[0, :]) - 3):
product_hor = data[j, i] * data[j, i + 1] * data[j, i + 2] * data[j,
i + 3]
if product_hor > max_product_hor:
max_product_hor = product_hor
<mask token>
for i in range(0, len(data[:, 0]) - 3):
for j in range(0, len(data[:, 0]) - 3):
product_ver = data[i, j] * data[i + 1, j] * data[i + 2, j] * data[i +
3, j]
if product_ver > max_product_ver:
max_product_ver = product_ver
<mask token>
for j in range(0, len(data[0, :]) - 3):
for i in range(0, len(data[0, :]) - 3):
product_dia = data[i, j] * data[i + 1, j + 1] * data[i + 2, j + 2
] * data[i + 3, j + 3]
if product_dia > max_product_dia:
max_product_dia = product_dia
<mask token>
for j in range(0, len(data[0, :]) - 3):
for i in range(2, len(data[0, :]) - 1):
product_dia_2 = data[i, j] * data[i - 1, j + 1] * data[i - 2, j + 2
] * data[i - 3, j + 3]
if product_dia_2 > max_product_dia_2:
max_product_dia_2 = product_dia_2
<mask token>
print('The greatest product in the same direction is {}.'.format(int(
max_value)))
| <mask token>
data = np.genfromtxt('problem_11_matrix.txt', delimiter=' ')
max_product_hor = 0
for i in range(0, len(data[0, :]) - 3):
for j in range(0, len(data[0, :]) - 3):
product_hor = data[j, i] * data[j, i + 1] * data[j, i + 2] * data[j,
i + 3]
if product_hor > max_product_hor:
max_product_hor = product_hor
max_product_ver = 0
for i in range(0, len(data[:, 0]) - 3):
for j in range(0, len(data[:, 0]) - 3):
product_ver = data[i, j] * data[i + 1, j] * data[i + 2, j] * data[i +
3, j]
if product_ver > max_product_ver:
max_product_ver = product_ver
max_product_dia = 0
for j in range(0, len(data[0, :]) - 3):
for i in range(0, len(data[0, :]) - 3):
product_dia = data[i, j] * data[i + 1, j + 1] * data[i + 2, j + 2
] * data[i + 3, j + 3]
if product_dia > max_product_dia:
max_product_dia = product_dia
max_product_dia_2 = 0
for j in range(0, len(data[0, :]) - 3):
for i in range(2, len(data[0, :]) - 1):
product_dia_2 = data[i, j] * data[i - 1, j + 1] * data[i - 2, j + 2
] * data[i - 3, j + 3]
if product_dia_2 > max_product_dia_2:
max_product_dia_2 = product_dia_2
max_value = max(max_product_hor, max_product_ver, max_product_dia,
max_product_dia_2)
print('The greatest product in the same direction is {}.'.format(int(
max_value)))
| import numpy as np
data = np.genfromtxt('problem_11_matrix.txt', delimiter=' ')
max_product_hor = 0
for i in range(0, len(data[0, :]) - 3):
for j in range(0, len(data[0, :]) - 3):
product_hor = data[j, i] * data[j, i + 1] * data[j, i + 2] * data[j,
i + 3]
if product_hor > max_product_hor:
max_product_hor = product_hor
max_product_ver = 0
for i in range(0, len(data[:, 0]) - 3):
for j in range(0, len(data[:, 0]) - 3):
product_ver = data[i, j] * data[i + 1, j] * data[i + 2, j] * data[i +
3, j]
if product_ver > max_product_ver:
max_product_ver = product_ver
max_product_dia = 0
for j in range(0, len(data[0, :]) - 3):
for i in range(0, len(data[0, :]) - 3):
product_dia = data[i, j] * data[i + 1, j + 1] * data[i + 2, j + 2
] * data[i + 3, j + 3]
if product_dia > max_product_dia:
max_product_dia = product_dia
max_product_dia_2 = 0
for j in range(0, len(data[0, :]) - 3):
for i in range(2, len(data[0, :]) - 1):
product_dia_2 = data[i, j] * data[i - 1, j + 1] * data[i - 2, j + 2
] * data[i - 3, j + 3]
if product_dia_2 > max_product_dia_2:
max_product_dia_2 = product_dia_2
max_value = max(max_product_hor, max_product_ver, max_product_dia,
max_product_dia_2)
print('The greatest product in the same direction is {}.'.format(int(
max_value)))
| # In the 20×20 grid below, four numbers along a diagonal line have been marked in red.
# The product of these numbers is 26 × 63 × 78 × 14 = 1788696.
# What is the greatest product of four adjacent numbers in the same direction
# (up, down, left, right, or diagonally) in the 20×20 grid?
import numpy as np
data = np.genfromtxt("problem_11_matrix.txt", delimiter=" ")
# find greatest product horizontally
max_product_hor = 0
for i in range(0, len(data[0, :])-3):
for j in range(0, len(data[0, :])-3):
product_hor = data[j, i] * data[j, i+1] * data[j, i+2] * data[j, i+3]
if product_hor > max_product_hor:
max_product_hor = product_hor
# print("The greatest product horizontally is {}. " .format(max_product_hor))
# find greatest product vertically
max_product_ver = 0
for i in range(0, len(data[:, 0])-3):
for j in range(0, len(data[:, 0])-3):
product_ver = data[i, j] * data[i+1, j] * data[i+2, j] * data[i+3, j]
if product_ver > max_product_ver:
max_product_ver = product_ver
# print("The greatest product vertically is {}. " .format(max_product_ver))
# find greatest product diagonally
max_product_dia = 0
for j in range(0, len(data[0, :])-3):
for i in range(0, len(data[0, :])-3):
product_dia = data[i, j] * data[i+1, j+1] * data[i+2, j+2] * data[i+3, j+3]
if product_dia > max_product_dia:
max_product_dia = product_dia
# print("The greatest product diagonally is {}. " .format(max_product_dia))
max_product_dia_2 = 0
for j in range(0, len(data[0, :])-3):
for i in range(2, len(data[0, :])-1):
product_dia_2 = data[i, j] * data[i-1, j+1] * data[i-2, j+2] * data[i-3, j+3]
if product_dia_2 > max_product_dia_2:
max_product_dia_2 = product_dia_2
# print("The greatest product diagonally is {}. " .format(max_product_dia_2))
max_value = max(max_product_hor, max_product_ver, max_product_dia, max_product_dia_2)
print("The greatest product in the same direction is {}." .format(int(max_value)))
| [
0,
1,
2,
3,
4
] |