body | body_hash | docstring | path | name | repository_name | lang | body_without_docstring
stringlengths 26–98.2k | int64 | stringlengths 1–16.8k | stringlengths 5–230 | stringlengths 1–96 | stringlengths 7–89 | stringclasses 1 (python) | stringlengths 20–98.2k
---|---|---|---|---|---|---|---|
def mouseReleaseEvent(self, event):
'\n Safe Feed\n '
pass | 2,897,160,408,821,979,600 | Safe Feed | classes/jogwidget.py | mouseReleaseEvent | comgram/gerbil_gui | python | def mouseReleaseEvent(self, event):
'\n \n '
pass |
def run(seed=543, data_path='/tmp/cifar10', output_path='/tmp/output-cifar10/', model='resnet18', batch_size=512, momentum=0.9, weight_decay=0.0001, num_workers=12, num_epochs=24, learning_rate=0.4, num_warmup_epochs=4, validate_every=3, checkpoint_every=200, backend=None, resume_from=None, log_every_iters=15, nproc_per_node=None, stop_iteration=None, with_trains=False, **spawn_kwargs):
'Main entry to train a model on CIFAR10 dataset.\n\n Args:\n seed (int): random state seed to set. Default, 543.\n data_path (str): input dataset path. Default, "/tmp/cifar10".\n output_path (str): output path. Default, "/tmp/output-cifar10".\n model (str): model name (from torchvision) of the model to train. Default, "resnet18".\n batch_size (int): total batch size. Default, 512.\n momentum (float): optimizer\'s momentum. Default, 0.9.\n weight_decay (float): weight decay. Default, 1e-4.\n num_workers (int): number of workers in the data loader. Default, 12.\n num_epochs (int): number of epochs to train the model. Default, 24.\n learning_rate (float): peak of piecewise linear learning rate scheduler. Default, 0.4.\n num_warmup_epochs (int): number of warm-up epochs before learning rate decay. Default, 4.\n validate_every (int): run model\'s validation every ``validate_every`` epochs. Default, 3.\n checkpoint_every (int): store training checkpoint every ``checkpoint_every`` iterations. Default, 200.\n backend (str, optional): backend to use for distributed configuration. Possible values: None, "nccl", "xla-tpu",\n "gloo" etc. Default, None.\n nproc_per_node (int, optional): optional argument to set up the number of processes per node. It is useful\n when the main python process is spawning training as child processes.\n resume_from (str, optional): path to checkpoint to use to resume the training from. Default, None.\n log_every_iters (int): argument to log batch loss every ``log_every_iters`` iterations.\n It can be 0 to disable it. Default, 15.\n stop_iteration (int, optional): iteration to stop the training. Can be used to check resume from checkpoint.\n with_trains (bool): if True, experiment Trains logger is set up. Default, False.\n **spawn_kwargs: Other kwargs to spawn run in child processes: master_addr, master_port, node_rank, nnodes\n\n '
config = locals()
config.update(config['spawn_kwargs'])
del config['spawn_kwargs']
spawn_kwargs['nproc_per_node'] = nproc_per_node
with idist.Parallel(backend=backend, **spawn_kwargs) as parallel:
parallel.run(training, config) | -2,428,794,729,123,700,000 | Main entry to train a model on CIFAR10 dataset.
Args:
seed (int): random state seed to set. Default, 543.
data_path (str): input dataset path. Default, "/tmp/cifar10".
output_path (str): output path. Default, "/tmp/output-cifar10".
model (str): model name (from torchvision) of the model to train. Default, "resnet18".
batch_size (int): total batch size. Default, 512.
momentum (float): optimizer's momentum. Default, 0.9.
weight_decay (float): weight decay. Default, 1e-4.
num_workers (int): number of workers in the data loader. Default, 12.
num_epochs (int): number of epochs to train the model. Default, 24.
learning_rate (float): peak of piecewise linear learning rate scheduler. Default, 0.4.
num_warmup_epochs (int): number of warm-up epochs before learning rate decay. Default, 4.
validate_every (int): run model's validation every ``validate_every`` epochs. Default, 3.
checkpoint_every (int): store training checkpoint every ``checkpoint_every`` iterations. Default, 200.
backend (str, optional): backend to use for distributed configuration. Possible values: None, "nccl", "xla-tpu",
"gloo" etc. Default, None.
nproc_per_node (int, optional): optional argument to set up the number of processes per node. It is useful
when the main python process is spawning training as child processes.
resume_from (str, optional): path to checkpoint to use to resume the training from. Default, None.
log_every_iters (int): argument to log batch loss every ``log_every_iters`` iterations.
It can be 0 to disable it. Default, 15.
stop_iteration (int, optional): iteration to stop the training. Can be used to check resume from checkpoint.
with_trains (bool): if True, experiment Trains logger is set up. Default, False.
**spawn_kwargs: Other kwargs to spawn run in child processes: master_addr, master_port, node_rank, nnodes | examples/contrib/cifar10/main.py | run | HelioStrike/ignite | python | def run(seed=543, data_path='/tmp/cifar10', output_path='/tmp/output-cifar10/', model='resnet18', batch_size=512, momentum=0.9, weight_decay=0.0001, num_workers=12, num_epochs=24, learning_rate=0.4, num_warmup_epochs=4, validate_every=3, checkpoint_every=200, backend=None, resume_from=None, log_every_iters=15, nproc_per_node=None, stop_iteration=None, with_trains=False, **spawn_kwargs):
'Main entry to train a model on CIFAR10 dataset.\n\n Args:\n seed (int): random state seed to set. Default, 543.\n data_path (str): input dataset path. Default, "/tmp/cifar10".\n output_path (str): output path. Default, "/tmp/output-cifar10".\n model (str): model name (from torchvision) of the model to train. Default, "resnet18".\n batch_size (int): total batch size. Default, 512.\n momentum (float): optimizer\'s momentum. Default, 0.9.\n weight_decay (float): weight decay. Default, 1e-4.\n num_workers (int): number of workers in the data loader. Default, 12.\n num_epochs (int): number of epochs to train the model. Default, 24.\n learning_rate (float): peak of piecewise linear learning rate scheduler. Default, 0.4.\n num_warmup_epochs (int): number of warm-up epochs before learning rate decay. Default, 4.\n validate_every (int): run model\'s validation every ``validate_every`` epochs. Default, 3.\n checkpoint_every (int): store training checkpoint every ``checkpoint_every`` iterations. Default, 200.\n backend (str, optional): backend to use for distributed configuration. Possible values: None, "nccl", "xla-tpu",\n "gloo" etc. Default, None.\n nproc_per_node (int, optional): optional argument to set up the number of processes per node. It is useful\n when the main python process is spawning training as child processes.\n resume_from (str, optional): path to checkpoint to use to resume the training from. Default, None.\n log_every_iters (int): argument to log batch loss every ``log_every_iters`` iterations.\n It can be 0 to disable it. Default, 15.\n stop_iteration (int, optional): iteration to stop the training. Can be used to check resume from checkpoint.\n with_trains (bool): if True, experiment Trains logger is set up. Default, False.\n **spawn_kwargs: Other kwargs to spawn run in child processes: master_addr, master_port, node_rank, nnodes\n\n '
config = locals()
config.update(config['spawn_kwargs'])
del config['spawn_kwargs']
spawn_kwargs['nproc_per_node'] = nproc_per_node
with idist.Parallel(backend=backend, **spawn_kwargs) as parallel:
parallel.run(training, config) |
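
A minimal driver for the `run` entry point in the row above. This is a sketch: the importable module name `main`, the "gloo" backend, and the worker count are illustrative assumptions, not values taken from the row.

```python
# Hypothetical driver for the CIFAR10 `run` entry point above.
# Assumes the row's file is importable as `main`.
from main import run

if __name__ == "__main__":
    run(
        data_path="/tmp/cifar10",
        output_path="/tmp/output-cifar10/",
        model="resnet18",
        backend="gloo",      # None would run a single non-distributed process
        nproc_per_node=2,    # spawn two training processes on this node
    )
```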
def test_lump_init(ctx_factory):
'\n Simple test to check that Lump initializer\n creates the expected solution field.\n '
cl_ctx = ctx_factory()
queue = cl.CommandQueue(cl_ctx)
actx = PyOpenCLArrayContext(queue)
dim = 2
nel_1d = 4
from meshmode.mesh.generation import generate_regular_rect_mesh
mesh = generate_regular_rect_mesh(a=[(0.0,), ((- 5.0),)], b=[(10.0,), (5.0,)], n=((nel_1d,) * dim))
order = 3
logger.info(f'Number of elements: {mesh.nelements}')
discr = EagerDGDiscretization(actx, mesh, order=order)
nodes = thaw(actx, discr.nodes())
center = np.zeros(shape=(dim,))
velocity = np.zeros(shape=(dim,))
center[0] = 5
velocity[0] = 1
lump = Lump(center=center, velocity=velocity)
lump_soln = lump(0, nodes)
cv = split_conserved(dim, lump_soln)
p = (0.4 * (cv.energy - ((0.5 * np.dot(cv.momentum, cv.momentum)) / cv.mass)))
exp_p = 1.0
errmax = discr.norm((p - exp_p), np.inf)
logger.info(f'lump_soln = {lump_soln}')
logger.info(f'pressure = {p}')
assert (errmax < 1e-15) | -774,467,605,370,207,400 | Simple test to check that Lump initializer
creates the expected solution field. | test/test_init.py | test_lump_init | anderson2981/mirgecom | python | def test_lump_init(ctx_factory):
'\n Simple test to check that Lump initializer\n creates the expected solution field.\n '
cl_ctx = ctx_factory()
queue = cl.CommandQueue(cl_ctx)
actx = PyOpenCLArrayContext(queue)
dim = 2
nel_1d = 4
from meshmode.mesh.generation import generate_regular_rect_mesh
mesh = generate_regular_rect_mesh(a=[(0.0,), ((- 5.0),)], b=[(10.0,), (5.0,)], n=((nel_1d,) * dim))
order = 3
logger.info(f'Number of elements: {mesh.nelements}')
discr = EagerDGDiscretization(actx, mesh, order=order)
nodes = thaw(actx, discr.nodes())
center = np.zeros(shape=(dim,))
velocity = np.zeros(shape=(dim,))
center[0] = 5
velocity[0] = 1
lump = Lump(center=center, velocity=velocity)
lump_soln = lump(0, nodes)
cv = split_conserved(dim, lump_soln)
p = (0.4 * (cv.energy - ((0.5 * np.dot(cv.momentum, cv.momentum)) / cv.mass)))
exp_p = 1.0
errmax = discr.norm((p - exp_p), np.inf)
logger.info(f'lump_soln = {lump_soln}')
logger.info(f'pressure = {p}')
assert (errmax < 1e-15) |
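
The pressure check in this row inverts the conserved state with the ideal-gas relation; the literal `0.4` is gamma − 1 for gamma = 1.4, with E = `cv.energy`, rho*u = `cv.momentum`, and rho = `cv.mass`. The same relation reappears in the vortex test below and underlies the `IdealSingleGas.pressure` calls in the later test rows:

```latex
p = (\gamma - 1)\left(E - \frac{\lVert \rho \vec{u} \rVert^{2}}{2\rho}\right),
\qquad \gamma = 1.4 \implies \gamma - 1 = 0.4
```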
def test_vortex_init(ctx_factory):
'\n Simple test to check that Vortex2D initializer\n creates the expected solution field.\n '
cl_ctx = ctx_factory()
queue = cl.CommandQueue(cl_ctx)
actx = PyOpenCLArrayContext(queue)
dim = 2
nel_1d = 4
from meshmode.mesh.generation import generate_regular_rect_mesh
mesh = generate_regular_rect_mesh(a=[(0.0,), ((- 5.0),)], b=[(10.0,), (5.0,)], n=((nel_1d,) * dim))
order = 3
logger.info(f'Number of elements: {mesh.nelements}')
discr = EagerDGDiscretization(actx, mesh, order=order)
nodes = thaw(actx, discr.nodes())
vortex = Vortex2D()
vortex_soln = vortex(0, nodes)
gamma = 1.4
cv = split_conserved(dim, vortex_soln)
p = (0.4 * (cv.energy - ((0.5 * np.dot(cv.momentum, cv.momentum)) / cv.mass)))
exp_p = (cv.mass ** gamma)
errmax = discr.norm((p - exp_p), np.inf)
logger.info(f'vortex_soln = {vortex_soln}')
logger.info(f'pressure = {p}')
assert (errmax < 1e-15) | 5,725,629,722,062,752,000 | Simple test to check that Vortex2D initializer
creates the expected solution field. | test/test_init.py | test_vortex_init | anderson2981/mirgecom | python | def test_vortex_init(ctx_factory):
'\n Simple test to check that Vortex2D initializer\n creates the expected solution field.\n '
cl_ctx = ctx_factory()
queue = cl.CommandQueue(cl_ctx)
actx = PyOpenCLArrayContext(queue)
dim = 2
nel_1d = 4
from meshmode.mesh.generation import generate_regular_rect_mesh
mesh = generate_regular_rect_mesh(a=[(0.0,), ((- 5.0),)], b=[(10.0,), (5.0,)], n=((nel_1d,) * dim))
order = 3
logger.info(f'Number of elements: {mesh.nelements}')
discr = EagerDGDiscretization(actx, mesh, order=order)
nodes = thaw(actx, discr.nodes())
vortex = Vortex2D()
vortex_soln = vortex(0, nodes)
gamma = 1.4
cv = split_conserved(dim, vortex_soln)
p = (0.4 * (cv.energy - ((0.5 * np.dot(cv.momentum, cv.momentum)) / cv.mass)))
exp_p = (cv.mass ** gamma)
errmax = discr.norm((p - exp_p), np.inf)
logger.info(f'vortex_soln = {vortex_soln}')
logger.info(f'pressure = {p}')
assert (errmax < 1e-15) |
def test_shock_init(ctx_factory):
'\n Simple test to check that Shock1D initializer\n creates the expected solution field.\n '
cl_ctx = ctx_factory()
queue = cl.CommandQueue(cl_ctx)
actx = PyOpenCLArrayContext(queue)
nel_1d = 10
dim = 2
from meshmode.mesh.generation import generate_regular_rect_mesh
mesh = generate_regular_rect_mesh(a=[(0.0,), (1.0,)], b=[((- 0.5),), (0.5,)], n=((nel_1d,) * dim))
order = 3
print(f'Number of elements: {mesh.nelements}')
discr = EagerDGDiscretization(actx, mesh, order=order)
nodes = thaw(actx, discr.nodes())
initr = SodShock1D()
initsoln = initr(t=0.0, x_vec=nodes)
print('Sod Soln:', initsoln)
xpl = 1.0
xpr = 0.1
tol = 1e-15
nodes_x = nodes[0]
eos = IdealSingleGas()
cv = split_conserved(dim, initsoln)
p = eos.pressure(cv)
assert (discr.norm(actx.np.where((nodes_x < 0.5), (p - xpl), (p - xpr)), np.inf) < tol) | -983,791,910,415,503,500 | Simple test to check that Shock1D initializer
creates the expected solution field. | test/test_init.py | test_shock_init | anderson2981/mirgecom | python | def test_shock_init(ctx_factory):
'\n Simple test to check that Shock1D initializer\n creates the expected solution field.\n '
cl_ctx = ctx_factory()
queue = cl.CommandQueue(cl_ctx)
actx = PyOpenCLArrayContext(queue)
nel_1d = 10
dim = 2
from meshmode.mesh.generation import generate_regular_rect_mesh
mesh = generate_regular_rect_mesh(a=[(0.0,), (1.0,)], b=[((- 0.5),), (0.5,)], n=((nel_1d,) * dim))
order = 3
print(f'Number of elements: {mesh.nelements}')
discr = EagerDGDiscretization(actx, mesh, order=order)
nodes = thaw(actx, discr.nodes())
initr = SodShock1D()
initsoln = initr(t=0.0, x_vec=nodes)
print('Sod Soln:', initsoln)
xpl = 1.0
xpr = 0.1
tol = 1e-15
nodes_x = nodes[0]
eos = IdealSingleGas()
cv = split_conserved(dim, initsoln)
p = eos.pressure(cv)
assert (discr.norm(actx.np.where((nodes_x < 0.5), (p - xpl), (p - xpr)), np.inf) < tol) |
@pytest.mark.parametrize('dim', [1, 2, 3])
def test_uniform(ctx_factory, dim):
'\n Simple test to check that Uniform initializer\n creates the expected solution field.\n '
cl_ctx = ctx_factory()
queue = cl.CommandQueue(cl_ctx)
actx = PyOpenCLArrayContext(queue)
nel_1d = 2
from meshmode.mesh.generation import generate_regular_rect_mesh
mesh = generate_regular_rect_mesh(a=(((- 0.5),) * dim), b=((0.5,) * dim), n=((nel_1d,) * dim))
order = 1
print(f'Number of elements: {mesh.nelements}')
discr = EagerDGDiscretization(actx, mesh, order=order)
nodes = thaw(actx, discr.nodes())
print(f'DIM = {dim}, {len(nodes)}')
print(f'Nodes={nodes}')
from mirgecom.initializers import Uniform
initr = Uniform(numdim=dim)
initsoln = initr(t=0.0, x_vec=nodes)
tol = 1e-15
ssoln = split_conserved(dim, initsoln)
assert (discr.norm((ssoln.mass - 1.0), np.inf) < tol)
assert (discr.norm((ssoln.energy - 2.5), np.inf) < tol)
print(f'Uniform Soln:{initsoln}')
eos = IdealSingleGas()
cv = split_conserved(dim, initsoln)
p = eos.pressure(cv)
print(f'Press:{p}')
assert (discr.norm((p - 1.0), np.inf) < tol) | 7,036,261,238,489,417,000 | Simple test to check that Uniform initializer
creates the expected solution field. | test/test_init.py | test_uniform | anderson2981/mirgecom | python | @pytest.mark.parametrize('dim', [1, 2, 3])
def test_uniform(ctx_factory, dim):
'\n Simple test to check that Uniform initializer\n creates the expected solution field.\n '
cl_ctx = ctx_factory()
queue = cl.CommandQueue(cl_ctx)
actx = PyOpenCLArrayContext(queue)
nel_1d = 2
from meshmode.mesh.generation import generate_regular_rect_mesh
mesh = generate_regular_rect_mesh(a=(((- 0.5),) * dim), b=((0.5,) * dim), n=((nel_1d,) * dim))
order = 1
print(f'Number of elements: {mesh.nelements}')
discr = EagerDGDiscretization(actx, mesh, order=order)
nodes = thaw(actx, discr.nodes())
print(f'DIM = {dim}, {len(nodes)}')
print(f'Nodes={nodes}')
from mirgecom.initializers import Uniform
initr = Uniform(numdim=dim)
initsoln = initr(t=0.0, x_vec=nodes)
tol = 1e-15
ssoln = split_conserved(dim, initsoln)
assert (discr.norm((ssoln.mass - 1.0), np.inf) < tol)
assert (discr.norm((ssoln.energy - 2.5), np.inf) < tol)
print(f'Uniform Soln:{initsoln}')
eos = IdealSingleGas()
cv = split_conserved(dim, initsoln)
p = eos.pressure(cv)
print(f'Press:{p}')
assert (discr.norm((p - 1.0), np.inf) < tol) |
@pytest.mark.parametrize('dim', [1, 2, 3])
def test_pulse(ctx_factory, dim):
'\n Test of Gaussian pulse generator.\n If it looks, walks, and quacks like a duck, then ...\n '
cl_ctx = ctx_factory()
queue = cl.CommandQueue(cl_ctx)
actx = PyOpenCLArrayContext(queue)
nel_1d = 10
from meshmode.mesh.generation import generate_regular_rect_mesh
mesh = generate_regular_rect_mesh(a=(((- 0.5),) * dim), b=((0.5,) * dim), n=((nel_1d,) * dim))
order = 1
print(f'Number of elements: {mesh.nelements}')
discr = EagerDGDiscretization(actx, mesh, order=order)
nodes = thaw(actx, discr.nodes())
print(f'DIM = {dim}, {len(nodes)}')
print(f'Nodes={nodes}')
tol = 1e-15
from mirgecom.initializers import _make_pulse
amp = 1.0
w = 0.1
rms2 = (w * w)
r0 = np.zeros(dim)
r2 = (np.dot(nodes, nodes) / rms2)
pulse = _make_pulse(amp=amp, r0=r0, w=w, r=nodes)
print(f'Pulse = {pulse}')
pulse_check = actx.np.exp(((- 0.5) * r2))
print(f'exact: {pulse_check}')
pulse_resid = (pulse - pulse_check)
print(f'pulse residual: {pulse_resid}')
assert (discr.norm(pulse_resid, np.inf) < tol)
amp = 2.0
pulse = 0
pulse = _make_pulse(amp=amp, r0=r0, w=w, r=nodes)
pulse_resid = (pulse - (pulse_check + pulse_check))
assert (discr.norm(pulse_resid, np.inf) < tol)
amp = 1.0
rcheck = (np.sqrt(2.0) * nodes)
pulse = _make_pulse(amp=amp, r0=r0, w=w, r=rcheck)
assert (discr.norm((pulse - (pulse_check * pulse_check)), np.inf) < tol)
w = (w / np.sqrt(2.0))
pulse = _make_pulse(amp=amp, r0=r0, w=w, r=nodes)
assert (discr.norm((pulse - (pulse_check * pulse_check)), np.inf) < tol) | 852,771,810,507,177,200 | Test of Gaussian pulse generator.
If it looks, walks, and quacks like a duck, then ... | test/test_init.py | test_pulse | anderson2981/mirgecom | python | @pytest.mark.parametrize('dim', [1, 2, 3])
def test_pulse(ctx_factory, dim):
'\n Test of Gaussian pulse generator.\n If it looks, walks, and quacks like a duck, then ...\n '
cl_ctx = ctx_factory()
queue = cl.CommandQueue(cl_ctx)
actx = PyOpenCLArrayContext(queue)
nel_1d = 10
from meshmode.mesh.generation import generate_regular_rect_mesh
mesh = generate_regular_rect_mesh(a=(((- 0.5),) * dim), b=((0.5,) * dim), n=((nel_1d,) * dim))
order = 1
print(f'Number of elements: {mesh.nelements}')
discr = EagerDGDiscretization(actx, mesh, order=order)
nodes = thaw(actx, discr.nodes())
print(f'DIM = {dim}, {len(nodes)}')
print(f'Nodes={nodes}')
tol = 1e-15
from mirgecom.initializers import _make_pulse
amp = 1.0
w = 0.1
rms2 = (w * w)
r0 = np.zeros(dim)
r2 = (np.dot(nodes, nodes) / rms2)
pulse = _make_pulse(amp=amp, r0=r0, w=w, r=nodes)
print(f'Pulse = {pulse}')
pulse_check = actx.np.exp(((- 0.5) * r2))
print(f'exact: {pulse_check}')
pulse_resid = (pulse - pulse_check)
print(f'pulse residual: {pulse_resid}')
assert (discr.norm(pulse_resid, np.inf) < tol)
amp = 2.0
pulse = 0
pulse = _make_pulse(amp=amp, r0=r0, w=w, r=nodes)
pulse_resid = (pulse - (pulse_check + pulse_check))
assert (discr.norm(pulse_resid, np.inf) < tol)
amp = 1.0
rcheck = (np.sqrt(2.0) * nodes)
pulse = _make_pulse(amp=amp, r0=r0, w=w, r=rcheck)
assert (discr.norm((pulse - (pulse_check * pulse_check)), np.inf) < tol)
w = (w / np.sqrt(2.0))
pulse = _make_pulse(amp=amp, r0=r0, w=w, r=nodes)
assert (discr.norm((pulse - (pulse_check * pulse_check)), np.inf) < tol) |
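
The three assertions above (doubling `amp` doubles the pulse; scaling `r` by sqrt(2), or shrinking `w` by sqrt(2), squares the unit-amplitude Gaussian) all follow from the pulse shape the test assumes `_make_pulse` implements:

```latex
G(\vec{r}) = A \exp\left(-\frac{\lVert \vec{r} - \vec{r}_{0} \rVert^{2}}{2 w^{2}}\right)
```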
def _to_container(cfg):
'\n mmdet will assert the type of dict/list.\n So convert omegaconf objects to dict/list.\n '
if isinstance(cfg, DictConfig):
cfg = OmegaConf.to_container(cfg, resolve=True)
from mmcv.utils import ConfigDict
return ConfigDict(cfg) | 5,467,841,335,562,911,000 | mmdet will assert the type of dict/list.
So convert omegaconf objects to dict/list. | detectron2/modeling/mmdet_wrapper.py | _to_container | KnightOfTheMoonlight/visdom4detectron2 | python | def _to_container(cfg):
'\n mmdet will assert the type of dict/list.\n So convert omegaconf objects to dict/list.\n '
if isinstance(cfg, DictConfig):
cfg = OmegaConf.to_container(cfg, resolve=True)
from mmcv.utils import ConfigDict
return ConfigDict(cfg) |
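
A small usage sketch for `_to_container`; mmcv's `ConfigDict` is a real class, but the backbone dict here is invented for illustration.

```python
# Convert an OmegaConf node into mmcv's ConfigDict via _to_container.
from omegaconf import OmegaConf

backbone_cfg = OmegaConf.create({"type": "ResNet", "depth": 50})
cfg = _to_container(backbone_cfg)        # resolves interpolations, wraps in ConfigDict
print(type(cfg).__name__, cfg["depth"])  # ConfigDict 50
```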
def __init__(self, backbone: Union[(nn.Module, Mapping)], neck: Union[(nn.Module, Mapping, None)]=None, *, pretrained_backbone: Optional[str]=None, output_shapes: List[ShapeSpec], output_names: Optional[List[str]]=None):
'\n Args:\n backbone: either a backbone module or a mmdet config dict that defines a\n backbone. The backbone takes a 4D image tensor and returns a\n sequence of tensors.\n neck: either a backbone module or a mmdet config dict that defines a\n neck. The neck takes outputs of backbone and returns a\n sequence of tensors. If None, no neck is used.\n pretrained_backbone: defines the backbone weights that can be loaded by\n mmdet, such as "torchvision://resnet50".\n output_shapes: shape for every output of the backbone (or neck, if given).\n stride and channels are often needed.\n output_names: names for every output of the backbone (or neck, if given).\n By default, will use "out0", "out1", ...\n '
super().__init__()
if isinstance(backbone, Mapping):
from mmdet.models import build_backbone
backbone = build_backbone(_to_container(backbone))
self.backbone = backbone
if isinstance(neck, Mapping):
from mmdet.models import build_neck
neck = build_neck(_to_container(neck))
self.neck = neck
logger.info(f'Initializing mmdet backbone weights: {pretrained_backbone} ...')
self.backbone.init_weights(pretrained_backbone)
self.backbone.train()
if (self.neck is not None):
logger.info('Initializing mmdet neck weights ...')
if isinstance(self.neck, nn.Sequential):
for m in self.neck:
m.init_weights()
else:
self.neck.init_weights()
self.neck.train()
self._output_shapes = output_shapes
if (not output_names):
output_names = [f'out{i}' for i in range(len(output_shapes))]
self._output_names = output_names | 1,076,643,239,655,832,400 | Args:
backbone: either a backbone module or a mmdet config dict that defines a
backbone. The backbone takes a 4D image tensor and returns a
sequence of tensors.
neck: either a backbone module or a mmdet config dict that defines a
neck. The neck takes outputs of backbone and returns a
sequence of tensors. If None, no neck is used.
pretrained_backbone: defines the backbone weights that can be loaded by
mmdet, such as "torchvision://resnet50".
output_shapes: shape for every output of the backbone (or neck, if given).
stride and channels are often needed.
output_names: names for every output of the backbone (or neck, if given).
By default, will use "out0", "out1", ... | detectron2/modeling/mmdet_wrapper.py | __init__ | KnightOfTheMoonlight/visdom4detectron2 | python | def __init__(self, backbone: Union[(nn.Module, Mapping)], neck: Union[(nn.Module, Mapping, None)]=None, *, pretrained_backbone: Optional[str]=None, output_shapes: List[ShapeSpec], output_names: Optional[List[str]]=None):
'\n Args:\n backbone: either a backbone module or a mmdet config dict that defines a\n backbone. The backbone takes a 4D image tensor and returns a\n sequence of tensors.\n neck: either a backbone module or a mmdet config dict that defines a\n neck. The neck takes outputs of backbone and returns a\n sequence of tensors. If None, no neck is used.\n pretrained_backbone: defines the backbone weights that can be loaded by\n mmdet, such as "torchvision://resnet50".\n output_shapes: shape for every output of the backbone (or neck, if given).\n stride and channels are often needed.\n output_names: names for every output of the backbone (or neck, if given).\n By default, will use "out0", "out1", ...\n '
super().__init__()
if isinstance(backbone, Mapping):
from mmdet.models import build_backbone
backbone = build_backbone(_to_container(backbone))
self.backbone = backbone
if isinstance(neck, Mapping):
from mmdet.models import build_neck
neck = build_neck(_to_container(neck))
self.neck = neck
logger.info(f'Initializing mmdet backbone weights: {pretrained_backbone} ...')
self.backbone.init_weights(pretrained_backbone)
self.backbone.train()
if (self.neck is not None):
logger.info('Initializing mmdet neck weights ...')
if isinstance(self.neck, nn.Sequential):
for m in self.neck:
m.init_weights()
else:
self.neck.init_weights()
self.neck.train()
self._output_shapes = output_shapes
if (not output_names):
output_names = [f'out{i}' for i in range(len(output_shapes))]
self._output_names = output_names |
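
A construction sketch for the backbone wrapper above; the class name `MMDetBackbone` matches upstream detectron2's `mmdet_wrapper.py`, while the ResNet/FPN dicts are typical mmdet-style configs rather than values taken from the row.

```python
# Sketch: wrap an mmdet ResNet-50 + FPN as a detectron2 backbone.
from detectron2.layers import ShapeSpec
from detectron2.modeling.mmdet_wrapper import MMDetBackbone

backbone = MMDetBackbone(
    backbone=dict(type="ResNet", depth=50, num_stages=4,
                  out_indices=(0, 1, 2, 3), norm_cfg=dict(type="BN")),
    neck=dict(type="FPN", in_channels=[256, 512, 1024, 2048],
              out_channels=256, num_outs=5),
    pretrained_backbone="torchvision://resnet50",
    output_shapes=[ShapeSpec(channels=256, stride=s) for s in (4, 8, 16, 32, 64)],
    output_names=["p2", "p3", "p4", "p5", "p6"],
)
```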
def __init__(self, detector: Union[(nn.Module, Mapping)], *, size_divisibility=32, pixel_mean: Tuple[float], pixel_std: Tuple[float]):
'\n Args:\n detector: a mmdet detector, or a mmdet config dict that defines a detector.\n size_divisibility: pad input images to multiple of this number\n pixel_mean: per-channel mean to normalize input image\n pixel_std: per-channel stddev to normalize input image\n '
super().__init__()
if isinstance(detector, Mapping):
from mmdet.models import build_detector
detector = build_detector(_to_container(detector))
self.detector = detector
self.size_divisibility = size_divisibility
self.register_buffer('pixel_mean', torch.tensor(pixel_mean).view((- 1), 1, 1), False)
self.register_buffer('pixel_std', torch.tensor(pixel_std).view((- 1), 1, 1), False)
assert (self.pixel_mean.shape == self.pixel_std.shape), f'{self.pixel_mean} and {self.pixel_std} have different shapes!' | 8,680,013,071,488,949,000 | Args:
detector: a mmdet detector, or a mmdet config dict that defines a detector.
size_divisibility: pad input images to multiple of this number
pixel_mean: per-channel mean to normalize input image
pixel_std: per-channel stddev to normalize input image | detectron2/modeling/mmdet_wrapper.py | __init__ | KnightOfTheMoonlight/visdom4detectron2 | python | def __init__(self, detector: Union[(nn.Module, Mapping)], *, size_divisibility=32, pixel_mean: Tuple[float], pixel_std: Tuple[float]):
'\n Args:\n detector: a mmdet detector, or a mmdet config dict that defines a detector.\n size_divisibility: pad input images to multiple of this number\n pixel_mean: per-channel mean to normalize input image\n pixel_std: per-channel stddev to normalize input image\n '
super().__init__()
if isinstance(detector, Mapping):
from mmdet.models import build_detector
detector = build_detector(_to_container(detector))
self.detector = detector
self.size_divisibility = size_divisibility
self.register_buffer('pixel_mean', torch.tensor(pixel_mean).view((- 1), 1, 1), False)
self.register_buffer('pixel_std', torch.tensor(pixel_std).view((- 1), 1, 1), False)
assert (self.pixel_mean.shape == self.pixel_std.shape), f'{self.pixel_mean} and {self.pixel_std} have different shapes!' |
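
A companion sketch for the detector wrapper above; `MMDetDetector` is the upstream detectron2 class name, the config path is a placeholder, and the pixel statistics are the usual ImageNet RGB values.

```python
# Sketch: wrap a whole mmdet detector for use inside detectron2.
from mmcv import Config
from detectron2.modeling.mmdet_wrapper import MMDetDetector

mmdet_cfg = Config.fromfile("path/to/mask_rcnn_r50_fpn.py")  # placeholder path
model = MMDetDetector(
    detector=mmdet_cfg.model,
    size_divisibility=32,
    pixel_mean=(123.675, 116.28, 103.53),  # ImageNet mean, RGB order
    pixel_std=(58.395, 57.12, 57.375),
)
```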
def handle_message(self, message):
'Respond to the user message by retrieving documents from the knowledge base. \n \n Args:\n message ([type]): [description]\n '
query = message.text
(candidates, similarities) = self.vector_index.retrieve(query, self.topk)
selected = [candidate for (candidate, similarity) in zip(candidates, similarities) if (similarity >= self.threshold)]
result = {}
if selected:
documents = self.es_index.get_answer_by_question_ids(selected)
response = self.template.render(documents=documents)
result['response'] = BotMessage(message.sender_id, response.strip())
else:
selected = [candidate for (candidate, similarity) in zip(candidates, similarities) if (similarity >= self.prompt_threshold)]
if selected:
documents = self.es_index.get_documents_by_ids(selected)
prompt = self.prompt_template.render(documents=documents)
result['prompt'] = BotMessage(message.sender_id, prompt.strip())
return result | 4,608,976,265,523,703,000 | Respond to the user message by retrieving documents from the knowledge base.
Args:
message ([type]): [description] | Broca/faq_engine/agent.py | handle_message | lawRossi/Broca | python | def handle_message(self, message):
'Respond to the user message by retrieving documents from the knowledge base. \n \n Args:\n message ([type]): [description]\n '
query = message.text
(candidates, similarities) = self.vector_index.retrieve(query, self.topk)
selected = [candidate for (candidate, similarity) in zip(candidates, similarities) if (similarity >= self.threshold)]
result = {}
if selected:
documents = self.es_index.get_answer_by_question_ids(selected)
response = self.template.render(documents=documents)
result['response'] = BotMessage(message.sender_id, response.strip())
else:
selected = [candidate for (candidate, similarity) in zip(candidates, similarities) if (similarity >= self.prompt_threshold)]
if selected:
documents = self.es_index.get_documents_by_ids(selected)
prompt = self.prompt_template.render(documents=documents)
result['prompt'] = BotMessage(message.sender_id, prompt.strip())
return result |
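
The control flow above is a two-tier threshold: answer outright when a candidate clears `threshold`, otherwise fall back to a clarifying prompt when it clears the lower `prompt_threshold`. A self-contained sketch of that decision, with invented scores and cutoffs:

```python
# Standalone sketch of the two-tier retrieval threshold above.
def select(candidates, similarities, threshold=0.85, prompt_threshold=0.6):
    answer = [c for c, s in zip(candidates, similarities) if s >= threshold]
    if answer:
        return "response", answer
    prompt = [c for c, s in zip(candidates, similarities) if s >= prompt_threshold]
    return ("prompt", prompt) if prompt else ("none", [])

print(select(["q1", "q2", "q3"], [0.90, 0.70, 0.30]))  # ('response', ['q1'])
print(select(["q1", "q2", "q3"], [0.80, 0.70, 0.30]))  # ('prompt', ['q1', 'q2'])
```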
def _deep_update_type_hint(node: Node, type_hint: Any) -> None:
'Ensure node is compatible with type_hint, mutating if necessary.'
from omegaconf import DictConfig, ListConfig
from ._utils import get_dict_key_value_types, get_list_element_type
if (type_hint is Any):
return
_shallow_validate_type_hint(node, type_hint)
(new_is_optional, new_ref_type) = _resolve_optional(type_hint)
node._metadata.ref_type = new_ref_type
node._metadata.optional = new_is_optional
if (is_list_annotation(new_ref_type) and isinstance(node, ListConfig)):
new_element_type = get_list_element_type(new_ref_type)
node._metadata.element_type = new_element_type
if (not _is_special(node)):
for i in range(len(node)):
_deep_update_subnode(node, i, new_element_type)
if (is_dict_annotation(new_ref_type) and isinstance(node, DictConfig)):
(new_key_type, new_element_type) = get_dict_key_value_types(new_ref_type)
node._metadata.key_type = new_key_type
node._metadata.element_type = new_element_type
if (not _is_special(node)):
for key in node:
if ((new_key_type is not Any) and (not isinstance(key, new_key_type))):
raise KeyValidationError((f'Key {key!r} ({type(key).__name__}) is incompatible' + f" with key type hint '{new_key_type.__name__}'"))
_deep_update_subnode(node, key, new_element_type) | 7,486,164,471,862,268,000 | Ensure node is compatible with type_hint, mutating if necessary. | omegaconf/basecontainer.py | _deep_update_type_hint | gwenzek/omegaconf | python | def _deep_update_type_hint(node: Node, type_hint: Any) -> None:
from omegaconf import DictConfig, ListConfig
from ._utils import get_dict_key_value_types, get_list_element_type
if (type_hint is Any):
return
_shallow_validate_type_hint(node, type_hint)
(new_is_optional, new_ref_type) = _resolve_optional(type_hint)
node._metadata.ref_type = new_ref_type
node._metadata.optional = new_is_optional
if (is_list_annotation(new_ref_type) and isinstance(node, ListConfig)):
new_element_type = get_list_element_type(new_ref_type)
node._metadata.element_type = new_element_type
if (not _is_special(node)):
for i in range(len(node)):
_deep_update_subnode(node, i, new_element_type)
if (is_dict_annotation(new_ref_type) and isinstance(node, DictConfig)):
(new_key_type, new_element_type) = get_dict_key_value_types(new_ref_type)
node._metadata.key_type = new_key_type
node._metadata.element_type = new_element_type
if (not _is_special(node)):
for key in node:
if ((new_key_type is not Any) and (not isinstance(key, new_key_type))):
raise KeyValidationError((f'Key {key!r} ({type(key).__name__}) is incompatible' + f" with key type hint '{new_key_type.__name__}'"))
_deep_update_subnode(node, key, new_element_type) |
def _deep_update_subnode(node: BaseContainer, key: Any, value_type_hint: Any) -> None:
'Get node[key] and ensure it is compatible with value_type_hint, mutating if necessary.'
subnode = node._get_node(key)
assert isinstance(subnode, Node)
if _is_special(subnode):
node._wrap_value_and_set(key, subnode._value(), value_type_hint)
subnode = node._get_node(key)
assert isinstance(subnode, Node)
_deep_update_type_hint(subnode, value_type_hint) | 3,256,522,286,330,759,700 | Get node[key] and ensure it is compatible with value_type_hint, mutating if necessary. | omegaconf/basecontainer.py | _deep_update_subnode | gwenzek/omegaconf | python | def _deep_update_subnode(node: BaseContainer, key: Any, value_type_hint: Any) -> None:
subnode = node._get_node(key)
assert isinstance(subnode, Node)
if _is_special(subnode):
node._wrap_value_and_set(key, subnode._value(), value_type_hint)
subnode = node._get_node(key)
assert isinstance(subnode, Node)
_deep_update_type_hint(subnode, value_type_hint) |
def _shallow_validate_type_hint(node: Node, type_hint: Any) -> None:
"Error if node's type, content and metadata are not compatible with type_hint."
from omegaconf import DictConfig, ListConfig, ValueNode
(is_optional, ref_type) = _resolve_optional(type_hint)
vk = get_value_kind(node)
if node._is_none():
if (not is_optional):
value = _get_value(node)
raise ValidationError((f'Value {value!r} ({type(value).__name__})' + f" is incompatible with type hint '{ref_type.__name__}'"))
return
elif (vk in (ValueKind.MANDATORY_MISSING, ValueKind.INTERPOLATION)):
return
elif (vk == ValueKind.VALUE):
if (is_primitive_type(ref_type) and isinstance(node, ValueNode)):
value = node._value()
if (not isinstance(value, ref_type)):
raise ValidationError((f'Value {value!r} ({type(value).__name__})' + f" is incompatible with type hint '{ref_type.__name__}'"))
elif (is_structured_config(ref_type) and isinstance(node, DictConfig)):
return
elif (is_dict_annotation(ref_type) and isinstance(node, DictConfig)):
return
elif (is_list_annotation(ref_type) and isinstance(node, ListConfig)):
return
elif isinstance(node, ValueNode):
value = node._value()
raise ValidationError((f'Value {value!r} ({type(value).__name__})' + f" is incompatible with type hint '{ref_type}'"))
else:
raise ValidationError((f"'{type(node).__name__}' is incompatible" + f" with type hint '{ref_type}'"))
else:
assert False | -2,014,395,401,024,706,800 | Error if node's type, content and metadata are not compatible with type_hint. | omegaconf/basecontainer.py | _shallow_validate_type_hint | gwenzek/omegaconf | python | def _shallow_validate_type_hint(node: Node, type_hint: Any) -> None:
from omegaconf import DictConfig, ListConfig, ValueNode
(is_optional, ref_type) = _resolve_optional(type_hint)
vk = get_value_kind(node)
if node._is_none():
if (not is_optional):
value = _get_value(node)
raise ValidationError((f'Value {value!r} ({type(value).__name__})' + f" is incompatible with type hint '{ref_type.__name__}'"))
return
elif (vk in (ValueKind.MANDATORY_MISSING, ValueKind.INTERPOLATION)):
return
elif (vk == ValueKind.VALUE):
if (is_primitive_type(ref_type) and isinstance(node, ValueNode)):
value = node._value()
if (not isinstance(value, ref_type)):
raise ValidationError((f'Value {value!r} ({type(value).__name__})' + f" is incompatible with type hint '{ref_type.__name__}'"))
elif (is_structured_config(ref_type) and isinstance(node, DictConfig)):
return
elif (is_dict_annotation(ref_type) and isinstance(node, DictConfig)):
return
elif (is_list_annotation(ref_type) and isinstance(node, ListConfig)):
return
elif isinstance(node, ValueNode):
value = node._value()
raise ValidationError((f'Value {value!r} ({type(value).__name__})' + f" is incompatible with type hint '{ref_type}'"))
else:
raise ValidationError((f"'{type(node).__name__}' is incompatible" + f" with type hint '{ref_type}'"))
else:
assert False |
def _resolve_with_default(self, key: Union[(DictKeyType, int)], value: Node, default_value: Any=_DEFAULT_MARKER_) -> Any:
"returns the value with the specified key, like obj.key and obj['key']"
if _is_missing_value(value):
if (default_value is not _DEFAULT_MARKER_):
return default_value
raise MissingMandatoryValue('Missing mandatory value: $FULL_KEY')
resolved_node = self._maybe_resolve_interpolation(parent=self, key=key, value=value, throw_on_resolution_failure=True)
return _get_value(resolved_node) | -344,718,408,022,932,300 | returns the value with the specified key, like obj.key and obj['key'] | omegaconf/basecontainer.py | _resolve_with_default | gwenzek/omegaconf | python | def _resolve_with_default(self, key: Union[(DictKeyType, int)], value: Node, default_value: Any=_DEFAULT_MARKER_) -> Any:
if _is_missing_value(value):
if (default_value is not _DEFAULT_MARKER_):
return default_value
raise MissingMandatoryValue('Missing mandatory value: $FULL_KEY')
resolved_node = self._maybe_resolve_interpolation(parent=self, key=key, value=value, throw_on_resolution_failure=True)
return _get_value(resolved_node) |
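
The missing-value branch above is what makes `DictConfig.get(key, default)` total: a supplied default short-circuits the `MissingMandatoryValue` error. A short sketch through the public API:

```python
# Sketch of the behavior _resolve_with_default implements.
from omegaconf import OmegaConf, MissingMandatoryValue

cfg = OmegaConf.create({"host": "???"})  # "???" marks a mandatory missing value
try:
    _ = cfg.host                         # no default supplied -> raises
except MissingMandatoryValue:
    print("host is mandatory and missing")
print(cfg.get("host", "localhost"))      # default supplied -> localhost
```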
def is_empty(self) -> bool:
'return true if config is empty'
return (len(self.__dict__['_content']) == 0) | 8,758,070,016,679,801,000 | return true if config is empty | omegaconf/basecontainer.py | is_empty | gwenzek/omegaconf | python | def is_empty(self) -> bool:
return (len(self.__dict__['_content']) == 0) |
@staticmethod
def _map_merge(dest: 'BaseContainer', src: 'BaseContainer') -> None:
'merge src into dest in place; dest is mutated and src is left unmodified'
from omegaconf import AnyNode, DictConfig, ValueNode
assert isinstance(dest, DictConfig)
assert isinstance(src, DictConfig)
src_type = src._metadata.object_type
src_ref_type = get_ref_type(src)
assert (src_ref_type is not None)
if (src._is_none() or src._is_interpolation()):
dest._set_value(src._value())
_update_types(node=dest, ref_type=src_ref_type, object_type=src_type)
return
dest._validate_merge(value=src)
def expand(node: Container) -> None:
rt = node._metadata.ref_type
val: Any
if (rt is not Any):
if is_dict_annotation(rt):
val = {}
elif (is_list_annotation(rt) or is_tuple_annotation(rt)):
val = []
else:
val = rt
elif isinstance(node, DictConfig):
val = {}
else:
assert False
node._set_value(val)
if (src._is_missing() and (not dest._is_missing()) and is_structured_config(src_ref_type)):
src = _create_structured_with_missing_fields(ref_type=src_ref_type, object_type=src_type)
if ((dest._is_interpolation() or dest._is_missing()) and (not src._is_missing())):
expand(dest)
src_items = (src.items_ex(resolve=False) if (not src._is_missing()) else [])
for (key, src_value) in src_items:
src_node = src._get_node(key, validate_access=False)
dest_node = dest._get_node(key, validate_access=False)
assert ((src_node is None) or isinstance(src_node, Node))
assert ((dest_node is None) or isinstance(dest_node, Node))
if isinstance(dest_node, DictConfig):
dest_node._validate_merge(value=src_node)
missing_src_value = _is_missing_value(src_value)
if (isinstance(dest_node, Container) and dest_node._is_none() and (not missing_src_value) and (not _is_none(src_value, resolve=True))):
expand(dest_node)
if ((dest_node is not None) and dest_node._is_interpolation()):
target_node = dest_node._maybe_dereference_node()
if isinstance(target_node, Container):
dest[key] = target_node
dest_node = dest._get_node(key)
(is_optional, et) = _resolve_optional(dest._metadata.element_type)
if ((dest_node is None) and is_structured_config(et) and (not missing_src_value)):
dest[key] = DictConfig(et, parent=dest, ref_type=et, is_optional=is_optional)
dest_node = dest._get_node(key)
if (dest_node is not None):
if isinstance(dest_node, BaseContainer):
if isinstance(src_value, BaseContainer):
dest_node._merge_with(src_value)
elif (not missing_src_value):
dest.__setitem__(key, src_value)
elif isinstance(src_value, BaseContainer):
dest.__setitem__(key, src_value)
else:
assert isinstance(dest_node, ValueNode)
assert isinstance(src_node, ValueNode)
src_node_missing = _is_missing_literal(src_value)
try:
if isinstance(dest_node, AnyNode):
if src_node_missing:
node = copy.copy(src_node)
node._set_value(dest_node._value())
else:
node = src_node
dest.__setitem__(key, node)
elif (not src_node_missing):
dest_node._set_value(src_value)
except (ValidationError, ReadonlyConfigError) as e:
dest._format_and_raise(key=key, value=src_value, cause=e)
else:
from omegaconf import open_dict
if is_structured_config(src_type):
with open_dict(dest):
dest[key] = src._get_node(key)
else:
dest[key] = src._get_node(key)
_update_types(node=dest, ref_type=src_ref_type, object_type=src_type)
flags = src._metadata.flags
assert (flags is not None)
for (flag, value) in flags.items():
if (value is not None):
dest._set_flag(flag, value) | -7,220,556,726,557,930,000 | merge src into dest in place; dest is mutated and src is left unmodified | omegaconf/basecontainer.py | _map_merge | gwenzek/omegaconf | python | @staticmethod
def _map_merge(dest: 'BaseContainer', src: 'BaseContainer') -> None:
from omegaconf import AnyNode, DictConfig, ValueNode
assert isinstance(dest, DictConfig)
assert isinstance(src, DictConfig)
src_type = src._metadata.object_type
src_ref_type = get_ref_type(src)
assert (src_ref_type is not None)
if (src._is_none() or src._is_interpolation()):
dest._set_value(src._value())
_update_types(node=dest, ref_type=src_ref_type, object_type=src_type)
return
dest._validate_merge(value=src)
def expand(node: Container) -> None:
rt = node._metadata.ref_type
val: Any
if (rt is not Any):
if is_dict_annotation(rt):
val = {}
elif (is_list_annotation(rt) or is_tuple_annotation(rt)):
val = []
else:
val = rt
elif isinstance(node, DictConfig):
val = {}
else:
assert False
node._set_value(val)
if (src._is_missing() and (not dest._is_missing()) and is_structured_config(src_ref_type)):
src = _create_structured_with_missing_fields(ref_type=src_ref_type, object_type=src_type)
if ((dest._is_interpolation() or dest._is_missing()) and (not src._is_missing())):
expand(dest)
src_items = (src.items_ex(resolve=False) if (not src._is_missing()) else [])
for (key, src_value) in src_items:
src_node = src._get_node(key, validate_access=False)
dest_node = dest._get_node(key, validate_access=False)
assert ((src_node is None) or isinstance(src_node, Node))
assert ((dest_node is None) or isinstance(dest_node, Node))
if isinstance(dest_node, DictConfig):
dest_node._validate_merge(value=src_node)
missing_src_value = _is_missing_value(src_value)
if (isinstance(dest_node, Container) and dest_node._is_none() and (not missing_src_value) and (not _is_none(src_value, resolve=True))):
expand(dest_node)
if ((dest_node is not None) and dest_node._is_interpolation()):
target_node = dest_node._maybe_dereference_node()
if isinstance(target_node, Container):
dest[key] = target_node
dest_node = dest._get_node(key)
(is_optional, et) = _resolve_optional(dest._metadata.element_type)
if ((dest_node is None) and is_structured_config(et) and (not missing_src_value)):
dest[key] = DictConfig(et, parent=dest, ref_type=et, is_optional=is_optional)
dest_node = dest._get_node(key)
if (dest_node is not None):
if isinstance(dest_node, BaseContainer):
if isinstance(src_value, BaseContainer):
dest_node._merge_with(src_value)
elif (not missing_src_value):
dest.__setitem__(key, src_value)
elif isinstance(src_value, BaseContainer):
dest.__setitem__(key, src_value)
else:
assert isinstance(dest_node, ValueNode)
assert isinstance(src_node, ValueNode)
src_node_missing = _is_missing_literal(src_value)
try:
if isinstance(dest_node, AnyNode):
if src_node_missing:
node = copy.copy(src_node)
node._set_value(dest_node._value())
else:
node = src_node
dest.__setitem__(key, node)
elif (not src_node_missing):
dest_node._set_value(src_value)
except (ValidationError, ReadonlyConfigError) as e:
dest._format_and_raise(key=key, value=src_value, cause=e)
else:
from omegaconf import open_dict
if is_structured_config(src_type):
with open_dict(dest):
dest[key] = src._get_node(key)
else:
dest[key] = src._get_node(key)
_update_types(node=dest, ref_type=src_ref_type, object_type=src_type)
flags = src._metadata.flags
assert (flags is not None)
for (flag, value) in flags.items():
if (value is not None):
dest._set_flag(flag, value) |
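
A behavioral sketch of the map merge above, driven through `OmegaConf.merge` (the public entry point; note that `merge` copies its inputs, whereas `_map_merge` itself mutates `dest`):

```python
# Recursive dict merge: src values override dest, new keys are added.
from omegaconf import OmegaConf

dest = OmegaConf.create({"model": {"lr": 0.1, "depth": 50}})
src = OmegaConf.create({"model": {"lr": 0.01}, "seed": 7})
merged = OmegaConf.merge(dest, src)
print(merged.model.lr, merged.model.depth, merged.seed)  # 0.01 50 7
```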
def _set_item_impl(self, key: Any, value: Any) -> None:
"\n Changes the value of the node key with the desired value. If the node key doesn't\n exist, it creates a new one.\n "
from .nodes import AnyNode, ValueNode
if isinstance(value, Node):
do_deepcopy = (not self._get_flag('no_deepcopy_set_nodes'))
if ((not do_deepcopy) and isinstance(value, Container)):
if (self._get_root() is value._get_root()):
do_deepcopy = True
if do_deepcopy:
value = copy.deepcopy(value)
value._set_parent(None)
try:
old = value._key()
value._set_key(key)
self._validate_set(key, value)
finally:
value._set_key(old)
else:
self._validate_set(key, value)
if self._get_flag('readonly'):
raise ReadonlyConfigError('Cannot change read-only config container')
input_is_node = isinstance(value, Node)
target_node_ref = self._get_node(key)
input_is_typed_vnode = (isinstance(value, ValueNode) and (not isinstance(value, AnyNode)))
target_is_vnode = isinstance(target_node_ref, ValueNode)
def get_target_type_hint(val: Any) -> Any:
if (not is_structured_config(val)):
type_hint = self._metadata.element_type
else:
target = self._get_node(key)
if (target is None):
type_hint = self._metadata.element_type
else:
assert isinstance(target, Node)
type_hint = target._metadata.type_hint
return type_hint
def assign(value_key: Any, val: Node) -> None:
assert (val._get_parent() is None)
v = val
v._set_parent(self)
v._set_key(value_key)
_deep_update_type_hint(node=v, type_hint=self._metadata.element_type)
self.__dict__['_content'][value_key] = v
if input_is_typed_vnode:
assign(key, value)
else:
special_value = _is_special(value)
type_hint = get_target_type_hint(value)
should_set_value = ((target_node_ref is not None) and (special_value or (isinstance(target_node_ref, Container) and target_node_ref._has_ref_type()) or (target_is_vnode and (not isinstance(target_node_ref, AnyNode))) or (isinstance(target_node_ref, AnyNode) and is_primitive_type(value))))
if should_set_value:
if (special_value and isinstance(value, Node)):
value = value._value()
self.__dict__['_content'][key]._set_value(value)
elif input_is_node:
(_, ref_type) = _resolve_optional(type_hint)
if (special_value and (is_container_annotation(ref_type) or is_structured_config(ref_type))):
self._wrap_value_and_set(key, value._value(), type_hint)
else:
assign(key, value)
else:
self._wrap_value_and_set(key, value, type_hint) | 5,195,672,649,291,748,000 | Changes the value of the node key with the desired value. If the node key doesn't
exist, it creates a new one. | omegaconf/basecontainer.py | _set_item_impl | gwenzek/omegaconf | python | def _set_item_impl(self, key: Any, value: Any) -> None:
"\n Changes the value of the node key with the desired value. If the node key doesn't\n exist, it creates a new one.\n "
from .nodes import AnyNode, ValueNode
if isinstance(value, Node):
do_deepcopy = (not self._get_flag('no_deepcopy_set_nodes'))
if ((not do_deepcopy) and isinstance(value, Container)):
if (self._get_root() is value._get_root()):
do_deepcopy = True
if do_deepcopy:
value = copy.deepcopy(value)
value._set_parent(None)
try:
old = value._key()
value._set_key(key)
self._validate_set(key, value)
finally:
value._set_key(old)
else:
self._validate_set(key, value)
if self._get_flag('readonly'):
raise ReadonlyConfigError('Cannot change read-only config container')
input_is_node = isinstance(value, Node)
target_node_ref = self._get_node(key)
input_is_typed_vnode = (isinstance(value, ValueNode) and (not isinstance(value, AnyNode)))
target_is_vnode = isinstance(target_node_ref, ValueNode)
def get_target_type_hint(val: Any) -> Any:
if (not is_structured_config(val)):
type_hint = self._metadata.element_type
else:
target = self._get_node(key)
if (target is None):
type_hint = self._metadata.element_type
else:
assert isinstance(target, Node)
type_hint = target._metadata.type_hint
return type_hint
def assign(value_key: Any, val: Node) -> None:
assert (val._get_parent() is None)
v = val
v._set_parent(self)
v._set_key(value_key)
_deep_update_type_hint(node=v, type_hint=self._metadata.element_type)
self.__dict__['_content'][value_key] = v
if input_is_typed_vnode:
assign(key, value)
else:
special_value = _is_special(value)
type_hint = get_target_type_hint(value)
should_set_value = ((target_node_ref is not None) and (special_value or (isinstance(target_node_ref, Container) and target_node_ref._has_ref_type()) or (target_is_vnode and (not isinstance(target_node_ref, AnyNode))) or (isinstance(target_node_ref, AnyNode) and is_primitive_type(value))))
if should_set_value:
if (special_value and isinstance(value, Node)):
value = value._value()
self.__dict__['_content'][key]._set_value(value)
elif input_is_node:
(_, ref_type) = _resolve_optional(type_hint)
if (special_value and (is_container_annotation(ref_type) or is_structured_config(ref_type))):
self._wrap_value_and_set(key, value._value(), type_hint)
else:
assign(key, value)
else:
self._wrap_value_and_set(key, value, type_hint) |
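
The docstring above describes `__setitem__` behavior; a short sketch of both branches (updating an existing key and creating a new node) through the public API:

```python
# Setting keys on a DictConfig routes through _set_item_impl.
from omegaconf import OmegaConf

cfg = OmegaConf.create({"a": 1})
cfg["b"] = {"c": 2}    # key doesn't exist -> a new node is created
cfg["a"] = 10          # key exists -> its value is updated
print(cfg.b.c, cfg.a)  # 2 10
```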
def profile_config_file(binary_path: PathLike, config_path: PathLike, output_config_path: PathLike, progress_bar: bool=True, profile_filename: str='profile_info.txt', qos_filename: str='final_accuracy') -> None:
'Profile an HPVM configuration file with an HPVM binary,\n and write the updated configuration file to a given location.\n The configuration file must have the baseline as the first configuration.\n\n :param binary_path: Path to binary to be executed in profiling.\n :param config_path: Path to config file (HPVM configuration format)\n with configs to enumerate for profiling.\n :param output_config_path: Path where the output configs are written.\n The output config file has the same configs as the input `config_path` file,\n but the performance and energy readings are updated.\n :param progress_bar: If `True`, show a progress bar for number of configs already profiled.\n :param profile_filename: Name of profile file generated by the binary (in current directory).\n This defaults to "profile_info.txt" and should not be changed for HPVM binaries.\n :param qos_filename: Name of QoS file generated by the binary (in current directory).\n It contains a single float number as the QoS of this run.\n This defaults to "final_accuracy" and should not be changed for HPVM binaries.\n '
(header, configs) = read_hpvm_configs(Path(config_path))
if (not configs):
raise ValueError('Config file with no configs is unsupported.')
profile_configs(binary_path, configs[1:], configs[0], progress_bar, profile_filename, qos_filename)
write_hpvm_configs(header, configs, Path(output_config_path)) | -4,053,751,064,055,455,000 | Profile an HPVM configuration file with an HPVM binary,
and write the updated configuration file to a given location.
The configuration file must have the baseline as the first configuration.
:param binary_path: Path to binary to be executed in profiling.
:param config_path: Path to config file (HPVM configuration format)
with configs to enumerate for profiling.
:param output_config_path: Path where the output configs are written.
The output config file has the same configs as the input `config_path` file,
but the performance and energy readings are updated.
:param progress_bar: If `True`, show a progress bar for number of configs already profiled.
:param profile_filename: Name of profile file generated by the binary (in current directory).
This defaults to "profile_info.txt" and should not be changed for HPVM binaries.
:param qos_filename: Name of QoS file generated by the binary (in current directory).
It contains a single float number as the QoS of this run.
This defaults to "final_accuracy" and should not be changed for HPVM binaries. | hpvm/projects/hpvm-profiler/hpvm_profiler/__init__.py | profile_config_file | vzyrianov/hpvm-autograd | python | def profile_config_file(binary_path: PathLike, config_path: PathLike, output_config_path: PathLike, progress_bar: bool=True, profile_filename: str='profile_info.txt', qos_filename: str='final_accuracy') -> None:
'Profile an HPVM configuration file with an HPVM binary,\n and write the updated configuration file to a given location.\n The configuration file must have the baseline as the first configuration.\n\n :param binary_path: Path to binary to be executed in profiling.\n :param config_path: Path to config file (HPVM configuration format)\n with configs to enumerate for profiling.\n :param output_config_path: Path where the output configs are written.\n The output config file has the same configs as the input `config_path` file,\n but the performance and energy readings are updated.\n :param progress_bar: If `True`, show a progress bar for number of configs already profiled.\n :param profile_filename: Name of profile file generated by the binary (in current directory).\n This defaults to "profile_info.txt" and should not be changed for HPVM binaries.\n :param qos_filename: Name of QoS file generated by the binary (in current directory).\n It contains a single float number as the QoS of this run.\n This defaults to "final_accuracy" and should not be changed for HPVM binaries.\n '
(header, configs) = read_hpvm_configs(Path(config_path))
if (not configs):
raise ValueError('Config file with no configs is unsupported.')
profile_configs(binary_path, configs[1:], configs[0], progress_bar, profile_filename, qos_filename)
write_hpvm_configs(header, configs, Path(output_config_path)) |
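
A usage sketch for `profile_config_file`; the binary and config paths are placeholders, and per the docstring the first config in the input file must be the baseline.

```python
# Hypothetical profiling run over an HPVM config file.
from hpvm_profiler import profile_config_file

profile_config_file(
    binary_path="./alexnet_cifar10",               # HPVM-compiled binary (placeholder)
    config_path="data/tuner_confs.txt",            # baseline config must come first
    output_config_path="data/tuner_confs_out.txt", # same configs, updated readings
    progress_bar=True,
)
```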
def profile_configs(binary_path: PathLike, configs: Iterable['Config'], baseline_config: 'Config', progress_bar: bool=True, profile_filename: str='profile_info.txt', qos_filename: str='final_accuracy') -> None:
'Profile a sequence of HPVM configs.\n This function modifies argument `configs` in place.'
from tqdm import tqdm
(baseline_time, baseline_acc) = measure_config(binary_path, baseline_config)
iterable = (tqdm(configs, desc='Configs profiled') if progress_bar else configs)
for config in iterable:
(time, acc) = measure_config(binary_path, config, profile_filename, qos_filename)
speedup = (baseline_time / time)
config.update_profile_results(speedup, acc, baseline_acc)
return configs | 7,706,032,283,908,009,000 | Profile a sequence of HPVM configs.
This function modifies argument `configs` in place. | hpvm/projects/hpvm-profiler/hpvm_profiler/__init__.py | profile_configs | vzyrianov/hpvm-autograd | python | def profile_configs(binary_path: PathLike, configs: Iterable['Config'], baseline_config: 'Config', progress_bar: bool=True, profile_filename: str='profile_info.txt', qos_filename: str='final_accuracy') -> None:
'Profile a sequence of HPVM configs.\n This function modifies argument `configs` in place.'
from tqdm import tqdm
(baseline_time, baseline_acc) = measure_config(binary_path, baseline_config)
iterable = (tqdm(configs, desc='Configs profiled') if progress_bar else configs)
for config in iterable:
(time, acc) = measure_config(binary_path, config, profile_filename, qos_filename)
speedup = (baseline_time / time)
config.update_profile_results(speedup, acc, baseline_acc)
return configs |
def plot_hpvm_configs(config_path: PathLike, save_to: PathLike=None, show_qos_loss: bool=True, **fig_kwargs) -> plt.Figure:
"\n Plot the QoS-speedup information in an HPVM configuration file.\n It is recommended to profile the config file first (using `profile_configs`)\n to obtain real speedup numbers.\n This function creates a `matplotlib.pyplot.Figure`, plots on it, and returns it.\n\n :param config_path: Path to the config file (HPVM configuration format).\n :param save_to: File to save figure into. Default is None: don't save figure (just return it).\n :param show_qos_loss: Show the loss of QoS on x axis of the figure. Defaults to True.\n If False, will use (absolute) QoS instead of QoS loss.\n :param fig_kwargs: Arguments to pass to `plt.subplots`.\n "
import numpy as np
(_, configs) = read_hpvm_configs(config_path)
get_qos = (lambda c: (c.qos_loss if show_qos_loss else c.qos))
qos_speedup = np.array([(get_qos(c), c.speedup) for c in configs])
(qoses, speedups) = qos_speedup.T
(fig, ax) = plt.subplots(**fig_kwargs)
ax.scatter(qoses, speedups)
ax.set_xlabel('QoS Loss')
ax.set_ylabel('Speedup (X)')
if save_to:
fig.savefig(save_to, dpi=300)
return fig | -218,414,508,651,769,120 | Plot the QoS-speedup information in an HPVM configuration file.
It is recommended to profile the config file first (using `profile_configs`)
to obtain real speedup numbers.
This function creates a `matplotlib.pyplot.Figure`, plots on it, and returns it.
:param config_path: Path to the config file (HPVM configuration format).
:param save_to: File to save figure into. Default is None: don't save figure (just return it).
:param show_qos_loss: Show the loss of QoS on x axis of the figure. Defaults to True.
If False, will use (absolute) QoS instead of QoS loss.
:param fig_kwargs: Arguments to pass to `plt.subplots`. | hpvm/projects/hpvm-profiler/hpvm_profiler/__init__.py | plot_hpvm_configs | vzyrianov/hpvm-autograd | python | def plot_hpvm_configs(config_path: PathLike, save_to: PathLike=None, show_qos_loss: bool=True, **fig_kwargs) -> plt.Figure:
"\n Plot the QoS-speedup information in an HPVM configuration file.\n It is recommended to profile the config file first (using `profile_configs`)\n to obtain real speedup numbers.\n This function creates a `matplotlib.pyplot.Figure`, plots on it, and returns it.\n\n :param config_path: Path to the config file (HPVM configuration format).\n :param save_to: File to save figure into. Default is None: don't save figure (just return it).\n :param show_qos_loss: Show the loss of QoS on x axis of the figure. Defaults to True.\n If False, will use (absolute) QoS instead of QoS loss.\n :param fig_kwargs: Arguments to pass to `plt.subplots`.\n "
import numpy as np
(_, configs) = read_hpvm_configs(config_path)
get_qos = (lambda c: (c.qos_loss if show_qos_loss else c.qos))
qos_speedup = np.array([(get_qos(c), c.speedup) for c in configs])
(qoses, speedups) = qos_speedup.T
(fig, ax) = plt.subplots(**fig_kwargs)
ax.scatter(qoses, speedups)
ax.set_xlabel('QoS Loss')
ax.set_ylabel('Speedup (X)')
if save_to:
fig.savefig(save_to, dpi=300)
return fig |
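A usage sketch for plot_hpvm_configs, assuming the config file has already been run through profile_configs so the speedup numbers are real; figsize is forwarded to plt.subplots via **fig_kwargs:

fig = plot_hpvm_configs('tuner_confs.txt', save_to='tradeoff.png', show_qos_loss=True, figsize=(6, 4))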
def make_test_file(path: str, file_size=(- 1), file_name='') -> RetVal:
'Generate a test file containing nothing but zeroes. If the file size is negative, a random \n\tsize between 1 and 10 KiB will be chosen. If the file name is empty, a random one will be \n\tgenerated.\n\n\tReturns:\n\t\tname: (str) name of the test file generated\n\t\tsize: (int) size of the test file generated\n\t'
if (file_size < 0):
file_size = (random.randint(1, 10) * 1024)
if ((file_name == '') or (not file_name)):
file_name = f'{int(time.time())}.{file_size}.{str(uuid.uuid4())}'
try:
fhandle = open(os.path.join(path, file_name), 'w')
except Exception as e:
return RetVal().wrap_exception(e)
fhandle.write(('0' * file_size))
fhandle.close()
return RetVal().set_values({'name': file_name, 'size': file_size}) | -5,435,547,621,792,436,000 | Generate a test file containing nothing but zeroes. If the file size is negative, a random
size between 1 and 10 KiB will be chosen. If the file name is empty, a random one will be
generated.
Returns:
name: (str) name of the test file generated
size: (int) size of the test file generated | tests/integration/test_fscmds.py | make_test_file | mensago/mensagod | python | def make_test_file(path: str, file_size=(- 1), file_name='') -> RetVal:
'Generate a test file containing nothing but zeroes. If the file size is negative, a random \n\tsize between 1 and 10 KiB will be chosen. If the file name is empty, a random one will be \n\tgenerated.\n\n\tReturns:\n\t\tname: (str) name of the test file generated\n\t\tsize: (int) size of the test file generated\n\t'
if (file_size < 0):
file_size = (random.randint(1, 10) * 1024)
if ((file_name == '') or (not file_name)):
file_name = f'{int(time.time())}.{file_size}.{str(uuid.uuid4())}'
try:
fhandle = open(os.path.join(path, file_name), 'w')
except Exception as e:
return RetVal().wrap_exception(e)
fhandle.write(('0' * file_size))
fhandle.close()
return RetVal().set_values({'name': file_name, 'size': file_size}) |
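A usage sketch for make_test_file; RetVal supports error() and item access as shown in the record above, and the directory is arbitrary:

import tempfile

status = make_test_file(tempfile.gettempdir(), file_size=2048)
if status.error():
    raise RuntimeError('test file creation failed')
print(status['name'], status['size'])  # e.g. '1700000000.2048.<uuid>' 2048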
def setup_testdir(name) -> str:
'Creates a test folder for holding files'
topdir = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'testfiles')
if (not os.path.exists(topdir)):
os.mkdir(topdir)
testdir = os.path.join(topdir, name)
while os.path.exists(testdir):
try:
shutil.rmtree(testdir)
except:
print('Waiting a second for test folder to unlock')
time.sleep(1.0)
os.mkdir(testdir)
return testdir | 1,744,735,025,659,953,700 | Creates a test folder for holding files | tests/integration/test_fscmds.py | setup_testdir | mensago/mensagod | python | def setup_testdir(name) -> str:
topdir = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'testfiles')
if (not os.path.exists(topdir)):
os.mkdir(topdir)
testdir = os.path.join(topdir, name)
while os.path.exists(testdir):
try:
shutil.rmtree(testdir)
except:
print('Waiting a second for test folder to unlock')
time.sleep(1.0)
os.mkdir(testdir)
return testdir |
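The two helpers compose naturally; a short sketch (the folder name is arbitrary):

testdir = setup_testdir('fscmds_demo')
status = make_test_file(testdir, file_size=512)
assert not status.error(), 'could not populate test directory'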
def test_copy():
'Tests the COPY command'
dbconn = setup_test()
dbdata = init_server(dbconn)
conn = ServerConnection()
assert conn.connect('localhost', 2001), 'Connection to server at localhost:2001 failed'
reset_top_dir(dbdata)
pwhash = '$argon2id$v=19$m=65536,t=2,p=1$ew5lqHA5z38za+257DmnTA$0LWVrI2r7XCqdcCYkJLok65qussSyhN5TTZP+OTgzEI'
devid = '22222222-2222-2222-2222-222222222222'
devpair = EncryptionPair(CryptoString('CURVE25519:@X~msiMmBq0nsNnn0%~x{M|NU_{?<Wj)cYybdh&Z'), CryptoString('CURVE25519:W30{oJ?w~NBbj{F8Ag4~<bcWy6_uQ{i{X?NDq4^l'))
dbdata['pwhash'] = pwhash
dbdata['devid'] = devid
dbdata['devpair'] = devpair
regcode_admin(dbdata, conn)
login_admin(dbdata, conn)
admin_dir = os.path.join(dbdata['configfile']['global']['workspace_dir'], dbdata['admin_wid'])
inner_dir = os.path.join(admin_dir, '11111111-1111-1111-1111-111111111111')
os.mkdir(inner_dir)
conn.send_message({'Action': 'COPY', 'Data': {'SourceFile': (('/ wsp ' + dbdata['admin_wid']) + ' 1.1.01234567-89ab-cdef-0123-456789abcdef'), 'DestDir': (('/ wsp ' + dbdata['admin_wid']) + ' 11111111-1111-1111-1111-111111111111')}})
response = conn.read_response(server_response)
assert (response['Code'] == 404), 'test_copy: #1 failed to handle nonexistent source file'
status = make_test_file(admin_dir, file_size=1048577)
assert (not status.error()), 'test_copy: #2 failed to create a test file'
testfile1 = status['name']
conn.send_message({'Action': 'COPY', 'Data': {'SourceFile': f"/ wsp {dbdata['admin_wid']} {testfile1}", 'DestDir': f"/ wsp {dbdata['admin_wid']} 22222222-2222-2222-2222-222222222222"}})
response = conn.read_response(server_response)
assert (response['Code'] == 404), 'test_copy: #2 failed to handle nonexistent destination dir'
conn.send_message({'Action': 'COPY', 'Data': {'SourceFile': f"/ wsp {dbdata['admin_wid']}", 'DestDir': f"/ wsp {dbdata['admin_wid']} 11111111-1111-1111-1111-111111111111"}})
response = conn.read_response(server_response)
assert (response['Code'] == 400), 'test_copy: #3 failed to handle directory as source'
status = make_test_file(inner_dir, 102400, testfile1)
conn.send_message({'Action': 'COPY', 'Data': {'SourceFile': f"/ wsp {dbdata['admin_wid']} {testfile1}", 'DestDir': f"/ wsp {dbdata['admin_wid']} 11111111-1111-1111-1111-111111111111 {testfile1}"}})
response = conn.read_response(server_response)
assert (response['Code'] == 400), 'test_copy: #4 failed to handle file as destination'
cur = dbconn.cursor()
cur.execute(f"UPDATE quotas SET quota=1 WHERE wid='{dbdata['admin_wid']}'")
dbconn.commit()
conn.send_message({'Action': 'COPY', 'Data': {'SourceFile': f"/ wsp {dbdata['admin_wid']} {testfile1}", 'DestDir': f"/ wsp {dbdata['admin_wid']} 11111111-1111-1111-1111-111111111111"}})
response = conn.read_response(server_response)
assert (response['Code'] == 409), 'test_copy: #5 failed to handle quota limit'
cur = dbconn.cursor()
cur.execute(f"UPDATE quotas SET quota=0 WHERE wid = '{dbdata['admin_wid']}'")
dbconn.commit()
conn.send_message({'Action': 'COPY', 'Data': {'SourceFile': f"/ wsp {dbdata['admin_wid']} {testfile1}", 'DestDir': f"/ wsp {dbdata['admin_wid']} 11111111-1111-1111-1111-111111111111"}})
response = conn.read_response(server_response)
assert (response['Code'] == 200), 'test_copy: #6 failed to succeed'
conn.disconnect() | 5,179,860,775,048,239,000 | Tests the COPY command | tests/integration/test_fscmds.py | test_copy | mensago/mensagod | python | def test_copy():
dbconn = setup_test()
dbdata = init_server(dbconn)
conn = ServerConnection()
assert conn.connect('localhost', 2001), 'Connection to server at localhost:2001 failed'
reset_top_dir(dbdata)
pwhash = '$argon2id$v=19$m=65536,t=2,p=1$ew5lqHA5z38za+257DmnTA$0LWVrI2r7XCqdcCYkJLok65qussSyhN5TTZP+OTgzEI'
devid = '22222222-2222-2222-2222-222222222222'
devpair = EncryptionPair(CryptoString('CURVE25519:@X~msiMmBq0nsNnn0%~x{M|NU_{?<Wj)cYybdh&Z'), CryptoString('CURVE25519:W30{oJ?w~NBbj{F8Ag4~<bcWy6_uQ{i{X?NDq4^l'))
dbdata['pwhash'] = pwhash
dbdata['devid'] = devid
dbdata['devpair'] = devpair
regcode_admin(dbdata, conn)
login_admin(dbdata, conn)
admin_dir = os.path.join(dbdata['configfile']['global']['workspace_dir'], dbdata['admin_wid'])
inner_dir = os.path.join(admin_dir, '11111111-1111-1111-1111-111111111111')
os.mkdir(inner_dir)
conn.send_message({'Action': 'COPY', 'Data': {'SourceFile': (('/ wsp ' + dbdata['admin_wid']) + ' 1.1.01234567-89ab-cdef-0123-456789abcdef'), 'DestDir': (('/ wsp ' + dbdata['admin_wid']) + ' 11111111-1111-1111-1111-111111111111')}})
response = conn.read_response(server_response)
assert (response['Code'] == 404), 'test_copy: #1 failed to handle nonexistent source file'
status = make_test_file(admin_dir, file_size=1048577)
assert (not status.error()), 'test_copy: #2 failed to create a test file'
testfile1 = status['name']
conn.send_message({'Action': 'COPY', 'Data': {'SourceFile': f"/ wsp {dbdata['admin_wid']} {testfile1}", 'DestDir': f"/ wsp {dbdata['admin_wid']} 22222222-2222-2222-2222-222222222222"}})
response = conn.read_response(server_response)
assert (response['Code'] == 404), 'test_copy: #2 failed to handle nonexistent destination dir'
conn.send_message({'Action': 'COPY', 'Data': {'SourceFile': f"/ wsp {dbdata['admin_wid']}", 'DestDir': f"/ wsp {dbdata['admin_wid']} 11111111-1111-1111-1111-111111111111"}})
response = conn.read_response(server_response)
assert (response['Code'] == 400), 'test_copy: #3 failed to handle directory as source'
status = make_test_file(inner_dir, 102400, testfile1)
conn.send_message({'Action': 'COPY', 'Data': {'SourceFile': f"/ wsp {dbdata['admin_wid']} {testfile1}", 'DestDir': f"/ wsp {dbdata['admin_wid']} 11111111-1111-1111-1111-111111111111 {testfile1}"}})
response = conn.read_response(server_response)
assert (response['Code'] == 400), 'test_copy: #4 failed to handle file as destination'
cur = dbconn.cursor()
cur.execute(f"UPDATE quotas SET quota=1 WHERE wid='{dbdata['admin_wid']}'")
dbconn.commit()
conn.send_message({'Action': 'COPY', 'Data': {'SourceFile': f"/ wsp {dbdata['admin_wid']} {testfile1}", 'DestDir': f"/ wsp {dbdata['admin_wid']} 11111111-1111-1111-1111-111111111111"}})
response = conn.read_response(server_response)
assert (response['Code'] == 409), 'test_copy: #5 failed to handle quota limit'
cur = dbconn.cursor()
cur.execute(f"UPDATE quotas SET quota=0 WHERE wid = '{dbdata['admin_wid']}'")
dbconn.commit()
conn.send_message({'Action': 'COPY', 'Data': {'SourceFile': f"/ wsp {dbdata['admin_wid']} {testfile1}", 'DestDir': f"/ wsp {dbdata['admin_wid']} 11111111-1111-1111-1111-111111111111"}})
response = conn.read_response(server_response)
assert (response['Code'] == 200), 'test_copy: #6 failed to succeed'
conn.disconnect() |
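The tests address server-side entries with space-separated paths of the form '/ wsp <workspace-id> <dir-id> ... <filename>'. A hypothetical helper (not part of the test module) makes the convention explicit:

def wsp_path(wid: str, *entries: str) -> str:
    # e.g. wsp_path(wid, '11111111-1111-1111-1111-111111111111', '1000.500.<uuid>')
    return ' '.join(['/ wsp', wid, *entries])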
def test_delete():
'Test the DELETE command'
dbconn = setup_test()
dbdata = init_server(dbconn)
conn = ServerConnection()
assert conn.connect('localhost', 2001), 'Connection to server at localhost:2001 failed'
reset_top_dir(dbdata)
pwhash = '$argon2id$v=19$m=65536,t=2,p=1$ew5lqHA5z38za+257DmnTA$0LWVrI2r7XCqdcCYkJLok65qussSyhN5TTZP+OTgzEI'
devid = '22222222-2222-2222-2222-222222222222'
devpair = EncryptionPair(CryptoString('CURVE25519:@X~msiMmBq0nsNnn0%~x{M|NU_{?<Wj)cYybdh&Z'), CryptoString('CURVE25519:W30{oJ?w~NBbj{F8Ag4~<bcWy6_uQ{i{X?NDq4^l'))
dbdata['pwhash'] = pwhash
dbdata['devid'] = devid
dbdata['devpair'] = devpair
regcode_admin(dbdata, conn)
login_admin(dbdata, conn)
conn.send_message({'Action': 'DELETE', 'Data': {'Path': f"/ wsp {dbdata['admin_wid']} some_dir_name"}})
response = conn.read_response(server_response)
assert (response['Code'] == 400), f'{funcname()}: failed to handle bad path'
conn.send_message({'Action': 'DELETE', 'Data': {'Path': f"/ wsp {dbdata['admin_wid']} 1234.1234.11111111-1111-1111-1111-111111111111"}})
response = conn.read_response(server_response)
assert (response['Code'] == 404), f'{funcname()}: #2 failed to handle nonexistent file'
admin_dir = os.path.join(dbdata['configfile']['global']['workspace_dir'], dbdata['admin_wid'])
status = make_test_file(admin_dir)
assert (not status.error()), f'{funcname()}: #3 failed to create test file'
filename = status['name']
conn.send_message({'Action': 'DELETE', 'Data': {'Path': f"/ wsp {dbdata['admin_wid']} {filename}"}})
response = conn.read_response(server_response)
assert (response['Code'] == 200), f'{funcname()}: #3 failed to delete file' | -3,826,681,085,504,762,400 | Test the DELETE command | tests/integration/test_fscmds.py | test_delete | mensago/mensagod | python | def test_delete():
dbconn = setup_test()
dbdata = init_server(dbconn)
conn = ServerConnection()
assert conn.connect('localhost', 2001), 'Connection to server at localhost:2001 failed'
reset_top_dir(dbdata)
pwhash = '$argon2id$v=19$m=65536,t=2,p=1$ew5lqHA5z38za+257DmnTA$0LWVrI2r7XCqdcCYkJLok65qussSyhN5TTZP+OTgzEI'
devid = '22222222-2222-2222-2222-222222222222'
devpair = EncryptionPair(CryptoString('CURVE25519:@X~msiMmBq0nsNnn0%~x{M|NU_{?<Wj)cYybdh&Z'), CryptoString('CURVE25519:W30{oJ?w~NBbj{F8Ag4~<bcWy6_uQ{i{X?NDq4^l'))
dbdata['pwhash'] = pwhash
dbdata['devid'] = devid
dbdata['devpair'] = devpair
regcode_admin(dbdata, conn)
login_admin(dbdata, conn)
conn.send_message({'Action': 'DELETE', 'Data': {'Path': f"/ wsp {dbdata['admin_wid']} some_dir_name"}})
response = conn.read_response(server_response)
assert (response['Code'] == 400), f'{funcname()}: failed to handle bad path'
conn.send_message({'Action': 'DELETE', 'Data': {'Path': f"/ wsp {dbdata['admin_wid']} 1234.1234.11111111-1111-1111-1111-111111111111"}})
response = conn.read_response(server_response)
assert (response['Code'] == 404), f'{funcname()}: #2 failed to handle nonexistent file'
admin_dir = os.path.join(dbdata['configfile']['global']['workspace_dir'], dbdata['admin_wid'])
status = make_test_file(admin_dir)
assert (not status.error()), f'{funcname()}: #3 failed to create test file'
filename = status['name']
conn.send_message({'Action': 'DELETE', 'Data': {'Path': f"/ wsp {dbdata['admin_wid']} {filename}"}})
response = conn.read_response(server_response)
assert (response['Code'] == 200), f'{funcname()}: #3 failed to delete file' |
def test_download():
'This tests the command DOWNLOAD'
dbconn = setup_test()
dbdata = init_server(dbconn)
conn = ServerConnection()
assert conn.connect('localhost', 2001), 'Connection to server at localhost:2001 failed'
reset_top_dir(dbdata)
pwhash = '$argon2id$v=19$m=65536,t=2,p=1$ew5lqHA5z38za+257DmnTA$0LWVrI2r7XCqdcCYkJLok65qussSyhN5TTZP+OTgzEI'
devid = '22222222-2222-2222-2222-222222222222'
devpair = EncryptionPair(CryptoString('CURVE25519:@X~msiMmBq0nsNnn0%~x{M|NU_{?<Wj)cYybdh&Z'), CryptoString('CURVE25519:W30{oJ?w~NBbj{F8Ag4~<bcWy6_uQ{i{X?NDq4^l'))
dbdata['pwhash'] = pwhash
dbdata['devid'] = devid
dbdata['devpair'] = devpair
regcode_admin(dbdata, conn)
login_admin(dbdata, conn)
init_user(dbdata, conn)
conn.send_message({'Action': 'DOWNLOAD', 'Data': {}})
response = conn.read_response(server_response)
assert (response['Code'] == 400), 'test_download: #1 failed to handle missing parameter'
conn.send_message({'Action': 'DOWNLOAD', 'Data': {'Path': ((('/ wsp ' + dbdata['admin_wid']) + ' 22222222-2222-2222-2222-222222222222') + ' 1000.1000.22222222-2222-2222-2222-222222222222')}})
response = conn.read_response(server_response)
assert (response['Code'] == 404), 'test_download: #2 failed to handle non-existent path'
status = make_test_file(os.path.join(dbdata['configfile']['global']['workspace_dir'], dbdata['admin_wid']), file_size=1000)
assert (not status.error()), f'test_download: #3 failed to create test file: {status.info}'
testname = status['name']
conn.send_message({'Action': 'DOWNLOAD', 'Data': {'Path': f"/ wsp {dbdata['admin_wid']} {testname}"}})
response = conn.read_response(server_response)
assert (response['Code'] == 100), 'test_download: #3 failed to proceed to file download'
assert (('Size' in response['Data']) and (response['Data']['Size'] == '1000')), 'test_download: #3 server failed to respond with file size'
conn.send_message({'Action': 'DOWNLOAD', 'Data': {'Path': f"/ wsp {dbdata['admin_wid']} {testname}", 'Size': '1000'}})
rawdata = conn.read()
assert (len(rawdata) == 1000), 'test_download: #3 downloaded file had wrong length'
status = make_test_file(os.path.join(dbdata['configfile']['global']['workspace_dir'], dbdata['admin_wid']), file_size=1000)
assert (not status.error()), f'test_download: #4 failed to create test file: {status.info}'
testname = status['name']
conn.send_message({'Action': 'DOWNLOAD', 'Data': {'Path': f"/ wsp {dbdata['admin_wid']} {testname}", 'Offset': '2500'}})
response = conn.read_response(server_response)
assert (response['Code'] == 400), 'test_download: #4 failed to handle offset > file size'
conn.send_message({'Action': 'DOWNLOAD', 'Data': {'Path': f"/ wsp {dbdata['admin_wid']} {testname}", 'Offset': '500'}})
response = conn.read_response(server_response)
assert (response['Code'] == 100), 'test_download: #5 failed to proceed to file download'
assert (('Size' in response['Data']) and (response['Data']['Size'] == '1000')), 'test_download: #5 server failed to respond with file size'
conn.send_message({'Action': 'DOWNLOAD', 'Data': {'Path': f"/ wsp {dbdata['admin_wid']} {testname}", 'Offset': '500', 'Size': '1000'}})
rawdata = conn.read()
assert (len(rawdata) == 500), 'test_download: #5 resumed data had wrong length'
assert (blake2hash((('0' * 500) + rawdata).encode()) == 'BLAKE2B-256:4(8V*JuSdLH#SL%edxldiA<&TayrTtdIV9yiK~Tp'), 'test_download: #5 resumed file hash failure'
conn.disconnect() | 5,452,001,439,904,321,000 | This tests the command DOWNLOAD | tests/integration/test_fscmds.py | test_download | mensago/mensagod | python | def test_download():
dbconn = setup_test()
dbdata = init_server(dbconn)
conn = ServerConnection()
assert conn.connect('localhost', 2001), 'Connection to server at localhost:2001 failed'
reset_top_dir(dbdata)
pwhash = '$argon2id$v=19$m=65536,t=2,p=1$ew5lqHA5z38za+257DmnTA$0LWVrI2r7XCqdcCYkJLok65qussSyhN5TTZP+OTgzEI'
devid = '22222222-2222-2222-2222-222222222222'
devpair = EncryptionPair(CryptoString('CURVE25519:@X~msiMmBq0nsNnn0%~x{M|NU_{?<Wj)cYybdh&Z'), CryptoString('CURVE25519:W30{oJ?w~NBbj{F8Ag4~<bcWy6_uQ{i{X?NDq4^l'))
dbdata['pwhash'] = pwhash
dbdata['devid'] = devid
dbdata['devpair'] = devpair
regcode_admin(dbdata, conn)
login_admin(dbdata, conn)
init_user(dbdata, conn)
conn.send_message({'Action': 'DOWNLOAD', 'Data': {}})
response = conn.read_response(server_response)
assert (response['Code'] == 400), 'test_download: #1 failed to handle missing parameter'
conn.send_message({'Action': 'DOWNLOAD', 'Data': {'Path': ((('/ wsp ' + dbdata['admin_wid']) + ' 22222222-2222-2222-2222-222222222222') + ' 1000.1000.22222222-2222-2222-2222-222222222222')}})
response = conn.read_response(server_response)
assert (response['Code'] == 404), 'test_download: #2 failed to handle non-existent path'
status = make_test_file(os.path.join(dbdata['configfile']['global']['workspace_dir'], dbdata['admin_wid']), file_size=1000)
assert (not status.error()), f'test_download: #3 failed to create test file: {status.info}'
testname = status['name']
conn.send_message({'Action': 'DOWNLOAD', 'Data': {'Path': f"/ wsp {dbdata['admin_wid']} {testname}"}})
response = conn.read_response(server_response)
assert (response['Code'] == 100), 'test_download: #3 failed to proceed to file download'
assert (('Size' in response['Data']) and (response['Data']['Size'] == '1000')), 'test_download: #3 server failed to respond with file size'
conn.send_message({'Action': 'DOWNLOAD', 'Data': {'Path': f"/ wsp {dbdata['admin_wid']} {testname}", 'Size': '1000'}})
rawdata = conn.read()
assert (len(rawdata) == 1000), 'test_download: #3 downloaded file had wrong length'
status = make_test_file(os.path.join(dbdata['configfile']['global']['workspace_dir'], dbdata['admin_wid']), file_size=1000)
assert (not status.error()), f'test_download: #4 failed to create test file: {status.info}'
testname = status['name']
conn.send_message({'Action': 'DOWNLOAD', 'Data': {'Path': f"/ wsp {dbdata['admin_wid']} {testname}", 'Offset': '2500'}})
response = conn.read_response(server_response)
assert (response['Code'] == 400), 'test_download: #4 failed to handle offset > file size'
conn.send_message({'Action': 'DOWNLOAD', 'Data': {'Path': f"/ wsp {dbdata['admin_wid']} {testname}", 'Offset': '500'}})
response = conn.read_response(server_response)
assert (response['Code'] == 100), 'test_download: #5 failed to proceed to file download'
assert (('Size' in response['Data']) and (response['Data']['Size'] == '1000')), 'test_download: #5 server failed to respond with file size'
conn.send_message({'Action': 'DOWNLOAD', 'Data': {'Path': f"/ wsp {dbdata['admin_wid']} {testname}", 'Offset': '500', 'Size': '1000'}})
rawdata = conn.read()
assert (len(rawdata) == 500), 'test_download: #5 resumed data had wrong length'
assert (blake2hash((('0' * 500) + rawdata).encode()) == 'BLAKE2B-256:4(8V*JuSdLH#SL%edxldiA<&TayrTtdIV9yiK~Tp'), 'test_download: #5 resumed file hash failure'
conn.disconnect() |
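The resume check at the end of test_download hashes the 500 bytes already held locally plus the 500 bytes re-sent from Offset 500. A rough standard-library equivalent of that digest (the tests' blake2hash additionally wraps a 32-byte BLAKE2b digest into a 'BLAKE2B-256:<encoded>' CryptoString, whose text encoding is library-specific):

import hashlib

held = '0' * 500     # bytes on disk before the resume
resumed = '0' * 500  # bytes returned after Offset=500
digest = hashlib.blake2b((held + resumed).encode(), digest_size=32).digest()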
def test_getquotainfo():
'This tests the command GETQUOTAINFO, which gets both the quota for the workspace and the \n\tdisk usage'
dbconn = setup_test()
dbdata = init_server(dbconn)
conn = ServerConnection()
assert conn.connect('localhost', 2001), 'Connection to server at localhost:2001 failed'
reset_top_dir(dbdata)
pwhash = '$argon2id$v=19$m=65536,t=2,p=1$ew5lqHA5z38za+257DmnTA$0LWVrI2r7XCqdcCYkJLok65qussSyhN5TTZP+OTgzEI'
devid = '22222222-2222-2222-2222-222222222222'
devpair = EncryptionPair(CryptoString('CURVE25519:@X~msiMmBq0nsNnn0%~x{M|NU_{?<Wj)cYybdh&Z'), CryptoString('CURVE25519:W30{oJ?w~NBbj{F8Ag4~<bcWy6_uQ{i{X?NDq4^l'))
dbdata['pwhash'] = pwhash
dbdata['devid'] = devid
dbdata['devpair'] = devpair
regcode_admin(dbdata, conn)
login_admin(dbdata, conn)
init_user(dbdata, conn)
status = make_test_file(os.path.join(dbdata['configfile']['global']['workspace_dir'], dbdata['admin_wid']), file_size=1000)
assert (not status.error()), f'Failed to create test workspace file: {status.info}'
conn.send_message({'Action': 'GETQUOTAINFO', 'Data': {}})
response = conn.read_response(server_response)
assert (response['Code'] == 200), 'test_getquotainfo: failed to get quota information'
assert (response['Data']['DiskUsage'] == '1000'), 'test_getquotainfo: disk usage was incorrect'
assert (response['Data']['QuotaSize'] == '0'), "test_getquotainfo: admin quota wasn't unlimited"
conn.disconnect() | -2,799,457,588,750,778,400 | This tests the command GETQUOTAINFO, which gets both the quota for the workspace and the
disk usage | tests/integration/test_fscmds.py | test_getquotainfo | mensago/mensagod | python | def test_getquotainfo():
'This tests the command GETQUOTAINFO, which gets both the quota for the workspace and the \n\tdisk usage'
dbconn = setup_test()
dbdata = init_server(dbconn)
conn = ServerConnection()
assert conn.connect('localhost', 2001), 'Connection to server at localhost:2001 failed'
reset_top_dir(dbdata)
pwhash = '$argon2id$v=19$m=65536,t=2,p=1$ew5lqHA5z38za+257DmnTA$0LWVrI2r7XCqdcCYkJLok65qussSyhN5TTZP+OTgzEI'
devid = '22222222-2222-2222-2222-222222222222'
devpair = EncryptionPair(CryptoString('CURVE25519:@X~msiMmBq0nsNnn0%~x{M|NU_{?<Wj)cYybdh&Z'), CryptoString('CURVE25519:W30{oJ?w~NBbj{F8Ag4~<bcWy6_uQ{i{X?NDq4^l'))
dbdata['pwhash'] = pwhash
dbdata['devid'] = devid
dbdata['devpair'] = devpair
regcode_admin(dbdata, conn)
login_admin(dbdata, conn)
init_user(dbdata, conn)
status = make_test_file(os.path.join(dbdata['configfile']['global']['workspace_dir'], dbdata['admin_wid']), file_size=1000)
assert (not status.error()), f'Failed to create test workspace file: {status.info}'
conn.send_message({'Action': 'GETQUOTAINFO', 'Data': {}})
response = conn.read_response(server_response)
assert (response['Code'] == 200), 'test_getquotainfo: failed to get quota information'
assert (response['Data']['DiskUsage'] == '1000'), 'test_getquotainfo: disk usage was incorrect'
assert (response['Data']['QuotaSize'] == '0'), "test_getquotainfo: admin quota wasn't unlimited"
conn.disconnect() |
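The quota fields come back as strings, with a QuotaSize of '0' meaning unlimited. A hypothetical client-side check (units are server-defined, so both values are treated as raw integers here):

def over_quota(disk_usage: str, quota_size: str) -> bool:
    usage, quota = int(disk_usage), int(quota_size)
    return quota != 0 and usage > quota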
def test_list():
'Tests the LIST command'
dbconn = setup_test()
dbdata = init_server(dbconn)
conn = ServerConnection()
assert conn.connect('localhost', 2001), 'Connection to server at localhost:2001 failed'
reset_top_dir(dbdata)
pwhash = '$argon2id$v=19$m=65536,t=2,p=1$ew5lqHA5z38za+257DmnTA$0LWVrI2r7XCqdcCYkJLok65qussSyhN5TTZP+OTgzEI'
devid = '22222222-2222-2222-2222-222222222222'
devpair = EncryptionPair(CryptoString('CURVE25519:@X~msiMmBq0nsNnn0%~x{M|NU_{?<Wj)cYybdh&Z'), CryptoString('CURVE25519:W30{oJ?w~NBbj{F8Ag4~<bcWy6_uQ{i{X?NDq4^l'))
dbdata['pwhash'] = pwhash
dbdata['devid'] = devid
dbdata['devpair'] = devpair
regcode_admin(dbdata, conn)
login_admin(dbdata, conn)
conn.send_message({'Action': 'LIST', 'Data': {'Path': '/ 11111111-1111-1111-1111-111111111111'}})
response = conn.read_response(server_response)
assert (response['Code'] == 404), 'test_list: #1 failed to handle missing path'
admin_dir = os.path.join(dbdata['configfile']['global']['workspace_dir'], dbdata['admin_wid'])
status = make_test_file(admin_dir)
assert (not status.error()), 'test_list: #2 failed to create test file'
conn.send_message({'Action': 'LIST', 'Data': {'Path': ' '.join(['/ wsp', dbdata['admin_wid'], status['name']])}})
response = conn.read_response(server_response)
assert (response['Code'] == 400), 'test_list: #2 failed to handle path as file'
os.mkdir(os.path.join(admin_dir, '11111111-1111-1111-1111-111111111111'))
conn.send_message({'Action': 'LIST', 'Data': {'Path': (('/ wsp ' + dbdata['admin_wid']) + ' 11111111-1111-1111-1111-111111111111')}})
response = conn.read_response(server_response)
assert (response['Code'] == 200), 'test_list: #3 failed to handle empty directory'
assert (('Files' in response['Data']) and (len(response['Data']['Files']) == 0)), 'test_list: #3 failed to have empty response for empty directory'
for i in range(1, 6):
tempname = '.'.join([str((1000 * i)), '500', str(uuid.uuid4())])
try:
fhandle = open(os.path.join(admin_dir, '11111111-1111-1111-1111-111111111111', tempname), 'w')
except Exception as e:
assert False, ('test_list: #4 failed to create test files: ' + str(e))
fhandle.write(('0' * 500))
fhandle.close()
conn.send_message({'Action': 'LIST', 'Data': {'Path': (('/ wsp ' + dbdata['admin_wid']) + ' 11111111-1111-1111-1111-111111111111')}})
response = conn.read_response(server_response)
assert (response['Code'] == 200), 'test_list: #4 failed to handle non-empty directory'
assert (('Files' in response['Data']) and (len(response['Data']['Files']) == 5)), 'test_list: #4 failed to list all files in directory'
conn.send_message({'Action': 'LIST', 'Data': {'Path': (('/ wsp ' + dbdata['admin_wid']) + ' 11111111-1111-1111-1111-111111111111'), 'Time': '3000'}})
response = conn.read_response(server_response)
assert (response['Code'] == 200), 'test_list: #5 failed to handle non-empty directory'
assert (('Files' in response['Data']) and (len(response['Data']['Files']) == 3)), 'test_list: #5 failed to filter files'
conn.disconnect() | -1,777,570,973,949,736,200 | Tests the LIST command | tests/integration/test_fscmds.py | test_list | mensago/mensagod | python | def test_list():
dbconn = setup_test()
dbdata = init_server(dbconn)
conn = ServerConnection()
assert conn.connect('localhost', 2001), 'Connection to server at localhost:2001 failed'
reset_top_dir(dbdata)
pwhash = '$argon2id$v=19$m=65536,t=2,p=1$ew5lqHA5z38za+257DmnTA$0LWVrI2r7XCqdcCYkJLok65qussSyhN5TTZP+OTgzEI'
devid = '22222222-2222-2222-2222-222222222222'
devpair = EncryptionPair(CryptoString('CURVE25519:@X~msiMmBq0nsNnn0%~x{M|NU_{?<Wj)cYybdh&Z'), CryptoString('CURVE25519:W30{oJ?w~NBbj{F8Ag4~<bcWy6_uQ{i{X?NDq4^l'))
dbdata['pwhash'] = pwhash
dbdata['devid'] = devid
dbdata['devpair'] = devpair
regcode_admin(dbdata, conn)
login_admin(dbdata, conn)
conn.send_message({'Action': 'LIST', 'Data': {'Path': '/ 11111111-1111-1111-1111-111111111111'}})
response = conn.read_response(server_response)
assert (response['Code'] == 404), 'test_list: #1 failed to handle missing path'
admin_dir = os.path.join(dbdata['configfile']['global']['workspace_dir'], dbdata['admin_wid'])
status = make_test_file(admin_dir)
assert (not status.error()), 'test_list: #2 failed to create test file'
conn.send_message({'Action': 'LIST', 'Data': {'Path': ' '.join(['/ wsp', dbdata['admin_wid'], status['name']])}})
response = conn.read_response(server_response)
assert (response['Code'] == 400), 'test_list: #2 failed to handle path as file'
os.mkdir(os.path.join(admin_dir, '11111111-1111-1111-1111-111111111111'))
conn.send_message({'Action': 'LIST', 'Data': {'Path': (('/ wsp ' + dbdata['admin_wid']) + ' 11111111-1111-1111-1111-111111111111')}})
response = conn.read_response(server_response)
assert (response['Code'] == 200), 'test_list: #3 failed to handle empty directory'
assert (('Files' in response['Data']) and (len(response['Data']['Files']) == 0)), 'test_list: #3 failed to have empty response for empty directory'
for i in range(1, 6):
tempname = '.'.join([str((1000 * i)), '500', str(uuid.uuid4())])
try:
fhandle = open(os.path.join(admin_dir, '11111111-1111-1111-1111-111111111111', tempname), 'w')
except Exception as e:
assert False, ('test_list: #4 failed to create test files: ' + str(e))
fhandle.write(('0' * 500))
fhandle.close()
conn.send_message({'Action': 'LIST', 'Data': {'Path': (('/ wsp ' + dbdata['admin_wid']) + ' 11111111-1111-1111-1111-111111111111')}})
response = conn.read_response(server_response)
assert (response['Code'] == 200), 'test_list: #4 failed to handle non-empty directory'
assert (('Files' in response['Data']) and (len(response['Data']['Files']) == 5)), 'test_list: #4 failed to list all files in directory'
conn.send_message({'Action': 'LIST', 'Data': {'Path': (('/ wsp ' + dbdata['admin_wid']) + ' 11111111-1111-1111-1111-111111111111'), 'Time': '3000'}})
response = conn.read_response(server_response)
assert (response['Code'] == 200), 'test_list: #5 failed to handle non-empty directory'
assert (('Files' in response['Data']) and (len(response['Data']['Files']) == 3)), 'test_list: #5 failed to filter files'
conn.disconnect() |
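Server-side file names follow '<timestamp>.<size>.<uuid>', and LIST's Time parameter filters on the first field; with Time=3000 the test keeps the 3000/4000/5000 files, so the bound is treated as inclusive in this sketch:

def listed_after(filename: str, cutoff: int) -> bool:
    return int(filename.split('.')[0]) >= cutoff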
def test_listdirs():
'Tests the LISTDIRS command'
dbconn = setup_test()
dbdata = init_server(dbconn)
conn = ServerConnection()
assert conn.connect('localhost', 2001), 'Connection to server at localhost:2001 failed'
reset_top_dir(dbdata)
pwhash = '$argon2id$v=19$m=65536,t=2,p=1$ew5lqHA5z38za+257DmnTA$0LWVrI2r7XCqdcCYkJLok65qussSyhN5TTZP+OTgzEI'
devid = '22222222-2222-2222-2222-222222222222'
devpair = EncryptionPair(CryptoString('CURVE25519:@X~msiMmBq0nsNnn0%~x{M|NU_{?<Wj)cYybdh&Z'), CryptoString('CURVE25519:W30{oJ?w~NBbj{F8Ag4~<bcWy6_uQ{i{X?NDq4^l'))
dbdata['pwhash'] = pwhash
dbdata['devid'] = devid
dbdata['devpair'] = devpair
regcode_admin(dbdata, conn)
login_admin(dbdata, conn)
conn.send_message({'Action': 'LISTDIRS', 'Data': {'Path': '/ 11111111-1111-1111-1111-111111111111'}})
response = conn.read_response(server_response)
assert (response['Code'] == 404), 'test_listdirs: #1 failed to handle missing path'
admin_dir = os.path.join(dbdata['configfile']['global']['workspace_dir'], dbdata['admin_wid'])
status = make_test_file(admin_dir)
assert (not status.error()), 'test_listdirs: #2 failed to create test file'
conn.send_message({'Action': 'LIST', 'Data': {'Path': ' '.join(['/ wsp', dbdata['admin_wid'], status['name']])}})
response = conn.read_response(server_response)
assert (response['Code'] == 400), 'test_listdirs: #2 failed to handle path as file'
os.mkdir(os.path.join(admin_dir, '11111111-1111-1111-1111-111111111111'))
conn.send_message({'Action': 'LISTDIRS', 'Data': {'Path': (('/ wsp ' + dbdata['admin_wid']) + ' 11111111-1111-1111-1111-111111111111')}})
response = conn.read_response(server_response)
assert (response['Code'] == 200), 'test_listdirs: #3 failed to handle empty directory'
assert (('Directories' in response['Data']) and (len(response['Data']['Directories']) == 0)), 'test_listdirs: #3 failed to have empty response for empty directory'
for i in range(2, 7):
tempname = '-'.join([(str(i) * 8), (str(i) * 4), (str(i) * 4), (str(i) * 4), (str(i) * 12)])
try:
os.mkdir(os.path.join(admin_dir, '11111111-1111-1111-1111-111111111111', tempname))
except Exception as e:
assert False, ('test_listdirs: #4 failed to create test directories: ' + str(e))
make_test_file(os.path.join(admin_dir, '11111111-1111-1111-1111-111111111111'))
conn.send_message({'Action': 'LISTDIRS', 'Data': {'Path': (('/ wsp ' + dbdata['admin_wid']) + ' 11111111-1111-1111-1111-111111111111')}})
response = conn.read_response(server_response)
assert (response['Code'] == 200), 'test_listdirs: #4 failed to handle non-empty directory'
assert (('Directories' in response['Data']) and (len(response['Data']['Directories']) == 5)), 'test_listdirs: #4 failed to list all subdirectories'
conn.disconnect() | 2,690,979,932,043,363,000 | Tests the LISTDIRS command | tests/integration/test_fscmds.py | test_listdirs | mensago/mensagod | python | def test_listdirs():
dbconn = setup_test()
dbdata = init_server(dbconn)
conn = ServerConnection()
assert conn.connect('localhost', 2001), 'Connection to server at localhost:2001 failed'
reset_top_dir(dbdata)
pwhash = '$argon2id$v=19$m=65536,t=2,p=1$ew5lqHA5z38za+257DmnTA$0LWVrI2r7XCqdcCYkJLok65qussSyhN5TTZP+OTgzEI'
devid = '22222222-2222-2222-2222-222222222222'
devpair = EncryptionPair(CryptoString('CURVE25519:@X~msiMmBq0nsNnn0%~x{M|NU_{?<Wj)cYybdh&Z'), CryptoString('CURVE25519:W30{oJ?w~NBbj{F8Ag4~<bcWy6_uQ{i{X?NDq4^l'))
dbdata['pwhash'] = pwhash
dbdata['devid'] = devid
dbdata['devpair'] = devpair
regcode_admin(dbdata, conn)
login_admin(dbdata, conn)
conn.send_message({'Action': 'LISTDIRS', 'Data': {'Path': '/ 11111111-1111-1111-1111-111111111111'}})
response = conn.read_response(server_response)
assert (response['Code'] == 404), 'test_listdirs: #1 failed to handle missing path'
admin_dir = os.path.join(dbdata['configfile']['global']['workspace_dir'], dbdata['admin_wid'])
status = make_test_file(admin_dir)
assert (not status.error()), 'test_listdirs: #2 failed to create test file'
conn.send_message({'Action': 'LIST', 'Data': {'Path': ' '.join(['/ wsp', dbdata['admin_wid'], status['name']])}})
response = conn.read_response(server_response)
assert (response['Code'] == 400), 'test_listdirs: #2 failed to handle path as file'
os.mkdir(os.path.join(admin_dir, '11111111-1111-1111-1111-111111111111'))
conn.send_message({'Action': 'LISTDIRS', 'Data': {'Path': (('/ wsp ' + dbdata['admin_wid']) + ' 11111111-1111-1111-1111-111111111111')}})
response = conn.read_response(server_response)
assert (response['Code'] == 200), 'test_listdirs: #3 failed to handle empty directory'
assert (('Directories' in response['Data']) and (len(response['Data']['Directories']) == 0)), 'test_listdirs: #3 failed to have empty response for empty directory'
for i in range(2, 7):
tempname = '-'.join([(str(i) * 8), (str(i) * 4), (str(i) * 4), (str(i) * 4), (str(i) * 12)])
try:
os.mkdir(os.path.join(admin_dir, '11111111-1111-1111-1111-111111111111', tempname))
except Exception as e:
assert False, ('test_listdirs: #4 failed to create test directories: ' + str(e))
make_test_file(os.path.join(admin_dir, '11111111-1111-1111-1111-111111111111'))
conn.send_message({'Action': 'LISTDIRS', 'Data': {'Path': (('/ wsp ' + dbdata['admin_wid']) + ' 11111111-1111-1111-1111-111111111111')}})
response = conn.read_response(server_response)
assert (response['Code'] == 200), 'test_listdirs: #4 failed to handle non-empty directory'
assert (('Directories' in response['Data']) and (len(response['Data']['Directories']) == 5)), 'test_listdirs: #4 failed to list all subdirectories'
conn.disconnect() |
def test_mkdir():
'Tests the MKDIR command'
dbconn = setup_test()
dbdata = init_server(dbconn)
conn = ServerConnection()
assert conn.connect('localhost', 2001), 'Connection to server at localhost:2001 failed'
reset_top_dir(dbdata)
pwhash = '$argon2id$v=19$m=65536,t=2,p=1$ew5lqHA5z38za+257DmnTA$0LWVrI2r7XCqdcCYkJLok65qussSyhN5TTZP+OTgzEI'
devid = '22222222-2222-2222-2222-222222222222'
devpair = EncryptionPair(CryptoString('CURVE25519:@X~msiMmBq0nsNnn0%~x{M|NU_{?<Wj)cYybdh&Z'), CryptoString('CURVE25519:W30{oJ?w~NBbj{F8Ag4~<bcWy6_uQ{i{X?NDq4^l'))
dbdata['pwhash'] = pwhash
dbdata['devid'] = devid
dbdata['devpair'] = devpair
regcode_admin(dbdata, conn)
login_admin(dbdata, conn)
conn.send_message({'Action': 'MKDIR', 'Data': {'Path': (('/ wsp ' + dbdata['admin_wid']) + ' some_dir_name')}})
response = conn.read_response(server_response)
assert (response['Code'] == 400), 'test_mkdir: #1 failed to handle bad path'
conn.send_message({'Action': 'MKDIR', 'Data': {'Path': (('/ wsp ' + dbdata['admin_wid']) + ' 11111111-1111-1111-1111-111111111111')}})
response = conn.read_response(server_response)
assert (response['Code'] == 200), 'test_mkdir: #2 failed to create legitimate directory'
conn.send_message({'Action': 'MKDIR', 'Data': {'Path': (('/ wsp ' + dbdata['admin_wid']) + ' 11111111-1111-1111-1111-111111111111')}})
response = conn.read_response(server_response)
assert (response['Code'] == 408), 'test_mkdir: #3 failed to handle existing directory'
multipath = ' '.join(['/', dbdata['admin_wid'], '22222222-2222-2222-2222-222222222222', '33333333-3333-3333-3333-333333333333', '44444444-4444-4444-4444-444444444444', '55555555-5555-5555-5555-555555555555'])
conn.send_message({'Action': 'MKDIR', 'Data': {'Path': multipath}})
response = conn.read_response(server_response)
assert (response['Code'] == 200), 'test_mkdir: #2 failed to create legitimate directory'
conn.disconnect() | 6,503,906,916,795,076,000 | Tests the MKDIR command | tests/integration/test_fscmds.py | test_mkdir | mensago/mensagod | python | def test_mkdir():
dbconn = setup_test()
dbdata = init_server(dbconn)
conn = ServerConnection()
assert conn.connect('localhost', 2001), 'Connection to server at localhost:2001 failed'
reset_top_dir(dbdata)
pwhash = '$argon2id$v=19$m=65536,t=2,p=1$ew5lqHA5z38za+257DmnTA$0LWVrI2r7XCqdcCYkJLok65qussSyhN5TTZP+OTgzEI'
devid = '22222222-2222-2222-2222-222222222222'
devpair = EncryptionPair(CryptoString('CURVE25519:@X~msiMmBq0nsNnn0%~x{M|NU_{?<Wj)cYybdh&Z'), CryptoString('CURVE25519:W30{oJ?w~NBbj{F8Ag4~<bcWy6_uQ{i{X?NDq4^l'))
dbdata['pwhash'] = pwhash
dbdata['devid'] = devid
dbdata['devpair'] = devpair
regcode_admin(dbdata, conn)
login_admin(dbdata, conn)
conn.send_message({'Action': 'MKDIR', 'Data': {'Path': (('/ wsp ' + dbdata['admin_wid']) + ' some_dir_name')}})
response = conn.read_response(server_response)
assert (response['Code'] == 400), 'test_mkdir: #1 failed to handle bad path'
conn.send_message({'Action': 'MKDIR', 'Data': {'Path': (('/ wsp ' + dbdata['admin_wid']) + ' 11111111-1111-1111-1111-111111111111')}})
response = conn.read_response(server_response)
assert (response['Code'] == 200), 'test_mkdir: #2 failed to create legitimate directory'
conn.send_message({'Action': 'MKDIR', 'Data': {'Path': (('/ wsp ' + dbdata['admin_wid']) + ' 11111111-1111-1111-1111-111111111111')}})
response = conn.read_response(server_response)
assert (response['Code'] == 408), 'test_mkdir: #3 failed to handle existing directory'
multipath = ' '.join(['/', dbdata['admin_wid'], '22222222-2222-2222-2222-222222222222', '33333333-3333-3333-3333-333333333333', '44444444-4444-4444-4444-444444444444', '55555555-5555-5555-5555-555555555555'])
conn.send_message({'Action': 'MKDIR', 'Data': {'Path': multipath}})
response = conn.read_response(server_response)
assert (response['Code'] == 200), 'test_mkdir: #2 failed to create legitimate directory'
conn.disconnect() |
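Every test above repeats the same send/read/assert round trip; a hypothetical helper condensing it (server_response is the schema object the tests already pass to read_response):

def send_and_check(conn, action: str, data: dict, expected: int) -> dict:
    conn.send_message({'Action': action, 'Data': data})
    response = conn.read_response(server_response)
    assert response['Code'] == expected, f'{action} returned {response["Code"]}'
    return response

# send_and_check(conn, 'MKDIR', {'Path': '/ wsp ' + wid + ' <dir-uuid>'}, 200)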
def test_move():
'Tests the MOVE command'
dbconn = setup_test()
dbdata = init_server(dbconn)
conn = ServerConnection()
assert conn.connect('localhost', 2001), 'Connection to server at localhost:2001 failed'
reset_top_dir(dbdata)
pwhash = '$argon2id$v=19$m=65536,t=2,p=1$ew5lqHA5z38za+257DmnTA$0LWVrI2r7XCqdcCYkJLok65qussSyhN5TTZP+OTgzEI'
devid = '22222222-2222-2222-2222-222222222222'
devpair = EncryptionPair(CryptoString('CURVE25519:@X~msiMmBq0nsNnn0%~x{M|NU_{?<Wj)cYybdh&Z'), CryptoString('CURVE25519:W30{oJ?w~NBbj{F8Ag4~<bcWy6_uQ{i{X?NDq4^l'))
dbdata['pwhash'] = pwhash
dbdata['devid'] = devid
dbdata['devpair'] = devpair
regcode_admin(dbdata, conn)
login_admin(dbdata, conn)
admin_dir = os.path.join(dbdata['configfile']['global']['workspace_dir'], dbdata['admin_wid'])
inner_dir = os.path.join(admin_dir, '11111111-1111-1111-1111-111111111111')
os.mkdir(inner_dir)
conn.send_message({'Action': 'MOVE', 'Data': {'SourceFile': (('/ ' + dbdata['admin_wid']) + ' 1.1.01234567-89ab-cdef-0123-456789abcdef'), 'DestDir': (('/ ' + dbdata['admin_wid']) + ' 11111111-1111-1111-1111-111111111111')}})
response = conn.read_response(server_response)
assert (response['Code'] == 404), 'test_move: #1 failed to handle nonexistent source file'
status = make_test_file(admin_dir)
assert (not status.error()), 'test_move: #2 failed to create a test file'
testfile1 = status['name']
conn.send_message({'Action': 'MOVE', 'Data': {'SourceFile': f"/ wsp {dbdata['admin_wid']} {testfile1}", 'DestDir': f"/ wsp {dbdata['admin_wid']} 22222222-2222-2222-2222-222222222222"}})
response = conn.read_response(server_response)
assert (response['Code'] == 404), 'test_move: #2 failed to handle nonexistent destination dir'
conn.send_message({'Action': 'MOVE', 'Data': {'SourceFile': f"/ wsp {dbdata['admin_wid']}", 'DestDir': f"/ wsp {dbdata['admin_wid']} 11111111-1111-1111-1111-111111111111"}})
response = conn.read_response(server_response)
assert (response['Code'] == 400), 'test_move: #3 failed to handle directory as source'
status = make_test_file(inner_dir, 102400, testfile1)
conn.send_message({'Action': 'MOVE', 'Data': {'SourceFile': f"/ wsp {dbdata['admin_wid']} {testfile1}", 'DestDir': f"/ wsp {dbdata['admin_wid']} 11111111-1111-1111-1111-111111111111 {testfile1}"}})
response = conn.read_response(server_response)
assert (response['Code'] == 400), 'test_move: #4 failed to handle file as destination'
os.remove(os.path.join(inner_dir, status['name']))
conn.send_message({'Action': 'MOVE', 'Data': {'SourceFile': f"/ wsp {dbdata['admin_wid']} {testfile1}", 'DestDir': f"/ wsp {dbdata['admin_wid']} 11111111-1111-1111-1111-111111111111"}})
response = conn.read_response(server_response)
assert (response['Code'] == 200), 'test_move: #6 failed to succeed'
conn.disconnect() | -5,694,254,815,847,446,000 | Tests the MOVE command | tests/integration/test_fscmds.py | test_move | mensago/mensagod | python | def test_move():
dbconn = setup_test()
dbdata = init_server(dbconn)
conn = ServerConnection()
assert conn.connect('localhost', 2001), 'Connection to server at localhost:2001 failed'
reset_top_dir(dbdata)
pwhash = '$argon2id$v=19$m=65536,t=2,p=1$ew5lqHA5z38za+257DmnTA$0LWVrI2r7XCqdcCYkJLok65qussSyhN5TTZP+OTgzEI'
devid = '22222222-2222-2222-2222-222222222222'
devpair = EncryptionPair(CryptoString('CURVE25519:@X~msiMmBq0nsNnn0%~x{M|NU_{?<Wj)cYybdh&Z'), CryptoString('CURVE25519:W30{oJ?w~NBbj{F8Ag4~<bcWy6_uQ{i{X?NDq4^l'))
dbdata['pwhash'] = pwhash
dbdata['devid'] = devid
dbdata['devpair'] = devpair
regcode_admin(dbdata, conn)
login_admin(dbdata, conn)
admin_dir = os.path.join(dbdata['configfile']['global']['workspace_dir'], dbdata['admin_wid'])
inner_dir = os.path.join(admin_dir, '11111111-1111-1111-1111-111111111111')
os.mkdir(inner_dir)
conn.send_message({'Action': 'MOVE', 'Data': {'SourceFile': (('/ ' + dbdata['admin_wid']) + ' 1.1.01234567-89ab-cdef-0123-456789abcdef'), 'DestDir': (('/ ' + dbdata['admin_wid']) + ' 11111111-1111-1111-1111-111111111111')}})
response = conn.read_response(server_response)
assert (response['Code'] == 404), 'test_move: #1 failed to handle nonexistent source file'
status = make_test_file(admin_dir)
assert (not status.error()), 'test_move: #2 failed to create a test file'
testfile1 = status['name']
conn.send_message({'Action': 'MOVE', 'Data': {'SourceFile': f"/ wsp {dbdata['admin_wid']} {testfile1}", 'DestDir': f"/ wsp {dbdata['admin_wid']} 22222222-2222-2222-2222-222222222222"}})
response = conn.read_response(server_response)
assert (response['Code'] == 404), 'test_move: #2 failed to handle nonexistent destination dir'
conn.send_message({'Action': 'MOVE', 'Data': {'SourceFile': f"/ wsp {dbdata['admin_wid']}", 'DestDir': f"/ wsp {dbdata['admin_wid']} 11111111-1111-1111-1111-111111111111"}})
response = conn.read_response(server_response)
assert (response['Code'] == 400), 'test_move: #3 failed to handle directory as source'
status = make_test_file(inner_dir, 102400, testfile1)
conn.send_message({'Action': 'MOVE', 'Data': {'SourceFile': f"/ wsp {dbdata['admin_wid']} {testfile1}", 'DestDir': f"/ wsp {dbdata['admin_wid']} 11111111-1111-1111-1111-111111111111 {testfile1}"}})
response = conn.read_response(server_response)
assert (response['Code'] == 400), 'test_move: #4 failed to handle file as destination'
os.remove(os.path.join(inner_dir, status['name']))
conn.send_message({'Action': 'MOVE', 'Data': {'SourceFile': f"/ wsp {dbdata['admin_wid']} {testfile1}", 'DestDir': f"/ wsp {dbdata['admin_wid']} 11111111-1111-1111-1111-111111111111"}})
response = conn.read_response(server_response)
assert (response['Code'] == 200), 'test_move: #6 failed to succeed'
conn.disconnect() |
def test_replace():
'Test the REPLACE command'
dbconn = setup_test()
dbdata = init_server(dbconn)
conn = ServerConnection()
assert conn.connect('localhost', 2001), 'Connection to server at localhost:2001 failed'
reset_top_dir(dbdata)
pwhash = '$argon2id$v=19$m=65536,t=2,p=1$ew5lqHA5z38za+257DmnTA$0LWVrI2r7XCqdcCYkJLok65qussSyhN5TTZP+OTgzEI'
devid = '22222222-2222-2222-2222-222222222222'
devpair = EncryptionPair(CryptoString('CURVE25519:@X~msiMmBq0nsNnn0%~x{M|NU_{?<Wj)cYybdh&Z'), CryptoString('CURVE25519:W30{oJ?w~NBbj{F8Ag4~<bcWy6_uQ{i{X?NDq4^l'))
dbdata['pwhash'] = pwhash
dbdata['devid'] = devid
dbdata['devpair'] = devpair
regcode_admin(dbdata, conn)
login_admin(dbdata, conn)
conn.send_message({'Action': 'REPLACE', 'Data': {'OldPath': f"/ wsp {dbdata['admin_wid']} some_dir_name", 'NewPath': f"/ wsp {dbdata['admin_wid']} 1234.1234.11111111-1111-1111-1111-111111111111", 'Size': '1234', 'Hash': 'BLAKE2B-256:tSl@QzD1w-vNq@CC-5`($KuxO0#aOl^-cy(l7XXT'}})
response = conn.read_response(server_response)
assert (response['Code'] == 400), f'{funcname()}: #1 failed to handle bad old file path'
admin_dir = os.path.join(dbdata['configfile']['global']['workspace_dir'], dbdata['admin_wid'])
status = make_test_file(admin_dir)
filename = status['name']
conn.send_message({'Action': 'REPLACE', 'Data': {'OldPath': f"/ wsp {dbdata['admin_wid']} {filename}", 'NewPath': f"/ wsp {dbdata['admin_wid']} some_dir_name", 'Size': '1234', 'Hash': 'BLAKE2B-256:tSl@QzD1w-vNq@CC-5`($KuxO0#aOl^-cy(l7XXT'}})
response = conn.read_response(server_response)
assert (response['Code'] == 400), f'{funcname()}: #2 failed to handle bad new file path'
conn.send_message({'Action': 'REPLACE', 'Data': {'OldPath': f"/ wsp {dbdata['admin_wid']} 1234.1234.11111111-1111-1111-1111-111111111111", 'NewPath': '/ wsp 11111111-1111-1111-1111-111111111111', 'Size': '4321', 'Hash': 'BLAKE2B-256:tSl@QzD1w-vNq@CC-5`($KuxO0#aOl^-cy(l7XXT'}})
response = conn.read_response(server_response)
assert (response['Code'] == 404), f'{funcname()}: #4 failed to handle nonexistent destination dir'
status = make_test_file(admin_dir)
assert (not status.error()), f'{funcname()}: #3 failed to create test file'
filename = status['name']
conn.send_message({'Action': 'REPLACE', 'Data': {'OldPath': f"/ wsp {dbdata['admin_wid']} {filename}", 'NewPath': f"/ wsp {dbdata['admin_wid']}", 'Size': '1000', 'Hash': 'BLAKE2B-256:4(8V*JuSdLH#SL%edxldiA<&TayrTtdIV9yiK~Tp'}})
response = conn.read_response(server_response)
assert (response['Code'] == 100), f'{funcname()}: #6 failed to proceed to file upload'
conn.write(('0' * 1000))
response = conn.read_response(server_response)
assert (response['Code'] == 200), f'{funcname()}: #6 failed to replace file'
conn.disconnect() | 7,099,241,659,638,598,000 | Test the REPLACE command | tests/integration/test_fscmds.py | test_replace | mensago/mensagod | python | def test_replace():
dbconn = setup_test()
dbdata = init_server(dbconn)
conn = ServerConnection()
assert conn.connect('localhost', 2001), 'Connection to server at localhost:2001 failed'
reset_top_dir(dbdata)
pwhash = '$argon2id$v=19$m=65536,t=2,p=1$ew5lqHA5z38za+257DmnTA$0LWVrI2r7XCqdcCYkJLok65qussSyhN5TTZP+OTgzEI'
devid = '22222222-2222-2222-2222-222222222222'
devpair = EncryptionPair(CryptoString('CURVE25519:@X~msiMmBq0nsNnn0%~x{M|NU_{?<Wj)cYybdh&Z'), CryptoString('CURVE25519:W30{oJ?w~NBbj{F8Ag4~<bcWy6_uQ{i{X?NDq4^l'))
dbdata['pwhash'] = pwhash
dbdata['devid'] = devid
dbdata['devpair'] = devpair
regcode_admin(dbdata, conn)
login_admin(dbdata, conn)
conn.send_message({'Action': 'REPLACE', 'Data': {'OldPath': f"/ wsp {dbdata['admin_wid']} some_dir_name", 'NewPath': f"/ wsp {dbdata['admin_wid']} 1234.1234.11111111-1111-1111-1111-111111111111", 'Size': '1234', 'Hash': 'BLAKE2B-256:tSl@QzD1w-vNq@CC-5`($KuxO0#aOl^-cy(l7XXT'}})
response = conn.read_response(server_response)
assert (response['Code'] == 400), f'{funcname()}: #1 failed to handle bad old file path'
admin_dir = os.path.join(dbdata['configfile']['global']['workspace_dir'], dbdata['admin_wid'])
status = make_test_file(admin_dir)
filename = status['name']
conn.send_message({'Action': 'REPLACE', 'Data': {'OldPath': f"/ wsp {dbdata['admin_wid']} {filename}", 'NewPath': f"/ wsp {dbdata['admin_wid']} some_dir_name", 'Size': '1234', 'Hash': 'BLAKE2B-256:tSl@QzD1w-vNq@CC-5`($KuxO0#aOl^-cy(l7XXT'}})
response = conn.read_response(server_response)
assert (response['Code'] == 400), f'{funcname()}: #2 failed to handle bad new file path'
conn.send_message({'Action': 'REPLACE', 'Data': {'OldPath': f"/ wsp {dbdata['admin_wid']} 1234.1234.11111111-1111-1111-1111-111111111111", 'NewPath': '/ wsp 11111111-1111-1111-1111-111111111111', 'Size': '4321', 'Hash': 'BLAKE2B-256:tSl@QzD1w-vNq@CC-5`($KuxO0#aOl^-cy(l7XXT'}})
response = conn.read_response(server_response)
assert (response['Code'] == 404), f'{funcname()}: #4 failed to handle nonexistent destination dir'
status = make_test_file(admin_dir)
assert (not status.error()), f'{funcname()}: #3 failed to create test file'
filename = status['name']
conn.send_message({'Action': 'REPLACE', 'Data': {'OldPath': f"/ wsp {dbdata['admin_wid']} {filename}", 'NewPath': f"/ wsp {dbdata['admin_wid']}", 'Size': '1000', 'Hash': 'BLAKE2B-256:4(8V*JuSdLH#SL%edxldiA<&TayrTtdIV9yiK~Tp'}})
response = conn.read_response(server_response)
assert (response['Code'] == 100), f'{funcname()}: #6 failed to proceed to file upload'
conn.write(('0' * 1000))
response = conn.read_response(server_response)
assert (response['Code'] == 200), f'{funcname()}: #6 failed to replace file'
conn.disconnect() |
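Condensing the happy path of test_replace: REPLACE is two-phase. The server answers 100 to authorize the upload, the client streams exactly Size bytes, then reads the final status. Names wid, old_name and expected_hash are assumed bound:

payload = '0' * 1000
conn.send_message({'Action': 'REPLACE', 'Data': {
    'OldPath': f'/ wsp {wid} {old_name}',
    'NewPath': f'/ wsp {wid}',
    'Size': str(len(payload)),
    'Hash': expected_hash,  # a 'BLAKE2B-256:...' CryptoString over payload
}})
if conn.read_response(server_response)['Code'] == 100:
    conn.write(payload)
    assert conn.read_response(server_response)['Code'] == 200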
def test_rmdir():
'Tests the RMDIR command'
dbconn = setup_test()
dbdata = init_server(dbconn)
conn = ServerConnection()
assert conn.connect('localhost', 2001), 'Connection to server at localhost:2001 failed'
reset_top_dir(dbdata)
pwhash = '$argon2id$v=19$m=65536,t=2,p=1$ew5lqHA5z38za+257DmnTA$0LWVrI2r7XCqdcCYkJLok65qussSyhN5TTZP+OTgzEI'
devid = '22222222-2222-2222-2222-222222222222'
devpair = EncryptionPair(CryptoString('CURVE25519:@X~msiMmBq0nsNnn0%~x{M|NU_{?<Wj)cYybdh&Z'), CryptoString('CURVE25519:W30{oJ?w~NBbj{F8Ag4~<bcWy6_uQ{i{X?NDq4^l'))
dbdata['pwhash'] = pwhash
dbdata['devid'] = devid
dbdata['devpair'] = devpair
regcode_admin(dbdata, conn)
login_admin(dbdata, conn)
conn.send_message({'Action': 'RMDIR', 'Data': {'Path': (('/ wsp ' + dbdata['admin_wid']) + ' some_dir_name'), 'Recursive': 'False'}})
response = conn.read_response(server_response)
assert (response['Code'] == 400), 'test_rmdir: #1 failed to handle bad path'
conn.send_message({'Action': 'RMDIR', 'Data': {'Path': (('/ wsp ' + dbdata['admin_wid']) + ' 11111111-1111-1111-1111-111111111111'), 'Recursive': 'False'}})
response = conn.read_response(server_response)
assert (response['Code'] == 404), 'test_rmdir: #2 failed to handle nonexistent directory'
multipath = ' '.join(['/ wsp', dbdata['admin_wid'], '22222222-2222-2222-2222-222222222222', '33333333-3333-3333-3333-333333333333', '44444444-4444-4444-4444-444444444444', '55555555-5555-5555-5555-555555555555'])
conn.send_message({'Action': 'MKDIR', 'Data': {'Path': multipath}})
response = conn.read_response(server_response)
assert (response['Code'] == 200), 'test_rmdir: #3 failed to create test hierarchy'
conn.send_message({'Action': 'RMDIR', 'Data': {'Path': (('/ wsp ' + dbdata['admin_wid']) + ' 22222222-2222-2222-2222-222222222222'), 'Recursive': 'False'}})
response = conn.read_response(server_response)
assert (response['Code'] == 408), 'test_rmdir: #3 failed to handle non-empty directory'
conn.send_message({'Action': 'RMDIR', 'Data': {'Path': multipath}})
response = conn.read_response(server_response)
assert (response['Code'] == 200), 'test_rmdir: #4 failed to remove an empty directory' | -1,072,886,822,630,401,800 | Tests the RMDIR command | tests/integration/test_fscmds.py | test_rmdir | mensago/mensagod | python | def test_rmdir():
dbconn = setup_test()
dbdata = init_server(dbconn)
conn = ServerConnection()
assert conn.connect('localhost', 2001), 'Connection to server at localhost:2001 failed'
reset_top_dir(dbdata)
pwhash = '$argon2id$v=19$m=65536,t=2,p=1$ew5lqHA5z38za+257DmnTA$0LWVrI2r7XCqdcCYkJLok65qussSyhN5TTZP+OTgzEI'
devid = '22222222-2222-2222-2222-222222222222'
devpair = EncryptionPair(CryptoString('CURVE25519:@X~msiMmBq0nsNnn0%~x{M|NU_{?<Wj)cYybdh&Z'), CryptoString('CURVE25519:W30{oJ?w~NBbj{F8Ag4~<bcWy6_uQ{i{X?NDq4^l'))
dbdata['pwhash'] = pwhash
dbdata['devid'] = devid
dbdata['devpair'] = devpair
regcode_admin(dbdata, conn)
login_admin(dbdata, conn)
conn.send_message({'Action': 'RMDIR', 'Data': {'Path': (('/ wsp ' + dbdata['admin_wid']) + ' some_dir_name'), 'Recursive': 'False'}})
response = conn.read_response(server_response)
assert (response['Code'] == 400), 'test_rmdir: #1 failed to handle bad path'
conn.send_message({'Action': 'RMDIR', 'Data': {'Path': (('/ wsp ' + dbdata['admin_wid']) + ' 11111111-1111-1111-1111-111111111111'), 'Recursive': 'False'}})
response = conn.read_response(server_response)
assert (response['Code'] == 404), 'test_rmdir: #2 failed to handle nonexistent directory'
multipath = ' '.join(['/ wsp', dbdata['admin_wid'], '22222222-2222-2222-2222-222222222222', '33333333-3333-3333-3333-333333333333', '44444444-4444-4444-4444-444444444444', '55555555-5555-5555-5555-555555555555'])
conn.send_message({'Action': 'MKDIR', 'Data': {'Path': multipath}})
response = conn.read_response(server_response)
assert (response['Code'] == 200), 'test_rmdir: #3 failed to create test hierarchy'
conn.send_message({'Action': 'RMDIR', 'Data': {'Path': (('/ wsp ' + dbdata['admin_wid']) + ' 22222222-2222-2222-2222-222222222222'), 'Recursive': 'False'}})
response = conn.read_response(server_response)
assert (response['Code'] == 408), 'test_rmdir: #3 failed to handle non-empty directory'
conn.send_message({'Action': 'RMDIR', 'Data': {'Path': multipath}})
response = conn.read_response(server_response)
assert (response['Code'] == 200), 'test_rmdir: #4 failed to remove an empty directory' |
def test_select():
'Tests the SELECT command'
dbconn = setup_test()
dbdata = init_server(dbconn)
conn = ServerConnection()
assert conn.connect('localhost', 2001), 'Connection to server at localhost:2001 failed'
reset_top_dir(dbdata)
pwhash = '$argon2id$v=19$m=65536,t=2,p=1$ew5lqHA5z38za+257DmnTA$0LWVrI2r7XCqdcCYkJLok65qussSyhN5TTZP+OTgzEI'
devid = '22222222-2222-2222-2222-222222222222'
devpair = EncryptionPair(CryptoString('CURVE25519:@X~msiMmBq0nsNnn0%~x{M|NU_{?<Wj)cYybdh&Z'), CryptoString('CURVE25519:W30{oJ?w~NBbj{F8Ag4~<bcWy6_uQ{i{X?NDq4^l'))
dbdata['pwhash'] = pwhash
dbdata['devid'] = devid
dbdata['devpair'] = devpair
regcode_admin(dbdata, conn)
login_admin(dbdata, conn)
conn.send_message({'Action': 'SELECT', 'Data': {'Path': '/ 11111111-1111-1111-1111-111111111111'}})
response = conn.read_response(server_response)
assert (response['Code'] == 404), 'test_select: #1 failed to handle missing path'
admin_dir = os.path.join(dbdata['configfile']['global']['workspace_dir'], dbdata['admin_wid'])
status = make_test_file(admin_dir)
assert (not status.error()), 'test_select: #2 failed to create test file'
conn.send_message({'Action': 'SELECT', 'Data': {'Path': ' '.join(['/ wsp', dbdata['admin_wid'], status['name']])}})
response = conn.read_response(server_response)
assert (response['Code'] == 400), 'test_select: #2 failed to handle path as file'
innerpath = ' '.join(['/ wsp', dbdata['admin_wid'], '22222222-2222-2222-2222-222222222222'])
conn.send_message({'Action': 'MKDIR', 'Data': {'Path': innerpath}})
response = conn.read_response(server_response)
assert (response['Code'] == 200), 'test_select: #3 failed to create test directory'
conn.send_message({'Action': 'SELECT', 'Data': {'Path': innerpath}})
response = conn.read_response(server_response)
assert (response['Code'] == 200), 'test_select: #3 failed to work correctly'
conn.disconnect() | 4,892,010,180,817,249,000 | Tests the SELECT command | tests/integration/test_fscmds.py | test_select | mensago/mensagod | python | def test_select():
dbconn = setup_test()
dbdata = init_server(dbconn)
conn = ServerConnection()
assert conn.connect('localhost', 2001), 'Connection to server at localhost:2001 failed'
reset_top_dir(dbdata)
pwhash = '$argon2id$v=19$m=65536,t=2,p=1$ew5lqHA5z38za+257DmnTA$0LWVrI2r7XCqdcCYkJLok65qussSyhN5TTZP+OTgzEI'
devid = '22222222-2222-2222-2222-222222222222'
devpair = EncryptionPair(CryptoString('CURVE25519:@X~msiMmBq0nsNnn0%~x{M|NU_{?<Wj)cYybdh&Z'), CryptoString('CURVE25519:W30{oJ?w~NBbj{F8Ag4~<bcWy6_uQ{i{X?NDq4^l'))
dbdata['pwhash'] = pwhash
dbdata['devid'] = devid
dbdata['devpair'] = devpair
regcode_admin(dbdata, conn)
login_admin(dbdata, conn)
conn.send_message({'Action': 'SELECT', 'Data': {'Path': '/ 11111111-1111-1111-1111-111111111111'}})
response = conn.read_response(server_response)
assert (response['Code'] == 404), 'test_select: #1 failed to handle missing path'
admin_dir = os.path.join(dbdata['configfile']['global']['workspace_dir'], dbdata['admin_wid'])
status = make_test_file(admin_dir)
assert (not status.error()), 'test_select: #2 failed to create test file'
conn.send_message({'Action': 'SELECT', 'Data': {'Path': ' '.join(['/ wsp', dbdata['admin_wid'], status['name']])}})
response = conn.read_response(server_response)
assert (response['Code'] == 400), 'test_select: #2 failed to handle path as file'
innerpath = ' '.join(['/ wsp', dbdata['admin_wid'], '22222222-2222-2222-2222-222222222222'])
conn.send_message({'Action': 'MKDIR', 'Data': {'Path': innerpath}})
response = conn.read_response(server_response)
assert (response['Code'] == 200), 'test_select: #3 failed to create test directory'
conn.send_message({'Action': 'SELECT', 'Data': {'Path': innerpath}})
response = conn.read_response(server_response)
assert (response['Code'] == 200), 'test_select: #3 failed to work correctly'
conn.disconnect() |
def test_setquota():
'Tests the SETQUOTA command'
dbconn = setup_test()
dbdata = init_server(dbconn)
conn = ServerConnection()
assert conn.connect('localhost', 2001), 'Connection to server at localhost:2001 failed'
pwhash = '$argon2id$v=19$m=65536,t=2,p=1$ew5lqHA5z38za+257DmnTA$0LWVrI2r7XCqdcCYkJLok65qussSyhN5TTZP+OTgzEI'
devid = '22222222-2222-2222-2222-222222222222'
devpair = EncryptionPair(CryptoString('CURVE25519:@X~msiMmBq0nsNnn0%~x{M|NU_{?<Wj)cYybdh&Z'), CryptoString('CURVE25519:W30{oJ?w~NBbj{F8Ag4~<bcWy6_uQ{i{X?NDq4^l'))
dbdata['pwhash'] = pwhash
dbdata['devid'] = devid
dbdata['devpair'] = devpair
regcode_admin(dbdata, conn)
login_admin(dbdata, conn)
init_user(dbdata, conn)
init_user2(dbdata, conn)
conn.send_message({'Action': 'SETQUOTA', 'Data': {'Size': '0', 'Workspaces': '33333333-3333-3333-3333-333333333333'}})
response = conn.read_response(server_response)
assert (response['Code'] == 400), 'test_setquota: failed to handle bad size value'
conn.send_message({'Action': 'SETQUOTA', 'Data': {'Size': "Real programmers don't eat quiche ;)", 'Workspaces': '33333333-3333-3333-3333-333333333333'}})
response = conn.read_response(server_response)
assert (response['Code'] == 400), 'test_setquota: failed to handle bad size data type'
conn.send_message({'Action': 'SETQUOTA', 'Data': {'Size': '4096', 'Workspaces': '33333333-3333-3333-3333-333333333333,'}})
response = conn.read_response(server_response)
assert (response['Code'] == 400), 'test_setquota: failed to handle bad workspace list'
conn.send_message({'Action': 'SETQUOTA', 'Data': {'Size': '4096', 'Workspaces': '33333333-3333-3333-3333-333333333333, 44444444-4444-4444-4444-444444444444'}})
response = conn.read_response(server_response)
assert (response['Code'] == 200), 'test_setquota: failed to handle actual success'
conn.disconnect() | 6,664,158,948,814,282,000 | Tests the SETQUOTA command | tests/integration/test_fscmds.py | test_setquota | mensago/mensagod | python | def test_setquota():
dbconn = setup_test()
dbdata = init_server(dbconn)
conn = ServerConnection()
assert conn.connect('localhost', 2001), 'Connection to server at localhost:2001 failed'
pwhash = '$argon2id$v=19$m=65536,t=2,p=1$ew5lqHA5z38za+257DmnTA$0LWVrI2r7XCqdcCYkJLok65qussSyhN5TTZP+OTgzEI'
devid = '22222222-2222-2222-2222-222222222222'
devpair = EncryptionPair(CryptoString('CURVE25519:@X~msiMmBq0nsNnn0%~x{M|NU_{?<Wj)cYybdh&Z'), CryptoString('CURVE25519:W30{oJ?w~NBbj{F8Ag4~<bcWy6_uQ{i{X?NDq4^l'))
dbdata['pwhash'] = pwhash
dbdata['devid'] = devid
dbdata['devpair'] = devpair
regcode_admin(dbdata, conn)
login_admin(dbdata, conn)
init_user(dbdata, conn)
init_user2(dbdata, conn)
conn.send_message({'Action': 'SETQUOTA', 'Data': {'Size': '0', 'Workspaces': '33333333-3333-3333-3333-333333333333'}})
response = conn.read_response(server_response)
assert (response['Code'] == 400), 'test_setquota: failed to handle bad size value'
conn.send_message({'Action': 'SETQUOTA', 'Data': {'Size': "Real programmers don't eat quiche ;)", 'Workspaces': '33333333-3333-3333-3333-333333333333'}})
response = conn.read_response(server_response)
assert (response['Code'] == 400), 'test_setquota: failed to handle bad size data type'
conn.send_message({'Action': 'SETQUOTA', 'Data': {'Size': '4096', 'Workspaces': '33333333-3333-3333-3333-333333333333,'}})
response = conn.read_response(server_response)
assert (response['Code'] == 400), 'test_setquota: failed to handle bad workspace list'
conn.send_message({'Action': 'SETQUOTA', 'Data': {'Size': '4096', 'Workspaces': '33333333-3333-3333-3333-333333333333, 44444444-4444-4444-4444-444444444444'}})
response = conn.read_response(server_response)
assert (response['Code'] == 200), 'test_setquota: failed to handle actual success'
conn.disconnect() |
def test_upload():
'Tests the UPLOAD command'
dbconn = setup_test()
dbdata = init_server(dbconn)
conn = ServerConnection()
assert conn.connect('localhost', 2001), 'Connection to server at localhost:2001 failed'
reset_top_dir(dbdata)
pwhash = '$argon2id$v=19$m=65536,t=2,p=1$ew5lqHA5z38za+257DmnTA$0LWVrI2r7XCqdcCYkJLok65qussSyhN5TTZP+OTgzEI'
devid = '22222222-2222-2222-2222-222222222222'
devpair = EncryptionPair(CryptoString('CURVE25519:@X~msiMmBq0nsNnn0%~x{M|NU_{?<Wj)cYybdh&Z'), CryptoString('CURVE25519:W30{oJ?w~NBbj{F8Ag4~<bcWy6_uQ{i{X?NDq4^l'))
dbdata['pwhash'] = pwhash
dbdata['devid'] = devid
dbdata['devpair'] = devpair
regcode_admin(dbdata, conn)
login_admin(dbdata, conn)
init_user(dbdata, conn)
conn.send_message({'Action': 'UPLOAD', 'Data': {'Size': '1000', 'Path': ('/ wsp ' + dbdata['admin_wid'])}})
response = conn.read_response(server_response)
assert (response['Code'] == 400), 'test_upload: #1 failed to handle missing parameter'
conn.send_message({'Action': 'UPLOAD', 'Data': {'Size': '1000', 'Hash': 'BLAKE2B-256:4(8V*JuSdLH#SL%edxldiA<&TayrTtdIV9yiK~Tp', 'Path': (('/ wsp ' + dbdata['admin_wid']) + ' 22222222-2222-2222-2222-222222222222')}})
response = conn.read_response(server_response)
assert (response['Code'] == 404), 'test_upload: #2 failed to handle non-existent path'
conn.send_message({'Action': 'UPLOAD', 'Data': {'Size': str((1073741824 * 200)), 'Hash': 'BLAKE2B-256:4(8V*JuSdLH#SL%edxldiA<&TayrTtdIV9yiK~Tp', 'Path': ('/ wsp ' + dbdata['admin_wid'])}})
response = conn.read_response(server_response)
assert (response['Code'] == 414), 'test_upload: #3 failed to handle file too big'
cur = dbconn.cursor()
cur.execute(f"INSERT INTO quotas(wid, usage, quota) VALUES('{dbdata['admin_wid']}', 5100 , 5120)")
dbconn.commit()
conn.send_message({'Action': 'UPLOAD', 'Data': {'Size': str((1048576 * 30)), 'Hash': 'BLAKE2B-256:4(8V*JuSdLH#SL%edxldiA<&TayrTtdIV9yiK~Tp', 'Path': ('/ wsp ' + dbdata['admin_wid'])}})
response = conn.read_response(server_response)
assert (response['Code'] == 409), 'test_upload: #4 quota check failed'
cur = dbconn.cursor()
cur.execute(f"UPDATE quotas SET quota=0 WHERE wid = '{dbdata['admin_wid']}'")
dbconn.commit()
conn.send_message({'Action': 'UPLOAD', 'Data': {'Size': str(1000), 'Hash': 'BLAKE2B-256:5(8V*JuSdLH#SL%edxldiA<&TayrTtdIV9yiK~Tp', 'Path': ('/ wsp ' + dbdata['admin_wid'])}})
response = conn.read_response(server_response)
assert (response['Code'] == 100), 'test_upload: #5 failed to proceed to file upload'
conn.write(('0' * 1000))
response = conn.read_response(server_response)
assert (response['Code'] == 410), 'test_upload: #5 failed to handle file hash mismatch'
conn.send_message({'Action': 'UPLOAD', 'Data': {'Size': str(1000), 'Hash': 'BLAKE2B-256:4(8V*JuSdLH#SL%edxldiA<&TayrTtdIV9yiK~Tp', 'Path': ('/ wsp ' + dbdata['admin_wid'])}})
response = conn.read_response(server_response)
assert (response['Code'] == 100), 'test_upload: #6 failed to proceed to file upload'
conn.write(('0' * 1000))
response = conn.read_response(server_response)
assert (response['Code'] == 200), 'test_upload: #6 failed to handle matching file hash'
conn.send_message({'Action': 'UPLOAD', 'Data': {'Size': str(1000), 'Hash': 'BLAKE2B-256:4(8V*JuSdLH#SL%edxldiA<&TayrTtdIV9yiK~Tp', 'Path': ('/ wsp ' + dbdata['admin_wid'])}})
response = conn.read_response(server_response)
tempFileName = response['Data']['TempName']
assert (response['Code'] == 100), 'test_upload: #6 failed to proceed to file upload'
assert (tempFileName != ''), 'test_upload: #6 server failed to return temp file name'
conn.write(('0' * 500))
del conn
conn = ServerConnection()
assert conn.connect('localhost', 2001), 'Connection to server at localhost:2001 failed'
login_admin(dbdata, conn)
conn.send_message({'Action': 'UPLOAD', 'Data': {'Size': str(1000), 'Hash': 'BLAKE2B-256:4(8V*JuSdLH#SL%edxldiA<&TayrTtdIV9yiK~Tp', 'Path': ('/ wsp ' + dbdata['admin_wid']), 'TempName': tempFileName, 'Offset': '2000'}})
response = conn.read_response(server_response)
assert (response['Code'] == 400), 'test_upload: #7 failed to handle offset > file size'
conn.send_message({'Action': 'UPLOAD', 'Data': {'Size': str(1000), 'Hash': 'BLAKE2B-256:4(8V*JuSdLH#SL%edxldiA<&TayrTtdIV9yiK~Tp', 'Path': ('/ wsp ' + dbdata['admin_wid']), 'TempName': tempFileName, 'Offset': '500'}})
response = conn.read_response(server_response)
assert (response['Code'] == 100), 'test_upload: #8 failed to proceed to file upload'
conn.write(('0' * 500))
response = conn.read_response(server_response)
assert (response['Code'] == 200), 'test_upload: #8 failed to resume with exact offset match'
conn.send_message({'Action': 'UPLOAD', 'Data': {'Size': str(1000), 'Hash': 'BLAKE2B-256:4(8V*JuSdLH#SL%edxldiA<&TayrTtdIV9yiK~Tp', 'Path': ('/ wsp ' + dbdata['admin_wid'])}})
response = conn.read_response(server_response)
tempFileName = response['Data']['TempName']
assert (response['Code'] == 100), 'test_upload: #6 failed to proceed to file upload'
assert (tempFileName != ''), 'test_upload: #6 server failed to return temp file name'
conn.write(('0' * 500))
del conn
conn = ServerConnection()
assert conn.connect('localhost', 2001), 'Connection to server at localhost:2001 failed'
login_admin(dbdata, conn)
conn.send_message({'Action': 'UPLOAD', 'Data': {'Size': str(1000), 'Hash': 'BLAKE2B-256:4(8V*JuSdLH#SL%edxldiA<&TayrTtdIV9yiK~Tp', 'Path': ('/ wsp ' + dbdata['admin_wid']), 'TempName': tempFileName, 'Offset': '400'}})
response = conn.read_response(server_response)
assert (response['Code'] == 100), 'test_upload: #9 failed to proceed to file upload'
conn.write(('0' * 600))
response = conn.read_response(server_response)
assert (response['Code'] == 200), 'test_upload: #9 failed to resume with overlapping offset'
conn.disconnect() | -4,006,653,102,645,890,000 | Tests the UPLOAD command | tests/integration/test_fscmds.py | test_upload | mensago/mensagod | python | def test_upload():
dbconn = setup_test()
dbdata = init_server(dbconn)
conn = ServerConnection()
assert conn.connect('localhost', 2001), 'Connection to server at localhost:2001 failed'
reset_top_dir(dbdata)
pwhash = '$argon2id$v=19$m=65536,t=2,p=1$ew5lqHA5z38za+257DmnTA$0LWVrI2r7XCqdcCYkJLok65qussSyhN5TTZP+OTgzEI'
devid = '22222222-2222-2222-2222-222222222222'
devpair = EncryptionPair(CryptoString('CURVE25519:@X~msiMmBq0nsNnn0%~x{M|NU_{?<Wj)cYybdh&Z'), CryptoString('CURVE25519:W30{oJ?w~NBbj{F8Ag4~<bcWy6_uQ{i{X?NDq4^l'))
dbdata['pwhash'] = pwhash
dbdata['devid'] = devid
dbdata['devpair'] = devpair
regcode_admin(dbdata, conn)
login_admin(dbdata, conn)
init_user(dbdata, conn)
conn.send_message({'Action': 'UPLOAD', 'Data': {'Size': '1000', 'Path': ('/ wsp ' + dbdata['admin_wid'])}})
response = conn.read_response(server_response)
assert (response['Code'] == 400), 'test_upload: #1 failed to handle missing parameter'
conn.send_message({'Action': 'UPLOAD', 'Data': {'Size': '1000', 'Hash': 'BLAKE2B-256:4(8V*JuSdLH#SL%edxldiA<&TayrTtdIV9yiK~Tp', 'Path': (('/ wsp ' + dbdata['admin_wid']) + ' 22222222-2222-2222-2222-222222222222')}})
response = conn.read_response(server_response)
assert (response['Code'] == 404), 'test_upload: #2 failed to handle non-existent path'
conn.send_message({'Action': 'UPLOAD', 'Data': {'Size': str((1073741824 * 200)), 'Hash': 'BLAKE2B-256:4(8V*JuSdLH#SL%edxldiA<&TayrTtdIV9yiK~Tp', 'Path': ('/ wsp ' + dbdata['admin_wid'])}})
response = conn.read_response(server_response)
assert (response['Code'] == 414), 'test_upload: #3 failed to handle file too big'
cur = dbconn.cursor()
cur.execute(f"INSERT INTO quotas(wid, usage, quota) VALUES('{dbdata['admin_wid']}', 5100 , 5120)")
dbconn.commit()
conn.send_message({'Action': 'UPLOAD', 'Data': {'Size': str((1048576 * 30)), 'Hash': 'BLAKE2B-256:4(8V*JuSdLH#SL%edxldiA<&TayrTtdIV9yiK~Tp', 'Path': ('/ wsp ' + dbdata['admin_wid'])}})
response = conn.read_response(server_response)
assert (response['Code'] == 409), 'test_upload: #4 quota check failed'
cur = dbconn.cursor()
cur.execute(f"UPDATE quotas SET quota=0 WHERE wid = '{dbdata['admin_wid']}'")
dbconn.commit()
conn.send_message({'Action': 'UPLOAD', 'Data': {'Size': str(1000), 'Hash': 'BLAKE2B-256:5(8V*JuSdLH#SL%edxldiA<&TayrTtdIV9yiK~Tp', 'Path': ('/ wsp ' + dbdata['admin_wid'])}})
response = conn.read_response(server_response)
assert (response['Code'] == 100), 'test_upload: #5 failed to proceed to file upload'
conn.write(('0' * 1000))
response = conn.read_response(server_response)
assert (response['Code'] == 410), 'test_upload: #5 failed to handle file hash mismatch'
conn.send_message({'Action': 'UPLOAD', 'Data': {'Size': str(1000), 'Hash': 'BLAKE2B-256:4(8V*JuSdLH#SL%edxldiA<&TayrTtdIV9yiK~Tp', 'Path': ('/ wsp ' + dbdata['admin_wid'])}})
response = conn.read_response(server_response)
assert (response['Code'] == 100), 'test_upload: #6 failed to proceed to file upload'
conn.write(('0' * 1000))
response = conn.read_response(server_response)
assert (response['Code'] == 200), 'test_upload: #6 failed to handle matching file hash'
conn.send_message({'Action': 'UPLOAD', 'Data': {'Size': str(1000), 'Hash': 'BLAKE2B-256:4(8V*JuSdLH#SL%edxldiA<&TayrTtdIV9yiK~Tp', 'Path': ('/ wsp ' + dbdata['admin_wid'])}})
response = conn.read_response(server_response)
tempFileName = response['Data']['TempName']
assert (response['Code'] == 100), 'test_upload: #6 failed to proceed to file upload'
assert (tempFileName != ''), 'test_upload: #6 server failed to return temp file name'
conn.write(('0' * 500))
del conn
conn = ServerConnection()
assert conn.connect('localhost', 2001), 'Connection to server at localhost:2001 failed'
login_admin(dbdata, conn)
conn.send_message({'Action': 'UPLOAD', 'Data': {'Size': str(1000), 'Hash': 'BLAKE2B-256:4(8V*JuSdLH#SL%edxldiA<&TayrTtdIV9yiK~Tp', 'Path': ('/ wsp ' + dbdata['admin_wid']), 'TempName': tempFileName, 'Offset': '2000'}})
response = conn.read_response(server_response)
assert (response['Code'] == 400), 'test_upload: #7 failed to handle offset > file size'
conn.send_message({'Action': 'UPLOAD', 'Data': {'Size': str(1000), 'Hash': 'BLAKE2B-256:4(8V*JuSdLH#SL%edxldiA<&TayrTtdIV9yiK~Tp', 'Path': ('/ wsp ' + dbdata['admin_wid']), 'TempName': tempFileName, 'Offset': '500'}})
response = conn.read_response(server_response)
assert (response['Code'] == 100), 'test_upload: #8 failed to proceed to file upload'
conn.write(('0' * 500))
response = conn.read_response(server_response)
assert (response['Code'] == 200), 'test_upload: #8 failed to resume with exact offset match'
conn.send_message({'Action': 'UPLOAD', 'Data': {'Size': str(1000), 'Hash': 'BLAKE2B-256:4(8V*JuSdLH#SL%edxldiA<&TayrTtdIV9yiK~Tp', 'Path': ('/ wsp ' + dbdata['admin_wid'])}})
response = conn.read_response(server_response)
tempFileName = response['Data']['TempName']
assert (response['Code'] == 100), 'test_upload: #6 failed to proceed to file upload'
assert (tempFileName != ''), 'test_upload: #6 server failed to return temp file name'
conn.write(('0' * 500))
del conn
conn = ServerConnection()
assert conn.connect('localhost', 2001), 'Connection to server at localhost:2001 failed'
login_admin(dbdata, conn)
conn.send_message({'Action': 'UPLOAD', 'Data': {'Size': str(1000), 'Hash': 'BLAKE2B-256:4(8V*JuSdLH#SL%edxldiA<&TayrTtdIV9yiK~Tp', 'Path': ('/ wsp ' + dbdata['admin_wid']), 'TempName': tempFileName, 'Offset': '400'}})
response = conn.read_response(server_response)
assert (response['Code'] == 100), 'test_upload: #9 failed to proceed to file upload'
conn.write(('0' * 600))
response = conn.read_response(server_response)
assert (response['Code'] == 200), 'test_upload: #9 failed to resume with overlapping offset'
conn.disconnect() |
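The test above exercises the full resumable-upload flow: an UPLOAD request, a 100 response carrying a server-assigned TempName, a raw data write, and a later resume that passes TempName and Offset back. Below is a minimal client-side sketch of just the resume step, assuming the same ServerConnection helpers (send_message, read_response, write) and the server_response schema used in these tests; resume_upload is a hypothetical helper and the hash value is a placeholder:

def resume_upload(conn, wid, temp_name, offset, data):
    # Ask the server to continue a previously interrupted upload.
    conn.send_message({'Action': 'UPLOAD', 'Data': {
        'Size': str(offset + len(data)),   # total size of the complete file
        'Hash': 'BLAKE2B-256:...',         # placeholder: hash of the full file
        'Path': '/ wsp ' + wid,
        'TempName': temp_name,             # name returned by the interrupted upload
        'Offset': str(offset)}})           # resume point; overlap is tolerated
    if conn.read_response(server_response)['Code'] != 100:
        return False
    conn.write(data)                       # bytes from the offset onward
    return conn.read_response(server_response)['Code'] == 200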
def __init__(self, messageHandler, **kwargs):
'\n A constructor that will appropriately initialize a supervised learning object\n @ In, messageHandler, MessageHandler object, it is in charge of raising errors, and printing messages\n @ In, kwargs, dict, an arbitrary list of kwargs\n @ Out, None\n '
self.printTag = 'pickledROM'
self.messageHandler = messageHandler
self._dynamicHandling = False
self.initOptionDict = {}
self.features = ['PlaceHolder']
self.target = 'PlaceHolder' | 1,728,743,759,327,529,700 | A constructor that will appropriately initialize a supervised learning object
@ In, messageHandler, MessageHandler object, it is in charge of raising errors, and printing messages
@ In, kwargs, dict, an arbitrary list of kwargs
@ Out, None | framework/SupervisedLearning/pickledROM.py | __init__ | alptezbasaran/raven | python | def __init__(self, messageHandler, **kwargs):
'\n A constructor that will appropriately initialize a supervised learning object\n @ In, messageHandler, MessageHandler object, it is in charge of raising errors, and printing messages\n @ In, kwargs, dict, an arbitrary list of kwargs\n @ Out, None\n '
self.printTag = 'pickledROM'
self.messageHandler = messageHandler
self._dynamicHandling = False
self.initOptionDict = {}
self.features = ['PlaceHolder']
self.target = 'PlaceHolder' |
def __confidenceLocal__(self, featureVals):
'\n This should return an estimation of the quality of the prediction.\n @ In, featureVals, 2-D numpy array, [n_samples,n_features]\n @ Out, confidence, float, the confidence\n '
pass | -5,770,154,723,567,822,000 | This should return an estimation of the quality of the prediction.
@ In, featureVals, 2-D numpy array, [n_samples,n_features]
@ Out, confidence, float, the confidence | framework/SupervisedLearning/pickledROM.py | __confidenceLocal__ | alptezbasaran/raven | python | def __confidenceLocal__(self, featureVals):
'\n This should return an estimation of the quality of the prediction.\n @ In, featureVals, 2-D numpy array, [n_samples,n_features]\n @ Out, confidence, float, the confidence\n '
pass |
def __resetLocal__(self):
'\n Reset ROM. After this method the ROM should be described only by the initial parameter settings\n @ In, None\n @ Out, None\n '
pass | 3,285,244,487,013,803,000 | Reset ROM. After this method the ROM should be described only by the initial parameter settings
@ In, None
@ Out, None | framework/SupervisedLearning/pickledROM.py | __resetLocal__ | alptezbasaran/raven | python | def __resetLocal__(self):
'\n Reset ROM. After this method the ROM should be described only by the initial parameter settings\n @ In, None\n @ Out, None\n '
pass |
def __returnCurrentSettingLocal__(self):
'\n Returns a dictionary with the parameters and their current values\n @ In, None\n @ Out, params, dict, dictionary of parameter names and current values\n '
pass | -7,267,784,236,939,705,000 | Returns a dictionary with the parameters and their current values
@ In, None
@ Out, params, dict, dictionary of parameter names and current values | framework/SupervisedLearning/pickledROM.py | __returnCurrentSettingLocal__ | alptezbasaran/raven | python | def __returnCurrentSettingLocal__(self):
'\n Returns a dictionary with the parameters and their current values\n @ In, None\n @ Out, params, dict, dictionary of parameter names and current values\n '
pass |
def __returnInitialParametersLocal__(self):
'\n Returns a dictionary with the parameters and their initial values\n @ In, None\n @ Out, params, dict, dictionary of parameter names and initial values\n '
params = {}
return params | -1,226,478,902,455,152,400 | Returns a dictionary with the parameters and their initial values
@ In, None
@ Out, params, dict, dictionary of parameter names and initial values | framework/SupervisedLearning/pickledROM.py | __returnInitialParametersLocal__ | alptezbasaran/raven | python | def __returnInitialParametersLocal__(self):
'\n Returns a dictionary with the parameters and their initial values\n @ In, None\n @ Out, params, dict, dictionary of parameter names and initial values\n '
params = {}
return params |
def __evaluateLocal__(self, featureVals):
'\n Evaluates a point.\n @ In, featureVals, list, of values at which to evaluate the ROM\n @ Out, returnDict, dict, the evaluated point for each target\n '
self.raiseAnError(RuntimeError, 'PickledROM has not been loaded from file yet! An IO step is required to perform this action.') | 4,563,226,393,383,996,000 | Evaluates a point.
@ In, featureVals, list, of values at which to evaluate the ROM
@ Out, returnDict, dict, the evaluated point for each target | framework/SupervisedLearning/pickledROM.py | __evaluateLocal__ | alptezbasaran/raven | python | def __evaluateLocal__(self, featureVals):
'\n Evaluates a point.\n @ In, featureVals, list, of values at which to evaluate the ROM\n @ Out, returnDict, dict, the evaluated point for each target\n '
self.raiseAnError(RuntimeError, 'PickledROM has not been loaded from file yet! An IO step is required to perform this action.') |
def __trainLocal__(self, featureVals, targetVals):
'\n Trains ROM.\n @ In, featureVals, np.ndarray, feature values\n @ In, targetVals, np.ndarray, target values\n '
self.raiseAnError(RuntimeError, 'PickledROM has not been loaded from file yet! An IO step is required to perform this action.') | 9,086,658,020,351,408,000 | Trains ROM.
@ In, featureVals, np.ndarray, feature values
@ In, targetVals, np.ndarray, target values | framework/SupervisedLearning/pickledROM.py | __trainLocal__ | alptezbasaran/raven | python | def __trainLocal__(self, featureVals, targetVals):
'\n Trains ROM.\n @ In, featureVals, np.ndarray, feature values\n @ In, targetVals, np.ndarray, target values\n '
self.raiseAnError(RuntimeError, 'PickledROM has not been loaded from file yet! An IO step is required to perform this action.') |
def seed(seed=None):
"\n Seed the internal random number generator used in this ID package.\n\n The generator is a lagged Fibonacci method with 55-element internal state.\n\n Parameters\n ----------\n seed : int, sequence, 'default', optional\n If 'default', the random seed is reset to a default value.\n\n If `seed` is a sequence containing 55 floating-point numbers\n in range [0,1], these are used to set the internal state of\n the generator.\n\n If the value is an integer, the internal state is obtained\n from `numpy.random.RandomState` (MT19937) with the integer\n used as the initial seed.\n\n If `seed` is omitted (None), ``numpy.random.rand`` is used to\n initialize the generator.\n\n "
if (isinstance(seed, str) and (seed == 'default')):
_backend.id_srando()
elif hasattr(seed, '__len__'):
state = np.asfortranarray(seed, dtype=float)
if (state.shape != (55,)):
raise ValueError('invalid input size')
elif ((state.min() < 0) or (state.max() > 1)):
raise ValueError('values not in range [0,1]')
_backend.id_srandi(state)
elif (seed is None):
_backend.id_srandi(np.random.rand(55))
else:
rnd = np.random.RandomState(seed)
_backend.id_srandi(rnd.rand(55)) | -4,237,625,835,096,189,000 | Seed the internal random number generator used in this ID package.
The generator is a lagged Fibonacci method with 55-element internal state.
Parameters
----------
seed : int, sequence, 'default', optional
If 'default', the random seed is reset to a default value.
If `seed` is a sequence containing 55 floating-point numbers
in range [0,1], these are used to set the internal state of
the generator.
If the value is an integer, the internal state is obtained
from `numpy.random.RandomState` (MT19937) with the integer
used as the initial seed.
If `seed` is omitted (None), ``numpy.random.rand`` is used to
initialize the generator. | scipy/linalg/interpolative.py | seed | AtsushiSakai/scipy | python | def seed(seed=None):
"\n Seed the internal random number generator used in this ID package.\n\n The generator is a lagged Fibonacci method with 55-element internal state.\n\n Parameters\n ----------\n seed : int, sequence, 'default', optional\n If 'default', the random seed is reset to a default value.\n\n If `seed` is a sequence containing 55 floating-point numbers\n in range [0,1], these are used to set the internal state of\n the generator.\n\n If the value is an integer, the internal state is obtained\n from `numpy.random.RandomState` (MT19937) with the integer\n used as the initial seed.\n\n If `seed` is omitted (None), ``numpy.random.rand`` is used to\n initialize the generator.\n\n "
if (isinstance(seed, str) and (seed == 'default')):
_backend.id_srando()
elif hasattr(seed, '__len__'):
state = np.asfortranarray(seed, dtype=float)
if (state.shape != (55,)):
raise ValueError('invalid input size')
elif ((state.min() < 0) or (state.max() > 1)):
raise ValueError('values not in range [0,1]')
_backend.id_srandi(state)
elif (seed is None):
_backend.id_srandi(np.random.rand(55))
else:
rnd = np.random.RandomState(seed)
_backend.id_srandi(rnd.rand(55)) |
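A short usage sketch of the three seeding modes described above; the sli alias is an assumption and the 55-element state is just one valid choice:

import numpy as np
from scipy.linalg import interpolative as sli

sli.seed(1234)                   # integer: internal state drawn from numpy's MT19937
sli.seed(np.linspace(0, 1, 55))  # sequence: explicit 55-element state in [0, 1]
sli.seed('default')              # reset the lagged Fibonacci generator to its default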
def rand(*shape):
'\n Generate standard uniform pseudorandom numbers via a very efficient lagged\n Fibonacci method.\n\n This routine is used for all random number generation in this package and\n can affect ID and SVD results.\n\n Parameters\n ----------\n *shape\n Shape of output array\n\n '
return _backend.id_srand(np.prod(shape)).reshape(shape) | -4,954,254,495,813,813,000 | Generate standard uniform pseudorandom numbers via a very efficient lagged
Fibonacci method.
This routine is used for all random number generation in this package and
can affect ID and SVD results.
Parameters
----------
*shape
Shape of output array | scipy/linalg/interpolative.py | rand | AtsushiSakai/scipy | python | def rand(*shape):
'\n Generate standard uniform pseudorandom numbers via a very efficient lagged\n Fibonacci method.\n\n This routine is used for all random number generation in this package and\n can affect ID and SVD results.\n\n Parameters\n ----------\n *shape\n Shape of output array\n\n '
return _backend.id_srand(np.prod(shape)).reshape(shape) |
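A sketch showing that the stream is reproducible under reseeding; it reuses the sli alias from the seeding example above:

from scipy.linalg import interpolative as sli

sli.seed(1234)
x = sli.rand(3, 4)                    # shape follows the *shape arguments
sli.seed(1234)
assert (x == sli.rand(3, 4)).all()    # same seed, same draws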
def interp_decomp(A, eps_or_k, rand=True):
'\n Compute ID of a matrix.\n\n An ID of a matrix `A` is a factorization defined by a rank `k`, a column\n index array `idx`, and interpolation coefficients `proj` such that::\n\n numpy.dot(A[:,idx[:k]], proj) = A[:,idx[k:]]\n\n The original matrix can then be reconstructed as::\n\n numpy.hstack([A[:,idx[:k]],\n numpy.dot(A[:,idx[:k]], proj)]\n )[:,numpy.argsort(idx)]\n\n or via the routine :func:`reconstruct_matrix_from_id`. This can\n equivalently be written as::\n\n numpy.dot(A[:,idx[:k]],\n numpy.hstack([numpy.eye(k), proj])\n )[:,np.argsort(idx)]\n\n in terms of the skeleton and interpolation matrices::\n\n B = A[:,idx[:k]]\n\n and::\n\n P = numpy.hstack([numpy.eye(k), proj])[:,np.argsort(idx)]\n\n respectively. See also :func:`reconstruct_interp_matrix` and\n :func:`reconstruct_skel_matrix`.\n\n The ID can be computed to any relative precision or rank (depending on the\n value of `eps_or_k`). If a precision is specified (`eps_or_k < 1`), then\n this function has the output signature::\n\n k, idx, proj = interp_decomp(A, eps_or_k)\n\n Otherwise, if a rank is specified (`eps_or_k >= 1`), then the output\n signature is::\n\n idx, proj = interp_decomp(A, eps_or_k)\n\n .. This function automatically detects the form of the input parameters\n and passes them to the appropriate backend. For details, see\n :func:`_backend.iddp_id`, :func:`_backend.iddp_aid`,\n :func:`_backend.iddp_rid`, :func:`_backend.iddr_id`,\n :func:`_backend.iddr_aid`, :func:`_backend.iddr_rid`,\n :func:`_backend.idzp_id`, :func:`_backend.idzp_aid`,\n :func:`_backend.idzp_rid`, :func:`_backend.idzr_id`,\n :func:`_backend.idzr_aid`, and :func:`_backend.idzr_rid`.\n\n Parameters\n ----------\n A : :class:`numpy.ndarray` or :class:`scipy.sparse.linalg.LinearOperator` with `rmatvec`\n Matrix to be factored\n eps_or_k : float or int\n Relative error (if `eps_or_k < 1`) or rank (if `eps_or_k >= 1`) of\n approximation.\n rand : bool, optional\n Whether to use random sampling if `A` is of type :class:`numpy.ndarray`\n (randomized algorithms are always used if `A` is of type\n :class:`scipy.sparse.linalg.LinearOperator`).\n\n Returns\n -------\n k : int\n Rank required to achieve specified relative precision if\n `eps_or_k < 1`.\n idx : :class:`numpy.ndarray`\n Column index array.\n proj : :class:`numpy.ndarray`\n Interpolation coefficients.\n '
from scipy.sparse.linalg import LinearOperator
real = _is_real(A)
if isinstance(A, np.ndarray):
if (eps_or_k < 1):
eps = eps_or_k
if rand:
if real:
(k, idx, proj) = _backend.iddp_aid(eps, A)
else:
if _IS_32BIT:
raise _32BIT_ERROR
(k, idx, proj) = _backend.idzp_aid(eps, A)
elif real:
(k, idx, proj) = _backend.iddp_id(eps, A)
else:
(k, idx, proj) = _backend.idzp_id(eps, A)
return (k, (idx - 1), proj)
else:
k = int(eps_or_k)
if rand:
if real:
(idx, proj) = _backend.iddr_aid(A, k)
else:
if _IS_32BIT:
raise _32BIT_ERROR
(idx, proj) = _backend.idzr_aid(A, k)
elif real:
(idx, proj) = _backend.iddr_id(A, k)
else:
(idx, proj) = _backend.idzr_id(A, k)
return ((idx - 1), proj)
elif isinstance(A, LinearOperator):
(m, n) = A.shape
matveca = A.rmatvec
if (eps_or_k < 1):
eps = eps_or_k
if real:
(k, idx, proj) = _backend.iddp_rid(eps, m, n, matveca)
else:
if _IS_32BIT:
raise _32BIT_ERROR
(k, idx, proj) = _backend.idzp_rid(eps, m, n, matveca)
return (k, (idx - 1), proj)
else:
k = int(eps_or_k)
if real:
(idx, proj) = _backend.iddr_rid(m, n, matveca, k)
else:
if _IS_32BIT:
raise _32BIT_ERROR
(idx, proj) = _backend.idzr_rid(m, n, matveca, k)
return ((idx - 1), proj)
else:
raise _TYPE_ERROR | -2,603,866,810,322,402,000 | Compute ID of a matrix.
An ID of a matrix `A` is a factorization defined by a rank `k`, a column
index array `idx`, and interpolation coefficients `proj` such that::
numpy.dot(A[:,idx[:k]], proj) = A[:,idx[k:]]
The original matrix can then be reconstructed as::
numpy.hstack([A[:,idx[:k]],
numpy.dot(A[:,idx[:k]], proj)]
)[:,numpy.argsort(idx)]
or via the routine :func:`reconstruct_matrix_from_id`. This can
equivalently be written as::
numpy.dot(A[:,idx[:k]],
numpy.hstack([numpy.eye(k), proj])
)[:,np.argsort(idx)]
in terms of the skeleton and interpolation matrices::
B = A[:,idx[:k]]
and::
P = numpy.hstack([numpy.eye(k), proj])[:,np.argsort(idx)]
respectively. See also :func:`reconstruct_interp_matrix` and
:func:`reconstruct_skel_matrix`.
The ID can be computed to any relative precision or rank (depending on the
value of `eps_or_k`). If a precision is specified (`eps_or_k < 1`), then
this function has the output signature::
k, idx, proj = interp_decomp(A, eps_or_k)
Otherwise, if a rank is specified (`eps_or_k >= 1`), then the output
signature is::
idx, proj = interp_decomp(A, eps_or_k)
.. This function automatically detects the form of the input parameters
and passes them to the appropriate backend. For details, see
:func:`_backend.iddp_id`, :func:`_backend.iddp_aid`,
:func:`_backend.iddp_rid`, :func:`_backend.iddr_id`,
:func:`_backend.iddr_aid`, :func:`_backend.iddr_rid`,
:func:`_backend.idzp_id`, :func:`_backend.idzp_aid`,
:func:`_backend.idzp_rid`, :func:`_backend.idzr_id`,
:func:`_backend.idzr_aid`, and :func:`_backend.idzr_rid`.
Parameters
----------
A : :class:`numpy.ndarray` or :class:`scipy.sparse.linalg.LinearOperator` with `rmatvec`
Matrix to be factored
eps_or_k : float or int
Relative error (if `eps_or_k < 1`) or rank (if `eps_or_k >= 1`) of
approximation.
rand : bool, optional
Whether to use random sampling if `A` is of type :class:`numpy.ndarray`
(randomized algorithms are always used if `A` is of type
:class:`scipy.sparse.linalg.LinearOperator`).
Returns
-------
k : int
Rank required to achieve specified relative precision if
`eps_or_k < 1`.
idx : :class:`numpy.ndarray`
Column index array.
proj : :class:`numpy.ndarray`
Interpolation coefficients. | scipy/linalg/interpolative.py | interp_decomp | AtsushiSakai/scipy | python | def interp_decomp(A, eps_or_k, rand=True):
'\n Compute ID of a matrix.\n\n An ID of a matrix `A` is a factorization defined by a rank `k`, a column\n index array `idx`, and interpolation coefficients `proj` such that::\n\n numpy.dot(A[:,idx[:k]], proj) = A[:,idx[k:]]\n\n The original matrix can then be reconstructed as::\n\n numpy.hstack([A[:,idx[:k]],\n numpy.dot(A[:,idx[:k]], proj)]\n )[:,numpy.argsort(idx)]\n\n or via the routine :func:`reconstruct_matrix_from_id`. This can\n equivalently be written as::\n\n numpy.dot(A[:,idx[:k]],\n numpy.hstack([numpy.eye(k), proj])\n )[:,np.argsort(idx)]\n\n in terms of the skeleton and interpolation matrices::\n\n B = A[:,idx[:k]]\n\n and::\n\n P = numpy.hstack([numpy.eye(k), proj])[:,np.argsort(idx)]\n\n respectively. See also :func:`reconstruct_interp_matrix` and\n :func:`reconstruct_skel_matrix`.\n\n The ID can be computed to any relative precision or rank (depending on the\n value of `eps_or_k`). If a precision is specified (`eps_or_k < 1`), then\n this function has the output signature::\n\n k, idx, proj = interp_decomp(A, eps_or_k)\n\n Otherwise, if a rank is specified (`eps_or_k >= 1`), then the output\n signature is::\n\n idx, proj = interp_decomp(A, eps_or_k)\n\n .. This function automatically detects the form of the input parameters\n and passes them to the appropriate backend. For details, see\n :func:`_backend.iddp_id`, :func:`_backend.iddp_aid`,\n :func:`_backend.iddp_rid`, :func:`_backend.iddr_id`,\n :func:`_backend.iddr_aid`, :func:`_backend.iddr_rid`,\n :func:`_backend.idzp_id`, :func:`_backend.idzp_aid`,\n :func:`_backend.idzp_rid`, :func:`_backend.idzr_id`,\n :func:`_backend.idzr_aid`, and :func:`_backend.idzr_rid`.\n\n Parameters\n ----------\n A : :class:`numpy.ndarray` or :class:`scipy.sparse.linalg.LinearOperator` with `rmatvec`\n Matrix to be factored\n eps_or_k : float or int\n Relative error (if `eps_or_k < 1`) or rank (if `eps_or_k >= 1`) of\n approximation.\n rand : bool, optional\n Whether to use random sampling if `A` is of type :class:`numpy.ndarray`\n (randomized algorithms are always used if `A` is of type\n :class:`scipy.sparse.linalg.LinearOperator`).\n\n Returns\n -------\n k : int\n Rank required to achieve specified relative precision if\n `eps_or_k < 1`.\n idx : :class:`numpy.ndarray`\n Column index array.\n proj : :class:`numpy.ndarray`\n Interpolation coefficients.\n '
from scipy.sparse.linalg import LinearOperator
real = _is_real(A)
if isinstance(A, np.ndarray):
if (eps_or_k < 1):
eps = eps_or_k
if rand:
if real:
(k, idx, proj) = _backend.iddp_aid(eps, A)
else:
if _IS_32BIT:
raise _32BIT_ERROR
(k, idx, proj) = _backend.idzp_aid(eps, A)
elif real:
(k, idx, proj) = _backend.iddp_id(eps, A)
else:
(k, idx, proj) = _backend.idzp_id(eps, A)
return (k, (idx - 1), proj)
else:
k = int(eps_or_k)
if rand:
if real:
(idx, proj) = _backend.iddr_aid(A, k)
else:
if _IS_32BIT:
raise _32BIT_ERROR
(idx, proj) = _backend.idzr_aid(A, k)
elif real:
(idx, proj) = _backend.iddr_id(A, k)
else:
(idx, proj) = _backend.idzr_id(A, k)
return ((idx - 1), proj)
elif isinstance(A, LinearOperator):
(m, n) = A.shape
matveca = A.rmatvec
if (eps_or_k < 1):
eps = eps_or_k
if real:
(k, idx, proj) = _backend.iddp_rid(eps, m, n, matveca)
else:
if _IS_32BIT:
raise _32BIT_ERROR
(k, idx, proj) = _backend.idzp_rid(eps, m, n, matveca)
return (k, (idx - 1), proj)
else:
k = int(eps_or_k)
if real:
(idx, proj) = _backend.iddr_rid(m, n, matveca, k)
else:
if _IS_32BIT:
raise _32BIT_ERROR
(idx, proj) = _backend.idzr_rid(m, n, matveca, k)
return ((idx - 1), proj)
else:
raise _TYPE_ERROR |
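A minimal sketch of both calling conventions on a numerically low-rank ndarray; the sizes and tolerance are arbitrary:

import numpy as np
from scipy.linalg import interpolative as sli

A = np.dot(np.random.rand(40, 3), np.random.rand(3, 60))  # rank-3 test matrix

# Precision mode (eps_or_k < 1): the detected rank k is part of the output.
k, idx, proj = sli.interp_decomp(A, 1e-8)

# Rank mode (eps_or_k >= 1): the rank is fixed up front, so only idx and proj return.
idx3, proj3 = sli.interp_decomp(A, 3)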
def reconstruct_matrix_from_id(B, idx, proj):
'\n Reconstruct matrix from its ID.\n\n A matrix `A` with skeleton matrix `B` and ID indices and coefficients `idx`\n and `proj`, respectively, can be reconstructed as::\n\n numpy.hstack([B, numpy.dot(B, proj)])[:,numpy.argsort(idx)]\n\n See also :func:`reconstruct_interp_matrix` and\n :func:`reconstruct_skel_matrix`.\n\n .. This function automatically detects the matrix data type and calls the\n appropriate backend. For details, see :func:`_backend.idd_reconid` and\n :func:`_backend.idz_reconid`.\n\n Parameters\n ----------\n B : :class:`numpy.ndarray`\n Skeleton matrix.\n idx : :class:`numpy.ndarray`\n Column index array.\n proj : :class:`numpy.ndarray`\n Interpolation coefficients.\n\n Returns\n -------\n :class:`numpy.ndarray`\n Reconstructed matrix.\n '
if _is_real(B):
return _backend.idd_reconid(B, (idx + 1), proj)
else:
return _backend.idz_reconid(B, (idx + 1), proj) | 468,255,064,324,423,800 | Reconstruct matrix from its ID.
A matrix `A` with skeleton matrix `B` and ID indices and coefficients `idx`
and `proj`, respectively, can be reconstructed as::
numpy.hstack([B, numpy.dot(B, proj)])[:,numpy.argsort(idx)]
See also :func:`reconstruct_interp_matrix` and
:func:`reconstruct_skel_matrix`.
.. This function automatically detects the matrix data type and calls the
appropriate backend. For details, see :func:`_backend.idd_reconid` and
:func:`_backend.idz_reconid`.
Parameters
----------
B : :class:`numpy.ndarray`
Skeleton matrix.
idx : :class:`numpy.ndarray`
Column index array.
proj : :class:`numpy.ndarray`
Interpolation coefficients.
Returns
-------
:class:`numpy.ndarray`
Reconstructed matrix. | scipy/linalg/interpolative.py | reconstruct_matrix_from_id | AtsushiSakai/scipy | python | def reconstruct_matrix_from_id(B, idx, proj):
'\n Reconstruct matrix from its ID.\n\n A matrix `A` with skeleton matrix `B` and ID indices and coefficients `idx`\n and `proj`, respectively, can be reconstructed as::\n\n numpy.hstack([B, numpy.dot(B, proj)])[:,numpy.argsort(idx)]\n\n See also :func:`reconstruct_interp_matrix` and\n :func:`reconstruct_skel_matrix`.\n\n .. This function automatically detects the matrix data type and calls the\n appropriate backend. For details, see :func:`_backend.idd_reconid` and\n :func:`_backend.idz_reconid`.\n\n Parameters\n ----------\n B : :class:`numpy.ndarray`\n Skeleton matrix.\n idx : :class:`numpy.ndarray`\n Column index array.\n proj : :class:`numpy.ndarray`\n Interpolation coefficients.\n\n Returns\n -------\n :class:`numpy.ndarray`\n Reconstructed matrix.\n '
if _is_real(B):
return _backend.idd_reconid(B, (idx + 1), proj)
else:
return _backend.idz_reconid(B, (idx + 1), proj) |
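Continuing the interp_decomp sketch above (same A, k, idx, proj), reconstruction is a single call; the relative error should land near the requested precision:

B = A[:, idx[:k]]                                        # skeleton columns
A_approx = sli.reconstruct_matrix_from_id(B, idx, proj)
rel_err = np.linalg.norm(A - A_approx) / np.linalg.norm(A)   # ~1e-8 here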
def reconstruct_interp_matrix(idx, proj):
'\n Reconstruct interpolation matrix from ID.\n\n The interpolation matrix can be reconstructed from the ID indices and\n coefficients `idx` and `proj`, respectively, as::\n\n P = numpy.hstack([numpy.eye(proj.shape[0]), proj])[:,numpy.argsort(idx)]\n\n The original matrix can then be reconstructed from its skeleton matrix `B`\n via::\n\n numpy.dot(B, P)\n\n See also :func:`reconstruct_matrix_from_id` and\n :func:`reconstruct_skel_matrix`.\n\n .. This function automatically detects the matrix data type and calls the\n appropriate backend. For details, see :func:`_backend.idd_reconint` and\n :func:`_backend.idz_reconint`.\n\n Parameters\n ----------\n idx : :class:`numpy.ndarray`\n Column index array.\n proj : :class:`numpy.ndarray`\n Interpolation coefficients.\n\n Returns\n -------\n :class:`numpy.ndarray`\n Interpolation matrix.\n '
if _is_real(proj):
return _backend.idd_reconint((idx + 1), proj)
else:
return _backend.idz_reconint((idx + 1), proj) | -7,668,722,547,010,333,000 | Reconstruct interpolation matrix from ID.
The interpolation matrix can be reconstructed from the ID indices and
coefficients `idx` and `proj`, respectively, as::
P = numpy.hstack([numpy.eye(proj.shape[0]), proj])[:,numpy.argsort(idx)]
The original matrix can then be reconstructed from its skeleton matrix `B`
via::
numpy.dot(B, P)
See also :func:`reconstruct_matrix_from_id` and
:func:`reconstruct_skel_matrix`.
.. This function automatically detects the matrix data type and calls the
appropriate backend. For details, see :func:`_backend.idd_reconint` and
:func:`_backend.idz_reconint`.
Parameters
----------
idx : :class:`numpy.ndarray`
Column index array.
proj : :class:`numpy.ndarray`
Interpolation coefficients.
Returns
-------
:class:`numpy.ndarray`
Interpolation matrix. | scipy/linalg/interpolative.py | reconstruct_interp_matrix | AtsushiSakai/scipy | python | def reconstruct_interp_matrix(idx, proj):
'\n Reconstruct interpolation matrix from ID.\n\n The interpolation matrix can be reconstructed from the ID indices and\n coefficients `idx` and `proj`, respectively, as::\n\n P = numpy.hstack([numpy.eye(proj.shape[0]), proj])[:,numpy.argsort(idx)]\n\n The original matrix can then be reconstructed from its skeleton matrix `B`\n via::\n\n numpy.dot(B, P)\n\n See also :func:`reconstruct_matrix_from_id` and\n :func:`reconstruct_skel_matrix`.\n\n .. This function automatically detects the matrix data type and calls the\n appropriate backend. For details, see :func:`_backend.idd_reconint` and\n :func:`_backend.idz_reconint`.\n\n Parameters\n ----------\n idx : :class:`numpy.ndarray`\n Column index array.\n proj : :class:`numpy.ndarray`\n Interpolation coefficients.\n\n Returns\n -------\n :class:`numpy.ndarray`\n Interpolation matrix.\n '
if _is_real(proj):
return _backend.idd_reconint((idx + 1), proj)
else:
return _backend.idz_reconint((idx + 1), proj) |
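The same approximation expressed through the interpolation matrix, continuing the variables above:

P = sli.reconstruct_interp_matrix(idx, proj)   # k x n, with columns permuted back
A_via_P = np.dot(B, P)                         # matches A_approx from the previous sketch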
def reconstruct_skel_matrix(A, k, idx):
'\n Reconstruct skeleton matrix from ID.\n\n The skeleton matrix can be reconstructed from the original matrix `A` and its\n ID rank and indices `k` and `idx`, respectively, as::\n\n B = A[:,idx[:k]]\n\n The original matrix can then be reconstructed via::\n\n numpy.hstack([B, numpy.dot(B, proj)])[:,numpy.argsort(idx)]\n\n See also :func:`reconstruct_matrix_from_id` and\n :func:`reconstruct_interp_matrix`.\n\n .. This function automatically detects the matrix data type and calls the\n appropriate backend. For details, see :func:`_backend.idd_copycols` and\n :func:`_backend.idz_copycols`.\n\n Parameters\n ----------\n A : :class:`numpy.ndarray`\n Original matrix.\n k : int\n Rank of ID.\n idx : :class:`numpy.ndarray`\n Column index array.\n\n Returns\n -------\n :class:`numpy.ndarray`\n Skeleton matrix.\n '
if _is_real(A):
return _backend.idd_copycols(A, k, (idx + 1))
else:
return _backend.idz_copycols(A, k, (idx + 1)) | 8,174,668,372,000,422,000 | Reconstruct skeleton matrix from ID.
The skeleton matrix can be reconstructed from the original matrix `A` and its
ID rank and indices `k` and `idx`, respectively, as::
B = A[:,idx[:k]]
The original matrix can then be reconstructed via::
numpy.hstack([B, numpy.dot(B, proj)])[:,numpy.argsort(idx)]
See also :func:`reconstruct_matrix_from_id` and
:func:`reconstruct_interp_matrix`.
.. This function automatically detects the matrix data type and calls the
appropriate backend. For details, see :func:`_backend.idd_copycols` and
:func:`_backend.idz_copycols`.
Parameters
----------
A : :class:`numpy.ndarray`
Original matrix.
k : int
Rank of ID.
idx : :class:`numpy.ndarray`
Column index array.
Returns
-------
:class:`numpy.ndarray`
Skeleton matrix. | scipy/linalg/interpolative.py | reconstruct_skel_matrix | AtsushiSakai/scipy | python | def reconstruct_skel_matrix(A, k, idx):
'\n Reconstruct skeleton matrix from ID.\n\n The skeleton matrix can be reconstructed from the original matrix `A` and its\n ID rank and indices `k` and `idx`, respectively, as::\n\n B = A[:,idx[:k]]\n\n The original matrix can then be reconstructed via::\n\n numpy.hstack([B, numpy.dot(B, proj)])[:,numpy.argsort(idx)]\n\n See also :func:`reconstruct_matrix_from_id` and\n :func:`reconstruct_interp_matrix`.\n\n .. This function automatically detects the matrix data type and calls the\n appropriate backend. For details, see :func:`_backend.idd_copycols` and\n :func:`_backend.idz_copycols`.\n\n Parameters\n ----------\n A : :class:`numpy.ndarray`\n Original matrix.\n k : int\n Rank of ID.\n idx : :class:`numpy.ndarray`\n Column index array.\n\n Returns\n -------\n :class:`numpy.ndarray`\n Skeleton matrix.\n '
if _is_real(A):
return _backend.idd_copycols(A, k, (idx + 1))
else:
return _backend.idz_copycols(A, k, (idx + 1)) |
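Extracting the skeleton with the library call instead of fancy indexing, continuing the same sketch; both forms select the same columns:

B2 = sli.reconstruct_skel_matrix(A, k, idx)
assert np.array_equal(B2, A[:, idx[:k]])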
def id_to_svd(B, idx, proj):
'\n Convert ID to SVD.\n\n The SVD reconstruction of a matrix with skeleton matrix `B` and ID indices and\n coefficients `idx` and `proj`, respectively, is::\n\n U, S, V = id_to_svd(B, idx, proj)\n A = numpy.dot(U, numpy.dot(numpy.diag(S), V.conj().T))\n\n See also :func:`svd`.\n\n .. This function automatically detects the matrix data type and calls the\n appropriate backend. For details, see :func:`_backend.idd_id2svd` and\n :func:`_backend.idz_id2svd`.\n\n Parameters\n ----------\n B : :class:`numpy.ndarray`\n Skeleton matrix.\n idx : :class:`numpy.ndarray`\n Column index array.\n proj : :class:`numpy.ndarray`\n Interpolation coefficients.\n\n Returns\n -------\n U : :class:`numpy.ndarray`\n Left singular vectors.\n S : :class:`numpy.ndarray`\n Singular values.\n V : :class:`numpy.ndarray`\n Right singular vectors.\n '
if _is_real(B):
(U, V, S) = _backend.idd_id2svd(B, (idx + 1), proj)
else:
(U, V, S) = _backend.idz_id2svd(B, (idx + 1), proj)
return (U, S, V) | -5,234,185,594,294,006,000 | Convert ID to SVD.
The SVD reconstruction of a matrix with skeleton matrix `B` and ID indices and
coefficients `idx` and `proj`, respectively, is::
U, S, V = id_to_svd(B, idx, proj)
A = numpy.dot(U, numpy.dot(numpy.diag(S), V.conj().T))
See also :func:`svd`.
.. This function automatically detects the matrix data type and calls the
appropriate backend. For details, see :func:`_backend.idd_id2svd` and
:func:`_backend.idz_id2svd`.
Parameters
----------
B : :class:`numpy.ndarray`
Skeleton matrix.
idx : :class:`numpy.ndarray`
Column index array.
proj : :class:`numpy.ndarray`
Interpolation coefficients.
Returns
-------
U : :class:`numpy.ndarray`
Left singular vectors.
S : :class:`numpy.ndarray`
Singular values.
V : :class:`numpy.ndarray`
Right singular vectors. | scipy/linalg/interpolative.py | id_to_svd | AtsushiSakai/scipy | python | def id_to_svd(B, idx, proj):
'\n Convert ID to SVD.\n\n The SVD reconstruction of a matrix with skeleton matrix `B` and ID indices and\n coefficients `idx` and `proj`, respectively, is::\n\n U, S, V = id_to_svd(B, idx, proj)\n A = numpy.dot(U, numpy.dot(numpy.diag(S), V.conj().T))\n\n See also :func:`svd`.\n\n .. This function automatically detects the matrix data type and calls the\n appropriate backend. For details, see :func:`_backend.idd_id2svd` and\n :func:`_backend.idz_id2svd`.\n\n Parameters\n ----------\n B : :class:`numpy.ndarray`\n Skeleton matrix.\n idx : :class:`numpy.ndarray`\n Column index array.\n proj : :class:`numpy.ndarray`\n Interpolation coefficients.\n\n Returns\n -------\n U : :class:`numpy.ndarray`\n Left singular vectors.\n S : :class:`numpy.ndarray`\n Singular values.\n V : :class:`numpy.ndarray`\n Right singular vectors.\n '
if _is_real(B):
(U, V, S) = _backend.idd_id2svd(B, (idx + 1), proj)
else:
(U, V, S) = _backend.idz_id2svd(B, (idx + 1), proj)
return (U, S, V) |
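Converting the ID into an SVD, continuing the same variables; scaling the columns of U by S is equivalent to multiplying by diag(S):

U, S, V = sli.id_to_svd(B, idx, proj)
A_svd = np.dot(U * S, V.conj().T)   # U @ diag(S) @ V^H
rel_err = np.linalg.norm(A - A_svd) / np.linalg.norm(A)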
def estimate_spectral_norm(A, its=20):
'\n Estimate spectral norm of a matrix by the randomized power method.\n\n .. This function automatically detects the matrix data type and calls the\n appropriate backend. For details, see :func:`_backend.idd_snorm` and\n :func:`_backend.idz_snorm`.\n\n Parameters\n ----------\n A : :class:`scipy.sparse.linalg.LinearOperator`\n Matrix given as a :class:`scipy.sparse.linalg.LinearOperator` with the\n `matvec` and `rmatvec` methods (to apply the matrix and its adjoint).\n its : int, optional\n Number of power method iterations.\n\n Returns\n -------\n float\n Spectral norm estimate.\n '
from scipy.sparse.linalg import aslinearoperator
A = aslinearoperator(A)
(m, n) = A.shape
matvec = (lambda x: A.matvec(x))
matveca = (lambda x: A.rmatvec(x))
if _is_real(A):
return _backend.idd_snorm(m, n, matveca, matvec, its=its)
else:
return _backend.idz_snorm(m, n, matveca, matvec, its=its) | -5,031,942,864,439,588,000 | Estimate spectral norm of a matrix by the randomized power method.
.. This function automatically detects the matrix data type and calls the
appropriate backend. For details, see :func:`_backend.idd_snorm` and
:func:`_backend.idz_snorm`.
Parameters
----------
A : :class:`scipy.sparse.linalg.LinearOperator`
Matrix given as a :class:`scipy.sparse.linalg.LinearOperator` with the
`matvec` and `rmatvec` methods (to apply the matrix and its adjoint).
its : int, optional
Number of power method iterations.
Returns
-------
float
Spectral norm estimate. | scipy/linalg/interpolative.py | estimate_spectral_norm | AtsushiSakai/scipy | python | def estimate_spectral_norm(A, its=20):
'\n Estimate spectral norm of a matrix by the randomized power method.\n\n .. This function automatically detects the matrix data type and calls the\n appropriate backend. For details, see :func:`_backend.idd_snorm` and\n :func:`_backend.idz_snorm`.\n\n Parameters\n ----------\n A : :class:`scipy.sparse.linalg.LinearOperator`\n Matrix given as a :class:`scipy.sparse.linalg.LinearOperator` with the\n `matvec` and `rmatvec` methods (to apply the matrix and its adjoint).\n its : int, optional\n Number of power method iterations.\n\n Returns\n -------\n float\n Spectral norm estimate.\n '
from scipy.sparse.linalg import aslinearoperator
A = aslinearoperator(A)
(m, n) = A.shape
matvec = (lambda x: A.matvec(x))
matveca = (lambda x: A.rmatvec(x))
if _is_real(A):
return _backend.idd_snorm(m, n, matveca, matvec, its=its)
else:
return _backend.idz_snorm(m, n, matveca, matvec, its=its) |
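A sketch comparing the randomized estimate with the exact 2-norm on a small dense matrix; wrapping with aslinearoperator matches the documented parameter type:

from scipy.sparse.linalg import aslinearoperator

M = np.random.rand(30, 20)
est = sli.estimate_spectral_norm(aslinearoperator(M), its=20)
exact = np.linalg.norm(M, 2)   # est should be close to this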
def estimate_spectral_norm_diff(A, B, its=20):
'\n Estimate spectral norm of the difference of two matrices by the randomized\n power method.\n\n .. This function automatically detects the matrix data type and calls the\n appropriate backend. For details, see :func:`_backend.idd_diffsnorm` and\n :func:`_backend.idz_diffsnorm`.\n\n Parameters\n ----------\n A : :class:`scipy.sparse.linalg.LinearOperator`\n First matrix given as a :class:`scipy.sparse.linalg.LinearOperator` with the\n `matvec` and `rmatvec` methods (to apply the matrix and its adjoint).\n B : :class:`scipy.sparse.linalg.LinearOperator`\n Second matrix given as a :class:`scipy.sparse.linalg.LinearOperator` with\n the `matvec` and `rmatvec` methods (to apply the matrix and its adjoint).\n its : int, optional\n Number of power method iterations.\n\n Returns\n -------\n float\n Spectral norm estimate of matrix difference.\n '
from scipy.sparse.linalg import aslinearoperator
A = aslinearoperator(A)
B = aslinearoperator(B)
(m, n) = A.shape
matvec1 = (lambda x: A.matvec(x))
matveca1 = (lambda x: A.rmatvec(x))
matvec2 = (lambda x: B.matvec(x))
matveca2 = (lambda x: B.rmatvec(x))
if _is_real(A):
return _backend.idd_diffsnorm(m, n, matveca1, matveca2, matvec1, matvec2, its=its)
else:
return _backend.idz_diffsnorm(m, n, matveca1, matveca2, matvec1, matvec2, its=its) | -2,420,848,259,595,438,000 | Estimate spectral norm of the difference of two matrices by the randomized
power method.
.. This function automatically detects the matrix data type and calls the
appropriate backend. For details, see :func:`_backend.idd_diffsnorm` and
:func:`_backend.idz_diffsnorm`.
Parameters
----------
A : :class:`scipy.sparse.linalg.LinearOperator`
First matrix given as a :class:`scipy.sparse.linalg.LinearOperator` with the
`matvec` and `rmatvec` methods (to apply the matrix and its adjoint).
B : :class:`scipy.sparse.linalg.LinearOperator`
Second matrix given as a :class:`scipy.sparse.linalg.LinearOperator` with
the `matvec` and `rmatvec` methods (to apply the matrix and its adjoint).
its : int, optional
Number of power method iterations.
Returns
-------
float
Spectral norm estimate of matrix difference. | scipy/linalg/interpolative.py | estimate_spectral_norm_diff | AtsushiSakai/scipy | python | def estimate_spectral_norm_diff(A, B, its=20):
'\n Estimate spectral norm of the difference of two matrices by the randomized\n power method.\n\n .. This function automatically detects the matrix data type and calls the\n appropriate backend. For details, see :func:`_backend.idd_diffsnorm` and\n :func:`_backend.idz_diffsnorm`.\n\n Parameters\n ----------\n A : :class:`scipy.sparse.linalg.LinearOperator`\n First matrix given as a :class:`scipy.sparse.linalg.LinearOperator` with the\n `matvec` and `rmatvec` methods (to apply the matrix and its adjoint).\n B : :class:`scipy.sparse.linalg.LinearOperator`\n Second matrix given as a :class:`scipy.sparse.linalg.LinearOperator` with\n the `matvec` and `rmatvec` methods (to apply the matrix and its adjoint).\n its : int, optional\n Number of power method iterations.\n\n Returns\n -------\n float\n Spectral norm estimate of matrix difference.\n '
from scipy.sparse.linalg import aslinearoperator
A = aslinearoperator(A)
B = aslinearoperator(B)
(m, n) = A.shape
matvec1 = (lambda x: A.matvec(x))
matveca1 = (lambda x: A.rmatvec(x))
matvec2 = (lambda x: B.matvec(x))
matveca2 = (lambda x: B.rmatvec(x))
if _is_real(A):
return _backend.idd_diffsnorm(m, n, matveca1, matveca2, matvec1, matvec2, its=its)
else:
return _backend.idz_diffsnorm(m, n, matveca1, matveca2, matvec1, matvec2, its=its) |
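The difference variant follows the same pattern; continuing M from the sketch above, a small known perturbation makes the estimate easy to sanity-check:

N = M + 1e-3 * np.random.rand(30, 20)
diff_est = sli.estimate_spectral_norm_diff(aslinearoperator(M), aslinearoperator(N), its=20)
diff_exact = np.linalg.norm(M - N, 2)   # diff_est approximates this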
def svd(A, eps_or_k, rand=True):
'\n Compute SVD of a matrix via an ID.\n\n An SVD of a matrix `A` is a factorization::\n\n A = numpy.dot(U, numpy.dot(numpy.diag(S), V.conj().T))\n\n where `U` and `V` have orthonormal columns and `S` is nonnegative.\n\n The SVD can be computed to any relative precision or rank (depending on the\n value of `eps_or_k`).\n\n See also :func:`interp_decomp` and :func:`id_to_svd`.\n\n .. This function automatically detects the form of the input parameters and\n passes them to the appropriate backend. For details, see\n :func:`_backend.iddp_svd`, :func:`_backend.iddp_asvd`,\n :func:`_backend.iddp_rsvd`, :func:`_backend.iddr_svd`,\n :func:`_backend.iddr_asvd`, :func:`_backend.iddr_rsvd`,\n :func:`_backend.idzp_svd`, :func:`_backend.idzp_asvd`,\n :func:`_backend.idzp_rsvd`, :func:`_backend.idzr_svd`,\n :func:`_backend.idzr_asvd`, and :func:`_backend.idzr_rsvd`.\n\n Parameters\n ----------\n A : :class:`numpy.ndarray` or :class:`scipy.sparse.linalg.LinearOperator`\n Matrix to be factored, given as either a :class:`numpy.ndarray` or a\n :class:`scipy.sparse.linalg.LinearOperator` with the `matvec` and\n `rmatvec` methods (to apply the matrix and its adjoint).\n eps_or_k : float or int\n Relative error (if `eps_or_k < 1`) or rank (if `eps_or_k >= 1`) of\n approximation.\n rand : bool, optional\n Whether to use random sampling if `A` is of type :class:`numpy.ndarray`\n (randomized algorithms are always used if `A` is of type\n :class:`scipy.sparse.linalg.LinearOperator`).\n\n Returns\n -------\n U : :class:`numpy.ndarray`\n Left singular vectors.\n S : :class:`numpy.ndarray`\n Singular values.\n V : :class:`numpy.ndarray`\n Right singular vectors.\n '
from scipy.sparse.linalg import LinearOperator
real = _is_real(A)
if isinstance(A, np.ndarray):
if (eps_or_k < 1):
eps = eps_or_k
if rand:
if real:
(U, V, S) = _backend.iddp_asvd(eps, A)
else:
if _IS_32BIT:
raise _32BIT_ERROR
(U, V, S) = _backend.idzp_asvd(eps, A)
elif real:
(U, V, S) = _backend.iddp_svd(eps, A)
else:
(U, V, S) = _backend.idzp_svd(eps, A)
else:
k = int(eps_or_k)
if (k > min(A.shape)):
raise ValueError(('Approximation rank %s exceeds min(A.shape) = %s ' % (k, min(A.shape))))
if rand:
if real:
(U, V, S) = _backend.iddr_asvd(A, k)
else:
if _IS_32BIT:
raise _32BIT_ERROR
(U, V, S) = _backend.idzr_asvd(A, k)
elif real:
(U, V, S) = _backend.iddr_svd(A, k)
else:
(U, V, S) = _backend.idzr_svd(A, k)
elif isinstance(A, LinearOperator):
(m, n) = A.shape
matvec = (lambda x: A.matvec(x))
matveca = (lambda x: A.rmatvec(x))
if (eps_or_k < 1):
eps = eps_or_k
if real:
(U, V, S) = _backend.iddp_rsvd(eps, m, n, matveca, matvec)
else:
if _IS_32BIT:
raise _32BIT_ERROR
(U, V, S) = _backend.idzp_rsvd(eps, m, n, matveca, matvec)
else:
k = int(eps_or_k)
if real:
(U, V, S) = _backend.iddr_rsvd(m, n, matveca, matvec, k)
else:
if _IS_32BIT:
raise _32BIT_ERROR
(U, V, S) = _backend.idzr_rsvd(m, n, matveca, matvec, k)
else:
raise _TYPE_ERROR
return (U, S, V) | -8,320,704,618,652,077,000 | Compute SVD of a matrix via an ID.
An SVD of a matrix `A` is a factorization::
A = numpy.dot(U, numpy.dot(numpy.diag(S), V.conj().T))
where `U` and `V` have orthonormal columns and `S` is nonnegative.
The SVD can be computed to any relative precision or rank (depending on the
value of `eps_or_k`).
See also :func:`interp_decomp` and :func:`id_to_svd`.
.. This function automatically detects the form of the input parameters and
passes them to the appropriate backend. For details, see
:func:`_backend.iddp_svd`, :func:`_backend.iddp_asvd`,
:func:`_backend.iddp_rsvd`, :func:`_backend.iddr_svd`,
:func:`_backend.iddr_asvd`, :func:`_backend.iddr_rsvd`,
:func:`_backend.idzp_svd`, :func:`_backend.idzp_asvd`,
:func:`_backend.idzp_rsvd`, :func:`_backend.idzr_svd`,
:func:`_backend.idzr_asvd`, and :func:`_backend.idzr_rsvd`.
Parameters
----------
A : :class:`numpy.ndarray` or :class:`scipy.sparse.linalg.LinearOperator`
Matrix to be factored, given as either a :class:`numpy.ndarray` or a
:class:`scipy.sparse.linalg.LinearOperator` with the `matvec` and
`rmatvec` methods (to apply the matrix and its adjoint).
eps_or_k : float or int
Relative error (if `eps_or_k < 1`) or rank (if `eps_or_k >= 1`) of
approximation.
rand : bool, optional
Whether to use random sampling if `A` is of type :class:`numpy.ndarray`
(randomized algorithms are always used if `A` is of type
:class:`scipy.sparse.linalg.LinearOperator`).
Returns
-------
U : :class:`numpy.ndarray`
Left singular vectors.
S : :class:`numpy.ndarray`
Singular values.
V : :class:`numpy.ndarray`
Right singular vectors. | scipy/linalg/interpolative.py | svd | AtsushiSakai/scipy | python | def svd(A, eps_or_k, rand=True):
'\n    \n    '
from scipy.sparse.linalg import LinearOperator
real = _is_real(A)
if isinstance(A, np.ndarray):
if (eps_or_k < 1):
eps = eps_or_k
if rand:
if real:
(U, V, S) = _backend.iddp_asvd(eps, A)
else:
if _IS_32BIT:
raise _32BIT_ERROR
(U, V, S) = _backend.idzp_asvd(eps, A)
elif real:
(U, V, S) = _backend.iddp_svd(eps, A)
else:
(U, V, S) = _backend.idzp_svd(eps, A)
else:
k = int(eps_or_k)
if (k > min(A.shape)):
raise ValueError(('Approximation rank %s exceeds min(A.shape) = %s ' % (k, min(A.shape))))
if rand:
if real:
(U, V, S) = _backend.iddr_asvd(A, k)
else:
if _IS_32BIT:
raise _32BIT_ERROR
(U, V, S) = _backend.idzr_asvd(A, k)
elif real:
(U, V, S) = _backend.iddr_svd(A, k)
else:
(U, V, S) = _backend.idzr_svd(A, k)
elif isinstance(A, LinearOperator):
(m, n) = A.shape
matvec = (lambda x: A.matvec(x))
matveca = (lambda x: A.rmatvec(x))
if (eps_or_k < 1):
eps = eps_or_k
if real:
(U, V, S) = _backend.iddp_rsvd(eps, m, n, matveca, matvec)
else:
if _IS_32BIT:
raise _32BIT_ERROR
(U, V, S) = _backend.idzp_rsvd(eps, m, n, matveca, matvec)
else:
k = int(eps_or_k)
if real:
(U, V, S) = _backend.iddr_rsvd(m, n, matveca, matvec, k)
else:
if _IS_32BIT:
raise _32BIT_ERROR
(U, V, S) = _backend.idzr_rsvd(m, n, matveca, matvec, k)
else:
raise _TYPE_ERROR
return (U, S, V) |
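A brief usage sketch for the svd routine in the row above (an editorial addition, not a dataset row): it exercises both calling modes on a small random matrix and checks the reconstruction identity from the docstring. The matrix size and tolerances are illustrative assumptions.

import numpy as np
import scipy.linalg.interpolative as sli

rng = np.random.RandomState(0)
A = rng.rand(100, 60)                      # small dense real test matrix (assumed)

# Precision mode: eps_or_k < 1 is read as a relative error target.
(U, S, V) = sli.svd(A, 1e-08)
approx = np.dot(U, np.dot(np.diag(S), V.conj().T))
assert np.allclose(A, approx, atol=1e-06)  # reconstruction identity from the docstring

# Rank mode: eps_or_k >= 1 is read as a fixed approximation rank.
(U, S, V) = sli.svd(A, 20)
print(U.shape, S.shape, V.shape)           # (100, 20) (20,) (60, 20)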
def estimate_rank(A, eps):
'\n Estimate matrix rank to a specified relative precision using randomized\n methods.\n\n The matrix `A` can be given as either a :class:`numpy.ndarray` or a\n :class:`scipy.sparse.linalg.LinearOperator`, with different algorithms used\n for each case. If `A` is of type :class:`numpy.ndarray`, then the output\n rank is typically about 8 higher than the actual numerical rank.\n\n .. This function automatically detects the form of the input parameters and\n passes them to the appropriate backend. For details,\n see :func:`_backend.idd_estrank`, :func:`_backend.idd_findrank`,\n :func:`_backend.idz_estrank`, and :func:`_backend.idz_findrank`.\n\n Parameters\n ----------\n A : :class:`numpy.ndarray` or :class:`scipy.sparse.linalg.LinearOperator`\n Matrix whose rank is to be estimated, given as either a\n :class:`numpy.ndarray` or a :class:`scipy.sparse.linalg.LinearOperator`\n with the `rmatvec` method (to apply the matrix adjoint).\n eps : float\n Relative error for numerical rank definition.\n\n Returns\n -------\n int\n Estimated matrix rank.\n '
from scipy.sparse.linalg import LinearOperator
real = _is_real(A)
if isinstance(A, np.ndarray):
if real:
rank = _backend.idd_estrank(eps, A)
else:
rank = _backend.idz_estrank(eps, A)
if (rank == 0):
rank = min(A.shape)
return rank
elif isinstance(A, LinearOperator):
(m, n) = A.shape
matveca = A.rmatvec
if real:
return _backend.idd_findrank(eps, m, n, matveca)
else:
return _backend.idz_findrank(eps, m, n, matveca)
else:
raise _TYPE_ERROR | -146,156,896,877,322,050 | Estimate matrix rank to a specified relative precision using randomized
methods.
The matrix `A` can be given as either a :class:`numpy.ndarray` or a
:class:`scipy.sparse.linalg.LinearOperator`, with different algorithms used
for each case. If `A` is of type :class:`numpy.ndarray`, then the output
rank is typically about 8 higher than the actual numerical rank.
.. This function automatically detects the form of the input parameters and
passes them to the appropriate backend. For details,
see :func:`_backend.idd_estrank`, :func:`_backend.idd_findrank`,
:func:`_backend.idz_estrank`, and :func:`_backend.idz_findrank`.
Parameters
----------
A : :class:`numpy.ndarray` or :class:`scipy.sparse.linalg.LinearOperator`
Matrix whose rank is to be estimated, given as either a
:class:`numpy.ndarray` or a :class:`scipy.sparse.linalg.LinearOperator`
with the `rmatvec` method (to apply the matrix adjoint).
eps : float
Relative error for numerical rank definition.
Returns
-------
int
Estimated matrix rank. | scipy/linalg/interpolative.py | estimate_rank | AtsushiSakai/scipy | python | def estimate_rank(A, eps):
'\n    \n    '
from scipy.sparse.linalg import LinearOperator
real = _is_real(A)
if isinstance(A, np.ndarray):
if real:
rank = _backend.idd_estrank(eps, A)
else:
rank = _backend.idz_estrank(eps, A)
if (rank == 0):
rank = min(A.shape)
return rank
elif isinstance(A, LinearOperator):
(m, n) = A.shape
matveca = A.rmatvec
if real:
return _backend.idd_findrank(eps, m, n, matveca)
else:
return _backend.idz_findrank(eps, m, n, matveca)
else:
raise _TYPE_ERROR |
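An illustrative sketch of estimate_rank on both accepted input types (editorial addition; the test matrix and tolerance are assumptions). As the docstring notes, the ndarray path may overestimate the true numerical rank by roughly 8.

import numpy as np
import scipy.linalg.interpolative as sli
from scipy.sparse.linalg import aslinearoperator

rng = np.random.RandomState(1)
B = rng.rand(200, 50)
A = np.dot(B, B.T)                                    # 200x200, numerical rank <= 50

print(sli.estimate_rank(A, 1e-10))                    # ndarray path (idd_estrank)
print(sli.estimate_rank(aslinearoperator(A), 1e-10))  # LinearOperator path (idd_findrank)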
def __init__(__self__, *, resource_group_name: pulumi.Input[str], boot_strap_configuration_blobs: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]=None, cloud_init_configuration: Optional[pulumi.Input[str]]=None, cloud_init_configuration_blobs: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]=None, id: Optional[pulumi.Input[str]]=None, identity: Optional[pulumi.Input['ManagedServiceIdentityArgs']]=None, location: Optional[pulumi.Input[str]]=None, network_virtual_appliance_name: Optional[pulumi.Input[str]]=None, nva_sku: Optional[pulumi.Input['VirtualApplianceSkuPropertiesArgs']]=None, tags: Optional[pulumi.Input[Mapping[(str, pulumi.Input[str])]]]=None, virtual_appliance_asn: Optional[pulumi.Input[float]]=None, virtual_hub: Optional[pulumi.Input['SubResourceArgs']]=None):
"\n The set of arguments for constructing a NetworkVirtualAppliance resource.\n :param pulumi.Input[str] resource_group_name: The name of the resource group.\n :param pulumi.Input[Sequence[pulumi.Input[str]]] boot_strap_configuration_blobs: BootStrapConfigurationBlobs storage URLs.\n :param pulumi.Input[str] cloud_init_configuration: CloudInitConfiguration string in plain text.\n :param pulumi.Input[Sequence[pulumi.Input[str]]] cloud_init_configuration_blobs: CloudInitConfigurationBlob storage URLs.\n :param pulumi.Input[str] id: Resource ID.\n :param pulumi.Input['ManagedServiceIdentityArgs'] identity: The service principal that has read access to cloud-init and config blob.\n :param pulumi.Input[str] location: Resource location.\n :param pulumi.Input[str] network_virtual_appliance_name: The name of Network Virtual Appliance.\n :param pulumi.Input['VirtualApplianceSkuPropertiesArgs'] nva_sku: Network Virtual Appliance SKU.\n :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Resource tags.\n :param pulumi.Input[float] virtual_appliance_asn: VirtualAppliance ASN.\n :param pulumi.Input['SubResourceArgs'] virtual_hub: The Virtual Hub where Network Virtual Appliance is being deployed.\n "
pulumi.set(__self__, 'resource_group_name', resource_group_name)
if (boot_strap_configuration_blobs is not None):
pulumi.set(__self__, 'boot_strap_configuration_blobs', boot_strap_configuration_blobs)
if (cloud_init_configuration is not None):
pulumi.set(__self__, 'cloud_init_configuration', cloud_init_configuration)
if (cloud_init_configuration_blobs is not None):
pulumi.set(__self__, 'cloud_init_configuration_blobs', cloud_init_configuration_blobs)
if (id is not None):
pulumi.set(__self__, 'id', id)
if (identity is not None):
pulumi.set(__self__, 'identity', identity)
if (location is not None):
pulumi.set(__self__, 'location', location)
if (network_virtual_appliance_name is not None):
pulumi.set(__self__, 'network_virtual_appliance_name', network_virtual_appliance_name)
if (nva_sku is not None):
pulumi.set(__self__, 'nva_sku', nva_sku)
if (tags is not None):
pulumi.set(__self__, 'tags', tags)
if (virtual_appliance_asn is not None):
pulumi.set(__self__, 'virtual_appliance_asn', virtual_appliance_asn)
if (virtual_hub is not None):
pulumi.set(__self__, 'virtual_hub', virtual_hub) | 6,667,925,869,452,050,000 | The set of arguments for constructing a NetworkVirtualAppliance resource.
:param pulumi.Input[str] resource_group_name: The name of the resource group.
:param pulumi.Input[Sequence[pulumi.Input[str]]] boot_strap_configuration_blobs: BootStrapConfigurationBlobs storage URLs.
:param pulumi.Input[str] cloud_init_configuration: CloudInitConfiguration string in plain text.
:param pulumi.Input[Sequence[pulumi.Input[str]]] cloud_init_configuration_blobs: CloudInitConfigurationBlob storage URLs.
:param pulumi.Input[str] id: Resource ID.
:param pulumi.Input['ManagedServiceIdentityArgs'] identity: The service principal that has read access to cloud-init and config blob.
:param pulumi.Input[str] location: Resource location.
:param pulumi.Input[str] network_virtual_appliance_name: The name of Network Virtual Appliance.
:param pulumi.Input['VirtualApplianceSkuPropertiesArgs'] nva_sku: Network Virtual Appliance SKU.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Resource tags.
:param pulumi.Input[float] virtual_appliance_asn: VirtualAppliance ASN.
:param pulumi.Input['SubResourceArgs'] virtual_hub: The Virtual Hub where Network Virtual Appliance is being deployed. | sdk/python/pulumi_azure_native/network/v20201101/network_virtual_appliance.py | __init__ | polivbr/pulumi-azure-native | python | def __init__(__self__, *, resource_group_name: pulumi.Input[str], boot_strap_configuration_blobs: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]=None, cloud_init_configuration: Optional[pulumi.Input[str]]=None, cloud_init_configuration_blobs: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]=None, id: Optional[pulumi.Input[str]]=None, identity: Optional[pulumi.Input['ManagedServiceIdentityArgs']]=None, location: Optional[pulumi.Input[str]]=None, network_virtual_appliance_name: Optional[pulumi.Input[str]]=None, nva_sku: Optional[pulumi.Input['VirtualApplianceSkuPropertiesArgs']]=None, tags: Optional[pulumi.Input[Mapping[(str, pulumi.Input[str])]]]=None, virtual_appliance_asn: Optional[pulumi.Input[float]]=None, virtual_hub: Optional[pulumi.Input['SubResourceArgs']]=None):
"\n The set of arguments for constructing a NetworkVirtualAppliance resource.\n :param pulumi.Input[str] resource_group_name: The name of the resource group.\n :param pulumi.Input[Sequence[pulumi.Input[str]]] boot_strap_configuration_blobs: BootStrapConfigurationBlobs storage URLs.\n :param pulumi.Input[str] cloud_init_configuration: CloudInitConfiguration string in plain text.\n :param pulumi.Input[Sequence[pulumi.Input[str]]] cloud_init_configuration_blobs: CloudInitConfigurationBlob storage URLs.\n :param pulumi.Input[str] id: Resource ID.\n :param pulumi.Input['ManagedServiceIdentityArgs'] identity: The service principal that has read access to cloud-init and config blob.\n :param pulumi.Input[str] location: Resource location.\n :param pulumi.Input[str] network_virtual_appliance_name: The name of Network Virtual Appliance.\n :param pulumi.Input['VirtualApplianceSkuPropertiesArgs'] nva_sku: Network Virtual Appliance SKU.\n :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Resource tags.\n :param pulumi.Input[float] virtual_appliance_asn: VirtualAppliance ASN.\n :param pulumi.Input['SubResourceArgs'] virtual_hub: The Virtual Hub where Network Virtual Appliance is being deployed.\n "
pulumi.set(__self__, 'resource_group_name', resource_group_name)
if (boot_strap_configuration_blobs is not None):
pulumi.set(__self__, 'boot_strap_configuration_blobs', boot_strap_configuration_blobs)
if (cloud_init_configuration is not None):
pulumi.set(__self__, 'cloud_init_configuration', cloud_init_configuration)
if (cloud_init_configuration_blobs is not None):
pulumi.set(__self__, 'cloud_init_configuration_blobs', cloud_init_configuration_blobs)
if (id is not None):
pulumi.set(__self__, 'id', id)
if (identity is not None):
pulumi.set(__self__, 'identity', identity)
if (location is not None):
pulumi.set(__self__, 'location', location)
if (network_virtual_appliance_name is not None):
pulumi.set(__self__, 'network_virtual_appliance_name', network_virtual_appliance_name)
if (nva_sku is not None):
pulumi.set(__self__, 'nva_sku', nva_sku)
if (tags is not None):
pulumi.set(__self__, 'tags', tags)
if (virtual_appliance_asn is not None):
pulumi.set(__self__, 'virtual_appliance_asn', virtual_appliance_asn)
if (virtual_hub is not None):
pulumi.set(__self__, 'virtual_hub', virtual_hub) |
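A hypothetical sketch of populating the args class in the row above (editorial addition). Every literal value is an illustrative assumption, as is the assumption that the input types below are re-exported from this versioned module.

from pulumi_azure_native.network.v20201101 import (
    NetworkVirtualApplianceArgs,
    VirtualApplianceSkuPropertiesArgs,  # assumed importable from this module
    SubResourceArgs,                    # assumed importable from this module
)

args = NetworkVirtualApplianceArgs(
    resource_group_name='example-rg',              # hypothetical name
    network_virtual_appliance_name='example-nva',  # hypothetical name
    location='westus2',
    virtual_appliance_asn=65222,                   # hypothetical private ASN
    nva_sku=VirtualApplianceSkuPropertiesArgs(     # field names assumed from the Azure REST shape
        vendor='example-vendor',
        bundled_scale_unit='1',
        market_place_version='latest'),
    virtual_hub=SubResourceArgs(id='<virtual-hub-resource-id>'),  # placeholder ID
    tags={'env': 'test'})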
@property
@pulumi.getter(name='resourceGroupName')
def resource_group_name(self) -> pulumi.Input[str]:
'\n The name of the resource group.\n '
return pulumi.get(self, 'resource_group_name') | 5,898,586,357,340,442,000 | The name of the resource group. | sdk/python/pulumi_azure_native/network/v20201101/network_virtual_appliance.py | resource_group_name | polivbr/pulumi-azure-native | python | @property
@pulumi.getter(name='resourceGroupName')
def resource_group_name(self) -> pulumi.Input[str]:
'\n \n '
return pulumi.get(self, 'resource_group_name') |
@property
@pulumi.getter(name='bootStrapConfigurationBlobs')
def boot_strap_configuration_blobs(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
'\n BootStrapConfigurationBlobs storage URLs.\n '
return pulumi.get(self, 'boot_strap_configuration_blobs') | 6,968,465,771,750,487,000 | BootStrapConfigurationBlobs storage URLs. | sdk/python/pulumi_azure_native/network/v20201101/network_virtual_appliance.py | boot_strap_configuration_blobs | polivbr/pulumi-azure-native | python | @property
@pulumi.getter(name='bootStrapConfigurationBlobs')
def boot_strap_configuration_blobs(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
'\n \n '
return pulumi.get(self, 'boot_strap_configuration_blobs') |
@property
@pulumi.getter(name='cloudInitConfiguration')
def cloud_init_configuration(self) -> Optional[pulumi.Input[str]]:
'\n CloudInitConfiguration string in plain text.\n '
return pulumi.get(self, 'cloud_init_configuration') | -2,064,485,249,122,486,300 | CloudInitConfiguration string in plain text. | sdk/python/pulumi_azure_native/network/v20201101/network_virtual_appliance.py | cloud_init_configuration | polivbr/pulumi-azure-native | python | @property
@pulumi.getter(name='cloudInitConfiguration')
def cloud_init_configuration(self) -> Optional[pulumi.Input[str]]:
'\n \n '
return pulumi.get(self, 'cloud_init_configuration') |
@property
@pulumi.getter(name='cloudInitConfigurationBlobs')
def cloud_init_configuration_blobs(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
'\n CloudInitConfigurationBlob storage URLs.\n '
return pulumi.get(self, 'cloud_init_configuration_blobs') | -4,878,935,036,091,921,000 | CloudInitConfigurationBlob storage URLs. | sdk/python/pulumi_azure_native/network/v20201101/network_virtual_appliance.py | cloud_init_configuration_blobs | polivbr/pulumi-azure-native | python | @property
@pulumi.getter(name='cloudInitConfigurationBlobs')
def cloud_init_configuration_blobs(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
'\n \n '
return pulumi.get(self, 'cloud_init_configuration_blobs') |
@property
@pulumi.getter
def id(self) -> Optional[pulumi.Input[str]]:
'\n Resource ID.\n '
return pulumi.get(self, 'id') | 4,003,078,074,025,280,500 | Resource ID. | sdk/python/pulumi_azure_native/network/v20201101/network_virtual_appliance.py | id | polivbr/pulumi-azure-native | python | @property
@pulumi.getter
def id(self) -> Optional[pulumi.Input[str]]:
'\n \n '
return pulumi.get(self, 'id') |
@property
@pulumi.getter
def identity(self) -> Optional[pulumi.Input['ManagedServiceIdentityArgs']]:
'\n The service principal that has read access to cloud-init and config blob.\n '
return pulumi.get(self, 'identity') | 2,198,703,279,660,608,300 | The service principal that has read access to cloud-init and config blob. | sdk/python/pulumi_azure_native/network/v20201101/network_virtual_appliance.py | identity | polivbr/pulumi-azure-native | python | @property
@pulumi.getter
def identity(self) -> Optional[pulumi.Input['ManagedServiceIdentityArgs']]:
'\n \n '
return pulumi.get(self, 'identity') |
@property
@pulumi.getter
def location(self) -> Optional[pulumi.Input[str]]:
'\n Resource location.\n '
return pulumi.get(self, 'location') | 5,685,883,695,381,965,000 | Resource location. | sdk/python/pulumi_azure_native/network/v20201101/network_virtual_appliance.py | location | polivbr/pulumi-azure-native | python | @property
@pulumi.getter
def location(self) -> Optional[pulumi.Input[str]]:
'\n \n '
return pulumi.get(self, 'location') |
@property
@pulumi.getter(name='networkVirtualApplianceName')
def network_virtual_appliance_name(self) -> Optional[pulumi.Input[str]]:
'\n The name of Network Virtual Appliance.\n '
return pulumi.get(self, 'network_virtual_appliance_name') | -1,706,490,319,131,603,700 | The name of Network Virtual Appliance. | sdk/python/pulumi_azure_native/network/v20201101/network_virtual_appliance.py | network_virtual_appliance_name | polivbr/pulumi-azure-native | python | @property
@pulumi.getter(name='networkVirtualApplianceName')
def network_virtual_appliance_name(self) -> Optional[pulumi.Input[str]]:
'\n \n '
return pulumi.get(self, 'network_virtual_appliance_name') |
@property
@pulumi.getter(name='nvaSku')
def nva_sku(self) -> Optional[pulumi.Input['VirtualApplianceSkuPropertiesArgs']]:
'\n Network Virtual Appliance SKU.\n '
return pulumi.get(self, 'nva_sku') | 2,661,550,094,163,592,000 | Network Virtual Appliance SKU. | sdk/python/pulumi_azure_native/network/v20201101/network_virtual_appliance.py | nva_sku | polivbr/pulumi-azure-native | python | @property
@pulumi.getter(name='nvaSku')
def nva_sku(self) -> Optional[pulumi.Input['VirtualApplianceSkuPropertiesArgs']]:
'\n \n '
return pulumi.get(self, 'nva_sku') |
@property
@pulumi.getter
def tags(self) -> Optional[pulumi.Input[Mapping[(str, pulumi.Input[str])]]]:
'\n Resource tags.\n '
return pulumi.get(self, 'tags') | -2,047,115,851,061,118,500 | Resource tags. | sdk/python/pulumi_azure_native/network/v20201101/network_virtual_appliance.py | tags | polivbr/pulumi-azure-native | python | @property
@pulumi.getter
def tags(self) -> Optional[pulumi.Input[Mapping[(str, pulumi.Input[str])]]]:
'\n \n '
return pulumi.get(self, 'tags') |
@property
@pulumi.getter(name='virtualApplianceAsn')
def virtual_appliance_asn(self) -> Optional[pulumi.Input[float]]:
'\n VirtualAppliance ASN.\n '
return pulumi.get(self, 'virtual_appliance_asn') | -2,730,813,289,843,636,000 | VirtualAppliance ASN. | sdk/python/pulumi_azure_native/network/v20201101/network_virtual_appliance.py | virtual_appliance_asn | polivbr/pulumi-azure-native | python | @property
@pulumi.getter(name='virtualApplianceAsn')
def virtual_appliance_asn(self) -> Optional[pulumi.Input[float]]:
'\n \n '
return pulumi.get(self, 'virtual_appliance_asn') |
@property
@pulumi.getter(name='virtualHub')
def virtual_hub(self) -> Optional[pulumi.Input['SubResourceArgs']]:
'\n The Virtual Hub where Network Virtual Appliance is being deployed.\n '
return pulumi.get(self, 'virtual_hub') | 2,946,246,246,132,601,000 | The Virtual Hub where Network Virtual Appliance is being deployed. | sdk/python/pulumi_azure_native/network/v20201101/network_virtual_appliance.py | virtual_hub | polivbr/pulumi-azure-native | python | @property
@pulumi.getter(name='virtualHub')
def virtual_hub(self) -> Optional[pulumi.Input['SubResourceArgs']]:
'\n \n '
return pulumi.get(self, 'virtual_hub') |
@overload
def __init__(__self__, resource_name: str, opts: Optional[pulumi.ResourceOptions]=None, boot_strap_configuration_blobs: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]=None, cloud_init_configuration: Optional[pulumi.Input[str]]=None, cloud_init_configuration_blobs: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]=None, id: Optional[pulumi.Input[str]]=None, identity: Optional[pulumi.Input[pulumi.InputType['ManagedServiceIdentityArgs']]]=None, location: Optional[pulumi.Input[str]]=None, network_virtual_appliance_name: Optional[pulumi.Input[str]]=None, nva_sku: Optional[pulumi.Input[pulumi.InputType['VirtualApplianceSkuPropertiesArgs']]]=None, resource_group_name: Optional[pulumi.Input[str]]=None, tags: Optional[pulumi.Input[Mapping[(str, pulumi.Input[str])]]]=None, virtual_appliance_asn: Optional[pulumi.Input[float]]=None, virtual_hub: Optional[pulumi.Input[pulumi.InputType['SubResourceArgs']]]=None, __props__=None):
"\n NetworkVirtualAppliance Resource.\n\n :param str resource_name: The name of the resource.\n :param pulumi.ResourceOptions opts: Options for the resource.\n :param pulumi.Input[Sequence[pulumi.Input[str]]] boot_strap_configuration_blobs: BootStrapConfigurationBlobs storage URLs.\n :param pulumi.Input[str] cloud_init_configuration: CloudInitConfiguration string in plain text.\n :param pulumi.Input[Sequence[pulumi.Input[str]]] cloud_init_configuration_blobs: CloudInitConfigurationBlob storage URLs.\n :param pulumi.Input[str] id: Resource ID.\n :param pulumi.Input[pulumi.InputType['ManagedServiceIdentityArgs']] identity: The service principal that has read access to cloud-init and config blob.\n :param pulumi.Input[str] location: Resource location.\n :param pulumi.Input[str] network_virtual_appliance_name: The name of Network Virtual Appliance.\n :param pulumi.Input[pulumi.InputType['VirtualApplianceSkuPropertiesArgs']] nva_sku: Network Virtual Appliance SKU.\n :param pulumi.Input[str] resource_group_name: The name of the resource group.\n :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Resource tags.\n :param pulumi.Input[float] virtual_appliance_asn: VirtualAppliance ASN.\n :param pulumi.Input[pulumi.InputType['SubResourceArgs']] virtual_hub: The Virtual Hub where Network Virtual Appliance is being deployed.\n "
... | 1,037,797,180,704,716,400 | NetworkVirtualAppliance Resource.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[Sequence[pulumi.Input[str]]] boot_strap_configuration_blobs: BootStrapConfigurationBlobs storage URLs.
:param pulumi.Input[str] cloud_init_configuration: CloudInitConfiguration string in plain text.
:param pulumi.Input[Sequence[pulumi.Input[str]]] cloud_init_configuration_blobs: CloudInitConfigurationBlob storage URLs.
:param pulumi.Input[str] id: Resource ID.
:param pulumi.Input[pulumi.InputType['ManagedServiceIdentityArgs']] identity: The service principal that has read access to cloud-init and config blob.
:param pulumi.Input[str] location: Resource location.
:param pulumi.Input[str] network_virtual_appliance_name: The name of Network Virtual Appliance.
:param pulumi.Input[pulumi.InputType['VirtualApplianceSkuPropertiesArgs']] nva_sku: Network Virtual Appliance SKU.
:param pulumi.Input[str] resource_group_name: The name of the resource group.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Resource tags.
:param pulumi.Input[float] virtual_appliance_asn: VirtualAppliance ASN.
:param pulumi.Input[pulumi.InputType['SubResourceArgs']] virtual_hub: The Virtual Hub where Network Virtual Appliance is being deployed. | sdk/python/pulumi_azure_native/network/v20201101/network_virtual_appliance.py | __init__ | polivbr/pulumi-azure-native | python | @overload
def __init__(__self__, resource_name: str, opts: Optional[pulumi.ResourceOptions]=None, boot_strap_configuration_blobs: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]=None, cloud_init_configuration: Optional[pulumi.Input[str]]=None, cloud_init_configuration_blobs: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]=None, id: Optional[pulumi.Input[str]]=None, identity: Optional[pulumi.Input[pulumi.InputType['ManagedServiceIdentityArgs']]]=None, location: Optional[pulumi.Input[str]]=None, network_virtual_appliance_name: Optional[pulumi.Input[str]]=None, nva_sku: Optional[pulumi.Input[pulumi.InputType['VirtualApplianceSkuPropertiesArgs']]]=None, resource_group_name: Optional[pulumi.Input[str]]=None, tags: Optional[pulumi.Input[Mapping[(str, pulumi.Input[str])]]]=None, virtual_appliance_asn: Optional[pulumi.Input[float]]=None, virtual_hub: Optional[pulumi.Input[pulumi.InputType['SubResourceArgs']]]=None, __props__=None):
"\n NetworkVirtualAppliance Resource.\n\n :param str resource_name: The name of the resource.\n :param pulumi.ResourceOptions opts: Options for the resource.\n :param pulumi.Input[Sequence[pulumi.Input[str]]] boot_strap_configuration_blobs: BootStrapConfigurationBlobs storage URLs.\n :param pulumi.Input[str] cloud_init_configuration: CloudInitConfiguration string in plain text.\n :param pulumi.Input[Sequence[pulumi.Input[str]]] cloud_init_configuration_blobs: CloudInitConfigurationBlob storage URLs.\n :param pulumi.Input[str] id: Resource ID.\n :param pulumi.Input[pulumi.InputType['ManagedServiceIdentityArgs']] identity: The service principal that has read access to cloud-init and config blob.\n :param pulumi.Input[str] location: Resource location.\n :param pulumi.Input[str] network_virtual_appliance_name: The name of Network Virtual Appliance.\n :param pulumi.Input[pulumi.InputType['VirtualApplianceSkuPropertiesArgs']] nva_sku: Network Virtual Appliance SKU.\n :param pulumi.Input[str] resource_group_name: The name of the resource group.\n :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Resource tags.\n :param pulumi.Input[float] virtual_appliance_asn: VirtualAppliance ASN.\n :param pulumi.Input[pulumi.InputType['SubResourceArgs']] virtual_hub: The Virtual Hub where Network Virtual Appliance is being deployed.\n "
... |
@overload
def __init__(__self__, resource_name: str, args: NetworkVirtualApplianceArgs, opts: Optional[pulumi.ResourceOptions]=None):
"\n NetworkVirtualAppliance Resource.\n\n :param str resource_name: The name of the resource.\n :param NetworkVirtualApplianceArgs args: The arguments to use to populate this resource's properties.\n :param pulumi.ResourceOptions opts: Options for the resource.\n "
... | 2,114,617,597,250,460,700 | NetworkVirtualAppliance Resource.
:param str resource_name: The name of the resource.
:param NetworkVirtualApplianceArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource. | sdk/python/pulumi_azure_native/network/v20201101/network_virtual_appliance.py | __init__ | polivbr/pulumi-azure-native | python | @overload
def __init__(__self__, resource_name: str, args: NetworkVirtualApplianceArgs, opts: Optional[pulumi.ResourceOptions]=None):
"\n NetworkVirtualAppliance Resource.\n\n :param str resource_name: The name of the resource.\n :param NetworkVirtualApplianceArgs args: The arguments to use to populate this resource's properties.\n :param pulumi.ResourceOptions opts: Options for the resource.\n "
... |
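A hedged sketch of creating this resource inside a Pulumi program (editorial addition); the resource name and property values are illustrative, and a real deployment would typically also pass nva_sku and virtual_hub as documented above.

import pulumi
from pulumi_azure_native.network.v20201101 import NetworkVirtualAppliance

nva = NetworkVirtualAppliance(
    'example-nva',                     # hypothetical Pulumi resource name
    resource_group_name='example-rg',  # hypothetical resource group
    location='westus2',
    virtual_appliance_asn=65222)

pulumi.export('nvaName', nva.name)     # the 'name' output property is defined further below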
@staticmethod
def get(resource_name: str, id: pulumi.Input[str], opts: Optional[pulumi.ResourceOptions]=None) -> 'NetworkVirtualAppliance':
"\n Get an existing NetworkVirtualAppliance resource's state with the given name, id, and optional extra\n properties used to qualify the lookup.\n\n :param str resource_name: The unique name of the resulting resource.\n :param pulumi.Input[str] id: The unique provider ID of the resource to lookup.\n :param pulumi.ResourceOptions opts: Options for the resource.\n "
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = NetworkVirtualApplianceArgs.__new__(NetworkVirtualApplianceArgs)
__props__.__dict__['address_prefix'] = None
__props__.__dict__['boot_strap_configuration_blobs'] = None
__props__.__dict__['cloud_init_configuration'] = None
__props__.__dict__['cloud_init_configuration_blobs'] = None
__props__.__dict__['etag'] = None
__props__.__dict__['identity'] = None
__props__.__dict__['inbound_security_rules'] = None
__props__.__dict__['location'] = None
__props__.__dict__['name'] = None
__props__.__dict__['nva_sku'] = None
__props__.__dict__['provisioning_state'] = None
__props__.__dict__['tags'] = None
__props__.__dict__['type'] = None
__props__.__dict__['virtual_appliance_asn'] = None
__props__.__dict__['virtual_appliance_nics'] = None
__props__.__dict__['virtual_appliance_sites'] = None
__props__.__dict__['virtual_hub'] = None
return NetworkVirtualAppliance(resource_name, opts=opts, __props__=__props__) | -323,794,455,604,124,540 | Get an existing NetworkVirtualAppliance resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource. | sdk/python/pulumi_azure_native/network/v20201101/network_virtual_appliance.py | get | polivbr/pulumi-azure-native | python | @staticmethod
def get(resource_name: str, id: pulumi.Input[str], opts: Optional[pulumi.ResourceOptions]=None) -> 'NetworkVirtualAppliance':
"\n Get an existing NetworkVirtualAppliance resource's state with the given name, id, and optional extra\n properties used to qualify the lookup.\n\n :param str resource_name: The unique name of the resulting resource.\n :param pulumi.Input[str] id: The unique provider ID of the resource to lookup.\n :param pulumi.ResourceOptions opts: Options for the resource.\n "
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = NetworkVirtualApplianceArgs.__new__(NetworkVirtualApplianceArgs)
__props__.__dict__['address_prefix'] = None
__props__.__dict__['boot_strap_configuration_blobs'] = None
__props__.__dict__['cloud_init_configuration'] = None
__props__.__dict__['cloud_init_configuration_blobs'] = None
__props__.__dict__['etag'] = None
__props__.__dict__['identity'] = None
__props__.__dict__['inbound_security_rules'] = None
__props__.__dict__['location'] = None
__props__.__dict__['name'] = None
__props__.__dict__['nva_sku'] = None
__props__.__dict__['provisioning_state'] = None
__props__.__dict__['tags'] = None
__props__.__dict__['type'] = None
__props__.__dict__['virtual_appliance_asn'] = None
__props__.__dict__['virtual_appliance_nics'] = None
__props__.__dict__['virtual_appliance_sites'] = None
__props__.__dict__['virtual_hub'] = None
return NetworkVirtualAppliance(resource_name, opts=opts, __props__=__props__) |
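An illustrative sketch of adopting an existing appliance through the get method above (editorial addition); the provider ID is a hypothetical placeholder.

import pulumi
from pulumi_azure_native.network.v20201101 import NetworkVirtualAppliance

existing = NetworkVirtualAppliance.get(
    'imported-nva',                                # hypothetical logical name
    id='<existing-network-virtual-appliance-id>')  # hypothetical provider ID
pulumi.export('state', existing.provisioning_state)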
@property
@pulumi.getter(name='addressPrefix')
def address_prefix(self) -> pulumi.Output[str]:
'\n Address Prefix.\n '
return pulumi.get(self, 'address_prefix') | 5,824,578,746,486,711,000 | Address Prefix. | sdk/python/pulumi_azure_native/network/v20201101/network_virtual_appliance.py | address_prefix | polivbr/pulumi-azure-native | python | @property
@pulumi.getter(name='addressPrefix')
def address_prefix(self) -> pulumi.Output[str]:
'\n \n '
return pulumi.get(self, 'address_prefix') |
@property
@pulumi.getter(name='bootStrapConfigurationBlobs')
def boot_strap_configuration_blobs(self) -> pulumi.Output[Optional[Sequence[str]]]:
'\n BootStrapConfigurationBlobs storage URLs.\n '
return pulumi.get(self, 'boot_strap_configuration_blobs') | 7,936,631,049,014,074,000 | BootStrapConfigurationBlobs storage URLs. | sdk/python/pulumi_azure_native/network/v20201101/network_virtual_appliance.py | boot_strap_configuration_blobs | polivbr/pulumi-azure-native | python | @property
@pulumi.getter(name='bootStrapConfigurationBlobs')
def boot_strap_configuration_blobs(self) -> pulumi.Output[Optional[Sequence[str]]]:
'\n \n '
return pulumi.get(self, 'boot_strap_configuration_blobs') |
@property
@pulumi.getter(name='cloudInitConfiguration')
def cloud_init_configuration(self) -> pulumi.Output[Optional[str]]:
'\n CloudInitConfiguration string in plain text.\n '
return pulumi.get(self, 'cloud_init_configuration') | 749,338,380,051,062,900 | CloudInitConfiguration string in plain text. | sdk/python/pulumi_azure_native/network/v20201101/network_virtual_appliance.py | cloud_init_configuration | polivbr/pulumi-azure-native | python | @property
@pulumi.getter(name='cloudInitConfiguration')
def cloud_init_configuration(self) -> pulumi.Output[Optional[str]]:
'\n \n '
return pulumi.get(self, 'cloud_init_configuration') |
@property
@pulumi.getter(name='cloudInitConfigurationBlobs')
def cloud_init_configuration_blobs(self) -> pulumi.Output[Optional[Sequence[str]]]:
'\n CloudInitConfigurationBlob storage URLs.\n '
return pulumi.get(self, 'cloud_init_configuration_blobs') | 1,871,157,752,575,486,200 | CloudInitConfigurationBlob storage URLs. | sdk/python/pulumi_azure_native/network/v20201101/network_virtual_appliance.py | cloud_init_configuration_blobs | polivbr/pulumi-azure-native | python | @property
@pulumi.getter(name='cloudInitConfigurationBlobs')
def cloud_init_configuration_blobs(self) -> pulumi.Output[Optional[Sequence[str]]]:
'\n \n '
return pulumi.get(self, 'cloud_init_configuration_blobs') |
@property
@pulumi.getter
def etag(self) -> pulumi.Output[str]:
'\n A unique read-only string that changes whenever the resource is updated.\n '
return pulumi.get(self, 'etag') | 5,960,741,373,667,297,000 | A unique read-only string that changes whenever the resource is updated. | sdk/python/pulumi_azure_native/network/v20201101/network_virtual_appliance.py | etag | polivbr/pulumi-azure-native | python | @property
@pulumi.getter
def etag(self) -> pulumi.Output[str]:
'\n \n '
return pulumi.get(self, 'etag') |
@property
@pulumi.getter
def identity(self) -> pulumi.Output[Optional['outputs.ManagedServiceIdentityResponse']]:
'\n The service principal that has read access to cloud-init and config blob.\n '
return pulumi.get(self, 'identity') | 4,552,885,376,458,106,400 | The service principal that has read access to cloud-init and config blob. | sdk/python/pulumi_azure_native/network/v20201101/network_virtual_appliance.py | identity | polivbr/pulumi-azure-native | python | @property
@pulumi.getter
def identity(self) -> pulumi.Output[Optional['outputs.ManagedServiceIdentityResponse']]:
'\n \n '
return pulumi.get(self, 'identity') |
@property
@pulumi.getter(name='inboundSecurityRules')
def inbound_security_rules(self) -> pulumi.Output[Sequence['outputs.SubResourceResponse']]:
'\n List of references to InboundSecurityRules.\n '
return pulumi.get(self, 'inbound_security_rules') | 4,518,341,234,215,433,000 | List of references to InboundSecurityRules. | sdk/python/pulumi_azure_native/network/v20201101/network_virtual_appliance.py | inbound_security_rules | polivbr/pulumi-azure-native | python | @property
@pulumi.getter(name='inboundSecurityRules')
def inbound_security_rules(self) -> pulumi.Output[Sequence['outputs.SubResourceResponse']]:
'\n \n '
return pulumi.get(self, 'inbound_security_rules') |
@property
@pulumi.getter
def location(self) -> pulumi.Output[Optional[str]]:
'\n Resource location.\n '
return pulumi.get(self, 'location') | -6,585,394,763,848,456,000 | Resource location. | sdk/python/pulumi_azure_native/network/v20201101/network_virtual_appliance.py | location | polivbr/pulumi-azure-native | python | @property
@pulumi.getter
def location(self) -> pulumi.Output[Optional[str]]:
'\n \n '
return pulumi.get(self, 'location') |
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
'\n Resource name.\n '
return pulumi.get(self, 'name') | 4,695,236,134,441,039,000 | Resource name. | sdk/python/pulumi_azure_native/network/v20201101/network_virtual_appliance.py | name | polivbr/pulumi-azure-native | python | @property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
'\n \n '
return pulumi.get(self, 'name') |
@property
@pulumi.getter(name='nvaSku')
def nva_sku(self) -> pulumi.Output[Optional['outputs.VirtualApplianceSkuPropertiesResponse']]:
'\n Network Virtual Appliance SKU.\n '
return pulumi.get(self, 'nva_sku') | 4,007,323,048,391,256,000 | Network Virtual Appliance SKU. | sdk/python/pulumi_azure_native/network/v20201101/network_virtual_appliance.py | nva_sku | polivbr/pulumi-azure-native | python | @property
@pulumi.getter(name='nvaSku')
def nva_sku(self) -> pulumi.Output[Optional['outputs.VirtualApplianceSkuPropertiesResponse']]:
'\n \n '
return pulumi.get(self, 'nva_sku') |
@property
@pulumi.getter(name='provisioningState')
def provisioning_state(self) -> pulumi.Output[str]:
'\n The provisioning state of the resource.\n '
return pulumi.get(self, 'provisioning_state') | -7,441,910,698,213,024,000 | The provisioning state of the resource. | sdk/python/pulumi_azure_native/network/v20201101/network_virtual_appliance.py | provisioning_state | polivbr/pulumi-azure-native | python | @property
@pulumi.getter(name='provisioningState')
def provisioning_state(self) -> pulumi.Output[str]:
'\n \n '
return pulumi.get(self, 'provisioning_state') |
@property
@pulumi.getter
def tags(self) -> pulumi.Output[Optional[Mapping[(str, str)]]]:
'\n Resource tags.\n '
return pulumi.get(self, 'tags') | -2,929,197,049,816,896,000 | Resource tags. | sdk/python/pulumi_azure_native/network/v20201101/network_virtual_appliance.py | tags | polivbr/pulumi-azure-native | python | @property
@pulumi.getter
def tags(self) -> pulumi.Output[Optional[Mapping[(str, str)]]]:
'\n \n '
return pulumi.get(self, 'tags') |
@property
@pulumi.getter
def type(self) -> pulumi.Output[str]:
'\n Resource type.\n '
return pulumi.get(self, 'type') | 2,132,950,812,122,862,800 | Resource type. | sdk/python/pulumi_azure_native/network/v20201101/network_virtual_appliance.py | type | polivbr/pulumi-azure-native | python | @property
@pulumi.getter
def type(self) -> pulumi.Output[str]:
'\n \n '
return pulumi.get(self, 'type') |
@property
@pulumi.getter(name='virtualApplianceAsn')
def virtual_appliance_asn(self) -> pulumi.Output[Optional[float]]:
'\n VirtualAppliance ASN.\n '
return pulumi.get(self, 'virtual_appliance_asn') | -9,037,999,107,382,466,000 | VirtualAppliance ASN. | sdk/python/pulumi_azure_native/network/v20201101/network_virtual_appliance.py | virtual_appliance_asn | polivbr/pulumi-azure-native | python | @property
@pulumi.getter(name='virtualApplianceAsn')
def virtual_appliance_asn(self) -> pulumi.Output[Optional[float]]:
'\n \n '
return pulumi.get(self, 'virtual_appliance_asn') |
@property
@pulumi.getter(name='virtualApplianceNics')
def virtual_appliance_nics(self) -> pulumi.Output[Sequence['outputs.VirtualApplianceNicPropertiesResponse']]:
'\n List of Virtual Appliance Network Interfaces.\n '
return pulumi.get(self, 'virtual_appliance_nics') | -572,032,662,944,750,140 | List of Virtual Appliance Network Interfaces. | sdk/python/pulumi_azure_native/network/v20201101/network_virtual_appliance.py | virtual_appliance_nics | polivbr/pulumi-azure-native | python | @property
@pulumi.getter(name='virtualApplianceNics')
def virtual_appliance_nics(self) -> pulumi.Output[Sequence['outputs.VirtualApplianceNicPropertiesResponse']]:
'\n \n '
return pulumi.get(self, 'virtual_appliance_nics') |
@property
@pulumi.getter(name='virtualApplianceSites')
def virtual_appliance_sites(self) -> pulumi.Output[Sequence['outputs.SubResourceResponse']]:
'\n List of references to VirtualApplianceSite.\n '
return pulumi.get(self, 'virtual_appliance_sites') | -8,492,447,354,014,992,000 | List of references to VirtualApplianceSite. | sdk/python/pulumi_azure_native/network/v20201101/network_virtual_appliance.py | virtual_appliance_sites | polivbr/pulumi-azure-native | python | @property
@pulumi.getter(name='virtualApplianceSites')
def virtual_appliance_sites(self) -> pulumi.Output[Sequence['outputs.SubResourceResponse']]:
'\n \n '
return pulumi.get(self, 'virtual_appliance_sites') |
@property
@pulumi.getter(name='virtualHub')
def virtual_hub(self) -> pulumi.Output[Optional['outputs.SubResourceResponse']]:
'\n The Virtual Hub where Network Virtual Appliance is being deployed.\n '
return pulumi.get(self, 'virtual_hub') | 6,378,258,943,083,094,000 | The Virtual Hub where Network Virtual Appliance is being deployed. | sdk/python/pulumi_azure_native/network/v20201101/network_virtual_appliance.py | virtual_hub | polivbr/pulumi-azure-native | python | @property
@pulumi.getter(name='virtualHub')
def virtual_hub(self) -> pulumi.Output[Optional['outputs.SubResourceResponse']]:
'\n \n '
return pulumi.get(self, 'virtual_hub') |
@attr(tags=['advanced', 'eip', 'advancedns', 'basic', 'sg'])
def test_01_deploy_vm_no_startvm(self):
'Test Deploy Virtual Machine with no startVM parameter\n '
self.debug(('Deploying instance in the account: %s' % self.account.name))
self.virtual_machine = VirtualMachine.create(self.apiclient, self.services['virtual_machine'], accountid=self.account.name, domainid=self.account.domainid, serviceofferingid=self.service_offering.id, diskofferingid=self.disk_offering.id, mode=self.zone.networktype)
self.debug(('Deployed instance in account: %s' % self.account.name))
list_vm_response = list_virtual_machines(self.apiclient, id=self.virtual_machine.id)
self.debug(('Verify listVirtualMachines response for virtual machine: %s' % self.virtual_machine.id))
self.assertEqual(isinstance(list_vm_response, list), True, 'Check list response returns a valid list')
vm_response = list_vm_response[0]
self.assertEqual(vm_response.state, 'Running', 'VM should be in Running state after deployment')
return | 6,292,790,326,233,804,000 | Test Deploy Virtual Machine with no startVM parameter | test/integration/component/test_stopped_vm.py | test_01_deploy_vm_no_startvm | ksowmya/cloudstack-1 | python | @attr(tags=['advanced', 'eip', 'advancedns', 'basic', 'sg'])
def test_01_deploy_vm_no_startvm(self):
'\n '
self.debug(('Deploying instance in the account: %s' % self.account.name))
self.virtual_machine = VirtualMachine.create(self.apiclient, self.services['virtual_machine'], accountid=self.account.name, domainid=self.account.domainid, serviceofferingid=self.service_offering.id, diskofferingid=self.disk_offering.id, mode=self.zone.networktype)
self.debug(('Deployed instance in account: %s' % self.account.name))
list_vm_response = list_virtual_machines(self.apiclient, id=self.virtual_machine.id)
self.debug(('Verify listVirtualMachines response for virtual machine: %s' % self.virtual_machine.id))
self.assertEqual(isinstance(list_vm_response, list), True, 'Check list response returns a valid list')
vm_response = list_vm_response[0]
self.assertEqual(vm_response.state, 'Running', 'VM should be in Running state after deployment')
return |
@attr(tags=['advanced', 'eip', 'advancedns', 'basic', 'sg'])
def test_02_deploy_vm_startvm_true(self):
'Test Deploy Virtual Machine with startVM=true parameter\n '
self.debug(('Deploying instance in the account: %s' % self.account.name))
self.virtual_machine = VirtualMachine.create(self.apiclient, self.services['virtual_machine'], accountid=self.account.name, domainid=self.account.domainid, serviceofferingid=self.service_offering.id, startvm=True, diskofferingid=self.disk_offering.id, mode=self.zone.networktype)
self.debug(('Deployed instance in account: %s' % self.account.name))
list_vm_response = list_virtual_machines(self.apiclient, id=self.virtual_machine.id)
self.debug(('Verify listVirtualMachines response for virtual machine: %s' % self.virtual_machine.id))
self.assertEqual(isinstance(list_vm_response, list), True, 'Check list response returns a valid list')
vm_response = list_vm_response[0]
self.assertEqual(vm_response.state, 'Running', 'VM should be in Running state after deployment')
return | 2,796,998,086,058,335,000 | Test Deploy Virtual Machine with startVM=true parameter | test/integration/component/test_stopped_vm.py | test_02_deploy_vm_startvm_true | ksowmya/cloudstack-1 | python | @attr(tags=['advanced', 'eip', 'advancedns', 'basic', 'sg'])
def test_02_deploy_vm_startvm_true(self):
'\n '
self.debug(('Deploying instance in the account: %s' % self.account.name))
self.virtual_machine = VirtualMachine.create(self.apiclient, self.services['virtual_machine'], accountid=self.account.name, domainid=self.account.domainid, serviceofferingid=self.service_offering.id, startvm=True, diskofferingid=self.disk_offering.id, mode=self.zone.networktype)
self.debug(('Deployed instance in account: %s' % self.account.name))
list_vm_response = list_virtual_machines(self.apiclient, id=self.virtual_machine.id)
self.debug(('Verify listVirtualMachines response for virtual machine: %s' % self.virtual_machine.id))
self.assertEqual(isinstance(list_vm_response, list), True, 'Check list response returns a valid list')
vm_response = list_vm_response[0]
self.assertEqual(vm_response.state, 'Running', 'VM should be in Running state after deployment')
return |
@attr(tags=['advanced', 'eip', 'advancedns', 'basic', 'sg'])
def test_03_deploy_vm_startvm_false(self):
'Test Deploy Virtual Machine with startVM=false parameter\n '
self.debug(('Deploying instance in the account: %s' % self.account.name))
self.virtual_machine = VirtualMachine.create(self.apiclient, self.services['virtual_machine'], accountid=self.account.name, domainid=self.account.domainid, serviceofferingid=self.service_offering.id, startvm=False, diskofferingid=self.disk_offering.id)
self.debug(('Deployed instance in account: %s' % self.account.name))
list_vm_response = list_virtual_machines(self.apiclient, id=self.virtual_machine.id)
self.debug(('Verify listVirtualMachines response for virtual machine: %s' % self.virtual_machine.id))
self.assertEqual(isinstance(list_vm_response, list), True, 'Check list response returns a valid list')
vm_response = list_vm_response[0]
self.assertEqual(vm_response.state, 'Stopped', 'VM should be in Stopped state after deployment with startvm=false')
routers = Router.list(self.apiclient, account=self.account.name, domainid=self.account.domainid, listall=True)
self.assertEqual(routers, None, 'List routers should return empty response')
self.debug(('Destroying instance: %s' % self.virtual_machine.name))
self.virtual_machine.delete(self.apiclient)
self.debug('Instance is destroyed!')
self.debug(('Verify listVirtualMachines response for virtual machine: %s' % self.virtual_machine.id))
self.debug('Instance destroyed..waiting till expunge interval')
interval = list_configurations(self.apiclient, name='expunge.interval')
delay = list_configurations(self.apiclient, name='expunge.delay')
time.sleep((int(interval[0].value) + int(delay[0].value)))
list_vm_response = list_virtual_machines(self.apiclient, id=self.virtual_machine.id)
self.assertEqual(list_vm_response, None, 'Check list response returns a valid list')
return | 2,372,317,937,518,507,500 | Test Deploy Virtual Machine with startVM=false parameter | test/integration/component/test_stopped_vm.py | test_03_deploy_vm_startvm_false | ksowmya/cloudstack-1 | python | @attr(tags=['advanced', 'eip', 'advancedns', 'basic', 'sg'])
def test_03_deploy_vm_startvm_false(self):
'\n '
self.debug(('Deploying instance in the account: %s' % self.account.name))
self.virtual_machine = VirtualMachine.create(self.apiclient, self.services['virtual_machine'], accountid=self.account.name, domainid=self.account.domainid, serviceofferingid=self.service_offering.id, startvm=False, diskofferingid=self.disk_offering.id)
self.debug(('Deployed instance in account: %s' % self.account.name))
list_vm_response = list_virtual_machines(self.apiclient, id=self.virtual_machine.id)
self.debug(('Verify listVirtualMachines response for virtual machine: %s' % self.virtual_machine.id))
self.assertEqual(isinstance(list_vm_response, list), True, 'Check list response returns a valid list')
vm_response = list_vm_response[0]
self.assertEqual(vm_response.state, 'Stopped', 'VM should be in Stopped state after deployment with startvm=false')
routers = Router.list(self.apiclient, account=self.account.name, domainid=self.account.domainid, listall=True)
self.assertEqual(routers, None, 'List routers should return empty response')
self.debug(('Destroying instance: %s' % self.virtual_machine.name))
self.virtual_machine.delete(self.apiclient)
self.debug('Instance is destroyed!')
self.debug(('Verify listVirtualMachines response for virtual machine: %s' % self.virtual_machine.id))
self.debug('Instance destroyed..waiting till expunge interval')
interval = list_configurations(self.apiclient, name='expunge.interval')
delay = list_configurations(self.apiclient, name='expunge.delay')
time.sleep((int(interval[0].value) + int(delay[0].value)))
list_vm_response = list_virtual_machines(self.apiclient, id=self.virtual_machine.id)
self.assertEqual(list_vm_response, None, 'Check list response returns a valid list')
return |
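The wait logic in the row above (reading expunge.interval and expunge.delay before re-listing the VM) is a recurring pattern in these tests; a condensed helper sketch follows (editorial addition, assuming a configured marvin apiclient and the same list_configurations helper the test itself uses).

import time

def wait_for_expunge(apiclient):
    # Sleep long enough for a destroyed VM to be expunged, per global settings.
    interval = list_configurations(apiclient, name='expunge.interval')
    delay = list_configurations(apiclient, name='expunge.delay')
    time.sleep((int(interval[0].value) + int(delay[0].value)))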
@attr(tags=['advanced', 'eip', 'advancedns', 'basic', 'sg'])
def test_04_deploy_startvm_false_attach_volume(self):
'Test Deploy Virtual Machine with startVM=false and attach volume\n '
self.debug(('Deploying instance in the account: %s' % self.account.name))
self.virtual_machine = VirtualMachine.create(self.apiclient, self.services['virtual_machine'], accountid=self.account.name, domainid=self.account.domainid, serviceofferingid=self.service_offering.id, startvm=False, diskofferingid=self.disk_offering.id)
self.debug(('Deployed instance in account: %s' % self.account.name))
list_vm_response = list_virtual_machines(self.apiclient, id=self.virtual_machine.id)
self.debug(('Verify listVirtualMachines response for virtual machine: %s' % self.virtual_machine.id))
self.assertEqual(isinstance(list_vm_response, list), True, 'Check list response returns a valid list')
vm_response = list_vm_response[0]
self.assertEqual(vm_response.state, 'Stopped', 'VM should be in Stopped state after deployment with startvm=false')
self.debug(('Creating a volume in account: %s' % self.account.name))
volume = Volume.create(self.apiclient, self.services['volume'], zoneid=self.zone.id, account=self.account.name, domainid=self.account.domainid, diskofferingid=self.disk_offering.id)
self.debug(('Created volume in account: %s' % self.account.name))
self.debug(('Attaching volume to instance: %s' % self.virtual_machine.name))
try:
self.virtual_machine.attach_volume(self.apiclient, volume)
except Exception as e:
self.fail('Attach volume failed!')
return | -1,168,590,315,586,303,200 | Test Deploy Virtual Machine with startVM=false and attach volume | test/integration/component/test_stopped_vm.py | test_04_deploy_startvm_false_attach_volume | ksowmya/cloudstack-1 | python | @attr(tags=['advanced', 'eip', 'advancedns', 'basic', 'sg'])
def test_04_deploy_startvm_false_attach_volume(self):
'\n '
self.debug(('Deploying instance in the account: %s' % self.account.name))
self.virtual_machine = VirtualMachine.create(self.apiclient, self.services['virtual_machine'], accountid=self.account.name, domainid=self.account.domainid, serviceofferingid=self.service_offering.id, startvm=False, diskofferingid=self.disk_offering.id)
self.debug(('Deployed instance in account: %s' % self.account.name))
list_vm_response = list_virtual_machines(self.apiclient, id=self.virtual_machine.id)
self.debug(('Verify listVirtualMachines response for virtual machine: %s' % self.virtual_machine.id))
self.assertEqual(isinstance(list_vm_response, list), True, 'Check list response returns a valid list')
vm_response = list_vm_response[0]
self.assertEqual(vm_response.state, 'Stopped', 'VM should be in Stopped state after deployment with startvm=false')
self.debug(('Creating a volume in account: %s' % self.account.name))
volume = Volume.create(self.apiclient, self.services['volume'], zoneid=self.zone.id, account=self.account.name, domainid=self.account.domainid, diskofferingid=self.disk_offering.id)
self.debug(('Created volume in account: %s' % self.account.name))
self.debug(('Attaching volume to instance: %s' % self.virtual_machine.name))
try:
self.virtual_machine.attach_volume(self.apiclient, volume)
except Exception as e:
self.fail('Attach volume failed!')
return |
@attr(tags=['advanced', 'eip', 'advancedns', 'basic', 'sg'])
def test_05_deploy_startvm_false_change_so(self):
'Test Deploy Virtual Machine with startVM=false and change service offering\n '
self.debug(('Deploying instance in the account: %s' % self.account.name))
self.virtual_machine = VirtualMachine.create(self.apiclient, self.services['virtual_machine'], accountid=self.account.name, domainid=self.account.domainid, serviceofferingid=self.service_offering.id, startvm=False)
self.debug(('Deployed instance in account: %s' % self.account.name))
list_vm_response = list_virtual_machines(self.apiclient, id=self.virtual_machine.id)
self.debug(('Verify listVirtualMachines response for virtual machine: %s' % self.virtual_machine.id))
self.assertEqual(isinstance(list_vm_response, list), True, 'Check list response returns a valid list')
vm_response = list_vm_response[0]
self.assertEqual(vm_response.state, 'Stopped', 'VM should be in Stopped state after deployment with startvm=false')
medium_service_off = ServiceOffering.create(self.apiclient, self.services['service_offering'])
self.cleanup.append(medium_service_off)
self.debug(('Changing service offering for instance: %s' % self.virtual_machine.name))
try:
self.virtual_machine.change_service_offering(self.apiclient, medium_service_off.id)
except Exception as e:
self.fail(('Change service offering failed: %s' % e))
self.debug(('Starting the instance: %s' % self.virtual_machine.name))
self.virtual_machine.start(self.apiclient)
self.debug(('Instance: %s started' % self.virtual_machine.name))
listedvm = VirtualMachine.list(self.apiclient, id=self.virtual_machine.id)
self.assert_(isinstance(listedvm, list))
self.assert_((len(listedvm) > 0))
self.assertEqual(listedvm[0].serviceofferingid, medium_service_off.id, msg='VM did not change service offering')
return | 5,525,013,060,685,405,000 | Test Deploy Virtual Machine with startVM=false and change service offering | test/integration/component/test_stopped_vm.py | test_05_deploy_startvm_false_change_so | ksowmya/cloudstack-1 | python | @attr(tags=['advanced', 'eip', 'advancedns', 'basic', 'sg'])
def test_05_deploy_startvm_false_change_so(self):
'\n '
self.debug(('Deploying instance in the account: %s' % self.account.name))
self.virtual_machine = VirtualMachine.create(self.apiclient, self.services['virtual_machine'], accountid=self.account.name, domainid=self.account.domainid, serviceofferingid=self.service_offering.id, startvm=False)
self.debug(('Deployed instance in account: %s' % self.account.name))
list_vm_response = list_virtual_machines(self.apiclient, id=self.virtual_machine.id)
self.debug(('Verify listVirtualMachines response for virtual machine: %s' % self.virtual_machine.id))
self.assertEqual(isinstance(list_vm_response, list), True, 'Check list response returns a valid list')
vm_response = list_vm_response[0]
self.assertEqual(vm_response.state, 'Stopped', 'VM should be in Stopped state after deployment with startvm=false')
medium_service_off = ServiceOffering.create(self.apiclient, self.services['service_offering'])
self.cleanup.append(medium_service_off)
self.debug(('Changing service offering for instance: %s' % self.virtual_machine.name))
try:
self.virtual_machine.change_service_offering(self.apiclient, medium_service_off.id)
except Exception as e:
self.fail(('Change service offering failed: %s' % e))
self.debug(('Starting the instance: %s' % self.virtual_machine.name))
self.virtual_machine.start(self.apiclient)
self.debug(('Instance: %s started' % self.virtual_machine.name))
listedvm = VirtualMachine.list(self.apiclient, id=self.virtual_machine.id)
self.assertTrue(isinstance(listedvm, list))
self.assertTrue((len(listedvm) > 0))
self.assertEqual(listedvm[0].serviceofferingid, medium_service_off.id, msg='VM did not change service offering')
return |
@attr(tags=['advanced', 'eip', 'advancedns', 'basic', 'sg'])
def test_06_deploy_startvm_attach_detach(self):
'Test Deploy Virtual Machine with startVM=false and\n attach detach volumes\n '
self.debug(('Deploying instance in the account: %s' % self.account.name))
self.virtual_machine = VirtualMachine.create(self.apiclient, self.services['virtual_machine'], accountid=self.account.name, domainid=self.account.domainid, serviceofferingid=self.service_offering.id, startvm=False, diskofferingid=self.disk_offering.id)
self.debug(('Deployed instance in account: %s' % self.account.name))
list_vm_response = list_virtual_machines(self.apiclient, id=self.virtual_machine.id)
self.debug(('Verify listVirtualMachines response for virtual machine: %s' % self.virtual_machine.id))
self.assertEqual(isinstance(list_vm_response, list), True, 'Check list response returns a valid list')
vm_response = list_vm_response[0]
self.assertEqual(vm_response.state, 'Stopped', 'VM should be in Stopped state after deployment with startvm=false')
self.debug(('Creating a volume in account: %s' % self.account.name))
volume = Volume.create(self.apiclient, self.services['volume'], zoneid=self.zone.id, account=self.account.name, domainid=self.account.domainid, diskofferingid=self.disk_offering.id)
self.debug(('Created volume in account: %s' % self.account.name))
self.debug(('Attaching volume to instance: %s' % self.virtual_machine.name))
try:
self.virtual_machine.attach_volume(self.apiclient, volume)
except Exception as e:
self.fail(('Attach volume failed with %s!' % e))
self.debug(('Detaching the disk: %s' % volume.name))
self.virtual_machine.detach_volume(self.apiclient, volume)
self.debug(('Datadisk %s detached!' % volume.name))
volumes = Volume.list(self.apiclient, virtualmachineid=self.virtual_machine.id, type='DATADISK', id=volume.id, listall=True)
self.assertEqual(volumes, None, 'List Volumes should not list any volume for instance')
return | -2,799,671,439,109,223,400 | Test Deploy Virtual Machine with startVM=false and
attach detach volumes | test/integration/component/test_stopped_vm.py | test_06_deploy_startvm_attach_detach | ksowmya/cloudstack-1 | python | @attr(tags=['advanced', 'eip', 'advancedns', 'basic', 'sg'])
def test_06_deploy_startvm_attach_detach(self):
'Test Deploy Virtual Machine with startVM=false and\n attach detach volumes\n '
self.debug(('Deploying instance in the account: %s' % self.account.name))
self.virtual_machine = VirtualMachine.create(self.apiclient, self.services['virtual_machine'], accountid=self.account.name, domainid=self.account.domainid, serviceofferingid=self.service_offering.id, startvm=False, diskofferingid=self.disk_offering.id)
self.debug(('Deployed instance in account: %s' % self.account.name))
list_vm_response = list_virtual_machines(self.apiclient, id=self.virtual_machine.id)
self.debug(('Verify listVirtualMachines response for virtual machine: %s' % self.virtual_machine.id))
self.assertEqual(isinstance(list_vm_response, list), True, 'Check list response returns a valid list')
vm_response = list_vm_response[0]
self.assertEqual(vm_response.state, 'Stopped', 'VM should be in Stopped state after deployment with startvm=false')
self.debug(('Creating a volume in account: %s' % self.account.name))
volume = Volume.create(self.apiclient, self.services['volume'], zoneid=self.zone.id, account=self.account.name, domainid=self.account.domainid, diskofferingid=self.disk_offering.id)
self.debug(('Created volume in account: %s' % self.account.name))
self.debug(('Attaching volume to instance: %s' % self.virtual_machine.name))
try:
self.virtual_machine.attach_volume(self.apiclient, volume)
except Exception as e:
self.fail(('Attach volume failed with %s!' % e))
self.debug(('Detaching the disk: %s' % volume.name))
self.virtual_machine.detach_volume(self.apiclient, volume)
self.debug(('Datadisk %s detached!' % volume.name))
volumes = Volume.list(self.apiclient, virtualmachineid=self.virtual_machine.id, type='DATADISK', id=volume.id, listall=True)
self.assertEqual(volumes, None, 'List Volumes should not list any volume for instance')
return |
@attr(tags=['advanced', 'eip', 'advancedns', 'basic', 'sg'])
def test_07_deploy_startvm_attach_iso(self):
'Test Deploy Virtual Machine with startVM=false and attach ISO\n '
self.debug(('Deploying instance in the account: %s' % self.account.name))
self.virtual_machine = VirtualMachine.create(self.apiclient, self.services['virtual_machine'], accountid=self.account.name, domainid=self.account.domainid, serviceofferingid=self.service_offering.id, startvm=False, diskofferingid=self.disk_offering.id)
self.debug(('Deployed instance in account: %s' % self.account.name))
list_vm_response = list_virtual_machines(self.apiclient, id=self.virtual_machine.id)
self.debug(('Verify listVirtualMachines response for virtual machine: %s' % self.virtual_machine.id))
self.assertEqual(isinstance(list_vm_response, list), True, 'Check list response returns a valid list')
vm_response = list_vm_response[0]
self.assertEqual(vm_response.state, 'Stopped', 'VM should be in Stopped state after deployment with startvm=false')
self.debug(('Registering an ISO in account: %s' % self.account.name))
iso = Iso.create(self.apiclient, self.services['iso'], account=self.account.name, domainid=self.account.domainid)
self.debug(('Successfully created ISO with ID: %s' % iso.id))
try:
iso.download(self.apiclient)
self.cleanup.append(iso)
except Exception as e:
self.fail(('Exception while downloading ISO %s: %s' % (iso.id, e)))
self.debug(('Attach ISO with ID: %s to VM ID: %s' % (iso.id, self.virtual_machine.id)))
try:
self.virtual_machine.attach_iso(self.apiclient, iso)
except Exception as e:
self.fail(('Attach ISO failed with %s!' % e))
vms = VirtualMachine.list(self.apiclient, id=self.virtual_machine.id, listall=True)
self.assertEqual(isinstance(vms, list), True, 'List vms should return a valid list')
vm = vms[0]
self.assertEqual(vm.isoid, iso.id, 'The ISO status should be reflected in list Vm call')
return | -8,296,429,924,960,855,000 | Test Deploy Virtual Machine with startVM=false and attach ISO | test/integration/component/test_stopped_vm.py | test_07_deploy_startvm_attach_iso | ksowmya/cloudstack-1 | python | @attr(tags=['advanced', 'eip', 'advancedns', 'basic', 'sg'])
def test_07_deploy_startvm_attach_iso(self):
'\n '
self.debug(('Deploying instance in the account: %s' % self.account.name))
self.virtual_machine = VirtualMachine.create(self.apiclient, self.services['virtual_machine'], accountid=self.account.name, domainid=self.account.domainid, serviceofferingid=self.service_offering.id, startvm=False, diskofferingid=self.disk_offering.id)
self.debug(('Deployed instance in account: %s' % self.account.name))
list_vm_response = list_virtual_machines(self.apiclient, id=self.virtual_machine.id)
self.debug(('Verify listVirtualMachines response for virtual machine: %s' % self.virtual_machine.id))
self.assertEqual(isinstance(list_vm_response, list), True, 'Check list response returns a valid list')
vm_response = list_vm_response[0]
self.assertEqual(vm_response.state, 'Stopped', 'VM should be in Stopped state after deployment with startvm=false')
self.debug(('Registering an ISO in account: %s' % self.account.name))
iso = Iso.create(self.apiclient, self.services['iso'], account=self.account.name, domainid=self.account.domainid)
self.debug(('Successfully created ISO with ID: %s' % iso.id))
try:
iso.download(self.apiclient)
self.cleanup.append(iso)
except Exception as e:
self.fail(('Exception while downloading ISO %s: %s' % (iso.id, e)))
self.debug(('Attach ISO with ID: %s to VM ID: %s' % (iso.id, self.virtual_machine.id)))
try:
self.virtual_machine.attach_iso(self.apiclient, iso)
except Exception as e:
self.fail(('Attach ISO failed with %s!' % e))
vms = VirtualMachine.list(self.apiclient, id=self.virtual_machine.id, listall=True)
self.assertEqual(isinstance(vms, list), True, 'List vms should return a valid list')
vm = vms[0]
self.assertEqual(vm.isoid, iso.id, 'The ISO status should be reflected in list Vm call')
return |
@attr(tags=['advanced', 'eip', 'advancedns', 'basic', 'sg'])
def test_08_deploy_attached_volume(self):
'Test Deploy Virtual Machine with startVM=false and attach volume already attached to different machine\n '
self.debug(('Deploying instance in the account: %s' % self.account.name))
self.virtual_machine_1 = VirtualMachine.create(self.apiclient, self.services['virtual_machine'], accountid=self.account.name, domainid=self.account.domainid, serviceofferingid=self.service_offering.id, startvm=False)
self.debug(('Deployed instance in account: %s' % self.account.name))
list_vm_response = list_virtual_machines(self.apiclient, id=self.virtual_machine_1.id)
self.debug(('Verify listVirtualMachines response for virtual machine: %s' % self.virtual_machine_1.id))
self.assertEqual(isinstance(list_vm_response, list), True, 'Check list response returns a valid list')
vm_response = list_vm_response[0]
self.assertEqual(vm_response.state, 'Stopped', 'VM should be in Stopped state after deployment with startvm=false')
self.debug(('Deploying instance in the account: %s' % self.account.name))
self.virtual_machine_2 = VirtualMachine.create(self.apiclient, self.services['virtual_machine'], accountid=self.account.name, domainid=self.account.domainid, serviceofferingid=self.service_offering.id, diskofferingid=self.disk_offering.id)
self.debug(('Deployed instance in account: %s' % self.account.name))
list_vm_response = list_virtual_machines(self.apiclient, id=self.virtual_machine_2.id)
self.debug(('Verify listVirtualMachines response for virtual machine: %s' % self.virtual_machine_2.id))
self.assertEqual(isinstance(list_vm_response, list), True, 'Check list response returns a valid list')
vm_response = list_vm_response[0]
self.assertEqual(vm_response.state, 'Running', 'VM should be in Running state after deployment')
self.debug(('Fetching DATADISK details for instance: %s' % self.virtual_machine_2.name))
volumes = Volume.list(self.apiclient, type='DATADISK', account=self.account.name, domainid=self.account.domainid, listall=True)
self.assertEqual(isinstance(volumes, list), True, 'List volumes should return a valid list')
volume = volumes[0]
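# A volume can be attached to only one instance at a time, so detach it from the running VM before attaching it to the stopped one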
self.debug(('Detaching the disk: %s' % volume.name))
try:
self.virtual_machine_2.detach_volume(self.apiclient, volume)
self.debug(('Datadisk %s detached!' % volume.name))
except Exception as e:
self.fail(('Detach volume failed with %s!' % e))
self.debug(('Attaching volume to instance: %s' % self.virtual_machine_1.name))
try:
self.virtual_machine_1.attach_volume(self.apiclient, volume)
except Exception as e:
self.fail(('Attach volume failed with %s!' % e))
volumes = Volume.list(self.apiclient, virtualmachineid=self.virtual_machine_1.id, type='DATADISK', id=volume.id, listall=True)
self.assertNotEqual(volumes, None, 'List Volumes should list the newly attached volume for the instance')
return | -1,117,870,452,740,422,900 | Test Deploy Virtual Machine with startVM=false and attach volume already attached to different machine | test/integration/component/test_stopped_vm.py | test_08_deploy_attached_volume | ksowmya/cloudstack-1 | python | @attr(tags=['advanced', 'eip', 'advancedns', 'basic', 'sg'])
def test_08_deploy_attached_volume(self):
'\n '
self.debug(('Deploying instance in the account: %s' % self.account.name))
self.virtual_machine_1 = VirtualMachine.create(self.apiclient, self.services['virtual_machine'], accountid=self.account.name, domainid=self.account.domainid, serviceofferingid=self.service_offering.id, startvm=False)
self.debug(('Deployed instance in account: %s' % self.account.name))
list_vm_response = list_virtual_machines(self.apiclient, id=self.virtual_machine_1.id)
self.debug(('Verify listVirtualMachines response for virtual machine: %s' % self.virtual_machine_1.id))
self.assertEqual(isinstance(list_vm_response, list), True, 'Check list response returns a valid list')
vm_response = list_vm_response[0]
self.assertEqual(vm_response.state, 'Stopped', 'VM should be in Stopped state after deployment with startvm=false')
self.debug(('Deploying instance in the account: %s' % self.account.name))
self.virtual_machine_2 = VirtualMachine.create(self.apiclient, self.services['virtual_machine'], accountid=self.account.name, domainid=self.account.domainid, serviceofferingid=self.service_offering.id, diskofferingid=self.disk_offering.id)
self.debug(('Deployed instance in account: %s' % self.account.name))
list_vm_response = list_virtual_machines(self.apiclient, id=self.virtual_machine_2.id)
self.debug(('Verify listVirtualMachines response for virtual machine: %s' % self.virtual_machine_2.id))
self.assertEqual(isinstance(list_vm_response, list), True, 'Check list response returns a valid list')
vm_response = list_vm_response[0]
self.assertEqual(vm_response.state, 'Running', 'VM should be in Running state after deployment')
self.debug(('Fetching DATADISK details for instance: %s' % self.virtual_machine_2.name))
volumes = Volume.list(self.apiclient, type='DATADISK', account=self.account.name, domainid=self.account.domainid, listall=True)
self.assertEqual(isinstance(volumes, list), True, 'List volumes should return a valid list')
volume = volumes[0]
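# A volume can be attached to only one instance at a time, so detach it from the running VM before attaching it to the stopped one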
self.debug(('Detaching the disk: %s' % volume.name))
try:
self.virtual_machine_2.detach_volume(self.apiclient, volume)
self.debug(('Datadisk %s detached!' % volume.name))
except Exception as e:
self.fail(('Detach volume failed with %s!' % e))
self.debug(('Attaching volume to instance: %s' % self.virtual_machine_1.name))
try:
self.virtual_machine_1.attach_volume(self.apiclient, volume)
except Exception as e:
self.fail(('Attach volume failed with %s!' % e))
volumes = Volume.list(self.apiclient, virtualmachineid=self.virtual_machine_1.id, type='DATADISK', id=volume.id, listall=True)
self.assertNotEqual(volumes, None, 'List Volumes should list the newly attached volume for the instance')
return |
@attr(tags=['advanced', 'eip', 'advancedns', 'basic', 'sg'])
def test_09_stop_vm_migrate_vol(self):
"Test Stopped Virtual Machine's ROOT volume migration\n "
clusters = Cluster.list(self.apiclient, zoneid=self.zone.id)
self.assertEqual(isinstance(clusters, list), True, 'Check list response returns a valid list')
i = 0
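# Find a cluster with more than one primary storage pool; otherwise there is no target pool to migrate the ROOT volume to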
for cluster in clusters:
storage_pools = StoragePool.list(self.apiclient, clusterid=cluster.id)
if (len(storage_pools) > 1):
self.cluster_id = cluster.id
i += 1
break
if (i == 0):
self.skipTest('No cluster with more than one primary storage pool found; skipping the volume migration test')
hosts = Host.list(self.apiclient, clusterid=self.cluster_id)
self.assertEqual(isinstance(hosts, list), True, 'Check list response returns a valid list')
host = hosts[0]
self.debug(('Deploying instance on host: %s' % host.id))
self.debug(('Deploying instance in the account: %s' % self.account.name))
self.virtual_machine = VirtualMachine.create(self.apiclient, self.services['virtual_machine'], accountid=self.account.name, domainid=self.account.domainid, serviceofferingid=self.service_offering.id, diskofferingid=self.disk_offering.id, hostid=host.id, mode=self.zone.networktype)
self.debug(('Deployed instance in account: %s' % self.account.name))
list_vm_response = list_virtual_machines(self.apiclient, id=self.virtual_machine.id)
self.debug(('Verify listVirtualMachines response for virtual machine: %s' % self.virtual_machine.id))
self.assertEqual(isinstance(list_vm_response, list), True, 'Check list response returns a valid list')
vm_response = list_vm_response[0]
self.assertEqual(vm_response.state, 'Running', 'VM should be in Running state after deployment')
self.debug(('Stopping instance: %s' % self.virtual_machine.name))
self.virtual_machine.stop(self.apiclient)
self.debug('Instance is stopped!')
self.debug(('Verify listVirtualMachines response for virtual machine: %s' % self.virtual_machine.id))
list_vm_response = list_virtual_machines(self.apiclient, id=self.virtual_machine.id)
self.assertEqual(isinstance(list_vm_response, list), True, 'Check list response returns a valid list')
vm_response = list_vm_response[0]
self.assertEqual(vm_response.state, 'Stopped', 'VM should be in Stopped state after stopping the vm')
volumes = Volume.list(self.apiclient, virtualmachineid=self.virtual_machine.id, type='ROOT', listall=True)
self.assertEqual(isinstance(volumes, list), True, 'Check volume list response returns a valid list')
vol_response = volumes[0]
storage_name = vol_response.storage
storage_pools = StoragePool.list(self.apiclient, clusterid=self.cluster_id)
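# Pick any pool in the cluster other than the one currently hosting the ROOT volume as the migration target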
for spool in storage_pools:
if (spool.name == storage_name):
continue
else:
self.storage_id = spool.id
self.storage_name = spool.name
break
self.debug(('Migrating volume to storage pool: %s' % self.storage_name))
Volume.migrate(self.apiclient, storageid=self.storage_id, volumeid=vol_response.id)
volume = Volume.list(self.apiclient, virtualmachineid=self.virtual_machine.id, type='ROOT', listall=True)
self.assertEqual(volume[0].storage, self.storage_name, 'Check volume migration response')
return | 587,181,880,230,539,300 | Test Stopped Virtual Machine's ROOT volume migration | test/integration/component/test_stopped_vm.py | test_09_stop_vm_migrate_vol | ksowmya/cloudstack-1 | python | @attr(tags=['advanced', 'eip', 'advancedns', 'basic', 'sg'])
def test_09_stop_vm_migrate_vol(self):
"\n "
clusters = Cluster.list(self.apiclient, zoneid=self.zone.id)
self.assertEqual(isinstance(clusters, list), True, 'Check list response returns a valid list')
i = 0
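# Find a cluster with more than one primary storage pool; otherwise there is no target pool to migrate the ROOT volume to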
for cluster in clusters:
storage_pools = StoragePool.list(self.apiclient, clusterid=cluster.id)
if (len(storage_pools) > 1):
self.cluster_id = cluster.id
i += 1
break
if (i == 0):
self.skipTest('No cluster with more than one primary storage pool found; skipping the volume migration test')
hosts = Host.list(self.apiclient, clusterid=self.cluster_id)
self.assertEqual(isinstance(hosts, list), True, 'Check list response returns a valid list')
host = hosts[0]
self.debug(('Deploying instance on host: %s' % host.id))
self.debug(('Deploying instance in the account: %s' % self.account.name))
self.virtual_machine = VirtualMachine.create(self.apiclient, self.services['virtual_machine'], accountid=self.account.name, domainid=self.account.domainid, serviceofferingid=self.service_offering.id, diskofferingid=self.disk_offering.id, hostid=host.id, mode=self.zone.networktype)
self.debug(('Deployed instance in account: %s' % self.account.name))
list_vm_response = list_virtual_machines(self.apiclient, id=self.virtual_machine.id)
self.debug(('Verify listVirtualMachines response for virtual machine: %s' % self.virtual_machine.id))
self.assertEqual(isinstance(list_vm_response, list), True, 'Check list response returns a valid list')
vm_response = list_vm_response[0]
self.assertEqual(vm_response.state, 'Running', 'VM should be in Running state after deployment')
self.debug(('Stopping instance: %s' % self.virtual_machine.name))
self.virtual_machine.stop(self.apiclient)
self.debug('Instance is stopped!')
self.debug(('Verify listVirtualMachines response for virtual machine: %s' % self.virtual_machine.id))
list_vm_response = list_virtual_machines(self.apiclient, id=self.virtual_machine.id)
self.assertEqual(isinstance(list_vm_response, list), True, 'Check list response returns a valid list')
vm_response = list_vm_response[0]
self.assertEqual(vm_response.state, 'Stopped', 'VM should be in Stopped state after stopping the vm')
volumes = Volume.list(self.apiclient, virtualmachineid=self.virtual_machine.id, type='ROOT', listall=True)
self.assertEqual(isinstance(volumes, list), True, 'Check volume list response returns a valid list')
vol_response = volumes[0]
storage_name = vol_response.storage
storage_pools = StoragePool.list(self.apiclient, clusterid=self.cluster_id)
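# Pick any pool in the cluster other than the one currently hosting the ROOT volume as the migration target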
for spool in storage_pools:
if (spool.name == storage_name):
continue
else:
self.storage_id = spool.id
self.storage_name = spool.name
break
self.debug(('Migrating volume to storage pool: %s' % self.storage_name))
Volume.migrate(self.apiclient, storageid=self.storage_id, volumeid=vol_response.id)
volume = Volume.list(self.apiclient, virtualmachineid=self.virtual_machine.id, type='ROOT', listall=True)
self.assertEqual(volume[0].storage, self.storage_name, 'Check volume migration response')
return |
@attr(tags=['advanced', 'eip', 'advancedns', 'basic', 'sg'])
def test_01_deploy_ha_vm_startvm_false(self):
'Test Deploy HA enabled Virtual Machine with startvm=false\n '
self.debug(('Deploying instance in the account: %s' % self.account.name))
self.virtual_machine = VirtualMachine.create(self.apiclient, self.services['virtual_machine'], accountid=self.account.name, domainid=self.account.domainid, serviceofferingid=self.service_offering.id, diskofferingid=self.disk_offering.id, startvm=False)
self.debug(('Deployed instance in account: %s' % self.account.name))
list_vm_response = list_virtual_machines(self.apiclient, id=self.virtual_machine.id)
self.debug(('Verify listVirtualMachines response for virtual machine: %s' % self.virtual_machine.id))
self.assertEqual(isinstance(list_vm_response, list), True, 'Check list response returns a valid list')
vm_response = list_vm_response[0]
self.assertEqual(vm_response.state, 'Stopped', 'VM should be in Stopped state after deployment')
return | -6,801,253,598,527,564,000 | Test Deploy HA enabled Virtual Machine with startvm=false | test/integration/component/test_stopped_vm.py | test_01_deploy_ha_vm_startvm_false | ksowmya/cloudstack-1 | python | @attr(tags=['advanced', 'eip', 'advancedns', 'basic', 'sg'])
def test_01_deploy_ha_vm_startvm_false(self):
'\n '
self.debug(('Deploying instance in the account: %s' % self.account.name))
self.virtual_machine = VirtualMachine.create(self.apiclient, self.services['virtual_machine'], accountid=self.account.name, domainid=self.account.domainid, serviceofferingid=self.service_offering.id, diskofferingid=self.disk_offering.id, startvm=False)
self.debug(('Deployed instance in account: %s' % self.account.name))
list_vm_response = list_virtual_machines(self.apiclient, id=self.virtual_machine.id)
self.debug(('Verify listVirtualMachines response for virtual machine: %s' % self.virtual_machine.id))
self.assertEqual(isinstance(list_vm_response, list), True, 'Check list response returns a valid list')
vm_response = list_vm_response[0]
self.assertEqual(vm_response.state, 'Stopped', 'VM should be in Stopped state after deployment')
return |
@attr(tags=['advanced', 'eip', 'advancedns', 'basic', 'sg'])
def test_02_deploy_ha_vm_from_iso(self):
'Test Deploy HA enabled Virtual Machine from ISO\n '
self.iso = Iso.create(self.apiclient, self.services['iso'], account=self.account.name, domainid=self.account.domainid)
try:
self.iso.download(self.apiclient)
self.cleanup.append(self.iso)
except Exception as e:
raise Exception(('Exception while downloading ISO %s: %s' % (self.iso.id, e)))
self.debug(('Registered ISO: %s' % self.iso.name))
self.debug(('Deploying instance in the account: %s' % self.account.name))
self.virtual_machine = VirtualMachine.create(self.apiclient, self.services['virtual_machine'], accountid=self.account.name, domainid=self.account.domainid, templateid=self.iso.id, serviceofferingid=self.service_offering.id, diskofferingid=self.disk_offering.id, startvm=True)
self.debug(('Deployed instance in account: %s' % self.account.name))
list_vm_response = list_virtual_machines(self.apiclient, id=self.virtual_machine.id)
self.debug(('Verify listVirtualMachines response for virtual machine: %s' % self.virtual_machine.id))
self.assertEqual(isinstance(list_vm_response, list), True, 'Check list response returns a valid list')
vm_response = list_vm_response[0]
self.assertEqual(vm_response.state, 'Running', 'VM should be in Running state after deployment')
return | 1,370,290,039,441,946,000 | Test Deploy HA enabled Virtual Machine from ISO | test/integration/component/test_stopped_vm.py | test_02_deploy_ha_vm_from_iso | ksowmya/cloudstack-1 | python | @attr(tags=['advanced', 'eip', 'advancedns', 'basic', 'sg'])
def test_02_deploy_ha_vm_from_iso(self):
'\n '
self.iso = Iso.create(self.apiclient, self.services['iso'], account=self.account.name, domainid=self.account.domainid)
try:
self.iso.download(self.apiclient)
self.cleanup.append(self.iso)
except Exception as e:
raise Exception(('Exception while downloading ISO %s: %s' % (self.iso.id, e)))
self.debug(('Registered ISO: %s' % self.iso.name))
self.debug(('Deploying instance in the account: %s' % self.account.name))
self.virtual_machine = VirtualMachine.create(self.apiclient, self.services['virtual_machine'], accountid=self.account.name, domainid=self.account.domainid, templateid=self.iso.id, serviceofferingid=self.service_offering.id, diskofferingid=self.disk_offering.id, startvm=True)
self.debug(('Deployed instance in account: %s' % self.account.name))
list_vm_response = list_virtual_machines(self.apiclient, id=self.virtual_machine.id)
self.debug(('Verify listVirtualMachines response for virtual machine: %s' % self.virtual_machine.id))
self.assertEqual(isinstance(list_vm_response, list), True, 'Check list response returns a valid list')
vm_response = list_vm_response[0]
self.assertEqual(vm_response.state, 'Running', 'VM should be in Running state after deployment')
return |
@attr(tags=['advanced', 'eip', 'advancedns', 'basic', 'sg'])
def test_03_deploy_ha_vm_iso_startvm_false(self):
'Test Deploy HA enabled Virtual Machine from ISO with startvm=false\n '
self.debug(('Deploying instance in the account: %s' % self.account.name))
self.virtual_machine = VirtualMachine.create(self.apiclient, self.services['virtual_machine'], accountid=self.account.name, domainid=self.account.domainid, serviceofferingid=self.service_offering.id, diskofferingid=self.disk_offering.id, startvm=False)
self.debug(('Deployed instance in account: %s' % self.account.name))
list_vm_response = list_virtual_machines(self.apiclient, id=self.virtual_machine.id)
self.debug(('Verify listVirtualMachines response for virtual machine: %s' % self.virtual_machine.id))
self.assertEqual(isinstance(list_vm_response, list), True, 'Check list response returns a valid list')
vm_response = list_vm_response[0]
self.assertEqual(vm_response.state, 'Stopped', 'VM should be in Stopped state after deployment with startvm=false')
return | -4,505,269,079,852,825,600 | Test Deploy HA enabled Virtual Machine from ISO with startvm=false | test/integration/component/test_stopped_vm.py | test_03_deploy_ha_vm_iso_startvm_false | ksowmya/cloudstack-1 | python | @attr(tags=['advanced', 'eip', 'advancedns', 'basic', 'sg'])
def test_03_deploy_ha_vm_iso_startvm_false(self):
'\n '
self.debug(('Deploying instance in the account: %s' % self.account.name))
self.virtual_machine = VirtualMachine.create(self.apiclient, self.services['virtual_machine'], accountid=self.account.name, domainid=self.account.domainid, serviceofferingid=self.service_offering.id, diskofferingid=self.disk_offering.id, startvm=False)
self.debug(('Deployed instance in account: %s' % self.account.name))
list_vm_response = list_virtual_machines(self.apiclient, id=self.virtual_machine.id)
self.debug(('Verify listVirtualMachines response for virtual machine: %s' % self.virtual_machine.id))
self.assertEqual(isinstance(list_vm_response, list), True, 'Check list response returns a valid list')
vm_response = list_vm_response[0]
self.assertEqual(vm_response.state, 'Stopped', 'VM should be in Stopped state after deployment with startvm=false')
return |
@attr(tags=['advanced', 'eip', 'advancedns', 'basic', 'sg'])
def test_01_deploy_vm_no_startvm(self):
'Test Deploy Virtual Machine with no startVM parameter\n '
self.debug(('Deploying instance in the account: %s' % self.account.name))
self.virtual_machine_1 = VirtualMachine.create(self.apiclient, self.services['virtual_machine'], accountid=self.account.name, domainid=self.account.domainid, serviceofferingid=self.service_offering.id, diskofferingid=self.disk_offering.id, startvm=False)
self.debug(('Deployed instance in account: %s' % self.account.name))
list_vm_response = list_virtual_machines(self.apiclient, id=self.virtual_machine_1.id)
self.debug(('Verify listVirtualMachines response for virtual machine: %s' % self.virtual_machine_1.id))
self.assertEqual(isinstance(list_vm_response, list), True, 'Check list response returns a valid list')
vm_response = list_vm_response[0]
self.assertEqual(vm_response.state, 'Stopped', 'VM should be in stopped state after deployment')
self.debug('Checking the router state after VM deployment')
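# With startvm=false no VM has started yet, so no virtual router should have been created for the account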
routers = Router.list(self.apiclient, account=self.account.name, domainid=self.account.domainid, listall=True)
self.assertEqual(routers, None, 'List routers should return empty response')
self.debug(('Deploying another instance (startvm=true) in the account: %s' % self.account.name))
self.virtual_machine_2 = VirtualMachine.create(self.apiclient, self.services['virtual_machine'], accountid=self.account.name, domainid=self.account.domainid, serviceofferingid=self.service_offering.id, diskofferingid=self.disk_offering.id, startvm=True)
self.debug(('Deployed instance in account: %s' % self.account.name))
list_vm_response = list_virtual_machines(self.apiclient, id=self.virtual_machine_2.id)
self.debug(('Verify listVirtualMachines response for virtual machine: %s' % self.virtual_machine_2.id))
self.assertEqual(isinstance(list_vm_response, list), True, 'Check list response returns a valid list')
vm_response = list_vm_response[0]
self.assertEqual(vm_response.state, 'Running', 'VM should be in Running state after deployment')
self.debug('Checking the router state after VM deployment')
routers = Router.list(self.apiclient, account=self.account.name, domainid=self.account.domainid, listall=True)
self.assertEqual(isinstance(routers, list), True, 'Check list routers response returns a valid list')
for router in routers:
self.debug(('Router state: %s' % router.state))
self.assertEqual(router.state, 'Running', 'Router should be in running state when instance is running in the account')
self.debug(('Destroying the running VM:%s' % self.virtual_machine_2.name))
self.virtual_machine_2.delete(self.apiclient)
self.debug('Instance destroyed..waiting till expunge interval')
interval = list_configurations(self.apiclient, name='expunge.interval')
delay = list_configurations(self.apiclient, name='expunge.delay')
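# Sleep long enough for the expunge thread to run at least once (expunge interval + delay, doubled as a safety margin)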
time.sleep(((int(interval[0].value) + int(delay[0].value)) * 2))
self.debug('Checking the router state after destroying the VM')
routers = Router.list(self.apiclient, account=self.account.name, domainid=self.account.domainid, listall=True)
self.assertNotEqual(routers, None, 'Router should not get deleted after expunge delay+wait while a stopped instance remains in the account')
return | -6,827,778,610,811,940,000 | Test Deploy Virtual Machine with no startVM parameter | test/integration/component/test_stopped_vm.py | test_01_deploy_vm_no_startvm | ksowmya/cloudstack-1 | python | @attr(tags=['advanced', 'eip', 'advancedns', 'basic', 'sg'])
def test_01_deploy_vm_no_startvm(self):
'\n '
self.debug(('Deploying instance in the account: %s' % self.account.name))
self.virtual_machine_1 = VirtualMachine.create(self.apiclient, self.services['virtual_machine'], accountid=self.account.name, domainid=self.account.domainid, serviceofferingid=self.service_offering.id, diskofferingid=self.disk_offering.id, startvm=False)
self.debug(('Deployed instance in account: %s' % self.account.name))
list_vm_response = list_virtual_machines(self.apiclient, id=self.virtual_machine_1.id)
self.debug(('Verify listVirtualMachines response for virtual machine: %s' % self.virtual_machine_1.id))
self.assertEqual(isinstance(list_vm_response, list), True, 'Check list response returns a valid list')
vm_response = list_vm_response[0]
self.assertEqual(vm_response.state, 'Stopped', 'VM should be in stopped state after deployment')
self.debug('Checking the router state after VM deployment')
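# With startvm=false no VM has started yet, so no virtual router should have been created for the account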
routers = Router.list(self.apiclient, account=self.account.name, domainid=self.account.domainid, listall=True)
self.assertEqual(routers, None, 'List routers should return empty response')
self.debug(('Deploying another instance (startvm=true) in the account: %s' % self.account.name))
self.virtual_machine_2 = VirtualMachine.create(self.apiclient, self.services['virtual_machine'], accountid=self.account.name, domainid=self.account.domainid, serviceofferingid=self.service_offering.id, diskofferingid=self.disk_offering.id, startvm=True)
self.debug(('Deployed instance in account: %s' % self.account.name))
list_vm_response = list_virtual_machines(self.apiclient, id=self.virtual_machine_2.id)
self.debug(('Verify listVirtualMachines response for virtual machine: %s' % self.virtual_machine_2.id))
self.assertEqual(isinstance(list_vm_response, list), True, 'Check list response returns a valid list')
vm_response = list_vm_response[0]
self.assertEqual(vm_response.state, 'Running', 'VM should be in Running state after deployment')
self.debug('Checking the router state after VM deployment')
routers = Router.list(self.apiclient, account=self.account.name, domainid=self.account.domainid, listall=True)
self.assertEqual(isinstance(routers, list), True, 'Check list routers response returns a valid list')
for router in routers:
self.debug(('Router state: %s' % router.state))
self.assertEqual(router.state, 'Running', 'Router should be in running state when instance is running in the account')
self.debug(('Destroying the running VM:%s' % self.virtual_machine_2.name))
self.virtual_machine_2.delete(self.apiclient)
self.debug('Instance destroyed..waiting till expunge interval')
interval = list_configurations(self.apiclient, name='expunge.interval')
delay = list_configurations(self.apiclient, name='expunge.delay')
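# Sleep long enough for the expunge thread to run at least once (expunge interval + delay, doubled as a safety margin)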
time.sleep(((int(interval[0].value) + int(delay[0].value)) * 2))
self.debug('Checking the router state after destroying the VM')
routers = Router.list(self.apiclient, account=self.account.name, domainid=self.account.domainid, listall=True)
self.assertNotEqual(routers, None, 'Router should not get deleted after expunge delay+wait while a stopped instance remains in the account')
return |