Dataset schema (one record per source file; ⌀ = nullable):
hexsha (string, len 40) | size (int64, 1–1.03M) | ext (string, 10 classes) | lang (string, 1 class) |
max_stars_repo_path (string, len 3–239) | max_stars_repo_name (string, len 5–130) |
max_stars_repo_head_hexsha (string, len 40–78) | max_stars_repo_licenses (sequence, len 1–10) |
max_stars_count (int64, 1–191k, ⌀) | max_stars_repo_stars_event_min/max_datetime (string, len 24, ⌀) |
max_issues_* (same path/name/head_hexsha/licenses fields; count int64 1–67k, ⌀; event min/max datetimes, ⌀) |
max_forks_* (same fields; count int64 1–105k, ⌀; event min/max datetimes, ⌀) |
content (string, len 1–1.03M) | avg_line_length (float64, 1–958k) | max_line_length (int64, 1–1.03M) |
alphanum_fraction (float64, 0–1)
--- datawig/mxnet_output_symbols.py | repo: sscdotopen/datawig @ 97e259d6fde9e38f66c59e82a068172c54060c04 | license: Apache-2.0 | size: 4,302 bytes ---
# Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You may not
# use this file except in compliance with the License. A copy of the License
# is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is distributed on
# an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
"""
DataWig imputer output modules
"""
from typing import List, Tuple, Any
import mxnet as mx
from .utils import logger
def make_categorical_loss(latents: mx.symbol,
label_field_name: str,
num_labels: int,
final_fc_hidden_units: List[int] = None) -> Tuple[Any, Any]:
    '''
    Generate output symbol for categorical loss
    :param latents: MXNet symbol containing the concatenated latents from all featurizers
    :param label_field_name: name of the label column
    :param num_labels: number of labels contained in the label column (for prediction)
    :param final_fc_hidden_units: list of dimensions for the final fully connected layers.
                            The length of this list corresponds to the number of FC
                            layers, and the contents of the list are integers with
                            corresponding hidden layer size.
    :return: mxnet symbols for predictions and loss
    '''
if not final_fc_hidden_units:
# generate prediction symbol
fully_connected = mx.sym.FullyConnected(
data=latents,
num_hidden=num_labels,
name="label_{}".format(label_field_name))
else:
layer_size = final_fc_hidden_units
with mx.name.Prefix("label_{}".format(label_field_name)):
for i, layer in enumerate(layer_size):
if i == len(layer_size) - 1:
fully_connected = mx.sym.FullyConnected(
data=latents,
num_hidden=layer)
else:
latents = mx.sym.FullyConnected(
data=latents,
num_hidden=layer)
pred = mx.sym.softmax(fully_connected)
label = mx.sym.Variable(label_field_name)
# assign to 0.0 the label values larger than number of classes so that they
# do not contribute to the loss
    logger.info("Building output of label {} with {} classes "
                "(including missing class)".format(label_field_name, num_labels))
num_labels_vec = label * 0.0 + num_labels
indices = mx.sym.broadcast_lesser(label, num_labels_vec)
label = label * indices
# goes from (batch, 1) to (batch,) as it is required for softmax output
label = mx.sym.split(label, axis=1, num_outputs=1, squeeze_axis=1)
# mask entries when label is 0 (missing value)
missing_labels = mx.sym.zeros_like(label)
positive_mask = mx.sym.broadcast_greater(label, missing_labels)
# compute the cross entropy only when labels are positive
cross_entropy = mx.sym.pick(mx.sym.log_softmax(fully_connected), label) * -positive_mask
# normalize the cross entropy by the number of positive label
num_positive_indices = mx.sym.sum(positive_mask)
cross_entropy = mx.sym.broadcast_div(cross_entropy, num_positive_indices + 1.0)
    # TODO: MakeLoss normalizes even when the normalization='null' argument is
    # used, so we have to multiply by batch_size here
batch_size = mx.sym.sum(mx.sym.ones_like(label))
cross_entropy = mx.sym.broadcast_mul(cross_entropy, batch_size)
return pred, cross_entropy
def make_numerical_loss(latents: mx.symbol, label_field_name: str) -> Tuple[Any, Any]:
    '''
    Generate output symbol for univariate numeric loss
    :param latents: MXNet symbol containing the concatenated latents from all featurizers
    :param label_field_name: name of the label column
    :return: mxnet symbols for predictions and loss
    '''
# generate prediction symbol
pred = mx.sym.FullyConnected(
data=latents,
num_hidden=1,
name="label_{}".format(label_field_name))
target = mx.sym.Variable(label_field_name)
# squared loss
loss = mx.sym.sum((pred - target) ** 2.0)
return pred, loss
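

# --- Usage sketch (not part of the original datawig module) ---
# A minimal, hedged illustration of wiring the two loss constructors above
# into a symbol graph; the variable names, the 'color'/'price' labels and
# the 5-class setup are assumptions for demonstration only.
if __name__ == '__main__':
    demo_latents = mx.sym.Variable('latents')
    demo_pred, demo_ce = make_categorical_loss(demo_latents,
                                               label_field_name='color',
                                               num_labels=5)
    demo_num_pred, demo_sq = make_numerical_loss(demo_latents,
                                                 label_field_name='price')
    # Symbols are lazy graph nodes; nothing is computed until they are bound
    # to data, so listing free arguments is enough to sanity-check the wiring.
    print(demo_pred.list_arguments())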
--- ares/simulations/Global21cm.py | repo: eklem1/ares @ df39056065f0493e3c922fb50ced2dc6d1bc79a2 | license: MIT | size: 19,342 bytes ---
"""
Global21cm.py
Author: Jordan Mirocha
Affiliation: University of Colorado at Boulder
Created on: Wed Sep 24 14:55:35 MDT 2014
Description:
"""
from __future__ import print_function
import os
import time
import numpy as np
from types import FunctionType
from ..util.Math import interp1d
from ..util.PrintInfo import print_sim
from ..util.ReadData import _sort_history
from ..util.Pickling import write_pickle_file
from ..util import ParameterFile, ProgressBar, get_rev
from ..analysis.Global21cm import Global21cm as AnalyzeGlobal21cm
from ..physics.Constants import nu_0_mhz, E_LyA, h_p, erg_per_ev, k_B, c
try:
from mpi4py import MPI
rank = MPI.COMM_WORLD.rank
size = MPI.COMM_WORLD.size
except ImportError:
rank = 0
size = 1
class Global21cm(AnalyzeGlobal21cm):
def __init__(self, **kwargs):
"""
Set up a two-zone model for the global 21-cm signal.
..note :: This is essentially a MultiPhaseMedium calculation, except
the Lyman alpha background and 21-cm background are calculated,
and alternative (phenomenological) parameterizations such as a
tanh for the ionization, thermal, and LW background evolution,
may be used.
"""
self.is_complete = False
# See if this is a tanh model calculation
is_phenom = self.is_phenom = self._check_if_phenom(**kwargs)
if 'problem_type' not in kwargs:
kwargs['problem_type'] = 101
self.kwargs = kwargs
# Print info to screen
if self.pf['verbose']:
print_sim(self)
#def __del__(self):
# print("Killing it! Processor={}".format(rank))
@property
def timer(self):
if not hasattr(self, '_timer'):
self._timer = 0.0
return self._timer
@timer.setter
def timer(self, value):
self._timer = value
@property
def count(self):
if not hasattr(self, '_count'):
try:
self._count = self.medium.field.count
except AttributeError:
self._count = 1
return self._count
@property
def info(self):
print_sim(self)
@property
def rank(self):
try:
from mpi4py import MPI
rank = MPI.COMM_WORLD.rank
except ImportError:
rank = 0
return rank
@property
def pf(self):
if not hasattr(self, '_pf'):
self._pf = ParameterFile(**self.kwargs)
return self._pf
@pf.setter
def pf(self, value):
self._pf = value
@property
def medium(self):
if not hasattr(self, '_medium'):
from .MultiPhaseMedium import MultiPhaseMedium
self._medium = MultiPhaseMedium(cosm=self.cosm, **self.kwargs)
#self.pf = self._medium.pf
return self._medium
@property
def field(self):
if not hasattr(self, '_field'):
self._field = self.medium.field
return self._field
@property
def pops(self):
return self.medium.field.solver.pops
@property
def grid(self):
return self.medium.field.grid
def _init_dTb(self):
"""
Compute differential brightness temperature for initial conditions.
"""
z = self.all_z
dTb = []
for i, data_igm in enumerate(self.all_data_igm):
n_H = self.medium.parcel_igm.grid.cosm.nH(z[i])
Ts = \
self.medium.parcel_igm.grid.hydr.Ts(
z[i], data_igm['Tk'], 0.0, data_igm['h_2'],
data_igm['e'] * n_H)
# Compute volume-averaged ionized fraction
if self.pf['include_cgm']:
QHII = self.all_data_cgm[i]['h_2']
else:
QHII = 0.0
xavg = QHII + (1. - QHII) * data_igm['h_2']
# Derive brightness temperature
Tb = self.medium.parcel_igm.grid.hydr.dTb(z[i], xavg, Ts)
self.all_data_igm[i]['dTb'] = Tb
self.all_data_igm[i]['Ts'] = np.array([Ts])
dTb.append(Tb)
return dTb
def _check_if_phenom(self, **kwargs):
if not kwargs:
return False
if ('tanh_model' not in kwargs) and ('gaussian_model' not in kwargs)\
and ('parametric_model' not in kwargs):
return False
self.is_tanh = False
self.is_gauss = False
self.is_param = False
if 'tanh_model' in kwargs:
if kwargs['tanh_model']:
from ..phenom.Tanh21cm import Tanh21cm as PhenomModel
self.is_tanh = True
elif 'gaussian_model' in kwargs:
if kwargs['gaussian_model']:
from ..phenom.Gaussian21cm import Gaussian21cm as PhenomModel
self.is_gauss = True
elif 'parametric_model' in kwargs:
if kwargs['parametric_model']:
from ..phenom.Parametric21cm import Parametric21cm as PhenomModel
self.is_param = True
if (not self.is_tanh) and (not self.is_gauss) and (not self.is_param):
return False
model = self._model = PhenomModel(**kwargs)
self.pf = model.pf
if self.pf['output_frequencies'] is not None:
nu = self.pf['output_frequencies']
z = nu_0_mhz / nu - 1.
elif self.pf['output_redshifts'] is not None:
z = self.pf['output_redshifts']
nu = nu_0_mhz / (1. + z)
elif self.pf['output_dz'] is not None:
z = np.arange(self.pf['final_redshift'] + self.pf['output_dz'],
self.pf['initial_redshift'], self.pf['output_dz'])[-1::-1]
nu = nu_0_mhz / (1. + z)
else:
nu_min = self.pf['output_freq_min']
nu_max = self.pf['output_freq_max']
nu_res = self.pf['output_freq_res']
nu = np.arange(nu_min, nu_max, nu_res)
z = nu_0_mhz / nu - 1.
if self.is_param:
self.history = model(z)
elif self.is_gauss:
self.history = model(nu, **self.pf)
else:
self.history = model(z, **self.pf)
return True
def run(self):
"""
Run a 21-cm simulation.
Returns
-------
Nothing: sets `history` attribute.
"""
# If this was a tanh model or some such thing, we're already done.
if self.is_phenom:
return
if self.is_complete:
print("Already ran simulation!")
return
# Need to generate radiation backgrounds first.
if self.pf['radiative_transfer']:
self.medium.field.run()
self._f_Ja = self.medium.field._f_Ja
self._f_Jlw = self.medium.field._f_Jlw
else:
self._f_Ja = lambda z: 0.0
self._f_Jlw = lambda z: 0.0
# Start timer
t1 = time.time()
tf = self.medium.tf
self.medium._insert_inits()
pb = self.pb = ProgressBar(tf, use=self.pf['progress_bar'],
name='gs-21cm')
# Lists for data in general
self.all_t, self.all_z, self.all_data_igm, self.all_data_cgm, \
self.all_RC_igm, self.all_RC_cgm = \
self.medium.all_t, self.medium.all_z, self.medium.all_data_igm, \
self.medium.all_data_cgm, self.medium.all_RCs_igm, self.medium.all_RCs_cgm
# Add zeros for Ja
for element in self.all_data_igm:
element['Ja'] = 0.0
element['Jlw'] = 0.0
# List for extrema-finding
self.all_dTb = self._init_dTb()
for t, z, data_igm, data_cgm, rc_igm, rc_cgm in self.step():
# Occasionally the progress bar breaks if we're not careful
if z < self.pf['final_redshift']:
break
if z < self.pf['kill_redshift']:
break
# Delaying the initialization prevents progressbar from being
# interrupted by, e.g., PrintInfo calls
if not pb.has_pb:
pb.start()
pb.update(t)
# Save data
self.all_z.append(z)
self.all_t.append(t)
self.all_dTb.append(data_igm['dTb'][0])
self.all_data_igm.append(data_igm.copy())
self.all_RC_igm.append(rc_igm.copy())
if self.pf['include_cgm']:
self.all_data_cgm.append(data_cgm.copy())
self.all_RC_cgm.append(rc_cgm.copy())
# Automatically find turning points
if self.pf['track_extrema']:
if self.track.is_stopping_point(self.all_z, self.all_dTb):
break
pb.finish()
self.history_igm = _sort_history(self.all_data_igm, prefix='igm_',
squeeze=True)
if self.pf['include_cgm']:
self.history_cgm = _sort_history(self.all_data_cgm, prefix='cgm_',
squeeze=True)
else:
self.history_cgm = {}
self.history = self.history_igm.copy()
self.history.update(self.history_cgm)
##
# In the future, could do this better by only calculating Ja at
    # the end, since it is a passive quantity (unless we included its
# very small heating).
##
#if self.pf['secondary_lya']:
# xe = lambda zz: np.interp(zz, self.history['z'][-1::-1],
# self.history['igm_e'][-1::-1])
# self.medium.field.run(xe=xe)
# self._f_Ja = self.medium.field._f_Ja
# #self._f_Jlw = self.medium.field._f_Jlw
#
# # Fix Ja in history
self.history['dTb'] = self.history['igm_dTb']
#self.history['dTb_bulk'] = self.history['igm_dTb_bulk']
self.history['Ts'] = self.history['igm_Ts']
self.history['Ja'] = self.history['igm_Ja']
self.history['Jlw'] = self.history['igm_Jlw']
# Save rate coefficients [optional]
if self.pf['save_rate_coefficients']:
self.rates_igm = \
_sort_history(self.all_RC_igm, prefix='igm_', squeeze=True)
self.rates_cgm = \
_sort_history(self.all_RC_cgm, prefix='cgm_', squeeze=True)
self.history.update(self.rates_igm)
self.history.update(self.rates_cgm)
self.history['t'] = np.array(self.all_t)
self.history['z'] = np.array(self.all_z)
##
# Optional extra radio background
##
Tr = np.zeros_like(self.history['z'])
for popid, pop in enumerate(self.pops):
if not pop.is_src_radio:
continue
z, E, flux = self.field.get_history(popid, flatten=True)
E21cm = h_p * nu_0_mhz * 1e6 / erg_per_ev
f21 = interp1d(E, flux, axis=1, bounds_error=False,
fill_value=0.0, force_scipy=True)
flux_21cm = f21(E21cm)
Tr += np.interp(self.history['z'], z, flux_21cm) \
* E21cm * erg_per_ev * c**2 / k_B / 2. / (nu_0_mhz * 1e6)**2
if not np.all(Tr == 0):
assert self.medium.parcel_igm.grid.hydr.Tbg is None
elif self.medium.parcel_igm.grid.hydr.Tbg is not None:
Tr = self.medium.parcel_igm.grid.hydr.Tbg(self.history['z'])
self.history['Tr'] = Tr
# Correct the brightness temperature if there are non-CMB backgrounds
if not np.all(Tr == 0):
zall = self.history['z']
n_H = self.medium.parcel_igm.grid.cosm.nH(zall)
Ts = self.medium.parcel_igm.grid.hydr.Ts(zall,
self.history['igm_Tk'], self.history['Ja'],
self.history['igm_h_2'], self.history['igm_e'] * n_H, Tr)
if self.pf['floor_Ts']:
Ts = max(Ts, self.medium.parcel_igm.grid.hydr.Ts_floor(z=zall))
# Compute volume-averaged ionized fraction
xavg = self.history['cgm_h_2'] \
+ (1. - self.history['cgm_h_2']) * self.history['igm_h_2']
# Derive brightness temperature
dTb = self.medium.parcel_igm.grid.hydr.dTb(zall, xavg, Ts, Tr)
self.history['dTb_no_radio'] = self.history['dTb'].copy()
self.history['dTb'] = dTb
#self.history['dTb_bulk'] = \
# self.medium.parcel_igm.grid.hydr.dTb(zall, 0.0, Ts, Tr)
t2 = time.time()
self.timer = t2 - t1
self.is_complete = True
def step(self):
"""
Generator for the 21-cm signal.
.. note:: Basically just calling MultiPhaseMedium here, except we
compute the spin temperature and brightness temperature on
each step.
Returns
-------
Generator for MultiPhaseMedium object, with notable addition that
the spin temperature and 21-cm brightness temperature are now
tracked.
"""
for t, z, data_igm, data_cgm, RC_igm, RC_cgm in self.medium.step():
Ja = np.atleast_1d(self._f_Ja(z))
Jlw = np.atleast_1d(self._f_Jlw(z))
# Compute spin temperature
n_H = self.medium.parcel_igm.grid.cosm.nH(z)
Ts = self.medium.parcel_igm.grid.hydr.Ts(z,
data_igm['Tk'], Ja, data_igm['h_2'], data_igm['e'] * n_H)
if self.pf['floor_Ts'] is not None:
Ts = max(Ts, self.medium.parcel_igm.grid.hydr.Ts_floor(z=z))
# Compute volume-averaged ionized fraction
if self.pf['include_cgm']:
xavg = data_cgm['h_2'] + (1. - data_cgm['h_2']) * data_igm['h_2']
else:
xavg = data_igm['h_2']
# Derive brightness temperature
dTb = self.medium.parcel_igm.grid.hydr.dTb(z, xavg, Ts)
dTb_b = self.medium.parcel_igm.grid.hydr.dTb(z, 0.0, Ts)
# Add derived fields to data
data_igm.update({'Ts': Ts, 'dTb': dTb, #'dTb_bulk': dTb_b,
'Ja': Ja, 'Jlw': Jlw})
# Yield!
yield t, z, data_igm, data_cgm, RC_igm, RC_cgm
def save(self, prefix, suffix='pkl', clobber=False, fields=None):
"""
Save results of calculation. Pickle parameter file dict.
Notes
-----
1) will save files as prefix.history.suffix and prefix.parameters.pkl.
2) ASCII files will fail if simulation had multiple populations.
Parameters
----------
prefix : str
Prefix of save filename
suffix : str
Suffix of save filename. Can be hdf5 (or h5) or pkl.
Anything else will be assumed to be ASCII format (e.g., .txt).
clobber : bool
Overwrite pre-existing files of same name?
"""
fn = '{0!s}.history.{1!s}'.format(prefix, suffix)
if os.path.exists(fn):
if clobber:
os.remove(fn)
else:
raise IOError('{!s} exists! Set clobber=True to overwrite.'.format(fn))
if suffix == 'pkl':
write_pickle_file(self.history._data, fn, ndumps=1, open_mode='w',\
safe_mode=False, verbose=False)
try:
write_pickle_file(self.blobs, '{0!s}.blobs.{1!s}'.format(\
prefix, suffix), ndumps=1, open_mode='w', safe_mode=False,\
verbose=self.pf['verbose'])
except AttributeError:
print('Error writing {0!s}.blobs.{1!s}'.format(prefix, suffix))
elif suffix in ['hdf5', 'h5']:
import h5py
f = h5py.File(fn, 'w')
for key in self.history:
if fields is not None:
if key not in fields:
continue
f.create_dataset(key, data=np.array(self.history[key]))
f.close()
# ASCII format
else:
f = open(fn, 'w')
print("#", end='', file=f)
for key in self.history:
if fields is not None:
if key not in fields:
continue
print('{0:<18s}'.format(key), end='', file=f)
print('', file=f)
# Now, the data
for i in range(len(self.history[key])):
s = ''
for key in self.history:
if fields is not None:
if key not in fields:
continue
s += '{:<20.8e}'.format(self.history[key][i])
if not s.strip():
continue
print(s, file=f)
f.close()
if self.pf['verbose']:
print('Wrote {0!s}.history.{1!s}'.format(prefix, suffix))
# Save histories for Mmin and SFRD if we're doing iterative stuff
if self.count > 1 and hasattr(self, '_Mmin_bank'):
write_pickle_file((self.medium.field._zarr,\
self.medium.field._Mmin_bank), '{!s}.Mmin.pkl'.format(prefix),\
ndumps=2, open_mode='w', safe_mode=False,\
verbose=self.pf['verbose'])
if self.pf['feedback_LW_sfrd_popid'] is not None:
pid = self.pf['feedback_LW_sfrd_popid']
write_pickle_file((self.medium.field.pops[pid].halos.tab_z,\
self.medium.field._sfrd_bank), '{!s}.sfrd.pkl'.format(\
prefix), ndumps=1, open_mode='w', safe_mode=False,\
verbose=self.pf['verbose'])
write_pf = True
if os.path.exists('{!s}.parameters.pkl'.format(prefix)):
if clobber:
os.remove('{!s}.parameters.pkl'.format(prefix))
else:
write_pf = False
print(('WARNING: {!s}.parameters.pkl exists! Set ' +\
'clobber=True to overwrite.').format(prefix))
if write_pf:
#pf = {}
#for key in self.pf:
# if key in self.carryover_kwargs():
# continue
# pf[key] = self.pf[key]
if 'revision' not in self.pf:
self.pf['revision'] = get_rev()
# Save parameter file
write_pickle_file(self.pf, '{!s}.parameters.pkl'.format(prefix),\
ndumps=1, open_mode='w', safe_mode=False,\
verbose=self.pf['verbose'])
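

# --- Usage sketch (not part of the original ares module) ---
# A hedged example of how this class is typically driven; problem_type=101
# matches the default set in __init__, the other values are illustrative.
if __name__ == '__main__':
    sim = Global21cm(problem_type=101, verbose=False)
    sim.run()
    # history is populated by run(): redshift grid and brightness temperature
    z, dTb = sim.history['z'], sim.history['dTb']
    sim.save('demo_run', suffix='pkl', clobber=True)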
--- Python/6/NextPolydivisibleNumber/test_next_polydivisible_number.py | repo: hwakabh/codewars @ 7afce5a7424d35abc55c350301ac134f2d3edd3d | license: MIT | size: 637 bytes ---
from unittest import TestCase
from unittest import main
from next_polydivisible_number import next_num
class TestNextPolydivisibleNumber(TestCase):
def test_next_num(self):
ptr = [
            # polydivisible numbers: every length-i prefix is divisible by i
            (0, 1),  # 1 % 1 == 0
            (10, 12),  # 1 % 1 == 0, 12 % 2 == 0
            (11, 12),
            (1234, 1236),  # 1 % 1 == 0, 12 % 2 == 0, 123 % 3 == 0, 1236 % 4 == 0
            (123220, 123252),
]
for inp, exp in ptr:
with self.subTest(inp=inp, exp=exp):
self.assertEqual(next_num(n=inp), exp)
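
# --- Hedged reference implementation (not part of the original repo) ---
# The module under test is not included in this dump; a minimal solution
# consistent with the expected values above could look like this:
def _reference_next_num(n):
    """Return the smallest polydivisible number strictly greater than n."""
    def is_polydivisible(k):
        s = str(k)
        # every prefix of length i must be divisible by i
        return all(int(s[:i]) % i == 0 for i in range(1, len(s) + 1))
    k = n + 1
    while not is_polydivisible(k):
        k += 1
    return k
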
if __name__ == "__main__":
main(verbosity=2)
--- seev/apps/core/views.py | repo: Kairn/se-eon-venture @ 98cb77067bf3d9e3489923416f59eb9c044d9e3b | license: MIT | size: 19,950 bytes ---
"""
View logic used in core app
"""
import traceback
from django.conf import settings
from django.http import HttpRequest
from django.db import transaction
from django.shortcuts import render, redirect, reverse
from django.core.paginator import Paginator
from django.core.exceptions import ObjectDoesNotExist
from seev.apps.utils.generators import (getRandomSalt, getSha384Hash,
getSha224Hash, getAdminCredentials, getCpAdminId,
getClientStates)
from seev.apps.utils.validations import isValidRegisterRequest
from seev.apps.utils.messages import get_app_message, addSnackDataToContext, getNewOppoMessage
from seev.apps.utils.session import store_context_in_session, get_context_in_session
from seev.apps.utils.process import logError
from .models import UnoClient, UnoCredentials, UnoApproval, UnoCustomer, UnoOpportunity
from .forms import (LoginForm, PasswordResetForm, RegisterForm,
ApprovalForm, CustomerForm, OpportunityForm)
def go_landing(request):
# Test cookie (disabled)
# request.session.set_test_cookie()
context = {}
return render(request, 'core/index.html', context=context)
def go_login(request, context=None):
try:
if request and request.session:
pass
# Test cookie (disabled)
# if request.session.test_cookie_worked():
# print('Django session is working')
# request.session.delete_test_cookie()
except AttributeError:
pass
# Retrieve session context if passed
context = get_context_in_session(request)
if context is None:
context = {}
if request.method == 'GET':
loginForm = LoginForm()
psrForm = PasswordResetForm()
context['loginForm'] = loginForm
context['psrForm'] = psrForm
return render(request, 'core/login.html', context=context)
def auth_login(request):
context = {}
if request.method == 'POST':
try:
username = request.POST['username']
password = request.POST['password']
unHash = getSha224Hash(username)
psHash = getSha224Hash(password)
if unHash == getAdminCredentials()[0] and psHash == getAdminCredentials()[1]:
request.session['id'] = getCpAdminId()
return redirect('go_admin')
# Get client credentials data
credObj = UnoCredentials.objects.get(username=username)
if credObj and credObj.password_hash == getSha384Hash(password + credObj.password_salt):
client = UnoClient.objects.get(client_id=credObj.client_id)
if client.active:
request.session['id'] = str(
credObj.client_id).replace('-', '')
return redirect('go_client')
else:
store_context_in_session(
request, addSnackDataToContext(context, 'Access denied'))
return redirect('go_login')
else:
request.session.clear()
store_context_in_session(request, addSnackDataToContext(
context, 'Invalid credentials'))
return redirect('go_login')
except ObjectDoesNotExist:
store_context_in_session(
request, addSnackDataToContext(context, 'User not found'))
return redirect('go_login')
except Exception:
# traceback.print_exc()
logError(request)
request.session.clear()
store_context_in_session(
request, addSnackDataToContext(context, 'ERR01'))
return redirect('go_login')
else:
return redirect('go_login')
def auth_password_reset(request):
"""
Deprecated
"""
if request.method == 'POST':
return redirect('go_landing')
else:
return redirect('go_login')
def go_register(request):
context = {}
if request.method == 'GET':
registerForm = RegisterForm()
context['registerForm'] = registerForm
return render(request, 'core/register.html', context=context)
@transaction.atomic
def do_register(request):
if request.method == 'POST':
registerForm = RegisterForm(request.POST, request.FILES)
# Basic validation
if registerForm.is_multipart() and registerForm.is_valid():
# Specific validation
if isValidRegisterRequest(request.POST):
entity_name = request.POST['entity_name']
country = request.POST['country']
trade_ticker = request.POST['trade_ticker']
contact_email = request.POST['contact_email']
contact_phone = request.POST['contact_phone']
summary = request.POST['summary']
website = request.POST['website']
username = request.POST['username']
password = request.POST['password']
recovery_email = request.POST['recovery_email']
pin = request.POST['pin']
# Obtain binary data (deprecated but doable)
sl_bin = b''
try:
signature_letter = request.FILES['signature_letter']
for chunk in signature_letter.chunks():
sl_bin += chunk
except KeyError:
sl_bin = b''
pass
password_salt = getRandomSalt(8)
if len(trade_ticker) == 0:
trade_ticker = None
if len(summary) == 0:
summary = None
if len(website) == 0:
website = None
try:
# Create client object
newClient = UnoClient(
ctg_name=None,
entity_name=entity_name,
country=country,
trade_ticker=trade_ticker,
contact_email=contact_email,
contact_phone=contact_phone,
signature_letter=sl_bin,
summary=summary,
website=website
)
# Create credentials object
newCredentials = UnoCredentials(
client=newClient,
username=username,
password_salt=password_salt,
password_hash=getSha384Hash(password + password_salt),
recovery_email=recovery_email,
pin=pin
)
newClient.save()
newCredentials.save()
except Exception:
# traceback.print_exc()
logError(request)
return go_error(HttpRequest(), {'error': get_app_message('register_error'), 'message': get_app_message('register_error_message')})
return go_success(HttpRequest(), {'message': get_app_message('register_success')})
else:
return go_error(HttpRequest(), {'error': get_app_message('register_error'), 'message': get_app_message('register_error_message')})
else:
return go_error(HttpRequest(), {'error': get_app_message('register_error'), 'message': get_app_message('register_error_message')})
else:
return redirect('go_register')
def go_success(request, context=None):
context = context
if not context and not settings.DEBUG:
return redirect('go_landing')
if context is None:
context = {}
if 'return_link' in context:
pass
else:
context['return_link'] = reverse('go_landing')
return render(request, 'core/success.html', context=context)
def go_error(request, context=None):
context = context
if not context and not settings.DEBUG:
return redirect('go_landing')
return render(request, 'core/error.html', context=context)
def go_admin(request, context=None):
try:
if request is None:
return redirect('go_login')
elif request.session['id'] != getCpAdminId():
request.session.clear()
return redirect('go_login')
except KeyError:
return redirect('go_login')
context = get_context_in_session(request)
if context is None:
context = {}
ITEMS_PER_PAGE = 3
requestPage = None
if request.GET.get('request_page'):
requestPage = request.GET.get('request_page')
else:
requestPage = 1
# Fetch client data
clientList = UnoClient.objects.all().order_by('-creation_time', 'client_id')
pagedList = Paginator(clientList, ITEMS_PER_PAGE)
clients = pagedList.get_page(requestPage)
# Store the current page in temp session variable
request.session['admin_page'] = requestPage
# Deprecated but usable
for client in clients:
tempBytes = client.signature_letter
if tempBytes:
client.signature_letter = tempBytes.decode('U8')
context['clients'] = clients
context['approvalForm'] = ApprovalForm()
return render(request, 'core/admin.html', context=context)
def go_logout(request):
if request and hasattr(request, 'session') and request.session:
request.session.clear()
return redirect('go_landing')
@transaction.atomic
def do_approve(request):
if request.method == 'POST':
try:
if request.session['id'] != getCpAdminId():
request.session.clear()
return redirect('go_login')
# Retrieve form data
client_id = request.POST['client_id']
ctg_name = request.POST['ctg_name']
action = request.POST['action']
comment = request.POST['message']
# Get client data
client = UnoClient.objects.get(client_id=client_id)
# Validate action
valid = False
tempStatus = ''
if client.status == getClientStates('PE'):
if action == 'AP' and ctg_name:
valid = True
tempStatus = getClientStates('AP')
elif action == 'DE':
valid = True
tempStatus = getClientStates('DE')
elif client.status == getClientStates('AP'):
if action == 'RV':
valid = True
tempStatus = getClientStates('RV')
elif client.status == getClientStates('RV'):
if action == 'RI':
valid = True
tempStatus = getClientStates('AP')
else:
valid = False
if not valid:
raise RuntimeError
# Create approval data
newApproval = UnoApproval(
client=client,
action=action,
message=comment
)
newApproval.save()
# Update client data
if (tempStatus == getClientStates('AP')):
client.active = 1
client.ctg_name = ctg_name
else:
client.active = 0
client.status = tempStatus
client.save()
# Retrieve the current page
redirectPage = 1
if 'admin_page' in request.session:
redirectPage = request.session['admin_page']
# Success message
store_context_in_session(request, addSnackDataToContext(
{}, 'Your action has been applied'))
return redirect(reverse('go_admin') + '?request_page=' + str(redirectPage))
except Exception:
# traceback.print_exc()
logError(request)
return go_error(HttpRequest(), {'error': get_app_message('approval_error'), 'message': get_app_message('approval_error_message')})
else:
return redirect('go_admin')
def go_client(request, context=None):
if request and hasattr(request, 'session') and request.session and 'id' in request.session:
if len(request.session['id']) != 32:
request.session.clear()
return redirect('go_login')
else:
context = get_context_in_session(request)
if context is None:
context = {}
client = UnoClient.objects.get(client_id=request.session['id'])
context['client'] = client
# Customer form
context['customerForm'] = CustomerForm()
# Opportunity form
oppoForm = OpportunityForm(initial={'client_id': client.client_id})
customerList = UnoCustomer.objects.filter(client=client)
custChoice = []
for cust in customerList:
choice = (cust.customer_id, cust.customer_name)
custChoice.append(choice)
if len(custChoice) > 0:
oppoForm.fields['customer'].choices = custChoice
context['oppoForm'] = oppoForm
else:
context['oppoForm'] = None
return render(request, 'core/client.html', context=context)
else:
return redirect('go_login')
@transaction.atomic
def do_enroll(request):
if request and request.method == 'POST':
try:
context = {}
# Verify client
client = None
if request.session:
client = UnoClient.objects.get(client_id=request.session['id'])
if not client:
raise RuntimeError
# Retrieve form values
customer_name = request.POST['customer_name']
contact_email = request.POST['contact_email']
country = request.POST['country']
if customer_name and contact_email and country:
newCustomer = UnoCustomer(
client=client,
customer_name=customer_name,
contact_email=contact_email,
country=country
)
newCustomer.save()
return go_success(HttpRequest(), {'message': get_app_message('enroll_success'), 'return_link': reverse('go_client')})
else:
store_context_in_session(request, addSnackDataToContext(
context, 'Invalid form data'))
return redirect('go_login')
except RuntimeError:
if hasattr(request, 'session') and request.session:
request.session.clear()
store_context_in_session(request, addSnackDataToContext(
context, 'Invalid client session'))
return redirect('go_login')
except Exception:
# traceback.print_exc()
logError(request)
return go_error(HttpRequest(), {'error': get_app_message('enroll_error'), 'message': get_app_message('enroll_error_message')})
else:
return redirect('go_client')
@transaction.atomic
def do_oppo(request, context=None):
if request and request.method == 'POST':
try:
if not context:
context = {}
client = None
if request.session:
client = UnoClient.objects.get(client_id=request.session['id'])
if not client:
raise RuntimeError
# Get opportunity details
customer_id = request.POST['customer']
discount_nrc = request.POST['discount_nrc']
discount_mrc = request.POST['discount_mrc']
deal_limit = int(request.POST['deal_limit'])
if deal_limit < 1 or deal_limit > 32:
raise AssertionError
customer_id = str(customer_id).replace('-', '')
customer = UnoCustomer.objects.get(customer_id=customer_id)
newOpportunity = UnoOpportunity(
client=client,
customer=customer,
discount_nrc=discount_nrc,
discount_mrc=discount_mrc,
deal_limit=deal_limit
)
newOpportunity.save()
return go_success(HttpRequest(), {'message': getNewOppoMessage(newOpportunity.opportunity_number), 'return_link': reverse('go_client')})
except AssertionError:
store_context_in_session(request, addSnackDataToContext(
context, 'Invalid data encountered'))
return redirect('go_client')
except RuntimeError:
if hasattr(request, 'session') and request.session:
request.session.clear()
store_context_in_session(request, addSnackDataToContext(
context, 'Invalid client session'))
return redirect('go_login')
except Exception:
# traceback.print_exc()
logError(request)
return go_error(HttpRequest(), {'error': get_app_message('oppo_error'), 'message': get_app_message('oppo_error_message')})
else:
return redirect('go_client')
def go_records(request):
    # Bind context before the try block so the exception handler below can
    # always reference it safely.
    context = {}
    try:
        client = UnoClient.objects.get(client_id=request.session['id'])
context = get_context_in_session(request)
if not context:
context = {}
context['entity_name'] = client.entity_name
records = []
oppoList = UnoOpportunity.objects.filter(client=client)
if len(oppoList) < 1:
store_context_in_session(request, addSnackDataToContext(
context, 'No opportunity found'))
return redirect('go_client')
for oppo in oppoList:
customer = UnoCustomer.objects.get(customer_id=oppo.customer_id)
records.append(
(str(oppo.opportunity_number).replace('-', ''), oppo.creation_time, customer.customer_name, oppo.active))
context['records'] = records
return render(request, 'core/records.html', context=context)
except Exception:
# traceback.print_exc()
logError(request)
if request and hasattr(request, 'session'):
request.session.clear()
store_context_in_session(
request, addSnackDataToContext(context, 'Unexpected Error'))
return redirect('go_login')
@transaction.atomic
def can_oppo(request, context=None):
if request and request.method == 'POST':
try:
if not context:
context = {}
client = None
if request.session:
client = UnoClient.objects.get(client_id=request.session['id'])
if not client:
raise RuntimeError
opportunity = UnoOpportunity.objects.get(
opportunity_number=request.POST['oppoNb'])
if not opportunity.active:
raise Exception
opportunity.active = False
opportunity.save()
store_context_in_session(request, addSnackDataToContext(
context, 'Opportunity annulled'))
return redirect('go_records')
except RuntimeError:
if hasattr(request, 'session') and request.session:
request.session.clear()
store_context_in_session(request, addSnackDataToContext(
context, 'Invalid client session'))
return redirect('go_login')
except Exception:
# traceback.print_exc()
logError(request)
return go_error(HttpRequest(), {'error': get_app_message('oppo_can_error'), 'message': get_app_message('oppo_can_message')})
else:
return redirect('go_records')
def go_bad_view(request, context=None):
return render(request, 'core/bad-view.html', context=context)
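

# --- Hedged sketch of the salted-hash scheme used in do_register/auth_login ---
# Standalone illustration only; the real helpers (getRandomSalt, getSha384Hash)
# live in seev.apps.utils.generators and may differ in detail.
if __name__ == '__main__':
    import hashlib
    import secrets

    def demo_salt(length=8):
        return secrets.token_hex(length)[:length]

    def demo_sha384(text):
        return hashlib.sha384(text.encode('utf-8')).hexdigest()

    salt = demo_salt()
    stored = demo_sha384('hunter2' + salt)           # persisted at registration
    assert demo_sha384('hunter2' + salt) == stored   # recomputed at login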
--- setup.py | repo: SpotlightKid/jackclient-python @ cfc1e6a11f50f98abcd351b6e372e98da8e3a06d | license: MIT | size: 1,156 bytes ---
from setuptools import setup
__version__ = 'unknown'
# "import" __version__
for line in open('src/jack.py'):
if line.startswith('__version__'):
exec(line)
break
setup(
name='JACK-Client',
version=__version__,
package_dir={'': 'src'},
py_modules=['jack'],
setup_requires=['CFFI>=1.0'],
install_requires=['CFFI>=1.0'],
python_requires='>=3',
extras_require={'NumPy': ['NumPy']},
cffi_modules=['jack_build.py:ffibuilder'],
author='Matthias Geier',
author_email='[email protected]',
description='JACK Audio Connection Kit (JACK) Client for Python',
long_description=open('README.rst').read(),
license='MIT',
keywords='JACK audio low-latency multi-channel'.split(),
url='http://jackclient-python.readthedocs.io/',
platforms='any',
classifiers=[
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3 :: Only',
'Topic :: Multimedia :: Sound/Audio',
],
zip_safe=True,
)
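
# Note (not part of the original file): because of the cffi_modules hook above,
# a plain `python -m pip install .` should also build the CFFI wrapper declared
# in jack_build.py; see the CFFI docs for the build-time requirements.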
--- test/unit/authnz/test_custos_authnz.py | repo: toshea-bu/galaxy @ 760fcbd3d4ca8b036d8531751d972684eee130e2 | license: CC-BY-3.0 | size: 26,600 bytes ---
import hashlib
import json
import os
import unittest
import uuid
from datetime import datetime, timedelta
from urllib.parse import (
parse_qs,
quote,
urlparse,
)
import jwt
import requests
from galaxy.authnz import custos_authnz
from galaxy.model import CustosAuthnzToken, User
from galaxy.util import unicodify
from ..unittest_utils.galaxy_mock import MockTrans
class CustosAuthnzTestCase(unittest.TestCase):
_create_oauth2_session_called = False
_fetch_token_called = False
_get_userinfo_called = False
_raw_token = None
def _get_idp_url(self):
        # ideally we could use a URI like the following:
# https://test_base_uri/auth
return 'https://iam.scigap.org/auth'
def _get_credential_url(self):
return '/'.join([self._get_idp_url(), 'credentials'])
def _get_well_known_url(self):
return '/'.join([self._get_idp_url(), '.well-known/openid-configuration'])
def setUp(self):
self.orig_requests_get = requests.get
requests.get = self.mockRequest({
self._get_well_known_url(): {
"authorization_endpoint": "https://test-auth-endpoint",
"token_endpoint": "https://test-token-endpoint",
"userinfo_endpoint": "https://test-userinfo-endpoint",
"end_session_endpoint": "https://test-end-session-endpoint"
},
self._get_credential_url(): {
"iam_client_secret": "TESTSECRET"
}
})
self.custos_authnz = custos_authnz.CustosAuthnz('Custos', {
'VERIFY_SSL': True
}, {
'url': self._get_idp_url(),
'client_id': 'test-client-id',
'client_secret': 'test-client-secret',
'redirect_uri': 'https://test-redirect-uri',
'realm': 'test-realm'
})
self.setupMocks()
self.test_state = "abc123"
self.test_nonce = b"4662892146306485421546981092"
self.test_nonce_hash = hashlib.sha256(self.test_nonce).hexdigest()
self.test_code = "test-code"
self.test_username = "test-username"
self.test_email = "test-email"
self.test_sub = "test-sub"
self.test_alt_username = "test-alt-username"
self.test_alt_email = "test-alt-email"
self.test_access_token = "test_access_token"
self.test_refresh_token = "test_refresh_token"
self.test_expires_in = 30
self.test_refresh_expires_in = 1800
self.test_user_id = str(uuid.uuid4())
self.test_alt_user_id = str(uuid.uuid4())
self.trans.request.url = f"https://localhost:8000/authnz/custos/oidc/callback?state={self.test_state}&code={self.test_code}"
def setupMocks(self):
self.mock_fetch_token(self.custos_authnz)
self.mock_get_userinfo(self.custos_authnz)
self.trans = self.mockTrans()
@property
def test_id_token(self):
return unicodify(jwt.encode({'nonce': self.test_nonce_hash}, key=None, algorithm=None))
def mock_create_oauth2_session(self, custos_authnz):
orig_create_oauth2_session = custos_authnz._create_oauth2_session
def create_oauth2_session(state=None):
self._create_oauth2_session_called = True
assert state == self.test_state
return orig_create_oauth2_session(state)
custos_authnz._create_oauth2_session = create_oauth2_session
def mock_fetch_token(self, custos_authnz):
def fetch_token(oauth2_session, trans):
self._fetch_token_called = True
self._raw_token = {
"access_token": self.test_access_token,
"id_token": self.test_id_token,
"refresh_token": self.test_refresh_token,
"expires_in": self.test_expires_in,
"refresh_expires_in": self.test_refresh_expires_in
}
return self._raw_token
custos_authnz._fetch_token = fetch_token
def mock_get_userinfo(self, custos_authnz):
def get_userinfo(oauth2_session):
self._get_userinfo_called = True
return {
"preferred_username": self.test_username,
"email": self.test_email,
"sub": self.test_user_id,
"alt_username": self.test_alt_username,
"alt_email": self.test_alt_email,
"alt_id": self.test_alt_user_id
}
custos_authnz._get_userinfo = get_userinfo
def mockRequest(self, request_dict):
def get(x, **kwargs):
assert(x in request_dict)
return Response(request_dict[x])
class Response:
def __init__(self, resp):
self.response = resp
def json(self):
return self.response
return get
def mockTrans(self):
class Request:
url = None
class QueryResult:
results = []
def __init__(self, results=None):
if results:
self.results = results
def first(self):
if len(self.results) > 0:
return self.results[0]
else:
return None
def one_or_none(self):
if len(self.results) == 1:
return self.results[0]
elif len(self.results) == 0:
return None
else:
raise Exception("More than one result!")
class Query:
external_user_id = None
provider = None
custos_authnz_token = None
def filter_by(self, email=None, external_user_id=None, provider=None, username=None):
self.external_user_id = external_user_id
self.provider = provider
if username:
# This is only called with a specific username to check if it
# already exists in the database. Say no, for testing.
return QueryResult()
if self.custos_authnz_token:
return QueryResult([self.custos_authnz_token])
else:
return QueryResult()
class Session:
items = []
flush_called = False
_query = Query()
deleted = []
def add(self, item):
self.items.append(item)
def delete(self, item):
self.deleted.append(item)
def flush(self):
self.flush_called = True
def query(self, cls):
return self._query
class Trans(MockTrans):
def __init__(self, app=None, user=None, history=None, **kwargs):
super().__init__(app, user, history, **kwargs)
self.cookies = {}
self.cookies_args = {}
self.request = Request()
self.session = self.sa_session
self.sa_session = Session()
self.user = None
def set_cookie(self, value, name=None, **kwargs):
self.cookies[name] = value
self.cookies_args[name] = kwargs
def get_cookie(self, name):
return self.cookies[name]
return Trans()
def tearDown(self):
requests.get = self.orig_requests_get
def test_parse_config(self):
self.assertTrue(self.custos_authnz.config['verify_ssl'])
self.assertEqual(self.custos_authnz.config['client_id'], 'test-client-id')
self.assertEqual(self.custos_authnz.config['client_secret'], 'test-client-secret')
self.assertEqual(self.custos_authnz.config['redirect_uri'], 'https://test-redirect-uri')
self.assertEqual(self.custos_authnz.config['authorization_endpoint'], 'https://test-auth-endpoint')
self.assertEqual(self.custos_authnz.config['token_endpoint'], 'https://test-token-endpoint')
self.assertEqual(self.custos_authnz.config['userinfo_endpoint'], 'https://test-userinfo-endpoint')
def test_authenticate_set_state_cookie(self):
"""Verify that authenticate() sets a state cookie."""
authorization_url = self.custos_authnz.authenticate(self.trans)
parsed = urlparse(authorization_url)
state = parse_qs(parsed.query)['state'][0]
self.assertEqual(state, self.trans.cookies[custos_authnz.STATE_COOKIE_NAME])
def test_authenticate_set_nonce_cookie(self):
"""Verify that authenticate() sets a nonce cookie."""
authorization_url = self.custos_authnz.authenticate(self.trans)
parsed = urlparse(authorization_url)
hashed_nonce_in_url = parse_qs(parsed.query)['nonce'][0]
nonce_in_cookie = self.trans.cookies[custos_authnz.NONCE_COOKIE_NAME]
hashed_nonce = self.custos_authnz._hash_nonce(nonce_in_cookie)
self.assertEqual(hashed_nonce, hashed_nonce_in_url)
def test_authenticate_adds_extra_params(self):
"""Verify that authenticate() adds configured extra params."""
authorization_url = self.custos_authnz.authenticate(self.trans)
parsed = urlparse(authorization_url)
param1_value = parse_qs(parsed.query)['kc_idp_hint'][0]
self.assertEqual(param1_value, 'oidc')
def test_authenticate_sets_env_var_when_localhost_redirect(self):
"""Verify that OAUTHLIB_INSECURE_TRANSPORT var is set with localhost redirect."""
self.custos_authnz = custos_authnz.CustosAuthnz('Custos', {
'VERIFY_SSL': True
}, {
'url': self._get_idp_url(),
'client_id': 'test-client-id',
'client_secret': 'test-client-secret',
'redirect_uri': 'http://localhost/auth/callback',
'realm': 'test-realm'
})
self.setupMocks()
self.assertIsNone(os.environ.get('OAUTHLIB_INSECURE_TRANSPORT', None))
self.custos_authnz.authenticate(self.trans)
self.assertEqual("1", os.environ['OAUTHLIB_INSECURE_TRANSPORT'])
def test_authenticate_does_not_set_env_var_when_https_redirect(self):
self.assertTrue(self.custos_authnz.config['redirect_uri'].startswith("https:"))
self.assertIsNone(os.environ.get('OAUTHLIB_INSECURE_TRANSPORT', None))
self.custos_authnz.authenticate(self.trans)
self.assertIsNone(os.environ.get('OAUTHLIB_INSECURE_TRANSPORT', None))
def test_callback_verify_with_state_cookie(self):
"""Verify that state from cookie is passed to OAuth2Session constructor."""
self.trans.set_cookie(value=self.test_state, name=custos_authnz.STATE_COOKIE_NAME)
self.trans.set_cookie(value=self.test_nonce, name=custos_authnz.NONCE_COOKIE_NAME)
old_access_token = "old-access-token"
old_id_token = "old-id-token"
old_refresh_token = "old-refresh-token"
old_expiration_time = datetime.now() - timedelta(days=1)
old_refresh_expiration_time = datetime.now() - timedelta(hours=3)
existing_custos_authnz_token = CustosAuthnzToken(
user=User(email=self.test_email, username=self.test_username),
external_user_id=self.test_user_id,
provider=self.custos_authnz.config['provider'],
access_token=old_access_token,
id_token=old_id_token,
refresh_token=old_refresh_token,
expiration_time=old_expiration_time,
refresh_expiration_time=old_refresh_expiration_time,
)
self.trans.sa_session._query.custos_authnz_token = existing_custos_authnz_token
self.assertIsNotNone(
self.trans.sa_session.query(CustosAuthnzToken)
.filter_by(external_user_id=self.test_user_id,
provider=self.custos_authnz.config['provider'])
.one_or_none()
)
self.trans.sa_session._query.user = User(email=self.test_email, username=self.test_username)
# Mock _create_oauth2_session to make sure it is created with cookie state token
self.mock_create_oauth2_session(self.custos_authnz)
# Intentionally passing a bad state_token to make sure that code under
# test uses the state cookie instead when creating the OAuth2Session
login_redirect_url, user = self.custos_authnz.callback(
state_token="xxx",
authz_code=self.test_code, trans=self.trans,
login_redirect_url="http://localhost:8000/")
self.assertTrue(self._create_oauth2_session_called)
self.assertTrue(self._fetch_token_called)
self.assertTrue(self._get_userinfo_called)
self.assertEqual(login_redirect_url, "http://localhost:8000/")
self.assertIsNotNone(user)
def test_callback_nonce_validation_with_bad_nonce(self):
self.trans.set_cookie(value=self.test_state, name=custos_authnz.STATE_COOKIE_NAME)
self.trans.set_cookie(value=self.test_nonce, name=custos_authnz.NONCE_COOKIE_NAME)
self.trans.sa_session._query.user = User(email=self.test_email, username=self.test_username)
# Intentionally create a bad nonce
self.test_nonce_hash = self.test_nonce_hash + "Z"
# self.custos_authnz._fetch_token = fetch_token
with self.assertRaises(Exception):
self.custos_authnz.callback(state_token="xxx",
authz_code=self.test_code, trans=self.trans,
login_redirect_url="http://localhost:8000/")
self.assertTrue(self._fetch_token_called)
self.assertFalse(self._get_userinfo_called)
def test_callback_user_not_created_when_does_not_exists(self):
self.trans.set_cookie(value=self.test_state, name=custos_authnz.STATE_COOKIE_NAME)
self.trans.set_cookie(value=self.test_nonce, name=custos_authnz.NONCE_COOKIE_NAME)
self.assertIsNone(
self.trans.sa_session.query(CustosAuthnzToken)
.filter_by(external_user_id=self.test_user_id,
provider=self.custos_authnz.config['provider'])
.one_or_none()
)
self.assertEqual(0, len(self.trans.sa_session.items))
login_redirect_url, user = self.custos_authnz.callback(
state_token="xxx",
authz_code=self.test_code, trans=self.trans,
login_redirect_url="http://localhost:8000/")
self.assertIsNone(user)
self.assertTrue("http://localhost:8000/root/login?confirm=true&custos_token=" in login_redirect_url)
self.assertTrue(self._fetch_token_called)
def test_create_user(self):
self.assertIsNone(
self.trans.sa_session.query(CustosAuthnzToken)
.filter_by(external_user_id=self.test_user_id,
provider=self.custos_authnz.config['provider'])
.one_or_none()
)
self.assertEqual(0, len(self.trans.sa_session.items))
test_id_token = unicodify(jwt.encode({
'nonce': self.test_nonce_hash,
'email': self.test_email,
'preferred_username': self.test_username,
'sub': self.test_sub
}, key=None, algorithm=None))
self._raw_token = {
"access_token": self.test_access_token,
"id_token": test_id_token,
"refresh_token": self.test_refresh_token,
"expires_in": self.test_expires_in,
"refresh_expires_in": self.test_refresh_expires_in
}
login_redirect_url, user = self.custos_authnz.create_user(
token=json.dumps(self._raw_token),
trans=self.trans,
login_redirect_url="http://localhost:8000/")
self.assertEqual(login_redirect_url, "http://localhost:8000/")
self.trans.set_user(user)
self.assertEqual(2, len(self.trans.sa_session.items), "Session has new User & new CustosAuthnzToken")
added_user = self.trans.get_user()
self.assertIsInstance(added_user, User)
self.assertEqual(self.test_username, added_user.username)
self.assertEqual(self.test_email, added_user.email)
self.assertIsNotNone(added_user.password)
# Verify added_custos_authnz_token
added_custos_authnz_token = self.trans.sa_session.items[1]
self.assertIsInstance(added_custos_authnz_token, CustosAuthnzToken)
self.assertIs(user, added_custos_authnz_token.user)
self.assertEqual(self.test_access_token, added_custos_authnz_token.access_token)
self.assertEqual(test_id_token, added_custos_authnz_token.id_token)
self.assertEqual(self.test_refresh_token, added_custos_authnz_token.refresh_token)
expected_expiration_time = datetime.now() + timedelta(seconds=self.test_expires_in)
expiration_timedelta = expected_expiration_time - added_custos_authnz_token.expiration_time
self.assertTrue(expiration_timedelta.total_seconds() < 1)
expected_refresh_expiration_time = datetime.now() + timedelta(seconds=self.test_refresh_expires_in)
refresh_expiration_timedelta = expected_refresh_expiration_time - added_custos_authnz_token.refresh_expiration_time
self.assertTrue(refresh_expiration_timedelta.total_seconds() < 1)
self.assertEqual(self.custos_authnz.config['provider'], added_custos_authnz_token.provider)
self.assertTrue(self.trans.sa_session.flush_called)
def test_callback_galaxy_user_not_created_when_user_logged_in_and_no_custos_authnz_token_exists(self):
"""
Galaxy user is already logged in and trying to associate external
identity with their Galaxy user account. No new user should be created.
"""
self.trans.set_cookie(value=self.test_state, name=custos_authnz.STATE_COOKIE_NAME)
self.trans.set_cookie(value=self.test_nonce, name=custos_authnz.NONCE_COOKIE_NAME)
self.trans.user = User()
self.assertIsNone(
self.trans.sa_session.query(CustosAuthnzToken)
.filter_by(external_user_id=self.test_user_id,
provider=self.custos_authnz.config['provider'])
.one_or_none()
)
self.assertEqual(0, len(self.trans.sa_session.items))
login_redirect_url, user = self.custos_authnz.callback(
state_token="xxx",
authz_code=self.test_code, trans=self.trans,
login_redirect_url="http://localhost:8000/")
self.assertTrue(self._fetch_token_called)
self.assertTrue(self._get_userinfo_called)
self.assertEqual(1, len(self.trans.sa_session.items), "Session has new CustosAuthnzToken")
# Verify added_custos_authnz_token
added_custos_authnz_token = self.trans.sa_session.items[0]
self.assertIsInstance(added_custos_authnz_token, CustosAuthnzToken)
self.assertIs(user, added_custos_authnz_token.user)
self.assertIs(user, self.trans.user)
self.assertTrue(self.trans.sa_session.flush_called)
def test_callback_galaxy_user_not_created_when_custos_authnz_token_exists(self):
self.trans.set_cookie(value=self.test_state, name=custos_authnz.STATE_COOKIE_NAME)
self.trans.set_cookie(value=self.test_nonce, name=custos_authnz.NONCE_COOKIE_NAME)
old_access_token = "old-access-token"
old_id_token = "old-id-token"
old_refresh_token = "old-refresh-token"
old_expiration_time = datetime.now() - timedelta(days=1)
old_refresh_expiration_time = datetime.now() - timedelta(hours=3)
existing_custos_authnz_token = CustosAuthnzToken(
user=User(email=self.test_email, username=self.test_username),
external_user_id=self.test_user_id,
provider=self.custos_authnz.config['provider'],
access_token=old_access_token,
id_token=old_id_token,
refresh_token=old_refresh_token,
expiration_time=old_expiration_time,
refresh_expiration_time=old_refresh_expiration_time,
)
self.trans.sa_session._query.custos_authnz_token = existing_custos_authnz_token
self.assertIsNotNone(
self.trans.sa_session.query(CustosAuthnzToken)
.filter_by(external_user_id=self.test_user_id,
provider=self.custos_authnz.config['provider'])
.one_or_none()
        )
self.assertEqual(0, len(self.trans.sa_session.items))
login_redirect_url, user = self.custos_authnz.callback(
state_token="xxx",
authz_code=self.test_code, trans=self.trans,
login_redirect_url="http://localhost:8000/")
self.assertTrue(self._fetch_token_called)
self.assertTrue(self._get_userinfo_called)
# Make sure query was called with correct parameters
self.assertEqual(self.test_user_id, self.trans.sa_session._query.external_user_id)
self.assertEqual(self.custos_authnz.config['provider'], self.trans.sa_session._query.provider)
self.assertEqual(1, len(self.trans.sa_session.items), "Session has updated CustosAuthnzToken")
session_custos_authnz_token = self.trans.sa_session.items[0]
self.assertIsInstance(session_custos_authnz_token, CustosAuthnzToken)
self.assertIs(existing_custos_authnz_token, session_custos_authnz_token, "existing CustosAuthnzToken should be updated")
# Verify both that existing CustosAuthnzToken has the correct values and different values than before
self.assertEqual(self.test_access_token, session_custos_authnz_token.access_token)
self.assertNotEqual(old_access_token, session_custos_authnz_token.access_token)
self.assertEqual(self.test_id_token, session_custos_authnz_token.id_token)
self.assertNotEqual(old_id_token, session_custos_authnz_token.id_token)
self.assertEqual(self.test_refresh_token, session_custos_authnz_token.refresh_token)
self.assertNotEqual(old_refresh_token, session_custos_authnz_token.refresh_token)
expected_expiration_time = datetime.now() + timedelta(seconds=self.test_expires_in)
expiration_timedelta = expected_expiration_time - session_custos_authnz_token.expiration_time
self.assertTrue(expiration_timedelta.total_seconds() < 1)
self.assertNotEqual(old_expiration_time, session_custos_authnz_token.expiration_time)
expected_refresh_expiration_time = datetime.now() + timedelta(seconds=self.test_refresh_expires_in)
refresh_expiration_timedelta = expected_refresh_expiration_time - session_custos_authnz_token.refresh_expiration_time
self.assertTrue(refresh_expiration_timedelta.total_seconds() < 1)
self.assertNotEqual(old_refresh_expiration_time, session_custos_authnz_token.refresh_expiration_time)
self.assertTrue(self.trans.sa_session.flush_called)
def test_disconnect(self):
custos_authnz_token = CustosAuthnzToken(
user=User(email=self.test_email, username=self.test_username),
external_user_id=self.test_user_id,
provider=self.custos_authnz.config['provider'],
access_token=self.test_access_token,
id_token=self.test_id_token,
refresh_token=self.test_refresh_token,
expiration_time=datetime.now() + timedelta(seconds=self.test_refresh_expires_in),
refresh_expiration_time=datetime.now() + timedelta(seconds=self.test_refresh_expires_in),
)
self.trans.user = custos_authnz_token.user
self.trans.user.custos_auth = [custos_authnz_token]
provider = custos_authnz_token.provider
email = custos_authnz_token.user.email
success, message, redirect_uri = self.custos_authnz.disconnect(provider, self.trans, email, "/")
self.assertEqual(1, len(self.trans.sa_session.deleted))
deleted_token = self.trans.sa_session.deleted[0]
self.assertIs(custos_authnz_token, deleted_token)
self.assertTrue(self.trans.sa_session.flush_called)
self.assertTrue(success)
self.assertEqual("", message)
self.assertEqual("/", redirect_uri)
def test_disconnect_when_no_associated_provider(self):
self.trans.user = User()
success, message, redirect_uri = self.custos_authnz.disconnect("Custos", self.trans, "/")
self.assertEqual(0, len(self.trans.sa_session.deleted))
self.assertFalse(self.trans.sa_session.flush_called)
self.assertFalse(success)
self.assertNotEqual("", message)
self.assertIsNone(redirect_uri)
def test_disconnect_when_more_than_one_associated_token_for_provider(self):
self.trans.user = User(email=self.test_email, username=self.test_username)
custos_authnz_token1 = CustosAuthnzToken(
user=self.trans.user,
external_user_id=self.test_user_id + "1",
provider=self.custos_authnz.config['provider'],
access_token=self.test_access_token,
id_token=self.test_id_token,
refresh_token=self.test_refresh_token,
expiration_time=datetime.now() + timedelta(seconds=self.test_refresh_expires_in),
refresh_expiration_time=datetime.now() + timedelta(seconds=self.test_refresh_expires_in),
)
custos_authnz_token2 = CustosAuthnzToken(
user=self.trans.user,
external_user_id=self.test_user_id + "2",
provider=self.custos_authnz.config['provider'],
access_token=self.test_access_token,
id_token=self.test_id_token,
refresh_token=self.test_refresh_token,
expiration_time=datetime.now() + timedelta(seconds=self.test_refresh_expires_in),
refresh_expiration_time=datetime.now() + timedelta(seconds=self.test_refresh_expires_in),
)
self.trans.user.custos_auth = [custos_authnz_token1, custos_authnz_token2]
success, message, redirect_uri = self.custos_authnz.disconnect("Custos", self.trans, "/")
self.assertEqual(0, len(self.trans.sa_session.deleted))
self.assertFalse(self.trans.sa_session.flush_called)
self.assertFalse(success)
self.assertNotEqual("", message)
self.assertIsNone(redirect_uri)
def test_logout_with_redirect(self):
logout_redirect_url = "http://localhost:8080/post-logout"
redirect_url = self.custos_authnz.logout(self.trans, logout_redirect_url)
self.assertEqual(redirect_url, "https://test-end-session-endpoint?redirect_uri=" + quote(logout_redirect_url))
| 46.91358 | 132 | 0.67282 |
4a201b4b9a583d552e913aa9d7fdd620de8270d9 | 844 | py | Python | leadmanager/leads/migrations/0001_initial.py | AvishrantsSh/django_react_test | 33a344fb81e337e30f450acbfc15715453a88251 | [
"MIT"
] | null | null | null | leadmanager/leads/migrations/0001_initial.py | AvishrantsSh/django_react_test | 33a344fb81e337e30f450acbfc15715453a88251 | [
"MIT"
] | null | null | null | leadmanager/leads/migrations/0001_initial.py | AvishrantsSh/django_react_test | 33a344fb81e337e30f450acbfc15715453a88251 | [
"MIT"
] | null | null | null | # Generated by Django 3.2.7 on 2021-09-06 08:57
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = []
operations = [
migrations.CreateModel(
name="Lead",
fields=[
(
"id",
models.BigAutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("name", models.CharField(max_length=100)),
("email", models.EmailField(max_length=254)),
("message", models.TextField()),
("created_at", models.DateTimeField(auto_now_add=True)),
],
),
]
| 26.375 | 72 | 0.451422 |
4a201c008cd4b7180a00e082439f80c26cca2321 | 995 | py | Python | testplan/report/testing/parser.py | dobragab/testplan | 407ac1dfd33d19753e41235a1f576aeb06118840 | [
"Apache-2.0"
] | null | null | null | testplan/report/testing/parser.py | dobragab/testplan | 407ac1dfd33d19753e41235a1f576aeb06118840 | [
"Apache-2.0"
] | null | null | null | testplan/report/testing/parser.py | dobragab/testplan | 407ac1dfd33d19753e41235a1f576aeb06118840 | [
"Apache-2.0"
] | null | null | null | import argparse
class ReportTagsAction(argparse.Action):
"""
Argparse action for parsing multiple report tag
arguments, builds up a list of dictionary of sets.
In:
.. code-block:: bash
--report-tags foo bar hello=world --report-tags one two color=red
Out:
.. code-block:: python
[
{
'simple': {'foo', 'bar'},
'hello': {'world'}
},
{
'simple': {'one', 'two'},
'color': {'red'},
}
]
"""
def __call__(self, parser, namespace, values, option_string=None):
from testplan.testing import tagging
items = getattr(namespace, self.dest) or []
tag_arg = [tagging.parse_tag_arguments(v) for v in values]
tag_arg = tagging.merge_tag_dicts(*tag_arg)
items.append(tag_arg)
setattr(namespace, self.dest, items)
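# --- Editor's usage sketch (not part of the original module) -----------------
# Shows how the action above would be wired into a parser. The option name
# "--report-tags" and nargs='+' follow the class docstring; resolving the tag
# dicts still requires the real `testplan.testing.tagging` helpers at parse time.
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--report-tags', nargs='+', action=ReportTagsAction, default=[],
        help='Report tag filters, e.g. --report-tags foo hello=world')
    args = parser.parse_args(
        ['--report-tags', 'foo', 'bar', 'hello=world',
         '--report-tags', 'one', 'two', 'color=red'])
    # args.report_tags is now a list with one merged tag dict per flag use,
    # matching the "Out" example in the class docstring.
    print(args.report_tags)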
| 24.268293 | 77 | 0.502513 |
4a201c540ec5e880c81b17b563c165558b11dc20 | 428 | py | Python | tests/test_datastructures.py | amaksymov/object-validation | e2bafb17755f945f521995ca2e5adfcac8d2d19a | [
"MIT"
] | null | null | null | tests/test_datastructures.py | amaksymov/object-validation | e2bafb17755f945f521995ca2e5adfcac8d2d19a | [
"MIT"
] | null | null | null | tests/test_datastructures.py | amaksymov/object-validation | e2bafb17755f945f521995ca2e5adfcac8d2d19a | [
"MIT"
] | null | null | null | from object_validation.datastructures import ValidationResult
def test_validation_result():
result = ValidationResult('value')
assert result.value == 'value'
assert not result.has_error()
assert result.get_errors() is None
errors = {'error': 'errors'}
result = ValidationResult('value', errors)
assert result.value == 'value'
assert result.has_error()
assert result.get_errors() == errors
| 28.533333 | 61 | 0.71028 |
4a201cb04b9a5882c24483361cc32695189ccdce | 7,935 | py | Python | pytorch_toolkit/nncf/tests/sparsity/rb/test_algo.py | AnastasiaaSenina/openvino_training_extensions | 267425d64372dff5b9083dc0ca6abfc305a71449 | [
"Apache-2.0"
] | 1 | 2021-03-20T06:00:02.000Z | 2021-03-20T06:00:02.000Z | pytorch_toolkit/nncf/tests/sparsity/rb/test_algo.py | akshayjaryal603/openvino_training_extensions | 7d606a22143db0af97087709d63a2ec2aa02036c | [
"Apache-2.0"
] | 28 | 2020-09-25T22:40:36.000Z | 2022-03-12T00:37:36.000Z | pytorch_toolkit/nncf/tests/sparsity/rb/test_algo.py | akshayjaryal603/openvino_training_extensions | 7d606a22143db0af97087709d63a2ec2aa02036c | [
"Apache-2.0"
] | 1 | 2021-04-02T07:51:01.000Z | 2021-04-02T07:51:01.000Z | """
Copyright (c) 2019 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from copy import deepcopy
import pytest
import torch
from pytest import approx
from torch import nn
from nncf.algo_selector import create_compression_algorithm
from nncf.config import Config
from nncf.operations import UpdateWeight
from nncf.sparsity import RBSparsity
from nncf.sparsity.rb.layers import RBSparsifyingWeight
from nncf.sparsity.rb.loss import SparseLoss
from nncf.sparsity.schedulers import PolynomialSparseScheduler
from nncf.utils import get_all_modules_by_type
from tests.test_helpers import BasicConvTestModel, TwoConvTestModel
def get_basic_sparsity_config(model_size=4, input_sample_size=(1, 1, 4, 4),
sparsity_init=0.02, sparsity_target=0.5, sparsity_steps=2, sparsity_training_steps=3):
config = Config()
config.update({
"model": "basic_sparse_conv",
"model_size": model_size,
"input_info":
{
"sample_size": input_sample_size,
},
"compression":
{
"algorithm": "rb_sparsity",
"params":
{
"schedule": "polynomial",
"sparsity_init": sparsity_init,
"sparsity_target": sparsity_target,
"sparsity_steps": sparsity_steps,
"sparsity_training_steps": sparsity_training_steps
},
"layers":
{
"conv": {"sparsify": True},
}
}
})
return config
def test_can_load_sparse_algo__with_defaults():
model = BasicConvTestModel()
config = get_basic_sparsity_config()
compression_algo = create_compression_algorithm(deepcopy(model), config)
assert isinstance(compression_algo, RBSparsity)
sparse_model = compression_algo.model
model_conv = get_all_modules_by_type(model, 'Conv2d')
sparse_model_conv = get_all_modules_by_type(sparse_model, 'NNCFConv2d')
assert len(model_conv) == len(sparse_model_conv)
for module_name in model_conv:
scope = module_name.split('/')
scope[-1] = scope[-1].replace('Conv2d', 'NNCFConv2d')
sparse_module_name = '/'.join(scope)
assert sparse_module_name in sparse_model_conv
store = []
sparse_module = sparse_model_conv[sparse_module_name]
for op in sparse_module.pre_ops.values():
if isinstance(op, UpdateWeight) and isinstance(op.operand, RBSparsifyingWeight):
assert torch.allclose(op.operand.binary_mask, torch.ones_like(sparse_module.weight))
assert op.operand.sparsify
assert op.__class__.__name__ not in store
store.append(op.__class__.__name__)
def test_can_set_sparse_layers_to_loss():
model = BasicConvTestModel()
config = get_basic_sparsity_config()
config['compression']['train_phase'] = ''
compression_algo = create_compression_algorithm(model, config)
loss = compression_algo.loss
assert isinstance(loss, SparseLoss)
for layer in loss.sparse_layers:
assert isinstance(layer, RBSparsifyingWeight)
def test_sparse_algo_does_not_replace_not_conv_layer():
class TwoLayersTestModel(nn.Module):
def __init__(self):
super().__init__()
self.conv = nn.Conv2d(1, 1, 1)
self.bn = nn.BatchNorm2d(1)
def forward(self, x):
return self.bn(self.conv(x))
model = TwoLayersTestModel()
config = get_basic_sparsity_config()
config['compression']['train_phase'] = ''
compression_algo = create_compression_algorithm(model, config)
assert isinstance(compression_algo, RBSparsity)
for m in compression_algo.sparsified_module_info:
assert isinstance(m.operand, RBSparsifyingWeight)
def test_can_create_sparse_loss_and_scheduler():
model = BasicConvTestModel()
config = get_basic_sparsity_config()
compression_algo = create_compression_algorithm(model, config)
loss = compression_algo.loss
assert isinstance(loss, SparseLoss)
assert not loss.disabled
assert loss.target_sparsity_rate == approx(0.02)
assert loss.p == approx(0.05)
scheduler = compression_algo.scheduler
assert isinstance(scheduler, PolynomialSparseScheduler)
assert scheduler.current_sparsity_level == approx(0.02)
assert scheduler.max_sparsity == approx(0.5)
assert scheduler.max_step == 2
assert scheduler.sparsity_training_steps == 3
def test_sparse_algo_can_calc_sparsity_rate__for_basic_model():
model = BasicConvTestModel()
config = get_basic_sparsity_config()
compression_algo = create_compression_algorithm(model, config)
assert compression_algo.sparsified_weights_count == model.weights_num
assert compression_algo.sparsity_rate_for_model == (
1 - (model.nz_weights_num + model.nz_bias_num) / (model.weights_num + model.bias_num)
)
assert compression_algo.sparsity_rate_for_sparsified_modules == 1 - model.nz_weights_num / model.weights_num
assert len(compression_algo.sparsified_module_info) == 1
def test_sparse_algo_can_collect_sparse_layers():
model = TwoConvTestModel()
config = get_basic_sparsity_config()
compression_algo = create_compression_algorithm(model, config)
assert len(compression_algo.sparsified_module_info) == 2
def test_sparse_algo_can_calc_sparsity_rate__for_2_conv_model():
model = TwoConvTestModel()
config = get_basic_sparsity_config()
compression_algo = create_compression_algorithm(model, config)
assert compression_algo.sparsified_weights_count == model.weights_num
assert compression_algo.sparsity_rate_for_model == (
1 - (model.nz_weights_num + model.nz_bias_num) / (model.weights_num + model.bias_num)
)
assert compression_algo.sparsity_rate_for_sparsified_modules == 1 - model.nz_weights_num / model.weights_num
def test_scheduler_can_do_epoch_step__with_rb_algo():
config = Config()
config['compression']['algorithm'] = 'rb_sparsity'
config['compression']["params"] = {
'schedule': 'polynomial',
'power': 1, 'sparsity_steps': 2, 'sparsity_init': 0.2, 'sparsity_target': 0.6,
'sparsity_training_steps': 4
}
compression_algo = create_compression_algorithm(BasicConvTestModel(), config)
scheduler = compression_algo.scheduler
loss = compression_algo.loss
assert pytest.approx(loss.target_sparsity_rate) == 0.2
assert not loss.disabled
for module_info in compression_algo.sparsified_module_info:
assert module_info.operand.sparsify
scheduler.epoch_step()
assert pytest.approx(loss.target_sparsity_rate, abs=1e-3) == 0.4
assert pytest.approx(loss().item(), abs=1e-3) == 64
assert not loss.disabled
scheduler.epoch_step()
assert pytest.approx(loss.target_sparsity_rate, abs=1e-3) == 0.6
assert pytest.approx(loss().item(), abs=1e-3) == 144
assert not loss.disabled
scheduler.epoch_step()
assert not loss.disabled
assert loss.target_sparsity_rate == 0.6
assert loss().item() == 144
scheduler.epoch_step()
assert loss.disabled
assert loss.target_sparsity_rate == 0.6
assert loss() == 0
for module_info in compression_algo.sparsified_module_info:
assert not module_info.operand.sparsify
| 36.906977 | 116 | 0.705986 |
4a201d065c0b654ee78c853e533123efcb4c19a8 | 11,569 | py | Python | pandas/compat/__init__.py | maketestsgofaster/pandas | 3493abaa9c47e39b410752833c901fd27f5b3a76 | [
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"MIT",
"BSD-3-Clause"
] | null | null | null | pandas/compat/__init__.py | maketestsgofaster/pandas | 3493abaa9c47e39b410752833c901fd27f5b3a76 | [
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"MIT",
"BSD-3-Clause"
] | null | null | null | pandas/compat/__init__.py | maketestsgofaster/pandas | 3493abaa9c47e39b410752833c901fd27f5b3a76 | [
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"MIT",
"BSD-3-Clause"
] | null | null | null | """
compat
======
Cross-compatible functions for Python 2 and 3.
Key items to import for 2/3 compatible code:
* iterators: range(), map(), zip(), filter(), reduce()
* lists: lrange(), lmap(), lzip(), lfilter()
* unicode: u() [no unicode builtin in Python 3]
* longs: long (int in Python 3)
* callable
* iterable method compatibility: iteritems, iterkeys, itervalues
* Uses the original method if available, otherwise uses items, keys, values.
* types:
* text_type: unicode in Python 2, str in Python 3
* binary_type: str in Python 2, bytes in Python 3
* string_types: basestring in Python 2, str in Python 3
* bind_method: binds functions to classes
* add_metaclass(metaclass) - class decorator that recreates the class with the
given metaclass instead (and avoids intermediary class creation)
Other items:
* platform checker
"""
# pylint disable=W0611
# flake8: noqa
import functools
import itertools
from distutils.version import LooseVersion
from itertools import product
import sys
import platform
import types
from unicodedata import east_asian_width
import struct
import inspect
from collections import namedtuple
PY2 = sys.version_info[0] == 2
PY3 = (sys.version_info[0] >= 3)
PY35 = (sys.version_info >= (3, 5))
PY36 = (sys.version_info >= (3, 6))
PYPY = (platform.python_implementation() == 'PyPy')
try:
import __builtin__ as builtins
# not writeable when instantiated with string, doesn't handle unicode well
from cStringIO import StringIO as cStringIO
# always writeable
from StringIO import StringIO
BytesIO = StringIO
import cPickle
import httplib
except ImportError:
import builtins
from io import StringIO, BytesIO
cStringIO = StringIO
import pickle as cPickle
import http.client as httplib
from pandas.compat.chainmap import DeepChainMap
if PY3:
def isidentifier(s):
return s.isidentifier()
def str_to_bytes(s, encoding=None):
return s.encode(encoding or 'ascii')
def bytes_to_str(b, encoding=None):
return b.decode(encoding or 'utf-8')
# The signature version below is directly copied from Django,
# https://github.com/django/django/pull/4846
def signature(f):
sig = inspect.signature(f)
args = [
p.name for p in sig.parameters.values()
if p.kind == inspect.Parameter.POSITIONAL_OR_KEYWORD
]
varargs = [
p.name for p in sig.parameters.values()
if p.kind == inspect.Parameter.VAR_POSITIONAL
]
varargs = varargs[0] if varargs else None
keywords = [
p.name for p in sig.parameters.values()
if p.kind == inspect.Parameter.VAR_KEYWORD
]
keywords = keywords[0] if keywords else None
defaults = [
p.default for p in sig.parameters.values()
if p.kind == inspect.Parameter.POSITIONAL_OR_KEYWORD
and p.default is not p.empty
] or None
argspec = namedtuple('Signature', ['args', 'defaults',
'varargs', 'keywords'])
return argspec(args, defaults, varargs, keywords)
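    # Editor's note: for `def f(a, b=1, *args, **kw): ...` the shim above
    # returns Signature(args=['a', 'b'], defaults=[1], varargs='args',
    # keywords='kw'), i.e. a py2 getargspec-like record built from py3's
    # inspect.signature().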
def get_range_parameters(data):
"""Gets the start, stop, and step parameters from a range object"""
return data.start, data.stop, data.step
# have to explicitly put builtins into the namespace
range = range
map = map
zip = zip
filter = filter
intern = sys.intern
reduce = functools.reduce
long = int
unichr = chr
# This was introduced in Python 3.3, but we don't support
# Python 3.x < 3.5, so checking PY3 is safe.
FileNotFoundError = FileNotFoundError
# list-producing versions of the major Python iterating functions
def lrange(*args, **kwargs):
return list(range(*args, **kwargs))
def lzip(*args, **kwargs):
return list(zip(*args, **kwargs))
def lmap(*args, **kwargs):
return list(map(*args, **kwargs))
def lfilter(*args, **kwargs):
return list(filter(*args, **kwargs))
else:
# Python 2
import re
_name_re = re.compile(r"[a-zA-Z_][a-zA-Z0-9_]*$")
FileNotFoundError = IOError
def isidentifier(s, dotted=False):
return bool(_name_re.match(s))
def str_to_bytes(s, encoding='ascii'):
return s
def bytes_to_str(b, encoding='ascii'):
return b
def signature(f):
return inspect.getargspec(f)
def get_range_parameters(data):
"""Gets the start, stop, and step parameters from a range object"""
# seems we only have indexing ops to infer
# rather than direct accessors
if len(data) > 1:
step = data[1] - data[0]
stop = data[-1] + step
start = data[0]
elif len(data):
start = data[0]
stop = data[0] + 1
step = 1
else:
start = stop = 0
step = 1
return start, stop, step
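    # Editor's worked example: for data = xrange(2, 11, 3) -> [2, 5, 8],
    # step = 5 - 2 = 3, stop = 8 + 3 = 11, start = 2, recovering the original
    # (2, 11, 3) parameters without direct .start/.stop/.step accessors.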
# import iterator versions of these functions
range = xrange
intern = intern
zip = itertools.izip
filter = itertools.ifilter
map = itertools.imap
reduce = reduce
long = long
unichr = unichr
# Python 2-builtin ranges produce lists
lrange = builtins.range
lzip = builtins.zip
lmap = builtins.map
lfilter = builtins.filter
if PY2:
def iteritems(obj, **kw):
return obj.iteritems(**kw)
def iterkeys(obj, **kw):
return obj.iterkeys(**kw)
def itervalues(obj, **kw):
return obj.itervalues(**kw)
next = lambda it: it.next()
else:
def iteritems(obj, **kw):
return iter(obj.items(**kw))
def iterkeys(obj, **kw):
return iter(obj.keys(**kw))
def itervalues(obj, **kw):
return iter(obj.values(**kw))
next = next
def bind_method(cls, name, func):
"""Bind a method to class, python 2 and python 3 compatible.
Parameters
----------
cls : type
class to receive bound method
name : basestring
name of method on class instance
func : function
function to be bound as method
Returns
-------
None
"""
# only python 2 has bound/unbound method issue
if not PY3:
setattr(cls, name, types.MethodType(func, None, cls))
else:
setattr(cls, name, func)
# ----------------------------------------------------------------------------
# functions largely based / taken from the six module
# Much of the code in this module comes from Benjamin Peterson's six library.
# The license for this library can be found in LICENSES/SIX and the code can be
# found at https://bitbucket.org/gutworth/six
# Definition of East Asian Width
# http://unicode.org/reports/tr11/
# Ambiguous width can be changed by option
_EAW_MAP = {'Na': 1, 'N': 1, 'W': 2, 'F': 2, 'H': 1}
if PY3:
string_types = str,
integer_types = int,
class_types = type,
text_type = str
binary_type = bytes
def u(s):
return s
def u_safe(s):
return s
def strlen(data, encoding=None):
# encoding is for compat with PY2
return len(data)
def east_asian_len(data, encoding=None, ambiguous_width=1):
"""
Calculate display width considering unicode East Asian Width
"""
if isinstance(data, text_type):
return sum([_EAW_MAP.get(east_asian_width(c), ambiguous_width) for c in data])
else:
return len(data)
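    # Editor's note: e.g. east_asian_len(u'パンダpanda') == 11, since each of
    # the three wide katakana counts as 2 display columns and 'panda' adds 5.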
def import_lzma():
""" import lzma from the std library """
import lzma
return lzma
def set_function_name(f, name, cls):
""" Bind the name/qualname attributes of the function """
f.__name__ = name
f.__qualname__ = '{klass}.{name}'.format(
klass=cls.__name__,
name=name)
f.__module__ = cls.__module__
return f
ResourceWarning = ResourceWarning
else:
string_types = basestring,
integer_types = (int, long)
class_types = (type, types.ClassType)
text_type = unicode
binary_type = str
def u(s):
return unicode(s, "unicode_escape")
def u_safe(s):
try:
return unicode(s, "unicode_escape")
        except Exception:
return s
def strlen(data, encoding=None):
try:
data = data.decode(encoding)
except UnicodeError:
pass
return len(data)
def east_asian_len(data, encoding=None, ambiguous_width=1):
"""
Calculate display width considering unicode East Asian Width
"""
if isinstance(data, text_type):
try:
data = data.decode(encoding)
except UnicodeError:
pass
return sum([_EAW_MAP.get(east_asian_width(c), ambiguous_width) for c in data])
else:
return len(data)
def import_lzma():
""" import the backported lzma library
or raise ImportError if not available """
from backports import lzma
return lzma
def set_function_name(f, name, cls):
""" Bind the name attributes of the function """
f.__name__ = name
return f
class ResourceWarning(Warning):
pass
string_and_binary_types = string_types + (binary_type,)
try:
# callable reintroduced in later versions of Python
callable = callable
except NameError:
def callable(obj):
return any("__call__" in klass.__dict__ for klass in type(obj).__mro__)
def add_metaclass(metaclass):
"""Class decorator for creating a class with a metaclass."""
def wrapper(cls):
orig_vars = cls.__dict__.copy()
orig_vars.pop('__dict__', None)
orig_vars.pop('__weakref__', None)
for slots_var in orig_vars.get('__slots__', ()):
orig_vars.pop(slots_var)
return metaclass(cls.__name__, cls.__bases__, orig_vars)
return wrapper
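# Editor's usage sketch (illustrative, not pandas code): the decorator rebuilds
# the class under the given metaclass, sidestepping the py2-only __metaclass__
# hook and the py3-only `class C(metaclass=Meta)` syntax:
#
#     class Meta(type):
#         def __new__(mcls, name, bases, ns):
#             ns.setdefault('tagged', True)
#             return super(Meta, mcls).__new__(mcls, name, bases, ns)
#
#     @add_metaclass(Meta)
#     class C(object):
#         pass
#
#     assert C.tagged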
from collections import OrderedDict, Counter
if PY3:
def raise_with_traceback(exc, traceback=Ellipsis):
if traceback == Ellipsis:
_, _, traceback = sys.exc_info()
raise exc.with_traceback(traceback)
else:
# this version of raise is a syntax error in Python 3
exec("""
def raise_with_traceback(exc, traceback=Ellipsis):
if traceback == Ellipsis:
_, _, traceback = sys.exc_info()
raise exc, None, traceback
""")
raise_with_traceback.__doc__ = """Raise exception with existing traceback.
If traceback is not passed, uses sys.exc_info() to get traceback."""
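# Editor's note: the typical call site is inside an `except` block, e.g.
#     try:
#         1 / 0
#     except ZeroDivisionError:
#         raise_with_traceback(ValueError('bad input'))
# which raises the ValueError but keeps the ZeroDivisionError's traceback.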
# http://stackoverflow.com/questions/4126348
# Thanks to @martineau at SO
import dateutil
if PY2 and LooseVersion(dateutil.__version__) == '2.0':
# dateutil brokenness
raise Exception('dateutil 2.0 incompatible with Python 2.x, you must '
'install version 1.5 or 2.1+!')
from dateutil import parser as _date_parser
if LooseVersion(dateutil.__version__) < '2.0':
@functools.wraps(_date_parser.parse)
def parse_date(timestr, *args, **kwargs):
timestr = bytes(timestr)
return _date_parser.parse(timestr, *args, **kwargs)
else:
parse_date = _date_parser.parse
# https://github.com/pandas-dev/pandas/pull/9123
def is_platform_little_endian():
""" am I little endian """
return sys.byteorder == 'little'
def is_platform_windows():
return sys.platform == 'win32' or sys.platform == 'cygwin'
def is_platform_linux():
return sys.platform == 'linux2'
def is_platform_mac():
return sys.platform == 'darwin'
def is_platform_32bit():
return struct.calcsize("P") * 8 < 64
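# Editor's note: struct.calcsize("P") is the pointer size in bytes, so the
# check above reads "pointer width below 64 bits", e.g. 4 * 8 == 32 on a
# 32-bit Python build.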
| 27.414692 | 90 | 0.630824 |
4a201d80ae96e8af78d70740c6b782be03fa1a50 | 3,105 | py | Python | ucsmsdk/mometa/storage/StorageScsiLunRef.py | anoop1984/python_sdk | c4a226bad5e10ad233eda62bc8f6d66a5a82b651 | [
"Apache-2.0"
] | null | null | null | ucsmsdk/mometa/storage/StorageScsiLunRef.py | anoop1984/python_sdk | c4a226bad5e10ad233eda62bc8f6d66a5a82b651 | [
"Apache-2.0"
] | null | null | null | ucsmsdk/mometa/storage/StorageScsiLunRef.py | anoop1984/python_sdk | c4a226bad5e10ad233eda62bc8f6d66a5a82b651 | [
"Apache-2.0"
] | null | null | null | """This module contains the general information for StorageScsiLunRef ManagedObject."""
import sys, os
from ...ucsmo import ManagedObject
from ...ucscoremeta import UcsVersion, MoPropertyMeta, MoMeta
from ...ucsmeta import VersionMeta
class StorageScsiLunRefConsts():
ID_UNSPECIFIED = "unspecified"
class StorageScsiLunRef(ManagedObject):
"""This is StorageScsiLunRef class."""
consts = StorageScsiLunRefConsts()
naming_props = set([u'id'])
mo_meta = MoMeta("StorageScsiLunRef", "storageScsiLunRef", "scsi-lun-ref-[id]", VersionMeta.Version224a, "InputOutput", 0x3f, [], ["read-only"], [u'storageVirtualDrive'], [], [None])
prop_meta = {
"child_action": MoPropertyMeta("child_action", "childAction", "string", VersionMeta.Version224a, MoPropertyMeta.INTERNAL, 0x2, None, None, r"""((deleteAll|ignore|deleteNonPresent),){0,2}(deleteAll|ignore|deleteNonPresent){0,1}""", [], []),
"dn": MoPropertyMeta("dn", "dn", "string", VersionMeta.Version224a, MoPropertyMeta.READ_ONLY, 0x4, 0, 256, None, [], []),
"id": MoPropertyMeta("id", "id", "string", VersionMeta.Version224a, MoPropertyMeta.NAMING, 0x8, None, None, None, ["unspecified"], ["0-4294967295"]),
"ls_dn": MoPropertyMeta("ls_dn", "lsDn", "string", VersionMeta.Version224a, MoPropertyMeta.READ_ONLY, None, 0, 256, None, [], []),
"lun_name": MoPropertyMeta("lun_name", "lunName", "string", VersionMeta.Version224a, MoPropertyMeta.READ_ONLY, None, None, None, r"""[\-\.:_a-zA-Z0-9]{0,16}""", [], []),
"pn_dn": MoPropertyMeta("pn_dn", "pnDn", "string", VersionMeta.Version311e, MoPropertyMeta.READ_ONLY, None, 0, 256, None, [], []),
"profile_dn": MoPropertyMeta("profile_dn", "profileDn", "string", VersionMeta.Version224a, MoPropertyMeta.READ_ONLY, None, 0, 256, None, [], []),
"rn": MoPropertyMeta("rn", "rn", "string", VersionMeta.Version224a, MoPropertyMeta.READ_ONLY, 0x10, 0, 256, None, [], []),
"sacl": MoPropertyMeta("sacl", "sacl", "string", VersionMeta.Version302a, MoPropertyMeta.READ_ONLY, None, None, None, r"""((none|del|mod|addchild|cascade),){0,4}(none|del|mod|addchild|cascade){0,1}""", [], []),
"status": MoPropertyMeta("status", "status", "string", VersionMeta.Version224a, MoPropertyMeta.READ_WRITE, 0x20, None, None, r"""((removed|created|modified|deleted),){0,3}(removed|created|modified|deleted){0,1}""", [], []),
}
prop_map = {
"childAction": "child_action",
"dn": "dn",
"id": "id",
"lsDn": "ls_dn",
"lunName": "lun_name",
"pnDn": "pn_dn",
"profileDn": "profile_dn",
"rn": "rn",
"sacl": "sacl",
"status": "status",
}
def __init__(self, parent_mo_or_dn, id, **kwargs):
self._dirty_mask = 0
self.id = id
self.child_action = None
self.ls_dn = None
self.lun_name = None
self.pn_dn = None
self.profile_dn = None
self.sacl = None
self.status = None
ManagedObject.__init__(self, "StorageScsiLunRef", parent_mo_or_dn, **kwargs)
| 51.75 | 248 | 0.639291 |
4a201faf1e0c8910e3471a9991604799c819c30b | 620 | py | Python | Introduksjon/Flytkontroll/ForLoops.py | trodfjell/PythonTeachers2021 | 6e6cba24f0adf654bce5cfd3a6027a972d02259e | [
"MIT"
] | null | null | null | Introduksjon/Flytkontroll/ForLoops.py | trodfjell/PythonTeachers2021 | 6e6cba24f0adf654bce5cfd3a6027a972d02259e | [
"MIT"
] | null | null | null | Introduksjon/Flytkontroll/ForLoops.py | trodfjell/PythonTeachers2021 | 6e6cba24f0adf654bce5cfd3a6027a972d02259e | [
"MIT"
] | null | null | null | # For loops can be used to run something a given number of times
print('Sum all the numbers from 1 through 100')
total = 0
for i in range(101):
    total = total + i
print(total)
# This can also be solved with a while loop
print('Sum all the numbers from 1 through 100')
total = 0
i = 0
while i < 100:
    i = i + 1
    total = total + i
print(total)
# We can also solve this with a for loop that counts backwards
print('Sum all the numbers from 1 through 100')
total = 0
for i in range(100, 0, -1): # Start at 100, stop at 0, count down by one each time the loop runs
    total = total + i
print(total) | 28.181818 | 90 | 0.690323 |
4a2020ce3359367b6fcb42ad016eaad542734349 | 784 | py | Python | simplipy/errors.py | FindingJohnny/simplisafe-python | e660938c8280dd47d5a90c49a39f77bba27e8a30 | [
"MIT"
] | null | null | null | simplipy/errors.py | FindingJohnny/simplisafe-python | e660938c8280dd47d5a90c49a39f77bba27e8a30 | [
"MIT"
] | null | null | null | simplipy/errors.py | FindingJohnny/simplisafe-python | e660938c8280dd47d5a90c49a39f77bba27e8a30 | [
"MIT"
] | null | null | null | """Define package errors."""
class SimplipyError(Exception):
"""A base error."""
pass
class EndpointUnavailable(SimplipyError):
"""An error related to accessing an endpoint that isn't available in the plan."""
pass
class InvalidCredentialsError(SimplipyError):
"""An error related to invalid credentials."""
pass
class PendingAuthorizationError(SimplipyError):
"""An error ralted to an unconfirmed multi-factor authentication."""
pass
class PinError(SimplipyError):
"""An error related to invalid PINs or PIN operations."""
pass
class RequestError(SimplipyError):
"""An error related to invalid requests."""
pass
class WebsocketError(SimplipyError):
"""An error related to generic websocket errors."""
pass
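# Editor's usage sketch (illustrative): because every class above derives from
# SimplipyError, callers can handle specific failures first and still catch
# anything from the package with one fallback:
#
#     try:
#         ...  # some simplipy call
#     except InvalidCredentialsError:
#         ...  # e.g. re-prompt for credentials
#     except SimplipyError:
#         ...  # any other package-level error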
| 17.818182 | 85 | 0.704082 |
4a20216fcf4d495a95f1fcdab55c6dac53d4ff3a | 3,098 | py | Python | utils/csv_generator.py | stegmaierj/Cellpose3D | d78cbf99a9a35e557fd3878d0761b6ad98f8da61 | [
"Apache-2.0"
] | 2 | 2021-09-13T15:50:26.000Z | 2021-12-08T21:53:21.000Z | utils/csv_generator.py | stegmaierj/Cellpose3D | d78cbf99a9a35e557fd3878d0761b6ad98f8da61 | [
"Apache-2.0"
] | 3 | 2021-10-13T09:01:21.000Z | 2021-12-06T15:28:50.000Z | utils/csv_generator.py | stegmaierj/Cellpose3D | d78cbf99a9a35e557fd3878d0761b6ad98f8da61 | [
"Apache-2.0"
] | 1 | 2022-02-01T15:19:19.000Z | 2022-02-01T15:19:19.000Z | # -*- coding: utf-8 -*-
"""
# 3D Cellpose Extension.
# Copyright (C) 2021 D. Eschweiler, J. Stegmaier
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Please refer to the documentation for more information about the software
# as well as for installation instructions.
"""
import os
import glob
import csv
import numpy as np
def get_files(folders, data_root='', descriptor='', filetype='tif'):
filelist = []
for folder in folders:
files = glob.glob(os.path.join(data_root, folder, '*'+descriptor+'*.'+filetype))
filelist.extend([os.path.join(folder, os.path.split(f)[-1]) for f in files])
return filelist
def read_csv(list_path, data_root=''):
filelist = []
with open(list_path, 'r') as f:
reader = csv.reader(f, delimiter=';')
for row in reader:
if len(row)==0 or np.sum([len(r) for r in row])==0: continue
row = [os.path.join(data_root, r) for r in row]
filelist.append(row)
return filelist
def create_csv(data_list, save_path='list_folder/experiment_name', test_split=0.2, val_split=0.1, shuffle=False):
if shuffle:
np.random.shuffle(data_list)
# Get number of files for each split
num_files = len(data_list)
num_test_files = int(test_split*num_files)
num_val_files = int((num_files-num_test_files)*val_split)
num_train_files = num_files - num_test_files - num_val_files
# Get file indices
file_idx = np.arange(num_files)
# Save csv files
if num_test_files > 0:
test_idx = sorted(np.random.choice(file_idx, size=num_test_files, replace=False))
with open(save_path+'_test.csv', 'w') as fh:
writer = csv.writer(fh, delimiter=';')
for idx in test_idx:
writer.writerow(data_list[idx])
else:
test_idx = []
if num_val_files > 0:
val_idx = sorted(np.random.choice(list(set(file_idx)-set(test_idx)), size=num_val_files, replace=False))
with open(save_path+'_val.csv', 'w') as fh:
writer = csv.writer(fh, delimiter=';')
for idx in val_idx:
writer.writerow(data_list[idx])
else:
val_idx = []
if num_train_files > 0:
train_idx = sorted(list(set(file_idx) - set(test_idx) - set(val_idx)))
with open(save_path+'_train.csv', 'w') as fh:
writer = csv.writer(fh, delimiter=';')
for idx in train_idx:
writer.writerow(data_list[idx])
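# --- Editor's usage sketch (not part of the original module) -----------------
# End-to-end flow under assumed paths: collect matching files, write the
# train/val/test csv lists, then read one back. '/data', 'raw' and
# 'experiment1' are placeholders, not names from this repository.
if __name__ == '__main__':
    files = get_files(['raw'], data_root='/data', descriptor='', filetype='tif')
    data_list = [[f] for f in files]  # one column per row; add label paths as needed
    create_csv(data_list, save_path='list_folder/experiment1',
               test_split=0.2, val_split=0.1, shuffle=True)
    train_rows = read_csv('list_folder/experiment1_train.csv', data_root='/data')
    print('train rows:', len(train_rows))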
| 33.311828 | 113 | 0.627824 |
4a2021f44cacdc2c61b5500972e83b991ce9dbf8 | 1,274 | py | Python | script/model/sklearn_like_model/NetModule/optimizer/RMSProp.py | demetoir/MLtools | 8c42fcd4cc71728333d9c116ade639fe57d50d37 | [
"MIT"
] | null | null | null | script/model/sklearn_like_model/NetModule/optimizer/RMSProp.py | demetoir/MLtools | 8c42fcd4cc71728333d9c116ade639fe57d50d37 | [
"MIT"
] | null | null | null | script/model/sklearn_like_model/NetModule/optimizer/RMSProp.py | demetoir/MLtools | 8c42fcd4cc71728333d9c116ade639fe57d50d37 | [
"MIT"
] | null | null | null | import tensorflow as tf
from script.model.sklearn_like_model.NetModule.optimizer.optimizer import optimizer
class RMSProp(optimizer):
def __init__(
self,
learning_rate=0.001,
decay=0.9,
momentum=0.0,
epsilon=1e-8,
use_locking=False,
reuse=False,
name=None,
verbose=0
):
super().__init__(learning_rate=learning_rate, verbose=verbose, name=name, reuse=reuse)
self.decay = decay
self.momentum = momentum
self.epsilon = epsilon
self.use_locking = use_locking
def __str__(self):
s = f"{self.__class__.__name__}"
s += f"learning rate = {self.learning_rate}\n"
s += f"decay = {self.decay}\n"
s += f"momentum = {self.momentum}\n"
s += f"epsilon = {self.epsilon}\n"
s += f"use_lock = {self.use_locking}\n"
return s
    def _build(self):
        # build the RMSProp optimizer this class is named for; the original
        # constructed a GradientDescentOptimizer and silently dropped the
        # decay/momentum/epsilon/use_locking options captured in __init__
        self._optimizer = tf.train.RMSPropOptimizer(
            learning_rate=self._lr_module.variable,
            decay=self.decay,
            momentum=self.momentum,
            epsilon=self.epsilon,
            use_locking=self.use_locking,
        )
self._train_op = self.optimizer.minimize(
loss=self.loss,
var_list=self.train_var_list
)
return self._train_op
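# Editor's note (illustrative): `optimizer` is this repo's base wrapper, so the
# exact build/run entry points live there; the intended construction is e.g.
#
#     opt = RMSProp(learning_rate=1e-3, decay=0.9, momentum=0.0)
#
# after which the base class is expected to call _build() with `loss`,
# `train_var_list` and `_lr_module` already attached.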
| 29.627907 | 95 | 0.559655 |
4a20231bba5aca61935f321a96673834ed64b51b | 1,038 | py | Python | limacharlie/demo_firehose.py | macdaliot/python-limacharlie | dc8011418f09d3acc0ebce5ea3afb98e4b7f600f | [
"Apache-2.0"
] | null | null | null | limacharlie/demo_firehose.py | macdaliot/python-limacharlie | dc8011418f09d3acc0ebce5ea3afb98e4b7f600f | [
"Apache-2.0"
] | null | null | null | limacharlie/demo_firehose.py | macdaliot/python-limacharlie | dc8011418f09d3acc0ebce5ea3afb98e4b7f600f | [
"Apache-2.0"
] | null | null | null | import limacharlie
import json
import gevent
import signal
import sys
import getpass
if __name__ == "__main__":
def signal_handler():
global fh
print( 'You pressed Ctrl+C!' )
fh.shutdown()
sys.exit( 0 )
gevent.signal( signal.SIGINT, signal_handler )
def debugPrint( msg ):
        print( msg )
# This example uses interactive credentials, but see the README for alternative
# ways of getting credentials.
man = limacharlie.Manager( oid = raw_input( 'Enter OID: ' ),
secret_api_key = getpass.getpass( prompt = 'Enter secret API key: ' ),
print_debug_fn = debugPrint )
fh = limacharlie.Firehose( man,
raw_input( 'Local Interface: ' ),
'event',
public_dest = raw_input( 'Public Interface: ' ),
name = 'firehose_test' )
while True:
data = fh.queue.get()
print( json.dumps( data, indent = 2 ) + "\n\n" )
| 28.833333 | 100 | 0.554913 |
4a20235823f52cc0254585b333e2fec0ae270f20 | 4,613 | py | Python | Fduers/alumni/models.py | TAKHEXI/ALUMNI | b06fb8f408444d8294304e1890f2afa4283c8bfa | [
"MIT"
] | null | null | null | Fduers/alumni/models.py | TAKHEXI/ALUMNI | b06fb8f408444d8294304e1890f2afa4283c8bfa | [
"MIT"
] | null | null | null | Fduers/alumni/models.py | TAKHEXI/ALUMNI | b06fb8f408444d8294304e1890f2afa4283c8bfa | [
"MIT"
] | null | null | null | from django.db import models
from ckeditor.fields import RichTextField
# Create your models here.
class Department(models.Model):
name = models.CharField('院系', max_length=30, null=False, unique=True)
class Meta:
verbose_name = '院系'
verbose_name_plural = '院系'
def __str__(self):
        # Purpose: tells foreign-key fields which attribute to associate/display, e.g. here it resolves to self.name
return self.name
class Industry(models.Model):
name = models.CharField('行业', max_length=30, null=False, unique=True)
class Meta:
verbose_name = '行业'
verbose_name_plural = '行业'
def __str__(self):
return self.name
class Province(models.Model):
name = models.CharField('省份', max_length=30, null=False, unique=True, primary_key = True)
class City(models.Model):
name = models.CharField('城市', max_length=30, null=False, unique = False)
province = models.ForeignKey(Province, on_delete = models.DO_NOTHING, db_column = 'f', default="北京")
class Meta:
verbose_name = '城市'
verbose_name_plural = '城市'
def __str__(self):
return self.name
class User(models.Model):
username = models.CharField('用户名',max_length=30, primary_key = True)
department = models.ForeignKey(Department, on_delete=models.DO_NOTHING, verbose_name='院系')
mail = models.CharField('邮箱',max_length=20)
grade = models.IntegerField('届次')
studentID = models.CharField('学号', max_length=20)
phone = models.CharField('电话',max_length=20)
industry = models.ForeignKey(Industry, on_delete=models.DO_NOTHING, verbose_name='行业')
city = models.ForeignKey(City, on_delete=models.DO_NOTHING, verbose_name='城市')
referrer = models.CharField('推荐人',max_length=30,null=True)
password = models.CharField('密码',max_length=20)
photo = models.ImageField(upload_to='user_photo/%Y/%m/%d',verbose_name='头像',blank=True, null=True, default='img/default.jpg')
essay = models.TextField('个性签名', max_length=30, default='')
class Meta:
verbose_name = '用户'
verbose_name_plural = '用户'
def __str__(self):
return self.username
#
# class Group(models.Model):
# groupname = models.CharField('群组名',max_length=10)
# intr = models.TextField('简介', max_length=200)
#     members = models.ManyToManyField(to='User')
# createdTime = models.DateTimeField('创建时间', auto_now_add=True)
# ####################################################################################################
class Tag(models.Model):
name = models.CharField('标签', max_length=30, null=False, unique=True)
class Meta:
verbose_name = '标签'
verbose_name_plural = '标签'
def __str__(self):
return self.name
class Activity(models.Model):
title = models.CharField('标题', max_length=30, default='DEFAULT')
startTime = models.DateTimeField(verbose_name='开始时间')
endTime = models.DateTimeField(verbose_name='结束时间')
city = models.ForeignKey(City, on_delete=models.DO_NOTHING, verbose_name='所在城市')
location = models.CharField('详细地点',max_length=200)
cost = models.PositiveIntegerField('费用')
class Meta:
verbose_name = '活动'
verbose_name_plural = '活动'
def __str__(self):
return self.title
class Tie(models.Model):  # a forum post/thread
author = models.ForeignKey(User, on_delete=models.DO_NOTHING, verbose_name='作者')
title = models.CharField('标题',max_length=30)
content = RichTextField()
createdTime = models.DateTimeField(verbose_name='发布时间', auto_now_add=True)
replyTime = models.DateTimeField(verbose_name='最新回复时间', auto_now=True)
access = models.IntegerField(default=0, verbose_name='浏览量')
tag = models.ManyToManyField(Tag, verbose_name='所属标签')
relatedActivity = models.ForeignKey(Activity, on_delete=models.DO_NOTHING, verbose_name='相关活动')
class Meta:
verbose_name = '帖子'
verbose_name_plural = '帖子'
def __str__(self):
return self.title
class Reply(models.Model):  # a reply ("floor") within a thread
content = models.TextField('回复内容',max_length=200)
relatedTie = models.ForeignKey(Tie, on_delete=models.DO_NOTHING, verbose_name='相关帖')
class Meta:
verbose_name = '楼层'
verbose_name_plural = '楼层'
class Test(models.Model):
content = models.TextField(max_length=3000)
class Meta:
verbose_name = 'test'
verbose_name_plural = 'test'
class Student(models.Model):  # stores the roster records imported by administrators
name = models.CharField('姓名', max_length = 50)
studentID = models.CharField('学号', max_length = 20)
grade = models.IntegerField('界次')
department = models.ForeignKey(Department, on_delete=models.DO_NOTHING, verbose_name='院系')
| 33.427536 | 129 | 0.674832 |
4a20239f1b08a3949199e28b1bbf3dd6615c3cd8 | 1,615 | py | Python | openmdao.lib/src/openmdao/lib/components/dynwrapper.py | swryan/OpenMDAO-Framework | f50d60e1a8cadac7fe03d26ffad5fb660b2a15ec | [
"Apache-2.0"
] | 3 | 2015-06-02T00:36:28.000Z | 2018-11-03T00:35:21.000Z | openmdao.lib/src/openmdao/lib/components/dynwrapper.py | JustinSGray/OpenMDAO-Framework | 7ebd7fda0b10fbe8a86ae938dc4f135396dd9759 | [
"Apache-2.0"
] | null | null | null | openmdao.lib/src/openmdao/lib/components/dynwrapper.py | JustinSGray/OpenMDAO-Framework | 7ebd7fda0b10fbe8a86ae938dc4f135396dd9759 | [
"Apache-2.0"
] | 1 | 2020-07-15T02:45:54.000Z | 2020-07-15T02:45:54.000Z | import os
from openmdao.main.api import Component
#class DynWrapper(Component):
#"""A Component wrapper for objects that contain their own internal hierarchy
#and Component-like interface (set/get/run) but do not provide normal python
#attribute access.
#"""
#def __init__(self):
#super(DynWrapper, self).__init__()
#def get_dyn_trait(self, pathname, io):
#"""Returns a trait if a trait with the given pathname exists, possibly
#creating the trait 'on-the-fly'. If an attribute exists with the given
#pathname but no trait is found or can be created, or if pathname
#references a trait in a parent scope, None will be returned. If no
#attribute exists with the given pathname within this scope, an
#AttributeError will be raised.
#pathname: str
#Pathname of the desired trait. May contain dots.
#"""
#pass
#def __getattr__(self, name):
## this is called when the normal getattr fails
#pass
#def _get_failed(self, path, index=None):
#pass
#def _set_failed(self, path, value, index=None, src=None, force=False):
#pass
#def execute(self):
#""" Perform operations associated with running the component. """
#pass
#def build_trait(self, ref_name, iotype=None, trait=None):
## create appropriate trait based on the name, iotype, and whatever
## internal information that self._top can provide
#pass
#def get_wrapped_attr(self, name):
#pass
| 33.645833 | 81 | 0.63096 |
4a20253684bf6c0cb84b1751cbb0f369d3198af2 | 283 | py | Python | cursoemvideo/ex0086.py | LuanPetruitis/minis_programas_python | 5fbc4c3fbe832303511e612f320d31e2b91f1ef0 | [
"MIT"
] | 1 | 2020-04-09T14:41:48.000Z | 2020-04-09T14:41:48.000Z | cursoemvideo/ex0086.py | LuanPetruitis/minis_programas_python | 5fbc4c3fbe832303511e612f320d31e2b91f1ef0 | [
"MIT"
] | 1 | 2020-04-10T20:39:24.000Z | 2020-04-12T13:43:51.000Z | cursoemvideo/ex0086.py | LuanPetruitis/minis_programas_python | 5fbc4c3fbe832303511e612f320d31e2b91f1ef0 | [
"MIT"
] | 1 | 2020-04-13T03:21:09.000Z | 2020-04-13T03:21:09.000Z | matriz = [[0, 0, 0], [0, 0, 0], [0, 0, 0]]
for l in range(0, 3):
for c in range(0, 3):
        matriz[l][c] = int(input(f'Enter a value: [{l}, {c}] '))
print('-='*30)
for l in range(0, 3):
for c in range(0, 3):
        print(f'[{matriz[l][c]:^5}]', end='')
print() | 31.444444 | 70 | 0.459364 |
4a202581b675e89ec31d9b905515fb8fda2ee62a | 42,595 | py | Python | isi_sdk_8_2_0/isi_sdk_8_2_0/models/providers_file_file_item.py | mohitjain97/isilon_sdk_python | a371f438f542568edb8cda35e929e6b300b1177c | [
"Unlicense"
] | 24 | 2018-06-22T14:13:23.000Z | 2022-03-23T01:21:26.000Z | isi_sdk_8_2_0/isi_sdk_8_2_0/models/providers_file_file_item.py | mohitjain97/isilon_sdk_python | a371f438f542568edb8cda35e929e6b300b1177c | [
"Unlicense"
] | 46 | 2018-04-30T13:28:22.000Z | 2022-03-21T21:11:07.000Z | isi_sdk_8_2_0/isi_sdk_8_2_0/models/providers_file_file_item.py | mohitjain97/isilon_sdk_python | a371f438f542568edb8cda35e929e6b300b1177c | [
"Unlicense"
] | 29 | 2018-06-19T00:14:04.000Z | 2022-02-08T17:51:19.000Z | # coding: utf-8
"""
Isilon SDK
Isilon SDK - Language bindings for the OneFS API # noqa: E501
OpenAPI spec version: 7
Contact: [email protected]
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class ProvidersFileFileItem(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'authentication': 'bool',
'create_home_directory': 'bool',
'enabled': 'bool',
'enumerate_groups': 'bool',
'enumerate_users': 'bool',
'findable_groups': 'list[str]',
'findable_users': 'list[str]',
'group_domain': 'str',
'group_file': 'str',
'home_directory_template': 'str',
'id': 'str',
'listable_groups': 'list[str]',
'listable_users': 'list[str]',
'login_shell': 'str',
'modifiable_groups': 'list[str]',
'modifiable_users': 'list[str]',
'name': 'str',
'netgroup_file': 'str',
'normalize_groups': 'bool',
'normalize_users': 'bool',
'ntlm_support': 'str',
'password_file': 'str',
'provider_domain': 'str',
'restrict_findable': 'bool',
'restrict_listable': 'bool',
'restrict_modifiable': 'bool',
'status': 'str',
'system': 'bool',
'unfindable_groups': 'list[str]',
'unfindable_users': 'list[str]',
'unlistable_groups': 'list[str]',
'unlistable_users': 'list[str]',
'unmodifiable_groups': 'list[str]',
'unmodifiable_users': 'list[str]',
'user_domain': 'str',
'zone_name': 'str'
}
attribute_map = {
'authentication': 'authentication',
'create_home_directory': 'create_home_directory',
'enabled': 'enabled',
'enumerate_groups': 'enumerate_groups',
'enumerate_users': 'enumerate_users',
'findable_groups': 'findable_groups',
'findable_users': 'findable_users',
'group_domain': 'group_domain',
'group_file': 'group_file',
'home_directory_template': 'home_directory_template',
'id': 'id',
'listable_groups': 'listable_groups',
'listable_users': 'listable_users',
'login_shell': 'login_shell',
'modifiable_groups': 'modifiable_groups',
'modifiable_users': 'modifiable_users',
'name': 'name',
'netgroup_file': 'netgroup_file',
'normalize_groups': 'normalize_groups',
'normalize_users': 'normalize_users',
'ntlm_support': 'ntlm_support',
'password_file': 'password_file',
'provider_domain': 'provider_domain',
'restrict_findable': 'restrict_findable',
'restrict_listable': 'restrict_listable',
'restrict_modifiable': 'restrict_modifiable',
'status': 'status',
'system': 'system',
'unfindable_groups': 'unfindable_groups',
'unfindable_users': 'unfindable_users',
'unlistable_groups': 'unlistable_groups',
'unlistable_users': 'unlistable_users',
'unmodifiable_groups': 'unmodifiable_groups',
'unmodifiable_users': 'unmodifiable_users',
'user_domain': 'user_domain',
'zone_name': 'zone_name'
}
def __init__(self, authentication=None, create_home_directory=None, enabled=None, enumerate_groups=None, enumerate_users=None, findable_groups=None, findable_users=None, group_domain=None, group_file=None, home_directory_template=None, id=None, listable_groups=None, listable_users=None, login_shell=None, modifiable_groups=None, modifiable_users=None, name=None, netgroup_file=None, normalize_groups=None, normalize_users=None, ntlm_support=None, password_file=None, provider_domain=None, restrict_findable=None, restrict_listable=None, restrict_modifiable=None, status=None, system=None, unfindable_groups=None, unfindable_users=None, unlistable_groups=None, unlistable_users=None, unmodifiable_groups=None, unmodifiable_users=None, user_domain=None, zone_name=None): # noqa: E501
"""ProvidersFileFileItem - a model defined in Swagger""" # noqa: E501
self._authentication = None
self._create_home_directory = None
self._enabled = None
self._enumerate_groups = None
self._enumerate_users = None
self._findable_groups = None
self._findable_users = None
self._group_domain = None
self._group_file = None
self._home_directory_template = None
self._id = None
self._listable_groups = None
self._listable_users = None
self._login_shell = None
self._modifiable_groups = None
self._modifiable_users = None
self._name = None
self._netgroup_file = None
self._normalize_groups = None
self._normalize_users = None
self._ntlm_support = None
self._password_file = None
self._provider_domain = None
self._restrict_findable = None
self._restrict_listable = None
self._restrict_modifiable = None
self._status = None
self._system = None
self._unfindable_groups = None
self._unfindable_users = None
self._unlistable_groups = None
self._unlistable_users = None
self._unmodifiable_groups = None
self._unmodifiable_users = None
self._user_domain = None
self._zone_name = None
self.discriminator = None
if authentication is not None:
self.authentication = authentication
if create_home_directory is not None:
self.create_home_directory = create_home_directory
if enabled is not None:
self.enabled = enabled
if enumerate_groups is not None:
self.enumerate_groups = enumerate_groups
if enumerate_users is not None:
self.enumerate_users = enumerate_users
if findable_groups is not None:
self.findable_groups = findable_groups
if findable_users is not None:
self.findable_users = findable_users
if group_domain is not None:
self.group_domain = group_domain
if group_file is not None:
self.group_file = group_file
if home_directory_template is not None:
self.home_directory_template = home_directory_template
if id is not None:
self.id = id
if listable_groups is not None:
self.listable_groups = listable_groups
if listable_users is not None:
self.listable_users = listable_users
if login_shell is not None:
self.login_shell = login_shell
if modifiable_groups is not None:
self.modifiable_groups = modifiable_groups
if modifiable_users is not None:
self.modifiable_users = modifiable_users
if name is not None:
self.name = name
if netgroup_file is not None:
self.netgroup_file = netgroup_file
if normalize_groups is not None:
self.normalize_groups = normalize_groups
if normalize_users is not None:
self.normalize_users = normalize_users
if ntlm_support is not None:
self.ntlm_support = ntlm_support
if password_file is not None:
self.password_file = password_file
if provider_domain is not None:
self.provider_domain = provider_domain
if restrict_findable is not None:
self.restrict_findable = restrict_findable
if restrict_listable is not None:
self.restrict_listable = restrict_listable
if restrict_modifiable is not None:
self.restrict_modifiable = restrict_modifiable
if status is not None:
self.status = status
if system is not None:
self.system = system
if unfindable_groups is not None:
self.unfindable_groups = unfindable_groups
if unfindable_users is not None:
self.unfindable_users = unfindable_users
if unlistable_groups is not None:
self.unlistable_groups = unlistable_groups
if unlistable_users is not None:
self.unlistable_users = unlistable_users
if unmodifiable_groups is not None:
self.unmodifiable_groups = unmodifiable_groups
if unmodifiable_users is not None:
self.unmodifiable_users = unmodifiable_users
if user_domain is not None:
self.user_domain = user_domain
if zone_name is not None:
self.zone_name = zone_name
@property
def authentication(self):
"""Gets the authentication of this ProvidersFileFileItem. # noqa: E501
Enables authentication and identity mapping through the authentication provider. # noqa: E501
:return: The authentication of this ProvidersFileFileItem. # noqa: E501
:rtype: bool
"""
return self._authentication
@authentication.setter
def authentication(self, authentication):
"""Sets the authentication of this ProvidersFileFileItem.
Enables authentication and identity mapping through the authentication provider. # noqa: E501
:param authentication: The authentication of this ProvidersFileFileItem. # noqa: E501
:type: bool
"""
self._authentication = authentication
@property
def create_home_directory(self):
"""Gets the create_home_directory of this ProvidersFileFileItem. # noqa: E501
Automatically creates a home directory on the first login. # noqa: E501
:return: The create_home_directory of this ProvidersFileFileItem. # noqa: E501
:rtype: bool
"""
return self._create_home_directory
@create_home_directory.setter
def create_home_directory(self, create_home_directory):
"""Sets the create_home_directory of this ProvidersFileFileItem.
Automatically creates a home directory on the first login. # noqa: E501
:param create_home_directory: The create_home_directory of this ProvidersFileFileItem. # noqa: E501
:type: bool
"""
self._create_home_directory = create_home_directory
@property
def enabled(self):
"""Gets the enabled of this ProvidersFileFileItem. # noqa: E501
Enables the file provider. # noqa: E501
:return: The enabled of this ProvidersFileFileItem. # noqa: E501
:rtype: bool
"""
return self._enabled
@enabled.setter
def enabled(self, enabled):
"""Sets the enabled of this ProvidersFileFileItem.
Enables the file provider. # noqa: E501
:param enabled: The enabled of this ProvidersFileFileItem. # noqa: E501
:type: bool
"""
self._enabled = enabled
@property
def enumerate_groups(self):
"""Gets the enumerate_groups of this ProvidersFileFileItem. # noqa: E501
Enables the provider to enumerate groups. # noqa: E501
:return: The enumerate_groups of this ProvidersFileFileItem. # noqa: E501
:rtype: bool
"""
return self._enumerate_groups
@enumerate_groups.setter
def enumerate_groups(self, enumerate_groups):
"""Sets the enumerate_groups of this ProvidersFileFileItem.
Enables the provider to enumerate groups. # noqa: E501
:param enumerate_groups: The enumerate_groups of this ProvidersFileFileItem. # noqa: E501
:type: bool
"""
self._enumerate_groups = enumerate_groups
@property
def enumerate_users(self):
"""Gets the enumerate_users of this ProvidersFileFileItem. # noqa: E501
Enables the provider to enumerate users. # noqa: E501
:return: The enumerate_users of this ProvidersFileFileItem. # noqa: E501
:rtype: bool
"""
return self._enumerate_users
@enumerate_users.setter
def enumerate_users(self, enumerate_users):
"""Sets the enumerate_users of this ProvidersFileFileItem.
Enables the provider to enumerate users. # noqa: E501
:param enumerate_users: The enumerate_users of this ProvidersFileFileItem. # noqa: E501
:type: bool
"""
self._enumerate_users = enumerate_users
@property
def findable_groups(self):
"""Gets the findable_groups of this ProvidersFileFileItem. # noqa: E501
Specifies the list of groups that can be resolved. # noqa: E501
:return: The findable_groups of this ProvidersFileFileItem. # noqa: E501
:rtype: list[str]
"""
return self._findable_groups
@findable_groups.setter
def findable_groups(self, findable_groups):
"""Sets the findable_groups of this ProvidersFileFileItem.
Specifies the list of groups that can be resolved. # noqa: E501
:param findable_groups: The findable_groups of this ProvidersFileFileItem. # noqa: E501
:type: list[str]
"""
self._findable_groups = findable_groups
@property
def findable_users(self):
"""Gets the findable_users of this ProvidersFileFileItem. # noqa: E501
Specifies the list of users that can be resolved. # noqa: E501
:return: The findable_users of this ProvidersFileFileItem. # noqa: E501
:rtype: list[str]
"""
return self._findable_users
@findable_users.setter
def findable_users(self, findable_users):
"""Sets the findable_users of this ProvidersFileFileItem.
Specifies the list of users that can be resolved. # noqa: E501
:param findable_users: The findable_users of this ProvidersFileFileItem. # noqa: E501
:type: list[str]
"""
self._findable_users = findable_users
@property
def group_domain(self):
"""Gets the group_domain of this ProvidersFileFileItem. # noqa: E501
Specifies the domain for this provider through which domains are qualified. # noqa: E501
:return: The group_domain of this ProvidersFileFileItem. # noqa: E501
:rtype: str
"""
return self._group_domain
@group_domain.setter
def group_domain(self, group_domain):
"""Sets the group_domain of this ProvidersFileFileItem.
Specifies the domain for this provider through which domains are qualified. # noqa: E501
:param group_domain: The group_domain of this ProvidersFileFileItem. # noqa: E501
:type: str
"""
if group_domain is not None and len(group_domain) > 255:
raise ValueError("Invalid value for `group_domain`, length must be less than or equal to `255`") # noqa: E501
if group_domain is not None and len(group_domain) < 0:
raise ValueError("Invalid value for `group_domain`, length must be greater than or equal to `0`") # noqa: E501
self._group_domain = group_domain
@property
def group_file(self):
"""Gets the group_file of this ProvidersFileFileItem. # noqa: E501
Specifies the location of the file that contains information about the group. # noqa: E501
:return: The group_file of this ProvidersFileFileItem. # noqa: E501
:rtype: str
"""
return self._group_file
@group_file.setter
def group_file(self, group_file):
"""Sets the group_file of this ProvidersFileFileItem.
Specifies the location of the file that contains information about the group. # noqa: E501
:param group_file: The group_file of this ProvidersFileFileItem. # noqa: E501
:type: str
"""
if group_file is not None and len(group_file) > 4096:
raise ValueError("Invalid value for `group_file`, length must be less than or equal to `4096`") # noqa: E501
if group_file is not None and len(group_file) < 0:
raise ValueError("Invalid value for `group_file`, length must be greater than or equal to `0`") # noqa: E501
self._group_file = group_file
@property
def home_directory_template(self):
"""Gets the home_directory_template of this ProvidersFileFileItem. # noqa: E501
Specifies the path to the home directory template. # noqa: E501
:return: The home_directory_template of this ProvidersFileFileItem. # noqa: E501
:rtype: str
"""
return self._home_directory_template
@home_directory_template.setter
def home_directory_template(self, home_directory_template):
"""Sets the home_directory_template of this ProvidersFileFileItem.
Specifies the path to the home directory template. # noqa: E501
:param home_directory_template: The home_directory_template of this ProvidersFileFileItem. # noqa: E501
:type: str
"""
if home_directory_template is not None and len(home_directory_template) > 4096:
raise ValueError("Invalid value for `home_directory_template`, length must be less than or equal to `4096`") # noqa: E501
if home_directory_template is not None and len(home_directory_template) < 0:
raise ValueError("Invalid value for `home_directory_template`, length must be greater than or equal to `0`") # noqa: E501
if home_directory_template is not None and not re.search('^((\/[^\/\\0]+)(\/?))*$', home_directory_template): # noqa: E501
raise ValueError("Invalid value for `home_directory_template`, must be a follow pattern or equal to `/^((\/[^\/\\0]+)(\/?))*$/`") # noqa: E501
self._home_directory_template = home_directory_template
@property
def id(self):
"""Gets the id of this ProvidersFileFileItem. # noqa: E501
Specifies the file provider ID. # noqa: E501
:return: The id of this ProvidersFileFileItem. # noqa: E501
:rtype: str
"""
return self._id
@id.setter
def id(self, id):
"""Sets the id of this ProvidersFileFileItem.
Specifies the file provider ID. # noqa: E501
:param id: The id of this ProvidersFileFileItem. # noqa: E501
:type: str
"""
if id is not None and len(id) > 255:
raise ValueError("Invalid value for `id`, length must be less than or equal to `255`") # noqa: E501
if id is not None and len(id) < 0:
raise ValueError("Invalid value for `id`, length must be greater than or equal to `0`") # noqa: E501
self._id = id
@property
def listable_groups(self):
"""Gets the listable_groups of this ProvidersFileFileItem. # noqa: E501
Specifies the groups that can be viewed in the provider. # noqa: E501
:return: The listable_groups of this ProvidersFileFileItem. # noqa: E501
:rtype: list[str]
"""
return self._listable_groups
@listable_groups.setter
def listable_groups(self, listable_groups):
"""Sets the listable_groups of this ProvidersFileFileItem.
Specifies the groups that can be viewed in the provider. # noqa: E501
:param listable_groups: The listable_groups of this ProvidersFileFileItem. # noqa: E501
:type: list[str]
"""
self._listable_groups = listable_groups
@property
def listable_users(self):
"""Gets the listable_users of this ProvidersFileFileItem. # noqa: E501
Specifies the users that can be viewed in the provider. # noqa: E501
:return: The listable_users of this ProvidersFileFileItem. # noqa: E501
:rtype: list[str]
"""
return self._listable_users
@listable_users.setter
def listable_users(self, listable_users):
"""Sets the listable_users of this ProvidersFileFileItem.
Specifies the users that can be viewed in the provider. # noqa: E501
:param listable_users: The listable_users of this ProvidersFileFileItem. # noqa: E501
:type: list[str]
"""
self._listable_users = listable_users
@property
def login_shell(self):
"""Gets the login_shell of this ProvidersFileFileItem. # noqa: E501
Specifies the login shell path. # noqa: E501
:return: The login_shell of this ProvidersFileFileItem. # noqa: E501
:rtype: str
"""
return self._login_shell
@login_shell.setter
def login_shell(self, login_shell):
"""Sets the login_shell of this ProvidersFileFileItem.
Specifies the login shell path. # noqa: E501
:param login_shell: The login_shell of this ProvidersFileFileItem. # noqa: E501
:type: str
"""
if login_shell is not None and len(login_shell) > 4096:
raise ValueError("Invalid value for `login_shell`, length must be less than or equal to `4096`") # noqa: E501
if login_shell is not None and len(login_shell) < 0:
raise ValueError("Invalid value for `login_shell`, length must be greater than or equal to `0`") # noqa: E501
self._login_shell = login_shell
@property
def modifiable_groups(self):
"""Gets the modifiable_groups of this ProvidersFileFileItem. # noqa: E501
Specifies the groups that can be modified in the provider. # noqa: E501
:return: The modifiable_groups of this ProvidersFileFileItem. # noqa: E501
:rtype: list[str]
"""
return self._modifiable_groups
@modifiable_groups.setter
def modifiable_groups(self, modifiable_groups):
"""Sets the modifiable_groups of this ProvidersFileFileItem.
Specifies the groups that can be modified in the provider. # noqa: E501
:param modifiable_groups: The modifiable_groups of this ProvidersFileFileItem. # noqa: E501
:type: list[str]
"""
self._modifiable_groups = modifiable_groups
@property
def modifiable_users(self):
"""Gets the modifiable_users of this ProvidersFileFileItem. # noqa: E501
Specifies the users that can be modified in the provider. # noqa: E501
:return: The modifiable_users of this ProvidersFileFileItem. # noqa: E501
:rtype: list[str]
"""
return self._modifiable_users
@modifiable_users.setter
def modifiable_users(self, modifiable_users):
"""Sets the modifiable_users of this ProvidersFileFileItem.
Specifies the users that can be modified in the provider. # noqa: E501
:param modifiable_users: The modifiable_users of this ProvidersFileFileItem. # noqa: E501
:type: list[str]
"""
self._modifiable_users = modifiable_users
@property
def name(self):
"""Gets the name of this ProvidersFileFileItem. # noqa: E501
Specifies the name of the file provider. # noqa: E501
:return: The name of this ProvidersFileFileItem. # noqa: E501
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""Sets the name of this ProvidersFileFileItem.
Specifies the name of the file provider. # noqa: E501
:param name: The name of this ProvidersFileFileItem. # noqa: E501
:type: str
"""
if name is not None and len(name) > 255:
raise ValueError("Invalid value for `name`, length must be less than or equal to `255`") # noqa: E501
if name is not None and len(name) < 0:
raise ValueError("Invalid value for `name`, length must be greater than or equal to `0`") # noqa: E501
self._name = name
@property
def netgroup_file(self):
"""Gets the netgroup_file of this ProvidersFileFileItem. # noqa: E501
Specifies the path to a netgroups replacement file. # noqa: E501
:return: The netgroup_file of this ProvidersFileFileItem. # noqa: E501
:rtype: str
"""
return self._netgroup_file
@netgroup_file.setter
def netgroup_file(self, netgroup_file):
"""Sets the netgroup_file of this ProvidersFileFileItem.
Specifies the path to a netgroups replacement file. # noqa: E501
:param netgroup_file: The netgroup_file of this ProvidersFileFileItem. # noqa: E501
:type: str
"""
if netgroup_file is not None and len(netgroup_file) > 4096:
raise ValueError("Invalid value for `netgroup_file`, length must be less than or equal to `4096`") # noqa: E501
if netgroup_file is not None and len(netgroup_file) < 0:
raise ValueError("Invalid value for `netgroup_file`, length must be greater than or equal to `0`") # noqa: E501
self._netgroup_file = netgroup_file
@property
def normalize_groups(self):
"""Gets the normalize_groups of this ProvidersFileFileItem. # noqa: E501
Normalizes group names to lowercase before look up. # noqa: E501
:return: The normalize_groups of this ProvidersFileFileItem. # noqa: E501
:rtype: bool
"""
return self._normalize_groups
@normalize_groups.setter
def normalize_groups(self, normalize_groups):
"""Sets the normalize_groups of this ProvidersFileFileItem.
Normalizes group names to lowercase before look up. # noqa: E501
:param normalize_groups: The normalize_groups of this ProvidersFileFileItem. # noqa: E501
:type: bool
"""
self._normalize_groups = normalize_groups
@property
def normalize_users(self):
"""Gets the normalize_users of this ProvidersFileFileItem. # noqa: E501
Normalizes user names to lowercase before look up. # noqa: E501
:return: The normalize_users of this ProvidersFileFileItem. # noqa: E501
:rtype: bool
"""
return self._normalize_users
@normalize_users.setter
def normalize_users(self, normalize_users):
"""Sets the normalize_users of this ProvidersFileFileItem.
Normalizes user names to lowercase before look up. # noqa: E501
:param normalize_users: The normalize_users of this ProvidersFileFileItem. # noqa: E501
:type: bool
"""
self._normalize_users = normalize_users
@property
def ntlm_support(self):
"""Gets the ntlm_support of this ProvidersFileFileItem. # noqa: E501
Specifies which NTLM versions to support for users with NTLM-compatible credentials. # noqa: E501
:return: The ntlm_support of this ProvidersFileFileItem. # noqa: E501
:rtype: str
"""
return self._ntlm_support
@ntlm_support.setter
def ntlm_support(self, ntlm_support):
"""Sets the ntlm_support of this ProvidersFileFileItem.
Specifies which NTLM versions to support for users with NTLM-compatible credentials. # noqa: E501
:param ntlm_support: The ntlm_support of this ProvidersFileFileItem. # noqa: E501
:type: str
"""
allowed_values = ["all", "v2only", "none"] # noqa: E501
if ntlm_support is not None and ntlm_support not in allowed_values:
raise ValueError(
"Invalid value for `ntlm_support` ({0}), must be one of {1}" # noqa: E501
.format(ntlm_support, allowed_values)
)
self._ntlm_support = ntlm_support
@property
def password_file(self):
"""Gets the password_file of this ProvidersFileFileItem. # noqa: E501
Specifies the location of the file containing information about users. # noqa: E501
:return: The password_file of this ProvidersFileFileItem. # noqa: E501
:rtype: str
"""
return self._password_file
@password_file.setter
def password_file(self, password_file):
"""Sets the password_file of this ProvidersFileFileItem.
Specifies the location of the file containing information about users. # noqa: E501
:param password_file: The password_file of this ProvidersFileFileItem. # noqa: E501
:type: str
"""
if password_file is not None and len(password_file) > 4096:
raise ValueError("Invalid value for `password_file`, length must be less than or equal to `4096`") # noqa: E501
if password_file is not None and len(password_file) < 0:
raise ValueError("Invalid value for `password_file`, length must be greater than or equal to `0`") # noqa: E501
self._password_file = password_file
@property
def provider_domain(self):
"""Gets the provider_domain of this ProvidersFileFileItem. # noqa: E501
Specifies the domain for the provider. # noqa: E501
:return: The provider_domain of this ProvidersFileFileItem. # noqa: E501
:rtype: str
"""
return self._provider_domain
@provider_domain.setter
def provider_domain(self, provider_domain):
"""Sets the provider_domain of this ProvidersFileFileItem.
Specifies the domain for the provider. # noqa: E501
:param provider_domain: The provider_domain of this ProvidersFileFileItem. # noqa: E501
:type: str
"""
if provider_domain is not None and len(provider_domain) > 255:
raise ValueError("Invalid value for `provider_domain`, length must be less than or equal to `255`") # noqa: E501
if provider_domain is not None and len(provider_domain) < 0:
raise ValueError("Invalid value for `provider_domain`, length must be greater than or equal to `0`") # noqa: E501
self._provider_domain = provider_domain
@property
def restrict_findable(self):
"""Gets the restrict_findable of this ProvidersFileFileItem. # noqa: E501
If true, checks the provider for filtered lists of findable and unfindable users and groups. # noqa: E501
:return: The restrict_findable of this ProvidersFileFileItem. # noqa: E501
:rtype: bool
"""
return self._restrict_findable
@restrict_findable.setter
def restrict_findable(self, restrict_findable):
"""Sets the restrict_findable of this ProvidersFileFileItem.
If true, checks the provider for filtered lists of findable and unfindable users and groups. # noqa: E501
:param restrict_findable: The restrict_findable of this ProvidersFileFileItem. # noqa: E501
:type: bool
"""
self._restrict_findable = restrict_findable
@property
def restrict_listable(self):
"""Gets the restrict_listable of this ProvidersFileFileItem. # noqa: E501
If true, checks the provider for filtered lists of listable and unlistable users and groups. # noqa: E501
:return: The restrict_listable of this ProvidersFileFileItem. # noqa: E501
:rtype: bool
"""
return self._restrict_listable
@restrict_listable.setter
def restrict_listable(self, restrict_listable):
"""Sets the restrict_listable of this ProvidersFileFileItem.
If true, checks the provider for filtered lists of listable and unlistable users and groups. # noqa: E501
:param restrict_listable: The restrict_listable of this ProvidersFileFileItem. # noqa: E501
:type: bool
"""
self._restrict_listable = restrict_listable
@property
def restrict_modifiable(self):
"""Gets the restrict_modifiable of this ProvidersFileFileItem. # noqa: E501
If true, checks the provider for filtered lists of modifiable and unmodifiable users and groups. # noqa: E501
:return: The restrict_modifiable of this ProvidersFileFileItem. # noqa: E501
:rtype: bool
"""
return self._restrict_modifiable
@restrict_modifiable.setter
def restrict_modifiable(self, restrict_modifiable):
"""Sets the restrict_modifiable of this ProvidersFileFileItem.
If true, checks the provider for filtered lists of modifiable and unmodifiable users and groups. # noqa: E501
:param restrict_modifiable: The restrict_modifiable of this ProvidersFileFileItem. # noqa: E501
:type: bool
"""
self._restrict_modifiable = restrict_modifiable
@property
def status(self):
"""Gets the status of this ProvidersFileFileItem. # noqa: E501
Specifies the status of the provider. # noqa: E501
:return: The status of this ProvidersFileFileItem. # noqa: E501
:rtype: str
"""
return self._status
@status.setter
def status(self, status):
"""Sets the status of this ProvidersFileFileItem.
Specifies the status of the provider. # noqa: E501
:param status: The status of this ProvidersFileFileItem. # noqa: E501
:type: str
"""
if status is not None and len(status) > 255:
raise ValueError("Invalid value for `status`, length must be less than or equal to `255`") # noqa: E501
if status is not None and len(status) < 0:
raise ValueError("Invalid value for `status`, length must be greater than or equal to `0`") # noqa: E501
self._status = status
@property
def system(self):
"""Gets the system of this ProvidersFileFileItem. # noqa: E501
If true, indicates that this provider instance was created by OneFS and cannot be removed. # noqa: E501
:return: The system of this ProvidersFileFileItem. # noqa: E501
:rtype: bool
"""
return self._system
@system.setter
def system(self, system):
"""Sets the system of this ProvidersFileFileItem.
If true, indicates that this provider instance was created by OneFS and cannot be removed. # noqa: E501
:param system: The system of this ProvidersFileFileItem. # noqa: E501
:type: bool
"""
self._system = system
@property
def unfindable_groups(self):
"""Gets the unfindable_groups of this ProvidersFileFileItem. # noqa: E501
Specifies groups that cannot be resolved by the provider. # noqa: E501
:return: The unfindable_groups of this ProvidersFileFileItem. # noqa: E501
:rtype: list[str]
"""
return self._unfindable_groups
@unfindable_groups.setter
def unfindable_groups(self, unfindable_groups):
"""Sets the unfindable_groups of this ProvidersFileFileItem.
Specifies groups that cannot be resolved by the provider. # noqa: E501
:param unfindable_groups: The unfindable_groups of this ProvidersFileFileItem. # noqa: E501
:type: list[str]
"""
self._unfindable_groups = unfindable_groups
@property
def unfindable_users(self):
"""Gets the unfindable_users of this ProvidersFileFileItem. # noqa: E501
Specifies users that cannot be resolved by the provider. # noqa: E501
:return: The unfindable_users of this ProvidersFileFileItem. # noqa: E501
:rtype: list[str]
"""
return self._unfindable_users
@unfindable_users.setter
def unfindable_users(self, unfindable_users):
"""Sets the unfindable_users of this ProvidersFileFileItem.
Specifies users that cannot be resolved by the provider. # noqa: E501
:param unfindable_users: The unfindable_users of this ProvidersFileFileItem. # noqa: E501
:type: list[str]
"""
self._unfindable_users = unfindable_users
@property
def unlistable_groups(self):
"""Gets the unlistable_groups of this ProvidersFileFileItem. # noqa: E501
Specifies a group that cannot be listed by the provider. # noqa: E501
:return: The unlistable_groups of this ProvidersFileFileItem. # noqa: E501
:rtype: list[str]
"""
return self._unlistable_groups
@unlistable_groups.setter
def unlistable_groups(self, unlistable_groups):
"""Sets the unlistable_groups of this ProvidersFileFileItem.
Specifies a group that cannot be listed by the provider. # noqa: E501
:param unlistable_groups: The unlistable_groups of this ProvidersFileFileItem. # noqa: E501
:type: list[str]
"""
self._unlistable_groups = unlistable_groups
@property
def unlistable_users(self):
"""Gets the unlistable_users of this ProvidersFileFileItem. # noqa: E501
Specifies a user that cannot be listed by the provider. # noqa: E501
:return: The unlistable_users of this ProvidersFileFileItem. # noqa: E501
:rtype: list[str]
"""
return self._unlistable_users
@unlistable_users.setter
def unlistable_users(self, unlistable_users):
"""Sets the unlistable_users of this ProvidersFileFileItem.
Specifies a user that cannot be listed by the provider. # noqa: E501
:param unlistable_users: The unlistable_users of this ProvidersFileFileItem. # noqa: E501
:type: list[str]
"""
self._unlistable_users = unlistable_users
@property
def unmodifiable_groups(self):
"""Gets the unmodifiable_groups of this ProvidersFileFileItem. # noqa: E501
Specifies a group that cannot be modified by the provider. # noqa: E501
:return: The unmodifiable_groups of this ProvidersFileFileItem. # noqa: E501
:rtype: list[str]
"""
return self._unmodifiable_groups
@unmodifiable_groups.setter
def unmodifiable_groups(self, unmodifiable_groups):
"""Sets the unmodifiable_groups of this ProvidersFileFileItem.
Specifies a group that cannot be modified by the provider. # noqa: E501
:param unmodifiable_groups: The unmodifiable_groups of this ProvidersFileFileItem. # noqa: E501
:type: list[str]
"""
self._unmodifiable_groups = unmodifiable_groups
@property
def unmodifiable_users(self):
"""Gets the unmodifiable_users of this ProvidersFileFileItem. # noqa: E501
Specifies a user that cannot be modified by the provider. # noqa: E501
:return: The unmodifiable_users of this ProvidersFileFileItem. # noqa: E501
:rtype: list[str]
"""
return self._unmodifiable_users
@unmodifiable_users.setter
def unmodifiable_users(self, unmodifiable_users):
"""Sets the unmodifiable_users of this ProvidersFileFileItem.
Specifies a user that cannot be modified by the provider. # noqa: E501
:param unmodifiable_users: The unmodifiable_users of this ProvidersFileFileItem. # noqa: E501
:type: list[str]
"""
self._unmodifiable_users = unmodifiable_users
@property
def user_domain(self):
"""Gets the user_domain of this ProvidersFileFileItem. # noqa: E501
Specifies the domain for this provider through which users are qualified. # noqa: E501
:return: The user_domain of this ProvidersFileFileItem. # noqa: E501
:rtype: str
"""
return self._user_domain
@user_domain.setter
def user_domain(self, user_domain):
"""Sets the user_domain of this ProvidersFileFileItem.
Specifies the domain for this provider through which users are qualified. # noqa: E501
:param user_domain: The user_domain of this ProvidersFileFileItem. # noqa: E501
:type: str
"""
if user_domain is not None and len(user_domain) > 255:
raise ValueError("Invalid value for `user_domain`, length must be less than or equal to `255`") # noqa: E501
if user_domain is not None and len(user_domain) < 0:
raise ValueError("Invalid value for `user_domain`, length must be greater than or equal to `0`") # noqa: E501
self._user_domain = user_domain
@property
def zone_name(self):
"""Gets the zone_name of this ProvidersFileFileItem. # noqa: E501
Specifies the name of the access zone in which this provider was created. # noqa: E501
:return: The zone_name of this ProvidersFileFileItem. # noqa: E501
:rtype: str
"""
return self._zone_name
@zone_name.setter
def zone_name(self, zone_name):
"""Sets the zone_name of this ProvidersFileFileItem.
Specifies the name of the access zone in which this provider was created. # noqa: E501
:param zone_name: The zone_name of this ProvidersFileFileItem. # noqa: E501
:type: str
"""
if zone_name is not None and len(zone_name) > 255:
raise ValueError("Invalid value for `zone_name`, length must be less than or equal to `255`") # noqa: E501
if zone_name is not None and len(zone_name) < 0:
raise ValueError("Invalid value for `zone_name`, length must be greater than or equal to `0`") # noqa: E501
self._zone_name = zone_name
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ProvidersFileFileItem):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| 37.00695 | 787 | 0.655053 |
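Editor's sketch (an illustrative addition, not part of the dataset row above): every generated setter in ProvidersFileFileItem follows the same guard-then-assign pattern, which a plain property can reproduce in isolation:

class LengthChecked:
    """Stand-in reproducing the generated setters' max-length validation."""

    def __init__(self):
        self._name = None

    @property
    def name(self):
        return self._name

    @name.setter
    def name(self, name):
        # Mirrors the generated guard: None passes through, overlong values raise.
        if name is not None and len(name) > 255:
            raise ValueError("Invalid value for `name`, length must be less than or equal to `255`")
        self._name = name

item = LengthChecked()
item.name = "ok"            # accepted
try:
    item.name = "x" * 300   # rejected, like the setters above
except ValueError as exc:
    print(exc)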
4a2025c788991861dfafddb82a0f39df211dc61b | 1,685 | py | Python | wizard/wizards.py | JDavidMoreno/Technical-Service | 93ddf3d95cd3abfdfd29129d7934fb928aeced19 | ["MIT"] | null | null | null | wizard/wizards.py | JDavidMoreno/Technical-Service | 93ddf3d95cd3abfdfd29129d7934fb928aeced19 | ["MIT"] | null | null | null | wizard/wizards.py | JDavidMoreno/Technical-Service | 93ddf3d95cd3abfdfd29129d7934fb928aeced19 | ["MIT"] | null | null | null |
# -*- coding: utf-8 -*-
import logging
from datetime import timedelta
from odoo import models, fields, api
from odoo.exceptions import ValidationError
_logger = logging.getLogger(__name__)
# I need to update the wizard to the new duration and schedule system
class TechnicalServiceRequestDuration(models.TransientModel):
_name = 'ts.request.duration'
_description = "Confirm the duration of the Technical Request\
in the case it's still 0"
first_schedule_date = fields.Datetime(string="Scheduled Date", help="Date the technical service team plans the intervention. It should not differ much from the Request Date.")
b_first_schedule_date = fields.Boolean(default=True)
schedule_date_ids = fields.Many2many('ts.calendar', string="New Scheduled Date")
b_schedule_date_ids = fields.Boolean(default=True)
@api.multi
def confirm_duration(self):
request = self.env['ts.request'].browse(self.env.context.get('active_id'))
if self.b_first_schedule_date == False and self.first_schedule_date:
values = {
'name': self.env.context.get('name'),
'start': self.first_schedule_date,
'stop': self.first_schedule_date + timedelta(hours=1),
'duration': 1,
'technical_request_id': request.id,
'technical_team_id': request.technical_team.id,
}
request.update({'first_schedule_date': self.first_schedule_date, 'schedule_date_ids': [(0, False, values)]})
self.b_first_schedule_date = True
if self.b_schedule_date_ids == False and self.schedule_date_ids:
self.b_schedule_date_ids = True
if all([self.b_first_schedule_date, self.b_schedule_date_ids]):
request.requirements = True
return request._check_requirements()
| 33.7 | 176 | 0.753709 |
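Editor's sketch: the one-hour slot that confirm_duration builds can be checked outside Odoo with plain datetime arithmetic (field names mirror the values dict above; the ORM calls are omitted):

from datetime import datetime, timedelta

first_schedule_date = datetime(2021, 5, 17, 9, 0)      # hypothetical intervention start
values = {
    'name': 'Intervention',                            # placeholder name
    'start': first_schedule_date,
    'stop': first_schedule_date + timedelta(hours=1),  # the default 1-hour slot
    'duration': 1,
}
print(values['start'], '->', values['stop'])  # 2021-05-17 09:00:00 -> 2021-05-17 10:00:00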
4a20266b7238631022a2e86c8fdc1c965a65f6d1 | 2,315 | py | Python | student_net_learning/utils.py | Kulikovpavel/MCS2018.Baseline | 5a3997bc9e59ea1c2ada3df4c4f036465b38303f | ["MIT"] | 16 | 2018-06-09T13:49:45.000Z | 2020-09-30T03:47:42.000Z | student_net_learning/utils.py | Kulikovpavel/MCS2018.Baseline | 5a3997bc9e59ea1c2ada3df4c4f036465b38303f | ["MIT"] | null | null | null | student_net_learning/utils.py | Kulikovpavel/MCS2018.Baseline | 5a3997bc9e59ea1c2ada3df4c4f036465b38303f | ["MIT"] | 10 | 2018-05-21T17:13:08.000Z | 2020-04-24T16:28:46.000Z |
'''Some helper functions for Baseline, including:
- progress_bar: a progress bar mimicking xlua.progress.
'''
import os
import sys
import time
import math
import torch.nn as nn
import torch.nn.init as init
_, term_width = os.popen('stty size', 'r').read().split()
term_width = int(term_width)
TOTAL_BAR_LENGTH = 65.
last_time = time.time()
begin_time = last_time
def progress_bar(current, total, msg=None):
global last_time, begin_time
if current == 0:
begin_time = time.time() # Reset for new bar.
cur_len = int(TOTAL_BAR_LENGTH*current/total)
rest_len = int(TOTAL_BAR_LENGTH - cur_len) - 1
sys.stdout.write(' [')
for i in range(cur_len):
sys.stdout.write('=')
sys.stdout.write('>')
for i in range(rest_len):
sys.stdout.write('.')
sys.stdout.write(']')
cur_time = time.time()
step_time = cur_time - last_time
last_time = cur_time
tot_time = cur_time - begin_time
L = []
L.append(' Step: %s' % format_time(step_time))
L.append(' | Tot: %s' % format_time(tot_time))
if msg:
L.append(' | ' + msg)
msg = ''.join(L)
sys.stdout.write(msg)
for i in range(term_width-int(TOTAL_BAR_LENGTH)-len(msg)-3):
sys.stdout.write(' ')
# Go back to the center of the bar.
for i in range(term_width-int(TOTAL_BAR_LENGTH/2)+2):
sys.stdout.write('\b')
sys.stdout.write(' %d/%d ' % (current+1, total))
if current < total-1:
sys.stdout.write('\r')
else:
sys.stdout.write('\n')
sys.stdout.flush()
def format_time(seconds):
days = int(seconds / 3600/24)
seconds = seconds - days*3600*24
hours = int(seconds / 3600)
seconds = seconds - hours*3600
minutes = int(seconds / 60)
seconds = seconds - minutes*60
secondsf = int(seconds)
seconds = seconds - secondsf
millis = int(seconds*1000)
f = ''
i = 1
if days > 0:
f += str(days) + 'D'
i += 1
if hours > 0 and i <= 2:
f += str(hours) + 'h'
i += 1
if minutes > 0 and i <= 2:
f += str(minutes) + 'm'
i += 1
if secondsf > 0 and i <= 2:
f += str(secondsf) + 's'
i += 1
if millis > 0 and i <= 2:
f += str(millis) + 'ms'
i += 1
if f == '':
f = '0ms'
return f
| 24.62766 | 64 | 0.574514 |
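Editor's sketch of how these helpers are typically driven (assumes the file is importable as `utils`, that its imports — including torch — resolve, and that you run in a real terminal, since `stty size` is read at import time):

import time
from utils import progress_bar, format_time  # import path is an assumption

for step in range(50):
    time.sleep(0.01)                          # stand-in for a real training step
    progress_bar(step, 50, msg='loss: 0.123')

print(format_time(3725.5))                    # -> '1h2m' (at most two units are shown)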
4a202721a52b3f8c03aecef171b05c2b064f6ed9 | 3,975 | py | Python | base_report.py | shoubamzlibap/ihreport | e9e6132d872a9b896ea1b08b5d61cf6a157cfd0c | ["MIT"] | null | null | null | base_report.py | shoubamzlibap/ihreport | e9e6132d872a9b896ea1b08b5d61cf6a157cfd0c | ["MIT"] | null | null | null | base_report.py | shoubamzlibap/ihreport | e9e6132d872a9b896ea1b08b5d61cf6a157cfd0c | ["MIT"] | null | null | null |
"""
A small reporting tool, using asciidoc
"""
import calendar
import datetime
import jinja2
import os
import sys
path = os.path.abspath(os.path.join(os.path.dirname(__file__), '/usr/bin'))
if path not in sys.path:
sys.path.insert(1, path)
del path
import a2x
class BaseReportAttributeMissing(Exception):
pass
class BaseReport(object):
"""
Basic reporting class.
"""
def __init__(self,
body_data=None,
body_template=None,
output_filename=None,
output_dir='reports',
tmp_dir='tmp',):
"""
Create a report object
body_data: a dict containing the data for the report. Will be consumed by the template
renderer
body_template: a template file to be rendered.
output_dir: directory to save output to
tmp_dir: a directory to save intermediate files to
"""
if not body_data:
raise BaseReportAttributeMissing('body_data')
if not body_template:
raise BaseReportAttributeMissing('body_template')
self.body_data = body_data
if output_filename:
self.output_filename = output_filename
else:
self.output_filename = 'report_' + self.today()
self.body_template = body_template
self.output_dir = os.getcwd() + '/' + output_dir
self.tmp_dir = os.getcwd() + '/' + tmp_dir
self.asciidoc = self._render_template(template=body_template, data=body_data)
@staticmethod
def today():
"""
Return today formatted as dd-mmm-yyyy
"""
today = datetime.datetime.today()
day = str(today.day)
if len(day) == 1: day = '0' + day
month = calendar.month_abbr[today.month]
year = str(today.year)
return day + '-' + month + '-' + year
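# e.g. today() returns '05-Mar-2021' (zero-padded day, abbreviated month)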
def _render_template(self, template=None, data=None):
"""
Render template with input data
template: the template file
data: the data the template should be rendered with
"""
try:
self.template_env
except AttributeError:
#self.template_loader = jinja2.FileSystemLoader(searchpath='/')
self.template_loader = jinja2.FileSystemLoader(searchpath=os.getcwd())
self.template_env = jinja2.Environment(loader=self.template_loader)
loaded_template = self.template_env.get_template(template)
return loaded_template.render(data)
def _write_asciidoc(self):
"""
Write rendered asciidoc to disk for further processing.
One could probably just keep it in memory, but it is nice
for debugging to see the asciidoc source.
"""
self.asciidoc_filename = self.tmp_dir + '/' + self.output_filename + '.txt'
with open(self.asciidoc_filename, 'w') as filehandle:
filehandle.write(self.asciidoc)
def render_to_pdf(self):
"""
Convert asciidoc to pdf
"""
self._write_asciidoc()
# can we do better than this? - like just adding the non-defaults ...
a2x_opts = {'verbose': 0, 'keep_artifacts': False, 'backend': None, 'skip_asciidoc': False,
'destination_dir': self.output_dir, 'fop': True, 'backend_opts': '', 'dry_run': False, 'icons':
False, 'conf_file': None, 'stylesheet': None, 'epubcheck': False, 'lynx': False, 'resources':
[], 'format': 'pdf', 'resource_manifest': None, 'safe': False, 'fop_opts': '', 'xsltproc_opts':
'', 'copy': False, 'asciidoc_opts': '', 'doctype': 'book', 'xsl_file': None, 'dblatex_opts':
'', 'icons_dir': None, 'attributes': ['docinfo'], 'no_xmllint': False}
a2x_obj = a2x.A2X(a2x_opts)
a2x.OPTIONS = a2x_obj # verbose and dry_run used by utility functions.
a2x_obj.asciidoc_file = self.asciidoc_filename
try:
a2x_obj.load_conf()
a2x_obj.execute()
except KeyboardInterrupt:
exit(1)
| 32.317073 | 99 | 0.62239 |
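Editor's sketch of the jinja2 flow inside _render_template, made self-contained with a DictLoader so it runs without template files on disk (template name and fields are illustrative only):

import jinja2

template_env = jinja2.Environment(loader=jinja2.DictLoader({
    'report.txt': '= {{ title }}\n\nGenerated on {{ date }}.\n',
}))
loaded_template = template_env.get_template('report.txt')
print(loaded_template.render({'title': 'Monthly report', 'date': '05-Mar-2021'}))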
4a20277bdb393da0d21f16d9f1ae4ef9114bddb9 | 2,953 | py | Python | pyftdi/tests/timearray.py | larsch/pyftdi | c77136fe4000f36842bc996ff1d5a5a0e05c1be4 | ["BSD-3-Clause"] | null | null | null | pyftdi/tests/timearray.py | larsch/pyftdi | c77136fe4000f36842bc996ff1d5a5a0e05c1be4 | ["BSD-3-Clause"] | null | null | null | pyftdi/tests/timearray.py | larsch/pyftdi | c77136fe4000f36842bc996ff1d5a5a0e05c1be4 | ["BSD-3-Clause"] | null | null | null |
#!/usr/bin/env python3
"""Quick and dirty bytearray vs. array('B') performance test."""
from array import array
from struct import pack
from timeit import timeit
from time import perf_counter
def timing(f, n, a):
start = perf_counter()
for i in range(n):
f(a); f(a); f(a); f(a); f(a); f(a); f(a); f(a); f(a); f(a)
finish = perf_counter()
return '%s\t%f' % (f.__name__, finish - start)
def time_array(addr):
return array('B', addr)
def time_bytearray(addr):
return bytearray(addr)
def extend_array(addr):
b = array('B')  # was bytearray(), which made this a duplicate of extend_bytearray below
b.extend(addr)
b.extend(b)
b.extend(b)
b.extend(b)
b.extend(b)
b.extend(b)
return b
def extend_bytearray(addr):
b = bytearray()
b.extend(addr)
b.extend(b)
b.extend(b)
b.extend(b)
b.extend(b)
b.extend(b)
return b
def array_tostring(addr):
return array('B', addr).tobytes()
def str_bytearray(addr):
return str(bytearray(addr))
def struct_pack(addr):
return pack('4B', *addr)
if __name__ == '__main__':
count = 100000
addr = '192.168.4.2'
addr = tuple([int(i) for i in addr.split('.')])
print('\t\ttiming\t\tfunc\t\tno func')
print('%s\t%s\t%s' % (timing(time_array, count, addr),
timeit('time_array((192,168,4,2))', number=count,
setup='from __main__ import time_array'),
timeit("array('B', (192,168,4,2))", number=count,
setup='from array import array')))
print('%s\t%s\t%s' % (timing(time_bytearray, count, addr),
timeit('time_bytearray((192,168,4,2))', number=count,
setup='from __main__ import time_bytearray'),
timeit('bytearray((192,168,4,2))', number=count)))
print('%s\t%s' % (timing(extend_array, count, addr),
timeit('extend_array((192,168,4,2))', number=count,
setup='from __main__ import extend_array')))
print('%s\t%s' % (timing(extend_bytearray, count, addr),
timeit('extend_bytearray((192,168,4,2))', number=count,
setup='from __main__ import extend_bytearray')))
print('%s\t%s\t%s' % (timing(array_tostring, count, addr),
timeit('array_tostring((192,168,4,2))', number=count,
setup='from __main__ import array_tostring'),
timeit("array('B', (192,168,4,2)).tostring()", number=count,
setup='from array import array')))
print('%s\t%s\t%s' % (timing(str_bytearray, count, addr),
timeit('str_bytearray((192,168,4,2))', number=count,
setup='from __main__ import str_bytearray'),
timeit('str(bytearray((192,168,4,2)))', number=count)))
print('%s\t%s\t%s' % (timing(struct_pack, count, addr),
timeit('struct_pack((192,168,4,2))', number=count,
setup='from __main__ import struct_pack'),
timeit("pack('4B', *(192,168,4,2))", number=count,
setup='from struct import pack')))
| 34.337209 | 70 | 0.59194 |
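Editor's sketch: the same comparison expressed with timeit.repeat, which takes the best of several runs instead of the hand-rolled timing() loop above:

from timeit import repeat

best_array = min(repeat("array('B', (192, 168, 4, 2))",
                        setup='from array import array',
                        repeat=5, number=100000))
best_bytearray = min(repeat('bytearray((192, 168, 4, 2))',
                            repeat=5, number=100000))
print("array('B') %.4fs" % best_array)
print("bytearray  %.4fs" % best_bytearray)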
4a2028f4251022373675f1562e12bcb28e4de162 | 1,020 | py | Python | test/IssueTest.py | AurisAudentis/rentify-server | 57a9e840dce96edf6d48deedc588b1a624213dd9 | ["MIT"] | null | null | null | test/IssueTest.py | AurisAudentis/rentify-server | 57a9e840dce96edf6d48deedc588b1a624213dd9 | ["MIT"] | 1 | 2020-07-20T00:26:00.000Z | 2020-07-20T00:26:00.000Z | test/IssueTest.py | AurisAudentis/rentify-server | 57a9e840dce96edf6d48deedc588b1a624213dd9 | ["MIT"] | null | null | null |
from AuthTest import fetchToken, getHeader
import requests
header1 = getHeader(fetchToken("[email protected]", "rentify"))
header2 = getHeader(fetchToken("[email protected]", "rentify"))
r = requests.get("http://localhost:4200/groups", headers= header1)
# gidten = r.json()["tenant"][0]["_id"]
# r = requests.get(f"http://localhost:4200/issues/{gidten}", headers=header1)
r = requests.post("http://localhost:4200/groups", headers=header2, json={"description":"Building one", "maintainers": [], "rooms": [{"address":"New room"}]})
print(r.json())
grp = r.json()
rmid = grp["rooms"][0]["_id"]
r = requests.post(f"http://localhost:4200/rooms/{rmid}/users", headers=header1)
r = requests.get(f"http://localhost:4200/issues/{grp['_id']}", headers=header1)
r = requests.post(f"http://localhost:4200/issues/group/{grp['_id']}", headers=header1, json={"description": "da worst issue", "issue_title": "Help me dammit"})
print(r)
r = requests.get(f"http://localhost:4200/issues/{grp['_id']}", headers=header2)
print(r.text)
| 36.428571 | 159 | 0.694118 |
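Editor's sketch: the same flow with requests.Session, which attaches the auth header once instead of passing headers on every call (assumes the rentify server is running on localhost:4200; the token value is a placeholder):

import requests

session = requests.Session()
# In the test above this dict comes from getHeader(fetchToken(...)).
session.headers.update({'Authorization': 'Bearer <token>'})

group = session.post('http://localhost:4200/groups', json={
    'description': 'Building one',
    'maintainers': [],
    'rooms': [{'address': 'New room'}],
}).json()
print(session.get(f"http://localhost:4200/issues/{group['_id']}").json())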
4a20296af17eba141c9cb06b754760c4774fc76d | 5,644 | py | Python | resnet_cifar.py | idstcv/KDA | 0f509e6a90f0593fc27514dcd93a9248d80931be | ["Apache-2.0"] | null | null | null | resnet_cifar.py | idstcv/KDA | 0f509e6a90f0593fc27514dcd93a9248d80931be | ["Apache-2.0"] | null | null | null | resnet_cifar.py | idstcv/KDA | 0f509e6a90f0593fc27514dcd93a9248d80931be | ["Apache-2.0"] | null | null | null |
# modified from
# https://github.com/weiaicunzai/pytorch-cifar100/blob/master/models/resnet.py
"""resnet in pytorch
[1] Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun.
Deep Residual Learning for Image Recognition
https://arxiv.org/abs/1512.03385v1
"""
import torch
import torch.nn as nn
class BasicBlock(nn.Module):
"""Basic Block for resnet 18 and resnet 34
"""
# BasicBlock and BottleNeck blocks
# have different output sizes;
# we use the class attribute expansion
# to distinguish between them
expansion = 1
def __init__(self, in_channels, out_channels, stride=1):
super().__init__()
# residual function
self.residual_function = nn.Sequential(
nn.Conv2d(in_channels, out_channels, kernel_size=3, stride=stride, padding=1, bias=False),
nn.BatchNorm2d(out_channels),
nn.ReLU(inplace=True),
nn.Conv2d(out_channels, out_channels * BasicBlock.expansion, kernel_size=3, padding=1, bias=False),
nn.BatchNorm2d(out_channels * BasicBlock.expansion)
)
# shortcut
self.shortcut = nn.Sequential()
# the shortcut output dimension is not the same as the residual function's,
# so use a 1x1 convolution to match the dimensions
if stride != 1 or in_channels != BasicBlock.expansion * out_channels:
self.shortcut = nn.Sequential(
nn.Conv2d(in_channels, out_channels * BasicBlock.expansion, kernel_size=1, stride=stride, bias=False),
nn.BatchNorm2d(out_channels * BasicBlock.expansion)
)
def forward(self, x):
return nn.ReLU(inplace=True)(self.residual_function(x) + self.shortcut(x))
class BottleNeck(nn.Module):
"""Residual block for resnet over 50 layers
"""
expansion = 4
def __init__(self, in_channels, out_channels, stride=1):
super().__init__()
self.residual_function = nn.Sequential(
nn.Conv2d(in_channels, out_channels, kernel_size=1, bias=False),
nn.BatchNorm2d(out_channels),
nn.ReLU(inplace=True),
nn.Conv2d(out_channels, out_channels, stride=stride, kernel_size=3, padding=1, bias=False),
nn.BatchNorm2d(out_channels),
nn.ReLU(inplace=True),
nn.Conv2d(out_channels, out_channels * BottleNeck.expansion, kernel_size=1, bias=False),
nn.BatchNorm2d(out_channels * BottleNeck.expansion),
)
self.shortcut = nn.Sequential()
if stride != 1 or in_channels != out_channels * BottleNeck.expansion:
self.shortcut = nn.Sequential(
nn.Conv2d(in_channels, out_channels * BottleNeck.expansion, stride=stride, kernel_size=1, bias=False),
nn.BatchNorm2d(out_channels * BottleNeck.expansion)
)
def forward(self, x):
return nn.ReLU(inplace=True)(self.residual_function(x) + self.shortcut(x))
class ResNet(nn.Module):
def __init__(self, block, num_block, num_classes=100):
super().__init__()
self.in_channels = 64
self.conv1 = nn.Sequential(
nn.Conv2d(3, 64, kernel_size=3, padding=1, bias=False),
nn.BatchNorm2d(64),
nn.ReLU(inplace=True))
# we use a different input size than the original paper,
# so conv2_x's stride is 1
self.conv2_x = self._make_layer(block, 64, num_block[0], 1)
self.conv3_x = self._make_layer(block, 128, num_block[1], 2)
self.conv4_x = self._make_layer(block, 256, num_block[2], 2)
self.conv5_x = self._make_layer(block, 512, num_block[3], 2)
self.avg_pool = nn.AdaptiveAvgPool2d((1, 1))
self.fc = nn.Linear(512 * block.expansion, num_classes)
def _make_layer(self, block, out_channels, num_blocks, stride):
"""make resnet layers(by layer i didnt mean this 'layer' was the
same as a neuron netowork layer, ex. conv layer), one layer may
contain more than one residual block
Args:
block: block type, basic block or bottle neck block
out_channels: output depth channel number of this layer
num_blocks: how many blocks per layer
stride: the stride of the first block of this layer
Return:
return a resnet layer
"""
# we have num_blocks blocks per layer; the stride of the first block
# could be 1 or 2, while the other blocks always use stride 1
strides = [stride] + [1] * (num_blocks - 1)
layers = []
for stride in strides:
layers.append(block(self.in_channels, out_channels, stride))
self.in_channels = out_channels * block.expansion
return nn.Sequential(*layers)
def forward(self, x):
x = self.conv1(x)
x = self.conv2_x(x)
x = self.conv3_x(x)
x = self.conv4_x(x)
x = self.conv5_x(x)
x = self.avg_pool(x)
x_feat = x.view(x.size(0), -1)
x_logits = self.fc(x_feat)
return x_feat, x_logits
def resnet18(num_classes):
""" return a ResNet 18 object
"""
return ResNet(BasicBlock, [2, 2, 2, 2], num_classes)
def resnet34(num_classes):
""" return a ResNet 34 object
"""
return ResNet(BasicBlock, [3, 4, 6, 3], num_classes)
def resnet50(num_classes):
""" return a ResNet 50 object
"""
return ResNet(BottleNeck, [3, 4, 6, 3], num_classes)
def resnet101(num_classes):
""" return a ResNet 101 object
"""
return ResNet(BottleNeck, [3, 4, 23, 3], num_classes)
def resnet152(num_classes):
""" return a ResNet 152 object
"""
return ResNet(BottleNeck, [3, 8, 36, 3], num_classes)
| 34.414634 | 118 | 0.633239 |
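Editor's sketch: a quick shape check for the factories above (assumes PyTorch is installed and this file is importable as resnet_cifar):

import torch
from resnet_cifar import resnet18  # import path is an assumption

model = resnet18(num_classes=100)
x = torch.randn(2, 3, 32, 32)      # a CIFAR-sized batch
feat, logits = model(x)            # forward() returns (features, logits)
print(feat.shape, logits.shape)    # torch.Size([2, 512]) torch.Size([2, 100])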
4a202c19b784d993cdc0a2320fe78e6a029e628c | 34,014 | py | Python | envEditorModel.py | Mr-Second/EnvEditor | f24ddf2f8922e1e8f5770a407cf4a810f43bae95 | [
"MIT"
] | 11 | 2020-12-23T12:15:47.000Z | 2021-03-06T08:01:59.000Z | envEditorModel.py | Mr-Second/EnvEditor | f24ddf2f8922e1e8f5770a407cf4a810f43bae95 | [
"MIT"
] | null | null | null | envEditorModel.py | Mr-Second/EnvEditor | f24ddf2f8922e1e8f5770a407cf4a810f43bae95 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'EnvEditor.ui'
#
# Created by: PyQt5 UI code generator 5.15.1
#
# WARNING: Any manual changes made to this file will be lost when pyuic5 is
# run again. Do not edit this file unless you know what you are doing.
from PyQt5 import QtCore, QtGui, QtWidgets
import icons_rc
import ctypes
ctypes.windll.shell32.SetCurrentProcessExplicitAppUserModelID("myappid")
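# Editor's note: SetCurrentProcessExplicitAppUserModelID gives this process its
# own taskbar identity on Windows, so the window shows the app icon set below
# instead of the Python interpreter's. A guarded variant (an assumption, not
# part of the original file) would be:
#     import sys
#     if sys.platform == 'win32':
#         ctypes.windll.shell32.SetCurrentProcessExplicitAppUserModelID("myappid")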
class Ui_Form(object):
def setupUi(self, Form):
Form.setObjectName("Form")
Form.resize(799, 595)
icon = QtGui.QIcon()
icon.addPixmap(QtGui.QPixmap(":/imgs/icon.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
Form.setWindowIcon(icon)
self.horizontalLayout = QtWidgets.QHBoxLayout(Form)
self.horizontalLayout.setContentsMargins(0, 0, 0, 0)
self.horizontalLayout.setObjectName("horizontalLayout")
self.frame = QtWidgets.QFrame(Form)
self.frame.setStyleSheet("* {\n"
" font: 8pt \"Source Code Pro for Powerline\";\n"
"}\n"
"#frame {\n"
"background-color: qlineargradient(spread:pad, x1:0, y1:1, x2:1, y2:0, stop:0 rgba("
"91, 77, 163, 255), stop:1 rgba(112, 53, 160, 255));\n "
" border: 1px solid black;\n"
" border-radius: 5px;\n"
"}\n"
"\n"
"#closeBtn {\n"
" border-radius: 10px;\n"
" background-color: rgba(255, 0, 0, 255);\n"
"}\n"
"\n"
"#closeBtn:hover {\n"
" background-color: rgba(255, 0, 0, 150);\n"
"}\n"
"\n"
"#closeBtn:pressed {\n"
" background-color: rgba(255, 0, 0, 80);\n"
"}\n"
"\n"
"#maxBtn{\n"
" border-radius: 10px;\n"
" background-color: rgba(231, 231, 115, 255);\n"
"}\n"
"\n"
"#maxBtn:hover{\n"
" background-color: rgba(231, 231, 115, 150);\n"
"}\n"
"\n"
"#maxBtn:pressed{\n"
" background-color: rgba(231, 231, 115, 80);\n"
"}\n"
"\n"
"#minBtn {\n"
" border-radius: 10px;\n"
" background-color: rgba(77, 167, 219, 255);\n"
"}\n"
"\n"
"#minBtn:hover {\n"
" background-color: rgba(77, 167, 219, 150);\n"
"}\n"
"\n"
"#minBtn:pressed {\n"
" background-color: rgba(77, 167, 219, 80);\n"
"}\n"
"\n"
"QTreeWidget {\n"
" background-color: rgba(255, 255, 255, 0);\n"
"}\n"
"QTreeView::branch:has-children:!has-siblings:closed,"
"QTreeView::branch:closed:has-children:has-siblings{\n "
" border-image: none;\n"
" image: url(:/imgs/plus.png);\n"
"}\n"
"\n"
"QTreeView::branch:open:has-children:!has-siblings,QTreeView::branch:open:has-children:has-siblings{\n"
" border-image: none;\n"
" image: url(:/imgs/minus.png);\n"
"}\n"
"\n"
"#expandBtn {\n"
" border-radius: 20px;\n"
" border-width: 6px;\n"
" border-image: url(:/imgs/expand.png);\n"
" background-color: rgba(170, 85, 255, 50);\n"
"}\n"
"#expandBtn:hover {\n"
" background-color: rgba(170, 85, 255, 100);\n"
" border-image: url(:/imgs/expand_hover.png);\n"
"}\n"
"\n"
"#expandBtn:pressed {\n"
" background-color: rgba(170, 85, 255, 200);\n"
" border-image: url(:/imgs/expand_pressed.png);\n"
"}\n"
"\n"
"#narrowBtn {\n"
" border-radius: 20px;\n"
" border-width: 6px;\n"
" border-image: url(:/imgs/narrow.png);\n"
" background-color: rgba(170, 85, 255, 50);\n"
"}\n"
"\n"
"#narrowBtn:hover {\n"
" border-image: url(:/imgs/narrow_hover.png);\n"
" background-color: rgba(170, 85, 255, 100);\n"
"}\n"
"\n"
"#narrowBtn:pressed {\n"
" border-image: url(:/imgs/narrow_pressed.png);\n"
" background-color: rgba(170, 85, 255, 200);\n"
"}\n"
"\n"
"\n"
"QTabBar::tab {\n"
" height:20px;\n"
" border: 1px solid black;\n"
" background-color: rgba(255, 255, 255, 0);\n"
" padding: 2px;\n"
"}\n"
"\n"
"QTabBar::tab:selected {\n"
" background-color: rgba(255, 255, 255, 200);\n"
" \n"
"}\n"
"QTabBar::tab:!selected{\n"
" background-color: rgba(255, 255, 255, 100); \n"
"}\n"
"\n"
"QTabWidget::pane {\n"
" border: 1px solid black;\n"
" border-bottom-left-radius: 5px;\n"
" border-bottom-right-radius: 5px;\n"
" background-color: rgba(255, 255, 255, 0);\n"
"}\n"
"\n"
"#detailsTab {\n"
" border: none; \n"
" gridline-color:rgb(207, 207, 207);\n"
" font: 10pt \"华文楷体\";\n"
" color: rgb(216, 216, 216);\n"
"}\n"
"\n"
"#hintTab {\n"
" border: none;\n"
" color: rgb(216, 216, 216);\n"
"}\n"
"\n"
"#hintTab::item {\n"
" border-bottom: 1px solid rgba(207, 207, 207, 100)\n"
"}\n"
"\n"
"#machineLabel {\n"
" color: rgb(216, 216, 216);\n"
" font-size: 12px;\n"
"}\n"
"\n"
"#userLabel {\n"
" color: rgb(216, 216, 216);\n"
" font-size: 12px;\n"
"}\n"
"\n"
"#computerNameLabel {\n"
" color: rgb(216, 216, 216);\n"
"}\n"
"\n"
"#userNameLabel {\n"
" color: rgb(216, 216, 216);\n"
"}\n"
"\n"
"#addBtn {\n"
" border-image: url(:/imgs/add.png);\n"
" background-color: rgba(255, 255, 255, 0);\n"
"}\n"
"#addBtn:hover {\n"
" border-image: url(:/imgs/add_hover.png);\n"
"}\n"
"#addBtn:pressed {\n"
" border-image: url(:/imgs/add pressed.png);\n"
"}\n"
"#addBtn:disabled {\n"
" border-image: url(:/imgs/add_disable.png);\n"
"}\n"
"\n"
"#delBtn {\n"
" border-image: url(:/imgs/delete.png);\n"
" background-color: rgba(255, 255, 255, 0);\n"
"}\n"
"\n"
"#delBtn:hover {\n"
" border-image: url(:/imgs/delete_hover.png);\n"
"}\n"
"#delBtn:pressed {\n"
" border-image: url(:/imgs/delete_pressed.png);\n"
"}\n"
"#delBtn:disabled {\n"
" border-image: url(:/imgs/delete_disabled.png);\n"
"}\n"
"\n"
"\n"
"#modifyBtn {\n"
" border-image: url(:/imgs/edit.png);\n"
" background-color: rgba(255, 255, 255, 0);\n"
"}\n"
"#modifyBtn:hover {\n"
" border-image: url(:/imgs/edit_hover.png);\n"
"}\n"
"#modifyBtn:pressed {\n"
" border-image: url(:/imgs/edit_pressed.png);\n"
"}\n"
"#modifyBtn:disabled {\n"
" border-image: url(:/imgs/edit_disabled.png);\n"
"}\n"
"#exportBtn {\n"
" border-image: url(:/imgs/export.png);\n"
"}\n"
"#exportBtn:hover {\n"
" border-image: url(:/imgs/export_hover.png);\n"
"}\n"
"#exportBtn:pressed {\n"
" border-image: url(:/imgs/export_pressed.png);\n"
"}\n"
"#infoBtn {\n"
" border-image: url(:/imgs/info.png);\n"
"}\n"
"#infoBtn:hover {\n"
" border-image: url(:/imgs/info_hover.png);\n"
"}\n"
"#infoBtn:pressed {\n"
" border-image: url(:/imgs/info_pressed.png);\n"
"}")
self.frame.setFrameShape(QtWidgets.QFrame.StyledPanel)
self.frame.setFrameShadow(QtWidgets.QFrame.Raised)
self.frame.setObjectName("frame")
self.horizontalLayout_8 = QtWidgets.QHBoxLayout(self.frame)
self.horizontalLayout_8.setContentsMargins(0, 0, 0, 0)
self.horizontalLayout_8.setSpacing(2)
self.horizontalLayout_8.setObjectName("horizontalLayout_8")
self.verticalLayout_4 = QtWidgets.QVBoxLayout()
self.verticalLayout_4.setSpacing(0)
self.verticalLayout_4.setObjectName("verticalLayout_4")
self.horizontalLayout_3 = QtWidgets.QHBoxLayout()
self.horizontalLayout_3.setContentsMargins(-1, -1, 2, 5)
self.horizontalLayout_3.setObjectName("horizontalLayout_3")
spacerItem = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.horizontalLayout_3.addItem(spacerItem)
self.horizontalLayout_2 = QtWidgets.QHBoxLayout()
self.horizontalLayout_2.setSpacing(1)
self.horizontalLayout_2.setObjectName("horizontalLayout_2")
self.minBtn = QtWidgets.QPushButton(self.frame)
self.minBtn.setMinimumSize(QtCore.QSize(20, 20))
self.minBtn.setMaximumSize(QtCore.QSize(20, 20))
self.minBtn.setText("")
self.minBtn.setObjectName("minBtn")
self.horizontalLayout_2.addWidget(self.minBtn)
self.maxBtn = QtWidgets.QPushButton(self.frame)
self.maxBtn.setMinimumSize(QtCore.QSize(20, 20))
self.maxBtn.setMaximumSize(QtCore.QSize(20, 20))
self.maxBtn.setText("")
self.maxBtn.setObjectName("maxBtn")
self.horizontalLayout_2.addWidget(self.maxBtn)
self.closeBtn = QtWidgets.QPushButton(self.frame)
self.closeBtn.setMinimumSize(QtCore.QSize(20, 20))
self.closeBtn.setMaximumSize(QtCore.QSize(20, 20))
self.closeBtn.setFocusPolicy(QtCore.Qt.ClickFocus)
self.closeBtn.setText("")
self.closeBtn.setObjectName("closeBtn")
self.horizontalLayout_2.addWidget(self.closeBtn)
self.horizontalLayout_3.addLayout(self.horizontalLayout_2)
self.verticalLayout_4.addLayout(self.horizontalLayout_3)
self.horizontalLayout_7 = QtWidgets.QHBoxLayout()
self.horizontalLayout_7.setSpacing(2)
self.horizontalLayout_7.setObjectName("horizontalLayout_7")
spacerItem1 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.horizontalLayout_7.addItem(spacerItem1)
self.addBtn = QtWidgets.QPushButton(self.frame)
self.addBtn.setEnabled(False)
self.addBtn.setMinimumSize(QtCore.QSize(30, 30))
self.addBtn.setMaximumSize(QtCore.QSize(30, 30))
self.addBtn.setText("")
self.addBtn.setIconSize(QtCore.QSize(20, 20))
self.addBtn.setObjectName("addBtn")
self.horizontalLayout_7.addWidget(self.addBtn)
spacerItem2 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.horizontalLayout_7.addItem(spacerItem2)
self.delBtn = QtWidgets.QPushButton(self.frame)
self.delBtn.setEnabled(False)
self.delBtn.setMinimumSize(QtCore.QSize(30, 30))
self.delBtn.setMaximumSize(QtCore.QSize(30, 30))
self.delBtn.setText("")
self.delBtn.setIconSize(QtCore.QSize(20, 20))
self.delBtn.setObjectName("delBtn")
self.horizontalLayout_7.addWidget(self.delBtn)
spacerItem3 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.horizontalLayout_7.addItem(spacerItem3)
self.modifyBtn = QtWidgets.QPushButton(self.frame)
self.modifyBtn.setEnabled(False)
self.modifyBtn.setMinimumSize(QtCore.QSize(30, 30))
self.modifyBtn.setMaximumSize(QtCore.QSize(30, 30))
self.modifyBtn.setText("")
self.modifyBtn.setIconSize(QtCore.QSize(20, 20))
self.modifyBtn.setObjectName("modifyBtn")
self.horizontalLayout_7.addWidget(self.modifyBtn)
spacerItem4 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.horizontalLayout_7.addItem(spacerItem4)
self.exportBtn = QtWidgets.QPushButton(self.frame)
self.exportBtn.setMinimumSize(QtCore.QSize(30, 30))
self.exportBtn.setMaximumSize(QtCore.QSize(30, 30))
self.exportBtn.setText("")
self.exportBtn.setObjectName("exportBtn")
self.horizontalLayout_7.addWidget(self.exportBtn)
spacerItem5 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.horizontalLayout_7.addItem(spacerItem5)
self.infoBtn = QtWidgets.QPushButton(self.frame)
self.infoBtn.setMinimumSize(QtCore.QSize(33, 0))
self.infoBtn.setMaximumSize(QtCore.QSize(36, 30))
self.infoBtn.setText("")
self.infoBtn.setObjectName("infoBtn")
self.horizontalLayout_7.addWidget(self.infoBtn)
spacerItem6 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.horizontalLayout_7.addItem(spacerItem6)
self.computerNameLabel = QtWidgets.QLabel(self.frame)
self.computerNameLabel.setAlignment(QtCore.Qt.AlignCenter)
self.computerNameLabel.setObjectName("computerNameLabel")
self.horizontalLayout_7.addWidget(self.computerNameLabel)
self.computerBox = QtWidgets.QComboBox(self.frame)
self.computerBox.setObjectName("computerBox")
self.horizontalLayout_7.addWidget(self.computerBox)
self.userNameLabel = QtWidgets.QLabel(self.frame)
self.userNameLabel.setAlignment(QtCore.Qt.AlignCenter)
self.userNameLabel.setObjectName("userNameLabel")
self.horizontalLayout_7.addWidget(self.userNameLabel)
self.userBox = QtWidgets.QComboBox(self.frame)
self.userBox.setObjectName("userBox")
self.horizontalLayout_7.addWidget(self.userBox)
spacerItem7 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.horizontalLayout_7.addItem(spacerItem7)
self.horizontalLayout_7.setStretch(1, 1)
self.horizontalLayout_7.setStretch(3, 1)
self.horizontalLayout_7.setStretch(5, 1)
self.horizontalLayout_7.setStretch(11, 1)
self.horizontalLayout_7.setStretch(12, 1)
self.horizontalLayout_7.setStretch(13, 1)
self.horizontalLayout_7.setStretch(14, 1)
self.horizontalLayout_7.setStretch(15, 1)
self.verticalLayout_4.addLayout(self.horizontalLayout_7)
self.horizontalLayout_4 = QtWidgets.QHBoxLayout()
self.horizontalLayout_4.setContentsMargins(-1, 10, -1, -1)
self.horizontalLayout_4.setObjectName("horizontalLayout_4")
self.verticalLayout = QtWidgets.QVBoxLayout()
self.verticalLayout.setObjectName("verticalLayout")
self.machineLabel = QtWidgets.QLabel(self.frame)
self.machineLabel.setAlignment(QtCore.Qt.AlignCenter)
self.machineLabel.setObjectName("machineLabel")
self.verticalLayout.addWidget(self.machineLabel)
self.machineTree = QtWidgets.QTreeWidget(self.frame)
self.machineTree.setFocusPolicy(QtCore.Qt.ClickFocus)
self.machineTree.setHorizontalScrollMode(QtWidgets.QAbstractItemView.ScrollPerItem)
self.machineTree.setAnimated(True)
self.machineTree.setObjectName("machineTree")
self.machineTree.headerItem().setText(0, "Machine")
self.machineTree.headerItem().setTextAlignment(0, QtCore.Qt.AlignCenter)
icon1 = QtGui.QIcon()
icon1.addPixmap(QtGui.QPixmap(":/imgs/machine.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.machineTree.headerItem().setIcon(0, icon1)
self.machineTree.header().setVisible(True)
self.verticalLayout.addWidget(self.machineTree)
self.horizontalLayout_4.addLayout(self.verticalLayout)
self.verticalLayout_3 = QtWidgets.QVBoxLayout()
self.verticalLayout_3.setObjectName("verticalLayout_3")
spacerItem8 = QtWidgets.QSpacerItem(20, 40, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)
self.verticalLayout_3.addItem(spacerItem8)
self.expandBtn = QtWidgets.QPushButton(self.frame)
self.expandBtn.setMinimumSize(QtCore.QSize(40, 40))
self.expandBtn.setText("")
self.expandBtn.setObjectName("expandBtn")
self.verticalLayout_3.addWidget(self.expandBtn)
spacerItem9 = QtWidgets.QSpacerItem(20, 40, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)
self.verticalLayout_3.addItem(spacerItem9)
self.narrowBtn = QtWidgets.QPushButton(self.frame)
self.narrowBtn.setMinimumSize(QtCore.QSize(40, 40))
self.narrowBtn.setText("")
self.narrowBtn.setObjectName("narrowBtn")
self.verticalLayout_3.addWidget(self.narrowBtn)
spacerItem10 = QtWidgets.QSpacerItem(20, 40, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)
self.verticalLayout_3.addItem(spacerItem10)
self.verticalLayout_3.setStretch(0, 3)
self.verticalLayout_3.setStretch(1, 1)
self.verticalLayout_3.setStretch(2, 3)
self.verticalLayout_3.setStretch(3, 1)
self.verticalLayout_3.setStretch(4, 3)
self.horizontalLayout_4.addLayout(self.verticalLayout_3)
self.verticalLayout_2 = QtWidgets.QVBoxLayout()
self.verticalLayout_2.setObjectName("verticalLayout_2")
self.userLabel = QtWidgets.QLabel(self.frame)
self.userLabel.setAlignment(QtCore.Qt.AlignCenter)
self.userLabel.setObjectName("userLabel")
self.verticalLayout_2.addWidget(self.userLabel)
self.userTree = QtWidgets.QTreeWidget(self.frame)
self.userTree.setHorizontalScrollMode(QtWidgets.QAbstractItemView.ScrollPerItem)
self.userTree.setAnimated(True)
self.userTree.setObjectName("userTree")
self.userTree.headerItem().setText(0, "User")
self.userTree.headerItem().setTextAlignment(0, QtCore.Qt.AlignCenter)
icon2 = QtGui.QIcon()
icon2.addPixmap(QtGui.QPixmap(":/imgs/user.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.userTree.headerItem().setIcon(0, icon2)
self.verticalLayout_2.addWidget(self.userTree)
self.horizontalLayout_4.addLayout(self.verticalLayout_2)
self.verticalLayout_4.addLayout(self.horizontalLayout_4)
self.tabWidget = QtWidgets.QTabWidget(self.frame)
self.tabWidget.setStyleSheet("background: rgba(255, 255, 255, 0);")
self.tabWidget.setObjectName("tabWidget")
self.Details = QtWidgets.QWidget()
self.Details.setObjectName("Details")
self.horizontalLayout_5 = QtWidgets.QHBoxLayout(self.Details)
self.horizontalLayout_5.setContentsMargins(0, 0, 0, 0)
self.horizontalLayout_5.setSpacing(1)
self.horizontalLayout_5.setObjectName("horizontalLayout_5")
self.detailsTab = QtWidgets.QTableWidget(self.Details)
self.detailsTab.setFocusPolicy(QtCore.Qt.NoFocus)
self.detailsTab.setAutoScroll(True)
self.detailsTab.setEditTriggers(QtWidgets.QAbstractItemView.NoEditTriggers)
self.detailsTab.setSelectionMode(QtWidgets.QAbstractItemView.NoSelection)
self.detailsTab.setShowGrid(True)
self.detailsTab.setGridStyle(QtCore.Qt.SolidLine)
self.detailsTab.setWordWrap(True)
self.detailsTab.setCornerButtonEnabled(True)
self.detailsTab.setObjectName("detailsTab")
self.detailsTab.setColumnCount(2)
self.detailsTab.setRowCount(4)
item = QtWidgets.QTableWidgetItem()
self.detailsTab.setVerticalHeaderItem(0, item)
item = QtWidgets.QTableWidgetItem()
self.detailsTab.setVerticalHeaderItem(1, item)
item = QtWidgets.QTableWidgetItem()
self.detailsTab.setVerticalHeaderItem(2, item)
item = QtWidgets.QTableWidgetItem()
self.detailsTab.setVerticalHeaderItem(3, item)
item = QtWidgets.QTableWidgetItem()
item.setTextAlignment(QtCore.Qt.AlignCenter)
self.detailsTab.setHorizontalHeaderItem(0, item)
item = QtWidgets.QTableWidgetItem()
self.detailsTab.setHorizontalHeaderItem(1, item)
item = QtWidgets.QTableWidgetItem()
item.setTextAlignment(QtCore.Qt.AlignCenter)
self.detailsTab.setItem(0, 0, item)
item = QtWidgets.QTableWidgetItem()
item.setTextAlignment(QtCore.Qt.AlignCenter)
self.detailsTab.setItem(1, 0, item)
item = QtWidgets.QTableWidgetItem()
self.detailsTab.setItem(1, 1, item)
item = QtWidgets.QTableWidgetItem()
item.setTextAlignment(QtCore.Qt.AlignCenter)
self.detailsTab.setItem(2, 0, item)
item = QtWidgets.QTableWidgetItem()
self.detailsTab.setItem(2, 1, item)
item = QtWidgets.QTableWidgetItem()
item.setTextAlignment(QtCore.Qt.AlignCenter)
self.detailsTab.setItem(3, 0, item)
self.detailsTab.horizontalHeader().setVisible(False)
self.detailsTab.horizontalHeader().setCascadingSectionResizes(False)
self.detailsTab.horizontalHeader().setStretchLastSection(True)
self.detailsTab.verticalHeader().setVisible(False)
self.detailsTab.verticalHeader().setHighlightSections(False)
self.horizontalLayout_5.addWidget(self.detailsTab)
self.tabWidget.addTab(self.Details, "")
self.Hint = QtWidgets.QWidget()
self.Hint.setObjectName("Hint")
self.horizontalLayout_6 = QtWidgets.QHBoxLayout(self.Hint)
self.horizontalLayout_6.setContentsMargins(0, 0, 0, 0)
self.horizontalLayout_6.setSpacing(1)
self.horizontalLayout_6.setObjectName("horizontalLayout_6")
self.hintTab = QtWidgets.QListWidget(self.Hint)
self.hintTab.setFocusPolicy(QtCore.Qt.NoFocus)
self.hintTab.setContextMenuPolicy(QtCore.Qt.DefaultContextMenu)
self.hintTab.setEditTriggers(QtWidgets.QAbstractItemView.NoEditTriggers)
self.hintTab.setSelectionMode(QtWidgets.QAbstractItemView.NoSelection)
self.hintTab.setObjectName("hintTab")
item = QtWidgets.QListWidgetItem()
icon3 = QtGui.QIcon()
icon3.addPixmap(QtGui.QPixmap(":/imgs/stringLabel.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
item.setIcon(icon3)
self.hintTab.addItem(item)
item = QtWidgets.QListWidgetItem()
icon4 = QtGui.QIcon()
icon4.addPixmap(QtGui.QPixmap(":/imgs/expandStringLabel.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
item.setIcon(icon4)
self.hintTab.addItem(item)
item = QtWidgets.QListWidgetItem()
icon5 = QtGui.QIcon()
icon5.addPixmap(QtGui.QPixmap(":/imgs/readOnlyStringLabel.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
item.setIcon(icon5)
self.hintTab.addItem(item)
item = QtWidgets.QListWidgetItem()
icon6 = QtGui.QIcon()
icon6.addPixmap(QtGui.QPixmap(":/imgs/error.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
item.setIcon(icon6)
self.hintTab.addItem(item)
self.horizontalLayout_6.addWidget(self.hintTab)
self.tabWidget.addTab(self.Hint, "")
self.verticalLayout_4.addWidget(self.tabWidget)
self.verticalLayout_4.setStretch(1, 1)
self.verticalLayout_4.setStretch(2, 10)
self.verticalLayout_4.setStretch(3, 4)
self.horizontalLayout_8.addLayout(self.verticalLayout_4)
self.horizontalLayout.addWidget(self.frame)
self.retranslateUi(Form)
self.tabWidget.setCurrentIndex(0)
self.expandBtn.clicked.connect(self.userTree.expandAll)
self.narrowBtn.clicked.connect(self.machineTree.collapseAll)
self.narrowBtn.clicked.connect(self.userTree.collapseAll)
self.closeBtn.clicked.connect(Form.close)
self.minBtn.clicked.connect(Form.showMinimized)
self.expandBtn.clicked.connect(self.machineTree.expandAll)
self.maxBtn.clicked.connect(Form.setWindowsState)
self.machineTree.itemClicked['QTreeWidgetItem*', 'int'].connect(Form.receiveActivatedSignal)
self.userTree.itemClicked['QTreeWidgetItem*', 'int'].connect(Form.receiveActivatedSignal)
self.addBtn.clicked.connect(Form.createNewItem)
self.delBtn.clicked.connect(Form.deleteItem)
self.modifyBtn.clicked.connect(Form.modifyItem)
self.exportBtn.clicked.connect(Form.exportRegistryTable)
self.infoBtn.clicked.connect(Form.showInformation)
QtCore.QMetaObject.connectSlotsByName(Form)
def retranslateUi(self, Form):
_translate = QtCore.QCoreApplication.translate
Form.setWindowTitle(_translate("Form", "Form"))
self.addBtn.setToolTip(_translate("Form",
"<html><head/><body><p><span style=\" font-weight:600; text-decoration: underline;\">Create</span> an environment variable</p></body></html>"))
self.delBtn.setToolTip(_translate("Form",
"<html><head/><body><p><span style=\" font-weight:600; text-decoration: underline;\">Delete</span> the environment variable</p></body></html>"))
self.modifyBtn.setToolTip(_translate("Form",
"<html><head/><body><p><span style=\" font-weight:600; text-decoration: underline;\">Edit</span> the environment variable</p></body></html>"))
self.exportBtn.setToolTip(_translate("Form",
"<html><head/><body><p><span style=\" font-weight:600; text-decoration: underline;\">Export</span> the current environment variable</p></body></html>"))
self.infoBtn.setToolTip(_translate("Form",
"<html><head/><body><p><span style=\" font-weight:600; text-decoration: underline;\">Get Information</span> of the software</p></body></html>"))
        self.computerNameLabel.setText(_translate("Form", "ComputerName"))
self.userNameLabel.setText(_translate("Form", "User"))
self.machineLabel.setText(_translate("Form", "Machine Environment Variables"))
self.expandBtn.setToolTip(_translate("Form",
"<html><head/><body><p><span style=\" font-weight:600; text-decoration: underline;\">expand</span> all environment variables</p></body></html>"))
self.narrowBtn.setToolTip(_translate("Form",
"<html><head/><body><p><span style=\" font-weight:600; text-decoration: underline;\">fold</span> all environment variables</p></body></html>"))
self.userLabel.setText(_translate("Form", "User Environment Variables"))
__sortingEnabled = self.detailsTab.isSortingEnabled()
self.detailsTab.setSortingEnabled(False)
        item = self.detailsTab.item(0, 0)
        item.setText(_translate("Form", "Variable Name"))
        item = self.detailsTab.item(1, 0)
        item.setText(_translate("Form", "Type"))
        item = self.detailsTab.item(2, 0)
        item.setText(_translate("Form", "Source Value"))
        item = self.detailsTab.item(3, 0)
        item.setText(_translate("Form", "Expanded Value"))
self.detailsTab.setSortingEnabled(__sortingEnabled)
self.tabWidget.setTabText(self.tabWidget.indexOf(self.Details), _translate("Form", "Details"))
__sortingEnabled = self.hintTab.isSortingEnabled()
self.hintTab.setSortingEnabled(False)
item = self.hintTab.item(0)
item.setText(_translate("Form", "env variable is a text-string"))
item = self.hintTab.item(1)
item.setText(_translate("Form", "env variable is a compressed string"))
item = self.hintTab.item(2)
item.setText(_translate("Form", "env variable is a read-only string"))
item = self.hintTab.item(3)
item.setText(_translate("Form", "there is something wrong in env variable\'s value "))
self.hintTab.setSortingEnabled(__sortingEnabled)
self.tabWidget.setTabText(self.tabWidget.indexOf(self.Hint), _translate("Form", "Hint"))
| 58.343053 | 198 | 0.525254 |
4a202c8035f98d98cd2ab1f1b73aa7a42cbd1f17 | 256 | py | Python | Algorithms/856/best.py | M-Quadra/LeetCode-problems | 0cc100aa1e50b02df289f04fe2e0b97239eb9895 | [
"MIT"
] | null | null | null | Algorithms/856/best.py | M-Quadra/LeetCode-problems | 0cc100aa1e50b02df289f04fe2e0b97239eb9895 | [
"MIT"
] | null | null | null | Algorithms/856/best.py | M-Quadra/LeetCode-problems | 0cc100aa1e50b02df289f04fe2e0b97239eb9895 | [
"MIT"
] | null | null | null | class Solution:
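    # Editor's note: depth-counting approach. Each primitive "()" pair closed at
    # (already-decremented) depth d contributes 2**d to the score, which matches
    # the rules score("()") = 1 and score("(A)") = 2 * score(A).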
def scoreOfParentheses(self, s: str) -> int:
opt, d = 0, 0
for i, v in enumerate(s):
d += 1 if v == '(' else -1
if v == ')' and s[i-1:i+1] == '()':
opt += 1 << d
return opt | 32 | 48 | 0.402344 |
4a202f9e9fd8d09b46a5f62aff0b77f0dc581e10 | 2,364 | py | Python | data/cirq_new/cirq_program/startCirq_pragma55.py | UCLA-SEAL/QDiff | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | [
"BSD-3-Clause"
] | null | null | null | data/cirq_new/cirq_program/startCirq_pragma55.py | UCLA-SEAL/QDiff | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | [
"BSD-3-Clause"
] | null | null | null | data/cirq_new/cirq_program/startCirq_pragma55.py | UCLA-SEAL/QDiff | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 5/15/20 4:49 PM
# @File : grover.py
# qubit number=4
# total number=11
import cirq
import cirq.google as cg
from typing import Optional
import sys
from math import log2
import numpy as np
class Opty(cirq.PointOptimizer):
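    # Editor's note: this pass rewrites every CZPowGate operation into an exact CZ
    # followed by two layers of X gates on the same qubits; the X layers cancel
    # each other, so the net replacement is a plain CZ regardless of the exponent.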
def optimization_at(
self,
circuit: 'cirq.Circuit',
index: int,
op: 'cirq.Operation'
) -> Optional[cirq.PointOptimizationSummary]:
if (isinstance(op, cirq.ops.GateOperation) and isinstance(op.gate, cirq.CZPowGate)):
return cirq.PointOptimizationSummary(
clear_span=1,
clear_qubits=op.qubits,
new_operations=[
cirq.CZ(*op.qubits),
cirq.X.on_each(*op.qubits),
cirq.X.on_each(*op.qubits),
]
)
#thatsNoCode
def make_circuit(n: int, input_qubit):
c = cirq.Circuit() # circuit begin
c.append(cirq.H.on(input_qubit[0])) # number=1
c.append(cirq.H.on(input_qubit[1])) # number=2
c.append(cirq.rx(1.6147786239451536).on(input_qubit[3])) # number=5
c.append(cirq.H.on(input_qubit[2])) # number=3
c.append(cirq.H.on(input_qubit[3])) # number=4
c.append(cirq.H.on(input_qubit[0])) # number=8
c.append(cirq.CZ.on(input_qubit[1],input_qubit[0])) # number=9
c.append(cirq.H.on(input_qubit[0])) # number=10
c.append(cirq.CNOT.on(input_qubit[1],input_qubit[0])) # number=7
# circuit end
c.append(cirq.measure(*input_qubit, key='result'))
return c
def bitstring(bits):
return ''.join(str(int(b)) for b in bits)
if __name__ == '__main__':
qubit_count = 4
input_qubits = [cirq.GridQubit(i, 0) for i in range(qubit_count)]
circuit = make_circuit(qubit_count,input_qubits)
circuit = cg.optimized_for_sycamore(circuit, optimizer_type='sqrt_iswap')
circuit_sample_count =2000
simulator = cirq.Simulator()
result = simulator.run(circuit, repetitions=circuit_sample_count)
frequencies = result.histogram(key='result', fold_func=bitstring)
writefile = open("../data/startCirq_pragma55.csv","w+")
print(format(frequencies),file=writefile)
print("results end", file=writefile)
print(circuit.__len__(), file=writefile)
print(circuit,file=writefile)
writefile.close() | 30.701299 | 92 | 0.640863 |
4a202fc077193b4c50f9953dcea0180cfa1015cc | 1,394 | py | Python | pdf_bsw_gui/tests/test_plots.py | NSLS-II-PDF/pdf-bsw-gui | 6db847986d9bad6c59bdf3bca3b559959019ff46 | [
"BSD-3-Clause"
] | 3 | 2021-05-27T06:23:48.000Z | 2021-08-10T18:30:44.000Z | pdf_bsw_gui/tests/test_plots.py | NSLS-II-PDF/pdf-bsw-gui | 6db847986d9bad6c59bdf3bca3b559959019ff46 | [
"BSD-3-Clause"
] | 7 | 2021-05-06T03:15:12.000Z | 2021-07-19T16:09:07.000Z | pdf_bsw_gui/tests/test_plots.py | NSLS-II-PDF/pdf-bsw-gui | 6db847986d9bad6c59bdf3bca3b559959019ff46 | [
"BSD-3-Clause"
] | 3 | 2021-05-05T19:04:48.000Z | 2021-06-30T17:06:33.000Z | import os
import pytest
import tempfile
from pathlib import Path
from databroker._drivers.jsonl import BlueskyJSONLCatalog
from databroker.core import BlueskyRunFromGenerator
from bluesky_widgets.utils.streaming import stream_documents_into_runs
from bluesky_live.run_builder import RunBuilder
from ..kafka_previews import export_thumbnails_when_complete
@pytest.fixture(scope='module')
def catalog():
catalog = BlueskyJSONLCatalog(
f"{Path(__file__).parent.resolve()}/*.jsonl",
name='bmm')
return catalog
def test_from_config(catalog):
assert len(catalog)
@pytest.mark.parametrize(
"uid,titles",
[
("1dccff46-2576-4da2-8971-4de1ee4e98b7", ["rel_scan linescan xafs_y It: It_div_I0"]),
("d748dbdc-cec4-4211-b626-801f1799cb56", ["rel_scan linescan xafs_pitch It: It_div_I0"]),
("ac694ff6-2444-49af-8898-bfa23d99c28c", ["scan_nd xafs transmission"]),
]
)
def test_plots(catalog, uid, titles):
plotter = stream_documents_into_runs(export_thumbnails_when_complete)
for name, doc in catalog[uid].canonical(fill='no'):
plotter(name, doc)
for title in titles:
plot_file = os.path.join(tempfile.gettempdir(),"ariadne",
uid, f"{title}.png")
assert Path(plot_file).exists()
# Might need to remove the plots after the test.
# os.remove(plot_file)
| 30.304348 | 97 | 0.705882 |
4a20301b3b9eb5f5211c10ad9ba42bd662953343 | 981 | py | Python | src/sovereign/utils/templates.py | bochuxt/envoy-control-plane-python3 | 6d63ad6e1ecff5365bb571f0021951b066f8e270 | [
"Apache-2.0"
] | 1 | 2020-07-08T19:37:09.000Z | 2020-07-08T19:37:09.000Z | src/sovereign/utils/templates.py | bochuxt/envoy-control-plane-python3 | 6d63ad6e1ecff5365bb571f0021951b066f8e270 | [
"Apache-2.0"
] | null | null | null | src/sovereign/utils/templates.py | bochuxt/envoy-control-plane-python3 | 6d63ad6e1ecff5365bb571f0021951b066f8e270 | [
"Apache-2.0"
] | null | null | null | from socket import gethostbyname_ex
from socket import gaierror as dns_error
from starlette.exceptions import HTTPException
from sovereign import config
from sovereign.decorators import memoize
from sovereign.statistics import stats
@memoize(5)
def resolve(address):
try:
with stats.timed('dns.resolve_ms', tags=[f'address:{address}']):
_, _, addresses = gethostbyname_ex(address)
except dns_error:
raise HTTPException(
status_code=500,
detail=f'Failed to resolve DNS hostname: {address}'
)
else:
return addresses
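# Illustrative usage (editor's addition; hostname and result are hypothetical):
#   resolve("example.com")  # -> ["93.184.216.34"], memoized for 5 seconds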
def healthchecks_enabled(healthchecks):
for healthcheck in healthchecks:
if healthcheck.get('path') in ('no', False):
return False
return True
def upstream_requires_tls(cluster):
for host in cluster.get('hosts', []):
if '443' in str(host.get('port')):
return True
return False
def list_regions():
return config.regions
| 25.153846 | 72 | 0.674822 |
4a2031771e441507455afdee349209c8892582a9 | 3,093 | py | Python | misc/loss.py | debayan/dsve-loc | 21b1e1837668b6daa0881514d0756e9bec039fcb | [
"BSD-3-Clause-Clear"
] | 56 | 2018-12-13T20:41:30.000Z | 2022-01-03T12:12:27.000Z | misc/loss.py | debayan/dsve-loc | 21b1e1837668b6daa0881514d0756e9bec039fcb | [
"BSD-3-Clause-Clear"
] | 17 | 2018-12-18T10:55:59.000Z | 2021-03-08T09:54:32.000Z | misc/loss.py | debayan/dsve-loc | 21b1e1837668b6daa0881514d0756e9bec039fcb | [
"BSD-3-Clause-Clear"
] | 22 | 2018-12-14T04:13:58.000Z | 2022-01-21T03:44:47.000Z | """
****************** COPYRIGHT AND CONFIDENTIALITY INFORMATION ******************
Copyright (c) 2018 [Thomson Licensing]
All Rights Reserved
This program contains proprietary information which is a trade secret/business \
secret of [Thomson Licensing] and is protected, even if unpublished, under \
applicable Copyright laws (including French droit d'auteur) and/or may be \
subject to one or more patent(s).
Recipient is to retain this program in confidence and is not permitted to use \
or make copies thereof other than as permitted in a written agreement with \
[Thomson Licensing] unless otherwise expressly allowed by applicable laws or \
by [Thomson Licensing] under express agreement.
Thomson Licensing is a company of the group TECHNICOLOR
*******************************************************************************
This script permits one to reproduce the training and experiments of:
Engilberge, M., Chevallier, L., Pérez, P., & Cord, M. (2018, April).
Finding beans in burgers: Deep semantic-visual embedding with localization.
In Proceedings of CVPR (pp. 3984-3993)
Author: Martin Engilberge
"""
import torch.nn as nn
import torch
class ContrastiveLoss(nn.Module):
def __init__(self, margin=0.2):
super(ContrastiveLoss, self).__init__()
self.margin = margin
def forward(self, imgs, caps):
scores = torch.mm(imgs, caps.t())
diag = scores.diag()
cost_s = torch.clamp((self.margin - diag).expand_as(scores) + scores, min=0)
        # compare every diagonal score to scores in its row (i.e., all
# contrastive sentences for each image)
cost_im = torch.clamp((self.margin - diag.view(-1, 1)).expand_as(scores) + scores, min=0)
# clear diagonals
diag_s = torch.diag(cost_s.diag())
diag_im = torch.diag(cost_im.diag())
cost_s = cost_s - diag_s
cost_im = cost_im - diag_im
return cost_s.sum() + cost_im.sum()
class HardNegativeContrastiveLoss(nn.Module):
def __init__(self, nmax=1, margin=0.2):
super(HardNegativeContrastiveLoss, self).__init__()
self.margin = margin
self.nmax = nmax
def forward(self, imgs, caps):
scores = torch.mm(imgs, caps.t())
diag = scores.diag()
        # Reduce the scores on the diagonal so they are not selected as hard negatives
scores = (scores - 2 * torch.diag(scores.diag()))
sorted_cap, _ = torch.sort(scores, 0, descending=True)
sorted_img, _ = torch.sort(scores, 1, descending=True)
# Selecting the nmax hardest negative examples
max_c = sorted_cap[:self.nmax, :]
max_i = sorted_img[:, :self.nmax]
# Margin based loss with hard negative instead of random negative
neg_cap = torch.sum(torch.clamp(max_c + (self.margin - diag).view(1, -1).expand_as(max_c), min=0))
neg_img = torch.sum(torch.clamp(max_i + (self.margin - diag).view(-1, 1).expand_as(max_i), min=0))
loss = neg_cap + neg_img
return loss
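# --- Editor's addition: a minimal, hedged usage sketch, not part of the original
# file. It assumes batch-aligned image/caption embeddings of equal dimension; the
# random values below are purely illustrative.
if __name__ == '__main__':
    imgs = nn.functional.normalize(torch.randn(8, 256), dim=1)
    caps = nn.functional.normalize(torch.randn(8, 256), dim=1)
    print(ContrastiveLoss(margin=0.2)(imgs, caps))
    print(HardNegativeContrastiveLoss(nmax=1, margin=0.2)(imgs, caps))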
| 39.653846 | 107 | 0.638215 |
4a2032b065147e1ac68f7839d5afcb48b163174b | 842 | py | Python | list-functions.py | CODEVELOPER1/PYTHON-FOLDER | 721870ad96b020fb02177a01fdd04ac730105995 | [
"MIT"
] | null | null | null | list-functions.py | CODEVELOPER1/PYTHON-FOLDER | 721870ad96b020fb02177a01fdd04ac730105995 | [
"MIT"
] | null | null | null | list-functions.py | CODEVELOPER1/PYTHON-FOLDER | 721870ad96b020fb02177a01fdd04ac730105995 | [
"MIT"
] | null | null | null | lucky_numbers = [4, 9, 15, 6, 7, 25, 16]
friends = ['Sam', 'Josh', 'Samantha', 'Jimmy', 'Jimmy', 'GodtheSon']
friends.extend(lucky_numbers) # appends every item of another list onto the end of this one
#friends.append('Creed') #adds an element to the end of the list
#friends.insert(1, 'James') # takes 2 parameters; the 1st is the index
#friends.remove("Josh") # removes an element from the list
#friends.clear() # empties the list
#friends.pop() #pops an item off the list, removes the last element in the list
#friends.index("Josh") #tells me the index of the value in the list, tells me if a specific element is in the list
#print(friends.count("Jimmy")) # counts the similar elements in the list
#friends.sort() # sorts the list in asc order
#friends2 = friends.copy() #creates another list and makes it a copy
#friends.reverse() # reverses the order of the list
print(friends)
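# A short demo of a few of the methods described above (editor's addition);
# the expected output appears in the trailing comments.
nums = [3, 1, 2]
nums.sort()
print(nums) # [1, 2, 3]
print(nums.index(2)) # 1
print(nums.count(2)) # 1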
| 52.625 | 113 | 0.724466 |
4a2032d10fd34998aaedcc809e973a52f0e78695 | 2,328 | py | Python | misc/make_changelog.py | gaoxinge/taichi | 86d403f071b8505858763d4712b37cd71b89db91 | [
"MIT"
] | 1 | 2020-11-10T07:17:01.000Z | 2020-11-10T07:17:01.000Z | misc/make_changelog.py | gaoxinge/taichi | 86d403f071b8505858763d4712b37cd71b89db91 | [
"MIT"
] | 1 | 2020-08-24T05:18:43.000Z | 2020-08-24T05:18:43.000Z | misc/make_changelog.py | gaoxinge/taichi | 86d403f071b8505858763d4712b37cd71b89db91 | [
"MIT"
] | null | null | null | # Usage: make_changelog.py [v0.x.y]
import json
import os
import sys
from git import Repo
def load_pr_tags():
this_dir = os.path.dirname(os.path.abspath(__file__))
json_path = os.path.join(this_dir, 'prtags.json')
details = {}
with open(json_path) as f:
details = json.load(f)
details['release'] = ''
return details
def main(ver=None, repo_dir='.'):
g = Repo(repo_dir)
    commits_with_tags = {tag.commit for tag in g.tags}
commits = list(g.iter_commits(ver, max_count=200))
begin, end = -1, 0
def format(c):
return f'{c.summary} (by **{c.author}**)'
notable_changes = {}
all_changes = []
details = load_pr_tags()
for i, c in enumerate(commits):
s = format(c)
if c in commits_with_tags and i > 0:
break
tags = []
while s[0] == '[':
r = s.find(']')
tag = s[1:r]
tags.append(tag)
s = s[r + 1:]
s = s.strip()
for tag in tags:
if tag.lower() in details:
if details[tag.lower()] == '':
# E.g. 'release' does not need to appear in the change log
continue
if tag[0].isupper():
tag = tag.lower()
if tag not in notable_changes:
notable_changes[tag] = []
notable_changes[tag].append(s)
else:
print(
                    f'** Warning: tag {tag.lower()} undefined in the "details" dict. Please include the tag in "details", unless the tag is a typo.'
)
all_changes.append(format(c))
res = 'Highlights:\n'
for tag in sorted(notable_changes.keys()):
res += f' - **{details[tag]}**\n'
for item in notable_changes[tag]:
res += f' - {item}\n'
res += '\nFull changelog:\n'
for c in all_changes:
res += f' - {c}\n'
return res
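# Editor's note (worked example with a hypothetical commit): a summary such as
# '[cuda] [bug] Fix a crash' is split by the while-loop above into
# tags == ['cuda', 'bug'] and s == 'Fix a crash'; each tag known to prtags.json
# then files s under that tag's human-readable heading in the highlights.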
if __name__ == '__main__':
ver = sys.argv[1] if len(sys.argv) > 1 else None
repo = sys.argv[2] if len(sys.argv) > 2 else '.'
save = sys.argv[3] if len(sys.argv) > 3 else False
res = main(ver, repo)
if save:
with open('./python/taichi/CHANGELOG.md', 'w') as f:
f.write(res)
print(res)
| 27.069767 | 150 | 0.514175 |
4a20339dc9db8a26b56ea429636417c42fcb4e40 | 3,350 | py | Python | deep_learning_v2_pytorch/intro-to-pytorch/fc_model.py | TeoZosa/deep-learning-v2-pytorch | 8e73c26f2ebf49769b798e9ff26bd90d7de69f7d | [
"Apache-2.0"
] | null | null | null | deep_learning_v2_pytorch/intro-to-pytorch/fc_model.py | TeoZosa/deep-learning-v2-pytorch | 8e73c26f2ebf49769b798e9ff26bd90d7de69f7d | [
"Apache-2.0"
] | 159 | 2021-05-07T21:34:19.000Z | 2022-03-28T13:33:29.000Z | deep_learning_v2_pytorch/intro-to-pytorch/fc_model.py | TeoZosa/deep-learning-v2-pytorch | 8e73c26f2ebf49769b798e9ff26bd90d7de69f7d | [
"Apache-2.0"
] | null | null | null | import torch
import torch.nn.functional as F
from torch import nn
class Network(nn.Module):
def __init__(self, input_size, output_size, hidden_layers, drop_p=0.5):
"""Builds a feedforward network with arbitrary hidden layers.
Arguments
---------
input_size: integer, size of the input layer
output_size: integer, size of the output layer
        hidden_layers: list of integers, the sizes of the hidden layers
        drop_p: float, dropout probability applied after each hidden layer
        """
super().__init__()
# Input to a hidden layer
self.hidden_layers = nn.ModuleList([nn.Linear(input_size, hidden_layers[0])])
# Add a variable number of more hidden layers
layer_sizes = zip(hidden_layers[:-1], hidden_layers[1:])
self.hidden_layers.extend([nn.Linear(h1, h2) for h1, h2 in layer_sizes])
self.output = nn.Linear(hidden_layers[-1], output_size)
self.dropout = nn.Dropout(p=drop_p)
def forward(self, x):
"""Forward pass through the network, returns the output logits"""
for each in self.hidden_layers:
x = F.relu(each(x))
x = self.dropout(x)
x = self.output(x)
return F.log_softmax(x, dim=1)
def validation(model, testloader, criterion):
accuracy = 0
test_loss = 0
for images, labels in testloader:
images = images.resize_(images.size()[0], 784)
output = model.forward(images)
test_loss += criterion(output, labels).item()
## Calculating the accuracy
# Model's output is log-softmax, take exponential to get the probabilities
ps = torch.exp(output)
# Class with highest probability is our predicted class, compare with true label
equality = labels.data == ps.max(1)[1]
# Accuracy is number of correct predictions divided by all predictions, just take the mean
accuracy += equality.type_as(torch.FloatTensor()).mean()
return test_loss, accuracy
def train(
model, trainloader, testloader, criterion, optimizer, epochs=5, print_every=40
):
steps = 0
running_loss = 0
for e in range(epochs):
# Model in training mode, dropout is on
model.train()
for images, labels in trainloader:
steps += 1
# Flatten images into a 784 long vector
images.resize_(images.size()[0], 784)
optimizer.zero_grad()
output = model.forward(images)
loss = criterion(output, labels)
loss.backward()
optimizer.step()
running_loss += loss.item()
if steps % print_every == 0:
# Model in inference mode, dropout is off
model.eval()
# Turn off gradients for validation, will speed up inference
with torch.no_grad():
test_loss, accuracy = validation(model, testloader, criterion)
print(
f"Epoch: {e + 1}/{epochs}.. ",
f"Training Loss: {running_loss / print_every:.3f}.. ",
f"Test Loss: {test_loss / len(testloader):.3f}.. ",
f"Test Accuracy: {accuracy / len(testloader):.3f}",
)
running_loss = 0
# Make sure dropout and grads are on for training
model.train()
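# --- Editor's addition: a minimal, hedged construction sketch, not part of the
# original file; the layer sizes are arbitrary illustrative values.
if __name__ == "__main__":
    net = Network(input_size=784, output_size=10, hidden_layers=[512, 256, 128])
    logits = net(torch.randn(4, 784))
    print(logits.shape)  # torch.Size([4, 10])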
| 32.211538 | 98 | 0.591343 |
4a2033c915fe426ab428e0ed38c6f9f1c782a81b | 3,436 | py | Python | airflow/contrib/operators/spark_sql_operator.py | vineet-rh/incubator-airflow | daa326cb4dc5e367182f344a957b979952731c73 | [
"Apache-2.0"
] | null | null | null | airflow/contrib/operators/spark_sql_operator.py | vineet-rh/incubator-airflow | daa326cb4dc5e367182f344a957b979952731c73 | [
"Apache-2.0"
] | 1 | 2016-10-19T21:49:51.000Z | 2016-10-19T21:49:51.000Z | airflow/contrib/operators/spark_sql_operator.py | isabella232/airflow | 99e7d8c61b25aeaf8a507cff0af10dc158159a4d | [
"Apache-2.0"
] | 1 | 2020-11-12T10:23:13.000Z | 2020-11-12T10:23:13.000Z | # -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from airflow.models import BaseOperator
from airflow.utils.decorators import apply_defaults
from airflow.contrib.hooks.spark_sql_hook import SparkSqlHook
class SparkSqlOperator(BaseOperator):
"""
Execute Spark SQL query
:param sql: The SQL query to execute
:type sql: str
:param conf: arbitrary Spark configuration property
:type conf: str (format: PROP=VALUE)
:param conn_id: connection_id string
:type conn_id: str
:param executor_cores: Number of cores per executor
:type executor_cores: int
:param executor_memory: Memory per executor (e.g. 1000M, 2G) (Default: 1G)
:type executor_memory: str
:param keytab: Full path to the file that contains the keytab
:type keytab: str
:param master: spark://host:port, mesos://host:port, yarn, or local
:type master: str
:param name: Name of the job
:type name: str
:param num_executors: Number of executors to launch
:type num_executors: int
:param verbose: Whether to pass the verbose flag to spark-sql
:type verbose: bool
:param yarn_queue: The YARN queue to submit to (Default: "default")
:type yarn_queue: str
"""
@apply_defaults
def __init__(self,
sql,
conf=None,
conn_id='spark_sql_default',
executor_cores=None,
executor_memory=None,
keytab=None,
master='yarn',
name='default-name',
num_executors=None,
yarn_queue='default',
*args,
**kwargs):
super(SparkSqlOperator, self).__init__(*args, **kwargs)
self._sql = sql
self._conf = conf
self._conn_id = conn_id
self._executor_cores = executor_cores
self._executor_memory = executor_memory
self._keytab = keytab
self._master = master
self._name = name
self._num_executors = num_executors
self._yarn_queue = yarn_queue
self._hook = None
def execute(self, context):
"""
Call the SparkSqlHook to run the provided sql query
"""
self._hook = SparkSqlHook(sql=self._sql,
conf=self._conf,
conn_id=self._conn_id,
executor_cores=self._executor_cores,
executor_memory=self._executor_memory,
keytab=self._keytab,
name=self._name,
num_executors=self._num_executors,
master=self._master,
yarn_queue=self._yarn_queue
)
self._hook.run_query()
def on_kill(self):
self._hook.kill()
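# Editor's note: a hedged usage sketch, not part of the original file. The DAG
# object and task_id are hypothetical; only 'sql' is required, everything else
# falls back to the defaults documented above.
#
#     spark_sql_job = SparkSqlOperator(
#         task_id='count_rows',
#         sql='SELECT COUNT(*) FROM my_table',
#         master='yarn',
#         conn_id='spark_sql_default',
#         dag=dag,
#     )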
| 37.347826 | 78 | 0.597497 |
4a2033dda453746383a4afa22125654fac1d8b11 | 501 | py | Python | 17/ex17.py | Yaro812/LearnPythonTheHardWay | 123c609edc96ae3253e177d82d4e04e3165d72f9 | [
"MIT"
] | null | null | null | 17/ex17.py | Yaro812/LearnPythonTheHardWay | 123c609edc96ae3253e177d82d4e04e3165d72f9 | [
"MIT"
] | null | null | null | 17/ex17.py | Yaro812/LearnPythonTheHardWay | 123c609edc96ae3253e177d82d4e04e3165d72f9 | [
"MIT"
] | null | null | null | from sys import argv
from os.path import exists
script, from_file, to_file = argv
print(f"Copying from {from_file} to {to_file}")
# could do with one line
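# e.g. (editor's addition): indata = open(from_file).read()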
in_file = open(from_file)
indata = in_file.read()
print(f"The input file is {len(indata)} bytes long")
print(f"Does the output file exist? {exists(to_file)}")
print("Ready, hit RETURN to continue, CTRL-C to abort.")
input()
out_file = open(to_file, 'w')
out_file.write(indata)
print("Alright, all done.")
out_file.close()
in_file.close()
| 20.04 | 56 | 0.724551 |
4a2033e6d08d444607a7967b8dadef718b5ad9ca | 2,105 | py | Python | pysper/commands/ttop.py | arvy/sperf | c047ae5f3b1daf70cc227784197e4ef37caaf556 | [
"Apache-2.0"
] | null | null | null | pysper/commands/ttop.py | arvy/sperf | c047ae5f3b1daf70cc227784197e4ef37caaf556 | [
"Apache-2.0"
] | null | null | null | pysper/commands/ttop.py | arvy/sperf | c047ae5f3b1daf70cc227784197e4ef37caaf556 | [
"Apache-2.0"
] | null | null | null | # Copyright 2020 DataStax, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""ttop command wiring"""
from pysper.commands import flags
from pysper.ttop import TTopAnalyzer
def build(subparsers):
"""adds the flags for ttop"""
ttop_parser = subparsers.add_parser('ttop',
help='Analyze ttop files',
formatter_class=flags.LineWrapRawTextHelpFormatter)
ttop_parser.add_argument("files", help="ttop file to generate report on", nargs='+')
ttop_parser.add_argument('-a', '--alloc', action='store_true', dest='alloc', default=False,
help='show allocation instead of cpu')
ttop_parser.add_argument('-c', '--collate', action='store_false', dest='collate', default=True,
help="don't collate threads (default: true)")
ttop_parser.add_argument('-k', '--top_k', type=int, nargs='?', const=None, default=None,
help="number of top threads to show (default all)")
ttop_parser.add_argument('-st', '--start', type=str, nargs='?', const=None, default=None,
help="start date/time to begin parsing")
ttop_parser.add_argument('-et', '--end', type=str, nargs='?', const=None, default=None,
help="end date/time to stop parsing")
ttop_parser.set_defaults(func=run)
def run(args):
"""run the ttop analyzer"""
analyzer = TTopAnalyzer(args.files)
analyzer.print_report(top=args.top_k, alloc=args.alloc, collate=args.collate, start=args.start, end=args.end)
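# Editor's note: a hypothetical invocation of the resulting subcommand might look
# like: sperf ttop node1-ttop.txt -k 5 -a -st "2020-01-01 00:00:00"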
| 51.341463 | 113 | 0.653207 |
4a20345f083f4a36beaf6a92a946f7853200a38f | 9,132 | py | Python | frameworks/hdfs/tests/test_ssl_kerberos_auth.py | jitupawar22/dcos-commons | 945fdce7591fd1e983eb9c9121e820d4cabe44e1 | [
"Apache-2.0"
] | 7 | 2017-11-02T05:26:40.000Z | 2020-01-27T19:33:52.000Z | frameworks/hdfs/tests/test_ssl_kerberos_auth.py | jitupawar22/dcos-commons | 945fdce7591fd1e983eb9c9121e820d4cabe44e1 | [
"Apache-2.0"
] | 14 | 2017-09-20T22:47:48.000Z | 2020-09-11T19:54:25.000Z | frameworks/hdfs/tests/test_ssl_kerberos_auth.py | AlexRogalskiy/dcos-commons | 85711f05bc94172aabb6837f9ff529721437d20c | [
"Apache-2.0"
] | 9 | 2017-11-14T19:43:07.000Z | 2022-01-06T12:44:49.000Z | import logging
import uuid
import pytest
import sdk_auth
import sdk_cmd
import sdk_hosts
import sdk_install
import sdk_marathon
import sdk_security
import sdk_utils
from security import kerberos as krb5
from security import transport_encryption
from tests import auth
from tests import config
log = logging.getLogger(__name__)
pytestmark = pytest.mark.skipif(sdk_utils.is_open_dcos(),
reason='Feature only supported in DC/OS EE')
@pytest.fixture(scope='module', autouse=True)
def service_account(configure_security):
"""
Creates service account and yields the name.
"""
try:
name = config.SERVICE_NAME
sdk_security.create_service_account(
service_account_name=name, service_account_secret=name)
# TODO(mh): Fine grained permissions needs to be addressed in DCOS-16475
sdk_cmd.run_cli(
"security org groups add_user superusers {name}".format(name=name))
yield name
finally:
sdk_security.delete_service_account(
service_account_name=name, service_account_secret=name)
@pytest.fixture(scope='module', autouse=True)
def kerberos(configure_security):
try:
principals = auth.get_service_principals(config.SERVICE_NAME, sdk_auth.REALM)
kerberos_env = sdk_auth.KerberosEnvironment()
kerberos_env.add_principals(principals)
kerberos_env.finalize()
yield kerberos_env
finally:
kerberos_env.cleanup()
@pytest.fixture(scope='module', autouse=True)
def hdfs_server(kerberos, service_account):
"""
A pytest fixture that installs a Kerberized HDFS service.
On teardown, the service is uninstalled.
"""
service_kerberos_options = {
"service": {
"name": config.SERVICE_NAME,
"service_account": service_account,
"service_account_secret": service_account,
"security": {
"kerberos": {
"enabled": True,
"kdc": {
"hostname": kerberos.get_host(),
"port": int(kerberos.get_port())
},
"realm": kerberos.get_realm(),
"keytab_secret": kerberos.get_keytab_path(),
},
"transport_encryption": {
"enabled": True
}
}
},
"hdfs": {
"security_auth_to_local": auth.get_principal_to_user_mapping()
}
}
sdk_install.uninstall(config.PACKAGE_NAME, config.SERVICE_NAME)
try:
sdk_install.install(
config.PACKAGE_NAME,
config.SERVICE_NAME,
config.DEFAULT_TASK_COUNT,
additional_options=service_kerberos_options,
timeout_seconds=30 * 60)
yield {**service_kerberos_options, **{"package_name": config.PACKAGE_NAME}}
finally:
sdk_install.uninstall(config.PACKAGE_NAME, config.SERVICE_NAME)
@pytest.fixture(scope='module', autouse=True)
def hdfs_client(kerberos, hdfs_server):
try:
client_id = "hdfs-client"
client = {
"id": client_id,
"mem": 1024,
"user": "nobody",
"container": {
"type": "MESOS",
"docker": {
"image": "elezar/hdfs-client:dev",
"forcePullImage": True
},
"volumes": [
{
"containerPath": "/hadoop-2.6.0-cdh5.9.1/hdfs.keytab",
"secret": "hdfs_keytab"
}
]
},
"secrets": {
"hdfs_keytab": {
"source": kerberos.get_keytab_path()
}
},
"networks": [
{
"mode": "host"
}
],
"env": {
"REALM": kerberos.get_realm(),
"KDC_ADDRESS": kerberos.get_kdc_address(),
"JAVA_HOME": "/usr/lib/jvm/default-java",
"KRB5_CONFIG": "/etc/krb5.conf",
"HDFS_SERVICE_NAME": config.SERVICE_NAME,
}
}
sdk_marathon.install_app(client)
krb5.write_krb5_config_file(client_id, "/etc/krb5.conf", kerberos)
dcos_ca_bundle = transport_encryption.fetch_dcos_ca_bundle(client_id)
yield {**client, **{"dcos_ca_bundle": dcos_ca_bundle}}
finally:
sdk_marathon.destroy_app(client_id)
# TODO(elezar) Is there a better way to determine this?
DEFAULT_JOURNAL_NODE_TLS_PORT = 8481
DEFAULT_NAME_NODE_TLS_PORT = 9003
DEFAULT_DATA_NODE_TLS_PORT = 9006
@pytest.mark.tls
@pytest.mark.sanity
@pytest.mark.dcos_min_version('1.10')
@sdk_utils.dcos_ee_only
@pytest.mark.parametrize("node_type,port", [
('journal', DEFAULT_JOURNAL_NODE_TLS_PORT),
('name', DEFAULT_NAME_NODE_TLS_PORT),
('data', DEFAULT_DATA_NODE_TLS_PORT),
])
def test_verify_https_ports(hdfs_client, node_type, port):
"""
Verify that HTTPS port is open name, journal and data node types.
"""
task_id = "{}-0-node".format(node_type)
host = sdk_hosts.autoip_host(
config.SERVICE_NAME, task_id, port)
cmd = ["curl", "-v",
"--cacert", hdfs_client["dcos_ca_bundle"],
"https://{host}".format(host=host), ]
rc, stdout, stderr = sdk_cmd.task_exec(hdfs_client["id"], " ".join(cmd))
assert not rc
assert "SSL connection using TLS1.2 / ECDHE_RSA_AES_128_GCM_SHA256" in stderr
assert "server certificate verification OK" in stderr
assert "common name: {}.{} (matched)".format(task_id, config.SERVICE_NAME) in stderr
# In the Kerberos case we expect a 401 error
assert "401 Authentication required" in stdout
@pytest.mark.dcos_min_version('1.10')
@sdk_utils.dcos_ee_only
@pytest.mark.auth
@pytest.mark.sanity
def test_user_can_auth_and_write_and_read(hdfs_client, kerberos):
sdk_auth.kinit(hdfs_client["id"], keytab=config.KEYTAB, principal=kerberos.get_principal("hdfs"))
test_filename = "test_auth_write_read-{}".format(str(uuid.uuid4()))
write_cmd = "/bin/bash -c '{}'".format(config.hdfs_write_command(config.TEST_CONTENT_SMALL, test_filename))
sdk_cmd.task_exec(hdfs_client["id"], write_cmd)
read_cmd = "/bin/bash -c '{}'".format(config.hdfs_read_command(test_filename))
_, stdout, _ = sdk_cmd.task_exec(hdfs_client["id"], read_cmd)
assert stdout == config.TEST_CONTENT_SMALL
@pytest.mark.dcos_min_version('1.10')
@sdk_utils.dcos_ee_only
@pytest.mark.auth
@pytest.mark.sanity
def test_users_have_appropriate_permissions(hdfs_client, kerberos):
# "hdfs" is a superuser
sdk_auth.kinit(hdfs_client["id"], keytab=config.KEYTAB, principal=kerberos.get_principal("hdfs"))
log.info("Creating directory for alice")
make_user_directory_cmd = config.hdfs_command("mkdir -p /users/alice")
sdk_cmd.task_exec(hdfs_client["id"], make_user_directory_cmd)
change_ownership_cmd = config.hdfs_command("chown alice:users /users/alice")
sdk_cmd.task_exec(hdfs_client["id"], change_ownership_cmd)
change_permissions_cmd = config.hdfs_command("chmod 700 /users/alice")
sdk_cmd.task_exec(hdfs_client["id"], change_permissions_cmd)
test_filename = "test_user_permissions-{}".format(str(uuid.uuid4()))
# alice has read/write access to her directory
sdk_auth.kdestroy(hdfs_client["id"])
sdk_auth.kinit(hdfs_client["id"], keytab=config.KEYTAB, principal=kerberos.get_principal("alice"))
write_access_cmd = "/bin/bash -c \"{}\"".format(config.hdfs_write_command(
config.TEST_CONTENT_SMALL,
"/users/alice/{}".format(test_filename)))
log.info("Alice can write: %s", write_access_cmd)
rc, stdout, _ = sdk_cmd.task_exec(hdfs_client["id"], write_access_cmd)
assert stdout == '' and rc == 0
read_access_cmd = config.hdfs_read_command("/users/alice/{}".format(test_filename))
log.info("Alice can read: %s", read_access_cmd)
_, stdout, _ = sdk_cmd.task_exec(hdfs_client["id"], read_access_cmd)
assert stdout == config.TEST_CONTENT_SMALL
ls_cmd = config.hdfs_command("ls /users/alice")
_, stdout, _ = sdk_cmd.task_exec(hdfs_client["id"], ls_cmd)
assert "/users/alice/{}".format(test_filename) in stdout
# bob doesn't have read/write access to alice's directory
sdk_auth.kdestroy(hdfs_client["id"])
sdk_auth.kinit(hdfs_client["id"], keytab=config.KEYTAB, principal=kerberos.get_principal("bob"))
log.info("Bob tries to wrtie to alice's directory: %s", write_access_cmd)
_, _, stderr = sdk_cmd.task_exec(hdfs_client["id"], write_access_cmd)
log.info("Bob can't write to alice's directory: %s", write_access_cmd)
assert "put: Permission denied: user=bob" in stderr
log.info("Bob tries to read from alice's directory: %s", read_access_cmd)
_, _, stderr = sdk_cmd.task_exec(hdfs_client["id"], read_access_cmd)
log.info("Bob can't read from alice's directory: %s", read_access_cmd)
assert "cat: Permission denied: user=bob" in stderr
| 34.460377 | 111 | 0.643561 |
4a20347137062a585eb1f96672a5b84d7d3ead7c | 4,881 | py | Python | venv/Lib/site-packages/pandas/tests/indexing/interval/test_interval.py | OliviaNabbosa89/Disaster_Responses | 1e66d77c303cec685dfc2ca94f4fca4cc9400570 | [
"MIT"
] | 1 | 2021-02-06T21:00:00.000Z | 2021-02-06T21:00:00.000Z | venv/Lib/site-packages/pandas/tests/indexing/interval/test_interval.py | OliviaNabbosa89/Disaster_Responses | 1e66d77c303cec685dfc2ca94f4fca4cc9400570 | [
"MIT"
] | null | null | null | venv/Lib/site-packages/pandas/tests/indexing/interval/test_interval.py | OliviaNabbosa89/Disaster_Responses | 1e66d77c303cec685dfc2ca94f4fca4cc9400570 | [
"MIT"
] | null | null | null | import numpy as np
import pytest
import pandas as pd
from pandas import DataFrame, IntervalIndex, Series
import pandas._testing as tm
class TestIntervalIndex:
def setup_method(self, method):
self.s = Series(np.arange(5), IntervalIndex.from_breaks(np.arange(6)))
def test_getitem_with_scalar(self):
s = self.s
expected = s.iloc[:3]
tm.assert_series_equal(expected, s[:3])
tm.assert_series_equal(expected, s[:2.5])
tm.assert_series_equal(expected, s[0.1:2.5])
expected = s.iloc[1:4]
tm.assert_series_equal(expected, s[[1.5, 2.5, 3.5]])
tm.assert_series_equal(expected, s[[2, 3, 4]])
tm.assert_series_equal(expected, s[[1.5, 3, 4]])
expected = s.iloc[2:5]
tm.assert_series_equal(expected, s[s >= 2])
@pytest.mark.parametrize("direction", ["increasing", "decreasing"])
def test_nonoverlapping_monotonic(self, direction, closed):
tpls = [(0, 1), (2, 3), (4, 5)]
if direction == "decreasing":
tpls = tpls[::-1]
idx = IntervalIndex.from_tuples(tpls, closed=closed)
s = Series(list("abc"), idx)
for key, expected in zip(idx.left, s):
if idx.closed_left:
assert s[key] == expected
assert s.loc[key] == expected
else:
with pytest.raises(KeyError, match=str(key)):
s[key]
with pytest.raises(KeyError, match=str(key)):
s.loc[key]
for key, expected in zip(idx.right, s):
if idx.closed_right:
assert s[key] == expected
assert s.loc[key] == expected
else:
with pytest.raises(KeyError, match=str(key)):
s[key]
with pytest.raises(KeyError, match=str(key)):
s.loc[key]
for key, expected in zip(idx.mid, s):
assert s[key] == expected
assert s.loc[key] == expected
def test_non_matching(self):
s = self.s
# this is a departure from our current
# indexing scheme, but simpler
with pytest.raises(KeyError, match="^$"):
s.loc[[-1, 3, 4, 5]]
with pytest.raises(KeyError, match="^$"):
s.loc[[-1, 3]]
def test_large_series(self):
s = Series(
np.arange(1000000), index=IntervalIndex.from_breaks(np.arange(1000001))
)
result1 = s.loc[:80000]
result2 = s.loc[0:80000]
result3 = s.loc[0:80000:1]
tm.assert_series_equal(result1, result2)
tm.assert_series_equal(result1, result3)
def test_loc_getitem_frame(self):
df = DataFrame({"A": range(10)})
s = pd.cut(df.A, 5)
df["B"] = s
df = df.set_index("B")
result = df.loc[4]
expected = df.iloc[4:6]
tm.assert_frame_equal(result, expected)
with pytest.raises(KeyError, match="10"):
df.loc[10]
# single list-like
result = df.loc[[4]]
expected = df.iloc[4:6]
tm.assert_frame_equal(result, expected)
# non-unique
result = df.loc[[4, 5]]
expected = df.take([4, 5, 4, 5])
tm.assert_frame_equal(result, expected)
with pytest.raises(KeyError, match="^$"):
df.loc[[10]]
# partial missing
with pytest.raises(KeyError, match="^$"):
df.loc[[10, 4]]
class TestIntervalIndexInsideMultiIndex:
def test_mi_intervalindex_slicing_with_scalar(self):
# GH#27456
idx = pd.MultiIndex.from_arrays(
[
pd.Index(["FC", "FC", "FC", "FC", "OWNER", "OWNER", "OWNER", "OWNER"]),
pd.Index(
["RID1", "RID1", "RID2", "RID2", "RID1", "RID1", "RID2", "RID2"]
),
pd.IntervalIndex.from_arrays(
[0, 1, 10, 11, 0, 1, 10, 11], [1, 2, 11, 12, 1, 2, 11, 12]
),
]
)
idx.names = ["Item", "RID", "MP"]
df = pd.DataFrame({"value": [1, 2, 3, 4, 5, 6, 7, 8]})
df.index = idx
query_df = pd.DataFrame(
{
"Item": ["FC", "OWNER", "FC", "OWNER", "OWNER"],
"RID": ["RID1", "RID1", "RID1", "RID2", "RID2"],
"MP": [0.2, 1.5, 1.6, 11.1, 10.9],
}
)
query_df = query_df.sort_index()
idx = pd.MultiIndex.from_arrays([query_df.Item, query_df.RID, query_df.MP])
query_df.index = idx
result = df.value.loc[query_df.index]
expected = pd.Series([1, 6, 2, 8, 7], index=idx, name="value")
tm.assert_series_equal(result, expected)
| 32.54 | 88 | 0.508912 |
4a20354ba62661407ba40f2bb2d63130eef81f3b | 16,159 | py | Python | ravenframework/IndexSets.py | dgarrett622/raven | f36cc108f7500b0e2717df4832b69b801b43960d | [
"Apache-2.0"
] | null | null | null | ravenframework/IndexSets.py | dgarrett622/raven | f36cc108f7500b0e2717df4832b69b801b43960d | [
"Apache-2.0"
] | null | null | null | ravenframework/IndexSets.py | dgarrett622/raven | f36cc108f7500b0e2717df4832b69b801b43960d | [
"Apache-2.0"
] | null | null | null | # Copyright 2017 Battelle Energy Alliance, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Created on December 14, 2014
This module manages the creation of index sets (the set of all combinations of polynomial orders
that are needed to represent the original model)
"""
#for future compatibility with Python 3--------------------------------------------------------------
from __future__ import division, print_function, unicode_literals, absolute_import
#End compatibility block for Python 3----------------------------------------------------------------
#External Modules------------------------------------------------------------------------------------
import numpy as np
import operator
import itertools
#External Modules End--------------------------------------------------------------------------------
#Internal Modules------------------------------------------------------------------------------------
from .EntityFactoryBase import EntityFactory
from .BaseClasses import MessageUser
#Internal Modules End--------------------------------------------------------------------------------
class IndexSet(MessageUser):
"""
In stochastic collocation for generalised polynomial chaos, the Index Set
is a set of all combinations of polynomial orders needed to represent the
original model to a "level" L (maxPolyOrder).
"""
def __init__(self):
"""
Constructor.
@ In, None
@ Out, None
"""
super().__init__()
self.type = 'IndexSet' #type of index set (Tensor Product, Total Degree, Hyperbolic Cross)
self.printTag = 'IndexSet' #type of index set (Tensor Product, Total Degree, Hyperbolic Cross)
self.maxOrds = None #maximum requested polynomial order requested for each distribution
self.points = [] #array of polynomial order tuples
self.maxPolyOrder = None #integer, maximum order polynomial to use in any one dimension -> misleading! Relative order for anisotropic case
self.polyOrderList = [] #array of lists containing all the polynomial orders needed for each dimension
self.impWeights = [] #array of scalars for assigning importance weights to each dimension
def __len__(self):
"""
Returns number of entries in the index set.
@ In, None, None
@ Out, __len__, int, cardinality of index set
"""
return len(self.points)
def __getitem__(self,i=None):
"""
Returns as if called on self.points.
@ In, i, string/int, splice notation for array
@ Out, points, array of tuples/tuple, requested points
"""
    if i is None:
return np.array(self.points)
else:
return self.points[i]
def __repr__(self):
"""
Produces a more human-readable version of the index set.
@ In, None
@ Out, msg, string, visual representation of index set
"""
if len(self.points)<1:
return "Index set is empty!"
msg='IndexSet Printout:\n'
if len(self.points[0])==2:
#graphical block visualization
left=0
p=0
while p<len(self.points):
pt = self.points[p]
if pt[0]==left:
msg+=' '+str(pt)
p+=1
else:
msg+='\n'
left+=1
else:
#just list them
for pt in self.points:
msg+=' '+str(pt)+'\n'
return msg
def __eq__(self,other):
"""
Checks equivalency of index set
@ In, other, object, object to compare to
@ Out, isEqual, bool, equivalency
"""
isEqual = self.type == other.type and self.points == other.points and (self.impWeights == other.impWeights).all()
return isEqual
def __ne__(self,other):
"""
Checks non-equivalency of index set
@ In, other, object, object to compare to
@ Out, isNotEqual, bool, non-equivalency
"""
isNotEqual = not self.__eq__(other)
return isNotEqual
def _xy(self):
"""
Returns reordered data. Originally,
Points = [(a1,b1,...,z1),
(a2,b2,...,z2),
...]
Returns [(a1,a2,a3,...),
(b1,b2,b3,...),
...,
(z1,z2,z3,...)]
@ In, None
@ Out, orderedPoints, array of tuples, points by dimension
"""
orderedPoints = zip(*self.points)
return orderedPoints
def printOut(self):
"""
Prints out the contents of the index set.
@ In, None
@ Out, None
"""
self.raiseADebug('IndexSet Printout:')
if len(self.points[0])==2:
#graphical block visualization
msg=''
left=0
p=0
while p<len(self.points):
pt = self.points[p]
if pt[0]==left:
msg+=' '+str(pt)
p+=1
else:
self.raiseADebug(msg)
msg=''
left+=1
self.raiseADebug(msg)
else:
#just list them
for pt in self.points:
self.raiseADebug(' '+str(pt))
def order(self):
"""
Orders the index set points in partially-increasing order.
@ In, None
@ Out, None
"""
self.points.sort(key=operator.itemgetter(*range(len(self.points[0]))))
def initialize(self,features,impList,maxPolyOrder):
"""
Initialize everything index set needs
@ In, features, list(str), input parameters
@ In, impList, dict{str:float}, weights by dimension
@ In, maxPolyOrder, int, relative maximum polynomial order to be used for index set
      @ Out, None
"""
#set up and normalize weights
# this algorithm assures higher weight means more importance,
# and end product is normalized so smallest is 1
self.impWeights = np.array(list(impList[v] for v in features))
self.impWeights/= np.max(self.impWeights)
self.impWeights = 1.0/self.impWeights
#establish max orders
self.maxOrder=maxPolyOrder
self.polyOrderList=[]
for _ in features:
self.polyOrderList.append(range(self.maxOrder+1))
def generateMultiIndex(self,N,rule,I=None,MI=None):
"""
Recursive algorithm to build monotonically-increasing-order index set.
@ In, N, int, dimension of the input space
@ In, rule, function, rule for type of index set (tensor product, total degree, etc)
@ In, I, array of scalar, optional, single index point
@ In, MI, array of tuples, optional, multiindex point collection
@ Out, MI, array of tuples, index set
"""
    if I is None:
      I = []
    if MI is None:
      MI = []
if len(I)!=N:
i=0
while rule(I+[i]): #rule is defined by subclasses, limits number of index points by criteria
MI = self.generateMultiIndex(N,rule,I+[i],MI)
i+=1
else:
MI.append(tuple(I))
return MI
class TensorProduct(IndexSet):
"""
This Index Set requires only that the max poly order in the index point i is less than maxPolyOrder ( max(i)<=L )
"""
def initialize(self,features,impList,maxPolyOrder):
"""
Initialize everything index set needs
@ In, features, list(str), input parameters
@ In, impList, dict{str:float}, weights by dimension
@ In, maxPolyOrder, int, relative maximum polynomial order to be used for index set
@ Out, None
"""
IndexSet.initialize(self,features,impList,maxPolyOrder)
self.type='Tensor Product'
self.printTag='TensorProductIndexSet'
target = sum(self.impWeights)/float(len(self.impWeights))*self.maxOrder
def rule(i):
"""
Method to define the index rule
@ In, i, list, list of points
@ Out, rule, bool, if big is <= target
"""
big=0
for j,p in enumerate(i):
big=max(big,p*self.impWeights[j])
return big <= target
self.points = self.generateMultiIndex(len(features),rule)
class TotalDegree(IndexSet):
"""
This Index Set requires the sum of poly orders in the index point is less than maxPolyOrder ( sum(i)<=L ).
"""
def initialize(self,features,impList,maxPolyOrder):
"""
Initialize everything index set needs
@ In, features, list(str), input parameters
@ In, impList, dict{str:float}, weights by dimension
@ In, maxPolyOrder, int, relative maximum polynomial order to be used for index set
@ Out, None
"""
IndexSet.initialize(self,features,impList,maxPolyOrder)
self.type='Total Degree'
self.printTag='TotalDegreeIndexSet'
#TODO if user has set max poly orders (levels), make it so you never use more
# - right now is only limited by the maximum overall level (and importance weight)
target = sum(self.impWeights)/float(len(self.impWeights))*self.maxOrder
def rule(i):
"""
Method to define the index rule
@ In, i, list, list of points
@ Out, rule, bool, if tot is <= target
"""
tot=0
for j,p in enumerate(i):
tot+=p*self.impWeights[j]
return tot<=target
self.points = self.generateMultiIndex(len(features),rule)
class HyperbolicCross(IndexSet):
"""
This Index Set requires the product of poly orders in the index point is less than maxPolyOrder ( prod(i+1)<=L+1 ).
"""
def initialize(self,features,impList,maxPolyOrder):
"""
Initialize everything index set needs
@ In, features, list(str), input parameters
@ In, impList, dict{str:float}, weights by dimension
@ In, maxPolyOrder, int, relative maximum polynomial order to be used for index set
@ Out, None
"""
IndexSet.initialize(self,features,impList,maxPolyOrder)
self.type='Hyperbolic Cross'
self.printTag='HyperbolicCrossIndexSet'
#TODO if user has set max poly orders (levels), make it so you never use more
# - right now is only limited by the maximum overall level (and importance weight)
target = (self.maxOrder+1)**(sum(self.impWeights)/max(1,float(len(self.impWeights))))
def rule(i):
"""
Method to define the index rule
@ In, i, list, list of points
@ Out, rule, bool, if big is <= target
"""
tot=1
for e,val in enumerate(i):
tot*=(val+1)**self.impWeights[e]
return tot<=target
self.points = self.generateMultiIndex(len(features),rule)
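# Editor's note (worked example, N=2 dimensions, equal importance weights, maxPolyOrder=2):
#   TensorProduct keeps max(i,j) <= 2 -> the full 3x3 grid, 9 points
#   TotalDegree keeps i+j <= 2 -> {(0,0),(0,1),(0,2),(1,0),(1,1),(2,0)}, 6 points
#   HyperbolicCross keeps (i+1)*(j+1) <= 3 -> {(0,0),(0,1),(0,2),(1,0),(2,0)}, 5 points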
class Custom(IndexSet):
"""
User-based index set point choices
"""
def initialize(self,features,impList,maxPolyOrder):
"""
Initialize everything index set needs
@ In, features, list(str), input parameters
@ In, impList, dict{str:float}, weights by dimension
@ In, maxPolyOrder, int, relative maximum polynomial order to be used for index set
@ Out, None
"""
IndexSet.initialize(self,features,impList,maxPolyOrder)
self.type = 'Custom'
self.printTag = 'CustomIndexSet'
self.N = len(features)
self.points = []
def setPoints(self,points):
"""
Used to set the index set points manually.
@ In, points, list, tuples to set points to
@ Out, None
"""
self.points=[]
if len(points)>0:
self.addPoints(points)
self.order()
def addPoints(self,points):
"""
Adds points to existing index set. Reorders set on completion.
@ In, points, list of points, either single tuple or list of tuples to add
@ Out, None
"""
if type(points)==list:
for pt in points:
self.points.append(pt)
elif type(points)==tuple and len(points)==self.N:
self.points.append(points)
else:
self.raiseAnError(ValueError,'Unexpected points to add to set:',points)
self.order()
class AdaptiveSet(IndexSet):
"""
    Adaptive index set that can expand itself on call. Used in conjunction with the AdaptiveSparseGrid sampler.
"""
def initialize(self,features,impList,maxPolyOrder,full=False):
"""
Initialize everything index set needs
@ In, features, list(str), input parameters
@ In, impList, dict{str:float}, weights by dimension
@ In, maxPolyOrder, int, relative maximum polynomial order to be used for index set
@ In, full, bool, optional, if True will do all perturbations of {0,1}^N, else only 1 in any axis at a time
@ Out, None
"""
IndexSet.initialize(self,features,impList,maxPolyOrder)
self.type = 'Adaptive Index Set'
self.printTag = self.type
self.N = len(features)
self.points = [] #retained points in the index set
#need 0, first-order polynomial in each dimension to start predictions
firstPoint = [0]*self.N #mean point polynomial
self.active = [tuple(firstPoint)] #stores the polynomial indices being actively trained
if full:
for pt in list(itertools.product([0,1],repeat=self.N)):
self.active.append(pt)
else:
for i in range(self.N):
#add first-order polynomial along each axis -> this isn't enough though, necessarily
#adaptive sobol really needs an estimate that involves the (1,1,...,1) point
pt = firstPoint[:]
pt[i]+=1
self.active.append(tuple(pt))
self.history = [] #list of tuples, index set point and its impact parameter
def accept(self,pt):
"""
Indicates the provided point should be accepted from the active set to the use set
@ In, pt, tuple(int), the polynomial index to accept
@ Out, None
"""
if pt not in self.active:
self.raiseAnError(KeyError,'Adaptive index set instructed to accept point',pt,'but point is not in active set!')
self.active.remove(pt)
self.points.append(pt)
self.order()
def reject(self,pt):
"""
Indicates the provided point should be accepted from the active set to the use set
@ In, pt, tuple(int), the polynomial index to accept
@ Out, None
"""
    if pt not in self.active:
self.raiseAnError(KeyError,'Adaptive index set instructed to reject point',pt,'but point is not in active set!')
self.active.remove(pt)
def forward(self,maxPoly=None):
"""
Check the upper neighbors of each point for indices to add.
@ In, maxPoly, integer, optional maximum value to have in any direction
@ Out, None
"""
for i in self.points:
self.forwardOne(i,maxPoly)
def forwardOne(self,pt,maxPoly=None):
"""
Searches for new active points based on the point given and the established set.
@ In, pt, tuple of int, the point to move forward from
@ In, maxPoly, integer, optional maximum value to have in any direction
@ Out, None
"""
#TODO generalize this not to refer to polys, if anything else ever wants to use these sets.
    #add one to each dimension, one at a time, as the potential candidates
for i in range(self.N):
newpt = list(pt)
newpt[i]+=1
if tuple(newpt) in self.active:
continue
      if maxPoly is not None:
if newpt[i]>maxPoly:
continue
pt = tuple(newpt)
if pt in self.active or pt in self.points:
continue
#remove the candidate if not all of its predecessors are accepted.
found=True
for j in range(self.N):
checkpt = newpt[:]
if checkpt[j]==0:
continue
checkpt[j] -= 1
if tuple(checkpt) not in self.points:
found=False
break
if found:
self.active.append(pt)
def printOut(self):
"""
Prints the accepted/established points and the current active set to screen.
@ In, None
@ Out, None
"""
self.raiseADebug(' Accepted Points:')
for p in self.points:
self.raiseADebug(' ',p)
self.raiseADebug(' Active Set')
for a in self.active:
self.raiseADebug(' ',a)
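# --- Editor's illustrative sketch (not part of the original module) ---
# forwardOne only activates a candidate multi-index once every backward
# neighbor (each nonzero coordinate decremented by one) is already in the
# accepted set, which keeps the index set downward-closed.  The standalone
# helper below restates that rule with plain tuples; the name
# "_is_admissible" is hypothetical and nothing above depends on it.
def _is_admissible(candidate, accepted):
  """
    Returns True if every backward neighbor of candidate is in accepted.
    >>> _is_admissible((1, 1), {(0, 0), (1, 0)})
    False
    >>> _is_admissible((1, 1), {(0, 0), (1, 0), (0, 1)})
    True
  """
  for j, entry in enumerate(candidate):
    if entry == 0:
      continue
    back = list(candidate)
    back[j] -= 1
    if tuple(back) not in accepted:
      return False
  return True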
factory = EntityFactory('IndexSet')
factory.registerAllSubtypes(IndexSet)
| 34.675966 | 143 | 0.623739 |
4a2036fed363ac2de67c0ad0e48abea568262fae | 65,281 | py | Python | tripleo_common/tests/actions/test_parameters.py | d0ugal/tripleo-common | dcf76e1e905613170d2011d0430bed5d35fe1006 | [
"Apache-2.0"
] | 2 | 2016-05-25T14:55:27.000Z | 2020-04-13T09:53:09.000Z | tripleo_common/tests/actions/test_parameters.py | d0ugal/tripleo-common | dcf76e1e905613170d2011d0430bed5d35fe1006 | [
"Apache-2.0"
] | null | null | null | tripleo_common/tests/actions/test_parameters.py | d0ugal/tripleo-common | dcf76e1e905613170d2011d0430bed5d35fe1006 | [
"Apache-2.0"
] | null | null | null | # Copyright 2016 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
import yaml
from swiftclient import exceptions as swiftexceptions
from tripleo_common.actions import parameters
from tripleo_common import constants
from tripleo_common import exception
from tripleo_common.tests import base
from tripleo_common.utils import passwords as password_utils
_EXISTING_PASSWORDS = {
'PlacementPassword': 'VFJeqBKbatYhQm9jja67hufft',
'MistralPassword': 'VFJeqBKbatYhQm9jja67hufft',
'BarbicanPassword': 'MGGQBtgKT7FnywvkcdMwE9nhx',
'BarbicanSimpleCryptoKek': 'dGhpcnR5X3R3b19ieXRlX2tleWJsYWhibGFoYmxhaGg=',
'AdminPassword': 'jFmY8FTpvtF2e4d4ReXvmUP8k',
'CeilometerMeteringSecret': 'CbHTGK4md4Cc8P8ZyzTns6wry',
'ZaqarPassword': 'bbFgCTFbAH8vf9n3xvZCP8aMR',
'NovaPassword': '7dZATgVPwD7Ergs9kTTDMCr7F',
'MysqlRootPassword': 'VqJYpEdKks',
'RabbitCookie': 'BqJYpEdKksAqJYpEdKks',
'HeatAuthEncryptionKey': '9xZXehsKc2HbmFFMKjuqxTJHn',
'PcsdPassword': 'KjEzeitus8eu751a',
'HorizonSecret': 'mjEzeitus8eu751B',
'NovajoinPassword': '7dZATgVPwD7Ergs9kTTDMCr7F',
'IronicPassword': '4hFDgn9ANeVfuqk84pHpD4ksa',
'RedisPassword': 'xjj3QZDcUQmU6Q7NzWBHRUhGd',
'SaharaPassword': 'spFvYGezdFwnTk7NPxgYTbUPh',
'AdminToken': 'jq6G6HyZtj7dcZEvuyhAfjutM',
'CinderPassword': 'dcxC3xyUcrmvzfrrxpAd3REcm',
'CongressPassword': 'DwcKvMqXMuNYYFU4zTCuG4234',
'GlancePassword': 'VqJYNEdKKsGZtgnHct77XBtrV',
'RabbitPassword': 'ahuHRXdPMx9rzCdjD9CJJNCgA',
'RpcPassword': 'ahuHRXdPMx9rzCdjD9CJJNCgA',
'NotifyPassword': 'ahuHRXdPMx9rzCdjD9CJJNCgA',
'CephAdminKey': b'AQCQXtlXAAAAABAAT4Gk+U8EqqStL+JFa9bp1Q==',
'HAProxyStatsPassword': 'P8tbdK6n4YUkTaUyy8XgEVTe6',
'TackerPassword': 'DwcKvMqXMuNYYFU4zTCuG4234',
'TrovePassword': 'V7A7zegkMdRFnYuN23gdc4KQC',
'CeilometerPassword': 'RRdpwK6qf2pbKz2UtzxqauAdk',
'GnocchiPassword': 'cRYHcUkMuJeK3vyU9pCaznUZc',
'HeatStackDomainAdminPassword': 'GgTRyWzKYsxK4mReTJ4CM6sMc',
'CephRgwKey': b'AQCQXtlXAAAAABAAUKcqUMu6oMjAXMjoUV4/3A==',
'AodhPassword': '8VZXehsKc2HbmFFMKYuqxTJHn',
'PankoPassword': 'cVZXehsSc2KdmFFMKDudxTLKn',
'OctaviaHeartbeatKey': 'oct-heartbeat-key',
'OctaviaPassword': 'NMl7j3nKk1VVwMxUZC8Cgw==',
'OctaviaServerCertsKeyPassphrase': 'aW5zZWN1cmUta2V5LWRvLW5vdC11c2U=',
'OctaviaCaKeyPassphrase': 'SLj4c3uCk4DDxPwQOG1Heb==',
'ManilaPassword': 'NYJN86Fua3X8AVFWmMhQa2zTH',
'NeutronMetadataProxySharedSecret': 'Q2YgUCwmBkYdqsdhhCF4hbghu',
'CephMdsKey': b'AQCQXtlXAAAAABAAT4Gk+U8EqqStL+JFa9bp1Q==',
'CephManilaClientKey': b'AQANOFFY1NW6AxAAu6jWI3YSOsp2QWusb5Y3DQ==',
'CephMonKey': b'AQCQXtlXAAAAABAA9l+59N3yH+C49Y0JiKeGFg==',
'CephGrafanaAdminPassword': 'NYJN86Fua3X8AVFWmMhQa2zTH',
'CephDashboardAdminPassword': 'NYJN86Fua3X8AVFWmMhQa2zTH',
'SwiftHashSuffix': 'td8mV6k7TYEGKCDvjVBwckpn9',
'SnmpdReadonlyUserPassword': 'TestPassword',
'SwiftPassword': 'z6EWAVfW7CuxvKdzjWTdrXCeg',
'HeatPassword': 'bREnsXtMHKTHxt8XW6NXAYr48',
'MysqlClustercheckPassword': 'jN4RMMWWJ4sycaRwh7UvrAtfX',
'CephClientKey': b'AQCQXtlXAAAAABAAKyc+8St8i9onHyu2mPk+vg==',
'NeutronPassword': 'ZxAjdU2UXCV4GM3WyPKrzAZXD',
'DesignatePassword': 'wHYj7rftFzHMpJKnGxbjjR9CW',
'DesignateRndcKey': 'hB8XaZRd2Tf00jKsyoXpyw==',
'KeystoneCredential0': 'ftJNQ_XlDUK7Lgvv1kdWf3SyqVsrvNDgoNV4kJg3yzw=',
'KeystoneCredential1': 'c4MFq82TQLFLKpiiUjrKkp15dafE2ALcD3jbaIu3rfE=',
'KeystoneFernetKey0': 'O8NSPxr4zXBBAoGIj-5aUmtE7-Jk5a4ptVsEhzJ8Vd8=',
'KeystoneFernetKey1': 'AueoL37kd6eLjV29AG-Ruxu5szW47osgXx6aPOqtI6I=',
'KeystoneFernetKeys': {
'/etc/keystone/fernet-keys/0': {'content': 'IAMAVERYSAFEKEY'},
'/etc/keystone/fernet-keys/1': {'content': 'IALSOAMAVERYSAFEKEY'}
},
'CephClusterFSID': u'97c16f44-b62c-11e6-aed3-185e0f73fdc5',
'Ec2ApiPassword': 'FPvz2WiWxrHVWrmSSvv44bqmr',
'EtcdInitialClusterToken': 'fcVZXehsSc2KdmFFMKDudxTLKa',
'PacemakerRemoteAuthkey':
'bCfHQx4fX7FqENVBbDfBnKvf6FTH6mPfVdNjfzakEjuF4UbmZJHAxWdheEr6feEyZmtM'
'XEd4w3qM8nMVrzjnDCmqAFDmMDQfKcuNgTnqGnkbVUDGpym67Ry4vNCPHyp9tGGyfjNX'
't66csYZTYUHPv6jdJk4HWBjE66v8B3nRpc3FePQ8DRMWX4hcGFNNxapJu7v2frKwq4tD'
'78cc7aPPMGPn8kR3mj7kMP8Ah8VVGXJEtybEvRg4sQ67zEkAzfKggrpXYPK2Qvv9sHKp'
't2VjwZBHTvWKarJjyeMTqbzJyW6JTbm62gqZCr9afZRFQug62pPRduvkUNfUYNPNpqjy'
'yznmeAZPxVseU3jJVxKrxdrgzavKEMtW6BbTmw86j8wuUdaWgRccRGVUQvtQ4p9kXHAy'
'eXVduZvpvxFtbKvfNTvf6qCuJ8qeQp2TwJQPHUYHkxZYrpAA7fZUzNCZR2tFFdZzWGt2'
'PEnYvYts4m7Fp9XEmNm7Jyme38CBfnaVERmTMRvHkq3EE2Amsc72aDdzeVRjR3xRgMNJ'
'2cEEWqatZXveHxJr6VmBNWJUyvPrfmVegwtKCGJND8d3Ysruy7GCn6zcrNY7d84aDk3P'
'q7NyZfRYrGcNDKJuzNWH8UNwGP68uQsUUrV9NVTVpB2sRPG2tJm3unYqekUg3KYXu46J'
'mANxqgrqDv6vPx6NCPdUXZTXFaesQatKRkkf3nZFqZQJXZVbkudTmrPYyRQAjvWuAmrY'
'6RcFFmygeFnhAxhwXNdge9tEfsfPeQ4GMxa8Amj2fMjmNvQXFfQ8uxMUnusDmhbwCRKM'
'CvN2dNE92MaQge34vtxsueyDEmbuVE9sNRD3EQBRwx8nktgRwKHfRZJ3BX8f9XMaQe2e'
'ZfGjtUNkbgKdCyYgEwEybXKPfevDnxFvbZMpJx4fqqCAbAZud9RnAuvqHgFbKHXcVEE4'
'nRmgJmdqJsRsTkYPpYkKN9rssEDCXr9HFjbenkxXcUe8afrTvKAzwBvbDWcjYBEQKbuY'
'6Ptm9VJrjutUHCPmW2sh66qvq4C9vPhVEey7FpCZDEyYUPrjRfhKjxEFNBKWpcZzvmT2'
'nRmgJmdqJsRsTkYPpYkKN9rssEDCXr9HFjbenkxXcUe8afrTvKAzwBvbDWcjYBEQKbuY'
'2cEEWqatZXveHxJr6VmBNWJUyvPrfmVegwtKCGJND8d3Ysruy7GCn6zcrNY7d84aDk3P'
'VRE4aqMfuY72xFacxXHjvWagEGQEYtkMtQnsh7XAMGuazT3pkppeUTyDbKTY2Dz7Quc3'
'8UKaw8ece6fTXWpjX2EYrsd4qzvhC6eEPdgnpmzjqmuG8YqEAUZ7dYADgAhTkBQsNct8'
'btQsQDYD4PBjxG2KWAZ9vgTsvBpjjEVcrPfWgwZKJTAZWfWq2u7nT4N2t39EYmQEzbEf'
'8UKaw8ece6fTXWpjX2EYrsd4qzvhC6eEPdgnpmzjqmuG8YqEAUZ7dYADgAhTkBQsNct8'
'DkCF3DJ49jjZm9N4EKnKGGXD7XkFE79AFRGPUw4gXpeQCtUXyEugUErqMjqgJjC7ykdg'
'zz7txnzYfRaKHNVs4r4GwNEHRHt7VcTuT3WBcbE4skQgjMnttgP7hts7dMU7PA8kRrfq'
'BKdkPkUwqQ9Xn4zrysY4GvJQHWXxD6Tyqf9PZaz4xbUmsvtuY7NAz27U2aT3EA9XCgfn'
'2cEEWqatZXveHxJr6VmBNWJUyvPrfmVegwtKCGJND8d3Ysruy7GCn6zcrNY7d84aDk3P'
'CEfTJQz342nwRMY4DCuhawz4cnrWwxgsnVPCbeXYH4RcgswVsk9edxKkYMkpTwpcKf6n'
'nRmgJmdqJsRsTkYPpYkKN9rssEDCXr9HFjbenkxXcUe8afrTvKAzwBvbDWcjYBEQKbuY'
'6Ptm9VJrjutUHCPmW2sh66qvq4C9vPhVEey7FpCZDEyYUPrjRfhKjxEFNBKWpcZzvmT2'
'VRE4aqMfuY72xFacxXHjvWagEGQEYtkMtQnsh7XAMGuazT3pkppeUTyDbKTY2Dz7Quc3'
'8UKaw8ece6fTXWpjX2EYrsd4qzvhC6eEPdgnpmzjqmuG8YqEAUZ7dYADgAhTkBQsNct8'
'btQsQDYD4PBjxG2KWAZ9vgTsvBpjjEVcrPfWgwZKJTAZWfWq2u7nT4N2t39EYmQEzbEf'
'DkCF3DJ49jjZm9N4EKnKGGXD7XkFE79AFRGPUw4gXpeQCtUXyEugUErqMjqgJjC7ykdg'
'zz7txnzYfRaKHNVs4r4GwNEHRHt7VcTuT3WBcbE4skQgjMnttgP7hts7dMU7PA8kRrfq'
'BKdkPkUwqQ9Xn4zrysY4GvJQHWXxD6Tyqf9PZaz4xbUmsvtuY7NAz27U2aT3EA9XCgfn'
'2cEEWqatZXveHxJr6VmBNWJUyvPrfmVegwtKCGJND8d3Ysruy7GCn6zcrNY7d84aDk3P'
'CEfTJQz342nwRMY4DCuhawz4cnrWwxgsnVPCbeXYH4RcgswVsk9edxKkYMkpTwpcKf6n'
'E2dhquqdKVTAYf7YKbTfFVsRwqykkPduKXuPwVDjbCqdEJPcmnRJAJkwkQCWgukpvzzm'
'DKFVYxncxmzKgEN27VtgfpsXWBJ2jaxMeQCXb2rbjkVcaypyaETQ3Wkw98EptNAKRcjM'
'E2dhquqdKVTAYf7YKbTfFVsRwqykkPduKXuPwVDjbCqdEJPcmnRJAJkwkQCWgukpvzzm'
'zZJ2xFdfNYh7RZ7EgAAbY8Tqy3j2c9c6HNmXwAVV6dzPTrE4FHcKZGg76anGchczF9ev'
'AG8RHQ7ea2sJhXqBmGsmEj6Q84TN9E7pgmtAtmVAA38AYsQBNZUMYdMcmBdpV9w7G3NZ'
'mEU8R8uWqx6w3NzzqsMg78bnhCR7sdWDkhuEp2M8fYWmqujYFNYvzz6BcHNKQyrWETRD'
'E2dhquqdKVTAYf7YKbTfFVsRwqykkPduKXuPwVDjbCqdEJPcmnRJAJkwkQCWgukpvzzm'
'zaTdNWgM7wsXGkvgYVNdTWnReCPXJUN3yQwrvApZzdaF86QaeYwXW7qqEJrqmwpUUbw2'
'JHkmvJB4AWtVhDc9etzUqfuTaqMyXwxFEWvht3RDTDx8dfQ3Ek8BD4QP4BtUQeQJpfsG'
'FEJeQQYVcBxqVuK26xJrERUDmeNw8KWKBCrYPPy48cjCFdgZHz3cNet6bwJMdsgKMpZT'
'erdYy9nqBw6FRZ37rRMtxmrcB4VsWHbf4HjdPRpu4xyJTqMThnXWa8nPDde3C9wCuKkQ'
'23k2zDYsMeHc6KD93vm7Ky48v3veYEuJvNNxQPyyCZ9XNnpGsWrqsVduCswR4MQpp6yJ'
'RBmwbMYbuEjwJy9UuZxa9bQV4GqYFnVuETC6bXaT9uauWdaa2TrbuuXx3WWdmRGd4Rqh'
'Z3NA9Kqx9pTQHe3KGZ2tFejsJqNvjJvFX94eVeMGDgHjtJzDdxp9NWYtG6v9zABGRzVF'
'MqJX6nhhBPbsvjpswcgJq3ZXxzmWFJmvjECghGrbG6bKawtv4aYhMeaHagfMP8W6KrTy'
'uGxWUhcEhfygjE4truAkjfKCtzzVtTcBArbWMny6HWMp6TAen3f6hEB6kBb7pgvKxkND'
'3JxueYBZvDeq4WWtRzUjcFF2qhEjwrtuCJhy3WMXX3MN6nFDtYRTHZGdPqyatW9Jcc8t'
'7gCMWMVzYyNuXZ2A6rwX6Umv8g3mBuwnrwKXEFTZkPCAZMxk3A6MTmMcJCVy3hw6MmRM'
'eXKyhFxRcKWraysTQG7hd9kP8DeJZNDurYDJwqrh6cwDwaMhBfTgnxTBeyjwpbCJK2FD'
'Jg2vFWPmTJ37gDMdwxWCMRQ9kyqz9PJZ4Xn2MPxMhNqT3Hb39YshryqnbvBagHbqYx9M'
'r4ZKJpKya34JMaPambzg2pKRDd2WdFCZcdHTFyqxxzJbjXM2gjfBZ2strUNqWvQYNTw8'
'QttkuxyeQTgHupKNaZF6y7rDyf7mbNR9DaPXpBQuZ7un6KDj2Dfh7yvfhPk8cHG7n9pb'
'KEKD3sgbbKnQ8d9MsGhUtCQVed7dtjpYKsmGJmbYMvZjpGpqsfsHQfFRdCgJHnW3FdQ6'
'sGhUtCQVed7dtj12',
'MigrationSshKey': {
'private_key': 'private_key',
'public_key': 'public_key'
},
'LibvirtTLSPassword': 'xCdt9yeamKz8Fb6EGba9u82XU',
'OpenDaylightPassword': 'abc487gfh017rmviuq75jdiw7',
}
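# Editor's note (hedged): the mapping above is a fixture of fake credentials
# shared by the test cases below; none of the values are real secrets.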
class GetParametersActionTest(base.TestCase):
@mock.patch('tripleo_common.actions.base.TripleOAction.'
'cache_set')
@mock.patch('tripleo_common.actions.base.TripleOAction.'
'cache_get')
@mock.patch('heatclient.common.template_utils.'
'process_multiple_environments_and_files')
@mock.patch('heatclient.common.template_utils.get_template_contents')
@mock.patch('tripleo_common.actions.base.TripleOAction.'
'get_orchestration_client')
@mock.patch('tripleo_common.actions.base.TripleOAction.get_object_client')
def test_run(self, mock_get_object_client,
mock_get_orchestration_client,
mock_get_template_contents,
mock_process_multiple_environments_and_files,
mock_cache_get,
mock_cache_set):
mock_ctx = mock.MagicMock()
swift = mock.MagicMock(url="http://test.com")
mock_env = yaml.safe_dump({
'temp_environment': 'temp_environment',
'template': 'template',
'environments': [{u'path': u'environments/test.yaml'}]
}, default_flow_style=False)
swift.get_object.side_effect = (
({}, mock_env),
swiftexceptions.ClientException('atest2'),
({}, mock_env)
)
mock_get_object_client.return_value = swift
mock_get_template_contents.return_value = ({}, {
'heat_template_version': '2016-04-30'
})
mock_process_multiple_environments_and_files.return_value = ({}, {})
mock_heat = mock.MagicMock()
mock_heat.stacks.validate.return_value = {}
mock_get_orchestration_client.return_value = mock_heat
mock_cache_get.return_value = None
# Test
action = parameters.GetParametersAction()
action.run(mock_ctx)
mock_heat.stacks.validate.assert_called_once_with(
environment={},
files={},
show_nested=True,
template={'heat_template_version': '2016-04-30'},
)
mock_cache_get.assert_called_once_with(
mock_ctx,
"overcloud",
"tripleo.parameters.get"
)
mock_cache_set.assert_called_once_with(
mock_ctx,
"overcloud",
"tripleo.parameters.get",
{'heat_resource_tree': {}, 'environment_parameters': None}
)
class ResetParametersActionTest(base.TestCase):
@mock.patch('tripleo_common.actions.base.TripleOAction.'
'cache_delete')
@mock.patch('tripleo_common.actions.base.TripleOAction.get_object_client')
def test_run(self, mock_get_object_client, mock_cache):
mock_ctx = mock.MagicMock()
swift = mock.MagicMock(url="http://test.com")
mock_env = yaml.safe_dump({
'name': constants.DEFAULT_CONTAINER_NAME,
'template': 'template',
'environments': [{u'path': u'environments/test.yaml'}],
'parameter_defaults': {'SomeTestParameter': 42}
}, default_flow_style=False)
swift.get_object.return_value = ({}, mock_env)
mock_get_object_client.return_value = swift
# Test
action = parameters.ResetParametersAction()
action.run(mock_ctx)
mock_env_reset = yaml.safe_dump({
'name': constants.DEFAULT_CONTAINER_NAME,
'template': 'template',
'environments': [{u'path': u'environments/test.yaml'}]
}, default_flow_style=False)
swift.put_object.assert_called_once_with(
constants.DEFAULT_CONTAINER_NAME,
constants.PLAN_ENVIRONMENT,
mock_env_reset
)
mock_cache.assert_called_once_with(
mock_ctx,
"overcloud",
"tripleo.parameters.get"
)
class UpdateParametersActionTest(base.TestCase):
@mock.patch('tripleo_common.actions.parameters.uuid')
@mock.patch('heatclient.common.template_utils.'
'process_multiple_environments_and_files')
@mock.patch('heatclient.common.template_utils.'
'get_template_contents')
@mock.patch('tripleo_common.actions.base.TripleOAction.'
'cache_set')
@mock.patch('tripleo_common.actions.base.TripleOAction.'
'get_object_client')
@mock.patch('tripleo_common.actions.base.TripleOAction.'
'get_orchestration_client')
def test_run(self, mock_get_orchestration_client_client,
mock_get_object_client, mock_cache,
mock_get_template_contents, mock_env_files,
mock_uuid):
mock_env_files.return_value = ({}, {})
mock_ctx = mock.MagicMock()
swift = mock.MagicMock(url="http://test.com")
mock_env = yaml.safe_dump({
'name': constants.DEFAULT_CONTAINER_NAME,
'temp_environment': 'temp_environment',
'template': 'template',
'environments': [{u'path': u'environments/test.yaml'}],
}, default_flow_style=False)
mock_roles = yaml.safe_dump([{"name": "foo"}])
mock_network = yaml.safe_dump([{'enabled': False}])
mock_exclude = yaml.safe_dump({"name": "foo"})
swift.get_object.side_effect = (
({}, mock_env),
({}, mock_env),
({}, mock_roles),
({}, mock_network),
({}, mock_exclude),
({}, mock_env),
({}, mock_env),
({}, mock_env),
({}, mock_roles),
({}, mock_network),
({}, mock_exclude),
({}, mock_env),
({}, mock_env),
swiftexceptions.ClientException('atest2')
)
def return_container_files(*args):
return ('headers', [{'name': 'foo.role.j2.yaml'}])
swift.get_container = mock.MagicMock(
side_effect=return_container_files)
mock_get_object_client.return_value = swift
mock_heat = mock.MagicMock()
mock_get_orchestration_client_client.return_value = mock_heat
mock_heat.stacks.validate.return_value = {
"Type": "Foo",
"Description": "Le foo bar",
"Parameters": {"bar": {"foo": "bar barz"}},
"NestedParameters": {"Type": "foobar"}
}
mock_uuid.uuid4.return_value = "cheese"
expected_value = {
'environment_parameters': None,
'heat_resource_tree': {
'parameters': {'bar': {'foo': 'bar barz',
'name': 'bar'}},
'resources': {'cheese': {
'id': 'cheese',
'name': 'Root',
'description': 'Le foo bar',
'parameters': ['bar'],
'resources': ['cheese'],
'type': 'Foo'}
}
}
}
mock_get_template_contents.return_value = ({}, {
'heat_template_version': '2016-04-30'
})
# Test
test_parameters = {'SomeTestParameter': 42}
action = parameters.UpdateParametersAction(test_parameters)
return_value = action.run(mock_ctx)
mock_env_updated = yaml.safe_dump({
'name': constants.DEFAULT_CONTAINER_NAME,
'temp_environment': 'temp_environment',
'parameter_defaults': {'SomeTestParameter': 42},
'template': 'template',
'environments': [{u'path': u'environments/test.yaml'}]
}, default_flow_style=False)
swift.put_object.assert_any_call(
constants.DEFAULT_CONTAINER_NAME,
constants.PLAN_ENVIRONMENT,
mock_env_updated
)
mock_heat.stacks.validate.assert_called_once_with(
environment={},
files={},
show_nested=True,
template={'heat_template_version': '2016-04-30'},
)
mock_cache.assert_called_once_with(
mock_ctx,
"overcloud",
"tripleo.parameters.get",
expected_value
)
self.assertEqual(return_value, expected_value)
@mock.patch('heatclient.common.template_utils.'
'process_multiple_environments_and_files')
@mock.patch('heatclient.common.template_utils.'
'get_template_contents')
@mock.patch('tripleo_common.actions.base.TripleOAction.'
'cache_set')
@mock.patch('tripleo_common.actions.base.TripleOAction.'
'get_object_client')
@mock.patch('tripleo_common.actions.base.TripleOAction.'
'get_orchestration_client')
def test_run_new_key(self, mock_get_orchestration_client_client,
mock_get_object_client, mock_cache,
mock_get_template_contents, mock_env_files):
mock_env_files.return_value = ({}, {})
mock_ctx = mock.MagicMock()
swift = mock.MagicMock(url="http://test.com")
mock_env = yaml.safe_dump({
'name': constants.DEFAULT_CONTAINER_NAME,
'temp_environment': 'temp_environment',
'template': 'template',
'environments': [{u'path': u'environments/test.yaml'}],
}, default_flow_style=False)
mock_roles = yaml.safe_dump([{"name": "foo"}])
mock_network = yaml.safe_dump([{'enabled': False}])
mock_exclude = yaml.safe_dump({"name": "foo"})
swift.get_object.side_effect = (
({}, mock_env),
({}, mock_env),
({}, mock_roles),
({}, mock_network),
({}, mock_exclude),
({}, mock_env),
({}, mock_env),
({}, mock_env),
({}, mock_roles),
({}, mock_network),
({}, mock_exclude),
({}, mock_env),
({}, mock_env),
swiftexceptions.ClientException('atest2')
)
def return_container_files(*args):
return ('headers', [{'name': 'foo.role.j2.yaml'}])
swift.get_container = mock.MagicMock(
side_effect=return_container_files)
mock_get_object_client.return_value = swift
heat = mock.MagicMock()
heat.stacks.validate.return_value = {}
mock_get_orchestration_client_client.return_value = heat
mock_get_template_contents.return_value = ({}, {
'heat_template_version': '2016-04-30'
})
# Test
test_parameters = {'SomeTestParameter': 42}
action = parameters.UpdateParametersAction(test_parameters,
key='test_key')
action.run(mock_ctx)
mock_env_updated = yaml.safe_dump({
'name': constants.DEFAULT_CONTAINER_NAME,
'temp_environment': 'temp_environment',
'test_key': {'SomeTestParameter': 42},
'template': 'template',
'environments': [{u'path': u'environments/test.yaml'}]
}, default_flow_style=False)
swift.put_object.assert_any_call(
constants.DEFAULT_CONTAINER_NAME,
constants.PLAN_ENVIRONMENT,
mock_env_updated
)
heat.stacks.validate.assert_called_once_with(
environment={},
files={},
show_nested=True,
template={'heat_template_version': '2016-04-30'},
)
mock_cache.assert_called_once_with(
mock_ctx,
"overcloud",
"tripleo.parameters.get",
{'environment_parameters': None, 'heat_resource_tree': {}}
)
class UpdateRoleParametersActionTest(base.TestCase):
@mock.patch('heatclient.common.template_utils.'
'process_multiple_environments_and_files')
@mock.patch('heatclient.common.template_utils.'
'get_template_contents')
@mock.patch('tripleo_common.actions.base.TripleOAction.'
'cache_set')
@mock.patch('tripleo_common.utils.parameters.set_count_and_flavor_params')
@mock.patch('tripleo_common.actions.base.TripleOAction.'
'get_baremetal_client')
@mock.patch('tripleo_common.actions.base.TripleOAction.'
'get_compute_client')
@mock.patch('tripleo_common.actions.base.TripleOAction.'
'get_object_client')
@mock.patch('tripleo_common.actions.base.TripleOAction.'
'get_orchestration_client')
def test_run(self, mock_get_orchestration_client_client,
mock_get_object_client, mock_get_compute_client,
mock_get_baremetal_client, mock_set_count_and_flavor,
mock_cache, mock_get_template_contents, mock_env_files):
mock_env_files.return_value = ({}, {})
mock_ctx = mock.MagicMock()
swift = mock.MagicMock(url="http://test.com")
mock_env = yaml.safe_dump({
'name': 'overcast'
}, default_flow_style=False)
mock_roles = yaml.safe_dump([{"name": "foo"}])
mock_network = yaml.safe_dump([{'enabled': False}])
mock_exclude = yaml.safe_dump({"name": "foo"})
swift.get_object.side_effect = (
({}, mock_env),
({}, mock_env),
({}, mock_roles),
({}, mock_network),
({}, mock_exclude),
({}, mock_env),
({}, mock_env),
({}, mock_env),
({}, mock_roles),
({}, mock_network),
({}, mock_exclude),
({}, mock_env),
({}, mock_env),
swiftexceptions.ClientException('atest2')
)
def return_container_files(*args):
return ('headers', [{'name': 'foo.yaml'}])
swift.get_container = mock.MagicMock(
side_effect=return_container_files)
mock_get_object_client.return_value = swift
heat = mock.MagicMock()
heat.stacks.validate.return_value = {}
mock_get_orchestration_client_client.return_value = heat
mock_get_template_contents.return_value = ({}, {
'heat_template_version': '2016-04-30'
})
params = {'CephStorageCount': 1,
'OvercloudCephStorageFlavor': 'ceph-storage'}
mock_set_count_and_flavor.return_value = params
action = parameters.UpdateRoleParametersAction('ceph-storage',
'overcast')
action.run(mock_ctx)
mock_env_updated = yaml.safe_dump({
'name': 'overcast',
'parameter_defaults': params
}, default_flow_style=False)
swift.put_object.assert_any_call(
'overcast',
constants.PLAN_ENVIRONMENT,
mock_env_updated
)
heat.stacks.validate.assert_called_once_with(
environment={},
files={},
show_nested=True,
template={'heat_template_version': '2016-04-30'},
)
mock_cache.assert_called_once_with(
mock_ctx,
"overcast",
"tripleo.parameters.get",
{'environment_parameters': None, 'heat_resource_tree': {}}
)
class GeneratePasswordsActionTest(base.TestCase):
@mock.patch('tripleo_common.actions.base.TripleOAction.'
'cache_delete')
@mock.patch('tripleo_common.actions.base.TripleOAction.'
'get_orchestration_client')
@mock.patch('tripleo_common.utils.passwords.'
'get_snmpd_readonly_user_password')
@mock.patch('tripleo_common.actions.base.TripleOAction.'
'get_workflow_client', return_value="TestPassword")
@mock.patch('tripleo_common.actions.base.TripleOAction.get_object_client')
def test_run(self, mock_get_object_client,
mock_get_workflow_client,
mock_get_snmpd_readonly_user_password,
mock_get_orchestration_client, mock_cache):
mock_get_snmpd_readonly_user_password.return_value = "TestPassword"
mock_ctx = mock.MagicMock()
swift = mock.MagicMock(url="http://test.com")
mock_env = yaml.safe_dump({
'name': 'overcast',
'temp_environment': 'temp_environment',
'template': 'template',
'environments': [{u'path': u'environments/test.yaml'}],
}, default_flow_style=False)
swift.get_object.return_value = ({}, mock_env)
mock_get_object_client.return_value = swift
mock_orchestration = mock.MagicMock()
mock_orchestration.stacks.environment.return_value = {
'parameter_defaults': {}
}
mock_resource = mock.MagicMock()
mock_resource.attributes = {
'value': 'existing_value'
}
mock_orchestration.resources.get.return_value = mock_resource
mock_get_orchestration_client.return_value = mock_orchestration
action = parameters.GeneratePasswordsAction()
result = action.run(mock_ctx)
for password_param_name in constants.PASSWORD_PARAMETER_NAMES:
self.assertTrue(password_param_name in result,
"%s is not in %s" % (password_param_name, result))
if password_param_name in \
constants.LEGACY_HEAT_PASSWORD_RESOURCE_NAMES:
self.assertEqual(result[password_param_name], 'existing_value')
else:
self.assertNotEqual(result[password_param_name],
'existing_value')
mock_cache.assert_called_once_with(
mock_ctx,
"overcloud",
"tripleo.parameters.get"
)
@mock.patch('tripleo_common.actions.base.TripleOAction.'
'cache_delete')
@mock.patch('tripleo_common.actions.base.TripleOAction.'
'get_orchestration_client')
@mock.patch('tripleo_common.utils.passwords.'
'create_ssh_keypair')
@mock.patch('tripleo_common.utils.passwords.'
'create_fernet_keys_repo_structure_and_keys')
@mock.patch('tripleo_common.utils.passwords.'
'get_snmpd_readonly_user_password')
@mock.patch('tripleo_common.actions.base.TripleOAction.'
'get_workflow_client')
@mock.patch('tripleo_common.actions.base.TripleOAction.get_object_client')
def test_run_passwords_exist(self, mock_get_object_client,
mock_get_workflow_client,
mock_get_snmpd_readonly_user_password,
mock_fernet_keys_setup,
mock_create_ssh_keypair,
mock_get_orchestration_client,
mock_cache):
mock_get_snmpd_readonly_user_password.return_value = "TestPassword"
mock_create_ssh_keypair.return_value = {'public_key': 'Foo',
'private_key': 'Bar'}
mock_fernet_keys_setup.return_value = {'/tmp/foo': {'content': 'Foo'},
'/tmp/bar': {'content': 'Bar'}}
mock_ctx = mock.MagicMock()
swift = mock.MagicMock(url="http://test.com")
mock_env = yaml.safe_dump({
'name': constants.DEFAULT_CONTAINER_NAME,
'temp_environment': 'temp_environment',
'template': 'template',
'environments': [{u'path': u'environments/test.yaml'}],
'passwords': _EXISTING_PASSWORDS.copy()
}, default_flow_style=False)
swift.get_object.return_value = ({}, mock_env)
mock_get_object_client.return_value = swift
mock_orchestration = mock.MagicMock()
mock_orchestration.stacks.environment.return_value = {
'parameter_defaults': {}
}
mock_get_orchestration_client.return_value = mock_orchestration
action = parameters.GeneratePasswordsAction()
result = action.run(mock_ctx)
        # ensure the old passwords are reused and no new ones are generated
self.assertEqual(_EXISTING_PASSWORDS, result)
mock_cache.assert_called_once_with(
mock_ctx,
"overcloud",
"tripleo.parameters.get"
)
@mock.patch('tripleo_common.actions.base.TripleOAction.'
'cache_delete')
@mock.patch('tripleo_common.actions.base.TripleOAction.'
'get_orchestration_client')
@mock.patch('tripleo_common.utils.passwords.'
'create_ssh_keypair')
@mock.patch('tripleo_common.utils.passwords.'
'create_fernet_keys_repo_structure_and_keys')
@mock.patch('tripleo_common.utils.passwords.'
'get_snmpd_readonly_user_password')
@mock.patch('tripleo_common.actions.base.TripleOAction.'
'get_workflow_client')
@mock.patch('tripleo_common.actions.base.TripleOAction.get_object_client')
def test_run_rotate_no_rotate_list(self, mock_get_object_client,
mock_get_workflow_client,
mock_get_snmpd_readonly_user_password,
mock_fernet_keys_setup,
mock_create_ssh_keypair,
mock_get_orchestration_client,
mock_cache):
mock_get_snmpd_readonly_user_password.return_value = "TestPassword"
mock_create_ssh_keypair.return_value = {'public_key': 'Foo',
'private_key': 'Bar'}
mock_fernet_keys_setup.return_value = {'/tmp/foo': {'content': 'Foo'},
'/tmp/bar': {'content': 'Bar'}}
mock_ctx = mock.MagicMock()
swift = mock.MagicMock(url="http://test.com")
mock_env = yaml.safe_dump({
'name': constants.DEFAULT_CONTAINER_NAME,
'temp_environment': 'temp_environment',
'template': 'template',
'environments': [{u'path': u'environments/test.yaml'}],
'passwords': _EXISTING_PASSWORDS.copy()
}, default_flow_style=False)
swift.get_object.return_value = ({}, mock_env)
mock_get_object_client.return_value = swift
mock_orchestration = mock.MagicMock()
mock_orchestration.stacks.environment.return_value = {
'parameter_defaults': {}
}
mock_resource = mock.MagicMock()
mock_resource.attributes = {
'value': 'existing_value'
}
mock_orchestration.resources.get.return_value = mock_resource
mock_get_orchestration_client.return_value = mock_orchestration
action = parameters.GeneratePasswordsAction(rotate_passwords=True)
result = action.run(mock_ctx)
# ensure passwords in the DO_NOT_ROTATE_LIST are not modified
for name in constants.DO_NOT_ROTATE_LIST:
self.assertEqual(_EXISTING_PASSWORDS[name], result[name])
# ensure all passwords are generated
for name in constants.PASSWORD_PARAMETER_NAMES:
self.assertTrue(name in result, "%s is not in %s" % (name, result))
# ensure new passwords have been generated
self.assertNotEqual(_EXISTING_PASSWORDS, result)
mock_cache.assert_called_once_with(
mock_ctx,
"overcloud",
"tripleo.parameters.get"
)
@mock.patch('tripleo_common.actions.base.TripleOAction.'
'cache_delete')
@mock.patch('tripleo_common.actions.base.TripleOAction.'
'get_orchestration_client')
@mock.patch('tripleo_common.utils.passwords.'
'create_ssh_keypair')
@mock.patch('tripleo_common.utils.passwords.'
'create_fernet_keys_repo_structure_and_keys')
@mock.patch('tripleo_common.utils.passwords.'
'get_snmpd_readonly_user_password')
@mock.patch('tripleo_common.actions.base.TripleOAction.'
'get_workflow_client')
@mock.patch('tripleo_common.actions.base.TripleOAction.get_object_client')
def test_run_rotate_with_rotate_list(self, mock_get_object_client,
mock_get_workflow_client,
mock_get_snmpd_readonly_user_password,
mock_fernet_keys_setup,
mock_create_ssh_keypair,
mock_get_orchestration_client,
mock_cache):
mock_get_snmpd_readonly_user_password.return_value = "TestPassword"
mock_create_ssh_keypair.return_value = {'public_key': 'Foo',
'private_key': 'Bar'}
mock_fernet_keys_setup.return_value = {'/tmp/foo': {'content': 'Foo'},
'/tmp/bar': {'content': 'Bar'}}
mock_ctx = mock.MagicMock()
swift = mock.MagicMock(url="http://test.com")
mock_env = yaml.safe_dump({
'name': constants.DEFAULT_CONTAINER_NAME,
'temp_environment': 'temp_environment',
'template': 'template',
'environments': [{u'path': u'environments/test.yaml'}],
'passwords': _EXISTING_PASSWORDS.copy()
}, default_flow_style=False)
swift.get_object.return_value = ({}, mock_env)
mock_get_object_client.return_value = swift
mock_orchestration = mock.MagicMock()
mock_orchestration.stacks.environment.return_value = {
'parameter_defaults': {}
}
mock_resource = mock.MagicMock()
mock_resource.attributes = {
'value': 'existing_value'
}
mock_orchestration.resources.get.return_value = mock_resource
mock_get_orchestration_client.return_value = mock_orchestration
rotate_list = [
'MistralPassword',
'BarbicanPassword',
'AdminPassword',
'CeilometerMeteringSecret',
'ZaqarPassword',
'NovaPassword',
'MysqlRootPassword'
]
action = parameters.GeneratePasswordsAction(
rotate_passwords=True,
rotate_pw_list=rotate_list
)
result = action.run(mock_ctx)
# ensure only specified passwords are regenerated
for name in constants.PASSWORD_PARAMETER_NAMES:
self.assertTrue(name in result, "%s is not in %s" % (name, result))
if name in rotate_list:
self.assertNotEqual(_EXISTING_PASSWORDS[name], result[name])
else:
self.assertEqual(_EXISTING_PASSWORDS[name], result[name])
mock_cache.assert_called_once_with(
mock_ctx,
"overcloud",
"tripleo.parameters.get"
)
@mock.patch('tripleo_common.actions.base.TripleOAction.'
'cache_delete')
@mock.patch('tripleo_common.actions.base.TripleOAction.'
'get_orchestration_client')
@mock.patch('tripleo_common.utils.passwords.'
'create_ssh_keypair')
@mock.patch('tripleo_common.utils.passwords.'
'create_fernet_keys_repo_structure_and_keys')
@mock.patch('tripleo_common.utils.passwords.'
'get_snmpd_readonly_user_password')
@mock.patch('tripleo_common.actions.base.TripleOAction.'
'get_workflow_client')
@mock.patch('tripleo_common.actions.base.TripleOAction.get_object_client')
def test_passwords_exist_in_heat(self, mock_get_object_client,
mock_get_workflow_client,
mock_get_snmpd_readonly_user_password,
mock_fernet_keys_setup,
mock_create_ssh_keypair,
mock_get_orchestration_client,
mock_cache):
mock_get_snmpd_readonly_user_password.return_value = "TestPassword"
mock_create_ssh_keypair.return_value = {'public_key': 'Foo',
'private_key': 'Bar'}
mock_fernet_keys_setup.return_value = {'/tmp/foo': {'content': 'Foo'},
'/tmp/bar': {'content': 'Bar'}}
existing_passwords = _EXISTING_PASSWORDS.copy()
existing_passwords.pop("AdminPassword")
mock_ctx = mock.MagicMock()
swift = mock.MagicMock(url="http://test.com")
mock_env = yaml.safe_dump({
'name': constants.DEFAULT_CONTAINER_NAME,
'temp_environment': 'temp_environment',
'template': 'template',
'environments': [{u'path': u'environments/test.yaml'}],
'passwords': existing_passwords.copy()
}, default_flow_style=False)
swift.get_object.return_value = ({}, mock_env)
mock_get_object_client.return_value = swift
mock_orchestration = mock.MagicMock()
mock_orchestration.stacks.environment.return_value = {
'parameter_defaults': {
'AdminPassword': 'ExistingPasswordInHeat',
}
}
mock_get_orchestration_client.return_value = mock_orchestration
action = parameters.GeneratePasswordsAction()
result = action.run(mock_ctx)
existing_passwords["AdminPassword"] = "ExistingPasswordInHeat"
        # ensure the old passwords are reused and no new ones are generated
self.assertEqual(existing_passwords, result)
mock_cache.assert_called_once_with(
mock_ctx,
"overcloud",
"tripleo.parameters.get"
)
class GetPasswordsActionTest(base.TestCase):
@mock.patch('tripleo_common.actions.base.TripleOAction.'
'get_orchestration_client')
@mock.patch('tripleo_common.actions.base.TripleOAction.get_object_client')
def test_password_from_parameter_defaults(self,
mock_get_object_client,
mock_get_orchestration_client):
mock_ctx = mock.MagicMock()
swift = mock.MagicMock(url="http://test.com")
mock_env = yaml.safe_dump({
"name": constants.DEFAULT_CONTAINER_NAME,
"parameter_defaults": _EXISTING_PASSWORDS,
}, default_flow_style=False)
swift.get_object.return_value = ({}, mock_env)
mock_get_object_client.return_value = swift
mock_orchestration = mock.MagicMock()
mock_get_orchestration_client.return_value = mock_orchestration
action = parameters.GetPasswordsAction()
result = action.run(mock_ctx)
        # ensure the old passwords are reused and no new ones are generated
self.assertEqual(_EXISTING_PASSWORDS, result)
@mock.patch('tripleo_common.actions.base.TripleOAction.'
'get_orchestration_client')
@mock.patch('tripleo_common.actions.base.TripleOAction.get_object_client')
def test_password_from_generated_passwords(self,
mock_get_object_client,
mock_get_orchestration_client):
mock_ctx = mock.MagicMock()
swift = mock.MagicMock(url="http://test.com")
mock_env = yaml.safe_dump({
"name": constants.DEFAULT_CONTAINER_NAME,
"parameter_defaults": {},
"passwords": _EXISTING_PASSWORDS,
}, default_flow_style=False)
swift.get_object.return_value = ({}, mock_env)
mock_get_object_client.return_value = swift
mock_orchestration = mock.MagicMock()
mock_get_orchestration_client.return_value = mock_orchestration
action = parameters.GetPasswordsAction()
result = action.run(mock_ctx)
        # ensure the old passwords are reused and no new ones are generated
self.assertEqual(_EXISTING_PASSWORDS, result)
@mock.patch('tripleo_common.actions.base.TripleOAction.'
'get_orchestration_client')
@mock.patch('tripleo_common.actions.base.TripleOAction.get_object_client')
def test_password_merging_passwords(self,
mock_get_object_client,
mock_get_orchestration_client):
parameter_defaults = _EXISTING_PASSWORDS.copy()
passwords = {"AdminPassword": parameter_defaults.pop("AdminPassword")}
mock_ctx = mock.MagicMock()
swift = mock.MagicMock(url="http://test.com")
mock_env = yaml.safe_dump({
"name": constants.DEFAULT_CONTAINER_NAME,
"parameter_defaults": parameter_defaults,
"passwords": passwords
}, default_flow_style=False)
swift.get_object.return_value = ({}, mock_env)
mock_get_object_client.return_value = swift
mock_orchestration = mock.MagicMock()
mock_get_orchestration_client.return_value = mock_orchestration
action = parameters.GetPasswordsAction()
result = action.run(mock_ctx)
        # ensure the old passwords are reused and no new ones are generated
self.assertEqual(_EXISTING_PASSWORDS, result)
class GenerateFencingParametersActionTestCase(base.TestCase):
@mock.patch('tripleo_common.utils.nodes.generate_hostmap')
@mock.patch('tripleo_common.actions.base.TripleOAction.'
'get_compute_client')
@mock.patch('tripleo_common.actions.base.TripleOAction.'
'get_baremetal_client')
def test_no_success(self, mock_get_baremetal, mock_get_compute,
mock_generate_hostmap):
mock_ctx = mock.MagicMock()
test_hostmap = {
"00:11:22:33:44:55": {
"compute_name": "compute_name_0",
"baremetal_name": "baremetal_name_0"
},
"11:22:33:44:55:66": {
"compute_name": "compute_name_1",
"baremetal_name": "baremetal_name_1"
},
"aa:bb:cc:dd:ee:ff": {
"compute_name": "compute_name_4",
"baremetal_name": "baremetal_name_4"
},
"bb:cc:dd:ee:ff:gg": {
"compute_name": "compute_name_5",
"baremetal_name": "baremetal_name_5"
}
}
mock_generate_hostmap.return_value = test_hostmap
test_envjson = [{
"name": "control-0",
"pm_password": "control-0-password",
"pm_type": "ipmi",
"pm_user": "control-0-admin",
"pm_addr": "0.1.2.3",
"pm_port": "0123",
"mac": [
"00:11:22:33:44:55"
]
}, {
"name": "control-1",
"pm_password": "control-1-password",
# Still support deprecated drivers
"pm_type": "pxe_ipmitool",
"pm_user": "control-1-admin",
"pm_addr": "1.2.3.4",
"mac": [
"11:22:33:44:55:66"
]
}, {
# test node using redfish pm
"name": "compute-4",
"pm_password": "calvin",
"pm_type": "redfish",
"pm_user": "root",
"pm_addr": "172.16.0.1:8000",
"pm_port": "8000",
"redfish_verify_ca": "false",
"pm_system_id": "/redfish/v1/Systems/5678",
"mac": [
"aa:bb:cc:dd:ee:ff"
]
}, {
# This is an extra node on oVirt/RHV
"name": "control-3",
"pm_password": "ovirt-password",
"pm_type": "staging-ovirt",
"pm_user": "admin@internal",
"pm_addr": "3.4.5.6",
"pm_vm_name": "control-3",
"mac": [
"bb:cc:dd:ee:ff:gg"
]
}, {
# This is an extra node that is not in the hostmap, to ensure we
# cope with unprovisioned nodes
"name": "control-2",
"pm_password": "control-2-password",
"pm_type": "ipmi",
"pm_user": "control-2-admin",
"pm_addr": "2.3.4.5",
"mac": [
"22:33:44:55:66:77"
]
}
]
action = parameters.GenerateFencingParametersAction(test_envjson,
28,
5,
0,
True)
result = action.run(mock_ctx)["parameter_defaults"]
self.assertTrue(result["EnableFencing"])
self.assertEqual(len(result["FencingConfig"]["devices"]), 4)
self.assertEqual(result["FencingConfig"]["devices"][0], {
"agent": "fence_ipmilan",
"host_mac": "00:11:22:33:44:55",
"params": {
"delay": 28,
"ipaddr": "0.1.2.3",
"ipport": "0123",
"lanplus": True,
"privlvl": 5,
"login": "control-0-admin",
"passwd": "control-0-password",
"pcmk_host_list": "compute_name_0"
}
})
self.assertEqual(result["FencingConfig"]["devices"][1], {
"agent": "fence_ipmilan",
"host_mac": "11:22:33:44:55:66",
"params": {
"delay": 28,
"ipaddr": "1.2.3.4",
"lanplus": True,
"privlvl": 5,
"login": "control-1-admin",
"passwd": "control-1-password",
"pcmk_host_list": "compute_name_1"
}
})
self.assertEqual(result["FencingConfig"]["devices"][2], {
"agent": "fence_redfish",
"host_mac": "aa:bb:cc:dd:ee:ff",
"params": {
"delay": 28,
"ipaddr": "172.16.0.1:8000",
"ipport": "8000",
"lanplus": True,
"privlvl": 5,
"login": "root",
"passwd": "calvin",
"systems_uri": "/redfish/v1/Systems/5678",
"ssl_insecure": "true",
"pcmk_host_list": "compute_name_4"
}
})
self.assertEqual(result["FencingConfig"]["devices"][3], {
"agent": "fence_rhevm",
"host_mac": "bb:cc:dd:ee:ff:gg",
"params": {
"delay": 28,
"ipaddr": "3.4.5.6",
"login": "admin@internal",
"passwd": "ovirt-password",
"port": "control-3",
"ssl": 1,
"ssl_insecure": 1,
"pcmk_host_list": "compute_name_5"
}
})
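        # Editor's summary (hedged, derived from the assertions above): the
        # action maps pm_type values to fencing agents roughly as
        #   ipmi / pxe_ipmitool -> fence_ipmilan
        #   redfish             -> fence_redfish
        #   staging-ovirt       -> fence_rhevm
        # and nodes missing from the hostmap (control-2 here) yield no device.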
class GetFlattenedParametersActionTest(base.TestCase):
@mock.patch('tripleo_common.actions.base.TripleOAction.'
'cache_set')
@mock.patch('tripleo_common.actions.base.TripleOAction.'
'cache_get')
@mock.patch('heatclient.common.template_utils.'
'process_multiple_environments_and_files')
@mock.patch('heatclient.common.template_utils.get_template_contents')
@mock.patch('tripleo_common.actions.base.TripleOAction.'
'get_orchestration_client')
@mock.patch('tripleo_common.actions.base.TripleOAction.get_object_client')
def test_empty_resource_tree(self, mock_get_object_client,
mock_get_orchestration_client,
mock_get_template_contents,
mock_process_multiple_environments_and_files,
mock_cache_get,
mock_cache_set):
mock_ctx = mock.MagicMock()
mock_cache_get.return_value = None
swift = mock.MagicMock(url="http://test.com")
mock_env = yaml.safe_dump({
'temp_environment': 'temp_environment',
'template': 'template',
'environments': [{u'path': u'environments/test.yaml'}]
}, default_flow_style=False)
swift.get_object.side_effect = (
({}, mock_env),
swiftexceptions.ClientException('atest2'),
({}, mock_env)
)
mock_get_object_client.return_value = swift
mock_get_template_contents.return_value = ({}, {
'heat_template_version': '2016-04-30'
})
mock_process_multiple_environments_and_files.return_value = ({}, {})
mock_heat = mock.MagicMock()
mock_get_orchestration_client.return_value = mock_heat
mock_heat.stacks.validate.return_value = {}
expected_value = {
'heat_resource_tree': {},
'environment_parameters': None,
}
# Test
action = parameters.GetFlattenedParametersAction()
result = action.run(mock_ctx)
mock_heat.stacks.validate.assert_called_once_with(
environment={},
files={},
show_nested=True,
template={'heat_template_version': '2016-04-30'},
)
self.assertEqual(result, expected_value)
@mock.patch('tripleo_common.actions.base.TripleOAction.'
'cache_set')
@mock.patch('tripleo_common.actions.base.TripleOAction.'
'cache_get')
@mock.patch('uuid.uuid4', side_effect=['1', '2'])
@mock.patch('heatclient.common.template_utils.'
'process_multiple_environments_and_files')
@mock.patch('heatclient.common.template_utils.get_template_contents')
@mock.patch('tripleo_common.actions.base.TripleOAction.'
'get_orchestration_client')
@mock.patch('tripleo_common.actions.base.TripleOAction.get_object_client')
def test_valid_resource_tree(self, mock_get_object_client,
mock_get_orchestration_client,
mock_get_template_contents,
mock_process_multiple_environments_and_files,
mock_uuid,
mock_cache_get,
mock_cache_set):
mock_ctx = mock.MagicMock()
mock_cache_get.return_value = None
swift = mock.MagicMock(url="http://test.com")
mock_env = yaml.safe_dump({
'temp_environment': 'temp_environment',
'template': 'template',
'environments': [{u'path': u'environments/test.yaml'}]
}, default_flow_style=False)
swift.get_object.side_effect = (
({}, mock_env),
swiftexceptions.ClientException('atest2'),
({}, mock_env)
)
mock_get_object_client.return_value = swift
mock_get_template_contents.return_value = ({}, {
'heat_template_version': '2016-04-30'
})
mock_process_multiple_environments_and_files.return_value = ({}, {})
mock_heat = mock.MagicMock()
mock_get_orchestration_client.return_value = mock_heat
mock_heat.stacks.validate.return_value = {
'NestedParameters': {
'CephStorageHostsDeployment': {
'Type': 'OS::Heat::StructuredDeployments',
},
},
'description': 'sample',
'Parameters': {
'ControllerCount': {
'Default': 1,
'Type': 'Number',
},
}
}
expected_value = {
'heat_resource_tree': {
'resources': {
'1': {
'id': '1',
'name': 'Root',
'resources': [
'2'
],
'parameters': [
'ControllerCount'
]
},
'2': {
'id': '2',
'name': 'CephStorageHostsDeployment',
'type': 'OS::Heat::StructuredDeployments'
}
},
'parameters': {
'ControllerCount': {
'default': 1,
'type': 'Number',
'name': 'ControllerCount'
}
},
},
'environment_parameters': None,
}
# Test
action = parameters.GetFlattenedParametersAction()
result = action.run(mock_ctx)
self.assertEqual(result, expected_value)
class GetProfileOfFlavorActionTest(base.TestCase):
@mock.patch('tripleo_common.utils.parameters.get_profile_of_flavor')
@mock.patch('tripleo_common.actions.base.TripleOAction.'
'get_compute_client')
def test_profile_found(self, mock_get_compute_client,
mock_get_profile_of_flavor):
mock_ctx = mock.MagicMock()
mock_get_profile_of_flavor.return_value = 'compute'
action = parameters.GetProfileOfFlavorAction('oooq_compute')
result = action.run(mock_ctx)
expected_result = "compute"
self.assertEqual(result, expected_result)
@mock.patch('tripleo_common.utils.parameters.get_profile_of_flavor')
@mock.patch('tripleo_common.actions.base.TripleOAction.'
'get_compute_client')
def test_profile_not_found(self, mock_get_compute_client,
mock_get_profile_of_flavor):
mock_ctx = mock.MagicMock()
profile = (exception.DeriveParamsError, )
mock_get_profile_of_flavor.side_effect = profile
action = parameters.GetProfileOfFlavorAction('no_profile')
result = action.run(mock_ctx)
self.assertTrue(result.is_error())
mock_get_profile_of_flavor.assert_called_once()
class RotateFernetKeysActionTest(base.TestCase):
def test_get_next_index(self):
action = parameters.RotateFernetKeysAction()
keys_map = {
password_utils.KEYSTONE_FERNET_REPO + '0': {
'content': 'Some key'},
password_utils.KEYSTONE_FERNET_REPO + '1': {
'content': 'Some other key'},
}
next_index = action.get_next_index(keys_map)
self.assertEqual(next_index, 2)
@mock.patch('tripleo_common.utils.passwords.'
'create_keystone_credential')
def test_rotate_keys(self, mock_keystone_creds):
action = parameters.RotateFernetKeysAction()
mock_keystone_creds.return_value = 'Some new key'
staged_key_index = password_utils.KEYSTONE_FERNET_REPO + '0'
new_primary_key_index = password_utils.KEYSTONE_FERNET_REPO + '2'
keys_map = {
password_utils.KEYSTONE_FERNET_REPO + '0': {
'content': 'Some key'},
password_utils.KEYSTONE_FERNET_REPO + '1': {
'content': 'Some other key'},
}
new_keys_map = action.rotate_keys(keys_map, 2)
# Staged key should be the new key
self.assertEqual('Some new key',
new_keys_map[staged_key_index]['content'])
# primary key should be the previous staged key
self.assertEqual('Some key',
new_keys_map[new_primary_key_index]['content'])
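        # Rotation sketch (editor's addition, hedged): with two keys the call
        # rotate_keys(keys_map, 2) behaves roughly as
        #   before: {repo0: staged, repo1: primary}
        #   after:  {repo0: brand-new staged key, repo1: unchanged,
        #            repo2: old staged key promoted to primary}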
def test_purge_excess_keys_should_purge(self):
action = parameters.RotateFernetKeysAction()
keys_map = {
password_utils.KEYSTONE_FERNET_REPO + '0': {
'content': 'key0'},
password_utils.KEYSTONE_FERNET_REPO + '1': {
'content': 'key1'},
password_utils.KEYSTONE_FERNET_REPO + '2': {
'content': 'key2'},
password_utils.KEYSTONE_FERNET_REPO + '3': {
'content': 'key3'},
password_utils.KEYSTONE_FERNET_REPO + '4': {
'content': 'key4'},
}
max_keys = 3
keys_map = action.purge_excess_keys(max_keys, keys_map)
self.assertEqual(max_keys, len(keys_map))
        # It should keep indices 0, 3 and 4
self.assertIn(password_utils.KEYSTONE_FERNET_REPO + '0', keys_map)
self.assertIn(password_utils.KEYSTONE_FERNET_REPO + '3', keys_map)
self.assertIn(password_utils.KEYSTONE_FERNET_REPO + '4', keys_map)
        # It should have removed indices 1 and 2
self.assertNotIn(password_utils.KEYSTONE_FERNET_REPO + '1', keys_map)
self.assertNotIn(password_utils.KEYSTONE_FERNET_REPO + '2', keys_map)
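        # Editor's note (hedged): the purge policy keeps key 0 (the staged
        # slot) plus the highest-numbered keys, discarding the oldest
        # intermediate indices first.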
def test_purge_excess_keys_should_not_purge_if_equal_to_max(self):
action = parameters.RotateFernetKeysAction()
keys_map = {
password_utils.KEYSTONE_FERNET_REPO + '0': {
'content': 'key0'},
password_utils.KEYSTONE_FERNET_REPO + '1': {
'content': 'key1'},
password_utils.KEYSTONE_FERNET_REPO + '2': {
'content': 'key2'},
}
max_keys = 3
keys_map = action.purge_excess_keys(max_keys, keys_map)
self.assertEqual(max_keys, len(keys_map))
def test_purge_excess_keys_should_not_purge_if_less_than_max(self):
action = parameters.RotateFernetKeysAction()
keys_map = {
password_utils.KEYSTONE_FERNET_REPO + '0': {
'content': 'key0'},
password_utils.KEYSTONE_FERNET_REPO + '1': {
'content': 'key1'},
}
max_keys = 3
keys_map = action.purge_excess_keys(max_keys, keys_map)
self.assertEqual(2, len(keys_map))
class GetNetworkConfigActionTest(base.TestCase):
@mock.patch('tripleo_common.actions.base.TripleOAction.'
'cache_set')
@mock.patch('tripleo_common.actions.base.TripleOAction.'
'cache_get')
@mock.patch('heatclient.common.template_utils.'
'process_multiple_environments_and_files')
@mock.patch('heatclient.common.template_utils.get_template_contents')
@mock.patch('tripleo_common.actions.base.TripleOAction.'
'get_orchestration_client')
@mock.patch('tripleo_common.actions.base.TripleOAction.'
'get_workflow_client')
@mock.patch('tripleo_common.actions.base.TripleOAction.get_object_client')
def test_run_valid_network_config(
self, mock_get_object_client, mock_get_workflow_client,
mock_get_orchestration_client, mock_get_template_contents,
mock_process_multiple_environments_and_files,
mock_cache_get,
mock_cache_set):
mock_ctx = mock.MagicMock()
swift = mock.MagicMock(url="http://test.com")
mock_env = yaml.safe_dump({
'temp_environment': 'temp_environment',
'template': 'template',
'environments': [{u'path': u'environments/test.yaml'}]
}, default_flow_style=False)
swift.get_object.side_effect = (
({}, mock_env),
swiftexceptions.ClientException('atest2'),
({}, mock_env)
)
mock_get_object_client.return_value = swift
mock_get_template_contents.return_value = ({}, {
'heat_template_version': '2016-04-30'
})
mock_process_multiple_environments_and_files.return_value = ({}, {})
mock_heat = mock.MagicMock()
mock_heat.stacks.preview.return_value = mock.Mock(resources=[{
"resource_identity": {"stack_name": "overcloud-TEMP-Compute-0"},
"resource_name": "OsNetConfigImpl",
"properties": {"config": "echo \'{\"network_config\": {}}\'"}
}])
mock_get_orchestration_client.return_value = mock_heat
mock_cache_get.return_value = None
expected = {"network_config": {}}
# Test
action = parameters.GetNetworkConfigAction(container='overcloud',
role_name='Compute')
result = action.run(mock_ctx)
self.assertEqual(expected, result)
mock_heat.stacks.preview.assert_called_once_with(
environment={},
files={},
template={'heat_template_version': '2016-04-30'},
stack_name='overcloud-TEMP',
)
@mock.patch('tripleo_common.actions.base.TripleOAction.'
'cache_set')
@mock.patch('tripleo_common.actions.base.TripleOAction.'
'cache_get')
@mock.patch('heatclient.common.template_utils.'
'process_multiple_environments_and_files')
@mock.patch('heatclient.common.template_utils.get_template_contents')
@mock.patch('tripleo_common.actions.base.TripleOAction.'
'get_orchestration_client')
@mock.patch('tripleo_common.actions.base.TripleOAction.'
'get_workflow_client')
@mock.patch('tripleo_common.actions.base.TripleOAction.get_object_client')
def test_run_invalid_network_config(
self, mock_get_object_client,
mock_get_workflow_client, mock_get_orchestration_client,
mock_get_template_contents,
mock_process_multiple_environments_and_files,
mock_cache_get, mock_cache_set):
mock_ctx = mock.MagicMock()
swift = mock.MagicMock(url="http://test.com")
mock_env = yaml.safe_dump({
'temp_environment': 'temp_environment',
'template': 'template',
'environments': [{u'path': u'environments/test.yaml'}]
}, default_flow_style=False)
swift.get_object.side_effect = (
({}, mock_env),
swiftexceptions.ClientException('atest2'),
({}, mock_env)
)
mock_get_object_client.return_value = swift
mock_get_template_contents.return_value = ({}, {
'heat_template_version': '2016-04-30'
})
mock_process_multiple_environments_and_files.return_value = ({}, {})
mock_heat = mock.MagicMock()
mock_heat.stacks.preview.return_value = mock.Mock(resources=[{
"resource_identity": {"stack_name": "overcloud-TEMP-Compute-0"},
"resource_name": "OsNetConfigImpl",
"properties": {"config": ""}
}])
mock_get_orchestration_client.return_value = mock_heat
mock_cache_get.return_value = None
# Test
action = parameters.GetNetworkConfigAction(container='overcloud',
role_name='Compute')
result = action.run(mock_ctx)
self.assertTrue(result.is_error())
mock_heat.stacks.preview.assert_called_once_with(
environment={},
files={},
template={'heat_template_version': '2016-04-30'},
stack_name='overcloud-TEMP',
)
| 41.713099 | 79 | 0.605153 |
4a20373b72940dd1c7a1c69724ed1fe576fda8a1 | 18,481 | py | Python | gbmgeometry/_version.py | drJfunk/gbmgeometry | ca11005c349546ed962bb1bbc4f66d8022ea79a1 | [
"MIT"
] | 4 | 2019-10-31T06:28:13.000Z | 2020-03-28T14:31:07.000Z | gbmgeometry/_version.py | drJfunk/gbmgeometry | ca11005c349546ed962bb1bbc4f66d8022ea79a1 | [
"MIT"
] | 4 | 2020-03-04T16:16:39.000Z | 2020-04-08T11:28:03.000Z | gbmgeometry/_version.py | drJfunk/gbmgeometry | ca11005c349546ed962bb1bbc4f66d8022ea79a1 | [
"MIT"
] | 7 | 2017-10-26T09:32:37.000Z | 2022-03-21T16:32:20.000Z | # This file helps to compute a version number in source trees obtained from
# git-archive tarball (such as those provided by githubs download-from-tag
# feature). Distribution tarballs (built by setup.py sdist) and build
# directories (produced by setup.py build) will contain a much shorter file
# that just contains the computed version number.
# This file is released into the public domain. Generated by
# versioneer-0.18 (https://github.com/warner/python-versioneer)
"""Git implementation of _version.py."""
import errno
import os
import re
import subprocess
import sys
def get_keywords():
"""Get the keywords needed to look up the version information."""
# these strings will be replaced by git during git-archive.
# setup.py/versioneer.py will grep for the variable names, so they must
# each be defined on a line of their own. _version.py will just call
# get_keywords().
git_refnames = "$Format:%d$"
git_full = "$Format:%H$"
git_date = "$Format:%ci$"
keywords = {"refnames": git_refnames, "full": git_full, "date": git_date}
return keywords
class VersioneerConfig:
"""Container for Versioneer configuration parameters."""
def get_config():
"""Create, populate and return the VersioneerConfig() object."""
# these strings are filled in when 'setup.py versioneer' creates
# _version.py
cfg = VersioneerConfig()
cfg.VCS = "git"
cfg.style = "pep440"
cfg.tag_prefix = "v"
cfg.parentdir_prefix = "gbmgeometry-"
cfg.versionfile_source = "gbmgeometry/_version.py"
cfg.verbose = False
return cfg
class NotThisMethod(Exception):
"""Exception raised if a method is not valid for the current scenario."""
LONG_VERSION_PY = {}
HANDLERS = {}
def register_vcs_handler(vcs, method): # decorator
"""Decorator to mark a method as the handler for a particular VCS."""
def decorate(f):
"""Store f in HANDLERS[vcs][method]."""
if vcs not in HANDLERS:
HANDLERS[vcs] = {}
HANDLERS[vcs][method] = f
return f
return decorate
def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False, env=None):
"""Call the given command(s)."""
assert isinstance(commands, list)
p = None
for c in commands:
try:
dispcmd = str([c] + args)
# remember shell=False, so use git.cmd on windows, not just git
p = subprocess.Popen(
[c] + args,
cwd=cwd,
env=env,
stdout=subprocess.PIPE,
stderr=(subprocess.PIPE if hide_stderr else None),
)
break
except EnvironmentError:
e = sys.exc_info()[1]
if e.errno == errno.ENOENT:
continue
if verbose:
print("unable to run %s" % dispcmd)
print(e)
return None, None
else:
if verbose:
print("unable to find command, tried %s" % (commands,))
return None, None
stdout = p.communicate()[0].strip()
if sys.version_info[0] >= 3:
stdout = stdout.decode()
if p.returncode != 0:
if verbose:
print("unable to run %s (error)" % dispcmd)
print("stdout was %s" % stdout)
return None, p.returncode
return stdout, p.returncode
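# Usage sketch (editor's addition, hedged): run_command tries each candidate
# executable in order and returns (stdout, returncode); it returns
# (None, None) when no candidate could be launched at all, e.g.:
#   out, rc = run_command(["git"], ["rev-parse", "HEAD"], cwd=".")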
def versions_from_parentdir(parentdir_prefix, root, verbose):
"""Try to determine the version from the parent directory name.
Source tarballs conventionally unpack into a directory that includes both
the project name and a version string. We will also support searching up
two directory levels for an appropriately named parent directory
"""
rootdirs = []
for i in range(3):
dirname = os.path.basename(root)
if dirname.startswith(parentdir_prefix):
return {
"version": dirname[len(parentdir_prefix) :],
"full-revisionid": None,
"dirty": False,
"error": None,
"date": None,
}
else:
rootdirs.append(root)
root = os.path.dirname(root) # up a level
if verbose:
print(
"Tried directories %s but none started with prefix %s"
% (str(rootdirs), parentdir_prefix)
)
raise NotThisMethod("rootdir doesn't start with parentdir_prefix")
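# Example (editor's addition, hedged): with parentdir_prefix "gbmgeometry-",
# an unpacked tarball directory such as /tmp/gbmgeometry-1.2.3/ yields
# {"version": "1.2.3", ...}; the search also walks up to two parent
# directories before giving up with NotThisMethod.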
@register_vcs_handler("git", "get_keywords")
def git_get_keywords(versionfile_abs):
"""Extract version information from the given file."""
# the code embedded in _version.py can just fetch the value of these
# keywords. When used from setup.py, we don't want to import _version.py,
# so we do it with a regexp instead. This function is not used from
# _version.py.
keywords = {}
try:
f = open(versionfile_abs, "r")
for line in f.readlines():
if line.strip().startswith("git_refnames ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["refnames"] = mo.group(1)
if line.strip().startswith("git_full ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["full"] = mo.group(1)
if line.strip().startswith("git_date ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["date"] = mo.group(1)
f.close()
except EnvironmentError:
pass
return keywords
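# Editor's note (hedged): this regexp scan lets setup.py read the keyword
# values without importing the module, e.g. an expanded line such as
#   git_refnames = " (tag: v1.2.3)"
# ends up as keywords["refnames"].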
@register_vcs_handler("git", "keywords")
def git_versions_from_keywords(keywords, tag_prefix, verbose):
"""Get version information from git keywords."""
if not keywords:
raise NotThisMethod("no keywords at all, weird")
date = keywords.get("date")
if date is not None:
        # git-2.2.0 added "%cI", which expands to an ISO-8601-compliant
# datestamp. However we prefer "%ci" (which expands to an "ISO-8601
# -like" string, which we must then edit to make compliant), because
# it's been around since git-1.5.3, and it's too difficult to
# discover which version we're using, or to work around using an
# older one.
date = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
refnames = keywords["refnames"].strip()
if refnames.startswith("$Format"):
if verbose:
print("keywords are unexpanded, not using")
raise NotThisMethod("unexpanded keywords, not a git-archive tarball")
refs = set([r.strip() for r in refnames.strip("()").split(",")])
# starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of
# just "foo-1.0". If we see a "tag: " prefix, prefer those.
TAG = "tag: "
tags = set([r[len(TAG) :] for r in refs if r.startswith(TAG)])
if not tags:
# Either we're using git < 1.8.3, or there really are no tags. We use
# a heuristic: assume all version tags have a digit. The old git %d
# expansion behaves like git log --decorate=short and strips out the
# refs/heads/ and refs/tags/ prefixes that would let us distinguish
# between branches and tags. By ignoring refnames without digits, we
# filter out many common branch names like "release" and
# "stabilization", as well as "HEAD" and "master".
tags = set([r for r in refs if re.search(r"\d", r)])
if verbose:
print("discarding '%s', no digits" % ",".join(refs - tags))
if verbose:
print("likely tags: %s" % ",".join(sorted(tags)))
for ref in sorted(tags):
# sorting will prefer e.g. "2.0" over "2.0rc1"
if ref.startswith(tag_prefix):
r = ref[len(tag_prefix) :]
if verbose:
print("picking %s" % r)
return {
"version": r,
"full-revisionid": keywords["full"].strip(),
"dirty": False,
"error": None,
"date": date,
}
# no suitable tags, so version is "0+unknown", but full hex is still there
if verbose:
print("no suitable tags, using unknown + full revision id")
return {
"version": "0+unknown",
"full-revisionid": keywords["full"].strip(),
"dirty": False,
"error": "no suitable tags",
"date": None,
}
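# Worked example with assumed keyword values: expanded git-archive keywords
# containing "tag: v1.4.0" resolve to version "1.4.0" for tag_prefix "v".
_kw_example = {"refnames": " (HEAD -> main, tag: v1.4.0)", "full": "abc1234 ", "date": None}
_kw_result = git_versions_from_keywords(_kw_example, "v", verbose=False)
assert _kw_result["version"] == "1.4.0" and _kw_result["full-revisionid"] == "abc1234"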
@register_vcs_handler("git", "pieces_from_vcs")
def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command):
"""Get version from 'git describe' in the root of the source tree.
This only gets called if the git-archive 'subst' keywords were *not*
expanded, and _version.py hasn't already been rewritten with a short
version string, meaning we're inside a checked out source tree.
"""
GITS = ["git"]
if sys.platform == "win32":
GITS = ["git.cmd", "git.exe"]
out, rc = run_command(GITS, ["rev-parse", "--git-dir"], cwd=root, hide_stderr=True)
if rc != 0:
if verbose:
print("Directory %s not under git control" % root)
raise NotThisMethod("'git rev-parse --git-dir' returned error")
# if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty]
# if there isn't one, this yields HEX[-dirty] (no NUM)
describe_out, rc = run_command(
GITS,
[
"describe",
"--tags",
"--dirty",
"--always",
"--long",
"--match",
"%s*" % tag_prefix,
],
cwd=root,
)
# --long was added in git-1.5.5
if describe_out is None:
raise NotThisMethod("'git describe' failed")
describe_out = describe_out.strip()
full_out, rc = run_command(GITS, ["rev-parse", "HEAD"], cwd=root)
if full_out is None:
raise NotThisMethod("'git rev-parse' failed")
full_out = full_out.strip()
pieces = {}
pieces["long"] = full_out
pieces["short"] = full_out[:7] # maybe improved later
pieces["error"] = None
# parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty]
# TAG might have hyphens.
git_describe = describe_out
# look for -dirty suffix
dirty = git_describe.endswith("-dirty")
pieces["dirty"] = dirty
if dirty:
git_describe = git_describe[: git_describe.rindex("-dirty")]
# now we have TAG-NUM-gHEX or HEX
if "-" in git_describe:
# TAG-NUM-gHEX
mo = re.search(r"^(.+)-(\d+)-g([0-9a-f]+)$", git_describe)
if not mo:
# unparseable. Maybe git-describe is misbehaving?
pieces["error"] = "unable to parse git-describe output: '%s'" % describe_out
return pieces
# tag
full_tag = mo.group(1)
if not full_tag.startswith(tag_prefix):
if verbose:
fmt = "tag '%s' doesn't start with prefix '%s'"
print(fmt % (full_tag, tag_prefix))
pieces["error"] = "tag '%s' doesn't start with prefix '%s'" % (
full_tag,
tag_prefix,
)
return pieces
pieces["closest-tag"] = full_tag[len(tag_prefix) :]
# distance: number of commits since tag
pieces["distance"] = int(mo.group(2))
# commit: short hex revision ID
pieces["short"] = mo.group(3)
else:
# HEX: no tags
pieces["closest-tag"] = None
count_out, rc = run_command(GITS, ["rev-list", "HEAD", "--count"], cwd=root)
pieces["distance"] = int(count_out) # total number of commits
# commit date: see ISO-8601 comment in git_versions_from_keywords()
date = run_command(GITS, ["show", "-s", "--format=%ci", "HEAD"], cwd=root)[
0
].strip()
pieces["date"] = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
return pieces
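# Shape of the "pieces" dict built above, shown for an assumed describe output
# of "v1.2.0-3-gabc1234-dirty" with tag_prefix "v" (values are illustrative):
_pieces_example = {
    "long": "abc1234" + "0" * 33,  # full 40-hex revision from rev-parse
    "short": "abc1234",
    "error": None,
    "dirty": True,
    "closest-tag": "1.2.0",
    "distance": 3,
    "date": "2021-01-06T15:04:05+0000",
}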
def plus_or_dot(pieces):
"""Return a + if we don't already have one, else return a ."""
if "+" in pieces.get("closest-tag", ""):
return "."
return "+"
def render_pep440(pieces):
"""Build up version string, with post-release "local version identifier".
Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you
get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty
Exceptions:
1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += plus_or_dot(pieces)
rendered += "%d.g%s" % (pieces["distance"], pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
else:
# exception #1
rendered = "0+untagged.%d.g%s" % (pieces["distance"], pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
return rendered
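# Quick self-check of the rendering rule above (assumed pieces): a dirty tree
# three commits past tag 1.2.0 renders with a local version identifier.
assert render_pep440(
    {"closest-tag": "1.2.0", "distance": 3, "short": "abc1234", "dirty": True}
) == "1.2.0+3.gabc1234.dirty"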
def render_pep440_pre(pieces):
"""TAG[.post.devDISTANCE] -- No -dirty.
Exceptions:
1: no tags. 0.post.devDISTANCE
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"]:
rendered += ".post.dev%d" % pieces["distance"]
else:
# exception #1
rendered = "0.post.dev%d" % pieces["distance"]
return rendered
def render_pep440_post(pieces):
"""TAG[.postDISTANCE[.dev0]+gHEX] .
The ".dev0" means dirty. Note that .dev0 sorts backwards
(a dirty tree will appear "older" than the corresponding clean one),
but you shouldn't be releasing software with -dirty anyways.
Exceptions:
1: no tags. 0.postDISTANCE[.dev0]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
rendered += plus_or_dot(pieces)
rendered += "g%s" % pieces["short"]
else:
# exception #1
rendered = "0.post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
rendered += "+g%s" % pieces["short"]
return rendered
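# Quick self-check of the post-release style above (assumed pieces): a clean
# tree two commits past tag 1.2.0 renders as a .postN plus the short hash.
assert render_pep440_post(
    {"closest-tag": "1.2.0", "distance": 2, "short": "abc1234", "dirty": False}
) == "1.2.0.post2+gabc1234"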
def render_pep440_old(pieces):
"""TAG[.postDISTANCE[.dev0]] .
The ".dev0" means dirty.
    Exceptions:
1: no tags. 0.postDISTANCE[.dev0]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
else:
# exception #1
rendered = "0.post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
return rendered
def render_git_describe(pieces):
"""TAG[-DISTANCE-gHEX][-dirty].
Like 'git describe --tags --dirty --always'.
Exceptions:
1: no tags. HEX[-dirty] (note: no 'g' prefix)
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"]:
rendered += "-%d-g%s" % (pieces["distance"], pieces["short"])
else:
# exception #1
rendered = pieces["short"]
if pieces["dirty"]:
rendered += "-dirty"
return rendered
def render_git_describe_long(pieces):
"""TAG-DISTANCE-gHEX[-dirty].
    Like 'git describe --tags --dirty --always --long'.
The distance/hash is unconditional.
Exceptions:
1: no tags. HEX[-dirty] (note: no 'g' prefix)
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
rendered += "-%d-g%s" % (pieces["distance"], pieces["short"])
else:
# exception #1
rendered = pieces["short"]
if pieces["dirty"]:
rendered += "-dirty"
return rendered
def render(pieces, style):
"""Render the given version pieces into the requested style."""
if pieces["error"]:
return {
"version": "unknown",
"full-revisionid": pieces.get("long"),
"dirty": None,
"error": pieces["error"],
"date": None,
}
if not style or style == "default":
style = "pep440" # the default
if style == "pep440":
rendered = render_pep440(pieces)
elif style == "pep440-pre":
rendered = render_pep440_pre(pieces)
elif style == "pep440-post":
rendered = render_pep440_post(pieces)
elif style == "pep440-old":
rendered = render_pep440_old(pieces)
elif style == "git-describe":
rendered = render_git_describe(pieces)
elif style == "git-describe-long":
rendered = render_git_describe_long(pieces)
else:
raise ValueError("unknown style '%s'" % style)
return {
"version": rendered,
"full-revisionid": pieces["long"],
"dirty": pieces["dirty"],
"error": None,
"date": pieces.get("date"),
}
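# Dispatch example for render() with assumed pieces: an exact, clean tag
# renders to the bare tag in both the default pep440 and git-describe styles.
_render_pieces = {"closest-tag": "1.0", "distance": 0, "short": "abc1234",
                  "dirty": False, "long": "abc1234" + "0" * 33, "error": None, "date": None}
assert render(_render_pieces, "pep440")["version"] == "1.0"
assert render(_render_pieces, "git-describe")["version"] == "1.0"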
def get_versions():
"""Get version information or return default if unable to do so."""
# I am in _version.py, which lives at ROOT/VERSIONFILE_SOURCE. If we have
# __file__, we can work backwards from there to the root. Some
# py2exe/bbfreeze/non-CPython implementations don't do __file__, in which
# case we can only use expanded keywords.
cfg = get_config()
verbose = cfg.verbose
try:
return git_versions_from_keywords(get_keywords(), cfg.tag_prefix, verbose)
except NotThisMethod:
pass
try:
root = os.path.realpath(__file__)
# versionfile_source is the relative path from the top of the source
# tree (where the .git directory might live) to this file. Invert
# this to find the root from __file__.
for i in cfg.versionfile_source.split("/"):
root = os.path.dirname(root)
except NameError:
return {
"version": "0+unknown",
"full-revisionid": None,
"dirty": None,
"error": "unable to find root of source tree",
"date": None,
}
try:
pieces = git_pieces_from_vcs(cfg.tag_prefix, root, verbose)
return render(pieces, cfg.style)
except NotThisMethod:
pass
try:
if cfg.parentdir_prefix:
return versions_from_parentdir(cfg.parentdir_prefix, root, verbose)
except NotThisMethod:
pass
return {
"version": "0+unknown",
"full-revisionid": None,
"dirty": None,
"error": "unable to compute version",
"date": None,
}
# --- File: tests/model_tests.py | Repo: thibault-ketterer/incubator-superset | License: Apache-2.0 ---
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# isort:skip_file
import textwrap
import unittest
import pandas
from sqlalchemy.engine.url import make_url
import tests.test_app
from superset import app, db as metadata_db
from superset.models.core import Database
from superset.models.slice import Slice
from superset.utils.core import get_example_database, QueryStatus
from .base_tests import SupersetTestCase
class DatabaseModelTestCase(SupersetTestCase):
@unittest.skipUnless(
SupersetTestCase.is_module_installed("requests"), "requests not installed"
)
def test_database_schema_presto(self):
sqlalchemy_uri = "presto://presto.airbnb.io:8080/hive/default"
model = Database(database_name="test_database", sqlalchemy_uri=sqlalchemy_uri)
db = make_url(model.get_sqla_engine().url).database
self.assertEqual("hive/default", db)
db = make_url(model.get_sqla_engine(schema="core_db").url).database
self.assertEqual("hive/core_db", db)
sqlalchemy_uri = "presto://presto.airbnb.io:8080/hive"
model = Database(database_name="test_database", sqlalchemy_uri=sqlalchemy_uri)
db = make_url(model.get_sqla_engine().url).database
self.assertEqual("hive", db)
db = make_url(model.get_sqla_engine(schema="core_db").url).database
self.assertEqual("hive/core_db", db)
def test_database_schema_postgres(self):
sqlalchemy_uri = "postgresql+psycopg2://postgres.airbnb.io:5439/prod"
model = Database(database_name="test_database", sqlalchemy_uri=sqlalchemy_uri)
db = make_url(model.get_sqla_engine().url).database
self.assertEqual("prod", db)
db = make_url(model.get_sqla_engine(schema="foo").url).database
self.assertEqual("prod", db)
@unittest.skipUnless(
SupersetTestCase.is_module_installed("thrift"), "thrift not installed"
)
@unittest.skipUnless(
SupersetTestCase.is_module_installed("pyhive"), "pyhive not installed"
)
def test_database_schema_hive(self):
sqlalchemy_uri = "hive://[email protected]:10000/default?auth=NOSASL"
model = Database(database_name="test_database", sqlalchemy_uri=sqlalchemy_uri)
db = make_url(model.get_sqla_engine().url).database
self.assertEqual("default", db)
db = make_url(model.get_sqla_engine(schema="core_db").url).database
self.assertEqual("core_db", db)
@unittest.skipUnless(
SupersetTestCase.is_module_installed("MySQLdb"), "mysqlclient not installed"
)
def test_database_schema_mysql(self):
sqlalchemy_uri = "mysql://root@localhost/superset"
model = Database(database_name="test_database", sqlalchemy_uri=sqlalchemy_uri)
db = make_url(model.get_sqla_engine().url).database
self.assertEqual("superset", db)
db = make_url(model.get_sqla_engine(schema="staging").url).database
self.assertEqual("staging", db)
@unittest.skipUnless(
SupersetTestCase.is_module_installed("MySQLdb"), "mysqlclient not installed"
)
def test_database_impersonate_user(self):
uri = "mysql://root@localhost"
example_user = "giuseppe"
model = Database(database_name="test_database", sqlalchemy_uri=uri)
model.impersonate_user = True
user_name = make_url(model.get_sqla_engine(user_name=example_user).url).username
self.assertEqual(example_user, user_name)
model.impersonate_user = False
user_name = make_url(model.get_sqla_engine(user_name=example_user).url).username
self.assertNotEqual(example_user, user_name)
def test_select_star(self):
db = get_example_database()
table_name = "energy_usage"
sql = db.select_star(table_name, show_cols=False, latest_partition=False)
expected = textwrap.dedent(
f"""\
SELECT *
FROM {table_name}
LIMIT 100"""
)
assert sql.startswith(expected)
sql = db.select_star(table_name, show_cols=True, latest_partition=False)
expected = textwrap.dedent(
f"""\
SELECT source,
target,
value
FROM energy_usage
LIMIT 100"""
)
assert sql.startswith(expected)
def test_select_star_fully_qualified_names(self):
db = get_example_database()
schema = "schema.name"
table_name = "table/name"
sql = db.select_star(
table_name, schema=schema, show_cols=False, latest_partition=False
)
fully_qualified_names = {
"sqlite": '"schema.name"."table/name"',
"mysql": "`schema.name`.`table/name`",
"postgres": '"schema.name"."table/name"',
}
fully_qualified_name = fully_qualified_names.get(db.db_engine_spec.engine)
if fully_qualified_name:
expected = textwrap.dedent(
f"""\
SELECT *
FROM {fully_qualified_name}
LIMIT 100"""
)
assert sql.startswith(expected)
def test_single_statement(self):
main_db = get_example_database()
if main_db.backend == "mysql":
df = main_db.get_df("SELECT 1", None)
self.assertEqual(df.iat[0, 0], 1)
df = main_db.get_df("SELECT 1;", None)
self.assertEqual(df.iat[0, 0], 1)
def test_multi_statement(self):
main_db = get_example_database()
if main_db.backend == "mysql":
df = main_db.get_df("USE superset; SELECT 1", None)
self.assertEqual(df.iat[0, 0], 1)
df = main_db.get_df("USE superset; SELECT ';';", None)
self.assertEqual(df.iat[0, 0], ";")
class SqlaTableModelTestCase(SupersetTestCase):
def test_get_timestamp_expression(self):
tbl = self.get_table_by_name("birth_names")
ds_col = tbl.get_column("ds")
sqla_literal = ds_col.get_timestamp_expression(None)
self.assertEqual(str(sqla_literal.compile()), "ds")
sqla_literal = ds_col.get_timestamp_expression("P1D")
compiled = "{}".format(sqla_literal.compile())
if tbl.database.backend == "mysql":
self.assertEqual(compiled, "DATE(ds)")
prev_ds_expr = ds_col.expression
ds_col.expression = "DATE_ADD(ds, 1)"
sqla_literal = ds_col.get_timestamp_expression("P1D")
compiled = "{}".format(sqla_literal.compile())
if tbl.database.backend == "mysql":
self.assertEqual(compiled, "DATE(DATE_ADD(ds, 1))")
ds_col.expression = prev_ds_expr
def test_get_timestamp_expression_epoch(self):
tbl = self.get_table_by_name("birth_names")
ds_col = tbl.get_column("ds")
ds_col.expression = None
ds_col.python_date_format = "epoch_s"
sqla_literal = ds_col.get_timestamp_expression(None)
compiled = "{}".format(sqla_literal.compile())
if tbl.database.backend == "mysql":
self.assertEqual(compiled, "from_unixtime(ds)")
ds_col.python_date_format = "epoch_s"
sqla_literal = ds_col.get_timestamp_expression("P1D")
compiled = "{}".format(sqla_literal.compile())
if tbl.database.backend == "mysql":
self.assertEqual(compiled, "DATE(from_unixtime(ds))")
prev_ds_expr = ds_col.expression
ds_col.expression = "DATE_ADD(ds, 1)"
sqla_literal = ds_col.get_timestamp_expression("P1D")
compiled = "{}".format(sqla_literal.compile())
if tbl.database.backend == "mysql":
self.assertEqual(compiled, "DATE(from_unixtime(DATE_ADD(ds, 1)))")
ds_col.expression = prev_ds_expr
def query_with_expr_helper(self, is_timeseries, inner_join=True):
tbl = self.get_table_by_name("birth_names")
ds_col = tbl.get_column("ds")
ds_col.expression = None
ds_col.python_date_format = None
spec = self.get_database_by_id(tbl.database_id).db_engine_spec
if not spec.allows_joins and inner_join:
# if the db does not support inner joins, we cannot force it so
return None
old_inner_join = spec.allows_joins
spec.allows_joins = inner_join
arbitrary_gby = "state || gender || '_test'"
arbitrary_metric = dict(
label="arbitrary", expressionType="SQL", sqlExpression="COUNT(1)"
)
query_obj = dict(
groupby=[arbitrary_gby, "name"],
metrics=[arbitrary_metric],
filter=[],
is_timeseries=is_timeseries,
columns=[],
granularity="ds",
from_dttm=None,
to_dttm=None,
extras=dict(time_grain_sqla="P1Y"),
)
qr = tbl.query(query_obj)
self.assertEqual(qr.status, QueryStatus.SUCCESS)
sql = qr.query
self.assertIn(arbitrary_gby, sql)
self.assertIn("name", sql)
if inner_join and is_timeseries:
self.assertIn("JOIN", sql.upper())
else:
self.assertNotIn("JOIN", sql.upper())
spec.allows_joins = old_inner_join
self.assertFalse(qr.df.empty)
return qr.df
def test_query_with_expr_groupby_timeseries(self):
def cannonicalize_df(df):
ret = df.sort_values(by=list(df.columns.values), inplace=False)
ret.reset_index(inplace=True, drop=True)
return ret
df1 = self.query_with_expr_helper(is_timeseries=True, inner_join=True)
df2 = self.query_with_expr_helper(is_timeseries=True, inner_join=False)
self.assertFalse(df2.empty)
# df1 can be empty if the db does not support join
if not df1.empty:
pandas.testing.assert_frame_equal(
cannonicalize_df(df1), cannonicalize_df(df2)
)
def test_query_with_expr_groupby(self):
self.query_with_expr_helper(is_timeseries=False)
def test_sql_mutator(self):
tbl = self.get_table_by_name("birth_names")
query_obj = dict(
groupby=[],
metrics=[],
filter=[],
is_timeseries=False,
columns=["name"],
granularity=None,
from_dttm=None,
to_dttm=None,
extras={},
)
sql = tbl.get_query_str(query_obj)
self.assertNotIn("-- COMMENT", sql)
def mutator(*args):
return "-- COMMENT\n" + args[0]
app.config["SQL_QUERY_MUTATOR"] = mutator
sql = tbl.get_query_str(query_obj)
self.assertIn("-- COMMENT", sql)
app.config["SQL_QUERY_MUTATOR"] = None
def test_query_with_non_existent_metrics(self):
tbl = self.get_table_by_name("birth_names")
query_obj = dict(
groupby=[],
metrics=["invalid"],
filter=[],
is_timeseries=False,
columns=["name"],
granularity=None,
from_dttm=None,
to_dttm=None,
extras={},
)
with self.assertRaises(Exception) as context:
tbl.get_query_str(query_obj)
self.assertTrue("Metric 'invalid' does not exist", context.exception)
def test_data_for_slices(self):
tbl = self.get_table_by_name("birth_names")
slc = (
metadata_db.session.query(Slice)
.filter_by(datasource_id=tbl.id, datasource_type=tbl.type)
.first()
)
data_for_slices = tbl.data_for_slices([slc])
self.assertEquals(len(data_for_slices["columns"]), 0)
self.assertEquals(len(data_for_slices["metrics"]), 1)
self.assertEquals(len(data_for_slices["verbose_map"].keys()), 2)
# --- File: tests/test_libs_tz_utils.py | Repo: fyntex/lib-cl-sii-python | License: MIT ---
import datetime
import re
import unittest
from cl_sii.libs.tz_utils import ( # noqa: F401
convert_naive_dt_to_tz_aware, convert_tz_aware_dt_to_naive,
dt_is_aware, dt_is_naive, get_now_tz_aware, validate_dt_tz,
PytzTimezone, _TZ_CL_SANTIAGO, TZ_UTC,
)
class FunctionsTest(unittest.TestCase):
def test_get_now_tz_aware(self) -> None:
# TODO: implement for 'get_now_tz_aware'
# Reuse doctests/examples in function docstring.
pass
def test_convert_naive_dt_to_tz_aware(self) -> None:
# TODO: implement for 'convert_naive_dt_to_tz_aware'
# Reuse doctests/examples in function docstring.
pass
def test_convert_tz_aware_dt_to_naive(self) -> None:
# TODO: implement for 'convert_tz_aware_dt_to_naive'
# Reuse doctests/examples in function docstring.
pass
def test_dt_is_aware(self) -> None:
# TODO: implement for 'dt_is_aware'
# Reuse doctests/examples in function docstring.
pass
def test_dt_is_naive(self) -> None:
# TODO: implement for 'dt_is_naive'
# Reuse doctests/examples in function docstring.
pass
def test_validate_dt_tz(self) -> None:
# TODO: implement for 'validate_dt_tz'
pass
def test_validate_dt_tz_tzinfo_zone_attribute_check(self) -> None:
# Time zone: UTC. Source: Pytz:
tzinfo_utc_pytz = TZ_UTC
dt_with_tzinfo_utc_pytz = convert_naive_dt_to_tz_aware(
datetime.datetime(2021, 1, 6, 15, 21),
tzinfo_utc_pytz,
)
# Time zone: UTC. Source: Python Standard Library:
tzinfo_utc_stdlib = datetime.timezone.utc
dt_with_tzinfo_utc_stdlib = datetime.datetime.fromisoformat('2021-01-06T15:04+00:00')
# Time zone: Not UTC. Source: Pytz:
tzinfo_not_utc_pytz = _TZ_CL_SANTIAGO
dt_with_tzinfo_not_utc_pytz = convert_naive_dt_to_tz_aware(
datetime.datetime(2021, 1, 6, 15, 21),
tzinfo_not_utc_pytz,
)
# Time zone: Not UTC. Source: Python Standard Library:
tzinfo_not_utc_stdlib = datetime.timezone(datetime.timedelta(days=-1, seconds=75600))
dt_with_tzinfo_not_utc_stdlib = datetime.datetime.fromisoformat('2021-01-06T15:04-03:00')
# Test datetimes with UTC time zone:
expected_error_message = re.compile(
r"^Object datetime.timezone.utc must have 'zone' attribute.$"
)
with self.assertRaisesRegex(AssertionError, expected_error_message):
validate_dt_tz(dt_with_tzinfo_utc_pytz, tzinfo_utc_stdlib)
with self.assertRaisesRegex(AssertionError, expected_error_message):
validate_dt_tz(dt_with_tzinfo_utc_stdlib, tzinfo_utc_pytz)
# Test datetimes with non-UTC time zone:
expected_error_message = re.compile(
r"^Object"
r" datetime.timezone\(datetime.timedelta\(days=-1, seconds=75600\)\)"
r" must have 'zone' attribute.$"
)
with self.assertRaisesRegex(AssertionError, expected_error_message):
validate_dt_tz(dt_with_tzinfo_not_utc_pytz, tzinfo_not_utc_stdlib) # type: ignore
with self.assertRaisesRegex(AssertionError, expected_error_message):
validate_dt_tz(dt_with_tzinfo_not_utc_stdlib, tzinfo_not_utc_pytz)
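# Standard-library illustration of the aware/naive distinction these tests
# exercise (stdlib only, does not touch cl_sii itself): awareness hinges on
# whether tzinfo is set on the datetime.
_naive_dt = datetime.datetime(2021, 1, 6, 15, 21)
_aware_dt = _naive_dt.replace(tzinfo=datetime.timezone.utc)
assert _naive_dt.tzinfo is None and _aware_dt.tzinfo is not None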
# --- File: lab6_sln/text_recognizer/datasets/__init__.py | Repo: sergeyktest/fsdl-text-recognizer-project | License: MIT ---
from .emnist import EmnistDataset
##### Hide lines below until Lab 2
from .emnist_lines import EmnistLinesDataset
##### Hide lines above until Lab 2
##### Hide lines below until Lab 5
from .iam_lines import IamLinesDataset
##### Hide lines above until Lab 5
# --- File: robot-server/robot_server/service/dependencies.py | Repo: soroushjp/opentrons | License: Apache-2.0 ---
import typing
from starlette import status
from fastapi import Depends, HTTPException, Header
from starlette.requests import Request
from opentrons.hardware_control import ThreadManager, ThreadedAsyncLock
from robot_server import constants, util
from robot_server.hardware_wrapper import HardwareWrapper
from robot_server.service.errors import BaseRobotServerError
from robot_server.service.json_api import Error
from robot_server.service.session.manager import SessionManager
from robot_server.service.protocol.manager import ProtocolManager
from robot_server.service.legacy.rpc import RPCServer
from notify_server.clients import publisher
from notify_server.settings import Settings as NotifyServerSettings
@util.call_once
async def get_event_publisher() -> publisher.Publisher:
"""A dependency creating a single notify-server event
publisher instance."""
notify_server_settings = NotifyServerSettings()
event_publisher = publisher.create(
notify_server_settings.publisher_address.connection_string()
)
return event_publisher
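# A minimal sketch of what @util.call_once is assumed to provide: the first
# await runs the factory, later awaits return the cached result. (The real
# robot_server.util implementation may differ.)
import functools
def _call_once_sketch(fn):
    @functools.wraps(fn)
    async def wrapper(*args, **kwargs):
        if not hasattr(wrapper, "_result"):
            # cache the awaited result on the wrapper itself
            wrapper._result = await fn(*args, **kwargs)
        return wrapper._result
    return wrapper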
@util.call_once
async def get_hardware_wrapper(
event_publisher: publisher.Publisher = Depends(get_event_publisher)) \
-> HardwareWrapper:
"""Get the single HardwareWrapper instance."""
return HardwareWrapper(event_publisher=event_publisher)
async def verify_hardware(
api_wrapper: HardwareWrapper = Depends(get_hardware_wrapper)) -> None:
"""
A dependency that raises an http exception if hardware is not ready. Must
only be used in PATH operation.
"""
if not api_wrapper.get_hardware():
raise HTTPException(status_code=status.HTTP_503_SERVICE_UNAVAILABLE,
detail="Robot is not ready for request")
async def get_hardware(
api_wrapper: HardwareWrapper = Depends(get_hardware_wrapper)) \
-> ThreadManager:
"""Hardware dependency"""
return api_wrapper.get_hardware()
@util.call_once
async def get_motion_lock() -> ThreadedAsyncLock:
"""
Get the single motion lock.
:return: a threaded async lock
"""
return ThreadedAsyncLock()
@util.call_once
async def get_rpc_server(
hardware: ThreadManager = Depends(get_hardware),
lock: ThreadedAsyncLock = Depends(get_motion_lock)
) -> RPCServer:
"""The RPC Server instance"""
from opentrons.api import MainRouter
root = MainRouter(hardware, lock=lock)
return RPCServer(None, root)
@util.call_once
async def get_protocol_manager() -> ProtocolManager:
"""The single protocol manager instance"""
return ProtocolManager()
@util.call_once
async def get_session_manager(
hardware: ThreadManager = Depends(get_hardware),
motion_lock: ThreadedAsyncLock = Depends(get_motion_lock),
protocol_manager: ProtocolManager = Depends(get_protocol_manager)) \
-> SessionManager:
"""The single session manager instance"""
return SessionManager(
hardware=hardware,
motion_lock=motion_lock,
protocol_manager=protocol_manager)
async def check_version_header(
request: Request,
opentrons_version: typing.Union[
int, constants.API_VERSION_LATEST_TYPE
] = Header(
...,
description=f"The requested HTTP API version which must be at "
f"least '{constants.MIN_API_VERSION}' or higher. To "
f"use the latest version specify "
f"'{constants.API_VERSION_LATEST}'.")
) -> None:
"""Dependency that will check that Opentrons-Version header meets
requirements."""
# Get the maximum version accepted by client
requested_version = (
int(opentrons_version)
if opentrons_version != constants.API_VERSION_LATEST else
constants.API_VERSION
)
if requested_version < constants.MIN_API_VERSION:
error = Error(
id="OutdatedAPIVersion",
title="Requested HTTP API version no longer supported",
detail=(
f"HTTP API version {constants.MIN_API_VERSION - 1} is "
"no longer supported. Please upgrade your Opentrons "
"App or other HTTP API client."
),
)
raise BaseRobotServerError(
status_code=status.HTTP_400_BAD_REQUEST,
error=error
)
else:
# Attach the api version to request's state dict
request.state.api_version = min(requested_version,
constants.API_VERSION)
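# Worked example of the negotiation above with assumed constants
# MIN_API_VERSION=2 and API_VERSION=3: "latest" or any int >= 3 resolves to 3,
# exactly 2 stays 2, and anything below 2 triggers the 400 response.
def _resolve_version_sketch(requested: int, server: int = 3) -> int:
    return min(requested, server)
assert _resolve_version_sketch(5) == 3 and _resolve_version_sketch(2) == 2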
# --- File: app/notification_manage.py | Repo: mapoetto/group2_CTFLab | License: MIT ---
import json
import urllib.parse
from app.models import Notifica, Notifica_vista
from app.models import User
from django.db.models import Q
def decode_input(inputs):
RETURNED = {}
for key,value in inputs.items():
RETURNED[key] = urllib.parse.unquote(value)
return RETURNED
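# Example of the decoding performed above (illustrative payload): each
# percent-encoded value coming from the AJAX request is unquoted before use.
assert decode_input({"click": "s%C3%AC", "testo": "hello%20world"}) == {"click": "s\u00ec", "testo": "hello world"}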
def insert_notification(insert_testo, insert_link, insert_utente):
insert = Notifica(testo=insert_testo, link=insert_link, destinatario=insert_utente)
insert.save()
def manage(request):
if request.is_ajax():
POST_VALUES = decode_input(json.loads(request.POST.get('data')))
if POST_VALUES["action"] == "get_notifications":
notifiche = dict()
num_notifiche = 0
try:
notifiche_per_me = Notifica.objects.filter(destinatario=request.session["user_pk"])
for notifica in notifiche_per_me:
try:
                        vista = Notifica_vista.objects.get(notifica_id=notifica.pk)  # if it exists, the notification has already been seen and must not be included
                        if vista.stato == "vista":
                            # print("This notification has already been seen (1)")
                            pass
                        else:
                            # print("This notification has NOT been seen yet (1)")
                            raise Notifica_vista.DoesNotExist
                    except Notifica_vista.DoesNotExist:  # not in the seen-notifications table, so it has not been seen yet
notifiche["x_utente_"+str(notifica.pk)] = dict()
notifiche["x_utente_"+str(notifica.pk)]["testo"] = notifica.testo
notifiche["x_utente_"+str(notifica.pk)]["link"] = notifica.link
if POST_VALUES["click"] == "si":
Notifica.objects.filter(pk=notifica.pk).delete()
num_notifiche= num_notifiche + 1
#print("NOTIFICA DA VEDERE1")
except Notifica.DoesNotExist: #non ci sono notifiche fatte personalmente per me
print("non ci sono notifiche fatte personalmente per me1")
pass
try:
notifiche_per_tutti = Notifica.objects.filter(destinatario="tutti")
for notifica in notifiche_per_tutti:
try:
                        vista = Notifica_vista.objects.get(Q(notifica_id=notifica.pk) & Q(user_id=request.session["user_pk"]))  # if it exists, the notification has already been seen and must not be included
                        if vista.stato == "vista":
                            # print("This notification has already been seen (2)")
                            pass
                        else:
                            # print("This notification has NOT been seen yet (2)")
                            raise Notifica_vista.DoesNotExist
                    except Notifica_vista.DoesNotExist:  # not in the seen-notifications table, so it has not been seen yet
notifiche["x_tutti_"+str(notifica.pk)] = dict()
notifiche["x_tutti_"+str(notifica.pk)]["testo"] = notifica.testo
notifiche["x_tutti_"+str(notifica.pk)]["link"] = notifica.link
if POST_VALUES["click"] == "si":
insert = Notifica_vista(stato="vista", user_id=User.objects.get(pk=request.session["user_pk"]), notifica_id=Notifica.objects.get(pk=notifica.pk))
insert.save()
num_notifiche= num_notifiche + 1
#print("NOTIFICA DA VEDERE2")
except Notifica.DoesNotExist: #non esiste ancora alcuna notifica per tutti
#print("non esiste ancora alcuna notifica per tutti2")
pass
response_list = {
"notifiche": json.dumps(notifiche),
"num_notifiche": num_notifiche
}
message = json.dumps(response_list)
else:
message = "Not Ajax"
return message
# --- File: examples/pubsub2.py | Repo: emorozov/aioredis | License: MIT ---
import asyncio
import aioredis
async def pubsub():
sub = await aioredis.create_redis("redis://localhost")
ch1, ch2 = await sub.subscribe("channel:1", "channel:2")
assert isinstance(ch1, aioredis.Channel)
assert isinstance(ch2, aioredis.Channel)
async def async_reader(channel):
while await channel.wait_message():
msg = await channel.get(encoding="utf-8")
# ... process message ...
print("message in {}: {}".format(channel.name, msg))
tsk1 = asyncio.ensure_future(async_reader(ch1))
# Or alternatively:
async def async_reader2(channel):
while True:
msg = await channel.get(encoding="utf-8")
if msg is None:
break
# ... process message ...
print("message in {}: {}".format(channel.name, msg))
tsk2 = asyncio.ensure_future(async_reader2(ch2))
# Publish messages and terminate
pub = await aioredis.create_redis("redis://localhost")
while True:
channels = await pub.pubsub_channels("channel:*")
if len(channels) == 2:
break
for msg in ("Hello", ",", "world!"):
for ch in ("channel:1", "channel:2"):
await pub.publish(ch, msg)
await asyncio.sleep(0.1)
pub.close()
sub.close()
await pub.wait_closed()
await sub.wait_closed()
await asyncio.gather(tsk1, tsk2)
if __name__ == "__main__":
import os
if "redis_version:2.6" not in os.environ.get("REDIS_VERSION", ""):
asyncio.run(pubsub())
# --- File: algorithms/code/leetcode/lc216_combination_sum_iii/lc216_combination_sum_iii.py | Repo: altermarkive/training | License: MIT ---
#!/usr/bin/env python3
# https://leetcode.com/problems/combination-sum-iii/
import unittest
from typing import List
class Solution:
# pylint: disable=R0913
def __traverse(self, contains, summed, left, n, found, start):
if left == 0 and summed == n:
found.append(contains)
else:
for i in range(start, 10):
mask = 1 << i
# if (contains & mask) == 0:
self.__traverse(
contains | mask, summed + i, left - 1, n, found, i + 1)
def combinationSum3(self, k: int, n: int) -> List[List[int]]:
found = []
self.__traverse(0, 0, k, n, found, 1)
each = []
for contains in found:
entry = []
each.append(entry)
for i in range(1, 10):
mask = 1 << i
if (contains & mask) != 0:
entry.append(i)
return each
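# Bitmask walk-through for the encoding used above: choosing digits {1, 2, 4}
# sets bits 1, 2 and 4 of `contains` (0b10110 == 22); the decoding loop in
# combinationSum3 then recovers the sorted list [1, 2, 4].
assert (1 << 1) | (1 << 2) | (1 << 4) == 0b10110 == 22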
class TestCode(unittest.TestCase):
def __test(self, expected, result):
for entry in result:
entry.sort()
self.assertEqual(len(expected), len(result))
for i, _ in enumerate(expected):
entry = result[i]
self.assertEqual(len(expected[i]), len(entry))
for j, _ in enumerate(expected[i]):
self.assertEqual(expected[i][j], entry[j])
def test_3_7(self):
expected = [[1, 2, 4]]
self.__test(expected, Solution().combinationSum3(3, 7))
def test_3_9(self):
expected = [[1, 2, 6], [1, 3, 5], [2, 3, 4]]
self.__test(expected, Solution().combinationSum3(3, 9))
# --- File: apps/giphy.py | Repo: skyksit/jandibot | License: MIT ---
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
"""
@author: skyksit
"""
import os
import re
import time
import random
import requests
from apps.decorators import on_command
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.chrome.options import Options as ChromeOptions
HELP_MSG = [
'giphy 에서 smile 키워드로 검색하고 싶으면 \'/ns !움짤 smile\' 이라고 해주세요.',
'검색어는 `영어`와 `한글` 둘 다 됩니다. 한글은 파파고에서 영어로 전환해서 찾아줍니다',
'!움짤,!ㅇㅉ,!gif,!giphy 명령어로 사용이 가능합니다'
]
PAPAGO_URL = "https://papago.naver.com/"
LOCAL_DRIVER_URL = 'D:/Anaconda3/envs/envscraping/Scripts/chromedriver.exe'
URL = 'http://api.giphy.com/v1/gifs/search?'
def get_english(arg):
    '''Takes a Korean string, translates it to English on the Papago translation site, and returns the result'''
rtn_str = ""
try:
chrome_bin = os.environ.get('GOOGLE_CHROME_SHIM', None)
if chrome_bin is not None:
opts = ChromeOptions()
opts.binary_location = chrome_bin
opts.add_argument('headless')
opts.add_argument("lang=ko_KR")
opts.add_argument('--disable-gpu')
opts.add_argument('--no-sandbox')
browser = webdriver.Chrome(executable_path="chromedriver", chrome_options=opts)
else:
browser = webdriver.Chrome(LOCAL_DRIVER_URL)
browser.implicitly_wait(3)
browser.get(PAPAGO_URL)
txt_source = browser.find_element_by_id('txtSource')
txt_source.send_keys(arg)
txt_source.send_keys(Keys.RETURN)
time.sleep(5)
txt_target = browser.find_element_by_id('txtTarget')
rtn_str = txt_target.text
finally:
browser.quit()
return rtn_str
def get_giphy_message(query):
if re.search(r'[ㄱ-ㅎㅏ-ㅣ가-힣]', query, re.U):
query = get_english(query)
description = "{0} 로 이미지 검색했습니다".format(query)
params = {
'q': query,
'api_key': 'dc6zaTOxFJmzC',
'offset': random.randint(0, 1024),
'limit': 1,
}
    image = ''  # default so the return below can never hit an unbound name
    try:
        result = requests.get(URL, params=params).json()
    except Exception:
        description = 'Could not connect to giphy.com. Try again later.'
    else:
        if result['pagination']['count'] < 1:
            description = 'No result found for %s. Only English queries are available for giphy.com' % query
        else:
            el = result['data'][0]
            image_url = el['images']['original']['url']
            image = image_url[0:image_url.index('?')]
    return description, image
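# Example of the URL trimming above (made-up giphy-style URL): everything from
# the '?' onward is dropped to keep only the bare .gif address.
_u = 'https://media.example.com/cat.gif?cid=123&rid=456'
assert _u[0:_u.index('?')] == 'https://media.example.com/cat.gif'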
@on_command(['ㅇㅉ', '움짤', 'gif', 'giphy'])
def run(query):
    '''Search an image from giphy.com'''
title = 'Giphy'
description = ''
image = ''
if query:
if len(query) < 1:
description = '\n'.join(HELP_MSG)
else:
description, image = get_giphy_message(query)
else:
description = '\n'.join(HELP_MSG)
return title, description, image
if '__main__' == __name__:
print(get_english('웃긴고양이'))
# --- File: mne/commands/mne_browse_raw.py | Repo: achilleas-k/mne-python | License: BSD-3-Clause ---
#!/usr/bin/env python
r"""Browse raw data.
You can do for example:
$ mne browse_raw --raw sample_audvis_raw.fif \
--proj sample_audvis_ecg-proj.fif \
--eve sample_audvis_raw-eve.fif
"""
# Authors : Eric Larson, PhD
import sys
import mne
def run():
"""Run command."""
import matplotlib.pyplot as plt
from mne.commands.utils import get_optparser
parser = get_optparser(__file__)
parser.add_option("--raw", dest="raw_in",
help="Input raw FIF file", metavar="FILE")
parser.add_option("--proj", dest="proj_in",
help="Projector file", metavar="FILE",
default='')
parser.add_option("--eve", dest="eve_in",
help="Events file", metavar="FILE",
default='')
parser.add_option("-d", "--duration", dest="duration", type="float",
help="Time window for plotting (sec)",
default=10.0)
parser.add_option("-t", "--start", dest="start", type="float",
help="Initial start time for plotting",
default=0.0)
parser.add_option("-n", "--n_channels", dest="n_channels", type="int",
help="Number of channels to plot at a time",
default=20)
parser.add_option("-o", "--order", dest="group_by",
help="Order to use for grouping during plotting "
"('type' or 'original')", default='type')
parser.add_option("-p", "--preload", dest="preload",
help="Preload raw data (for faster navigaton)",
default=False, action="store_true")
parser.add_option("-s", "--show_options", dest="show_options",
help="Show projection options dialog",
default=False)
parser.add_option("--allowmaxshield", dest="maxshield",
help="Allow loading MaxShield processed data",
action="store_true")
parser.add_option("--highpass", dest="highpass", type="float",
help="Display high-pass filter corner frequency",
default=-1)
parser.add_option("--lowpass", dest="lowpass", type="float",
help="Display low-pass filter corner frequency",
default=-1)
parser.add_option("--filtorder", dest="filtorder", type="int",
help="Display filtering IIR order (or 0 to use FIR)",
default=4)
parser.add_option("--clipping", dest="clipping",
help="Enable trace clipping mode, either 'clip' or "
"'transparent'", default=None)
parser.add_option("--filterchpi", dest="filterchpi",
help="Enable filtering cHPI signals.", default=None,
action="store_true")
options, args = parser.parse_args()
raw_in = options.raw_in
duration = options.duration
start = options.start
n_channels = options.n_channels
group_by = options.group_by
preload = options.preload
show_options = options.show_options
proj_in = options.proj_in
eve_in = options.eve_in
maxshield = options.maxshield
highpass = options.highpass
lowpass = options.lowpass
filtorder = options.filtorder
clipping = options.clipping
filterchpi = options.filterchpi
if raw_in is None:
parser.print_help()
sys.exit(1)
raw = mne.io.read_raw_fif(raw_in, preload=preload,
allow_maxshield=maxshield)
if len(proj_in) > 0:
projs = mne.read_proj(proj_in)
raw.info['projs'] = projs
if len(eve_in) > 0:
events = mne.read_events(eve_in)
else:
events = None
if filterchpi:
if not preload:
raise RuntimeError(
'Raw data must be preloaded for chpi, use --preload')
raw = mne.chpi.filter_chpi(raw)
highpass = None if highpass < 0 or filtorder < 0 else highpass
lowpass = None if lowpass < 0 or filtorder < 0 else lowpass
raw.plot(duration=duration, start=start, n_channels=n_channels,
group_by=group_by, show_options=show_options, events=events,
highpass=highpass, lowpass=lowpass, filtorder=filtorder,
clipping=clipping)
plt.show(block=True)
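# Small sketch of the -1 sentinel handling in run() above: a negative corner
# frequency (or a negative filter order) means "no display filter".
def _sentinel_to_none(value, order):
    return None if value < 0 or order < 0 else value
assert _sentinel_to_none(-1, 4) is None
assert _sentinel_to_none(40.0, 4) == 40.0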
is_main = (__name__ == '__main__')
if is_main:
run()
# --- File: baseline/bitextor_util/lett2ridx_combine.py | Repo: parallelcrawl/DataCollection | License: Apache-2.0 ---
#!/usr/bin/env python
import sys
from operator import itemgetter
from collections import defaultdict
def read_source_chunk(filename, n):
""" Reads a couple of source documents at a time """
docs = []
with open(filename, 'r') as sf:
for sline in sf:
s_doc_id, s_tokens = sline.split('\t', 1)
s_doc_id = int(s_doc_id)
s_tokens = set(s_tokens.strip().split('\t'))
docs.append((s_doc_id, s_tokens))
if len(docs) >= n:
yield docs
docs = []
if docs:
yield docs
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('source', help='source tokens file')
parser.add_argument('target', help='translated target tokens')
parser.add_argument('-max_candidates', type=int, default=10,
help='maximum number of candidates per document')
parser.add_argument('-buffersize', type=int, default=2000,
                        help='number of source documents held in memory')
parser.add_argument('-valid_words',
help='List of valid words, i.e. those of low count')
args = parser.parse_args()
for source_docs in read_source_chunk(args.source, args.buffersize):
similarities = defaultdict(list)
with open(args.target, 'r') as tf:
for tline in tf:
t_doc_id, n_translated, n_orig_tokens, t_tokens = \
tline.split('\t', 3)
t_doc_id = int(t_doc_id)
n_translated = float(n_translated)
n_orig_tokens = int(n_orig_tokens)
t_tokens = set(t_tokens.strip().split('\t'))
for s_doc_id, s_tokens in source_docs:
# formula from bitextor-idx2ridx
max_vocab = float(max(len(s_tokens), n_orig_tokens))
min_vocab = float(min(len(s_tokens), n_orig_tokens))
num_intersect_words = len(s_tokens.intersection(t_tokens))
if max_vocab > 0 and n_translated > 0:
similarity = min_vocab / max_vocab * \
num_intersect_words / n_translated
# if s_doc_id == 13 and t_doc_id == 0:
# print s_tokens
# print len(s_tokens)
# print "Original tokens: ", n_orig_tokens
# print t_tokens
# print len(t_tokens)
#
# print s_doc_id, t_doc_id
# print similarity
# print min_vocab, max_vocab
# print num_intersect_words, n_translated
similarities[s_doc_id].append((t_doc_id, similarity))
for s_doc_id in similarities:
similarities[s_doc_id].sort(key=itemgetter(1), reverse=True)
# High similarity at beginning
# Fileformat expects docids starting with 1
sys.stdout.write("%d" % (s_doc_id + 1))
for t_doc_id, similarity in \
similarities[s_doc_id][:args.max_candidates]:
sys.stdout.write("\t%d:%f" % (t_doc_id + 1, similarity))
sys.stdout.write('\n')
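# Worked example of the bitextor-style score computed above, with assumed
# counts: |source vocab| = 10, |target vocab| = 8, 4 shared tokens and 8
# translated tokens give min/max * shared/translated = 8/10 * 4/8 = 0.4.
assert float(min(10, 8)) / float(max(10, 8)) * 4 / 8 == 0.4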
# --- File: kubernetes/client/models/v1_deployment_status.py | Repo: lp67/python | License: Apache-2.0 ---
# coding: utf-8
"""
Kubernetes
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
The version of the OpenAPI document: release-1.20
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from kubernetes.client.configuration import Configuration
class V1DeploymentStatus(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'available_replicas': 'int',
'collision_count': 'int',
'conditions': 'list[V1DeploymentCondition]',
'observed_generation': 'int',
'ready_replicas': 'int',
'replicas': 'int',
'unavailable_replicas': 'int',
'updated_replicas': 'int'
}
attribute_map = {
'available_replicas': 'availableReplicas',
'collision_count': 'collisionCount',
'conditions': 'conditions',
'observed_generation': 'observedGeneration',
'ready_replicas': 'readyReplicas',
'replicas': 'replicas',
'unavailable_replicas': 'unavailableReplicas',
'updated_replicas': 'updatedReplicas'
}
def __init__(self, available_replicas=None, collision_count=None, conditions=None, observed_generation=None, ready_replicas=None, replicas=None, unavailable_replicas=None, updated_replicas=None, local_vars_configuration=None): # noqa: E501
"""V1DeploymentStatus - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._available_replicas = None
self._collision_count = None
self._conditions = None
self._observed_generation = None
self._ready_replicas = None
self._replicas = None
self._unavailable_replicas = None
self._updated_replicas = None
self.discriminator = None
if available_replicas is not None:
self.available_replicas = available_replicas
if collision_count is not None:
self.collision_count = collision_count
if conditions is not None:
self.conditions = conditions
if observed_generation is not None:
self.observed_generation = observed_generation
if ready_replicas is not None:
self.ready_replicas = ready_replicas
if replicas is not None:
self.replicas = replicas
if unavailable_replicas is not None:
self.unavailable_replicas = unavailable_replicas
if updated_replicas is not None:
self.updated_replicas = updated_replicas
@property
def available_replicas(self):
"""Gets the available_replicas of this V1DeploymentStatus. # noqa: E501
Total number of available pods (ready for at least minReadySeconds) targeted by this deployment. # noqa: E501
:return: The available_replicas of this V1DeploymentStatus. # noqa: E501
:rtype: int
"""
return self._available_replicas
@available_replicas.setter
def available_replicas(self, available_replicas):
"""Sets the available_replicas of this V1DeploymentStatus.
Total number of available pods (ready for at least minReadySeconds) targeted by this deployment. # noqa: E501
:param available_replicas: The available_replicas of this V1DeploymentStatus. # noqa: E501
:type: int
"""
self._available_replicas = available_replicas
@property
def collision_count(self):
"""Gets the collision_count of this V1DeploymentStatus. # noqa: E501
Count of hash collisions for the Deployment. The Deployment controller uses this field as a collision avoidance mechanism when it needs to create the name for the newest ReplicaSet. # noqa: E501
:return: The collision_count of this V1DeploymentStatus. # noqa: E501
:rtype: int
"""
return self._collision_count
@collision_count.setter
def collision_count(self, collision_count):
"""Sets the collision_count of this V1DeploymentStatus.
Count of hash collisions for the Deployment. The Deployment controller uses this field as a collision avoidance mechanism when it needs to create the name for the newest ReplicaSet. # noqa: E501
:param collision_count: The collision_count of this V1DeploymentStatus. # noqa: E501
:type: int
"""
self._collision_count = collision_count
@property
def conditions(self):
"""Gets the conditions of this V1DeploymentStatus. # noqa: E501
Represents the latest available observations of a deployment's current state. # noqa: E501
:return: The conditions of this V1DeploymentStatus. # noqa: E501
:rtype: list[V1DeploymentCondition]
"""
return self._conditions
@conditions.setter
def conditions(self, conditions):
"""Sets the conditions of this V1DeploymentStatus.
Represents the latest available observations of a deployment's current state. # noqa: E501
:param conditions: The conditions of this V1DeploymentStatus. # noqa: E501
:type: list[V1DeploymentCondition]
"""
self._conditions = conditions
@property
def observed_generation(self):
"""Gets the observed_generation of this V1DeploymentStatus. # noqa: E501
The generation observed by the deployment controller. # noqa: E501
:return: The observed_generation of this V1DeploymentStatus. # noqa: E501
:rtype: int
"""
return self._observed_generation
@observed_generation.setter
def observed_generation(self, observed_generation):
"""Sets the observed_generation of this V1DeploymentStatus.
The generation observed by the deployment controller. # noqa: E501
:param observed_generation: The observed_generation of this V1DeploymentStatus. # noqa: E501
:type: int
"""
self._observed_generation = observed_generation
@property
def ready_replicas(self):
"""Gets the ready_replicas of this V1DeploymentStatus. # noqa: E501
Total number of ready pods targeted by this deployment. # noqa: E501
:return: The ready_replicas of this V1DeploymentStatus. # noqa: E501
:rtype: int
"""
return self._ready_replicas
@ready_replicas.setter
def ready_replicas(self, ready_replicas):
"""Sets the ready_replicas of this V1DeploymentStatus.
Total number of ready pods targeted by this deployment. # noqa: E501
:param ready_replicas: The ready_replicas of this V1DeploymentStatus. # noqa: E501
:type: int
"""
self._ready_replicas = ready_replicas
@property
def replicas(self):
"""Gets the replicas of this V1DeploymentStatus. # noqa: E501
Total number of non-terminated pods targeted by this deployment (their labels match the selector). # noqa: E501
:return: The replicas of this V1DeploymentStatus. # noqa: E501
:rtype: int
"""
return self._replicas
@replicas.setter
def replicas(self, replicas):
"""Sets the replicas of this V1DeploymentStatus.
Total number of non-terminated pods targeted by this deployment (their labels match the selector). # noqa: E501
:param replicas: The replicas of this V1DeploymentStatus. # noqa: E501
:type: int
"""
self._replicas = replicas
@property
def unavailable_replicas(self):
"""Gets the unavailable_replicas of this V1DeploymentStatus. # noqa: E501
Total number of unavailable pods targeted by this deployment. This is the total number of pods that are still required for the deployment to have 100% available capacity. They may either be pods that are running but not yet available or pods that still have not been created. # noqa: E501
:return: The unavailable_replicas of this V1DeploymentStatus. # noqa: E501
:rtype: int
"""
return self._unavailable_replicas
@unavailable_replicas.setter
def unavailable_replicas(self, unavailable_replicas):
"""Sets the unavailable_replicas of this V1DeploymentStatus.
Total number of unavailable pods targeted by this deployment. This is the total number of pods that are still required for the deployment to have 100% available capacity. They may either be pods that are running but not yet available or pods that still have not been created. # noqa: E501
:param unavailable_replicas: The unavailable_replicas of this V1DeploymentStatus. # noqa: E501
:type: int
"""
self._unavailable_replicas = unavailable_replicas
@property
def updated_replicas(self):
"""Gets the updated_replicas of this V1DeploymentStatus. # noqa: E501
Total number of non-terminated pods targeted by this deployment that have the desired template spec. # noqa: E501
:return: The updated_replicas of this V1DeploymentStatus. # noqa: E501
:rtype: int
"""
return self._updated_replicas
@updated_replicas.setter
def updated_replicas(self, updated_replicas):
"""Sets the updated_replicas of this V1DeploymentStatus.
Total number of non-terminated pods targeted by this deployment that have the desired template spec. # noqa: E501
:param updated_replicas: The updated_replicas of this V1DeploymentStatus. # noqa: E501
:type: int
"""
self._updated_replicas = updated_replicas
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1DeploymentStatus):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1DeploymentStatus):
return True
return self.to_dict() != other.to_dict()
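# Usage sketch for the generated model above: build a status object and
# serialize it back to a plain dict (local_vars_configuration is stubbed here
# so the snippet does not need a live kubernetes Configuration instance).
_status_example = V1DeploymentStatus(replicas=3, ready_replicas=3,
                                     local_vars_configuration=object())
assert _status_example.to_dict()["replicas"] == 3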
# --- File: databricks/koalas/indexing.py | Repo: varunsh-coder/koalas | License: Apache-2.0 ---
#
# Copyright (C) 2019 Databricks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
A loc indexer for Koalas DataFrame/Series.
"""
from abc import ABCMeta, abstractmethod
from collections.abc import Iterable
from functools import reduce
from typing import Any, Optional, List, Tuple, TYPE_CHECKING, Union, cast, Sized
import pandas as pd
from pandas.api.types import is_list_like
from pyspark import sql as spark
from pyspark.sql import functions as F
from pyspark.sql.types import BooleanType, LongType
from pyspark.sql.utils import AnalysisException
import numpy as np
from databricks import koalas as ks # noqa: F401
from databricks.koalas.internal import (
InternalFrame,
NATURAL_ORDER_COLUMN_NAME,
SPARK_DEFAULT_SERIES_NAME,
)
from databricks.koalas.exceptions import SparkPandasIndexingError, SparkPandasNotImplementedError
from databricks.koalas.typedef.typehints import (
Dtype,
Scalar,
extension_dtypes,
spark_type_to_pandas_dtype,
)
from databricks.koalas.utils import (
is_name_like_tuple,
is_name_like_value,
lazy_property,
name_like_string,
same_anchor,
scol_for,
verify_temp_column_name,
)
if TYPE_CHECKING:
from databricks.koalas.frame import DataFrame
from databricks.koalas.series import Series
class IndexerLike(object):
def __init__(self, kdf_or_kser):
from databricks.koalas.frame import DataFrame
from databricks.koalas.series import Series
assert isinstance(kdf_or_kser, (DataFrame, Series)), "unexpected argument type: {}".format(
type(kdf_or_kser)
)
self._kdf_or_kser = kdf_or_kser
@property
def _is_df(self):
from databricks.koalas.frame import DataFrame
return isinstance(self._kdf_or_kser, DataFrame)
@property
def _is_series(self):
from databricks.koalas.series import Series
return isinstance(self._kdf_or_kser, Series)
@property
def _kdf(self):
if self._is_df:
return self._kdf_or_kser
else:
assert self._is_series
return self._kdf_or_kser._kdf
@property
def _internal(self):
return self._kdf._internal
class AtIndexer(IndexerLike):
"""
Access a single value for a row/column label pair.
If the index is not unique, all matching pairs are returned as an array.
Similar to ``loc``, in that both provide label-based lookups. Use ``at`` if you only need to
get a single value in a DataFrame or Series.
.. note:: Unlike pandas, Koalas only allows using ``at`` to get values but not to set them.
.. note:: Warning: If ``row_index`` matches a lot of rows, large amounts of data will be
fetched, potentially causing your machine to run out of memory.
Raises
------
KeyError
When label does not exist in DataFrame
Examples
--------
>>> kdf = ks.DataFrame([[0, 2, 3], [0, 4, 1], [10, 20, 30]],
... index=[4, 5, 5], columns=['A', 'B', 'C'])
>>> kdf
A B C
4 0 2 3
5 0 4 1
5 10 20 30
Get value at specified row/column pair
>>> kdf.at[4, 'B']
2
Get array if an index occurs multiple times
>>> kdf.at[5, 'B']
array([ 4, 20])
"""
def __getitem__(self, key) -> Union["Series", "DataFrame", Scalar]:
if self._is_df:
if not isinstance(key, tuple) or len(key) != 2:
raise TypeError("Use DataFrame.at like .at[row_index, column_name]")
row_sel, col_sel = key
else:
assert self._is_series, type(self._kdf_or_kser)
if isinstance(key, tuple) and len(key) != 1:
raise TypeError("Use Series.at like .at[row_index]")
row_sel = key
col_sel = self._kdf_or_kser._column_label
if self._internal.index_level == 1:
if not is_name_like_value(row_sel, allow_none=False, allow_tuple=False):
raise ValueError("At based indexing on a single index can only have a single value")
row_sel = (row_sel,)
else:
if not is_name_like_tuple(row_sel, allow_none=False):
raise ValueError("At based indexing on multi-index can only have tuple values")
if col_sel is not None:
if not is_name_like_value(col_sel, allow_none=False):
raise ValueError("At based indexing on multi-index can only have tuple values")
if not is_name_like_tuple(col_sel):
col_sel = (col_sel,)
cond = reduce(
lambda x, y: x & y,
[scol == row for scol, row in zip(self._internal.index_spark_columns, row_sel)],
)
pdf = (
self._internal.spark_frame.drop(NATURAL_ORDER_COLUMN_NAME)
.filter(cond)
.select(self._internal.spark_column_for(col_sel))
.toPandas()
)
if len(pdf) < 1:
raise KeyError(name_like_string(row_sel))
values = pdf.iloc[:, 0].values
return (
values if (len(row_sel) < self._internal.index_level or len(values) > 1) else values[0]
)
class iAtIndexer(IndexerLike):
"""
Access a single value for a row/column pair by integer position.
Similar to ``iloc``, in that both provide integer-based lookups. Use
``iat`` if you only need to get or set a single value in a DataFrame
or Series.
Raises
------
KeyError
When label does not exist in DataFrame
Examples
--------
>>> df = ks.DataFrame([[0, 2, 3], [0, 4, 1], [10, 20, 30]],
... columns=['A', 'B', 'C'])
>>> df
A B C
0 0 2 3
1 0 4 1
2 10 20 30
Get value at specified row/column pair
>>> df.iat[1, 2]
1
Get value within a series
>>> kser = ks.Series([1, 2, 3], index=[10, 20, 30])
>>> kser
10 1
20 2
30 3
dtype: int64
>>> kser.iat[1]
2
"""
def __getitem__(self, key) -> Union["Series", "DataFrame", Scalar]:
if self._is_df:
if not isinstance(key, tuple) or len(key) != 2:
raise TypeError(
"Use DataFrame.iat like .iat[row_integer_position, column_integer_position]"
)
row_sel, col_sel = key
if not isinstance(row_sel, int) or not isinstance(col_sel, int):
raise ValueError("iAt based indexing can only have integer indexers")
return self._kdf_or_kser.iloc[row_sel, col_sel]
else:
assert self._is_series, type(self._kdf_or_kser)
if not isinstance(key, int) and len(key) != 1:
raise TypeError("Use Series.iat like .iat[row_integer_position]")
if not isinstance(key, int):
raise ValueError("iAt based indexing can only have integer indexers")
return self._kdf_or_kser.iloc[key]
class LocIndexerLike(IndexerLike, metaclass=ABCMeta):
def _select_rows(
self, rows_sel: Any
) -> Tuple[Optional[spark.Column], Optional[int], Optional[int]]:
"""
Dispatch the logic for select rows to more specific methods by `rows_sel` argument types.
Parameters
----------
rows_sel : the key specified to select rows.
Returns
-------
Tuple of Spark column, int, int:
* The Spark column for the condition to filter the rows.
* The number of rows when the selection can be simplified by limit.
* The remaining index rows if the result index size is shrunk.
"""
from databricks.koalas.series import Series
if rows_sel is None:
return None, None, None
elif isinstance(rows_sel, Series):
return self._select_rows_by_series(rows_sel)
elif isinstance(rows_sel, spark.Column):
return self._select_rows_by_spark_column(rows_sel)
elif isinstance(rows_sel, slice):
if rows_sel == slice(None):
# If slice is None - select everything, so nothing to do
return None, None, None
return self._select_rows_by_slice(rows_sel)
elif isinstance(rows_sel, tuple):
return self._select_rows_else(rows_sel)
elif is_list_like(rows_sel):
return self._select_rows_by_iterable(rows_sel)
else:
return self._select_rows_else(rows_sel)
def _select_cols(
self, cols_sel: Any, missing_keys: Optional[List[Tuple]] = None
) -> Tuple[
List[Tuple], Optional[List[spark.Column]], Optional[List[Dtype]], bool, Optional[Tuple]
]:
"""
Dispatch the logic for select columns to more specific methods by `cols_sel` argument types.
Parameters
----------
cols_sel : the key specified to select columns.
Returns
-------
Tuple of list of column label, list of Spark columns, list of dtypes, bool:
* The column labels selected.
* The Spark columns selected.
* The dtypes selected.
* The boolean value whether Series should be returned or not.
* The Series name if needed.
"""
from databricks.koalas.series import Series
if cols_sel is None:
column_labels = self._internal.column_labels
data_spark_columns = self._internal.data_spark_columns
data_dtypes = self._internal.data_dtypes
return column_labels, data_spark_columns, data_dtypes, False, None
elif isinstance(cols_sel, Series):
return self._select_cols_by_series(cols_sel, missing_keys)
elif isinstance(cols_sel, spark.Column):
return self._select_cols_by_spark_column(cols_sel, missing_keys)
elif isinstance(cols_sel, slice):
if cols_sel == slice(None):
# If slice is None - select everything, so nothing to do
column_labels = self._internal.column_labels
data_spark_columns = self._internal.data_spark_columns
data_dtypes = self._internal.data_dtypes
return column_labels, data_spark_columns, data_dtypes, False, None
return self._select_cols_by_slice(cols_sel, missing_keys)
elif isinstance(cols_sel, tuple):
return self._select_cols_else(cols_sel, missing_keys)
elif is_list_like(cols_sel):
return self._select_cols_by_iterable(cols_sel, missing_keys)
else:
return self._select_cols_else(cols_sel, missing_keys)
# Methods for row selection
@abstractmethod
def _select_rows_by_series(
self, rows_sel: "Series"
) -> Tuple[Optional[spark.Column], Optional[int], Optional[int]]:
""" Select rows by `Series` type key. """
pass
@abstractmethod
def _select_rows_by_spark_column(
self, rows_sel: spark.column
) -> Tuple[Optional[spark.Column], Optional[int], Optional[int]]:
""" Select rows by Spark `Column` type key. """
pass
@abstractmethod
def _select_rows_by_slice(
self, rows_sel: slice
) -> Tuple[Optional[spark.Column], Optional[int], Optional[int]]:
""" Select rows by `slice` type key. """
pass
@abstractmethod
def _select_rows_by_iterable(
self, rows_sel: Iterable
) -> Tuple[Optional[spark.Column], Optional[int], Optional[int]]:
""" Select rows by `Iterable` type key. """
pass
@abstractmethod
def _select_rows_else(
self, rows_sel: Any
) -> Tuple[Optional[spark.Column], Optional[int], Optional[int]]:
""" Select rows by other type key. """
pass
# Methods for col selection
@abstractmethod
def _select_cols_by_series(
self, cols_sel: "Series", missing_keys: Optional[List[Tuple]]
) -> Tuple[
List[Tuple], Optional[List[spark.Column]], Optional[List[Dtype]], bool, Optional[Tuple]
]:
""" Select columns by `Series` type key. """
pass
@abstractmethod
def _select_cols_by_spark_column(
self, cols_sel: spark.Column, missing_keys: Optional[List[Tuple]]
) -> Tuple[
List[Tuple], Optional[List[spark.Column]], Optional[List[Dtype]], bool, Optional[Tuple]
]:
""" Select columns by Spark `Column` type key. """
pass
@abstractmethod
def _select_cols_by_slice(
self, cols_sel: slice, missing_keys: Optional[List[Tuple]]
) -> Tuple[
List[Tuple], Optional[List[spark.Column]], Optional[List[Dtype]], bool, Optional[Tuple]
]:
""" Select columns by `slice` type key. """
pass
@abstractmethod
def _select_cols_by_iterable(
self, cols_sel: Iterable, missing_keys: Optional[List[Tuple]]
) -> Tuple[
List[Tuple], Optional[List[spark.Column]], Optional[List[Dtype]], bool, Optional[Tuple]
]:
""" Select columns by `Iterable` type key. """
pass
@abstractmethod
def _select_cols_else(
self, cols_sel: Any, missing_keys: Optional[List[Tuple]]
) -> Tuple[
List[Tuple], Optional[List[spark.Column]], Optional[List[Dtype]], bool, Optional[Tuple]
]:
""" Select columns by other type key. """
pass
def __getitem__(self, key) -> Union["Series", "DataFrame"]:
from databricks.koalas.frame import DataFrame
from databricks.koalas.series import Series, first_series
if self._is_series:
if isinstance(key, Series) and not same_anchor(key, self._kdf_or_kser):
kdf = self._kdf_or_kser.to_frame()
temp_col = verify_temp_column_name(kdf, "__temp_col__")
kdf[temp_col] = key
return type(self)(kdf[self._kdf_or_kser.name])[kdf[temp_col]]
cond, limit, remaining_index = self._select_rows(key)
if cond is None and limit is None:
return self._kdf_or_kser
column_label = self._kdf_or_kser._column_label
column_labels = [column_label]
data_spark_columns = [self._internal.spark_column_for(column_label)]
data_dtypes = [self._internal.dtype_for(column_label)]
returns_series = True
series_name = self._kdf_or_kser.name
else:
assert self._is_df
if isinstance(key, tuple):
if len(key) != 2:
raise SparkPandasIndexingError("Only accepts pairs of candidates")
rows_sel, cols_sel = key
else:
rows_sel = key
cols_sel = None
if isinstance(rows_sel, Series) and not same_anchor(rows_sel, self._kdf_or_kser):
kdf = self._kdf_or_kser.copy()
temp_col = verify_temp_column_name(kdf, "__temp_col__")
kdf[temp_col] = rows_sel
return type(self)(kdf)[kdf[temp_col], cols_sel][list(self._kdf_or_kser.columns)]
cond, limit, remaining_index = self._select_rows(rows_sel)
(
column_labels,
data_spark_columns,
data_dtypes,
returns_series,
series_name,
) = self._select_cols(cols_sel)
if cond is None and limit is None and returns_series:
kser = self._kdf_or_kser._kser_for(column_labels[0])
if series_name is not None and series_name != kser.name:
kser = kser.rename(series_name)
return kser
if remaining_index is not None:
index_spark_columns = self._internal.index_spark_columns[-remaining_index:]
index_names = self._internal.index_names[-remaining_index:]
index_dtypes = self._internal.index_dtypes[-remaining_index:]
else:
index_spark_columns = self._internal.index_spark_columns
index_names = self._internal.index_names
index_dtypes = self._internal.index_dtypes
if len(column_labels) > 0:
column_labels = column_labels.copy()
column_labels_level = max(
len(label) if label is not None else 1 for label in column_labels
)
none_column = 0
for i, label in enumerate(column_labels):
if label is None:
label = (none_column,)
none_column += 1
if len(label) < column_labels_level:
label = tuple(list(label) + ([""]) * (column_labels_level - len(label)))
column_labels[i] = label
if i == 0 and none_column == 1:
column_labels = [None]
column_label_names = self._internal.column_label_names[-column_labels_level:]
else:
column_label_names = self._internal.column_label_names
try:
sdf = self._internal.spark_frame
if cond is not None:
index_columns = sdf.select(index_spark_columns).columns
data_columns = sdf.select(data_spark_columns).columns
sdf = sdf.filter(cond).select(index_spark_columns + data_spark_columns)
index_spark_columns = [scol_for(sdf, col) for col in index_columns]
data_spark_columns = [scol_for(sdf, col) for col in data_columns]
if limit is not None:
if limit >= 0:
sdf = sdf.limit(limit)
else:
sdf = sdf.limit(sdf.count() + limit)
sdf = sdf.drop(NATURAL_ORDER_COLUMN_NAME)
except AnalysisException:
raise KeyError(
"[{}] don't exist in columns".format(
[col._jc.toString() for col in data_spark_columns]
)
)
internal = InternalFrame(
spark_frame=sdf,
index_spark_columns=index_spark_columns,
index_names=index_names,
index_dtypes=index_dtypes,
column_labels=column_labels,
data_spark_columns=data_spark_columns,
data_dtypes=data_dtypes,
column_label_names=column_label_names,
)
kdf = DataFrame(internal)
if returns_series:
kdf_or_kser = first_series(kdf)
if series_name is not None and series_name != kdf_or_kser.name:
kdf_or_kser = kdf_or_kser.rename(series_name)
else:
kdf_or_kser = kdf
if remaining_index is not None and remaining_index == 0:
pdf_or_pser = kdf_or_kser.head(2).to_pandas()
length = len(pdf_or_pser)
if length == 0:
raise KeyError(name_like_string(key))
elif length == 1:
return pdf_or_pser.iloc[0]
else:
return kdf_or_kser
else:
return kdf_or_kser
def __setitem__(self, key, value):
from databricks.koalas.frame import DataFrame
from databricks.koalas.series import Series, first_series
if self._is_series:
if (
isinstance(key, Series)
and (isinstance(self, iLocIndexer) or not same_anchor(key, self._kdf_or_kser))
) or (
isinstance(value, Series)
and (isinstance(self, iLocIndexer) or not same_anchor(value, self._kdf_or_kser))
):
if self._kdf_or_kser.name is None:
kdf = self._kdf_or_kser.to_frame()
column_label = kdf._internal.column_labels[0]
else:
kdf = self._kdf_or_kser._kdf.copy()
column_label = self._kdf_or_kser._column_label
temp_natural_order = verify_temp_column_name(kdf, "__temp_natural_order__")
temp_key_col = verify_temp_column_name(kdf, "__temp_key_col__")
temp_value_col = verify_temp_column_name(kdf, "__temp_value_col__")
kdf[temp_natural_order] = F.monotonically_increasing_id()
if isinstance(key, Series):
kdf[temp_key_col] = key
if isinstance(value, Series):
kdf[temp_value_col] = value
kdf = kdf.sort_values(temp_natural_order).drop(temp_natural_order)
kser = kdf._kser_for(column_label)
if isinstance(key, Series):
key = F.col(
"`{}`".format(kdf[temp_key_col]._internal.data_spark_column_names[0])
)
if isinstance(value, Series):
value = F.col(
"`{}`".format(kdf[temp_value_col]._internal.data_spark_column_names[0])
)
type(self)(kser)[key] = value
if self._kdf_or_kser.name is None:
kser = kser.rename()
self._kdf_or_kser._kdf._update_internal_frame(
kser._kdf[
self._kdf_or_kser._kdf._internal.column_labels
]._internal.resolved_copy,
requires_same_anchor=False,
)
return
if isinstance(value, DataFrame):
raise ValueError("Incompatible indexer with DataFrame")
cond, limit, remaining_index = self._select_rows(key)
if cond is None:
cond = F.lit(True)
if limit is not None:
cond = cond & (self._internal.spark_frame[self._sequence_col] < F.lit(limit))
if isinstance(value, (Series, spark.Column)):
if remaining_index is not None and remaining_index == 0:
raise ValueError(
"No axis named {} for object type {}".format(key, type(value).__name__)
)
if isinstance(value, Series):
value = value.spark.column
else:
value = F.lit(value)
scol = (
F.when(cond, value)
.otherwise(self._internal.spark_column_for(self._kdf_or_kser._column_label))
.alias(name_like_string(self._kdf_or_kser.name or SPARK_DEFAULT_SERIES_NAME))
)
internal = self._internal.with_new_spark_column(
self._kdf_or_kser._column_label, scol # TODO: dtype?
)
self._kdf_or_kser._kdf._update_internal_frame(internal, requires_same_anchor=False)
else:
assert self._is_df
if isinstance(key, tuple):
if len(key) != 2:
raise SparkPandasIndexingError("Only accepts pairs of candidates")
rows_sel, cols_sel = key
else:
rows_sel = key
cols_sel = None
if isinstance(value, DataFrame):
if len(value.columns) == 1:
value = first_series(value)
else:
raise ValueError("Only a dataframe with one column can be assigned")
if (
isinstance(rows_sel, Series)
and (isinstance(self, iLocIndexer) or not same_anchor(rows_sel, self._kdf_or_kser))
) or (
isinstance(value, Series)
and (isinstance(self, iLocIndexer) or not same_anchor(value, self._kdf_or_kser))
):
kdf = self._kdf_or_kser.copy()
temp_natural_order = verify_temp_column_name(kdf, "__temp_natural_order__")
temp_key_col = verify_temp_column_name(kdf, "__temp_key_col__")
temp_value_col = verify_temp_column_name(kdf, "__temp_value_col__")
kdf[temp_natural_order] = F.monotonically_increasing_id()
if isinstance(rows_sel, Series):
kdf[temp_key_col] = rows_sel
if isinstance(value, Series):
kdf[temp_value_col] = value
kdf = kdf.sort_values(temp_natural_order).drop(temp_natural_order)
if isinstance(rows_sel, Series):
rows_sel = F.col(
"`{}`".format(kdf[temp_key_col]._internal.data_spark_column_names[0])
)
if isinstance(value, Series):
value = F.col(
"`{}`".format(kdf[temp_value_col]._internal.data_spark_column_names[0])
)
type(self)(kdf)[rows_sel, cols_sel] = value
self._kdf_or_kser._update_internal_frame(
kdf[list(self._kdf_or_kser.columns)]._internal.resolved_copy,
requires_same_anchor=False,
)
return
cond, limit, remaining_index = self._select_rows(rows_sel)
missing_keys = []
_, data_spark_columns, _, _, _ = self._select_cols(cols_sel, missing_keys=missing_keys)
if cond is None:
cond = F.lit(True)
if limit is not None:
cond = cond & (self._internal.spark_frame[self._sequence_col] < F.lit(limit))
if isinstance(value, (Series, spark.Column)):
if remaining_index is not None and remaining_index == 0:
raise ValueError("Incompatible indexer with Series")
if len(data_spark_columns) > 1:
raise ValueError("shape mismatch")
if isinstance(value, Series):
value = value.spark.column
else:
value = F.lit(value)
new_data_spark_columns = []
new_dtypes = []
for new_scol, spark_column_name, new_dtype in zip(
self._internal.data_spark_columns,
self._internal.data_spark_column_names,
self._internal.data_dtypes,
):
for scol in data_spark_columns:
if new_scol._jc.equals(scol._jc):
new_scol = F.when(cond, value).otherwise(scol).alias(spark_column_name)
new_dtype = spark_type_to_pandas_dtype(
self._internal.spark_frame.select(new_scol).schema[0].dataType,
use_extension_dtypes=isinstance(new_dtype, extension_dtypes),
)
break
new_data_spark_columns.append(new_scol)
new_dtypes.append(new_dtype)
column_labels = self._internal.column_labels.copy()
for label in missing_keys:
if not is_name_like_tuple(label):
label = (label,)
if len(label) < self._internal.column_labels_level:
label = tuple(
list(label) + ([""] * (self._internal.column_labels_level - len(label)))
)
elif len(label) > self._internal.column_labels_level:
raise KeyError(
"Key length ({}) exceeds index depth ({})".format(
len(label), self._internal.column_labels_level
)
)
column_labels.append(label)
new_data_spark_columns.append(F.when(cond, value).alias(name_like_string(label)))
new_dtypes.append(None)
internal = self._internal.with_new_columns(
new_data_spark_columns, column_labels=column_labels, data_dtypes=new_dtypes
)
self._kdf_or_kser._update_internal_frame(internal, requires_same_anchor=False)
class LocIndexer(LocIndexerLike):
"""
Access a group of rows and columns by label(s) or a boolean Series.
``.loc[]`` is primarily label based, but may also be used with a
conditional boolean Series derived from the DataFrame or Series.
Allowed inputs are:
- A single label, e.g. ``5`` or ``'a'``, (note that ``5`` is
interpreted as a *label* of the index, and **never** as an
integer position along the index) for column selection.
- A list or array of labels, e.g. ``['a', 'b', 'c']``.
- A slice object with labels, e.g. ``'a':'f'``.
- A conditional boolean Series derived from the DataFrame or Series
- A boolean array of the same length as the column axis being sliced,
e.g. ``[True, False, True]``.
- An alignable boolean pandas Series to the column axis being sliced.
The index of the key will be aligned before masking.
Not allowed inputs which pandas allows are:
- A boolean array of the same length as the row axis being sliced,
e.g. ``[True, False, True]``.
- A ``callable`` function with one argument (the calling Series, DataFrame
or Panel) and that returns valid output for indexing (one of the above)
.. note:: MultiIndex is not supported yet.
.. note:: Note that contrary to usual python slices, **both** the
start and the stop are included, and the step of the slice is not allowed.
.. note:: With a list or array of labels for row selection,
Koalas behaves as a filter without reordering by the labels.
See Also
--------
Series.loc : Access group of values using labels.
Examples
--------
**Getting values**
>>> df = ks.DataFrame([[1, 2], [4, 5], [7, 8]],
... index=['cobra', 'viper', 'sidewinder'],
... columns=['max_speed', 'shield'])
>>> df
max_speed shield
cobra 1 2
viper 4 5
sidewinder 7 8
Single label. Note this returns the row as a Series.
>>> df.loc['viper']
max_speed 4
shield 5
Name: viper, dtype: int64
List of labels. Note using ``[[]]`` returns a DataFrame.
    Also note that Koalas behaves just like a filter without reordering by the labels.
>>> df.loc[['viper', 'sidewinder']]
max_speed shield
viper 4 5
sidewinder 7 8
>>> df.loc[['sidewinder', 'viper']]
max_speed shield
viper 4 5
sidewinder 7 8
Single label for column.
>>> df.loc['cobra', 'shield']
2
List of labels for row.
>>> df.loc[['cobra'], 'shield']
cobra 2
Name: shield, dtype: int64
List of labels for column.
>>> df.loc['cobra', ['shield']]
shield 2
Name: cobra, dtype: int64
List of labels for both row and column.
>>> df.loc[['cobra'], ['shield']]
shield
cobra 2
Slice with labels for row and single label for column. As mentioned
above, note that both the start and stop of the slice are included.
>>> df.loc['cobra':'viper', 'max_speed']
cobra 1
viper 4
Name: max_speed, dtype: int64
Conditional that returns a boolean Series
>>> df.loc[df['shield'] > 6]
max_speed shield
sidewinder 7 8
Conditional that returns a boolean Series with column labels specified
>>> df.loc[df['shield'] > 6, ['max_speed']]
max_speed
sidewinder 7
A boolean array of the same length as the column axis being sliced.
>>> df.loc[:, [False, True]]
shield
cobra 2
viper 5
sidewinder 8
An alignable boolean Series to the column axis being sliced.
>>> df.loc[:, pd.Series([False, True], index=['max_speed', 'shield'])]
shield
cobra 2
viper 5
sidewinder 8
**Setting values**
Setting value for all items matching the list of labels.
>>> df.loc[['viper', 'sidewinder'], ['shield']] = 50
>>> df
max_speed shield
cobra 1 2
viper 4 50
sidewinder 7 50
Setting value for an entire row
>>> df.loc['cobra'] = 10
>>> df
max_speed shield
cobra 10 10
viper 4 50
sidewinder 7 50
Set value for an entire column
>>> df.loc[:, 'max_speed'] = 30
>>> df
max_speed shield
cobra 30 10
viper 30 50
sidewinder 30 50
Set value for an entire list of columns
>>> df.loc[:, ['max_speed', 'shield']] = 100
>>> df
max_speed shield
cobra 100 100
viper 100 100
sidewinder 100 100
Set value with Series
>>> df.loc[:, 'shield'] = df['shield'] * 2
>>> df
max_speed shield
cobra 100 200
viper 100 200
sidewinder 100 200
**Getting values on a DataFrame with an index that has integer labels**
Another example using integers for the index
>>> df = ks.DataFrame([[1, 2], [4, 5], [7, 8]],
... index=[7, 8, 9],
... columns=['max_speed', 'shield'])
>>> df
max_speed shield
7 1 2
8 4 5
9 7 8
Slice with integer labels for rows. As mentioned above, note that both
the start and stop of the slice are included.
>>> df.loc[7:9]
max_speed shield
7 1 2
8 4 5
9 7 8
"""
@staticmethod
def _NotImplemented(description):
return SparkPandasNotImplementedError(
description=description,
pandas_function=".loc[..., ...]",
spark_target_function="select, where",
)
def _select_rows_by_series(
self, rows_sel: "Series"
) -> Tuple[Optional[spark.Column], Optional[int], Optional[int]]:
assert isinstance(rows_sel.spark.data_type, BooleanType), rows_sel.spark.data_type
return rows_sel.spark.column, None, None
def _select_rows_by_spark_column(
self, rows_sel: spark.Column
) -> Tuple[Optional[spark.Column], Optional[int], Optional[int]]:
spark_type = self._internal.spark_frame.select(rows_sel).schema[0].dataType
assert isinstance(spark_type, BooleanType), spark_type
return rows_sel, None, None
def _select_rows_by_slice(
self, rows_sel: slice
) -> Tuple[Optional[spark.Column], Optional[int], Optional[int]]:
from databricks.koalas.indexes import MultiIndex
if rows_sel.step is not None:
raise LocIndexer._NotImplemented("Cannot use step with Spark.")
elif self._internal.index_level == 1:
sdf = self._internal.spark_frame
index = self._kdf_or_kser.index
index_column = index.to_series()
index_data_type = index_column.spark.data_type
start = rows_sel.start
stop = rows_sel.stop
# get natural order from '__natural_order__' from start to stop
# to keep natural order.
start_and_stop = (
sdf.select(index_column.spark.column, NATURAL_ORDER_COLUMN_NAME)
.where(
(index_column.spark.column == F.lit(start).cast(index_data_type))
| (index_column.spark.column == F.lit(stop).cast(index_data_type))
)
.collect()
)
start = [row[1] for row in start_and_stop if row[0] == start]
start = start[0] if len(start) > 0 else None
stop = [row[1] for row in start_and_stop if row[0] == stop]
stop = stop[-1] if len(stop) > 0 else None
cond = []
if start is not None:
cond.append(F.col(NATURAL_ORDER_COLUMN_NAME) >= F.lit(start).cast(LongType()))
if stop is not None:
cond.append(F.col(NATURAL_ORDER_COLUMN_NAME) <= F.lit(stop).cast(LongType()))
# if index order is not monotonic increasing or decreasing
# and specified values don't exist in index, raise KeyError
if (start is None and rows_sel.start is not None) or (
stop is None and rows_sel.stop is not None
):
inc = index_column.is_monotonic_increasing
if inc is False:
dec = index_column.is_monotonic_decreasing
if start is None and rows_sel.start is not None:
start = rows_sel.start
if inc is not False:
cond.append(index_column.spark.column >= F.lit(start).cast(index_data_type))
elif dec is not False:
cond.append(index_column.spark.column <= F.lit(start).cast(index_data_type))
else:
raise KeyError(rows_sel.start)
if stop is None and rows_sel.stop is not None:
stop = rows_sel.stop
if inc is not False:
cond.append(index_column.spark.column <= F.lit(stop).cast(index_data_type))
elif dec is not False:
cond.append(index_column.spark.column >= F.lit(stop).cast(index_data_type))
else:
raise KeyError(rows_sel.stop)
return reduce(lambda x, y: x & y, cond), None, None
else:
index = self._kdf_or_kser.index
index_data_type = [f.dataType for f in index.to_series().spark.data_type]
start = rows_sel.start
if start is not None:
if not isinstance(start, tuple):
start = (start,)
if len(start) == 0:
start = None
stop = rows_sel.stop
if stop is not None:
if not isinstance(stop, tuple):
stop = (stop,)
if len(stop) == 0:
stop = None
depth = max(
len(start) if start is not None else 0, len(stop) if stop is not None else 0
)
if depth == 0:
return None, None, None
elif (
depth > self._internal.index_level
or not index.droplevel(list(range(self._internal.index_level)[depth:])).is_monotonic
):
raise KeyError(
"Key length ({}) was greater than MultiIndex sort depth".format(depth)
)
conds = [] # type: List[spark.Column]
if start is not None:
cond = F.lit(True)
for scol, value, dt in list(
zip(self._internal.index_spark_columns, start, index_data_type)
)[::-1]:
compare = MultiIndex._comparator_for_monotonic_increasing(dt)
cond = F.when(scol.eqNullSafe(F.lit(value).cast(dt)), cond).otherwise(
compare(scol, F.lit(value).cast(dt), spark.Column.__gt__)
)
conds.append(cond)
if stop is not None:
cond = F.lit(True)
for scol, value, dt in list(
zip(self._internal.index_spark_columns, stop, index_data_type)
)[::-1]:
compare = MultiIndex._comparator_for_monotonic_increasing(dt)
cond = F.when(scol.eqNullSafe(F.lit(value).cast(dt)), cond).otherwise(
compare(scol, F.lit(value).cast(dt), spark.Column.__lt__)
)
conds.append(cond)
return reduce(lambda x, y: x & y, conds), None, None
def _select_rows_by_iterable(
self, rows_sel: Iterable
) -> Tuple[Optional[spark.Column], Optional[int], Optional[int]]:
rows_sel = list(rows_sel)
if len(rows_sel) == 0:
return F.lit(False), None, None
elif self._internal.index_level == 1:
index_column = self._kdf_or_kser.index.to_series()
index_data_type = index_column.spark.data_type
if len(rows_sel) == 1:
return (
index_column.spark.column == F.lit(rows_sel[0]).cast(index_data_type),
None,
None,
)
else:
return (
index_column.spark.column.isin(
[F.lit(r).cast(index_data_type) for r in rows_sel]
),
None,
None,
)
else:
raise LocIndexer._NotImplemented("Cannot select with MultiIndex with Spark.")
def _select_rows_else(
self, rows_sel: Any
) -> Tuple[Optional[spark.Column], Optional[int], Optional[int]]:
if not isinstance(rows_sel, tuple):
rows_sel = (rows_sel,)
if len(rows_sel) > self._internal.index_level:
raise SparkPandasIndexingError("Too many indexers")
rows = [scol == value for scol, value in zip(self._internal.index_spark_columns, rows_sel)]
return (
reduce(lambda x, y: x & y, rows),
None,
self._internal.index_level - len(rows_sel),
)
def _get_from_multiindex_column(
self, key, missing_keys, labels=None, recursed=0
) -> Tuple[List[Tuple], Optional[List[spark.Column]], Any, bool, Optional[Tuple]]:
""" Select columns from multi-index columns. """
assert isinstance(key, tuple)
if labels is None:
labels = [(label, label) for label in self._internal.column_labels]
for k in key:
labels = [
(label, None if lbl is None else lbl[1:])
for label, lbl in labels
if (lbl is None and k is None) or (lbl is not None and lbl[0] == k)
]
if len(labels) == 0:
if missing_keys is None:
raise KeyError(k)
else:
missing_keys.append(key)
return [], [], [], False, None
if all(lbl is not None and len(lbl) > 0 and lbl[0] == "" for _, lbl in labels):
# If the head is '', drill down recursively.
labels = [(label, tuple([str(key), *lbl[1:]])) for i, (label, lbl) in enumerate(labels)]
return self._get_from_multiindex_column((str(key),), missing_keys, labels, recursed + 1)
else:
returns_series = all(lbl is None or len(lbl) == 0 for _, lbl in labels)
if returns_series:
labels = set(label for label, _ in labels)
assert len(labels) == 1
label = list(labels)[0]
column_labels = [label]
data_spark_columns = [self._internal.spark_column_for(label)]
data_dtypes = [self._internal.dtype_for(label)]
if label is None:
series_name = None
else:
if recursed > 0:
label = label[:-recursed]
series_name = label if len(label) > 1 else label[0]
else:
column_labels = [
None if lbl is None or lbl == (None,) else lbl for _, lbl in labels
]
data_spark_columns = [self._internal.spark_column_for(label) for label, _ in labels]
data_dtypes = [self._internal.dtype_for(label) for label, _ in labels]
series_name = None
return column_labels, data_spark_columns, data_dtypes, returns_series, series_name
def _select_cols_by_series(
self, cols_sel: "Series", missing_keys: Optional[List[Tuple]]
) -> Tuple[
List[Tuple], Optional[List[spark.Column]], Optional[List[Dtype]], bool, Optional[Tuple]
]:
column_labels = [cols_sel._column_label]
data_spark_columns = [cols_sel.spark.column]
data_dtypes = [cols_sel.dtype]
return column_labels, data_spark_columns, data_dtypes, True, None
def _select_cols_by_spark_column(
self, cols_sel: spark.Column, missing_keys: Optional[List[Tuple]]
) -> Tuple[
List[Tuple], Optional[List[spark.Column]], Optional[List[Dtype]], bool, Optional[Tuple]
]:
column_labels = [
(self._internal.spark_frame.select(cols_sel).columns[0],)
] # type: List[Tuple]
data_spark_columns = [cols_sel]
return column_labels, data_spark_columns, None, True, None
def _select_cols_by_slice(
self, cols_sel: slice, missing_keys: Optional[List[Tuple]]
) -> Tuple[
List[Tuple], Optional[List[spark.Column]], Optional[List[Dtype]], bool, Optional[Tuple]
]:
start, stop = self._kdf_or_kser.columns.slice_locs(start=cols_sel.start, end=cols_sel.stop)
column_labels = self._internal.column_labels[start:stop]
data_spark_columns = self._internal.data_spark_columns[start:stop]
data_dtypes = self._internal.data_dtypes[start:stop]
return column_labels, data_spark_columns, data_dtypes, False, None
def _select_cols_by_iterable(
self, cols_sel: Iterable, missing_keys: Optional[List[Tuple]]
) -> Tuple[
List[Tuple], Optional[List[spark.Column]], Optional[List[Dtype]], bool, Optional[Tuple]
]:
from databricks.koalas.series import Series
if all(isinstance(key, Series) for key in cols_sel):
column_labels = [key._column_label for key in cols_sel]
data_spark_columns = [key.spark.column for key in cols_sel]
data_dtypes = [key.dtype for key in cols_sel]
elif all(isinstance(key, spark.Column) for key in cols_sel):
column_labels = [
(self._internal.spark_frame.select(col).columns[0],) for col in cols_sel
]
data_spark_columns = list(cols_sel)
data_dtypes = None
elif all(isinstance(key, bool) for key in cols_sel) or all(
isinstance(key, np.bool_) for key in cols_sel
):
if len(cast(Sized, cols_sel)) != len(self._internal.column_labels):
raise IndexError(
"Boolean index has wrong length: %s instead of %s"
% (len(cast(Sized, cols_sel)), len(self._internal.column_labels))
)
if isinstance(cols_sel, pd.Series):
if not cols_sel.index.sort_values().equals(self._kdf.columns.sort_values()):
raise SparkPandasIndexingError(
"Unalignable boolean Series provided as indexer "
"(index of the boolean Series and of the indexed object do not match)"
)
else:
column_labels = [
column_label
for column_label in self._internal.column_labels
if cols_sel[column_label if len(column_label) > 1 else column_label[0]]
]
data_spark_columns = [
self._internal.spark_column_for(column_label)
for column_label in column_labels
]
data_dtypes = [
self._internal.dtype_for(column_label) for column_label in column_labels
]
else:
column_labels = [
self._internal.column_labels[i] for i, col in enumerate(cols_sel) if col
]
data_spark_columns = [
self._internal.data_spark_columns[i] for i, col in enumerate(cols_sel) if col
]
data_dtypes = [
self._internal.data_dtypes[i] for i, col in enumerate(cols_sel) if col
]
elif any(isinstance(key, tuple) for key in cols_sel) and any(
not is_name_like_tuple(key) for key in cols_sel
):
raise TypeError(
"Expected tuple, got {}".format(
type(set(key for key in cols_sel if not is_name_like_tuple(key)).pop())
)
)
else:
if missing_keys is None and all(isinstance(key, tuple) for key in cols_sel):
level = self._internal.column_labels_level
if any(len(key) != level for key in cols_sel):
raise ValueError("All the key level should be the same as column index level.")
column_labels = []
data_spark_columns = []
data_dtypes = []
for key in cols_sel:
found = False
for label in self._internal.column_labels:
if label == key or label[0] == key:
column_labels.append(label)
data_spark_columns.append(self._internal.spark_column_for(label))
data_dtypes.append(self._internal.dtype_for(label))
found = True
if not found:
if missing_keys is None:
raise KeyError("['{}'] not in index".format(name_like_string(key)))
else:
missing_keys.append(key)
return column_labels, data_spark_columns, data_dtypes, False, None
def _select_cols_else(
self, cols_sel: Any, missing_keys: Optional[List[Tuple]]
) -> Tuple[
List[Tuple], Optional[List[spark.Column]], Optional[List[Dtype]], bool, Optional[Tuple]
]:
if not is_name_like_tuple(cols_sel):
cols_sel = (cols_sel,)
return self._get_from_multiindex_column(cols_sel, missing_keys)
class iLocIndexer(LocIndexerLike):
"""
Purely integer-location based indexing for selection by position.
``.iloc[]`` is primarily integer position based (from ``0`` to
``length-1`` of the axis), but may also be used with a conditional boolean Series.
Allowed inputs are:
- An integer for column selection, e.g. ``5``.
- A list or array of integers for row selection with distinct index values,
e.g. ``[3, 4, 0]``
- A list or array of integers for column selection, e.g. ``[4, 3, 0]``.
- A boolean array for column selection.
- A slice object with ints for row and column selection, e.g. ``1:7``.
Not allowed inputs which pandas allows are:
- A list or array of integers for row selection with duplicated indexes,
e.g. ``[4, 4, 0]``.
- A boolean array for row selection.
- A ``callable`` function with one argument (the calling Series, DataFrame
or Panel) and that returns valid output for indexing (one of the above).
This is useful in method chains, when you don't have a reference to the
calling object, but would like to base your selection on some value.
``.iloc`` will raise ``IndexError`` if a requested indexer is
out-of-bounds, except *slice* indexers which allow out-of-bounds
indexing (this conforms with python/numpy *slice* semantics).
See Also
--------
DataFrame.loc : Purely label-location based indexer for selection by label.
Series.iloc : Purely integer-location based indexing for
selection by position.
Examples
--------
>>> mydict = [{'a': 1, 'b': 2, 'c': 3, 'd': 4},
... {'a': 100, 'b': 200, 'c': 300, 'd': 400},
... {'a': 1000, 'b': 2000, 'c': 3000, 'd': 4000 }]
>>> df = ks.DataFrame(mydict, columns=['a', 'b', 'c', 'd'])
>>> df
a b c d
0 1 2 3 4
1 100 200 300 400
2 1000 2000 3000 4000
**Indexing just the rows**
A scalar integer for row selection.
>>> df.iloc[1]
a 100
b 200
c 300
d 400
Name: 1, dtype: int64
>>> df.iloc[[0]]
a b c d
0 1 2 3 4
With a `slice` object.
>>> df.iloc[:3]
a b c d
0 1 2 3 4
1 100 200 300 400
2 1000 2000 3000 4000
**Indexing both axes**
You can mix the indexer types for the index and columns. Use ``:`` to
select the entire axis.
With scalar integers.
>>> df.iloc[:1, 1]
0 2
Name: b, dtype: int64
With lists of integers.
>>> df.iloc[:2, [1, 3]]
b d
0 2 4
1 200 400
With `slice` objects.
>>> df.iloc[:2, 0:3]
a b c
0 1 2 3
1 100 200 300
With a boolean array whose length matches the columns.
>>> df.iloc[:, [True, False, True, False]]
a c
0 1 3
1 100 300
2 1000 3000
**Setting values**
Setting value for all items matching the list of labels.
>>> df.iloc[[1, 2], [1]] = 50
>>> df
a b c d
0 1 2 3 4
1 100 50 300 400
2 1000 50 3000 4000
Setting value for an entire row
>>> df.iloc[0] = 10
>>> df
a b c d
0 10 10 10 10
1 100 50 300 400
2 1000 50 3000 4000
Set value for an entire column
>>> df.iloc[:, 2] = 30
>>> df
a b c d
0 10 10 30 10
1 100 50 30 400
2 1000 50 30 4000
Set value for an entire list of columns
>>> df.iloc[:, [2, 3]] = 100
>>> df
a b c d
0 10 10 100 100
1 100 50 100 100
2 1000 50 100 100
Set value with Series
>>> df.iloc[:, 3] = df.iloc[:, 3] * 2
>>> df
a b c d
0 10 10 100 200
1 100 50 100 200
2 1000 50 100 200
"""
@staticmethod
def _NotImplemented(description):
return SparkPandasNotImplementedError(
description=description,
pandas_function=".iloc[..., ...]",
spark_target_function="select, where",
)
@lazy_property
def _internal(self):
# Use resolved_copy to fix the natural order.
internal = super()._internal.resolved_copy
sdf = InternalFrame.attach_distributed_sequence_column(
internal.spark_frame, column_name=self._sequence_col
)
return internal.with_new_sdf(spark_frame=sdf.orderBy(NATURAL_ORDER_COLUMN_NAME))
@lazy_property
def _sequence_col(self):
# Use resolved_copy to fix the natural order.
internal = super()._internal.resolved_copy
return verify_temp_column_name(internal.spark_frame, "__distributed_sequence_column__")
def _select_rows_by_series(
self, rows_sel: "Series"
) -> Tuple[Optional[spark.Column], Optional[int], Optional[int]]:
raise iLocIndexer._NotImplemented(
".iloc requires numeric slice, conditional "
"boolean Index or a sequence of positions as int, "
"got {}".format(type(rows_sel))
)
def _select_rows_by_spark_column(
self, rows_sel: spark.column
) -> Tuple[Optional[spark.Column], Optional[int], Optional[int]]:
raise iLocIndexer._NotImplemented(
".iloc requires numeric slice, conditional "
"boolean Index or a sequence of positions as int, "
"got {}".format(type(rows_sel))
)
def _select_rows_by_slice(
self, rows_sel: slice
) -> Tuple[Optional[spark.Column], Optional[int], Optional[int]]:
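        # A sketch of the mapping (illustrative): ``.iloc[1:10:2]`` becomes a
        # filter on the attached sequence column equivalent to
        # ``1 <= seq < 10 and (seq - 1) % 2 == 0``.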
def verify_type(i):
if not isinstance(i, int):
raise TypeError(
"cannot do slice indexing with these indexers [{}] of {}".format(i, type(i))
)
has_negative = False
start = rows_sel.start
if start is not None:
verify_type(start)
if start == 0:
start = None
elif start < 0:
has_negative = True
stop = rows_sel.stop
if stop is not None:
verify_type(stop)
if stop < 0:
has_negative = True
step = rows_sel.step
if step is not None:
verify_type(step)
if step == 0:
raise ValueError("slice step cannot be zero")
else:
step = 1
if start is None and step == 1:
return None, stop, None
sdf = self._internal.spark_frame
sequence_scol = sdf[self._sequence_col]
if has_negative or (step < 0 and start is None):
cnt = sdf.count()
cond = []
if start is not None:
if start < 0:
start = start + cnt
if step >= 0:
cond.append(sequence_scol >= F.lit(start).cast(LongType()))
else:
cond.append(sequence_scol <= F.lit(start).cast(LongType()))
if stop is not None:
if stop < 0:
stop = stop + cnt
if step >= 0:
cond.append(sequence_scol < F.lit(stop).cast(LongType()))
else:
cond.append(sequence_scol > F.lit(stop).cast(LongType()))
if step != 1:
if step > 0:
start = start or 0
else:
start = start or (cnt - 1)
cond.append(((sequence_scol - start) % F.lit(step).cast(LongType())) == F.lit(0))
return reduce(lambda x, y: x & y, cond), None, None
def _select_rows_by_iterable(
self, rows_sel: Iterable
) -> Tuple[Optional[spark.Column], Optional[int], Optional[int]]:
sdf = self._internal.spark_frame
if any(isinstance(key, (int, np.int, np.int64, np.int32)) and key < 0 for key in rows_sel):
offset = sdf.count()
else:
offset = 0
new_rows_sel = []
for key in list(rows_sel):
if not isinstance(key, (int, np.int, np.int64, np.int32)):
raise TypeError(
"cannot do positional indexing with these indexers [{}] of {}".format(
key, type(key)
)
)
if key < 0:
key = key + offset
new_rows_sel.append(key)
if len(new_rows_sel) != len(set(new_rows_sel)):
raise NotImplementedError(
"Duplicated row selection is not currently supported; "
"however, normalised index was [%s]" % new_rows_sel
)
sequence_scol = sdf[self._sequence_col]
cond = []
for key in new_rows_sel:
cond.append(sequence_scol == F.lit(int(key)).cast(LongType()))
if len(cond) == 0:
cond = [F.lit(False)]
return reduce(lambda x, y: x | y, cond), None, None
def _select_rows_else(
self, rows_sel: Any
) -> Tuple[Optional[spark.Column], Optional[int], Optional[int]]:
if isinstance(rows_sel, int):
sdf = self._internal.spark_frame
return (sdf[self._sequence_col] == rows_sel), None, 0
elif isinstance(rows_sel, tuple):
raise SparkPandasIndexingError("Too many indexers")
else:
raise iLocIndexer._NotImplemented(
".iloc requires numeric slice, conditional "
"boolean Index or a sequence of positions as int, "
"got {}".format(type(rows_sel))
)
def _select_cols_by_series(
self, cols_sel: "Series", missing_keys: Optional[List[Tuple]]
) -> Tuple[
List[Tuple], Optional[List[spark.Column]], Optional[List[Dtype]], bool, Optional[Tuple]
]:
raise ValueError(
"Location based indexing can only have [integer, integer slice, "
"listlike of integers, boolean array] types, got {}".format(cols_sel)
)
def _select_cols_by_spark_column(
self, cols_sel: spark.Column, missing_keys: Optional[List[Tuple]]
) -> Tuple[
List[Tuple], Optional[List[spark.Column]], Optional[List[Dtype]], bool, Optional[Tuple]
]:
raise ValueError(
"Location based indexing can only have [integer, integer slice, "
"listlike of integers, boolean array] types, got {}".format(cols_sel)
)
def _select_cols_by_slice(
self, cols_sel: slice, missing_keys: Optional[List[Tuple]]
) -> Tuple[
List[Tuple], Optional[List[spark.Column]], Optional[List[Dtype]], bool, Optional[Tuple]
]:
if all(
s is None or isinstance(s, int) for s in (cols_sel.start, cols_sel.stop, cols_sel.step)
):
column_labels = self._internal.column_labels[cols_sel]
data_spark_columns = self._internal.data_spark_columns[cols_sel]
data_dtypes = self._internal.data_dtypes[cols_sel]
return column_labels, data_spark_columns, data_dtypes, False, None
else:
not_none = (
cols_sel.start
if cols_sel.start is not None
else cols_sel.stop
if cols_sel.stop is not None
else cols_sel.step
)
raise TypeError(
"cannot do slice indexing with these indexers {} of {}".format(
not_none, type(not_none)
)
)
def _select_cols_by_iterable(
self, cols_sel: Iterable, missing_keys: Optional[List[Tuple]]
) -> Tuple[
List[Tuple], Optional[List[spark.Column]], Optional[List[Dtype]], bool, Optional[Tuple]
]:
if all(isinstance(s, bool) for s in cols_sel):
cols_sel = [i for i, s in enumerate(cols_sel) if s]
if all(isinstance(s, int) for s in cols_sel):
column_labels = [self._internal.column_labels[s] for s in cols_sel]
data_spark_columns = [self._internal.data_spark_columns[s] for s in cols_sel]
data_dtypes = [self._internal.data_dtypes[s] for s in cols_sel]
return column_labels, data_spark_columns, data_dtypes, False, None
else:
raise TypeError("cannot perform reduce with flexible type")
def _select_cols_else(
self, cols_sel: Any, missing_keys: Optional[List[Tuple]]
) -> Tuple[
List[Tuple], Optional[List[spark.Column]], Optional[List[Dtype]], bool, Optional[Tuple]
]:
if isinstance(cols_sel, int):
if cols_sel > len(self._internal.column_labels):
raise KeyError(cols_sel)
column_labels = [self._internal.column_labels[cols_sel]]
data_spark_columns = [self._internal.data_spark_columns[cols_sel]]
data_dtypes = [self._internal.data_dtypes[cols_sel]]
return column_labels, data_spark_columns, data_dtypes, True, None
else:
raise ValueError(
"Location based indexing can only have [integer, integer slice, "
"listlike of integers, boolean array] types, got {}".format(cols_sel)
)
def __setitem__(self, key, value):
if is_list_like(value) and not isinstance(value, spark.Column):
iloc_item = self[key]
if not is_list_like(key) or not is_list_like(iloc_item):
raise ValueError("setting an array element with a sequence.")
else:
shape_iloc_item = iloc_item.shape
len_iloc_item = shape_iloc_item[0]
len_value = len(value)
if len_iloc_item != len_value:
if self._is_series:
raise ValueError(
"cannot set using a list-like indexer with a different length than "
"the value"
)
else:
raise ValueError(
"shape mismatch: value array of shape ({},) could not be broadcast "
"to indexing result of shape {}".format(len_value, shape_iloc_item)
)
super().__setitem__(key, value)
# Update again with resolved_copy to drop extra columns.
self._kdf._update_internal_frame(
self._kdf._internal.resolved_copy, requires_same_anchor=False
)
# Clean up implicitly cached properties to be able to reuse the indexer.
del self._internal
del self._sequence_col
# File: tests/client.py (repo: RossLote/lingo24, license: MIT)
import json
import requests_mock
from mock import Mock, patch
from lingo24.business_documents import (
Authenticator,
Client,
)
from .base import BaseTestCase
mock_time = Mock()
mock_time.return_value = 10000
class ClientTestCase(BaseTestCase):
def test_api_endpoint_url(self):
authenticator = Authenticator('xxx', 'yyy', 'https://www.example.com/callback')
default_client = Client(authenticator)
self.assertURLEqual(default_client.api_endpoint_url, 'https://api.lingo24.com/docs/v1/')
live_client = Client(authenticator, 'live')
self.assertURLEqual(live_client.api_endpoint_url, 'https://api.lingo24.com/docs/v1/')
demo_client = Client(authenticator, 'demo')
self.assertURLEqual(demo_client.api_endpoint_url, 'https://api-demo.lingo24.com/docs/v1/')
def test_make_url(self):
authenticator = Authenticator('xxx', 'yyy', 'https://www.example.com/callback')
client = Client(authenticator, 'demo')
url = client.make_url('abc/def')
self.assertEqual(url, 'https://api-demo.lingo24.com/docs/v1/abc/def')
@requests_mock.mock()
def test_status(self, m):
m.get('https://api-demo.lingo24.com/docs/v1/status', text=json.dumps({
'version': '1.2.3',
'date': 1234567890,
}))
authenticator = Authenticator('xxx', 'yyy', 'https://www.example.com/callback')
client = Client(authenticator, 'demo')
status = client.status
self.assertEqual(status.version, '1.2.3')
self.assertEqual(status.date, 1234567890)
@requests_mock.mock()
def test_authentication(self, m):
def check_auth(request):
return request.headers['Authorization'] == 'Bearer aaa'
m.get('https://api-demo.lingo24.com/docs/v1/foo', additional_matcher=check_auth, text='{}')
authenticator = Authenticator('xxx', 'yyy', 'https://www.example.com/callback')
authenticator.store.set({'access_token': 'aaa'})
client = Client(authenticator, 'demo')
client.api_get('foo')
@requests_mock.mock()
@patch('time.time', mock_time)
def test_authentication_expired_on_client(self, m):
def check_auth(request):
return request.headers['Authorization'] == 'Bearer ccc'
m.get('https://api-demo.lingo24.com/docs/v1/foo', additional_matcher=check_auth, text='{}')
m.post('https://api.lingo24.com/docs/v1/oauth2/access?refresh_token=bbb', text=json.dumps({
'access_token': 'ccc',
'refresh_token': 'ddd',
'expires_in': 123
}))
authenticator = Authenticator('xxx', 'yyy', 'https://www.example.com/callback')
authenticator.store.set({'access_token': 'aaa', 'refresh_token': 'bbb', 'expires_at': 1000})
client = Client(authenticator, 'demo')
client.api_get('foo')
@requests_mock.mock()
@patch('time.time', mock_time)
def test_authentication_expired_on_server(self, m):
def text_callback(request, context):
if request.headers['Authorization'] == 'Bearer ccc':
return '{}'
else:
context.status_code = 401
m.get('https://api-demo.lingo24.com/docs/v1/foo', text=text_callback)
m.post('https://api.lingo24.com/docs/v1/oauth2/access?refresh_token=bbb', text=json.dumps({
'access_token': 'ccc',
'refresh_token': 'ddd',
'expires_in': 123
}))
authenticator = Authenticator('xxx', 'yyy', 'https://www.example.com/callback')
authenticator.store.set({'access_token': 'aaa', 'refresh_token': 'bbb', 'expires_at': 50000})
client = Client(authenticator, 'demo')
client.api_get('foo')
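# Note: the two "expired" tests above exercise both refresh paths the client
# needs: a proactive token refresh when the locally stored expiry has passed,
# and a reactive refresh after the server rejects a stale token with a 401.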
# File: utils/processing.py (repo: morganmcg1/antra, license: Apache-2.0)
'''
Functions to process translations made by the model, removing special characters, etc.
'''
import re
def fastai_process_trans(trans):
    """Turn a batch of fastai-tokenized translations back into plain text."""
    trans_ls = []
    for s in trans:
        # Drop begin/end-of-sentence markers and re-attach punctuation that
        # the tokenizer split off into separate tokens.
        tmp = s.replace('xxbos', '').replace('xxeos', '')
        for punct in ('.', ',', '?', '!'):
            tmp = tmp.replace(' ' + punct, punct)
        # Drop the single trailing space left behind after final punctuation.
        if tmp.endswith(('. ', '? ', '! ')):
            tmp = tmp[:-1]
        # 'xxmaj '/'xxup ' mark capitalisation of the following word:
        # uppercase the next character and remove the marker. (fastai's
        # 'xxup' means the whole word is upper case; only the first
        # letter is restored here.)
        for spec in ('xxmaj ', 'xxup '):
            for _ in range(tmp.count(spec)):
                m = tmp.find(spec)
                if m == -1:
                    break
                ml = m + len(spec)
                if ml >= len(tmp):
                    # Marker at the very end of the string: just drop it.
                    tmp = tmp[:m]
                    break
                tmp = tmp[:ml] + tmp[ml].upper() + tmp[ml + 1:]
                tmp = tmp[:m] + tmp[ml:]  # remove the marker itself
        # 'xxwrep n word' marks n consecutive repetitions of a word:
        # expand it back into 'word word ... word'.
        xxwrep = 'xxwrep '
        for _ in range(tmp.count(xxwrep)):
            m = tmp.find(xxwrep)
            if m == -1:
                break
            parts = tmp[m + len(xxwrep):].split(None, 2)
            n = int(parts[0])            # number of repetitions of the word
            wrep = parts[1]              # the word to be repeated
            rest = parts[2] if len(parts) > 2 else ''
            tmp = tmp[:m] + f"{wrep} " * n + rest
        # Remove a leading space (guarding against an empty string).
        if tmp.startswith(' '):
            tmp = tmp[1:]
        trans_ls.append(tmp)
    return trans_ls
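# A minimal usage sketch (the input string is illustrative fastai tokenizer
# output, not real model predictions):
#
#   fastai_process_trans(['xxbos xxmaj hello , xxwrep 2 very good . xxeos'])
#   # -> ['Hello, very very good.']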
# File: pylib/gyp/generator/cmake.py (repo: chlorm-forks/gyp, license: BSD-3-Clause)
# Copyright (c) 2013 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""cmake output module
This module is under development and should be considered experimental.
This module produces cmake (2.8.8+) input as its output. One CMakeLists.txt is
created for each configuration.
This module's original purpose was to support editing in IDEs like KDevelop
which use CMake for project management. It is also possible to use CMake to
generate projects for other IDEs such as eclipse cdt and code::blocks. QtCreator
will convert the CMakeLists.txt to a code::blocks cbp for the editor to read,
but build using CMake. As a result, the QtCreator editor is unaware of compiler
defines. The generated CMakeLists.txt can also be used to build on Linux. There
is currently no support for building on platforms other than Linux.
The generated CMakeLists.txt should properly compile all projects. However,
there is a mismatch between gyp and cmake with regard to linking. All attempts
are made to work around this, but CMake sometimes sees -Wl,--start-group as a
library and incorrectly repeats it. As a result the output of this generator
should not be relied on for building.
When using with kdevelop, use version 4.4+. Previous versions of kdevelop will
not be able to find the header file directories described in the generated
CMakeLists.txt file.
"""
from __future__ import print_function
import multiprocessing
import os
import signal
import string
import subprocess
import gyp.common
import gyp.xcode_emulation
try:
# maketrans moved to str in python3.
_maketrans = string.maketrans
except NameError:
_maketrans = str.maketrans
generator_default_variables = {
'EXECUTABLE_PREFIX': '',
'EXECUTABLE_SUFFIX': '',
'STATIC_LIB_PREFIX': 'lib',
'STATIC_LIB_SUFFIX': '.a',
'SHARED_LIB_PREFIX': 'lib',
'SHARED_LIB_SUFFIX': '.so',
'SHARED_LIB_DIR': '${builddir}/lib.${TOOLSET}',
'LIB_DIR': '${obj}.${TOOLSET}',
'INTERMEDIATE_DIR': '${obj}.${TOOLSET}/${TARGET}/geni',
'SHARED_INTERMEDIATE_DIR': '${obj}/gen',
'PRODUCT_DIR': '${builddir}',
'RULE_INPUT_PATH': '${RULE_INPUT_PATH}',
'RULE_INPUT_DIRNAME': '${RULE_INPUT_DIRNAME}',
'RULE_INPUT_NAME': '${RULE_INPUT_NAME}',
'RULE_INPUT_ROOT': '${RULE_INPUT_ROOT}',
'RULE_INPUT_EXT': '${RULE_INPUT_EXT}',
'CONFIGURATION_NAME': '${configuration}',
}
FULL_PATH_VARS = ('${CMAKE_CURRENT_LIST_DIR}', '${builddir}', '${obj}')
generator_supports_multiple_toolsets = True
generator_wants_static_library_dependencies_adjusted = True
COMPILABLE_EXTENSIONS = {
'.c': 'cc',
'.cc': 'cxx',
'.cpp': 'cxx',
'.cxx': 'cxx',
'.s': 's', # cc
'.S': 's', # cc
}
def RemovePrefix(a, prefix):
"""Returns 'a' without 'prefix' if it starts with 'prefix'."""
return a[len(prefix):] if a.startswith(prefix) else a
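# Example (illustrative): RemovePrefix('libfoo', 'lib') returns 'foo', while
# RemovePrefix('foo', 'lib') returns 'foo' unchanged.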
def CalculateVariables(default_variables, params):
"""Calculate additional variables for use in the build (called by gyp)."""
default_variables.setdefault('OS', gyp.common.GetFlavor(params))
def Compilable(filename):
"""Return true if the file is compilable (should be in OBJS)."""
return any(filename.endswith(e) for e in COMPILABLE_EXTENSIONS)
def Linkable(filename):
"""Return true if the file is linkable (should be on the link line)."""
return filename.endswith('.o')
def NormjoinPathForceCMakeSource(base_path, rel_path):
"""Resolves rel_path against base_path and returns the result.
If rel_path is an absolute path it is returned unchanged.
Otherwise it is resolved against base_path and normalized.
If the result is a relative path, it is forced to be relative to the
CMakeLists.txt.
"""
if os.path.isabs(rel_path):
return rel_path
if any([rel_path.startswith(var) for var in FULL_PATH_VARS]):
return rel_path
# TODO: do we need to check base_path for absolute variables as well?
return os.path.join('${CMAKE_CURRENT_LIST_DIR}',
os.path.normpath(os.path.join(base_path, rel_path)))
def NormjoinPath(base_path, rel_path):
"""Resolves rel_path against base_path and returns the result.
TODO: what is this really used for?
If rel_path begins with '$' it is returned unchanged.
Otherwise it is resolved against base_path if relative, then normalized.
"""
if rel_path.startswith('$') and not rel_path.startswith('${configuration}'):
return rel_path
return os.path.normpath(os.path.join(base_path, rel_path))
def CMakeStringEscape(a):
"""Escapes the string 'a' for use inside a CMake string.
This means escaping
'\' otherwise it may be seen as modifying the next character
'"' otherwise it will end the string
';' otherwise the string becomes a list
The following do not need to be escaped
'#' when the lexer is in string state, this does not start a comment
The following are yet unknown
'$' generator variables (like ${obj}) must not be escaped,
but text $ should be escaped
what is wanted is to know which $ come from generator variables
"""
return a.replace('\\', '\\\\').replace(';', '\\;').replace('"', '\\"')
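# Example (illustrative): CMakeStringEscape('a;b') returns the string a\;b,
# which the CMake lexer reads back as the single string 'a;b' rather than as
# a two-element list.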
def SetFileProperty(output, source_name, property_name, values, sep):
"""Given a set of source file, sets the given property on them."""
output.write('set_source_files_properties(')
output.write(source_name)
output.write(' PROPERTIES ')
output.write(property_name)
output.write(' "')
for value in values:
output.write(CMakeStringEscape(value))
output.write(sep)
output.write('")\n')
def SetFilesProperty(output, variable, property_name, values, sep):
"""Given a set of source files, sets the given property on them."""
output.write('set_source_files_properties(')
WriteVariable(output, variable)
output.write(' PROPERTIES ')
output.write(property_name)
output.write(' "')
for value in values:
output.write(CMakeStringEscape(value))
output.write(sep)
output.write('")\n')
def SetTargetProperty(output, target_name, property_name, values, sep=''):
"""Given a target, sets the given property."""
output.write('set_target_properties(')
output.write(target_name)
output.write(' PROPERTIES ')
output.write(property_name)
output.write(' "')
for value in values:
output.write(CMakeStringEscape(value))
output.write(sep)
output.write('")\n')
def SetVariable(output, variable_name, value):
"""Sets a CMake variable."""
output.write('set(')
output.write(variable_name)
output.write(' "')
output.write(CMakeStringEscape(value))
output.write('")\n')
def SetVariableList(output, variable_name, values):
"""Sets a CMake variable to a list."""
if not values:
return SetVariable(output, variable_name, "")
if len(values) == 1:
return SetVariable(output, variable_name, values[0])
output.write('list(APPEND ')
output.write(variable_name)
output.write('\n "')
output.write('"\n "'.join([CMakeStringEscape(value) for value in values]))
output.write('")\n')
def UnsetVariable(output, variable_name):
"""Unsets a CMake variable."""
output.write('unset(')
output.write(variable_name)
output.write(')\n')
def WriteVariable(output, variable_name, prepend=None):
if prepend:
output.write(prepend)
output.write('${')
output.write(variable_name)
output.write('}')
class CMakeTargetType(object):
def __init__(self, command, modifier, property_modifier):
self.command = command
self.modifier = modifier
self.property_modifier = property_modifier
cmake_target_type_from_gyp_target_type = {
'executable': CMakeTargetType('add_executable', None, 'RUNTIME'),
'static_library': CMakeTargetType('add_library', 'STATIC', 'ARCHIVE'),
'shared_library': CMakeTargetType('add_library', 'SHARED', 'LIBRARY'),
'loadable_module': CMakeTargetType('add_library', 'MODULE', 'LIBRARY'),
'none': CMakeTargetType('add_custom_target', 'SOURCES', None),
}
def StringToCMakeTargetName(a):
"""Converts the given string 'a' to a valid CMake target name.
All invalid characters are replaced by '_'.
Invalid for cmake: ' ', '/', '(', ')', '"'
Invalid for make: ':'
Invalid for unknown reasons but cause failures: '.'
"""
return a.translate(_maketrans(' /():."', '_______'))
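# Example (illustrative): StringToCMakeTargetName('src/foo.gyp:bar (host)')
# yields 'src_foo_gyp_bar__host_'.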
def WriteActions(target_name, actions, extra_sources, extra_deps,
path_to_gyp, output):
"""Write CMake for the 'actions' in the target.
Args:
target_name: the name of the CMake target being generated.
actions: the Gyp 'actions' dict for this target.
extra_sources: [(<cmake_src>, <src>)] to append with generated source files.
extra_deps: [<cmake_taget>] to append with generated targets.
path_to_gyp: relative path from CMakeLists.txt being generated to
the Gyp file in which the target being generated is defined.
"""
for action in actions:
action_name = StringToCMakeTargetName(action['action_name'])
action_target_name = '%s__%s' % (target_name, action_name)
inputs = action['inputs']
inputs_name = action_target_name + '__input'
SetVariableList(output, inputs_name,
[NormjoinPathForceCMakeSource(path_to_gyp, dep) for dep in inputs])
outputs = action['outputs']
cmake_outputs = [NormjoinPathForceCMakeSource(path_to_gyp, out)
for out in outputs]
outputs_name = action_target_name + '__output'
SetVariableList(output, outputs_name, cmake_outputs)
# Build up a list of outputs.
# Collect the output dirs we'll need.
dirs = set(dir for dir in (os.path.dirname(o) for o in outputs) if dir)
if int(action.get('process_outputs_as_sources', False)):
extra_sources.extend(zip(cmake_outputs, outputs))
# add_custom_command
output.write('add_custom_command(OUTPUT ')
WriteVariable(output, outputs_name)
output.write('\n')
if len(dirs) > 0:
for directory in dirs:
output.write(' COMMAND ${CMAKE_COMMAND} -E make_directory ')
output.write(directory)
output.write('\n')
output.write(' COMMAND ')
output.write(gyp.common.EncodePOSIXShellList(action['action']))
output.write('\n')
output.write(' DEPENDS ')
WriteVariable(output, inputs_name)
output.write('\n')
output.write(' WORKING_DIRECTORY ${CMAKE_CURRENT_LIST_DIR}/')
output.write(path_to_gyp)
output.write('\n')
output.write(' COMMENT ')
if 'message' in action:
output.write(action['message'])
else:
output.write(action_target_name)
output.write('\n')
output.write(' VERBATIM\n')
output.write(')\n')
# add_custom_target
output.write('add_custom_target(')
output.write(action_target_name)
output.write('\n DEPENDS ')
WriteVariable(output, outputs_name)
output.write('\n SOURCES ')
WriteVariable(output, inputs_name)
output.write('\n)\n')
extra_deps.append(action_target_name)
def NormjoinRulePathForceCMakeSource(base_path, rel_path, rule_source):
  if rel_path.startswith(("${RULE_INPUT_PATH}", "${RULE_INPUT_DIRNAME}")):
if any([rule_source.startswith(var) for var in FULL_PATH_VARS]):
return rel_path
return NormjoinPathForceCMakeSource(base_path, rel_path)
def WriteRules(target_name, rules, extra_sources, extra_deps,
path_to_gyp, output):
"""Write CMake for the 'rules' in the target.
Args:
target_name: the name of the CMake target being generated.
    rules: the Gyp 'rules' dict for this target.
extra_sources: [(<cmake_src>, <src>)] to append with generated source files.
extra_deps: [<cmake_taget>] to append with generated targets.
path_to_gyp: relative path from CMakeLists.txt being generated to
the Gyp file in which the target being generated is defined.
"""
for rule in rules:
rule_name = StringToCMakeTargetName(target_name + '__' + rule['rule_name'])
inputs = rule.get('inputs', [])
inputs_name = rule_name + '__input'
SetVariableList(output, inputs_name,
[NormjoinPathForceCMakeSource(path_to_gyp, dep) for dep in inputs])
outputs = rule['outputs']
var_outputs = []
for count, rule_source in enumerate(rule.get('rule_sources', [])):
action_name = rule_name + '_' + str(count)
rule_source_dirname, rule_source_basename = os.path.split(rule_source)
rule_source_root, rule_source_ext = os.path.splitext(rule_source_basename)
SetVariable(output, 'RULE_INPUT_PATH', rule_source)
SetVariable(output, 'RULE_INPUT_DIRNAME', rule_source_dirname)
SetVariable(output, 'RULE_INPUT_NAME', rule_source_basename)
SetVariable(output, 'RULE_INPUT_ROOT', rule_source_root)
SetVariable(output, 'RULE_INPUT_EXT', rule_source_ext)
# Build up a list of outputs.
# Collect the output dirs we'll need.
dirs = set(dir for dir in (os.path.dirname(o) for o in outputs) if dir)
# Create variables for the output, as 'local' variable will be unset.
these_outputs = []
for output_index, out in enumerate(outputs):
output_name = action_name + '_' + str(output_index)
SetVariable(output, output_name,
NormjoinRulePathForceCMakeSource(path_to_gyp, out,
rule_source))
if int(rule.get('process_outputs_as_sources', False)):
extra_sources.append(('${' + output_name + '}', out))
these_outputs.append('${' + output_name + '}')
var_outputs.append('${' + output_name + '}')
# add_custom_command
output.write('add_custom_command(OUTPUT\n')
for out in these_outputs:
output.write(' ')
output.write(out)
output.write('\n')
for directory in dirs:
output.write(' COMMAND ${CMAKE_COMMAND} -E make_directory ')
output.write(directory)
output.write('\n')
output.write(' COMMAND ')
output.write(gyp.common.EncodePOSIXShellList(rule['action']))
output.write('\n')
output.write(' DEPENDS ')
WriteVariable(output, inputs_name)
output.write(' ')
output.write(NormjoinPath(path_to_gyp, rule_source))
output.write('\n')
# CMAKE_CURRENT_LIST_DIR is where the CMakeLists.txt lives.
# The cwd is the current build directory.
output.write(' WORKING_DIRECTORY ${CMAKE_CURRENT_LIST_DIR}/')
output.write(path_to_gyp)
output.write('\n')
output.write(' COMMENT ')
if 'message' in rule:
output.write(rule['message'])
else:
output.write(action_name)
output.write('\n')
output.write(' VERBATIM\n')
output.write(')\n')
UnsetVariable(output, 'RULE_INPUT_PATH')
UnsetVariable(output, 'RULE_INPUT_DIRNAME')
UnsetVariable(output, 'RULE_INPUT_NAME')
UnsetVariable(output, 'RULE_INPUT_ROOT')
UnsetVariable(output, 'RULE_INPUT_EXT')
# add_custom_target
output.write('add_custom_target(')
output.write(rule_name)
output.write(' DEPENDS\n')
for out in var_outputs:
output.write(' ')
output.write(out)
output.write('\n')
output.write('SOURCES ')
WriteVariable(output, inputs_name)
output.write('\n')
for rule_source in rule.get('rule_sources', []):
output.write(' ')
output.write(NormjoinPath(path_to_gyp, rule_source))
output.write('\n')
output.write(')\n')
extra_deps.append(rule_name)
def WriteCopies(target_name, copies, extra_deps, path_to_gyp, output):
"""Write CMake for the 'copies' in the target.
Args:
target_name: the name of the CMake target being generated.
    copies: the Gyp 'copies' dict for this target.
extra_deps: [<cmake_taget>] to append with generated targets.
path_to_gyp: relative path from CMakeLists.txt being generated to
the Gyp file in which the target being generated is defined.
"""
copy_name = target_name + '__copies'
# CMake gets upset with custom targets with OUTPUT which specify no output.
have_copies = any(copy['files'] for copy in copies)
if not have_copies:
output.write('add_custom_target(')
output.write(copy_name)
output.write(')\n')
extra_deps.append(copy_name)
return
class Copy(object):
def __init__(self, ext, command):
self.cmake_inputs = []
self.cmake_outputs = []
self.gyp_inputs = []
self.gyp_outputs = []
self.ext = ext
self.inputs_name = None
self.outputs_name = None
self.command = command
file_copy = Copy('', 'copy')
dir_copy = Copy('_dirs', 'copy_directory')
for copy in copies:
files = copy['files']
destination = copy['destination']
for src in files:
path = os.path.normpath(src)
basename = os.path.split(path)[1]
dst = os.path.join(destination, basename)
copy = file_copy if os.path.basename(src) else dir_copy
copy.cmake_inputs.append(NormjoinPathForceCMakeSource(path_to_gyp, src))
copy.cmake_outputs.append(NormjoinPathForceCMakeSource(path_to_gyp, dst))
copy.gyp_inputs.append(src)
copy.gyp_outputs.append(dst)
for copy in (file_copy, dir_copy):
if copy.cmake_inputs:
copy.inputs_name = copy_name + '__input' + copy.ext
SetVariableList(output, copy.inputs_name, copy.cmake_inputs)
copy.outputs_name = copy_name + '__output' + copy.ext
SetVariableList(output, copy.outputs_name, copy.cmake_outputs)
# add_custom_command
output.write('add_custom_command(\n')
output.write('OUTPUT')
for copy in (file_copy, dir_copy):
if copy.outputs_name:
WriteVariable(output, copy.outputs_name, ' ')
output.write('\n')
for copy in (file_copy, dir_copy):
for src, dst in zip(copy.gyp_inputs, copy.gyp_outputs):
# 'cmake -E copy src dst' will create the 'dst' directory if needed.
output.write('COMMAND ${CMAKE_COMMAND} -E %s ' % copy.command)
output.write(src)
output.write(' ')
output.write(dst)
output.write("\n")
output.write('DEPENDS')
for copy in (file_copy, dir_copy):
if copy.inputs_name:
WriteVariable(output, copy.inputs_name, ' ')
output.write('\n')
output.write('WORKING_DIRECTORY ${CMAKE_CURRENT_LIST_DIR}/')
output.write(path_to_gyp)
output.write('\n')
output.write('COMMENT Copying for ')
output.write(target_name)
output.write('\n')
output.write('VERBATIM\n')
output.write(')\n')
# add_custom_target
output.write('add_custom_target(')
output.write(copy_name)
output.write('\n DEPENDS')
for copy in (file_copy, dir_copy):
if copy.outputs_name:
WriteVariable(output, copy.outputs_name, ' ')
output.write('\n SOURCES')
if file_copy.inputs_name:
WriteVariable(output, file_copy.inputs_name, ' ')
output.write('\n)\n')
extra_deps.append(copy_name)
def CreateCMakeTargetBaseName(qualified_target):
"""This is the name we would like the target to have."""
_, gyp_target_name, gyp_target_toolset = (
gyp.common.ParseQualifiedTarget(qualified_target))
cmake_target_base_name = gyp_target_name
if gyp_target_toolset and gyp_target_toolset != 'target':
cmake_target_base_name += '_' + gyp_target_toolset
return StringToCMakeTargetName(cmake_target_base_name)
def CreateCMakeTargetFullName(qualified_target):
"""An unambiguous name for the target."""
gyp_file, gyp_target_name, gyp_target_toolset = (
gyp.common.ParseQualifiedTarget(qualified_target))
cmake_target_full_name = gyp_file + ':' + gyp_target_name
if gyp_target_toolset and gyp_target_toolset != 'target':
cmake_target_full_name += '_' + gyp_target_toolset
return StringToCMakeTargetName(cmake_target_full_name)
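# Example (illustrative): for the qualified target 'src/foo/foo.gyp:bar#target'
# the base name is 'bar', while the full name is 'src_foo_foo_gyp_bar'.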
class CMakeNamer(object):
"""Converts Gyp target names into CMake target names.
CMake requires that target names be globally unique. One way to ensure
  this is to fully qualify the names of the targets. Unfortunately, this
ends up with all targets looking like "chrome_chrome_gyp_chrome" instead
of just "chrome". If this generator were only interested in building, it
would be possible to fully qualify all target names, then create
unqualified target names which depend on all qualified targets which
should have had that name. This is more or less what the 'make' generator
does with aliases. However, one goal of this generator is to create CMake
files for use with IDEs, and fully qualified names are not as user
friendly.
Since target name collision is rare, we do the above only when required.
Toolset variants are always qualified from the base, as this is required for
building. However, it also makes sense for an IDE, as it is possible for
defines to be different.
"""
def __init__(self, target_list):
    self.cmake_target_base_names_conflicting = set()
cmake_target_base_names_seen = set()
for qualified_target in target_list:
cmake_target_base_name = CreateCMakeTargetBaseName(qualified_target)
if cmake_target_base_name not in cmake_target_base_names_seen:
cmake_target_base_names_seen.add(cmake_target_base_name)
else:
        self.cmake_target_base_names_conflicting.add(cmake_target_base_name)
def CreateCMakeTargetName(self, qualified_target):
base_name = CreateCMakeTargetBaseName(qualified_target)
    if base_name in self.cmake_target_base_names_conflicting:
return CreateCMakeTargetFullName(qualified_target)
return base_name
def WriteTarget(namer, qualified_target, target_dicts, build_dir, config_to_use,
options, generator_flags, all_qualified_targets, flavor,
output):
# The make generator does this always.
# TODO: It would be nice to be able to tell CMake all dependencies.
circular_libs = generator_flags.get('circular', True)
if not generator_flags.get('standalone', False):
output.write('\n#')
output.write(qualified_target)
output.write('\n')
gyp_file, _, _ = gyp.common.ParseQualifiedTarget(qualified_target)
rel_gyp_file = gyp.common.RelativePath(gyp_file, options.toplevel_dir)
rel_gyp_dir = os.path.dirname(rel_gyp_file)
# Relative path from build dir to top dir.
build_to_top = gyp.common.InvertRelativePath(build_dir, options.toplevel_dir)
# Relative path from build dir to gyp dir.
build_to_gyp = os.path.join(build_to_top, rel_gyp_dir)
path_from_cmakelists_to_gyp = build_to_gyp
spec = target_dicts.get(qualified_target, {})
config = spec.get('configurations', {}).get(config_to_use, {})
xcode_settings = None
if flavor == 'mac':
xcode_settings = gyp.xcode_emulation.XcodeSettings(spec)
target_name = spec.get('target_name', '<missing target name>')
target_type = spec.get('type', '<missing target type>')
target_toolset = spec.get('toolset')
cmake_target_type = cmake_target_type_from_gyp_target_type.get(target_type)
if cmake_target_type is None:
    print('Target %s has unknown target type %s, skipping.' %
          (target_name, target_type))
return
SetVariable(output, 'TARGET', target_name)
SetVariable(output, 'TOOLSET', target_toolset)
cmake_target_name = namer.CreateCMakeTargetName(qualified_target)
extra_sources = []
extra_deps = []
# Actions must come first, since they can generate more OBJs for use below.
if 'actions' in spec:
WriteActions(cmake_target_name, spec['actions'], extra_sources, extra_deps,
path_from_cmakelists_to_gyp, output)
# Rules must be early like actions.
if 'rules' in spec:
WriteRules(cmake_target_name, spec['rules'], extra_sources, extra_deps,
path_from_cmakelists_to_gyp, output)
# Copies
if 'copies' in spec:
WriteCopies(cmake_target_name, spec['copies'], extra_deps,
path_from_cmakelists_to_gyp, output)
# Target and sources
srcs = spec.get('sources', [])
# Gyp separates the sheep from the goats based on file extensions.
# A full separation is done here because of flag handing (see below).
s_sources = []
c_sources = []
cxx_sources = []
linkable_sources = []
other_sources = []
for src in srcs:
_, ext = os.path.splitext(src)
src_type = COMPILABLE_EXTENSIONS.get(ext, None)
    src_norm_path = NormjoinPath(path_from_cmakelists_to_gyp, src)
if src_type == 's':
s_sources.append(src_norm_path)
elif src_type == 'cc':
c_sources.append(src_norm_path)
elif src_type == 'cxx':
cxx_sources.append(src_norm_path)
elif Linkable(ext):
linkable_sources.append(src_norm_path)
else:
other_sources.append(src_norm_path)
for extra_source in extra_sources:
src, real_source = extra_source
_, ext = os.path.splitext(real_source)
src_type = COMPILABLE_EXTENSIONS.get(ext, None)
if src_type == 's':
s_sources.append(src)
elif src_type == 'cc':
c_sources.append(src)
elif src_type == 'cxx':
cxx_sources.append(src)
elif Linkable(ext):
linkable_sources.append(src)
else:
other_sources.append(src)
s_sources_name = None
if s_sources:
s_sources_name = cmake_target_name + '__asm_srcs'
SetVariableList(output, s_sources_name, s_sources)
c_sources_name = None
if c_sources:
c_sources_name = cmake_target_name + '__c_srcs'
SetVariableList(output, c_sources_name, c_sources)
cxx_sources_name = None
if cxx_sources:
cxx_sources_name = cmake_target_name + '__cxx_srcs'
SetVariableList(output, cxx_sources_name, cxx_sources)
linkable_sources_name = None
if linkable_sources:
linkable_sources_name = cmake_target_name + '__linkable_srcs'
SetVariableList(output, linkable_sources_name, linkable_sources)
other_sources_name = None
if other_sources:
other_sources_name = cmake_target_name + '__other_srcs'
SetVariableList(output, other_sources_name, other_sources)
# CMake gets upset when executable targets provide no sources.
# http://www.cmake.org/pipermail/cmake/2010-July/038461.html
dummy_sources_name = None
has_sources = (s_sources_name or
c_sources_name or
cxx_sources_name or
linkable_sources_name or
other_sources_name)
if target_type == 'executable' and not has_sources:
dummy_sources_name = cmake_target_name + '__dummy_srcs'
SetVariable(output, dummy_sources_name,
"${obj}.${TOOLSET}/${TARGET}/genc/dummy.c")
output.write('if(NOT EXISTS "')
WriteVariable(output, dummy_sources_name)
output.write('")\n')
output.write(' file(WRITE "')
WriteVariable(output, dummy_sources_name)
output.write('" "")\n')
output.write("endif()\n")
# CMake is opposed to setting linker directories and considers the practice
# of setting linker directories dangerous. Instead, it favors the use of
# find_library and passing absolute paths to target_link_libraries.
# However, CMake does provide the command link_directories, which adds
# link directories to targets defined after it is called.
# As a result, link_directories must come before the target definition.
# CMake unfortunately has no means of removing entries from LINK_DIRECTORIES.
library_dirs = config.get('library_dirs')
if library_dirs is not None:
output.write('link_directories(')
for library_dir in library_dirs:
output.write(' ')
output.write(NormjoinPath(path_from_cmakelists_to_gyp, library_dir))
output.write('\n')
output.write(')\n')
output.write(cmake_target_type.command)
output.write('(')
output.write(cmake_target_name)
if cmake_target_type.modifier is not None:
output.write(' ')
output.write(cmake_target_type.modifier)
if s_sources_name:
WriteVariable(output, s_sources_name, ' ')
if c_sources_name:
WriteVariable(output, c_sources_name, ' ')
if cxx_sources_name:
WriteVariable(output, cxx_sources_name, ' ')
if linkable_sources_name:
WriteVariable(output, linkable_sources_name, ' ')
if other_sources_name:
WriteVariable(output, other_sources_name, ' ')
if dummy_sources_name:
WriteVariable(output, dummy_sources_name, ' ')
output.write(')\n')
# Let CMake know if the 'all' target should depend on this target.
exclude_from_all = ('TRUE' if qualified_target not in all_qualified_targets
else 'FALSE')
SetTargetProperty(output, cmake_target_name,
'EXCLUDE_FROM_ALL', exclude_from_all)
for extra_target_name in extra_deps:
SetTargetProperty(output, extra_target_name,
'EXCLUDE_FROM_ALL', exclude_from_all)
# Output name and location.
if target_type != 'none':
# Link as 'C' if there are no other files
if not c_sources and not cxx_sources:
SetTargetProperty(output, cmake_target_name, 'LINKER_LANGUAGE', ['C'])
# Mark uncompiled sources as uncompiled.
if other_sources_name:
output.write('set_source_files_properties(')
WriteVariable(output, other_sources_name, '')
output.write(' PROPERTIES HEADER_FILE_ONLY "TRUE")\n')
# Mark object sources as linkable.
if linkable_sources_name:
output.write('set_source_files_properties(')
      WriteVariable(output, linkable_sources_name, '')
output.write(' PROPERTIES EXTERNAL_OBJECT "TRUE")\n')
# Output directory
target_output_directory = spec.get('product_dir')
if target_output_directory is None:
if target_type in ('executable', 'loadable_module'):
target_output_directory = generator_default_variables['PRODUCT_DIR']
elif target_type == 'shared_library':
target_output_directory = '${builddir}/lib.${TOOLSET}'
elif spec.get('standalone_static_library', False):
target_output_directory = generator_default_variables['PRODUCT_DIR']
else:
base_path = gyp.common.RelativePath(os.path.dirname(gyp_file),
options.toplevel_dir)
target_output_directory = '${obj}.${TOOLSET}'
target_output_directory = (
os.path.join(target_output_directory, base_path))
cmake_target_output_directory = NormjoinPathForceCMakeSource(
path_from_cmakelists_to_gyp,
target_output_directory)
SetTargetProperty(output,
cmake_target_name,
cmake_target_type.property_modifier + '_OUTPUT_DIRECTORY',
cmake_target_output_directory)
# Output name
default_product_prefix = ''
default_product_name = target_name
default_product_ext = ''
if target_type == 'static_library':
static_library_prefix = generator_default_variables['STATIC_LIB_PREFIX']
default_product_name = RemovePrefix(default_product_name,
static_library_prefix)
default_product_prefix = static_library_prefix
default_product_ext = generator_default_variables['STATIC_LIB_SUFFIX']
elif target_type in ('loadable_module', 'shared_library'):
shared_library_prefix = generator_default_variables['SHARED_LIB_PREFIX']
default_product_name = RemovePrefix(default_product_name,
shared_library_prefix)
default_product_prefix = shared_library_prefix
default_product_ext = generator_default_variables['SHARED_LIB_SUFFIX']
elif target_type != 'executable':
      print('ERROR: What output file should be generated?',
            'type', target_type, 'target', target_name)
product_prefix = spec.get('product_prefix', default_product_prefix)
product_name = spec.get('product_name', default_product_name)
product_ext = spec.get('product_extension')
if product_ext:
product_ext = '.' + product_ext
else:
product_ext = default_product_ext
SetTargetProperty(output, cmake_target_name, 'PREFIX', product_prefix)
SetTargetProperty(output, cmake_target_name,
cmake_target_type.property_modifier + '_OUTPUT_NAME',
product_name)
SetTargetProperty(output, cmake_target_name, 'SUFFIX', product_ext)
# Make the output of this target referenceable as a source.
cmake_target_output_basename = product_prefix + product_name + product_ext
cmake_target_output = os.path.join(cmake_target_output_directory,
cmake_target_output_basename)
SetFileProperty(output, cmake_target_output, 'GENERATED', ['TRUE'], '')
# Includes
includes = config.get('include_dirs')
if includes:
# This (target include directories) is what requires CMake 2.8.8
includes_name = cmake_target_name + '__include_dirs'
SetVariableList(output, includes_name,
[NormjoinPathForceCMakeSource(path_from_cmakelists_to_gyp, include)
for include in includes])
output.write('set_property(TARGET ')
output.write(cmake_target_name)
output.write(' APPEND PROPERTY INCLUDE_DIRECTORIES ')
WriteVariable(output, includes_name, '')
output.write(')\n')
# Defines
defines = config.get('defines')
if defines is not None:
SetTargetProperty(output,
cmake_target_name,
'COMPILE_DEFINITIONS',
defines,
';')
# Compile Flags - http://www.cmake.org/Bug/view.php?id=6493
# CMake currently does not have target C and CXX flags.
# So, instead of doing...
# cflags_c = config.get('cflags_c')
# if cflags_c is not None:
# SetTargetProperty(output, cmake_target_name,
# 'C_COMPILE_FLAGS', cflags_c, ' ')
# cflags_cc = config.get('cflags_cc')
# if cflags_cc is not None:
# SetTargetProperty(output, cmake_target_name,
# 'CXX_COMPILE_FLAGS', cflags_cc, ' ')
# Instead we must...
cflags = config.get('cflags', [])
cflags_c = config.get('cflags_c', [])
cflags_cxx = config.get('cflags_cc', [])
if xcode_settings:
cflags = xcode_settings.GetCflags(config_to_use)
cflags_c = xcode_settings.GetCflagsC(config_to_use)
cflags_cxx = xcode_settings.GetCflagsCC(config_to_use)
#cflags_objc = xcode_settings.GetCflagsObjC(config_to_use)
#cflags_objcc = xcode_settings.GetCflagsObjCC(config_to_use)
if (not cflags_c or not c_sources) and (not cflags_cxx or not cxx_sources):
SetTargetProperty(output, cmake_target_name, 'COMPILE_FLAGS', cflags, ' ')
elif c_sources and not (s_sources or cxx_sources):
flags = []
flags.extend(cflags)
flags.extend(cflags_c)
SetTargetProperty(output, cmake_target_name, 'COMPILE_FLAGS', flags, ' ')
elif cxx_sources and not (s_sources or c_sources):
flags = []
flags.extend(cflags)
flags.extend(cflags_cxx)
SetTargetProperty(output, cmake_target_name, 'COMPILE_FLAGS', flags, ' ')
else:
# TODO: This is broken, one cannot generally set properties on files,
# as other targets may require different properties on the same files.
if s_sources and cflags:
SetFilesProperty(output, s_sources_name, 'COMPILE_FLAGS', cflags, ' ')
if c_sources and (cflags or cflags_c):
flags = []
flags.extend(cflags)
flags.extend(cflags_c)
SetFilesProperty(output, c_sources_name, 'COMPILE_FLAGS', flags, ' ')
if cxx_sources and (cflags or cflags_cxx):
flags = []
flags.extend(cflags)
flags.extend(cflags_cxx)
SetFilesProperty(output, cxx_sources_name, 'COMPILE_FLAGS', flags, ' ')
# Linker flags
ldflags = config.get('ldflags')
if ldflags is not None:
SetTargetProperty(output, cmake_target_name, 'LINK_FLAGS', ldflags, ' ')
# XCode settings
xcode_settings = config.get('xcode_settings', {})
  for xcode_setting, xcode_value in xcode_settings.items():
SetTargetProperty(output, cmake_target_name,
"XCODE_ATTRIBUTE_%s" % xcode_setting, xcode_value,
'' if isinstance(xcode_value, str) else ' ')
# Note on Dependencies and Libraries:
# CMake wants to handle link order, resolving the link line up front.
# Gyp does not retain or enforce specifying enough information to do so.
# So do as other gyp generators and use --start-group and --end-group.
# Give CMake as little information as possible so that it doesn't mess it up.
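  # Illustrative result (target and library names invented):
  #   target_link_libraries(foo -Wl,--start-group liba libb -Wl,--end-group)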
# Dependencies
rawDeps = spec.get('dependencies', [])
static_deps = []
shared_deps = []
other_deps = []
for rawDep in rawDeps:
dep_cmake_name = namer.CreateCMakeTargetName(rawDep)
dep_spec = target_dicts.get(rawDep, {})
dep_target_type = dep_spec.get('type', None)
if dep_target_type == 'static_library':
static_deps.append(dep_cmake_name)
elif dep_target_type == 'shared_library':
shared_deps.append(dep_cmake_name)
else:
other_deps.append(dep_cmake_name)
# ensure all external dependencies are complete before internal dependencies
# extra_deps currently only depend on their own deps, so otherwise run early
if static_deps or shared_deps or other_deps:
for extra_dep in extra_deps:
output.write('add_dependencies(')
output.write(extra_dep)
output.write('\n')
for deps in (static_deps, shared_deps, other_deps):
for dep in gyp.common.uniquer(deps):
output.write(' ')
output.write(dep)
output.write('\n')
output.write(')\n')
linkable = target_type in ('executable', 'loadable_module', 'shared_library')
other_deps.extend(extra_deps)
if other_deps or (not linkable and (static_deps or shared_deps)):
output.write('add_dependencies(')
output.write(cmake_target_name)
output.write('\n')
for dep in gyp.common.uniquer(other_deps):
output.write(' ')
output.write(dep)
output.write('\n')
if not linkable:
for deps in (static_deps, shared_deps):
for lib_dep in gyp.common.uniquer(deps):
output.write(' ')
output.write(lib_dep)
output.write('\n')
output.write(')\n')
# Libraries
if linkable:
external_libs = [lib for lib in spec.get('libraries', []) if len(lib) > 0]
if external_libs or static_deps or shared_deps:
output.write('target_link_libraries(')
output.write(cmake_target_name)
output.write('\n')
if static_deps:
write_group = circular_libs and len(static_deps) > 1 and flavor != 'mac'
if write_group:
output.write('-Wl,--start-group\n')
for dep in gyp.common.uniquer(static_deps):
output.write(' ')
output.write(dep)
output.write('\n')
if write_group:
output.write('-Wl,--end-group\n')
if shared_deps:
for dep in gyp.common.uniquer(shared_deps):
output.write(' ')
output.write(dep)
output.write('\n')
if external_libs:
for lib in gyp.common.uniquer(external_libs):
output.write(' "')
output.write(RemovePrefix(lib, "$(SDKROOT)"))
output.write('"\n')
output.write(')\n')
UnsetVariable(output, 'TOOLSET')
UnsetVariable(output, 'TARGET')
def GenerateOutputForConfig(target_list, target_dicts, data,
params, config_to_use):
options = params['options']
generator_flags = params['generator_flags']
flavor = gyp.common.GetFlavor(params)
# generator_dir: relative path from pwd to where make puts build files.
# Makes migrating from make to cmake easier, cmake doesn't put anything here.
# Each Gyp configuration creates a different CMakeLists.txt file
# to avoid incompatibilities between Gyp and CMake configurations.
generator_dir = os.path.relpath(options.generator_output or '.')
# output_dir: relative path from generator_dir to the build directory.
output_dir = generator_flags.get('output_dir', 'out')
# build_dir: relative path from source root to our output files.
# e.g. "out/Debug"
build_dir = os.path.normpath(os.path.join(generator_dir,
output_dir,
config_to_use))
toplevel_build = os.path.join(options.toplevel_dir, build_dir)
output_file = os.path.join(toplevel_build, 'CMakeLists.txt')
gyp.common.EnsureDirExists(output_file)
output = open(output_file, 'w')
output.write('cmake_minimum_required(VERSION 2.8.8 FATAL_ERROR)\n')
output.write('cmake_policy(VERSION 2.8.8)\n')
gyp_file, project_target, _ = gyp.common.ParseQualifiedTarget(target_list[-1])
output.write('project(')
output.write(project_target)
output.write(')\n')
SetVariable(output, 'configuration', config_to_use)
ar = None
cc = None
cxx = None
make_global_settings = data[gyp_file].get('make_global_settings', [])
build_to_top = gyp.common.InvertRelativePath(build_dir,
options.toplevel_dir)
for key, value in make_global_settings:
if key == 'AR':
ar = os.path.join(build_to_top, value)
if key == 'CC':
cc = os.path.join(build_to_top, value)
if key == 'CXX':
cxx = os.path.join(build_to_top, value)
ar = gyp.common.GetEnvironFallback(['AR_target', 'AR'], ar)
cc = gyp.common.GetEnvironFallback(['CC_target', 'CC'], cc)
cxx = gyp.common.GetEnvironFallback(['CXX_target', 'CXX'], cxx)
if ar:
SetVariable(output, 'CMAKE_AR', ar)
if cc:
SetVariable(output, 'CMAKE_C_COMPILER', cc)
if cxx:
SetVariable(output, 'CMAKE_CXX_COMPILER', cxx)
# The following appears to be as-yet undocumented.
# http://public.kitware.com/Bug/view.php?id=8392
output.write('enable_language(ASM)\n')
# ASM-ATT does not support .S files.
# output.write('enable_language(ASM-ATT)\n')
if cc:
SetVariable(output, 'CMAKE_ASM_COMPILER', cc)
SetVariable(output, 'builddir', '${CMAKE_CURRENT_BINARY_DIR}')
SetVariable(output, 'obj', '${builddir}/obj')
output.write('\n')
# TODO: Undocumented/unsupported (the CMake Java generator depends on it).
# CMake by default names the object resulting from foo.c to be foo.c.o.
# Gyp traditionally names the object resulting from foo.c foo.o.
# This should be irrelevant, but some targets extract .o files from .a
# and depend on the name of the extracted .o files.
output.write('set(CMAKE_C_OUTPUT_EXTENSION_REPLACE 1)\n')
output.write('set(CMAKE_CXX_OUTPUT_EXTENSION_REPLACE 1)\n')
output.write('\n')
# Force ninja to use rsp files. Otherwise link and ar lines can get too long,
# resulting in 'Argument list too long' errors.
# However, rsp files don't work correctly on Mac.
if flavor != 'mac':
output.write('set(CMAKE_NINJA_FORCE_RESPONSE_FILE 1)\n')
output.write('\n')
namer = CMakeNamer(target_list)
# The list of targets upon which the 'all' target should depend.
  # CMake has its own implicit 'all' target; one is not created explicitly.
all_qualified_targets = set()
for build_file in params['build_files']:
for qualified_target in gyp.common.AllTargets(target_list,
target_dicts,
os.path.normpath(build_file)):
all_qualified_targets.add(qualified_target)
for qualified_target in target_list:
if flavor == 'mac':
gyp_file, _, _ = gyp.common.ParseQualifiedTarget(qualified_target)
spec = target_dicts[qualified_target]
gyp.xcode_emulation.MergeGlobalXcodeSettingsToSpec(data[gyp_file], spec)
WriteTarget(namer, qualified_target, target_dicts, build_dir, config_to_use,
options, generator_flags, all_qualified_targets, flavor, output)
output.close()
def PerformBuild(data, configurations, params):
options = params['options']
generator_flags = params['generator_flags']
# generator_dir: relative path from pwd to where make puts build files.
# Makes migrating from make to cmake easier, cmake doesn't put anything here.
generator_dir = os.path.relpath(options.generator_output or '.')
# output_dir: relative path from generator_dir to the build directory.
output_dir = generator_flags.get('output_dir', 'out')
for config_name in configurations:
# build_dir: relative path from source root to our output files.
# e.g. "out/Debug"
build_dir = os.path.normpath(os.path.join(generator_dir,
output_dir,
config_name))
arguments = ['cmake', '-G', 'Ninja']
print('Generating [%s]: %s' % (config_name, arguments))
subprocess.check_call(arguments, cwd=build_dir)
arguments = ['ninja', '-C', build_dir]
print('Building [%s]: %s' % (config_name, arguments))
subprocess.check_call(arguments)
def CallGenerateOutputForConfig(arglist):
# Ignore the interrupt signal so that the parent process catches it and
# kills all multiprocessing children.
signal.signal(signal.SIGINT, signal.SIG_IGN)
target_list, target_dicts, data, params, config_name = arglist
GenerateOutputForConfig(target_list, target_dicts, data, params, config_name)
def GenerateOutput(target_list, target_dicts, data, params):
user_config = params.get('generator_flags', {}).get('config', None)
if user_config:
GenerateOutputForConfig(target_list, target_dicts, data,
params, user_config)
else:
config_names = target_dicts[target_list[0]]['configurations']
if params['parallel']:
try:
pool = multiprocessing.Pool(len(config_names))
arglists = []
for config_name in config_names:
arglists.append((target_list, target_dicts, data,
params, config_name))
pool.map(CallGenerateOutputForConfig, arglists)
except KeyboardInterrupt as e:
pool.terminate()
raise e
else:
for config_name in config_names:
GenerateOutputForConfig(target_list, target_dicts, data,
params, config_name)
# === languages/de.py (repo: stevenaldinger/eden, license: MIT) ===
{
"'Cancel' will indicate an asset log entry did not occur": "'Abbrechen' zeigt an, dass ein Asset Log Eintrag nicht eingetreten ist",
"A location that specifies the geographic area for this region. This can be a location from the location hierarchy, or a 'group location', or a location that has a boundary for the area.": 'Eine Position, die den geografischen Bereich für diese Region definiert. Dies kann ein Standort aus der Standorthierarchie, oder ein Gruppenstandort, oder ein Standort mit Grenzbereich sein.',
"Acronym of the organization's name, eg. IFRC.": 'Abkürzung des Organisationsnamen, z. B. IFRC.',
"Authenticate system's Twitter account": 'Authentifizierung für den Twitter Account des Systems',
"Can't import tweepy": 'Tweepy kann nicht importiert werden',
"Caution: doesn't respect the framework rules!": 'Achtung: Die Rahmenbedingungen des Frameworks werden nicht beachtet!',
"Describe the procedure which this record relates to (e.g. 'medical examination')": 'Beschreiben Sie den Arbeitsablauf der sich auf diesen Eintrag bezieht (z. B. \\ " ärztliche Untersuchung")',
"Error logs for '%(app)s'": 'Fehlerprotokolle für "%(app)s"',
"Format the list of attribute values & the RGB value to use for these as a JSON object, e.g.: {Red: '#FF0000', Green: '#00FF00', Yellow: '#FFFF00'}": "Formatieren Sie die Liste der Attributwerte und die RGB-Wert zur Verwendung dieser als ein JSON-Objekt, z. B.: {Rot: '#FF0000 ', grün: '#00FF00 ', gelb: '#FFFF00 '}",
"Google Earth's Keyhole Markup Language": "Google Earth's Keyhole Markup Language",
"If selected, then this Asset's Location will be updated whenever the Person's Location is updated.": 'Wenn ausgewählt, wird der Ort dieser Anlage immer aktualisiert, sobald der Standort der Person aktualisiert wird.',
"If this configuration represents a region for the Regions menu, give it a name to use in the menu. The name for a personal map configuration will be set to the user's name.": 'Wenn diese Konfiguration einen Bereich für die Regionenauswahl repräsentiert, geben Sie einen Namen für die Verwendung in der Auswahl. Der Name für eine persönliche Kartenkonfiguration wird mit dem Namen des Benutzers festgelegt.',
"If this field is populated then a user who specifies this Organization when signing up will be assigned as a Staff of this Organization unless their domain doesn't match the domain field.": 'Wenn dieses Feld ausgefüllt ist, dann wird ein Benutzer, der diese Organisation definiert, automatisch als Mitarbeiter dieser Organisation zugeordnet sobald er sich anmeldet, ausgenommen die Domäne stimmt nicht mit dem Domänenfeld überein.',
"If this is ticked, then this will become the user's Base Location & hence where the user is shown on the Map": 'Wenn dies angekreuzt ist, wird es die Basisposition des Benutzers und dadurch gesteuert wo der Benutzer auf der Karte angezeigt wird.',
"If you don't see the Hospital in the list, you can add a new one by clicking link 'Create Hospital'.": "Wenn sie das Krankenhaus nicht in der Liste finden, können Sie ein neues hinzufügen, indem sie den Link 'Krankenhaus hinzufügen' anklicken.",
"If you don't see the Office in the list, you can add a new one by clicking link 'Create Office'.": "Wenn sie das Büro nicht in der Liste finden, können Sie ein neues hinzufügen, indem sie den Link 'Büro hinzufügen' anklicken.",
"If you don't see the Organization in the list, you can add a new one by clicking link 'Create Organization'.": 'Wenn sie die Organisation nicht in der Liste sehen, dann können sie eine neue hinzufügen indem sie auf den Link "Organisation hinzufügen" klicken.',
"Instead of automatically syncing from other peers over the network, you can also sync from files, which is necessary where there's no network. You can use this page to import sync data from files and also export data to sync files. Click the link on the right to go to this page.": 'Anstelle der automatischen Synchronisation von anderen Peers über das Netz, können sie auch über Dateien synchronisieren, was nötig ist, wenn kein Netzwerk vorhanden ist. Sie können diese Seite verwenden um Sync Daten aus Dateien zu importieren and auch um Daten in Form von Sync Dateien zu exportieren. Ein Klick auf den Link rechts bringt Sie zu dieser Seite.',
"Integrity error: record can not be deleted while it is referenced by other records": "Datensatz kann nicht gelöscht werden solange andere Datensätze damit verknüpft sind",
"Level is higher than parent's": 'Die Stufe ist höher als das übergeordnete Element',
"Need a 'url' argument!": "Braucht eine 'url' als Argument!",
"Optional. The name of the geometry column. In PostGIS this defaults to 'the_geom'.": "Optional. Der Name der Geometrie-Spalte. In PostGIS ist der Standardwert 'the_geom'.",
"Parent level should be higher than this record's level. Parent level is": 'Übergeordnete Ebene muss höher als dieser Eintrag. Die Stufe seines Eltern Elements ist',
"Password fields don't match": 'Kennwortfelder stimmer nicht überein',
"Phone number to donate to this organization's relief efforts.": 'Telefonnummer für Spenden an diese Nothilfeorganisation.',
"Please come back after sometime if that doesn't help.": 'Wenn das nicht hilft, kommen Sie nach einiger Zeit bitte wieder.',
"Quantity in %s's Inventory": "Menge in %s's Bestand",
"Select a Room from the list or click 'Create Room'": "Wählen Sie einen Raum aus der Liste oder klicken Sie auf 'Raum hinzufügen'",
"Select a person in charge for status 'assigned'": 'Wählen Sie eine verantwortliche Person aus für den Status "zugeordnet"',
"Select this if all specific locations need a parent at the deepest level of the location hierarchy. For example, if 'district' is the smallest division in the hierarchy, then all specific locations would be required to have a district as a parent.": "Wählen Sie diese Option, wenn alle speziellen administrativen Zuständigkeitsbereiche auf der untersten Hierarchieebene einen übergeordneten Zuständigkeitsbereich brauchen. Beispiel: Wenn 'district' der kleinste Bereich in der Hierarchie ist, dann müssen alle speziellen Bereiche einen 'district' als übergeordnetes Element haben.",
"Select this if all specific locations need a parent location in the location hierarchy. This can assist in setting up a 'region' representing an affected area.": 'Wählen Sie diese Option, wenn alle speziellen administrativen Zuständigkeitsbereiche einen übergeordneten Zuständigkeitsbereich in der Gebietshierarchie brauchen. Es kann dabei hilfreich sein eine "region" festzulegen, die den betroffenen Bereich repräsentiert.',
"Sorry, things didn't get done on time.": 'Leider konnten die Aufgaben nicht rechtzeitig ausgeführt werden.',
"Sorry, we couldn't find that page.": 'Leider konnte diese Seite nicht gefunden werden.',
"System's Twitter account updated": 'Der Twitter Account des Systems wurde aktualisiert',
"The Donor(s) for this project. Multiple values can be selected by holding down the 'Control' key.": "Die Spender für dieses Projekt. Mehrere Werte können durch Halten der 'Steuerungstaste' (Strg / Ctrl) ausgewählt werden.",
"The URL of the image file. If you don't upload an image file, then you must specify its location here.": 'Die URL der Bilddatei. Wenn Sie keine Grafikdatei hochladen, dann müssen Sie hier eine URL angeben.',
"The volunteer's role": "Rolle des Freiwilligen",
"To search by person name, enter any of the first, middle or last names, separated by spaces. You may use % as wildcard. Press 'Search' without input to list all persons.": "Um nach einem Namen zu suchen, geben Sie durch Leerzeichen getrennt beliebig den Vor-, Mittel- oder Nachnamen ein. Sie können % als Wildcard verwenden. Die Auswahl von 'Suchen' ohne eine Eingabe führt zur Auflistung aller Personen.",
"To search for a body, enter the ID tag number of the body. You may use % as wildcard. Press 'Search' without input to list all bodies.": "Um nach einem Körper zu suchen, geben Sie die Identifikationsmarken-Nummer des Körpers ein. Sie können % als Wildcard verwenden. Die Auswahl von 'Suchen' ohne Eingabe führt zur Auflistung aller Körper.",
"To search for a hospital, enter any of the names or IDs of the hospital, or the organization name or acronym, separated by spaces. You may use % as wildcard. Press 'Search' without input to list all hospitals.": "Für die Suche nach einem Krankenhaus, geben sie entweder den Namen, die ID, den Organisationsnamen oder ein Acronym jeweils getrennt durch Leerzeichen ein. Sie können % als Wildcard verwenden. Die Auswahl von 'Suchen' ohne Eingabe führt zur Auflistung aller Krankenhäuser.",
"To search for a hospital, enter any of the names or IDs of the hospital, separated by spaces. You may use % as wildcard. Press 'Search' without input to list all hospitals.": "Für die Suche nach einem Krankenhaus, geben Sie Namen oder die ID des Krankenhauses getrennt durch Leerzeichen ein. Sie können % als Wildcard verwenden. Die Auswahl von 'Suchen' ohne Eingabe führt zur Auflistung aller Krankenhäuser.",
"To search for a location, enter the name. You may use % as wildcard. Press 'Search' without input to list all locations.": "Um einen Ort zu suchen, geben Sie den Namen ein. Sie können % als Wildcard verwenden. Die Auswahl von Drücken 'Suchen' ohne Eingabe führt zur Auflistung aller Orte.",
"To search for a person, enter any of the first, middle or last names and/or an ID number of a person, separated by spaces. You may use % as wildcard. Press 'Search' without input to list all persons.": "Um nach einer Person zu suchen, geben Sie durch Leerzeichen getrennt beliebig den Vor-, Mittel- oder Nachnamen ein. Sie können % als Wildcard verwenden. Die Auswahl von 'Suchen' ohne eine Eingabe führt zur Auflistung aller Personen.",
"To search for an assessment, enter any portion the ticket number of the assessment. You may use % as wildcard. Press 'Search' without input to list all assessments.": "Für die Suche nach einer Bewertung, geben Sie einen beliebigen Teil der Ticketnummer der Bewertung ein. Sie können % als Wildcard verwenden. Die Auswahl von 'Suchen' ohne Eingabe führt zur Auflistung aller Bewertungen.",
"Type the first few characters of one of the Person's names.": 'Geben Sie die ersten paar Zeichen des Namens einer Person ein.',
"Upload an image file here. If you don't upload an image file, then you must specify its location in the URL field.": 'Laden Sie hier die Grafikdatei hoch. Wenn sie keine Grafikdatei hochladen, dann müssen Sie im Feld eine URL auf eine im Web verfügbare Grafikdatei angeben.',
"When syncing data with others, conflicts happen in cases when two (or more) parties want to sync information which both of them have modified, i.e. conflicting information. Sync module tries to resolve such conflicts automatically but in some cases it can't. In those cases, it is up to you to resolve those conflicts manually, click on the link on the right to go to this page.": 'Beim Synchronisieren der Daten mit anderen Installationen, können Konflikte auftreten wenn beide (oder mehrere) Parteien die gleichen Daten geändert haben, d. h. widersprüchliche Informationen vorliegen. Das Synchronisationsmodul versucht solche Konflikte automatisch zu beheben, was jedoch in manchen Fällen nicht möglich ist. In solchen Fällen ist es Ihre Aufgabe, diese Konflikte manuell zu beheben; klicken Sie auf den rechten Link, um auf diese Seite zu gelangen.',
"You haven't made any calculations": 'Sie haben keine Brechnungen gemacht',
"couldn't be parsed so NetworkLinks not followed.": 'konnte nicht interpretiert so dass Netzwerklinks nicht verfolgt werden.',
"includes a GroundOverlay or ScreenOverlay which aren't supported in OpenLayers yet, so it may not work properly.": 'Enthält ein GroundOverlay oder ScreenOverlay die in OpenLayers noch nicht unterstützt werden, es wird möglicherweise nicht richtig funktionieren.',
'"update" is an optional expression like "field1=\'newvalue\'". You cannot update or delete the results of a JOIN': '"update" ist ein optionaler Ausdruck wie "field1=\'newvalue\'\\ ". Sie können die Ergebnisse eines JOINs nicht aktualisieren oder löschen.',
'# of International Staff': '# der internationalen Mitarbeiter',
'# of National Staff': '# der nationalen Mitarbeiter',
'# of Vehicles': '# der Fahrzeuge',
'%(event)s already registered %(number)s times today': '%(event)s heute bereits %(number)s mal registriert',
'%(event)s already registered on %(timestamp)s': '%(event)s bereits registriert am %(timestamp)s',
'%(event)s already registered today': '%(event)s heute bereits registriert',
'%(event)s already registered today, not combinable': '%(event)s heute bereits registriert, nicht kombinierbar',
'%(event)s on': '%(event)s am',
'%(msg)s\nIf the request type is "%(type)s", please enter the %(type)s on the next screen.': '%(msg)s\nWenn der Typ des Requests "%(type)s" ist, geben Sie die %(type)s bitte auf der nächsten Seite ein.',
'%(number)s payment(s) not found': '%(number)s Auszahlung(en) nicht gefunden',
'%(number)s payment(s) registered': '%(number)s Auszahlung(en) registriert',
'%(number)s transferable cases found': '%(number)s transferierbare Fälle gefunden',
'%(system_name)s - Verify Email': '%(system_name)s - Email überprüfen',
'%s rows deleted': '%s Zeilen gelöscht',
'%s rows updated': '%s Zeilen aktualisiert',
'& then click on the map below to adjust the Lat/Lon fields': '& anschließend klicken Sie auf die Karte weiter unten um die Längen- und Breitengradwerte zu korrigieren',
'(Recipient)': '(Empfänger)',
'* Required Fields': '* erforderliche Felder',
'0-15 minutes': '0 - 15 Minuten',
'1 Assessment': '1 Bewertung',
'1 location, shorter time, can contain multiple Tasks': '1 Position, kürzere Zeit, kann mehrere Aufgaben beinhalten',
'1-3 days': '1-3 Tage',
'15-30 minutes': '15-30 Minuten',
'2 different options are provided here currently:': '2 verschiedene Optionen stehen hier derzeit zur Verfügung:',
'2x4 Car': 'Fahrzeug mit einer Antriebsachse',
'30-60 minutes': '30-60 Minuten',
'3W': 'Wer? Was? Wo?',
'4-7 days': '4-7 Tage',
'4x4 Car': 'Allradfahrzeug',
'8-14 days': '8-14 Tage',
'A Marker assigned to an individual Location is set if there is a need to override the Marker assigned to the Feature Class.': 'Es kann eine Zuordnung eines Symbol zu einer individuellen Position erfolgen, um damit die Symbolisierung der Objektklasse zu überschreiben.',
'A Reference Document such as a file, URL or contact person to verify this data. You can type the 1st few characters of the document name to link to an existing document.': 'Ein Referenzdokument wie z. B. eine Datei, URL oder Ansprechpartner zur Überprüfung dieser Daten. Sie können die ersten Zeichen eines vorhandenen Dokumentnamens eingeben um dieses zu referenzieren.',
'A brief description of the group (optional)': 'Eine kurze Beschreibung der Gruppe (optional)',
'A catalog of different Assessment Templates including summary information': 'Ein Katalog von verschiedenen Beurteilungsvorlagen inklusive einer Zusammenfassung',
'A file downloaded from a GPS containing a series of geographic points in XML format.': 'Eine Datei von einem GPS Gerät das eine Reihe von geographischen Positionen im XML-Format enthält.',
'A file in GPX format taken from a GPS whose timestamps can be correlated with the timestamps on the photos to locate them on the map.': 'Eine Datei im GPX-Format aus einem GPS Gerät deren Zeitstempel genutzt werden können, um sie mit den Zeitstempeln von Fotos zu verknüpfen und diese dann auf einer Karte darzustellen.',
'A library of digital resources, such as photos, documents and reports': 'Eine Bibliothek von digitalen Ressourcen, wie z. B. Fotos, Dokumente und Berichte',
'A location group can be used to define the extent of an affected area, if it does not fall within one administrative region.': 'Eine Gebietsgruppe kann verwendet werden, um den Bereich eines betroffenen Gebietes zu definieren, falls dieses nicht mit einer vorhandenen administrativen Einheit zusammenfällt.',
'A location group is a set of locations (often, a set of administrative regions representing a combined area).': 'Eine Gebietsgruppe besteht aus mehreren Gebieten (häufig eine Gruppe von Verwaltungsregionen, die einen eigenen Zuständigkeitsbereich bilden).',
'A location group must have at least one member.': 'Eine Gebietsgruppe muss mindestens ein Element beinhalten.',
'A unique code to identify the status': 'Ein eindeutiger Code um den Status zu identifizieren',
'ABOUT THIS MODULE': 'ÜBER DIESES MODUL',
'ACCESS DATA': 'ZUGRIFFSDATEN',
'ANY': 'Irgendwelche',
'API is documented here': 'Die API ist hier dokumentiert',
'ATC-20 Rapid Evaluation modified for New Zealand': 'ATC-20 Schnelle Evaluierung - angepasst für Neuseeland',
'Abbreviation': 'Abkürzung',
'Ability to Fill Out Surveys': 'Möglichkeit Umfragen auszufüllen',
'Ability to customize the list of details tracked at a Shelter': 'Möglichkeit die Liste der Detailangaben zu einer Unterkunft anzupassen',
'Ability to customize the list of human resource tracked at a Shelter': 'Möglichkeit die Liste der menschlichen Ressourcen einer Unterkunft anzupassen',
'Ability to customize the list of important facilities needed at a Shelter': 'Möglichkeit die Liste mit den wichtigen Einrichtungen, die in einer Unterkunft benötigt werden, anzupassen',
'Ability to view Results of Completed and/or partially filled out Surveys': 'Möglichkeit die Ergebnisse von abgeschlossen und/oder teilweise ausgefüllten Umfragen zu einzusehen',
'About Us': 'Über uns',
'About': 'Über',
'Accept Push': 'Akzeptiert Push',
'Access denied': 'Zugriff verweigert',
'Access to Shelter': 'Zugang zu Unterkünften',
'Access to education services': 'Zugang zu Ausbildungsdienstleistungen',
'Accessibility of Affected Location': 'Erreichbarkeit der betroffenen Region',
'Accompanied Child': 'Begleitetes Kind',
'Account Registered - Please Check Your Email': 'Benutzerkonto registriert - Bitte überprüfen Sie Ihre E-Mail',
'Account SID': 'SID des Accounts',
'Acronym': 'Abkürzung',
'Action Details': 'Details zur Maßnahme',
'Action Statistic': 'Maßnahmenstatistik',
'Action Type': 'Maßnahmenart',
'Action created': 'Maßnahme angelegt',
'Action deleted': 'Maßnahme gelöscht',
'Action successful - please wait...': 'Aktion erfolgreich - bitte warten...',
'Action updated': 'Maßnahme aktualisiert',
'Actionable by all targeted recipients': 'Bearbeitbar von allen adressierten Empfängern',
'Actionable only by designated exercise participants; exercise identifier SHOULD appear in <note>': 'Bearbeitbar nur von bestimmten Übungsteilnehmern; Übungsidentifikator sollte unter <note> auftauchen',
'Actioned?': 'Bearbeitet?',
'Actioning officer': 'Verantwortliche Person',
'Actions assigned to me': 'Mir zugeordnete Maßnahmen',
'Actions managed by me': 'Von mir verwaltete Maßnahmen',
'Actions taken as a result of this request.': 'Als Ergebnis auf diese Anfrage durchgeführte Maßnahmen.',
'Actions': 'Maßnahmen',
'Activate Events from Scenario templates for allocation of appropriate Resources (Human, Assets & Facilities).': 'Aktivieren Sie Ereignisse aus den Szenario-Vorlagen, um die passenden Ressourcen zuzuordnen (Menschen, Anlagen und Einrichtungen).',
'Active Appointment': 'Aktiver Termin',
'Active Problems': 'Aktive Probleme',
'Active': 'Aktiv',
'Activities matching Assessments': 'Aktivitäten passend zur Beurteilung',
'Activities of boys 13-17yrs before disaster': 'Aktivitäten von Jungen im Alter zwischen 13-17 Jahren vor der Katastrophe',
'Activities of boys 13-17yrs now': 'Aktivitäten von Jungen im Alter zwischen 13-17 Jahren heute',
'Activities of boys <12yrs before disaster': 'Aktivitäten von Jungen unter 12 Jahren vor der Katastrophe',
'Activities of boys <12yrs now': 'Aktivitäten von Jungen unter 12 Jahren heute',
'Activities of children': 'Aktivitäten von Kindern',
'Activities of girls 13-17yrs before disaster': 'Aktivitäten von Mädchen im Alter von 13-17 Jahren vor der Katastrophe',
'Activities of girls 13-17yrs now': 'Aktivitäten von Mädchen im Alter von 13-17 Jahren heute',
'Activities of girls <12yrs before disaster': 'Aktivitäten von Mädchen unter 12 Jahren vor der Katastrophe',
'Activities of girls <12yrs now': 'Aktivitäten von Mädchen unter 12 Jahre heute',
'Activities to follow up': 'Fällige Wiedervorlagen',
'Activities': 'Aktivitäten',
'Activity Added': 'Aktivität hinzugefügt',
'Activity Deleted': 'Aktivität gelöscht',
'Activity Details': 'Details zur Aktivität',
'Activity Details, Appointments, Notes': 'Aktivitäten-Details, Termine, Notizen',
'Activity Report': 'Bericht zur Aktivität',
'Activity Reports': 'Berichte zu Aktivitäten',
'Activity Statistic': 'Statistik zu Aktivitäten',
'Activity Type': 'Typ der Aktivität',
'Activity Types': 'Aktivitätstypen',
'Activity Updated': 'Aktivität aktualisiert',
'Activity added': 'Aktivität hinzugefügt',
'Activity deleted': 'Aktivität gelöscht',
'Activity updated': 'Aktivität aktualisiert',
'Activity': 'Aktivität',
'Add Action': 'Maßnahme hinzufügen',
'Add Activity Type': 'Aktivitätstyp hinzufügen',
'Add Address': 'Adresse hinzufügen',
'Add Alternative Item': 'Alternativen Artikel hinzufügen',
'Add Assessment Summary': 'Zusammenfassung der Beurteilung hinzufügen',
'Add Assessment': 'Beurteilung hinzufügen',
'Add Asset Log Entry - Change Label': 'Anlagenprotokoll-Eintrag hinzufügen - Beschriftung ändern',
'Add Availability': 'Verfügbarkeit hinzufügen',
'Add Baseline Type': 'Basislinien-Typ hinzufügen',
'Add Baseline': 'Basislinie hinzufügen',
'Add Branch Organization': 'Zweigorganisation hinzufügen',
'Add Bundle': 'Paket hinzufügen',
'Add Camp Service': 'Camp-Dienst hinzufügen',
'Add Camp Type': 'Camp Typ hinzufügen',
'Add Camp': 'Camp hinzufügen',
'Add Certificate for Course': 'Zertifikat für Kurs hinzufügen',
'Add Certification': 'Zertifizierung hinzufügen',
'Add Competency': 'Qualifikation hinzufügen',
'Add Contact Information': 'Kontaktinformation hinzufügen',
'Add Contact': 'Kontaktperson hinzufügen',
'Add Credential': 'Qualifikation hinzufügen',
'Add Credentials': 'Qualifikationen hinzufügen',
'Add Disaster Victims': 'Katastrophenopfer hinzufügen',
'Add Distribution.': 'Verteilung hinzufügen.',
'Add Document': 'Dokument hinzufügen',
'Add Donor': 'Spender hinzufügen',
'Add Entry': 'Eintrag hinzufügen',
'Add Family Member': 'Familienmitglied hinzufügen',
'Add Flood Report': 'Flutbericht hinzufügen',
'Add Group Member': 'Gruppenmitglied hinzufügen',
'Add Human Resource': 'Personal hinzufügen',
'Add Identity': 'Identität hinzufügen',
'Add Image': 'Bild hinzufügen',
'Add Impact Type': 'Auswirkungstyp hinzufügen',
'Add Impact': 'Auswirkung hinzufügen',
'Add Item to Catalog': 'Artikel zu Katalog hinzufügen',
'Add Item to Commitment': 'Eintrag zur Zusage hinzufügen',
'Add Item to Inventory': 'Artikel zu Inventar hinzufügen',
'Add Item to Request': 'Artikel zur Anforderung hinzufügen',
'Add Item to Shipment': 'Artikel der Lieferung hinzufügen',
'Add Item': 'Artikel hinzufügen',
'Add Job Role': 'Tätigkeit hinzufügen',
'Add Key': 'Schlüssel hinzufügen',
'Add Kit': 'Ausstattung (Kit) hinzufügen',
'Add Layer to this Profile': 'Kartenebene zu diesem Profil hinzufügen',
'Add Level 1 Assessment': 'Stufe 1 Beurteilung hinzufügen',
'Add Level 2 Assessment': 'Stufe 2 Beurteilung hinzufügen',
'Add Location': 'Standort hinzufügen',
'Add Log Entry': 'Protokolleintrag hinzufügen',
'Add Member': 'Mitglied hinzufügen',
'Add Membership': 'Mitgliedschaft hinzufügen',
'Add Message': 'Nachricht hinzufügen',
'Add Mission': 'Auftrag hinzufügen',
'Add Mobile Commons Settings': 'Mobile Commons Einstellungen hinzufügen',
'Add Need Type': 'Bedarfstyp hinzufügen',
'Add Need': 'Bedarf hinzufügen',
'Add New Assessment Summary': 'Neue Zusammenfassung der Beurteilung hinzufügen',
'Add New Baseline Type': 'Neuen Basislinien-Typ hinzufügen',
'Add New Baseline': 'Neue Basislinie hinzufügen',
'Add New Budget': 'Ein neues Budget hinzufügen',
'Add New Bundle': 'Ein neues Paket hinzufügen',
'Add New Camp Service': 'Neuen Camp Service hinzufügen',
'Add New Camp Type': 'Neuen Camp Typ hinzufügen',
'Add New Camp': 'Neues Camp hinzufügen',
'Add New Cluster Subsector': 'Neuen Cluster Unterbereich hinzufügen',
'Add New Cluster': 'Neuen Cluster hinzufügen',
'Add New Commitment Item': 'Zugesagten Artikel hinzufügen',
'Add New Document': 'Neues Dokument hinzufügen',
'Add New Donor': 'Neuen Spender hinzufügen',
'Add New Entry': 'Neuen Eintrag hinzufügen',
'Add New Event': 'Neues Ereignis hinzufügen',
'Add New Flood Report': 'Neuen Flutbericht hinzufügen',
'Add New Human Resource': 'Neues Personal hinzufügen',
'Add New Image': 'Neue Grafik hinzufügen',
'Add New Impact Type': 'Neuen Auswirkungstyp hinzufügen',
'Add New Impact': 'Neue Auswirkung hinzufügen',
'Add New Item to Kit': 'Neuen Artikel zur Ausstattung (Kit) hinzufügen',
'Add New Key': 'Neuen Schlüssel hinzufügen',
'Add New Level 1 Assessment': 'Neue Stufe 1 Beurteilung hinzufügen',
'Add New Level 2 Assessment': 'Neue Stufe 2 Beurteilung hinzufügen',
'Add New Member': 'Neues Mitglied hinzufügen',
'Add New Membership': 'Neue Mitgliedschaft hinzufügen',
'Add New Need Type': 'Neuen Bedarfstyp hinzufügen',
'Add New Need': 'Neuen Bedarf hinzufügen',
'Add New Population Statistic': 'Neue Bevölkerungsstatistik hinzufügen',
'Add New Problem': 'Neues Problem hinzufügen',
'Add New Rapid Assessment': 'Neue Schnell-Beurteilung hinzufügen',
'Add New Received Item': 'Neuen erhaltenen Artikel hinzufügen',
'Add New Record': 'Neuen Datensatz hinzufügen',
'Add New Request Item': 'Neuen Anfrageartikel hinzufügen',
'Add New Request': 'Neue Anfrage hinzufügen',
'Add New River': 'Neuen Fluss hinzufügen',
'Add New Role to User': 'Benutzer eine neue Rolle zuweisen',
'Add New Scenario': 'Neues Szenario hinzufügen',
'Add New Sent Item': 'Neuen gesendeten Artikel hinzufügen',
'Add New Setting': 'Neue Einstellung hinzufügen',
'Add New Solution': 'Neue Lösung hinzufügen',
'Add New Staff Type': 'Neuen Mitarbeitertyp hinzufügen',
'Add New Subsector': 'Neuen Teilbereich hinzufügen',
'Add New Survey Answer': 'Neue Antwort zur Umfrage hinzufügen',
'Add New Survey Question': 'Neue Frage zur Umfrage hinzufügen',
'Add New Survey Series': 'Neue Umfrageserie hinzufügen',
'Add New Survey Template': 'Neue Umfragevorlage hinzufügen',
'Add New Team': 'Neues Team hinzufügen',
'Add New Ticket': 'Neues Ticket hinzufügen',
'Add New Track': 'Neuen Pfad hinzufügen',
'Add New User to Role': 'Neuen Benutzer der Rolle hinzufügen',
'Add New': 'Neu hinzufügen',
'Add Organization Domain': 'Organisationsdomain hinzufügen',
'Add Peer': 'Peer-Zugriffspunkt hinzufügen',
'Add Person': 'Person hinzufügen',
'Add Photo': 'Foto hinzufügen',
'Add PoI': 'PoI hinzufügen',
'Add Population Statistic': 'Bevölkerungsstatistik hinzufügen',
'Add Position': 'Position hinzufügen',
'Add Problem': 'Problem hinzufügen',
'Add Question': 'Frage hinzufügen',
'Add Rapid Assessment': 'Schnell-Beurteilung hinzufügen',
'Add Record': 'Datensatz hinzufügen',
'Add Reference Document': 'Referenzdokument hinzufügen',
'Add Report': 'Bericht hinzufügen',
'Add Request': 'Anfrage hinzufügen',
'Add Residence Status': 'Aufenthaltsstatus hinzufügen',
'Add Resource': 'Ressource hinzufügen',
'Add Section': 'Abschnitt hinzufügen',
'Add Setting': 'Einstellung hinzufügen',
'Add Site Needs': 'Standortbedarf hinzufügen',
'Add Skill Equivalence': 'Fähigkeitsäquivalenz hinzufügen',
'Add Skill Provision': 'Fähigkeitsbestimmung hinzufügen',
'Add Skill to Request': 'Fähigkeit zur Anfrage hinzufügen',
'Add Skill': 'Fähigkeit hinzufügen',
'Add Solution': 'Lösung hinzufügen',
'Add Staff Type': 'Mitarbeitertyp hinzufügen',
'Add Subscription': 'Abonnement hinzufügen',
'Add Subsector': 'Teilbereich hinzufügen',
'Add Survey Answer': 'Umfrageantwort hinzufügen',
'Add Survey Question': 'Umfrage Frage hinzufügen',
'Add Survey Series': 'Umfrage Serie hinzufügen',
'Add Survey Template': 'Umfrage Vorlage hinzufügen',
'Add Team Member': 'Teammitglied hinzufügen',
'Add Team': 'Team hinzufügen',
'Add Ticket': 'Ticket hinzufügen',
'Add Training': 'Schulung hinzufügen',
'Add Twilio Channel': 'Twilio Kanal hinzufügen',
'Add Twitter Channel': 'Twitter Kanal hinzufügen',
'Add Unit': 'Einheit hinzufügen',
'Add Vehicle Type': 'Fahrzeugtyp hinzufügen',
'Add Vehicle': 'Fahrzeug hinzufügen',
'Add Volunteer Availability': 'Verfügbarkeit von Freiwilligen hinzufügen',
'Add a Reference Document such as a file, URL or contact person to verify this data. If you do not enter a Reference Document, your email will be displayed instead.': 'Fügen Sie ein Referenzdokument z. B. eine Datei, URL oder einen Ansprechpartner zur Überprüfung dieser Daten ein. Wenn Sie kein Referenzdokument angeben, wird stattdessen Ihre E-Mail-Adresse angezeigt.',
'Add a Volunteer': 'Einen Freiwilligen hinzufügen',
'Add a new certificate to the catalog.': 'Neues Zertifikat zum Katalog hinzufügen.',
'Add a new competency rating to the catalog.': 'Neue Kompetenzbewertung zum Katalog hinzufügen.',
'Add a new course to the catalog.': 'Neuen Kurs zum Katalog hinzufügen.',
'Add a new job role to the catalog.': 'Neue Tätigkeit zum Katalog hinzufügen.',
'Add a new skill provision to the catalog.': 'Neue Bereitstellung einer Fähigkeit zum Katalog hinzufügen.',
'Add a new skill to the catalog.': 'Neue Fähigkeit zum Katalog hinzufügen.',
'Add a new skill type to the catalog.': 'Neue Fähigkeitsart zum Katalog hinzufügen.',
'Add new Group': 'Neue Gruppe hinzufügen',
'Add new Individual': 'Neue Einzelperson hinzufügen',
'Add new project.': 'Neues Projekt hinzufügen.',
'Add staff members': 'Mitarbeiter hinzufügen',
'Add strings manually': 'Texte händisch hinzufügen',
'Add to Bin': 'Zum Lagerbehälter hinzufügen',
'Add to Bundle': 'Zu Paket hinzufügen',
'Add to a Team': 'Zu einem Team hinzufügen',
'Add to budget': 'Zum Budget hinzufügen',
'Add volunteers': 'Freiwillige hinzufügen',
'Add': 'Hinzufügen',
'Add/Edit/Remove Layers': 'Hinzufügen/Bearbeiten/Entfernen von Kartenebenen',
'Added to Group': 'Zur Gruppe hinzugefügt',
'Added to Team': 'Zum Team hinzugefügt',
'Additional Beds / 24hrs': 'Zusätzliche Betten / 24 Std.',
'Address Details': 'Details zur Adresse',
'Address Type': 'Typ der Adresse',
'Address added': 'Adresse hinzugefügt',
'Address deleted': 'Adresse gelöscht',
'Address updated': 'Adresse aktualisiert',
'Address': 'Adresse',
'Addresses': 'Adressen',
'Adequate food and water available': 'Angemessene Nahrung und Wasser verfügbar',
'Adequate': 'Angemessen',
'Adjust Stock Levels': 'Lagerbestand anpassen',
'Adjust Stock': 'Lagerbestand anpassen',
'Admin Email': 'E-Mail des Administrators',
'Admin Name': 'Name des Administrators',
'Admin Tel': 'Telefonnummer des Administrators',
'Admin': 'Administrator',
'Administration': 'Administration',
'Administrative support cost': 'Kosten für administrative Unterstützung',
'Admission from': 'Zugang von',
'Admissions': 'Zugänge',
'Admissions/24hrs': 'Zugänge / 24 Stunden',
'Admitted on': 'Zugang am',
'Adolescent (12-20)': 'Heranwachsende (12-20)',
'Adolescent participating in coping activities': 'Heranwachsende, die an Bewältigungsaktivitäten teilnehmen',
'Adopted Child': 'Adoptiertes Kind',
'Adult (21-50)': 'Erwachsene (21-50)',
'Adult ICU': 'Intensivstation für Erwachsene',
'Adult Psychiatric': 'Psychiatrie für Erwachsene',
'Adult female': 'Erwachsener - weiblich',
'Adult male': 'Erwachsener - männlich',
'Adult': 'Erwachsener',
'Adults in prisons': 'Erwachsene in Gefängnissen',
'Adults': 'Erwachsene',
'Advanced Javascript Layers': 'Erweiterte JavaScript-Kartenebenen',
'Advanced': 'Erweitert',
'Advice at Check-in': 'Hinweis bei Check-in',
'Advice at Check-out': 'Hinweis bei Check-out',
'Advice at ID Check': 'Hinweis bei ID Prüfung',
'Advice': 'Hinweise',
'Advisory': 'Beratend',
'After clicking on the button, a set of paired items will be shown one by one. Please select the one solution from each pair that you prefer over the other.': 'Nach einem Klick auf die Schaltfläche wird ein Satz von gepaarten Elementen nacheinander angezeigt. Bitte wählen Sie aus jedem Paar diejenige Lösung aus, die Sie gegenüber der anderen bevorzugen.',
'Age Group': 'Altersgruppe',
'Age group does not match actual age.': 'Altersgruppe passt nicht zum tatsächlichen Alter.',
'Age group': 'Altersgruppe',
'Age': 'Alter',
'Aggravating factors': 'Erschwerende Faktoren',
'Aggregate': 'Zusammenstellung',
'Agriculture': 'Landwirtschaft',
'Air Transport Service': 'Lufttransportsservice',
'Aircraft Crash': 'Flugzeugabsturz',
'Aircraft Hijacking': 'Flugzeugentführung',
'Aircraft Maximum Size': 'Maximale Größe des Flugzeugs',
'Airport Closure': 'Flughafenschließung',
'Airports': 'Flughäfen',
'Airspace Closure': 'Luftraumsperrung',
'Alcohol': 'Alkohol',
'All Actions': 'Alle Maßnahmen',
'All Activities': 'Alle Aktivitäten',
'All Cases': 'Alle Fälle',
'All Entities': 'Alle Organisationen/Einheiten',
'All Follow-ups': 'Alle Wiedervorlagen',
'All Inbound & Outbound Messages are stored here': 'Alle eingehenden und abgehenden Nachrichten werden hier gespeichert',
'All Records': 'Alle Datensätze',
'All Resources': 'Alle Ressourcen',
'All Tasks': 'Alle Aufgaben',
'All data provided by the Sahana Software Foundation from this site is licenced under a Creative Commons Attribution licence. However, not all data originates here. Please consult the source field of each entry.': 'Alle von der Sahana Software Foundation auf dieser Seite bereitgestellten Daten sind unter einer Creative Commons Attribution Lizenz lizenziert. Es stammen jedoch nicht alle Daten von hier. Bitte beachten Sie das Quellen-Feld des jeweiligen Eintrags.',
'All##filter_options': 'Alle',
'All': 'Alle',
'Allocate Group': 'Gruppe zuweisen',
'Allowance Information added': 'Information zum Taschengeld hinzugefügt',
'Allowance Information deleted': 'Information zum Taschengeld gelöscht',
'Allowance Information updated': 'Information zum Taschengeld aktualisiert',
'Allowance Information': 'Informationen zum Taschengeld',
'Allowance Payment': 'Taschengeldauszahlung',
'Allowance Payments': 'Taschengeldauszahlungen',
'Allowance Suspended': 'Taschengeld ausgesetzt',
'Allowance': 'Taschengeld',
'Allowances': 'Taschengelder',
'Allowed to push': 'Push erlaubt',
'Allows a Budget to be drawn up': 'Ermöglicht die Aufstellung eines Budgets',
'Allows authorized users to control which layers are available to the situation map.': 'Erlaubt berechtigten Benutzern zu steuern, welche Kartenebenen auf der Lagekarte verfügbar sind.',
'Alternative Item Details': 'Details zum alternativen Artikel',
'Alternative Item added': 'Alternativer Artikel hinzugefügt.',
'Alternative Item deleted': 'Alternativer Artikel gelöscht',
'Alternative Item updated': 'Alternativer Artikel aktualisiert',
'Alternative Item': 'Alternativer Artikel',
'Alternative Items': 'Alternative Artikel',
'Alternative places for studying': 'Alternative Orte zum Lernen',
'Ambulance Service': 'Ambulanter Krankendienst',
'Amount': 'Betrag',
'An Assessment Template can be selected to create a Disaster Assessment. Within a Disaster Assessment, responses can be collected and results can analyzed as tables, charts and maps': 'Es kann eine Beurteilungsvorlage zur Erstellung einer Katastrophenbeurteilung ausgewählt werden. Innerhalb der Katastrophenbeurteilung können Antworten gesammelt und Ergebnisse in Form von Tabellen, Grafiken und Karten erzeugt werden.',
'An intake system, a warehouse management system, commodity tracking, supply chain management, procurement and other asset and resource management capabilities.': 'Ein Aufnahmesystem, ein Warenhausmanagementsystem, Warenlieferungsverfolgung, Versorgungskettenmanagement, Beschaffung und andere Anlagen- und Ressourcenverwaltungsfunktionen.',
'An item which can be used in place of another item': 'Ein Artikel, der anstatt eines anderen Artikels verwendet werden kann',
'Analysis of Completed Surveys': 'Analyse von abgeschlossenen Umfragen',
'Animal Die Off': 'Tiersterben',
'Animal Feed': 'Tierfutter',
'Announcements': 'Aktuelle Hinweise',
'Anonymize': 'Anonymisieren',
'Anthropology': 'Anthropologie',
'Antibiotics available': 'Antibiotika verfügbar',
'Antibiotics needed per 24h': 'Menge an Antibiotika die pro 24h benötigt wird',
'Any##filter_options': 'Mindestens eine',
'Apparent Age': 'Offensichtliches Alter',
'Apparent Gender': 'Offensichtliches Geschlecht',
'Application Deadline': 'Bewerbungsfrist',
'Application Permissions': 'Anwendungsberechtigungen',
'Application': 'Anwendung',
'Apply changes': 'Änderungen übernehmen',
'Appointment Details': 'Details zum Termin',
'Appointment Type Details': 'Details zur Terminart',
'Appointment Type added': 'Terminart hinzugefügt',
'Appointment Type deleted': 'Terminart gelöscht',
'Appointment Type updated': 'Terminart aktualisiert',
'Appointment Type': 'Terminart',
'Appointment Types': 'Terminarten',
'Appointment added': 'Termin hinzugefügt',
'Appointment deleted': 'Termin gelöscht',
'Appointment updated': 'Termin aktualisiert',
'Appointments with future dates can not be marked as completed': 'Termine mit Datum in der Zukunft können nicht als beendet markiert werden',
'Appointments': 'Termine',
'Approve': 'Bestätigen',
'Approved': 'Bestätigt',
'Approver': 'Bestätigende Stelle',
'Archive': 'Archiv',
'Archived Cases': 'Archivierte Fälle',
'Archived': 'Archiviert',
'Arctic Outflow': 'Arktischer Kaltluftausbruch',
'Are you sure you want to delete the selected details?': 'Sind Sie sicher, dass Sie die ausgewählten Details löschen wollen?',
'Are you sure you want to delete this record?': 'Sind Sie sicher, dass Sie diesen Datensatz löschen wollen?',
'Areas inspected': 'Untersuchte Gebiete',
'Assessment Details': 'Details zur Beurteilung',
'Assessment Reported': 'Beurteilung gemeldet',
'Assessment Summaries': 'Zusammenfassungen der Beurteilung',
'Assessment Summary Details': 'Details zur Zusammenfassung der Beurteilung',
'Assessment Summary added': 'Zusammenfassung der Beurteilung hinzugefügt',
'Assessment Summary deleted': 'Zusammenfassung der Beurteilung gelöscht',
'Assessment Summary updated': 'Zusammenfassung der Beurteilung aktualisiert',
'Assessment Templates': 'Beurteilungsvorlagen',
'Assessment added': 'Beurteilung hinzugefügt',
'Assessment admin level': 'Verwaltungsebene der Beurteilung',
'Assessment deleted': 'Beurteilung gelöscht',
'Assessment timeline': 'Beurteilungszeitachse',
'Assessment updated': 'Beurteilung aktualisiert',
'Assessment': 'Beurteilung',
'Assessments Needs vs. Activities': 'Bedarf aus Beurteilungen gegenüber Aktivitäten',
'Assessments and Activities': 'Beurteilungen und Aktivitäten',
'Assessments': 'Beurteilungen',
'Assessor': 'Beurteilender',
'Asset Details': 'Details zur Anlage',
'Asset Log Details': 'Details zum Anlagenprotokoll',
'Asset Log Empty': 'Anlagenprotokoll leer',
'Asset Log Entry Added - Change Label': 'Anlagenprotokoll-Eintrag hinzugefügt - Beschriftung ändern',
'Asset Log Entry deleted': 'Anlagenprotokoll-Eintrag gelöscht',
'Asset Log Entry updated': 'Anlagenprotokoll-Eintrag aktualisiert',
'Asset Management': 'Anlageverwaltung',
'Asset Number': 'Anlagenummer',
'Asset added': 'Anlage hinzugefügt',
'Asset deleted': 'Anlage gelöscht',
'Asset removed': 'Anlage entfernt',
'Asset updated': 'Anlage aktualisiert',
'Asset': 'Anlage',
'Assets are resources which are not consumable but are expected back, so they need tracking.': 'Anlagen sind Ressourcen, die nicht verbrauchbar sind, aber zurück erwartet werden; daher müssen sie nachverfolgt werden.',
'Assets': 'Anlagen',
'Assign ': 'Zuordnung ',
'Assign Group': 'Gruppe zuordnen',
'Assign Shelter': 'Unterkunft zuordnen',
'Assign Staff': 'Mitarbeiter zuordnen',
'Assign another Role': 'Weitere Rollen zuweisen',
'Assign to Org.': 'Der Org. zuordnen',
'Assign to Organization': 'Der Organisation zuordnen',
'Assign to Person': 'Der Person zuordnen',
'Assign to Site': 'Dem Standort zuordnen',
'Assign': 'Zuordnen',
'Assigned By': 'Zugeordnet von',
'Assigned To': 'Zugeordnet zu',
'Assigned to Organization': 'Zur Organisation zugeordnet',
'Assigned to Person': 'Zur Person zugeordnet',
'Assigned to Site': 'Zum Standort zugeordnet',
'Assigned to me': 'Mir zugeordnet',
'Assigned to': 'Zugeordnet zu',
'Assigned': 'Zugeordnet',
'Assume this event type if no type was specified for an event': 'Diesen Ereignistyp annehmen, wenn für ein Ereignis kein Typ angegeben wurde',
'Asylum Application': 'Asylantrag',
'Asylum Process': 'Asylverfahren',
'At/Visited Location (not virtual)': 'Am Ort / besuchter Ort (nicht virtuell)',
'Attachment of': 'Anlage zu',
'Attachments': 'Anhänge',
'Attend to information sources as described in <instruction>': 'Informationsquellen beachten, wie in <instruction> beschrieben',
'Attribution': 'Quellenangabe',
'Authentication Required': 'Anmeldung erforderlich',
'Author': 'Autor',
'Authorities': 'Ämter/Behörden',
'Automatically create this appointment for new cases': 'Termin für neue Fälle automatisch anlegen',
'Availability of bath handicap facilities': 'Verfügbarkeit eines behindertengerechten Bades',
'Availability': 'Verfügbarkeit',
'Available Alternative Inventories': 'Verfügbare alternative Bestände',
'Available Bath': 'Verfügbarkeit von Bädern',
'Available Beds': 'Verfügbare Betten',
'Available Capacity': 'Verfügbare Kapazität',
'Available Inventories': 'Verfügbare Bestände',
'Available Messages': 'Verfügbare Nachrichten',
'Available Records': 'Verfügbare Datensätze',
'Available Shower': 'Dusche vorhanden',
'Available databases and tables': 'Verfügbare Datenbanken und Tabellen',
'Available for Location': 'Verfügbar für Ort',
'Available from': 'Verfügbar von',
'Available in Viewer?': 'Verfügbar in Lagedarstellung?',
'Available of shower handicap facilities': 'Verfügbarkeit einer behindertengerechten Dusche',
'Available until': 'Verfügbar bis',
'Available': 'Verfügbar',
'Avalanche': 'Lawine',
'Average': 'Durchschnitt',
'Avoid the subject event as per the <instruction>': 'Das betreffende Ereignis gemäß <instruction> meiden',
'Awards': 'Auszeichnungen',
'BAMF Ref.No.': 'BAMF Az.',
'BAMF Reference Number': 'BAMF Aktenzeichen',
'BAMF Registration': 'BAMF Registrierung',
'BEA Registration': 'BEA Registrierung',
'BFV Arrival': 'BFV Ankunft',
'Back to %(appname)s': 'Zurück zu %(appname)s',
'Back to Check-in/Check-out': 'Zurück zu Check-in/Check-out',
'Back to Users List': 'Zurück zur Benutzerliste',
'Back': 'Zurück',
'Background Color for Text blocks': 'Hintergrundfarbe für Textblöcke',
'Background Color': 'Hintergrundfarbe',
'Baldness': 'Kahlköpfigkeit',
'Banana': 'Banane',
'Bank/micro finance': 'Bank/Mikro Finanzierung',
'Barge Capacity': 'Frachtschiffkapazitäten',
'Barricades are needed': 'Barrikaden sind erforderlich',
'Base Layer?': 'Basis Kartenebene?',
'Base Location': 'Basis Standort/Region',
'Base Site Set': 'Basisstandort definieren',
'Baseline Data': 'Basisliniendaten',
'Baseline Number of Beds': 'Basislinien-Anzahl der Betten',
'Baseline Type Details': 'Details zum Basislinien-Typ',
'Baseline Type added': 'Basislinien-Typ hinzugefügt',
'Baseline Type deleted': 'Basislinien-Typ gelöscht',
'Baseline Type updated': 'Basislinien-Typ aktualisiert',
'Baseline Type': 'Basislinien-Typ',
'Baseline Types': 'Basislinien-Typen',
'Baseline added': 'Basislinie hinzugefügt',
'Baseline deleted': 'Basislinie gelöscht',
'Baseline number of beds of that type in this unit.': 'Basislinien-Anzahl der Betten dieses Typs in dieser Einheit.',
'Baseline updated': 'Basislinie aktualisiert',
'Baselines Details': 'Details zu Basislinien',
'Baselines': 'Basislinien',
'Basic Assessment Reported': 'Grundlegende Beurteilung berichtet',
'Basic Assessment': 'Grundlegende Beurteilung',
'Basic Details': 'Grundlegende Details',
'Basic reports on the Shelter and drill-down by region': 'Grundlegende Berichte über Unterkunft und Drill-down nach Region',
'Bath Availability': 'Bad vorhanden',
'Bath Handicap Facilities': 'Behindertengerechtes Bad',
'Bath with handicap facilities': 'Bad mit behindertengerechter Einrichtung',
'Baud rate to use for your modem - The default is safe for most cases': 'Baudrate für das Modem - der Standardwert ist in den meisten Fällen ausreichend',
'Beam': 'Träger',
'Bed Capacity per Unit': 'Bettenkapazität pro Einheit',
'Bed Capacity': 'Bettenkapazität',
'Bed Type': 'Bett-Typ',
'Bed type already registered': 'Bett-Typ bereits registriert',
'Below ground level': 'Unter dem Erdgeschoss',
'Beneficiaries': 'Begünstigte',
'Beneficiary Type': 'Typ des Begünstigten',
'Beneficiary': 'Begünstigter',
'Bin': 'Lagerbehälter',
'Biological Hazard': 'Biologische Gefahr',
'Biscuits': 'Kekse',
'Blizzard': 'Schneesturm',
'Blood Type (AB0)': 'Blutgruppe (AB0)',
'Blowing Snow': 'Schneetreiben',
'Boat': 'Boot',
'Bodies found': 'Leichen gefunden',
'Bodies recovered': 'Leichen geborgen',
'Body Recovery Request': 'Leichenbergungsanforderung',
'Body Recovery Requests': 'Leichenbergungsanforderungen',
'Body': 'Body',
'Bomb Explosion': 'Bombenexplosion',
'Bomb Threat': 'Bombendrohung',
'Bomb': 'Bombe',
'Border Color for Text blocks': 'Rahmenfarbe für Textblöcke',
'Both': 'Beides',
'Branch Organization Details': 'Details zur Zweigorganisation',
'Branch Organization added': 'Zweigorganisation hinzugefügt',
'Branch Organization deleted': 'Zweigorganisation gelöscht',
'Branch Organization updated': 'Zweigorganisation aktualisiert',
'Branch Organizations': 'Zweigorganisationen',
'Branch of': 'Zweigorganisation von',
'Branches': 'Zweigorganisationen',
'Brand Details': 'Details zur Marke',
'Brand added': 'Marke hinzugefügt',
'Brand deleted': 'Marke gelöscht',
'Brand updated': 'Marke aktualisiert',
'Brand': 'Marke',
'Brands': 'Marken',
'Breakdown': 'Aufgliederung',
'Breakfast': 'Frühstück',
'Bricks': 'Ziegelsteine',
'Bridge Closed': 'Brücke gesperrt',
'Bucket': 'Eimer',
'Budget Details': 'Details zum Budget',
'Budget Updated': 'Budget aktualisiert',
'Budget added': 'Budget hinzugefügt',
'Budget deleted': 'Budget gelöscht',
'Budget updated': 'Budget aktualisiert',
'Budget': 'Budget',
'Budgeting Module': 'Budget Modul',
'Buffer': 'Puffer',
'Bug': 'Programmfehler',
'Building Assessments': 'Gebäudebeurteilungen',
'Building Collapsed': 'Gebäude eingestürzt',
'Building Name': 'Name des Gebäudes',
'Building Safety Assessments': 'Bewertung Gebäudesicherheit',
'Building Short Name/Business Name': 'Gebäude Kurzname / Firmenname',
'Building or storey leaning': 'Gebäude- oder Stockwerkneigung',
'Building': 'Gebäude',
'Built using the Template agreed by a group of NGOs working together as the': 'Erstellt unter Verwendung einer abgestimmten Vorlage einer Gruppe von NGOs unter dem Namen',
'Bulk Status Update': 'Massen-Statusaktualisierung',
'Bulk Uploader': 'Upload von Massendaten',
'Bundle Contents': 'Produktpaket Inhalt',
'Bundle Details': 'Produktpaket Details',
'Bundle Updated': 'Produktpaket aktualisiert',
'Bundle added': 'Produktpaket hinzugefügt',
'Bundle deleted': 'Produktpaket gelöscht',
'Bundle updated': 'Produktpaket aktualisiert',
'Bundle': 'Produktpaket',
'Bundles': 'Produktpakete',
'Burn ICU': 'Verbrennungseinheit',
'Burn': 'Verbrennung',
'Burned/charred': 'Verbrannt / verkohlt',
'By Einrichtung': 'Nach Einrichtung',
'By Facility': 'Nach Einrichtung',
'By Inventory': 'Nach Bestand',
'By': 'Nach',
'BÜMA valid until': 'BÜMA gültig bis',
'CBA Women': 'Frauen im gebärfähigen Alter (CBA)',
'CSS file %s not writable - unable to apply theme!': 'CSS Datei %s nicht beschreibbar - Motiv kann nicht angewendet werden!',
'CTN': 'CTN',
'Calculate': 'Berechnen',
'Camp Coordination/Management': 'Camp Koordinierung / Management',
'Camp Service Details': 'Details zu Camp Leistung',
'Camp Service added': 'Camp Leistung hinzugefügt',
'Camp Service deleted': 'Camp Leistung gelöscht',
'Camp Service updated': 'Leistung des Camps aktualisiert',
'Camp Services': 'Leistungen des Camps',
'Camp Type Details': 'Details zum Camp Typ',
'Camp Type added': 'Camp Typ hinzugefügt',
'Camp Type deleted': 'Camp Typ gelöscht',
'Camp Type updated': 'Camp Typ aktualisiert',
'Camp Type': 'Camp Typ',
'Camp Types and Services': 'Camp Typen und Leistungen',
'Camp Types': 'Camp Typen',
'Camp added': 'Camp hinzugefügt',
'Camp deleted': 'Camp gelöscht',
'Camp updated': 'Camp aktualisiert',
'Camp': 'Camp',
'Campaign ID': 'Kampagnen ID',
'Camps': 'Camps',
'Can only disable 1 record at a time!': 'Ein Datensatz kann nur einzeln deaktiviert werden!',
'Can read PoIs either from an OpenStreetMap file (.osm) or mirror.': 'Kann PoIs entweder aus einer OpenStreetMap-Datei (.osm) oder einem Mirror lesen.',
'Cancel Log Entry': 'Protokolleintrag abbrechen',
'Cancel Shipment': 'Lieferung stornieren',
'Cancel': 'Abbrechen',
'Canceled': 'Abgebrochen',
'Cancelled': 'Abgesagt',
'Candidate Matches for Body %s': 'Mögliche Übereinstimmungen für Leiche %s',
'Canned Fish': 'Fischkonserven',
'Cannot be empty': 'Darf nicht leer sein',
'Cannot disable your own account!': 'Eigenes Konto kann nicht deaktiviert werden.',
'Capacity (Max Persons)': 'Kapazität (Maximale Zahl von Personen)',
'Capacity evaluated adding all defined housing unit capacities': 'Die Kapazität der Unterkunft, ermittelt als Summe der Kapazitäten aller definierten Unterkunftseinheiten',
'Capacity of the housing unit for people who need to stay both day and night': 'Kapazität der Unterkunftseinheit für Personen, die tags und nachts dort untergebracht sind',
'Capacity of the shelter as a number of people': 'Kapazität der Unterkunft in Zahl von Personen',
'Capacity': 'Maximale Kapazität',
'Capture Information on Disaster Victim groups (Tourists, Passengers, Families, etc.)': 'Erfassung von Informationen über Opfergruppen einer Katastrophe (Touristen, Fahrgäste, Familien, etc.)',
'Capture Information on each disaster victim': 'Erfassung von Informationen über jedes Opfer einer Katastrophe.',
'Capturing the projects each organization is providing and where': 'Erfassen der Projekte, die von jeder Organisation bereitgestellt werden und wo',
'Cardiology': 'Kardiologie',
'Cargo Pier Depth': 'Wassertiefe Frachtpier',
'Case Archived': 'Fall Archiviert',
'Case Closed': 'Fall Abgeschlossen',
'Case Consulting': 'Fallberatung',
'Case Details': 'Details zum Fall',
'Case Flag Details': 'Details zur Fall Flagge',
'Case Flag added': 'Fall Flagge hinzugefügt',
'Case Flag updated': 'Fall Flagge aktualisiert',
'Case Flags': 'Fall Flaggen',
'Case Number': 'Fallnummer',
'Case Statistic': 'Fallstatistik',
'Case Status upon Completion': 'Fallstatus nach Durchführung',
'Case Status': 'Fallstatus',
'Case Statuses': 'Fallstatus',
'Case added': 'Fall angelegt',
'Case closed on': 'Fall abgeschlossen am',
'Case details updated': 'Fall aktualisiert',
'Case': 'Fall',
'Cases with this flag are not transferable': 'Fälle mit dieser Flagge sind nicht transferierbar',
'Cases with this status are closed': 'Fälle mit diesem Status sind abgeschlossen',
'Cases with this status are not transferable': 'Fälle mit diesem Status sind nicht transferierbar',
'Cases': 'Fälle',
'Cash': 'Bargeld',
'Cassava': 'Maniok',
'Casual Labor': 'Gelegenheitsarbeit',
'Casualties': 'Todesopfer',
'Catalog Details': 'Details zum Katalog',
'Catalog Item added': 'Katalog Eintrag hinzugefügt',
'Catalog Item deleted': 'Katalog Eintrag gelöscht',
'Catalog Item updated': 'Katalog Eintrag aktualisiert',
'Catalog Items': 'Katalog Einträge',
'Catalog added': 'Katalog hinzugefügt',
'Catalog deleted': 'Katalog gelöscht',
'Catalog updated': 'Katalog aktualisiert',
'Catalog': 'Katalog',
'Catalogs': 'Kataloge',
'Categories': 'Kategorien',
'Category': 'Kategorie',
'Ceilings, light fixtures': 'Decken, Beleuchtungskörper',
'Central point to record details on People': 'Zentrale Personenregistrierungsstelle',
'Certificate Catalog': 'Zertifikatskatalog',
'Certificate Details': 'Details zum Zertifikat',
'Certificate Status': 'Status des Zertifikats',
'Certificate added': 'Zertifikat hinzugefügt',
'Certificate deleted': 'Zertifikat gelöscht',
'Certificate updated': 'Zertifikat aktualisiert',
'Certificate': 'Zertifikat',
'Certificates': 'Zertifikate',
'Certification Details': 'Zertifizierungsdetails',
'Certification added': 'Zertifizierung hinzugefügt',
'Certification deleted': 'Zertifizierung gelöscht',
'Certification updated': 'Zertifizierung aktualisiert',
'Certification': 'Zertifizierung',
'Certifications': 'Zertifizierungen',
'Certifying Organization': 'Zertifizierende Organisation',
'Change Password': 'Passwort ändern',
'Channel': 'Kanal',
'Check ID': 'ID Prüfen',
'Check Request': 'Anfrage prüfen',
'Check Transferability': 'Transferierbarkeit prüfen',
'Check for errors in the URL, maybe the address was mistyped.': 'Prüfen Sie auf Fehler in der URL, vielleicht wurde die Adresse falsch eingegeben.',
'Check if the URL is pointing to a directory instead of a webpage.': 'Prüfen Sie, ob die URL auf ein Verzeichnis anstelle einer Webseite verweist.',
'Check outbox for the message status': 'Überprüfen Sie den Status der Nachricht im Nachrichtenausgang',
'Check to delete': 'Anwahl zum Löschen',
'Check transferability for all current cases': 'Transferierbarkeit für alle aktuellen Fälle prüfen',
'Check': 'Prüfen',
'Check-in date': 'Check-In Datum',
'Check-in denied': 'Check-in verweigert',
'Check-in overdue': 'Check-in überfällig',
'Check-out date': 'Check-Out Datum',
'Check-out denied': 'Check-out verweigert',
'Checked': 'Geprüft',
'Checked-in successfully!': 'Check-in erfolgreich!',
'Checked-out successfully!': 'Check-out erfolgreich!',
'Checklist created': 'Prüfliste erstellt',
'Checklist deleted': 'Prüfliste gelöscht',
'Checklist of Operations': 'Prüfliste für Operationen',
'Checklist updated': 'Prüfliste aktualisiert',
'Checklist': 'Prüfliste',
'Checkpoint Advice': 'Checkpoint Hinweise',
'Chemical Hazard': 'Chemische Gefahr',
'Chemical, Biological, Radiological, Nuclear or High-Yield Explosive threat or attack': 'Chemische, biologische, radiologische, nukleare oder hochexplosive Bedrohung oder Angriff',
'Chicken': 'Huhn',
'Child (2-11)': 'Kind (2-11)',
'Child (< 18 yrs)': 'Kind (< 18 Jahre)',
'Child Abduction Emergency': 'Kindesentführungs-Notfall',
'Child headed households (<18 yrs)': 'Kindgeführte Haushalte (<18 Jahre)',
'Child under 1 year': 'Kind unter 1 Jahr',
'Child': 'Kind',
'Children (2-5 years)': 'Kinder (2-5 Jahre)',
'Children (5-15 years)': 'Kinder (5-15 Jahre)',
'Children (< 2 years)': 'Kinder (< 2 Jahre)',
'Children in adult prisons': 'Kinder in Gefängnissen für Erwachsene',
'Children in boarding schools': 'Kinder in Internaten',
'Children in homes for disabled children': 'Kinder in Unterkünften für behinderte Kinder',
'Children in juvenile detention': 'Kinder in Jugendstrafheimen',
'Children in orphanages': 'Kinder in Waisenhäusern',
'Children living on their own (without adults)': 'Alleinlebende Kinder (ohne Erwachsene)',
'Children not enrolled in new school': 'Kinder, die nicht in der neuen Schule registriert sind',
'Children orphaned by the disaster': 'Durch die Katastrophe verwaiste Kinder',
'Children separated from their parents/caregivers': 'Von Ihren Eltern/Betreuern getrennte Kinder',
'Children that have been sent to safe places': 'Kinder die an sichere Orte gesendet wurden',
'Children unter 1 year': 'Kinder unter 1 Jahr',
'Children who have disappeared since the disaster': 'Kinder, die seit der Katastrophe verschwunden sind',
'Children': 'Kinder',
'Chinese (Taiwan)': 'Chinesisch (Taiwan)',
'Cholera Treatment Capability': 'Cholera Behandlungsmöglichkeiten',
'Cholera Treatment Center': 'Cholera-Behandlungszentrum',
'Cholera Treatment': 'Cholera-Behandlung',
'Cholera-Treatment-Center': 'Cholera-Behandlungszentrum',
'Choose a new posting based on the new evaluation and team judgement. Severe conditions affecting the whole building are grounds for an UNSAFE posting. Localised Severe and overall Moderate conditions may require a RESTRICTED USE. Place INSPECTED placard at main entrance. Post all other placards at every significant entrance.': 'Wählen Sie eine neue Einstufung basierend auf der neuen Bewertung und dem Urteil des Teams. Schwerwiegende Bedingungen, die das gesamte Gebäude betreffen, sind Grund für eine Einstufung als UNSICHER. Lokal begrenzte schwerwiegende und insgesamt moderate Bedingungen können eine EINGESCHRÄNKTE NUTZUNG erfordern. Platzieren Sie das Plakat GEPRÜFT am Haupteingang. Bringen Sie alle anderen Plakate an jedem wichtigen Eingang an.',
'Church': 'Kirche',
'City / Town / Village': 'Stadt / Ort / Dorf',
'City': 'Ort/Stadt',
'Civil Emergency': 'Ziviler Notfall',
'Cladding, glazing': 'Verkleidung, Verglasung',
'Clear Filter': 'Filter zurücksetzen',
'Clear': 'Löschen',
'Click on the link %(url)s to reset your password': 'Klicken Sie auf den Link %(url)s, um Ihr Kennwort zurückzusetzen',
'Click on the link %(url)s to verify your email': 'Klicken Sie auf den Link %(url)s, um Ihre E-Mail-Adresse zu bestätigen',
'Click where you want to open Streetview': 'Klicken Sie dorthin, wo Sie Streetview öffnen möchten',
'Client Age': 'Alter Klient',
'Client Date of Birth': 'Geburtsdatum Klient',
'Client Nationality': 'Nationalität Klient',
'Client Registration': 'Personenregistrierung',
'Client Reservation': 'Personenreservierung',
'Client was already checked-in': 'Person war bereits eingecheckt',
'Client was already checked-out': 'Person war bereits ausgecheckt',
'Clients': 'Klienten',
'Clinical Laboratory': 'Klinisches Labor',
'Clinical Operations': 'Klinikbetrieb',
'Clinical Status': 'Klinischer Status',
'Closed Cases': 'Abgeschlossene Fälle',
'Closed at': 'Geschlossen am',
'Closed': 'Geschlossen',
'Closes Response Action': 'Schliesst Maßnahme ab',
'Clothing': 'Kleidung',
'Cluster Details': 'Details zum Cluster',
'Cluster Distance': 'Cluster Abstand',
'Cluster Subsector Details': 'Cluster Teilbereich Details',
'Cluster Subsector added': 'Cluster Teilbereich hinzugefügt',
'Cluster Subsector deleted': 'Cluster Teilbereich gelöscht',
'Cluster Subsector updated': 'Cluster Teilbereich aktualisiert',
'Cluster Subsector': 'Cluster Teilsektor',
'Cluster Subsectors': 'Cluster Teilsektoren',
'Cluster Threshold': 'Cluster Schwellwert',
'Cluster added': 'Cluster hinzugefügt',
'Cluster deleted': 'Cluster gelöscht',
'Cluster updated': 'Cluster aktualisiert',
'Cluster': 'Cluster',
'Cluster(s)': 'Cluster',
'Clusters': 'Cluster',
'Cold Wave': 'Kältewelle',
'Collapse, partial collapse, off foundation': 'Eingestürzt, teilweise eingestürzt, vom Fundament gerutscht',
'Collective center': 'Sammelunterkunft',
'Color for Underline of Subheadings': 'Farbe der Unterstreichungslinie von untergeordneten Überschriften',
'Color of Buttons when hovering': 'Farbe von Schaltflächen beim Überfahren mit der Maus',
'Color of bottom of Buttons when not pressed': 'Farbe der Unterseite von Schaltflächen, wenn nicht gedrückt',
'Color of bottom of Buttons when pressed': 'Farbe der Unterseite von Schaltflächen, wenn gedrückt',
'Color of dropdown menus': 'Farbe des Dropdown-Menüs',
'Color of selected Input fields': 'Farbe der ausgewählten Eingabefelder',
'Color of selected menu items': 'Farbe ausgewählter Menüpunkte',
'Columns, pilasters, corbels': 'Säulen, Pfeiler, Konsolen',
'Combined Method': 'Kombinierte Methode',
'Come back later. Everyone visiting this site is probably experiencing the same problem as you.': 'Kommen Sie später noch einmal wieder. Jeder, der diese Seite besucht, hat derzeit wahrscheinlich das gleiche Problem wie Sie.',
'Come back later.': 'Kommen Sie später noch einmal wieder.',
'Comments permitted?': 'Kommentare zugelassen?',
'Comments': 'Kommentare',
'Commercial/Offices': 'Kommerziell / Büros',
'Commit Date': 'Datum der Zusage',
'Commit Status': 'Status der Zusage',
'Commit from %s': 'Zusage von %s',
'Commit': 'Zusage',
'Commiting a changed spreadsheet to the database': 'Ein geändertes Spreadsheet in die Datenbank übernehmen',
'Commitment Added': 'Zusage hinzugefügt',
'Commitment Canceled': 'Zusage abgebrochen',
'Commitment Details': 'Details zur Zusage',
'Commitment Item Details': 'Details zum zugesagten Artikel',
'Commitment Item added': 'Zugesagter Artikel hinzugefügt',
'Commitment Item deleted': 'Zugesagter Artikel gelöscht',
'Commitment Item updated': 'Zugesagter Artikel aktualisiert',
'Commitment Items': 'Zugesagte Artikel',
'Commitment Status': 'Status der Zusage',
'Commitment Updated': 'Zusage aktualisiert',
'Commitment': 'Zusage',
'Commitments': 'Zusagen',
'Committed By': 'Zugesagt durch',
'Committed Items': 'Zugesagte Artikel',
'Committed Skills': 'Zugesagte Fähigkeiten',
'Committed': 'Zugesagt',
'Committing Inventory': 'Zusageninventar',
'Communication problems': 'Kommunikationsprobleme',
'Community Health Center': 'Gesundheitszentrum der Gemeinschaft',
'Community Member': 'Mitglied der Gemeinschaft',
'Competencies': 'Kompetenzen',
'Competency Details': 'Details zu den Kompetenzen',
'Competency Rating Catalog': 'Kompetenzbewertungskatalog',
'Competency Rating Details': 'Details zur Kompetenzbewertung',
'Competency Rating added': 'Kompetenzbewertung hinzugefügt',
'Competency Rating deleted': 'Kompetenzbewertung gelöscht',
'Competency Rating updated': 'Kompetenzbewertung aktualisiert',
'Competency Ratings': 'Kompetenzbewertungen',
'Competency added': 'Kompetenz hinzugefügt',
'Competency deleted': 'Kompetenz gelöscht',
'Competency updated': 'Kompetenz aktualisiert',
'Competency': 'Kompetenz',
'Complete Stock Adjustment': 'Bestandsanpassung abschließen',
'Complete': 'Vollständig',
'Completed on': 'Beendet am',
'Completed': 'Beendet',
'Completion Question': 'Abschlussfrage',
'Complexion': 'Gesichtsfarbe',
'Compose': 'Erstellen',
'Compromised': 'Gefährdet',
'Concrete frame': 'Betonrahmen',
'Concrete shear wall': 'Betonscherwand',
'Condition': 'Bedingung',
'Conduct a Disaster Assessment': 'Durchführung einer Katastrophenbeurteilung',
'Configuration': 'Konfiguration',
'Configurations': 'Konfigurationen',
'Configure Run-time Settings': 'Laufzeiteinstellungen konfigurieren',
'Confirm Shipment Received': 'Erhalt der Lieferung bestätigen',
'Confirmed': 'Bestätigt',
'Confirming Organization': 'Organisation bestätigen',
'Confiscated by': 'Beschlagnahmt durch',
'Confiscated on': 'Beschlagnahmt am',
'Confiscation': 'Beschlagnahmung',
'Conflict Details': 'Details zum Konflikt',
'Conflict Resolution': 'Konfliktlösung',
'Connect Parser': 'Parser verbinden',
'Connection': 'Verbindung',
'Consignment Note': 'Warenbegleitschein',
'Constraints Only': 'Nur Bedingungen',
'Consultant in charge': 'Zuständiger Berater',
'Consultant': 'Berater',
'Consumable': 'Verbrauchsartikel',
'Contact Data': 'Kontakt Daten',
'Contact Description': 'Kontaktbeschreibung',
'Contact Details': 'Details zum Kontakt',
'Contact Info': 'Kontaktinformationen',
'Contact Information Added': 'Kontaktinformationen hinzugefügt',
'Contact Information Deleted': 'Kontaktinformationen gelöscht',
'Contact Information Updated': 'Kontakt Informationen aktualisiert',
'Contact Information': 'Kontaktinformationen',
'Contact Method': 'Kontaktmethode',
'Contact Name': 'Name des Ansprechpartners',
'Contact Person / Camp Owner': 'Kontaktperson / Camp-Betreiber',
'Contact Person': 'Kontaktperson',
'Contact Phone': 'Telefonnummer des Kontaktes',
'Contact Us': 'Kontaktieren Sie uns',
'Contact details': 'Details zum Kontakt',
'Contact information added': 'Kontaktinformationen hinzugefügt',
'Contact information deleted': 'Kontaktinformationen gelöscht',
'Contact information updated': 'Kontaktinformationen aktualisiert',
'Contact us': 'Kontaktieren Sie uns',
'Contact': 'Kontakt',
'Contacts': 'Kontakte',
'Content Management System': 'Content Management System',
'Content Management': 'Content Management',
'Content': 'Inhalt',
'Contents': 'Inhalte',
'Contract End Date': 'Ablaufzeit des Vertrags',
'Contributor': 'Mitwirkung',
'Conversion Tool': 'Umrechnungstool',
'Cooking NFIs': 'NFIs zum Kochen',
'Cooking Oil': 'Speiseöl',
'Coordinate Conversion': 'Koordinatentransformation',
'Coping Activities': 'Bewältigungsaktivitäten',
'Copy': 'Kopieren',
'Cost Type': 'Kostentyp',
'Cost per Megabyte': 'Kosten pro Megabyte',
'Cost per Minute': 'Kosten pro Minute',
'Counseling Theme Details': 'Details zum Beratungsthema',
'Counseling Theme created': 'Beratungsthema angelegt',
'Counseling Theme deleted': 'Beratungsthema gelöscht',
'Counseling Theme updated': 'Beratungsthema aktualisiert',
'Counseling Themes': 'Beratungsthemen',
'Count': 'Anzahl',
'Country of Residence': 'Land des Wohnsitzes',
'Country': 'Land',
'County / District': 'Kreis / Bezirk',
'County': 'Bezirk',
'Course Catalog': 'Katalog der Kurse',
'Course Certificate Details': 'Details zum Kurszertifikat ',
'Course Certificate added': 'Kurszertifikat hinzugefügt',
'Course Certificate deleted': 'Kurszertifikat gelöscht',
'Course Certificate updated': 'Kurszertifikat aktualisiert',
'Course Certificates': 'Kurszertifikate',
'Course Details': 'Details zum Kurs',
'Course added': 'Kurs hinzugefügt',
'Course deleted': 'Kurs gelöscht',
'Course updated': 'Kurs aktualisiert',
'Course': 'Kurs',
'Create & manage Distribution groups to receive Alerts': 'Erstellen und Verwalten von Verteilergruppen um Warnhinweise zu empfangen',
'Create Action': 'Maßnahme anlegen',
'Create Activity Report': 'Aktivitätsreport erstellen',
'Create Activity Type': 'Aktivitätstyp erstellen',
'Create Activity': 'Aktivität erstellen',
'Create Airport': 'Flughafen erstellen',
'Create Allowance Information': 'Information zum Taschengeld erstellen',
'Create Appointment Type': 'Terminart erstellen',
'Create Appointment': 'Termin erstellen',
'Create Assessment': 'Beurteilung erstellen',
'Create Asset': 'Anlage erstellen',
'Create Bed Type': 'Bettentyp erstellen',
'Create Brand': 'Marke erstellen',
'Create Budget': 'Budget erstellen',
'Create Bundle': 'Produktpaket erstellen',
'Create Case Flag': 'Fall Flagge erstellen',
'Create Case Status': 'Fallstatus erstellen',
'Create Case': 'Fall anlegen',
'Create Catalog Item': 'Katalogeintrag erstellen',
'Create Catalog': 'Katalog erstellen',
'Create Certificate': 'Zertifikat erstellen',
'Create Checklist': 'Prüfliste erstellen',
'Create Cholera Treatment Capability Information': 'Informationen zur Cholera-Behandlungskapazität erstellen',
'Create Cluster Subsector': 'Cluster Teilbereich erstellen',
'Create Cluster': 'Cluster erstellen',
'Create Competency Rating': 'Kompetenzbewertung erstellen',
'Create Contact': 'Kontaktperson erstellen',
'Create Counseling Theme': 'Beratungsthema anlegen',
'Create Course': 'Kurs erstellen',
'Create Dead Body Report': 'Leichenbericht erstellen',
'Create Department': 'Abteilung erstellen',
'Create Depository': 'Verwahrungsort anlegen',
'Create Event Type': 'Ereignistyp erstellen',
'Create Event': 'Neues Ereignis erstellen',
'Create Facility Type': 'Einrichtungstyp erstellen',
'Create Facility': 'Einrichtung erstellen',
'Create Feature Layer': 'Kartenebene für Objektart erstellen',
'Create Group Entry': 'Gruppeneintrag erstellen',
'Create Group': 'Gruppe erstellen',
'Create Heliport': 'Hubschrauberlandeplatz erstellen',
'Create Hospital': 'Krankenhaus erstellen',
'Create Identification Report': 'Identifizierungsbericht erstellen',
'Create Impact Assessment': 'Folgenabschätzung erstellen',
'Create Incident Report': 'Vorfallbericht erstellen',
'Create Incident Type': 'Vorfalltyp erstellen',
'Create Incident': 'Vorfall erstellen',
'Create Item Category': 'Element Kategorie erstellen',
'Create Item Pack': 'Artikelgruppe erstellen',
'Create Item Type': 'Gegenstandsart anlegen',
'Create Item': 'Neuen Artikel anlegen',
'Create Job Title': 'Tätigkeitsbezeichnung erstellen',
'Create Kit': 'Ausstattung (Kit) anlegen',
'Create Kitting': 'Kit zusammenstellen',
'Create Layer': 'Kartenebene anlegen',
'Create Location Hierarchy': 'Standorthierarchie anlegen',
'Create Location': 'Standort anlegen',
'Create Map Profile': 'Kartenkonfiguration anlegen',
'Create Map Style': 'Kartensymbolisierung erstellen',
'Create Marker': 'Marker/Symbol anlegen',
'Create Member': 'Mitglied erstellen',
'Create Membership Type': 'Mitgliedstyp erstellen',
'Create Mobile Impact Assessment': 'Mobile Folgenabschätzung erstellen',
'Create Note': 'Notiz erstellen',
'Create Office Type': 'Bürotyp anlegen',
'Create Office': 'Büro anlegen',
'Create Organization Type': 'Organisationstyp anlegen',
'Create Organization': 'Organisation anlegen',
'Create Personal Effects': 'Persönliche Habe anlegen',
'Create PoI Type': 'PoI-Typ erstellen',
'Create Point of Interest': 'PoI erstellen',
'Create Post': 'Beitrag erstellen',
'Create Program': 'Programm erstellen',
'Create Project': 'Projekt anlegen',
'Create Projection': 'Kartenprojektion anlegen',
'Create Rapid Assessment': 'Schnell-Beurteilung anlegen',
'Create Report': 'Bericht anlegen',
'Create Repository': 'Repository anlegen',
'Create Request Template': 'Anfragevorlage anlegen',
'Create Request': 'Anfrage anlegen',
'Create Residence Permit Type': 'Aufenthaltserlaubnistyp anlegen',
'Create Residence Status Type': 'Aufenthaltsstatustyp anlegen',
'Create Residence Status': 'Aufenthaltsstatus anlegen',
'Create Residents Report': 'Bewohnerliste anlegen',
'Create Resource': 'Ressource anlegen',
'Create River': 'Neuen Fluss anlegen',
'Create Role': 'Neue Rolle anlegen',
'Create Room': 'Neues Zimmer anlegen',
'Create Scenario': 'Neues Szenario anlegen',
'Create Seaport': 'Seehafen erstellen',
'Create Sector': 'Neuen Bereich anlegen',
'Create Seized Item': 'Beschlagnahmung anlegen',
'Create Series': 'Serie erstellen',
'Create Service Contact Type': 'Leistungsträgerart anlegen',
'Create Service Contact': 'Leistungsträger anlegen',
'Create Service Profile': 'Neues Leistungsprofil anlegen',
'Create Shelter Flag': 'Unterkunftsflagge anlegen',
'Create Shelter Inspection': 'Unterkunftsinspektion anlegen',
'Create Shelter Service': 'Neue Unterkunftsleistung anlegen',
'Create Shelter Status': 'Unterkunftsstatus erstellen',
'Create Shelter Type': 'Neuen Unterkunftstyp anlegen',
'Create Shelter': 'Neue Unterkunft anlegen',
'Create Skill Type': 'Art der Qualifikation / Fähigkeit anlegen',
'Create Skill': 'Fähigkeiten / Qualifikationen anlegen',
'Create Staff Member': 'Neuen Mitarbeiter anlegen',
'Create Staff Type': 'Mitarbeitertyp erstellen',
'Create Status': 'Neuen Status anlegen',
'Create Supplier': 'Neuen Lieferanten anlegen',
'Create Task': 'Neue Aufgabe anlegen',
'Create Theme': 'Neues Thema anlegen',
'Create Training Event': 'Neuen Schulungskurs anlegen',
'Create User': 'Neuen Benutzer anlegen',
'Create Vehicle Type': 'Fahrzeugtyp erstellen',
'Create Vehicle': 'Fahrzeug erstellen',
'Create Volunteer Role': 'Freiwilligenrolle erstellen',
'Create Volunteer': 'Neuen Freiwilligen anlegen',
'Create Warehouse Type': 'Warenlagertyp erstellen',
'Create Warehouse': 'Neues Warenlager anlegen',
'Create a Person': 'Neue Person anlegen',
'Create a group entry in the registry.': 'Einen Gruppeneintrag im Register erstellen.',
'Create a new permit type': 'Neuen Erlaubnistyp anlegen',
'Create a new status type': 'Neuen Statustyp anlegen',
'Create': 'Anlegen',
'Create, enter, and manage surveys.': 'Erstellen, Eingabe und Verwaltung von Umfragen.',
'Created By': 'Erstellt von',
'Created On': 'Erstellt am',
'Creation of Surveys': 'Erstellung von Umfragen',
'Credential Details': 'Details zur Qualifikation',
'Credential added': 'Qualifikation hinzugefügt',
'Credential deleted': 'Qualifikation gelöscht',
'Credential updated': 'Qualifikation aktualisiert',
'Credentialling Organization': 'Bescheinigende Organisation',
'Credentials': 'Qualifikationen',
'Credit Card': 'Kreditkarte',
'Crime': 'Kriminalität',
'Criteria': 'Kriterien',
'Crop Image': 'Bild beschneiden',
'Currency': 'Währung',
'Current Address': 'Aktuelle Adresse',
'Current Appointments': 'Aktuelle Termine',
'Current Cases': 'Aktuelle Fälle',
'Current Entries': 'Aktuelle Einträge',
'Current Group Members': 'Aktuelle Gruppenmitglieder',
'Current Home Address': 'Aktuelle Heimatadresse',
'Current Identities': 'Aktuelle Identitäten',
'Current Location': 'Aktueller Standort',
'Current Log Entries': 'Aktuelle Protokolleinträge',
'Current Memberships': 'Aktuelle Mitgliedschaften',
'Current Needs': 'Aktuelle Bedarfsmeldungen',
'Current Population Availability (Day and Night)': 'Aktuelle maximale Belegungszahl (Tag und Nacht)',
'Current Population': 'Aktuelle Belegungszahl',
'Current Records': 'Aktuelle Datensätze',
'Current Registrations': 'Aktuellen Registrierungen',
'Current Residents': 'Aktuelle Bewohner',
'Current Status': 'Aktueller Status',
'Current Team Members': 'Aktuelle Team Mitglieder',
'Current Total': 'Aktuelle Summe',
'Current Twitter account': 'Aktueller Benutzeraccount bei Twitter',
'Current community priorities': 'Aktuelle Priorisierung in der Community',
'Current general needs': 'Aktueller allgemeiner Bedarf',
'Current greatest needs of vulnerable groups': 'Wichtigste Bedürfnisse der gefährdeten Gruppen',
'Current health problems': 'Derzeitige Gesundheitsprobleme',
'Current number of patients': 'Aktuelle Anzahl von Patienten',
'Current problems, categories': 'Aktuelle Probleme, Kategorien',
'Current problems, details': 'Aktuelle Probleme, Details',
'Current request': 'Aktuelle Anfrage',
'Current response': 'Aktuelle Antwort',
'Current session': 'Aktuelle Sitzung',
'Current': 'Aktuell',
'Currently no Certifications registered': 'Derzeit sind keine Zertifizierungen registriert',
'Currently no Competencies registered': 'Derzeit sind keine Kompetenzen registriert',
'Currently no Course Certificates registered': 'Derzeit sind keine Kurszertifikate registriert',
'Currently no Credentials registered': 'Derzeit sind keine Qualifikationen registriert',
'Currently no Missions registered': 'Derzeit sind keine Aufträge registriert',
'Currently no Skill Equivalences registered': 'Derzeit sind keine Fähigkeits-Vergleichbarkeiten registriert',
'Currently no Trainings registered': 'Derzeit keine Schulungen registriert',
'Currently no entries in the catalog': 'Derzeit keine Einträge im Katalog',
'Customer number, file reference or other reference number': 'Kunden-Nr., Aktenzeichen oder anderes GeschZ',
'Customs Capacity': 'Zollkapazität',
'Customs Warehousing Storage Capacity': 'Zollwarenlager Kapazität',
'DNA Profile': 'DNA-Profil',
'DNA Profiling': 'DNA-Profiling',
'Dam Overflow': 'Dammüberlauf',
'Damage': 'Beschädigung',
'Dangerous Person': 'Gefährliche Person',
'Dashboard': 'Dashboard',
'Data uploaded': 'Daten hochgeladen',
'Data': 'Daten',
'Database': 'Datenbank',
'Date & Time': 'Datum und Zeit',
'Date Actioned': 'Erledigt am',
'Date Available': 'Verfügbar ab',
'Date Created': 'Erstellt am',
'Date Due': 'Fällig am',
'Date Joined': 'Eintrittsdatum',
'Date Modified': 'Geändert am',
'Date Needed By': 'Benötigt ab',
'Date Published': 'Veröffentlicht am',
'Date Question': 'Datumsfrage',
'Date Received': 'Erhalten am',
'Date Released': 'Datum der Veröffentlichung',
'Date Requested': 'Angefordert am',
'Date Required Until': 'Benötigt bis',
'Date Required': 'Benötigt am',
'Date Resigned': 'Datum der Kündigung',
'Date Sent': 'Gesendet am',
'Date Taken': 'Aufgenommen am',
'Date Until': 'Datum bis',
'Date and Time': 'Datum und Zeit',
'Date and time this report relates to.': 'Datum und Uhrzeit auf die sich dieser Bericht bezieht.',
'Date for Follow-up': 'Wiedervorlage am',
'Date is required when marking the appointment as completed': 'Datumsangabe erforderlich, wenn der Termin als beendet markiert werden soll',
'Date of Birth': 'Geburtsdatum',
'Date of Entry': 'Einreisedatum',
'Date of Latest Information on Beneficiaries Reached': 'Datum der letzten Informationen über erreichte Begünstigte',
'Date of Report': 'Datum des Berichts',
'Date of payment required': 'Auszahlungsdatum erforderlich',
'Date unknown': 'Datum unbekannt',
'Date': 'Datum',
'Date/Time of Find': 'Datum/Zeit des Fundes',
'Date/Time when found': 'Datum / Uhrzeit, wann festgestellt',
'Date/Time when last seen': 'Datum / Uhrzeit, wann zuletzt gesehen',
'Date/Time': 'Datum/Zeit',
'Day': 'Tag',
'Days': 'Tage',
'De-duplicate': 'Deduplizieren',
'De-duplicator': 'Duplikate entfernen',
'Dead Body Details': 'Details zur Leiche',
'Dead Body Reports': 'Leichenberichte',
'Dead Body': 'Leiche',
'Dead body report added': 'Leichenbericht hinzugefügt',
'Dead body report deleted': 'Leichenbericht gelöscht',
'Dead body report updated': 'Leichenbericht aktualisiert',
'Deaths in the past 24h': 'Tote der letzten 24h',
'Deaths/24hrs': 'Todesfälle/24 Std.',
'Decimal Degrees': 'Dezimalgrade',
'Decision': 'Entscheidung',
'Decomposed': 'Verwest',
'Default Base layer?': 'Standard Hintergrundkartenebene?',
'Default Closure Status': 'Standard Abschluss-Status',
'Default Event Type': 'Standard Ereignistyp',
'Default Height of the map window.': 'Standardhöhe des Kartenfensters.',
'Default Initial Status': 'Standard Anfangsstatus',
'Default Location': 'Standard Gebiet/Standort',
'Default Map': 'Standard-Kartenfenster',
'Default Marker': 'Standardsymbol',
'Default Realm = All Entities the User is a Staff Member of': 'Automatische Auswahl = Alle Organisationen/Einheiten, bei denen der Benutzer momentan als Mitarbeiter registriert ist',
'Default Realm': 'Automatische Auswahl',
'Default Status': 'Standard Status',
'Default Width of the map window.': 'Standardbreite des Kartenfensters.',
'Default map question': 'Standard Kartenfrage',
'Default synchronization policy': 'Standard-Synchronisationsverfahren',
'Default?': 'Standard?',
'Defecation area for animals': 'Kotbereich für Tiere',
'Defect Details': 'Details zum Mangel',
'Defect found': 'Festgestellter Mangel',
'Defect': 'Mangel',
'Defects': 'Mängel',
'Define Scenarios for allocation of appropriate Resources (Human, Assets & Facilities).': 'Definieren Sie Szenarien für die Zuordnung der entsprechenden Ressourcen (Menschen, Anlagen und Einrichtungen).',
'Defines the icon used for display of features on handheld GPS.': 'Definiert das Symbol, welches für die Anzeige der Objekte auf mobilen GPS-Geräten verwendet wird.',
'Defines the icon used for display of features on interactive map & KML exports.': 'Definiert das Symbol, welches für die Anzeige der Objekte auf der interaktiven Karte sowie für die KML Exporte verwendet wird.',
'Defines the marker used for display & the attributes visible in the popup.': 'Definiert das Symbol, das für die Anzeige und die Attribute im Popup-Fenster verwendet wird.',
'Degrees must be a number between -180 and 180': 'Grad muss eine Zahl zwischen -180 und 180 sein.',
'Delete Action': 'Maßnahme löschen',
'Delete Activity': 'Aktivität löschen',
'Delete Allowance Information': 'Informationen zum Taschengeld löschen',
'Delete Alternative Item': 'Alternativen Artikel löschen',
'Delete Appointment Type': 'Terminart löschen',
'Delete Appointment': 'Termin löschen',
'Delete Assessment Summary': 'Zusammenfassung der Beurteilung löschen',
'Delete Assessment': 'Beurteilung löschen',
'Delete Asset Log Entry': 'Löschen des Protokolleintrags der Anlage',
'Delete Asset': 'Anlage löschen',
'Delete Baseline Type': 'Lösche Typ des Referenzdatums',
'Delete Baseline': 'Referenzdatum löschen',
'Delete Branch': 'Zweigorganisation löschen',
'Delete Brand': 'Lösche Marke',
'Delete Budget': 'Lösche Budget',
'Delete Bundle': 'Produktpaket löschen',
'Delete Case Flag': 'Fall Flagge löschen',
'Delete Case Status': 'Fallstatus löschen',
'Delete Catalog Item': 'Lösche Katalogeintrag',
'Delete Catalog': 'Katalog löschen',
'Delete Certificate': 'Zertifikat löschen',
'Delete Certification': 'Zertifizierung löschen',
'Delete Cluster Subsector': 'Cluster Teilbereich löschen',
'Delete Cluster': 'Cluster löschen',
'Delete Commitment Item': 'Zugesagten Artikel löschen',
'Delete Commitment': 'Zusage löschen',
'Delete Competency Rating': 'Kompetenzbewertung löschen',
'Delete Competency': 'Kompetenz löschen',
'Delete Contact Information': 'Kontaktinformation löschen',
'Delete Counseling Theme': 'Beratungsthema löschen',
'Delete Course Certificate': 'Lösche Kurszertifikat',
'Delete Course': 'Lösche Kurs',
'Delete Credential': 'Qualifikation löschen',
'Delete Defect': 'Mangel löschen',
'Delete Depository': 'Verwahrungsort löschen',
'Delete Document': 'Dokument löschen',
'Delete Donor': 'Spender löschen',
'Delete Entry': 'Eintrag löschen',
'Delete Event Type': 'Ereignistyp löschen',
'Delete Event': 'Ereignis löschen',
'Delete Facility Type': 'Einrichtungstyp löschen',
'Delete Facility': 'Einrichtung löschen',
'Delete Feature Layer': 'Lösche Objekt Kartenebene',
'Delete Group': 'Gruppe löschen',
'Delete Hospital': 'Krankenhaus löschen',
'Delete Image': 'Grafik löschen',
'Delete Impact Type': 'Löschen des Auswirkungstyps',
'Delete Impact': 'Auswirkung löschen',
'Delete Incident Report': 'Vorfallbericht löschen',
'Delete Item Category': 'Artikel Kategorie löschen',
'Delete Item Pack': 'Artikelgruppe löschen',
'Delete Item Type': 'Gegenstandsart löschen',
'Delete Item': 'Eintrag löschen',
'Delete Job Role': 'Tätigkeit löschen',
'Delete Key': 'Schlüssel löschen',
'Delete Kit': 'Ausstattung (Kit) löschen',
'Delete Layer': 'Ebene löschen',
'Delete Level 1 Assessment': 'Stufe 1 Beurteilung löschen',
'Delete Level 2 Assessment': 'Stufe 2 Beurteilung löschen',
'Delete Location': 'Standort löschen',
'Delete Map Profile': 'Kartenkonfiguration löschen',
'Delete Marker': 'Marker/Symbol löschen',
'Delete Membership': 'Mitgliedschaft löschen',
'Delete Message': 'Nachricht löschen',
'Delete Mission': 'Auftrag löschen',
'Delete Need Type': 'Anforderungstyp löschen',
'Delete Need': 'Anforderung löschen',
'Delete Office Type': 'Bürotyp löschen',
'Delete Office': 'Büro löschen',
'Delete Organization Type': 'Organisationstyp löschen',
'Delete Organization': 'Organisation löschen',
'Delete Peer': 'Peer löschen',
'Delete Person': 'Benutzer löschen',
'Delete Photo': 'Foto löschen',
'Delete Population Statistic': 'Bevölkerungsstatistik löschen',
'Delete Position': 'Position löschen',
'Delete Project': 'Projekt löschen',
'Delete Projection': 'Koordinatensystemprojektion löschen',
'Delete Rapid Assessment': 'Schnell-Beurteilung löschen',
'Delete Received Item': 'Erhaltenen Artikel löschen',
'Delete Received Shipment': 'Erhaltene Lieferung löschen',
'Delete Record': 'Datensatz löschen',
'Delete Report': 'Bericht löschen',
'Delete Request Item': 'Lösche das Anfrageelement',
'Delete Request': 'Lösche die Anfrage',
'Delete Residence Permit Type': 'Aufenthaltserlaubnistyp löschen',
'Delete Residence Status Type': 'Aufenthaltsstatusart löschen',
'Delete Residence Status': 'Aufenthaltsstatus löschen',
'Delete Residents Report': 'Bewohnerliste löschen',
'Delete Resource': 'Lösche die Ressource',
'Delete Room': 'Raum löschen',
'Delete Scenario': 'Szenario löschen',
'Delete Section': 'Lösche Abschnitt',
'Delete Sector': 'Lösche Bereich',
'Delete Seized Item': 'Beschlagnahmung löschen',
'Delete Sent Item': 'Lösche gesendeten Artikel',
'Delete Sent Shipment': 'Lösche gesendete Lieferung',
'Delete Service Contact Type': 'Leistungsträgerart löschen',
'Delete Service Contact': 'Leistungsträger löschen',
'Delete Service Profile': 'Service-Profil löschen',
'Delete Setting': 'Einstellung löschen',
'Delete Shelter Flag': 'Unterkunftsflagge löschen',
'Delete Shelter Inspection': 'Unterkunftsinspektion löschen',
'Delete Shelter': 'Unterkunft löschen',
'Delete Site Needs': 'Standortbedarf löschen',
'Delete Skill Equivalence': 'Fähigkeits-Vergleichbarkeit löschen',
'Delete Skill Provision': 'Fähigkeits-Bereitstellung löschen',
'Delete Skill Type': 'Löschen des Typs der Befähigung',
'Delete Skill': 'Befähigung löschen',
'Delete Staff Type': 'Mitarbeitertyp löschen',
'Delete Status': 'Status löschen',
'Delete Subscription': 'Abonnement löschen',
'Delete Subsector': 'Teilbereich löschen',
'Delete Survey Answer': 'Umfrage - Antwort Löschen',
'Delete Survey Question': 'Umfrage - Frage löschen',
'Delete Survey Series': 'Umfrage Serie löschen',
'Delete Survey Template': 'Umfrage Vorlage löschen',
'Delete Training': 'Schulung löschen',
'Delete Unit': 'Einheit löschen',
'Delete User': 'Benutzer löschen',
'Delete Volunteer': 'Freiwilligen löschen',
'Delete Warehouse': 'Warenlager löschen',
'Delete from Server?': 'Vom Server löschen?',
'Delete': 'Löschen',
'Deliver To': 'Liefern an',
'Delphi Decision Maker': 'Delphi Entscheidungsträger',
'Demographic': 'Demografisch',
'Demonstrations': 'Vorführungen',
'Dental Examination': 'Zahnärztliche Untersuchung',
'Dental Profile': 'Zahnärztliches Profil',
'Deny Check-in': 'Check-in verweigern',
'Deny Check-out': 'Check-out verweigern',
'Deny the person to check-in when this flag is set': 'Check-in der Person verweigern wenn diese Flagge gesetzt ist',
'Deny the person to check-out when this flag is set': 'Check-out der Person verweigern wenn diese Flagge gesetzt ist',
'Department / Unit': 'Abteilung / Einheit',
'Department Catalog': 'Abteilungskatalog',
'Departures': 'Abgänge',
'Dependent Person': 'Abhängige Person',
'Depositories': 'Verwahrungsorte',
'Depository Details': 'Details zum Verwahrungsort',
'Depository created': 'Verwahrungsort angelegt',
'Depository deleted': 'Verwahrungsort gelöscht',
'Depository updated': 'Verwahrungsort aktualisiert',
'Depository': 'Verwahrungsort',
'Describe the condition of the roads to your hospital.': 'Beschreiben Sie den Zustand der Straßen zu Ihrem Krankenhaus.',
'Describe the meaning, reasons and potential consequences of this status': 'Beschreiben Sie die Bedeutung, Gründe und möglichen Konsequenzen dieses Status',
'Description of Contacts': 'Beschreibung der Kontakte',
'Description of defecation area': 'Beschreibung des Sanitärbereichs',
'Description of drinking water source': 'Beschreibung der Herkunft des Trinkwassers',
'Description of sanitary water source': 'Beschreibung der Herkunft des Sanitärwassers',
'Description of water source before the disaster': 'Beschreibung der Herkunft des Wassers vor der Katastrophe',
'Description': 'Beschreibung',
'Desire to remain with family': 'Wunsch bei der Familie zu bleiben',
'Destination': 'Ziel',
'Destroyed': 'Zerstört',
'Detailed Description/URL': 'Genaue Beschreibung/URL',
'Details field is required!': 'Detailfeld ist erforderlich!',
'Dialysis': 'Dialyse',
'Diaphragms, horizontal bracing': 'Membranen, horizontale Aussteifung',
'Diarrhea': 'Durchfall',
'Dignitary Visit': 'Besuch des Würdenträgers',
'Direction': 'Richtung',
'Disable': 'Deaktivieren',
'Disabled participating in coping activities': 'Behinderte beteiligen sich an Bewältigungsaktivitäten',
'Disabled': 'Deaktiviert',
'Disabled?': 'Behindert?',
'Disappeared': 'Untergetaucht',
'Disaster Assessments': 'Katastrophenbeurteilungen',
'Disaster Victim Identification': 'Katastrophen Opferidentifikation',
'Disaster Victim Registry': 'Katastrophen Opferverzeichnis',
'Disaster clean-up/repairs': 'Katastrophen Reinigung/Reparaturen',
'Discharge (cusecs)': 'Ausfluss (cusecs)',
'Discharges/24hrs': 'Entlassungen/24 Std.',
'Discussion Forum on item': 'Diskussionsforum über Eintrag',
'Discussion Forum': 'Diskussionsforum',
'Disease vectors': 'Krankheitsüberträger',
'Dispensary': 'Ambulatorium',
'Displaced Populations': 'Vertriebene Bevölkerungsgruppen',
'Displaced': 'Vertriebene',
'Display Polygons?': 'Polygone anzeigen?',
'Display Routes?': 'Routen anzeigen?',
'Display Tracks?': 'Wege anzeigen?',
'Display Waypoints?': 'Wegpunkte anzeigen?',
'Distance between defecation area and water source': 'Distanz zwischen Sanitärbereich und Wasserquelle',
'Distance from %s:': 'Abstand von %s:',
'Distance(Kms)': 'Distanz (km)',
'Distribution groups': 'Verteilergruppen',
'Distribution': 'Verteilung',
'District': 'Bezirk',
'Do you really want to delete these records?': 'Sollen diese Datensätze wirklich gelöscht werden?',
'Do you want to cancel this received shipment? The items will be removed from the Inventory. This action CANNOT be undone!': 'Möchten Sie diese erhaltene Lieferung stornieren? Die Artikel werden aus dem Bestand entfernt. Diese Aktion kann NICHT rückgängig gemacht werden!',
'Do you want to cancel this sent shipment? The items will be returned to the Inventory. This action CANNOT be undone!': 'Möchten Sie diese abgeschickte Sendung stornieren? Die Artikel werden an den Bestand zurückgegeben. Diese Aktion kann NICHT rückgängig gemacht werden!',
'Do you want to receive this shipment?': 'Wollen Sie die Lieferung empfangen?',
'Do you want to send these Committed items?': 'Wollen Sie die zugesagten Artikel schicken?',
'Do you want to send this shipment?': 'Wollen Sie diese Lieferung abschicken?',
'Document Details': 'Details zum Dokument',
'Document Scan': 'Dokumentkopie',
'Document added': 'Dokument hinzugefügt',
'Document deleted': 'Dokument gelöscht',
'Document updated': 'Dokument aktualisiert',
'Documents and Photos': 'Dokumente und Fotos',
'Documents': 'Dokumente',
'Does this facility provide a cholera treatment center?': 'Verfügt diese Einrichtung über ein Behandlungscenter für Cholera?',
'Doing nothing (no structured activity)': 'Untätig (keine strukturierte Aktivität)',
'Dollars': 'Dollar',
'Domain': 'Domäne',
'Domestic chores': 'Hausarbeit',
'Donated': 'Gespendet',
'Donating Organization': 'Spendende Organisation',
'Donation Certificate': 'Spendenzertifikat',
'Donation Phone #': 'Telefonnummer für Spenden',
'Donation': 'Spende',
'Donations Needed': 'Spenden benötigt',
'Donations': 'Spenden',
'Donor Details': 'Details zum Spender',
'Donor added': 'Spender hinzugefügt',
'Donor deleted': 'Spender gelöscht',
'Donor updated': 'Spender aktualisiert',
'Donor': 'Spender',
'Donors Report': 'Bericht zu Spendern',
'Donors': 'Spender',
'Door frame': 'Türrahmen',
'Download PDF': 'PDF herunterladen',
'Download Template': 'Vorlage herunterladen',
'Draft': 'Entwurf',
'Drainage': 'Abfluss',
'Draw on Map': 'Auf Karte zeichnen',
'Drawing up a Budget for Staff & Equipment across various Locations.': 'Aufstellung eines Budgets für Mitarbeiter und Ausrüstung über mehrere Standorte.',
'Drill Down by Group': 'Recherche nach Gruppe',
'Drill Down by Incident': 'Recherche nach Vorfall',
'Drill Down by Shelter': 'Recherche nach Unterkunft',
'Driver Phone Number': 'Telefonnummer des Fahrers',
'Drivers': 'Fahrer',
'Driving License': 'Führerschein',
'Drop-off Location for Goods?': 'Sammelstelle für Sachspenden?',
'Drought': 'Dürre',
'Drugs': 'Drogen',
'Dry Dock': 'Trockendock',
'Due Follow-ups': 'Fällige Wiedervorlagen',
'Dug Well': 'Schachtbrunnen',
'Duplicate?': 'Duplikat?',
'Dust Storm': 'Staubsturm',
'Dwelling': 'Wohnstätte',
'EMS Reason': 'EMS Grund',
'ER Status Reason': 'Status Notaufnahme Grund',
'ER Status': 'Status Notaufnahme',
'ESRI Shapefile': 'ESRI Shapefile',
'Early Recovery': 'Frühe Wiederherstellung',
'Earthquake': 'Erdbeben',
'EasyOpt No.': 'EasyOpt Nr.',
'EasyOpt Number': 'EasyOpt Nummer',
'Edit Action': 'Maßnahme bearbeiten',
'Edit Activity': 'Aktivität bearbeiten',
'Edit Address': 'Adresse bearbeiten',
'Edit Allowance Information': 'Informationen zum Taschengeld bearbeiten',
'Edit Alternative Item': 'Alternativen Artikel bearbeiten',
'Edit Application': 'Anwendung bearbeiten',
'Edit Appointment Type': 'Terminart bearbeiten',
'Edit Appointment': 'Termin bearbeiten',
'Edit Assessment Summary': 'Zusammenfassung der Beurteilung bearbeiten',
'Edit Assessment': 'Beurteilung bearbeiten',
'Edit Asset Log Entry': 'Protokolleintrag der Anlage bearbeiten',
'Edit Asset': 'Anlage bearbeiten',
'Edit Baseline Type': 'Bearbeiten des Typs des Referenzdatums',
'Edit Baseline': 'Referenzdatum bearbeiten',
'Edit Branch Organization': 'Zweigorganisation bearbeiten',
'Edit Brand': 'Marke bearbeiten',
'Edit Budget': 'Budget bearbeiten',
'Edit Bundle': 'Produktpaket bearbeiten',
'Edit Camp Service': 'Camp Leistung bearbeiten',
'Edit Camp Type': 'Camptyp bearbeiten',
'Edit Camp': 'Camp bearbeiten',
'Edit Case Details': 'Details zum Fall bearbeiten',
'Edit Case Flag': 'Fall Flagge bearbeiten',
'Edit Case Status': 'Fallstatus bearbeiten',
'Edit Catalog Item': 'Katalogeintrag bearbeiten',
'Edit Catalog': 'Katalog bearbeiten',
'Edit Certificate': 'Zertifikat bearbeiten',
'Edit Certification': 'Zertifizierung bearbeiten',
'Edit Cluster Subsector': 'Cluster Teilbereich bearbeiten',
'Edit Cluster': 'Cluster bearbeiten',
'Edit Commitment Item': 'Zugesagten Artikel bearbeiten',
'Edit Commitment': 'Zusage bearbeiten',
'Edit Competency Rating': 'Kompetenzbewertung bearbeiten',
'Edit Competency': 'Kompetenz bearbeiten',
'Edit Contact Information': 'Kontaktinformation bearbeiten',
'Edit Contact': 'Kontakt bearbeiten',
'Edit Contents': 'Inhalt bearbeiten',
'Edit Counseling Theme': 'Beratungsthema bearbeiten',
'Edit Course Certificate': 'Kurszertifikat bearbeiten',
'Edit Course': 'Kurs bearbeiten',
'Edit Credential': 'Qualifikation bearbeiten',
'Edit Dead Body Details': 'Leichendetails bearbeiten',
'Edit Depository': 'Verwahrungsort bearbeiten',
'Edit Description': 'Beschreibung bearbeiten',
'Edit Details': 'Details bearbeiten',
'Edit Disaster Victims': 'Katastrophenopfer bearbeiten',
'Edit Document': 'Dokument bearbeiten',
'Edit Donor': 'Spender bearbeiten',
'Edit Email Settings': 'Email Einstellungen bearbeiten',
'Edit Entry': 'Eintrag bearbeiten',
'Edit Event Type': 'Ereignistyp bearbeiten',
'Edit Event': 'Ereignis bearbeiten',
'Edit Facility Type': 'Einrichtungstyp bearbeiten',
'Edit Facility': 'Einrichtung bearbeiten',
'Edit Family Member': 'Familienmitglied bearbeiten',
'Edit Feature Layer': 'Objekt-Layer bearbeiten',
'Edit Flood Report': 'Flutbericht bearbeiten',
'Edit Gateway Settings': 'Gateway-Einstellungen bearbeiten',
'Edit Group': 'Gruppe bearbeiten',
'Edit Hospital': 'Krankenhaus bearbeiten',
'Edit Human Resource': 'Personelle Ressource bearbeiten',
'Edit Identification Report': 'Identifizierungsbericht bearbeiten',
'Edit Identity': 'Identität bearbeiten',
'Edit Image Details': 'Bild Details bearbeiten',
'Edit Impact Type': 'Typ der Auswirkung bearbeiten',
'Edit Impact': 'Auswirkungen bearbeiten',
'Edit Incident Report': 'Vorfallsbericht bearbeiten',
'Edit Inventory Item': 'Artikel des Bestands bearbeiten',
'Edit Item Category': 'Kategorie des Artikel bearbeiten',
'Edit Item Pack': 'Artikelgruppe bearbeiten',
'Edit Item Type': 'Gegenstandsart bearbeiten',
'Edit Item': 'Artikel bearbeiten',
'Edit Job Role': 'Tätigkeit bearbeiten',
'Edit Key': 'Schlüssel bearbeiten',
'Edit Kit': 'Ausstattung (Kit) bearbeiten',
'Edit Layer': 'Kartenebene bearbeiten',
'Edit Level %d Locations?': 'Standorte der Ebene %d bearbeiten?',
'Edit Level 1 Assessment': 'Stufe 1 Beurteilung bearbeiten',
'Edit Level 2 Assessment': 'Stufe 2 Beurteilung bearbeiten',
'Edit Location': 'Standort (Position) bearbeiten',
'Edit Log Entry': 'Protokolleintrag bearbeiten',
'Edit Map Profile': 'Kartenkonfiguration bearbeiten',
'Edit Map Services': 'Kartendienste bearbeiten',
'Edit Marker': 'Marker/Symbol bearbeiten',
'Edit Membership': 'Mitgliedschaft bearbeiten',
'Edit Message': 'Nachricht bearbeiten',
'Edit Messaging Settings': 'Messaging-Einstellungen bearbeiten',
'Edit Mission': 'Auftrag bearbeiten',
'Edit Modem Settings': 'Modem-Einstellungen bearbeiten',
'Edit Need Type': 'Bedarfstyp bearbeiten',
'Edit Need': 'Bedarf bearbeiten',
'Edit Note': 'Notiz bearbeiten',
'Edit Office': 'Büro bearbeiten',
'Edit Options': 'Optionen bearbeiten',
'Edit Organization': 'Organisation bearbeiten',
'Edit Parameters': 'Parameter bearbeiten',
'Edit Peer Details': 'Details zu Peer bearbeiten',
'Edit Person Details': 'Details zur Person bearbeiten',
'Edit Personal Effects Details': 'Details zur persönlichen Habe bearbeiten',
'Edit Photo': 'Foto bearbeiten',
'Edit Population Statistic': 'Bevölkerungsstatistik bearbeiten',
'Edit Position': 'Position bearbeiten',
'Edit Problem': 'Problem bearbeiten',
'Edit Project': 'Projekt bearbeiten',
'Edit Projection': 'Kartenprojektion bearbeiten',
'Edit Rapid Assessment': 'Schnell-Beurteilung bearbeiten',
'Edit Received Item': 'Erhaltenen Artikel bearbeiten',
'Edit Received Shipment': 'Erhaltene Lieferung bearbeiten',
'Edit Record': 'Datensatz bearbeiten',
'Edit Registration Details': 'Details zur Registrierung bearbeiten',
'Edit Registration': 'Registrierung bearbeiten',
'Edit Request Item': 'Anfrage zu Artikel bearbeiten',
'Edit Request': 'Anfrage bearbeiten',
'Edit Residence Permit Type': 'Aufenthaltserlaubnistyp bearbeiten',
'Edit Residence Status Type': 'Aufenthaltsstatustyp bearbeiten',
'Edit Residence Status': 'Aufenthaltsstatus bearbeiten',
'Edit Residents Report': 'Bewohnerliste bearbeiten',
'Edit Resource': 'Ressource bearbeiten',
'Edit River': 'Fluss bearbeiten',
'Edit Role': 'Rolle bearbeiten',
'Edit Room': 'Raum bearbeiten',
'Edit Scenario': 'Szenario bearbeiten',
'Edit Sector': 'Bereich bearbeiten',
'Edit Seized Item': 'Beschlagnahmung bearbeiten',
'Edit Sent Item': 'Gesendeten Artikel bearbeiten',
'Edit Service Contact Types': 'Leistungsträgerarten bearbeiten',
'Edit Service Contacts': 'Leistungsträger bearbeiten',
'Edit Setting': 'Einstellung bearbeiten',
'Edit Settings': 'Einstellungen bearbeiten',
'Edit Shelter Flag': 'Unterkunftsflagge bearbeiten',
'Edit Shelter Inspection': 'Unterkunftsinspektion bearbeiten',
'Edit Shelter Service': 'Unterkunft Leistung bearbeiten',
'Edit Shelter Type': 'Typ der Unterkunft bearbeiten',
'Edit Shelter': 'Unterkunft bearbeiten',
'Edit Site Needs': 'Standortbedarf ändern',
'Edit Skill Equivalence': 'Fähigkeits-Vergleichbarkeit bearbeiten',
'Edit Skill Provision': 'Fähigkeits-Bereitstellung bearbeiten',
'Edit Skill Type': 'Typ der Fähigkeit bearbeiten',
'Edit Skill': 'Fähigkeit bearbeiten',
'Edit Solution': 'Lösung bearbeiten',
'Edit Staff Type': 'Typ von Mitarbeitern bearbeiten',
'Edit Subscription': 'Abonnement bearbeiten',
'Edit Subsector': 'Teilbereich bearbeiten',
'Edit Surplus Meals Quantity': 'Anzahl überzähliger Essen bearbeiten',
'Edit Survey Answer': 'Umfrage - Antwort bearbeiten',
'Edit Survey Question': 'Umfrage - Frage bearbeiten',
'Edit Survey Series': 'Umfrage - Serie bearbeiten',
'Edit Survey Template': 'Umfrage Vorlage bearbeiten',
'Edit Task': 'Aufgabe bearbeiten',
'Edit Team': 'Team bearbeiten',
'Edit Theme': 'Thema bearbeiten',
'Edit Themes': 'Themen bearbeiten',
'Edit Ticket': 'Ticket bearbeiten',
'Edit Track': 'Route bearbeiten',
'Edit Training': 'Schulung bearbeiten',
'Edit Tropo Settings': 'Tropo Einstellungen bearbeiten',
'Edit User': 'Benutzer bearbeiten',
'Edit Volunteer Availability': 'Verfügbarkeit von Freiwilligem bearbeiten',
'Edit Volunteer Details': 'Details zu Freiwilligem bearbeiten',
'Edit Warehouse': 'Warenlager bearbeiten',
'Edit Weather Widget': 'Wetter-Widget bearbeiten',
'Edit current record': 'Aktuellen Datensatz bearbeiten',
'Edit message': 'Nachricht bearbeiten',
'Edit': 'Bearbeiten',
'Editable?': 'Bearbeitbar?',
'Education materials received': 'Ausbildungsmaterialien erhalten',
'Education materials, source': 'Herkunft der Ausbildungsmaterialien',
'Education': 'Bildung',
'Effects Inventory': 'Verzeichnis der persönlichen Habe',
'Effort (Hours)': 'Zeitaufwand (Stunden)',
'Eggs': 'Eier',
'Either a shelter or a location must be specified': 'Es muss entweder eine Unterkunft oder ein Standort angegeben werden',
'Either file upload or document URL required.': 'Es ist entweder ein Dateiupload oder eine Dokument-URL erforderlich.',
'Either file upload or image URL required.': 'Es ist entweder ein Dateiupload oder eine Bild-URL erforderlich.',
'Elderly person headed households (>60 yrs)': 'Von älteren Menschen (>60 Jahren) geführte Haushalte',
'Electrical': 'Elektrisch',
'Electrical, gas, sewerage, water, hazmats': 'Elektrik, Gas, Abwasser, Wasser, Gefahrgut',
'Elevated': 'Erhöht',
'Elevation': 'Höhe',
'Elevators': 'Aufzüge',
'Eligible for Allowance': 'Berechtigt für Taschengeld',
'Email Address': 'E-Mail-Adresse',
'Email Channels (Inbound)': 'E-Mail Kanäle (eingehend)',
'Email InBox': 'E-Mail Eingang',
'Email Settings': 'E-Mail-Einstellungen',
'Email settings updated': 'E-Mail-Einstellungen aktualisiert',
'Email': 'E-Mail',
'Embalming': 'Einbalsamierung',
'Embassy': 'Botschaft',
'Emergencies': 'Notfälle',
'Emergency Capacity Building project': 'Notfall-Kompetenzbildungsprojekt',
'Emergency Contacts': 'Notfallkontakte',
'Emergency Department': 'Notfall-Abteilung',
'Emergency Shelter': 'Notunterkunft',
'Emergency Support Facility': 'Notfall-Unterstützungseinrichtung',
'Emergency Support Service': 'Notfall-Unterstützungsdienst',
'Emergency Telecommunications': 'Notfall-Telekommunikation',
'Emergency': 'Notfall',
'Enable Crop': 'Beschneiden aktivieren',
'Enable/Disable Layers': 'Layer aktivieren/deaktivieren',
'Enabled': 'Aktiviert',
'Enabled?': 'Aktiviert?',
'End Date': 'Enddatum',
'End date should be after start date': 'Enddatum muss nach dem Startdatum liegen',
'End date': 'Enddatum',
'End of Period': 'Ende des Zeitraums',
'Enter a GPS Coord': 'Geben Sie eine GPS Koordinate ein',
'Enter a name for the spreadsheet you are uploading (mandatory).': 'Geben Sie einen Namen für die Tabelle an, die Sie hochladen (obligatorisch).',
'Enter a new support request.': 'Geben Sie eine neue Unterstützungsanfrage ein.',
'Enter a unique label!': 'Geben Sie eine eindeutige Bezeichnung ein!',
'Enter a valid date before': 'Geben Sie zuvor ein gültiges Datum ein',
'Enter a valid email': 'Geben Sie eine gültige E-Mail-Adresse ein',
'Enter a valid future date': 'Geben Sie ein gültiges, zukünftiges Datum ein',
'Enter an integer greater or equal to 0': 'Geben Sie eine ganze Zahl größer oder gleich 0 ein',
'Enter or scan ID': 'ID eingeben/scannen',
'Enter some characters of the ID or name to start the search, then select from the drop-down': 'Geben Sie die ersten Zeichen der ID oder des Namens ein um die Suche zu starten, und wählen Sie dann aus der Liste aus',
'Enter some characters to bring up a list of possible matches': 'Geben Sie einige Zeichen ein um eine Liste möglicher Übereinstimmungen anzuzeigen',
'Enter some characters to bring up a list of possible matches.': 'Geben Sie einige Zeichen ein um eine Liste von möglichen Übereinstimmungen anzuzeigen.',
'Enter tags separated by commas.': 'Geben Sie die Tags mit Komma getrennt ein.',
'Enter the same password as above': 'Wiederholen Sie das Kennwort von oben',
'Entered': 'Eingegeben',
'Entering a phone number is optional, but doing so allows you to subscribe to receive SMS messages.': 'Die Eingabe einer Telefonnummer ist freiwillig, sie erlaubt Ihnen aber SMS-Nachrichten zu abonnieren und zu empfangen.',
'Entitlement Period': 'Anspruchszeitraum',
'Entry added': 'Eintrag hinzugefügt',
'Entry deleted': 'Eintrag gelöscht',
'Entry updated': 'Eintrag aktualisiert',
'Environment': 'Umgebung/Umwelt',
'Equipment': 'Ausrüstung',
'Error Tickets': 'Fehlertickets',
'Error encountered while applying the theme.': 'Bei der Anwendung des Themas ist ein Fehler aufgetreten.',
'Error in message': 'Fehler in der Nachricht',
'Errors': 'Fehler',
'Essential Staff': 'Unverzichtbarer Mitarbeiter',
'Essential Staff?': 'Unverzichtbarer Mitarbeiter?',
'Est. Delivery Date': 'Geschätztes Lieferdatum',
'Established by': 'Festgestellt durch',
'Established on': 'Festgestellt am',
'Estimated # of households who are affected by the emergency': 'Geschätzte Anzahl von Haushalten, die vom Notfall betroffen sind',
'Estimated # of people who are affected by the emergency': 'Geschätzte Anzahl von Menschen, die vom Notfall betroffen sind',
'Estimated Delivery Date': 'Voraus. Liefertermin',
'Estimated Overall Building Damage': 'Geschätzter allgemeiner Gebäudeschaden',
'Estimated Population': 'Geschätzte Bevölkerungszahl',
'Estimated total number of people in institutions': 'Geschätzte Gesamtzahl von Menschen in Einrichtungen',
'Euros': 'Euro',
'Evacuating': 'Evakuieren',
'Evacuees Capacity (Day and Night)': 'Kapazität für Evakuierte (Tag und Nacht)',
'Evacuees Capacity (Night only)': 'Kapazität für Evakuierte (nur Nacht)',
'Evaluate the information in this message. (This value SHOULD NOT be used in public warning applications.)': 'Informationen in dieser Nachricht bewerten. (Dieser Wert sollte NICHT in öffentlichen Warnanwendungen verwendet werden.)',
'Event Details': 'Details zum Ereignis',
'Event Registration': 'Ereignisregistrierung',
'Event Type Details': 'Details zum Ereignistyp',
'Event Type created': 'Ereignistyp angelegt',
'Event Type deleted': 'Ereignistyp gelöscht',
'Event Type updated': 'Ereignistyp aktualisiert',
'Event Type': 'Ereignistyp',
'Event Types': 'Ereignistypen',
'Event added': 'Ereignis hinzugefügt',
'Event deleted': 'Ereignis gelöscht',
'Event registered': 'Ereignis registriert',
'Event updated': 'Ereignis aktualisiert',
'Event': 'Ereignis',
'Events': 'Ereignisse',
'Example': 'Beispiel',
'Exceeded': 'Überschritten',
'Excellent': 'Ausgezeichnet',
'Exclude cases with this flag from certain reports': 'Fälle mit dieser Flagge von bestimmten Berichten ausschließen',
'Exclude contents': 'Inhalte ausschließen',
'Exclude from Reports': 'Von Berichten ausschließen',
'Excreta disposal': 'Entsorgung von Exkrementen',
'Execute a pre-planned activity identified in <instruction>': 'Ausführen einer vorausgeplanten Aktivität, identifiziert in <instruction>',
'Exercise': 'Übung',
'Exercise?': 'Übung?',
'Exercises mean all screens have a watermark & all notifications have a prefix.': 'Übungen bedeuten, dass alle Anzeigen ein Wasserzeichen & alle Benachrichtigungen ein Präfix haben.',
'Existing Placard Type': 'Vorhandener Plakattyp',
'Existing food stocks': 'Vorhandener Lebensmittelvorrat',
'Existing location cannot be converted into a group.': 'Vorhandener Standort kann nicht in eine Gruppe transformiert werden.',
'Exits': 'Ausgänge',
'Experience': 'Erfahrung',
'Expiration Date': 'Ablaufdatum',
'Expiration Report': 'Ablaufbericht',
'Expired?': 'Abgelaufen?',
'Expiring Staff Contracts Report': 'Berichte zu ablaufenden Mitarbeiterverträgen',
'Expiry (month)': 'Ablauf (Monat)',
'Expiry (months)': 'Ablauf (Monate)',
'Expiry Date': 'Ablaufdatum',
'Explosive Hazard': 'Explosionsgefahr',
'Export Data': 'Daten exportieren',
'Export Database as CSV': 'Datenbank als CSV exportieren',
'Export as': 'Exportieren als',
'Export in %(format)s format': 'Exportieren im %(format)s-Format',
'Export in GPX format': 'Im GPX-Format exportieren',
'Export in KML format': 'Im KML-Format exportieren',
'Export in OSM format': 'Im OSM-Format exportieren',
'Export in PDF format': 'Im PDF-Format exportieren',
'Export in RSS format': 'Im RSS-Format exportieren',
'Export in XLS format': 'Im XLS-Format exportieren',
'Exterior Only': 'Nur Außenbereich',
'Exterior and Interior': 'Außen- und Innenbereich',
'External (Hospital / Police)': 'Extern (Krankenhaus/Polizei)',
'External Cooperation': 'Externe Kooperation',
'External': 'Extern',
'Eye Color': 'Augenfarbe',
'Facebook Channels': 'Facebook Kanäle',
'Facial hair, color': 'Gesichtsbehaarung, Farbe',
'Facial hair, type': 'Gesichtsbehaarung, Art',
'Facial hear, length': 'Gesichtsbehaarung, Länge',
'Facilities': 'Einrichtungen',
'Facility Contact': 'Kontakt für Einrichtung',
'Facility Details': 'Details zur Einrichtung',
'Facility Operations': 'Einrichtungsmanagement',
'Facility Status': 'Status der Einrichtung',
'Facility Type': 'Einrichtungstyp',
'Facility Types': 'Einrichtungstypen',
'Facility added': 'Einrichtung hinzugefügt',
'Facility or Location': 'Einrichtung oder Standort',
'Facility removed': 'Einrichtung entfernt',
'Facility updated': 'Einrichtung aktualisiert',
'Facility': 'Einrichtung',
'Fail': 'Fehlgeschlagen',
'Failed!': 'Fehlgeschlagen!',
'Fair': 'Mäßig',
'Falling Object Hazard': 'Gefahr durch herabstürzende Objekte',
'Families': 'Familien',
'Families/HH': 'Familien/HH',
'Family Member Details': 'Details zum Familienmitglied',
'Family Member added': 'Familienmitglied hinzugefügt',
'Family Member removed': 'Familienmitglied entfernt',
'Family Member updated': 'Familienmitglied aktualisiert',
'Family Members': 'Familienmitglieder',
'Family Reunification': 'Familienzusammenführung',
'Family Role': 'Familienrolle',
'Family Transferable': 'Familie Transferierbar',
'Family tarpaulins received': 'Planen für Familien erhalten',
'Family tarpaulins, source': 'Herkunft der Planen für Familien',
'Family': 'Familie',
'Family/friends': 'Familie/Freunde',
'Farmland/fishing material assistance, Rank': 'Ackerland/Materialhilfe für Fischerei, Rang',
'Fatalities': 'Verstorbene',
'Father': 'Vater',
'Faulty Electric Appliance': 'Defektes Elektrogerät',
'Feature Layer added': 'Objekt-Layer hinzugefügt',
'Feature Layer deleted': 'Objekt-Layer gelöscht',
'Feature Layer updated': 'Objekt-Layer aktualisiert',
'Feature Layers': 'Objekt-Ebenen',
'Feature Namespace': 'Namespace des Objekts',
'Feature Request': 'Funktionswunsch',
'Feature Type': 'Objektart',
'Features Include': 'Zu den Funktionen gehören',
'Federal State': 'Bundesland',
'Feeds': 'Newsfeeds',
'Female headed households': 'Weiblich geführte Haushalte',
'Female': 'Weiblich',
'Few': 'Wenige',
'Field Hospital': 'Feldlazarett',
'Field': 'Feld',
'File': 'Datei',
'Fill in Latitude': 'Geben Sie den Breitengrad ein',
'Fill in Longitude': 'Geben Sie den Längengrad ein',
'Filter Field': 'Filter Feld',
'Filter Options': 'Filteroptionen',
'Filter Tweets by the date they were tweeted on': 'Filtere Tweets nach dem Datum der Sendung',
'Filter Tweets by who tweeted them': 'Filtere Tweets nach sendender Person',
'Filter Value': 'Filter Wert',
'Filter by Date': 'Nach Datum filtern',
'Filter by Location': 'Nach Standort filtern',
'Filter by Organization': 'Nach Organisation filtern',
'Filter by Tag': 'Nach Tag filtern',
'Find Dead Body Report': 'Suche Leichenbericht',
'Find Hospital': 'Krankenhaus finden',
'Find Person Record': 'Personendatensatz finden',
'Find Volunteers': 'Freiwillige finden',
'Find a Person Record': 'Suche einen Personendatensatz',
'Find': 'Suchen',
'Fingerprint': 'Fingerabdruck',
'Fingerprinting': 'Fingerabdrücke machen',
'Fingerprints': 'Fingerabdrücke',
'Finished Jobs': 'Erledigte Jobs',
'Fire suppression and rescue': 'Brandbekämpfung und Rettung',
'Fire': 'Feuer',
'First Name': 'Vorname',
'First name': 'Vorname',
'First': 'Erste',
'Fishing': 'Fischerei',
'Flags': 'Flaggen',
'Flash Flood': 'Sturzflut',
'Flash Freeze': 'Schockfrost',
'Flexible Impact Assessments': 'Flexible Folgenabschätzungen',
'Flood Alerts show water levels in various parts of the country': 'Flut Alarme zeigen Wasserstände in verschiedenen Teilen des Landes.',
'Flood Alerts': 'Flut Alarme',
'Flood Depth': 'Fluthöhe',
'Flood Report Details': 'Details zum Flutbericht',
'Flood Report added': 'Flutbericht hinzugefügt',
'Flood Report deleted': 'Flutbericht gelöscht',
'Flood Report updated': 'Flutbericht aktualisiert',
'Flood Report': 'Flutbericht',
'Flood Reports': 'Flutberichte',
'Flood': 'Flut',
'Flow Status': 'Status des Ablaufs',
'Fog': 'Nebel',
'Folder': 'Ordner',
'Follow up': 'Wiedervorlage',
'Follow-up required': 'Wiedervorlage erforderlich',
'Food Distribution Statistics': 'Essensausgabe-Statistik',
'Food Distribution overdue': 'Essensausgabe überfällig',
'Food Distribution': 'Essensausgabe',
'Food Supply': 'Lebensmittelversorgung',
'Food assistance': 'Lebensmittel Hilfe',
'Food': 'Lebensmittel',
'Footer file %s missing!': 'Fußzeilen-Datei %s fehlt!',
'Footer': 'Fußzeile',
'For Entity': 'Für Organisation/Einheit',
'For a country this would be the ISO2 code, for a Town, it would be the Airport Locode.': 'Für ein Land wäre dies der ISO2-Code, für eine Stadt der Flughafen-Locode.',
'For each sync partner, there is a default sync job that runs after a specified interval of time. You can also set up more sync jobs which could be customized on your needs. Click the link on the right to get started.': 'Für jeden Sync-Partner gibt es einen Standard-Sync-Job, der nach einem vordefinierten Zeitintervall ausgeführt wird. Sie können auch weitere Sync-Jobs einrichten, die an Ihre Anforderungen angepasst werden können. Klicken Sie auf den Link rechts, um zu beginnen.',
'For enhanced security, you are recommended to enter a username and password, and notify administrators of other machines in your organization to add this username and password against your UUID in Synchronization -> Sync Partners': 'Für erweiterte Sicherheit empfiehlt sich die Eingabe eines Benutzernamens und Passworts. Bitte benachrichtigen Sie die Administratoren der anderen Geräte in Ihrer Organisation, damit diese diesen Benutzernamen und dieses Passwort unter Ihrer UUID in Synchronisation -> Sync-Partner hinterlegen.',
'For live help from the Sahana community on using this application, go to': 'Für direkte Hilfe von der Sahana Community zur Anwendung dieses Programmes, gehen Sie zu',
'For messages that support alert network internal functions': 'Für Nachrichten, die Netzwerkswarnungen interner Funktionen unterstützen',
'For more details on the Sahana Eden system, see the': 'Weitere Informationen zum Sahana Eden System finden Sie unter',
'For more information, see': 'Weitere Informationen finden Sie unter',
'For': 'Für',
'Forest Fire': 'Waldbrand',
'Formal camp': 'Offizielles Camp',
'Forms': 'Formulare',
'Found': 'Gefunden',
'Foundations': 'Stiftungen',
'Free for domestic animals': 'Haustiere zugelassen',
'Free places': 'Freie Plätze',
'Freezing Drizzle': 'Gefrierender Nieselregen',
'Freezing Rain': 'Gefrierender Regen',
'Freezing Spray': 'Gefrierende Gischt',
'French': 'Französisch',
'Friday': 'Freitag',
'From ': 'Von ',
'From Address': 'Herkunftsadresse',
'From Adress': 'Herkunftsadresse',
'From Facility': 'Von Einrichtung',
'From Inventory': 'Aus dem Bestand',
'From Location': 'Vom Standort',
'From Organization': 'Von der Organisation',
'From': 'Von',
'Fulfil. Status': 'Status der Bedarfsdeckung',
'Fulfill Status': 'Status der Bedarfsdeckung',
'Fulfillment Status': 'Auftragserfüllungsstatus',
'Full beard': 'Vollbart',
'Full': 'Voll',
'Fullscreen Map': 'Vollbildkarte',
'Functions available': 'Verfügbare Funktionen',
'Funding Organization': 'Finanzierende Organisation',
'Funding': 'Finanzierung',
'Funeral': 'Beerdigung',
'Further Action Recommended': 'Weitere Maßnahmen empfohlen',
'GIS Reports of Shelter': 'GIS-Berichte der Unterkünfte',
'GIS integration to view location details of the Shelter': 'GIS-Integration um Details zum Standort der Unterkunft zu erhalten',
'GPS Marker': 'GPS Markierung/Symbol',
'GPS Track File': 'GPS Track Datei',
'GPS Track': 'GPS Track',
'GPS eXchange format': 'GPS-Austauschformat',
'GPX Track': 'GPX Track',
'GRN Number': 'GRN Nummer',
'GRN': 'GRN',
'GU Done': 'GU erledigt',
'Gap Analysis Map': 'Karte zur Lückenanalyse',
'Gap Analysis Report': 'Bericht zur Lückenanalyse',
'Gap Analysis': 'Lückenanalyse',
'Gap Map': 'Lückenkarte',
'Gap Report': 'Bericht über Lücken',
'Gateway Settings': 'Gateway-Einstellungen',
'Gateway settings updated': 'Gateway-Einstellungen aktualisiert',
'Gateway': 'Gateway',
'Gender': 'Geschlecht',
'General Comment': 'Allgemeine Bemerkung',
'General Counsel': 'Allgemeine Beratung',
'General Medical/Surgical': 'Allgemein - Medizinisch/Chirurgisch',
'General emergency and public safety': 'Allgemein - Notfall und öffentliche Sicherheit',
'General information on demographics': 'Allgemein - Informationen zur Demographie',
'General': 'Allgemein',
'Geocode': 'Geocodierung',
'Geocoder Selection': 'Geocoder Auswahl',
'Geometry Name': 'Name der Geometrie',
'Geophysical (inc. landslide)': 'Geophysikalisch (inkl. Erdrutsch)',
'Geotechnical Hazards': 'Geotechnische Gefahren',
'Geotechnical': 'Geotechnisch',
'Geraldo module not available within the running Python - this needs installing for PDF output!': 'Das Modul Geraldo steht in der aktiven Python-Umgebung nicht zur Verfügung - für die PDF-Ausgabe muss es nachinstalliert werden!',
'German': 'Deutsch',
'Get incoming recovery requests as RSS feed': 'Empfangen von eingehenden Bergungsanforderungen als RSS-Feed',
'Give a brief description of the image, e.g. what can be seen where on the picture (optional).': 'Kurze Beschreibung des Bildes, z. B. was wo auf dem Bild zu sehen ist (optional).',
'Give information about where and when you have seen them': 'Geben Sie Information wo und wann Sie sie gesehen haben',
'Global Messaging Settings': 'Globale Nachrichteneinstellungen',
'Go to Request': 'Zur Anfrage',
'Go': 'Los',
'Goatee': 'Spitzbart',
'Good Condition': 'Guter Zustand',
'Good': 'Gut',
'Goods Received Note': 'Warenempfangsbestätigung',
'Government District': 'Regierungsbezirk',
'Government UID': 'Regierungs-UID',
'Government building': 'Regierungsgebäude',
'Government': 'Regierung',
'Grade': 'Klasse',
'Greek': 'Griechisch',
'Green': 'Grün',
'Ground movement, fissures': 'Untergrundbewegung, Risse',
'Ground movement, settlement, slips': 'Untergrundbewegung, Bodensenkung, Abrutsche',
'Group Description': 'Gruppenbeschreibung',
'Group Details': 'Gruppendetails',
'Group Head': 'Gruppenleiter',
'Group Member added': 'Gruppenmitglied hinzugefügt',
'Group Members': 'Gruppenmitglieder',
'Group Memberships': 'Gruppenzugehörigkeiten',
'Group Name': 'Gruppenname',
'Group Size Day': 'Gruppengröße Tag',
'Group Size Night': 'Gruppengröße Nacht',
'Group Size': 'Gruppengröße',
'Group Title': 'Gruppentitel',
'Group Type': 'Gruppentyp',
'Group added': 'Gruppe hinzugefügt',
'Group deleted': 'Gruppe gelöscht',
'Group description': 'Gruppenbeschreibung',
'Group updated': 'Gruppe aktualisiert',
'Group': 'Gruppe',
'Grouped by': 'Gruppiert nach',
'Groups removed': 'Gruppen entfernt',
'Groups': 'Gruppen',
'Guest': 'Gast',
'HELP': 'HILFE',
'HR Manager': 'Personalmanager',
'Hail': 'Hagel',
'Hair Color': 'Haarfarbe',
'Hair Length': 'Haarlänge',
'Hair Style': 'Haarschnitt',
'Has data from this Reference Document been entered into Sahana?': 'Wurden Daten von diesem Referenzdokument in Sahana eingetragen?',
'Has the Certificate for receipt of the shipment been given to the sender?': 'Wurde das Zertifikat für den Empfang der Lieferung an den Absender übergeben?',
'Has the GRN (Goods Received Note) been completed?': 'Wurde die Warenempfangsmeldung (GRN) ausgefüllt?',
'Hazard Pay': 'Gefahrenzulage',
'Hazardous Material': 'Gefahrgut',
'Hazardous Road Conditions': 'Gefährliche Straßenverhältnisse',
'Head of Family': 'Familienoberhaupt',
'Header Background': 'Hintergrund der Kopfzeile',
'Header background file %s missing!': 'Hintergrund der Kopfzeile Datei %s fehlt!',
'Headquarters': 'Hauptquartiere',
'Health Care': 'Gesundheitsfürsorge',
'Health care assistance, Rank': 'Unterstützung Gesundheitspflege, Rang',
'Health center with beds': 'Gesundheitszentrum mit Betten',
'Health center without beds': 'Gesundheitszentrum ohne Betten',
'Health center': 'Gesundheitszentrum',
'Health services status': 'Status des Gesundheitswesens',
'Health': 'Gesundheit',
'Healthcare Worker': 'Arbeiter im Gesundheitswesen',
'Heat Wave': 'Hitzewelle',
'Heat and Humidity': 'Wärme und Feuchtigkeit',
'Height (cm)': 'Höhe (cm)',
'Height (m)': 'Höhe (m)',
'Height': 'Höhe',
'Heliports': 'Hubschrauberlandeplätze',
'Help Wanted': 'Hilfe benötigt',
'Help': 'Hilfe',
'Helps to monitor status of hospitals': 'Hilfe um den Status von Krankenhäusern zu überwachen',
'Helps to report and search for missing persons': 'Hilfe beim Melden von und bei der Suche nach vermissten Personen',
'Here are the solution items related to the problem.': 'Hier sind die mit diesem Problem verbundenen Lösungselemente.',
'Heritage Listed': 'Denkmalgeschützt',
'Hide Picture': 'Bild verstecken',
'Hide Table': 'Tabelle verstecken',
'Hide': 'Verstecken',
'Hierarchy Level 0 Name (i.e. Country)': 'Hierarchiestufe 0 Name (d.h. Land)',
'Hierarchy Level 1 Name (e.g. State or Province)': 'Hierarchiestufe 1 Name (z. B. Land oder Provinz / Gebiet)',
'Hierarchy Level 2 Name (e.g. District or County)': 'Hierarchiestufe 2 Name (z. B. Bezirk)',
'Hierarchy Level 3 Name (e.g. City / Town / Village)': 'Hierarchiestufe 3 Name (z. B. Ort / Stadt / Dorf)',
'Hierarchy Level 4 Name (e.g. Neighbourhood)': 'Hierarchiestufe 4 Name (z. B. Nachbarschaft)',
'Hierarchy Level 5 Name': 'Hierarchiestufe 5 Name',
'Hierarchy': 'Hierarchie',
'High Tide Depth': 'Tiefe bei maximaler Tide',
'High Water': 'Hochwasser',
'High': 'Hoch',
'Highest Priority Open Requests': 'Offene Anfragen höchster Priorität',
'History': 'Geschichte',
'Hit the back button on your browser to try again.': 'Verwenden Sie die Back Schaltfläche ihres Browsers um es erneut zu versuchen.',
'Holiday Address': 'Urlaubsadresse',
'Home Address': 'Heimatadresse',
'Home Country': 'Land des Wohnsitzes',
'Home Crime': 'Häusliche Kriminalität',
'Home': 'Startseite',
'Hospital Details': 'Details zum Krankenhaus',
'Hospital Status Report': 'Statusbericht zum Krankenhaus',
'Hospital information added': 'Krankenhausinformationen hinzugefügt',
'Hospital information deleted': 'Krankenhausinformationen gelöscht',
'Hospital information updated': 'Krankenhausinformationen aktualisiert',
'Hospital status assessment.': 'Beurteilung des Zustands des Krankenhauses.',
'Hospital': 'Krankenhaus',
'Hospitals': 'Krankenhäuser',
'Hotplate': 'Kochplatte',
'Hour': 'Stunde',
'Hours (Average)': 'Stunden (Durchschnitt)',
'Hours (Total)': 'Stunden (Gesamt)',
'Hours by Program Import': 'Stunden gem. Programm Import',
'Hours by Program Report': 'Stunden nach Programmbericht',
'Hours by Role Import': 'Stunden gem. Rollen Import',
'Hours by Role Report': 'Stunden nach Rollenbericht',
'Hours by': 'Stunden gem.',
'Hours': 'Stunden',
'Household kits received': 'Haushaltsbausätze (-kits) erhalten',
'Household kits, source': 'Herkunft der Haushaltbausätze (-kits)',
'Housing Unit Capacity': 'Maximale Belegungszahl für Unterkunftseinheit',
'Housing Unit Day and Night Capacity': 'Maximale Tag- und Nacht-Belegungszahl für Unterkunftseinheit',
'Housing Unit Name': 'Name der Unterkunftseinheit',
'Housing Unit': 'Unterkunftseinheit',
'Housing Units': 'Unterkunftseinheiten',
'How does it work?': 'Wie funktioniert das?',
'How is this person affected by the disaster? (Select all that apply)': 'Wie ist diese Person von der Katastrophe betroffen? (Wählen Sie alles Zutreffende aus)',
'How long will the food last?': 'Wie lange werden die Lebensmittel reichen?',
'How many Boys (0-17 yrs) are Dead due to the crisis': 'Wie viele Jungen (0-17 Jahre) sind durch die Krise umgekommen',
'How many Boys (0-17 yrs) are Injured due to the crisis': 'Wie viele Jungen (0-17 Jahre) sind durch die Krise verletzt worden',
'How many Boys (0-17 yrs) are Missing due to the crisis': 'Wie viele Jungen (0-17 Jahre) sind aufgrund der Krise verschollen',
'How many Girls (0-17 yrs) are Dead due to the crisis': 'Wie viele Mädchen (0-17 Jahre) sind durch die Krise umgekommen',
'How many Girls (0-17 yrs) are Injured due to the crisis': 'Wie viele Mädchen (0-17 Jahre) sind durch die Krise verletzt worden',
'How many Girls (0-17 yrs) are Missing due to the crisis': 'Wie viele Mädchen (0-17 Jahre) sind aufgrund der Krise verschollen',
'How many Men (18 yrs+) are Dead due to the crisis': 'Wie viele Männer (18+ Jahre) sind durch die Krise umgekommen',
'How many Men (18 yrs+) are Injured due to the crisis': 'Wie viele Männer (18+ Jahre) wurden wegen der Krise verletzt',
'How many Men (18 yrs+) are Missing due to the crisis': 'Wie viele Männer (18+ Jahre) sind aufgrund der Krise verschollen',
'How many Women (18 yrs+) are Dead due to the crisis': 'Wie viele Frauen (18+ Jahre) sind durch die Krise umgekommen',
'How many Women (18 yrs+) are Injured due to the crisis': 'Wie viele Frauen (18+ Jahre) wurden wegen der Krise verletzt',
'How many Women (18 yrs+) are Missing due to the crisis': 'Wie viele Frauen (18 Jahre und älter) sind aufgrund der Krise verschollen',
'How many days will the supplies last?': 'Wie viele Tage werden die Waren reichen?',
'How many new cases have been admitted to this facility in the past 24h?': 'Wie viele neue Fälle wurden während der letzten 24 Stunden dieser Einrichtung zugewiesen?',
'How many of the patients with the disease died in the past 24h at this facility?': 'Wie viele der Patienten mit dieser Krankheit sind in den letzten 24 Stunden in dieser Einrichtung gestorben?',
'How many patients with the disease are currently hospitalized at this facility?': 'Wieviele Patienten mit dieser Krankheit sind momentan in dieser Einrichtung in Behandlung?',
'How much detail is seen. A high Zoom level means lot of detail, but not a wide area. A low Zoom level means seeing a wide area, but not a high level of detail.': 'Wie viele Details sind sichtbar. Eine hohe Zoom-Stufe bedeutet viele Details, aber keine gute Übersicht. Eine niedrige Zoom-Stufe führt zu einer guten Übersicht, es fehlen aber die Details.',
'Hub': 'Zentrum',
'Human Resource Details': 'Details zur Personalressource',
'Human Resource Management': 'Management der Personalressourcen',
'Human Resource added': 'Personalressource hinzugefügt',
'Human Resource removed': 'Personalressource entfernt',
'Human Resource updated': 'Personalressource aktualisiert',
'Human Resource': 'Personalressource',
'Human Resources': 'Personalressourcen',
'Humanitarian NGO': 'Humanitäre NGO',
'Humanitarian Use': 'Humanitäre Zwecke',
'Hurricane Force Wind': 'Wind in Hurrikanstärke',
'Hurricane': 'Wirbelsturm',
'Hygiene kits received': 'Hygienekits erhalten',
'Hygiene kits, source': 'Herkunft der Hygienekits',
'Hygiene practice': 'Hygienepraxis',
'Hygiene problems': 'Hygieneprobleme',
'I am available in the following area(s)': 'Ich stehe in folgenden Bereichen zur Verfügung',
'IATA': 'IATA',
'ICAO': 'ICAO',
'ID Tag Number': 'Identifikations-Etikett-Nummer',
'ID Tag': 'Identifikationsetikett',
'ID Type': 'ID-Typ',
'ID/Ref.No.': 'ID/Az.',
'Ice Pressure': 'Eisdruck',
'Iceberg': 'Eisberg',
'Identification Report': 'Identifizierungsbericht',
'Identification Reports': 'Identifizierungsberichte',
'Identification Status': 'Status der Identifizierung',
'Identification': 'Identifizierung',
'Identified as': 'Identifiziert als',
'Identified by': 'Identifiziert durch',
'Identity Details': 'Details zur Identität',
'Identity added': 'Identität hinzugefügt',
'Identity deleted': 'Identität gelöscht',
'Identity updated': 'Identität aktualisiert',
'Identity': 'Identität',
'If a ticket was issued then please provide the Ticket ID.': 'Wenn ein Ticket ausgestellt wurde, bitte die Ticket-ID angeben.',
'If a user verifies that they own an Email Address with this domain, the Approver field is used to determine whether & by whom further approval is required.': 'Wenn ein Benutzer sicherstellt, dass er oder sie eine Email-Adresse in dieser Domäne besitzt, wird das Approver Feld dazu verwendet, um zu bestimmen ob und von wem weitere Genehmigungen erforderlich sind.',
'If it is a URL leading to HTML, then this will downloaded.': 'Handelt es sich um eine URL zu einer HTML Seite, dann wird diese heruntergeladen.',
'If neither are defined, then the Default Marker is used.': 'Wenn nichts davon definiert wurde, wird der Standard Marker (Symbol) verwendet.',
'If no marker defined then the system default marker is used': 'Wenn keine Markierung (Symbolisierung) definiert ist dann wird die im System festgelegte Standardmarkierung verwendet',
'If no, specify why': 'Wenn nein, geben Sie bitte einen Grund dafür an',
'If none are selected, then all are searched.': 'Wird keine ausgewählt, werden alle durchsucht.',
'If the location is a geographic area, then state at what level here.': 'Wenn der Ort ein geographisches Gebiet ist, geben Sie bitte eine entsprechende Stufe an',
'If the request type is "Other", please enter request details here.': 'Wenn der Anfragetyp "Andere" ist, geben Sie bitte hier weitere Details zur Anfrage ein.',
'If this field is populated then a user with the Domain specified will automatically be assigned as a Staff of this Organization': 'Wenn dieses Feld ausgefüllt ist, dann wird ein Benutzer mit der gleichen Domainadresse automatisch als Mitarbeiter dieser Organisation zugeordnet.',
'If this is set to True then mails will be deleted from the server after downloading.': "Wenn dies auf 'Wahr' gesetzt ist, dann werden die Mails nach dem Herunterladen vom Server gelöscht.",
'If this record should be restricted then select which role is required to access the record here.': 'Wenn der Zugriff auf diesen Datensatz beschränkt werden soll, wählen Sie hier die Rolle aus, die für den Zugriff erforderlich ist.',
'If this record should be restricted then select which role(s) are permitted to access the record here.': 'Wenn dieser Eintrag beschränkt werden soll, dann wählen Sie hier aus, welche Rolle(n) für den Zugriff auf den Eintrag berechtigt sind.',
'If yes, specify what and by whom': 'Wenn ja, geben Sie an, was und von wem',
'If yes, which and how': 'Wenn ja, welche und wie',
'If you do not enter a Reference Document, your email will be displayed to allow this data to be verified.': 'Wenn Sie kein Referenzdokument angeben, wird stattdessen ihre Mailadresse angezeigt damit die Daten verifiziert werden können.',
'If you know what the Geonames ID of this location is then you can enter it here.': 'Wenn Sie die Geonames-ID dieses Standortes kennen, können Sie diese hier eingeben.',
'If you know what the OSM ID of this location is then you can enter it here.': 'Wenn Sie die OSM-ID dieses Standortes kennen, können Sie diese hier eingeben.',
'If you need to add a new document then you can click here to attach one.': 'Wenn Sie ein neues Dokument hinzufügen wollen, können Sie hier klicken, um eines anzufügen.',
'If you want several values, then separate with': 'Wenn Sie mehrere Werte möchten, dann trennen Sie diese mit',
'If you would like to help, then please <b>sign up now</b>': 'Wenn Sie helfen möchten, dann <b>registrieren Sie sich jetzt</b>',
'Ignore Errors?': 'Fehler ignorieren?',
'Illegal Immigrant': 'Illegaler Einwanderer',
'Illiterate': 'Analphabet',
'Image Details': 'Details zum Bild',
'Image Tags': 'Tags für Bild',
'Image Type': 'Typ des Bilds',
'Image Upload': 'Bild hochladen',
'Image added': 'Bild hinzugefügt',
'Image deleted': 'Bild gelöscht',
'Image updated': 'Bild aktualisiert',
'Image': 'Bild',
'Imagery': 'Bilddaten',
'Images': 'Bilder',
'Impact Assessments': 'Folgenabschätzungen',
'Impact Details': 'Details zur Folge/Auswirkung',
'Impact Type Details': 'Details zum Typ der Auswirkung',
'Impact Type added': 'Typ der Auswirkung hinzugefügt',
'Impact Type deleted': 'Typ der Auswirkung gelöscht',
'Impact Type updated': 'Typ der Auswirkung aktualisiert',
'Impact Type': 'Auswirkungsart',
'Impact Types': 'Auswirkungsarten',
'Impact added': 'Auswirkung hinzugefügt',
'Impact deleted': 'Auswirkung gelöscht',
'Impact updated': 'Auswirkung aktualisiert',
'Impacts': 'Auswirkungen',
'Import & Export Data': 'Import & Export von Daten',
'Import Branch Organizations': 'Zweigorganisationen importieren',
'Import Catalog Items': 'Katalogartikel importieren',
'Import Data': 'Import von Daten',
'Import Event Types': 'Ereignistypen importieren',
'Import File': 'Datei importieren',
'Import Heliports': 'Hubschrauberlandeplätze importieren',
'Import Hours': 'Import Stundenliste',
'Import Incident Types': 'Vorfallstypen importieren',
'Import Locations': 'Gebiete/Standorte importieren',
'Import Participant List': 'Import Teilnehmerliste',
'Import Projects': 'Projekte importieren',
'Import Staff': 'Mitarbeiter importieren',
'Import Suppliers': 'Lieferanten importieren',
'Import Template Layout': 'Import Vorlagenlayout',
'Import Templates': 'Import Vorlagen',
'Import Training Participants': 'Kursteilnehmer importieren',
'Import Updates': 'Aktualisierungen importieren',
'Import Users': 'Import von Benutzern',
'Import Volunteers': 'Freiwillige importieren',
'Import Warehouse Stock': 'Warenlagerbestand importieren',
'Import Warehouses': 'Warenlager importieren',
'Import and Export': 'Import und Export',
'Import from CSV': 'Import einer CSV-Datei',
'Import from OpenStreetMap': 'Import aus OpenStreetMap',
'Import from Ushahidi Instance': 'Import aus Ushahidi Instanz',
'Import if Master': 'Import wenn Master',
'Import multiple tables as CSV': 'Mehrere Tabellen als CSV importieren',
'Import': 'Import',
'Important': 'Wichtig',
'Importantly where there are no aid services being provided': 'Insbesondere dort, wo keine Hilfsleistungen angeboten werden',
'Importing data from spreadsheets': 'Importieren von Daten aus Tabellendokumenten',
'Improper decontamination': 'Unzureichende Dekontamination',
'Improper handling of dead bodies': 'Unzureichende Behandlung von Leichen',
'In Catalogs': 'In Katalogen',
'In Inventories': 'In den Beständen',
'In Process': 'In Bearbeitung',
'In Progress': 'In Bearbeitung',
'In Window layout the map maximises to fill the window, so no need to set a large value here.': 'Beim Aufbau des Fensters wird die Karte maximiert, um das Fenster auszufüllen, daher ist es nicht notwendig, hier einen großen Wert festzulegen.',
'InBox': 'Eingang',
'Inactive': 'Inaktiv',
'Inactive/Disappeared': 'Inaktiv/Untergetaucht',
'Inbound Mail Settings': 'Eingehende Mail-Einstellungen',
'Incident Categories': 'Kategorien für Vorfälle',
'Incident Report Details': 'Details zum Vorfall-Bericht',
'Incident Report added': 'Vorfall-Bericht hinzugefügt',
'Incident Report deleted': 'Vorfall-Bericht gelöscht',
'Incident Report updated': 'Vorfall-Bericht aktualisiert',
'Incident Report': 'Vorfall-Bericht',
'Incident Reporting System': 'Vorfall-Berichtsystem',
'Incident Reporting': 'Vorfall-Berichtswesen',
'Incident Reports': 'Vorfall-Berichte',
'Incident Timeline': 'Zeitplan der Ereignisse',
'Incident Type': 'Vorfallstyp',
'Incident Types': 'Typen von Vorfällen',
'Incident': 'Vorfall',
'Incidents': 'Vorfälle',
'Incoming Shipment canceled': 'Eingehende Sendung abgebrochen',
'Incoming Shipment updated': 'Eingehende Sendung aktualisiert',
'Incoming': 'Eingehend',
'Incomplete': 'Unvollständig',
'Indirect support cost HQ': 'Indirekte Unterstützungskosten Hauptquartier',
'Individuals': 'Einzelpersonen',
'Industrial Crime': 'Industrielle Kriminalität',
'Industrial': 'Industriell',
'Industry Fire': 'Industriefeuer',
'Infant (0-1)': 'Säugling (0-1)',
'Infectious Disease (Hazardous Material)': 'Ansteckende Krankheit (gefährliches Material)',
'Infectious Disease': 'Ansteckende Krankheit',
'Infectious Diseases': 'Infektionskrankheiten',
'Infestation': 'Befall',
'Informal Leader': 'Informeller Leiter',
'Informal camp': 'Informelles Camp',
'Information / Guidance': 'Information / Beratung',
'Information gaps': 'Informationslücken',
'Infusion catheters available': 'Infusionskatheter verfügbar',
'Infusion catheters need per 24h': 'Benötigte Infusionskatheter pro 24h',
'Infusion catheters needed per 24h': 'Benötigte Infusionskatheter pro 24h',
'Infusions available': 'Infusionen verfügbar',
'Infusions needed per 24h': 'Benötigte Infusionen pro 24h',
'Initial Situation Details': 'Details zur Ausgangssituation',
'Initials': 'Namenskürzel',
'Inspected': 'Geprüft',
'Inspection Date': 'Prüfdatum',
'Inspection date and time': 'Datum und Uhrzeit der Überprüfung',
'Inspection time': 'Zeit der Überprüfung',
'Inspector ID': 'Prüfer-ID',
'Instant Porridge': 'Hafer Fertigbrei',
'Institution': 'Institution',
'Instructions for handling of the case': 'Anweisungen zur Handhabung des Falls',
'Instructions': 'Anweisungen',
'Instructor': 'Ausbilder',
'Insufficient Privileges': 'Fehlende Berechtigung',
'Insufficient vars: Need module, resource, jresource, instance': 'Unzureichende vars: Benötige module, resource, jresource, instance',
'Insufficient': 'Nicht ausreichend',
'Intake Items': 'Annahme Güter',
'Integrated bath within housing unit': 'Bad in der Unterkunftseinheit vorhanden',
'Integrated shower within housing unit': 'Dusche in der Unterkunftseinheit vorhanden',
'Intergovernmental Organization': 'Zwischenstaatliche Organisation',
'Interior walls, partitions': 'Innenwände, Trennwände',
'Internal Communication': 'Interne Mitteilung',
'Internal Resource': 'Interne Ressource',
'Internal Resources': 'Interne Ressourcen',
'Internal Shipment': 'Interne Lieferung',
'Internal State': 'Interner Zustand',
'International NGO': 'Internationale NGO',
'International Organization': 'Internationale Organisation',
'Intervention Details': 'Details zu Maßnahmen',
'Intervention Types': 'Maßnahmenarten',
'Interventions': 'Maßnahmen',
'Interview taking place at': 'Ort des Interviews',
'Invalid Case': 'Ungültiger Fall',
'Invalid Cases': 'Ungültige Fälle',
'Invalid Query': 'Ungültige Abfrage',
'Invalid event type: %s': 'Ungültiger Ereignistyp: %s',
'Invalid request!': 'Ungültige Anfrage!',
'Invalid ticket': 'Ungültiges Ticket',
'Invalid': 'Ungültig',
'Inventories': 'Bestände',
'Inventory Item Details': 'Details zu einzelnem Bestandsartikel',
'Inventory Item updated': 'Bestandsartikel aktualisiert',
'Inventory Item': 'Bestandsartikel',
'Inventory Items include both consumable supplies & those which will get turned into Assets at their destination.': 'Bestandsartikel umfassen sowohl Verbrauchsmaterialien als auch solche die am Bestimmungsort in Anlagen umgewandelt werden.',
'Inventory Items': 'Bestandsartikel',
'Inventory Management': 'Lagerbestandsverwaltung',
'Inventory of Effects': 'Verzeichnis der persönlichen Habe',
'Inventory': 'Bestand',
'Is editing level L%d locations allowed?': 'Ist die Bearbeitung von Level L%d Standorten zulässig?',
'Is it safe to collect water?': 'Ist es sicher, Wasser zu sammeln?',
'Is this a strict hierarchy?': 'Ist dies eine strenge Hierarchie?',
'Issuing Authority': 'Ausstellende Behörde',
'It captures not only the places where they are active, but also captures information on the range of projects they are providing in each area.': 'Es erfasst nicht nur die Orte, an denen sie aktiv sind, sondern auch Informationen über den Umfang der Projekte, die sie im jeweiligen Gebiet durchführen.',
'Item Added to Shipment': 'Artikel der Lieferung hinzugefügt',
'Item Catalog Details': 'Details zum Artikelkatalog',
'Item Categories': 'Artikelkategorien',
'Item Category Details': 'Details zur Artikelkategorie',
'Item Category added': 'Artikelkategorie hinzugefügt',
'Item Category deleted': 'Artikelkategorie gelöscht',
'Item Category updated': 'Artikelkategorie aktualisiert',
'Item Category': 'Artikelkategorie',
'Item Details': 'Details zum Artikel',
'Item Pack Details': 'Details zum Artikelpaket',
'Item Pack added': 'Artikelpaket hinzugefügt',
'Item Pack deleted': 'Artikelpaket gelöscht',
'Item Pack updated': 'Artikelpaket aktualisiert',
'Item Packs': 'Artikelpakete',
'Item Tracking Status': 'Artikel Verfolgungsstatus',
'Item Type Details': 'Details zur Gegenstandsart',
'Item Type created': 'Gegenstandsart angelegt',
'Item Type deleted': 'Gegenstandsart gelöscht',
'Item Type updated': 'Gegenstandsart aktualisiert',
'Item Types': 'Gegenstandsarten',
'Item added to Inventory': 'Artikel zum Bestand hinzugefügt',
'Item added to shipment': 'Artikel der Lieferung hinzugefügt',
'Item added': 'Artikel hinzugefügt',
'Item already in Bundle!': 'Artikel bereits in Produktpaket!',
'Item already in Kit!': 'Artikel bereits in Ausstattung (Kit)!',
'Item already in budget!': 'Artikel bereits im Budget!',
'Item deleted': 'Artikel gelöscht',
'Item removed from Inventory': 'Artikel aus dem Bestand entfernt',
'Item updated': 'Artikel aktualisiert',
'Item': 'Artikel',
'Item/Description': 'Artikel/Beschreibung',
'Items in Category are Vehicles': 'Artikel in dieser Kategorie sind Fahrzeuge',
'Items in Category can be Assets': 'Artikel in der Kategorie können als Anlagen verwendet werden',
'Items': 'Artikel',
'Items/Description': 'Artikel/Beschreibung',
'Japanese': 'Japanisch',
'Jerry can': 'Kanister',
'Jew': 'Jude',
'Jewish': 'Jüdisch',
'Job Role Catalog': 'Katalog für Tätigkeiten',
'Job Role Details': 'Details zur Tätigkeit',
'Job Role added': 'Tätigkeit hinzugefügt',
'Job Role deleted': 'Tätigkeit entfernt',
'Job Role updated': 'Tätigkeit aktualisiert',
'Job Role': 'Tätigkeit',
'Job Roles': 'Tätigkeiten',
'Job Seeking': 'Arbeitssuche',
'Job Title Catalog': 'Katalog der Tätigkeitsbezeichnungen',
'Job Title': 'Tätigkeitsbezeichnung',
'Job Titles': 'Tätigkeitsbezeichnungen',
'Journal Entry Details': 'Details zum Journaleintrag',
'Journal entry added': 'Journaleintrag hinzugefügt',
'Journal entry deleted': 'Journaleintrag gelöscht',
'Journal entry updated': 'Journaleintrag aktualisiert',
'Key Details': 'Details zum Schlüssel',
'Key added': 'Schlüssel hinzugefügt',
'Key deleted': 'Schlüssel gelöscht',
'Key updated': 'Schlüssel aktualisiert',
'Key': 'Schlüssel',
'Keys': 'Schlüssel',
'Kit Contents': 'Inhalt der Ausstattung (Kit)',
'Kit Details': 'Details zur Ausstattung (Kit)',
'Kit Updated': 'Ausstattung (Kit) aktualisiert',
'Kit added': 'Ausstattung (Kit) hinzugefügt',
'Kit deleted': 'Ausstattung (Kit) gelöscht',
'Kit updated': 'Ausstattung (Kit) aktualisiert',
'Kit': 'Ausstattung (Kit)',
'Kit?': 'Ausstattung (Kit)?',
'Kits': 'Ausstattungen (Kits)',
'Kitting': 'Ausstattung zusammenstellen',
'Kittings': 'Ausstattungszusammenstellungen',
'Knife': 'Messer',
'Known Identities': 'Bekannte Identitäten',
'Known incidents of violence against women/girls': 'Bekannte Fälle von Gewalt gegen Frauen/Mädchen',
'Known incidents of violence since disaster': 'Bekannte Fälle von Gewalt seit der Katastrophe',
'LICENSE': 'LIZENZ',
'Labor Market Integration': 'Arbeitsmarkt-Integration',
'Lack of material': 'Mangel an Material',
'Lack of school uniform': 'Fehlende Schuluniformen',
'Lack of supplies at school': 'Fehlende Vorräte an der Schule',
'Lack of transport to school': 'Fehlende Transportmöglichkeiten zur Schule',
'Lactating women': 'Stillende Frauen',
'Lahar': 'Mure',
'Landslide': 'Erdrutsch',
'Language / Communication Mode': 'Sprache / Verständigungsmodus',
'Language': 'Sprache',
'Last Check-in': 'Letzter Check-in',
'Last Check-out': 'Letzter Check-out',
'Last Downloaded': 'Zuletzt heruntergeladen',
'Last Name': 'Nachname',
'Last Pull': 'Letzter Pull',
'Last Push': 'Letzter Push',
'Last known location': 'Letzte bekannte Position',
'Last seen on': 'Zuletzt gesehen am',
'Last synchronization time': 'Zeitpunkt der letzten Synchronisierung',
'Last updated by': 'Letzte Aktualisierung durch',
'Last updated on': 'Letzte Aktualisierung am',
'Last updated': 'Letzte Aktualisierung',
'Last': 'Letzte',
'Latest Information': 'Aktuelle Informationen',
'Latitude %(lat)s is invalid, should be between %(lat_min)s & %(lat_max)s': 'Breitengrad %(lat)s ist ungültig, muss zwischen %(lat_min)s & %(lat_max)s liegen',
'Latitude & Longitude': 'Breitengrad und Längengrad',
'Latitude is North-South (Up-Down).': 'Breitengrad ist Nord-Süd (Oben-Unten).',
'Latitude is zero on the equator and positive in the northern hemisphere and negative in the southern hemisphere.': 'Der Breitengrad ist null am Äquator, positiv auf der nördlichen und negativ auf der südlichen Erdhalbkugel.',
'Latitude of Map Center': 'Breitengrad der Kartenmitte',
'Latitude of far northern end of the region of interest.': 'Nördlichster Breitengrad der betroffenen Region.',
'Latitude of far southern end of the region of interest.': 'Südlichster Breitengrad der betroffenen Region.',
'Latitude': 'Breitengrad',
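# The latitude/longitude validation messages above carry %(name)s
# placeholders. A minimal sketch (variable values illustrative only) of
# how such a message is typically filled in after translation lookup,
# which is why the tokens must be kept verbatim in the German values:
#
#     msg = T('Latitude %(lat)s is invalid, should be between %(lat_min)s & %(lat_max)s') \
#         % dict(lat=91.5, lat_min=-90, lat_max=90)
#
# Interpolation happens after the lookup, so a missing or renamed token
# in the translated string raises a KeyError at runtime.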
'Latrines': 'Toiletten',
'Law enforcement, military, homeland and local/private security': 'Exekutive, Militär, Heimatschutz und lokale/private Sicherheitsdienste',
'Layer Poperties': 'Eigenschaften der Kartenebene',
'Layer added': 'Layer hinzugefügt',
'Layer deleted': 'Layer gelöscht',
'Layer updated': 'Layer aktualisiert',
'Layer': 'Kartenebene',
'Layers updated': 'Kartenebenen aktualisiert',
'Layers': 'Kartenebenen',
'Lead Implementer': 'Hauptimplementierer',
'Leader': 'Leiter',
'Left Voluntarily': 'Freiwillig ausgereist',
'Legal Advice': 'Rechtsberatung',
'Legal Counsel': 'Rechtsberatung',
'Legally Departed': 'Legal abgereist',
'Legend Format': 'Format der Legende',
'Legend': 'Legende',
'Length (m)': 'Länge (m)',
'Less Options': 'Weniger Optionen',
'Level 1 Assessment Details': 'Stufe 1 Beurteilung - Details',
'Level 1 Assessment added': 'Stufe 1 Beurteilung hinzugefügt',
'Level 1 Assessment deleted': 'Stufe 1 Beurteilung entfernt',
'Level 1 Assessment updated': 'Stufe 1 Beurteilung aktualisiert',
'Level 1 Assessments': 'Stufe 1 Beurteilungen',
'Level 1': 'Stufe 1',
'Level 2 Assessment Details': 'Stufe 2 Beurteilung - Details',
'Level 2 Assessment added': 'Stufe 2 Beurteilung hinzugefügt',
'Level 2 Assessment deleted': 'Stufe 2 Beurteilung entfernt',
'Level 2 Assessment updated': 'Stufe 2 Beurteilung aktualisiert',
'Level 2 Assessments': 'Stufe 2 Beurteilungen',
'Level 2 or detailed engineering evaluation recommended': 'Stufe 2 oder detaillierte technische Evaluierung empfohlen',
'Level 2': 'Stufe 2',
'Level 3': 'Stufe 3',
'Level of Award': 'Stufe der Auszeichnung',
'Level': 'Stufe',
'Library support not available for OpenID': 'OpenID wird von Bibliothek nicht unterstützt',
'License Plate': 'Nummernschild',
'LineString': 'LineString',
'Link to this result': 'Link zu diesem Ergebnis',
'List / Add Baseline Types': 'Arten von Referenzdaten auflisten / hinzufügen',
'List / Add Impact Types': 'Arten von Auswirkungen auflisten / hinzufügen',
'List / Add Services': 'Leistungen auflisten / hinzufügen',
'List / Add Types': 'Typen auflisten / hinzufügen',
'List Actions': 'Liste Maßnahmen',
'List Activities': 'Aktivitäten auflisten',
'List All Assets': 'Alle Anlagen auflisten',
'List All Catalog Items': 'Auflisten aller Artikel aus dem Katalog',
'List All Commitments': 'Auflisten aller Zusagen',
'List All Entries': 'Alle Einträge auflisten',
'List All Item Categories': 'Auflisten aller Artikelkategorien',
'List All Memberships': 'Alle Mitgliedschaften auflisten',
'List All Organization Approvers & Whitelists': 'Zeige alle Organisationsbestätiger & Whitelists',
'List All Received Shipments': 'Auflisten aller empfangenen Lieferungen',
'List All Records': 'Auflisten aller Datensätze',
'List All Requested Items': 'Auflisten aller angefragten Artikel',
'List All Requests': 'Auflisten aller Anfragen',
'List All Roles': 'Zeige alle Rollen',
'List All Sent Shipments': 'Liste aller gesendeten Lieferungen',
'List All Users': 'Zeige alle Nutzer',
'List All Vehicles': 'Liste aller Fahrzeuge',
'List All': 'Alle auflisten',
'List Allowance Information': 'Informationen zum Taschengeld auflisten',
'List Alternative Items': 'Liste alternativer Artikel',
'List Appointment Types': 'Liste Terminarten',
'List Appointments': 'Liste der Termine',
'List Assessment Summaries': 'Zusammenfassungen der Beurteilungen auflisten',
'List Assessments': 'Beurteilungen auflisten',
'List Assets': 'Anlagen auflisten',
'List Availability': 'Liste Verfügbarkeit',
'List Baseline Types': 'Liste der Typen von Referenzdaten',
'List Baselines': 'Liste der Referenzdaten',
'List Branch Organizations': 'Liste Zweigorganisationen',
'List Brands': 'Marken auflisten',
'List Budgets': 'Budgets auflisten',
'List Bundles': 'Produktpakete auflisten',
'List Camp Services': 'Liste der Leistungen im Camp',
'List Camp Types': 'Liste Typen von Camps',
'List Camps': 'Liste Camps',
'List Case Flags': 'Fall Flaggen auflisten',
'List Catalog Items': 'Katalogelemente auflisten',
'List Catalogs': 'Liste Kataloge',
'List Certificates': 'Liste Zertifikate',
'List Certifications': 'Liste Zertifizierungen',
'List Checklists': 'Checklisten Auflisten',
'List Cluster Subsectors': 'Cluster Teilbereiche Auflisten',
'List Clusters': 'Cluster Auflisten',
'List Commitment Items': 'Liste zugesagter Artikel',
'List Commitments': 'Liste Zusagen',
'List Competencies': 'Liste Kompetenzen',
'List Competency Ratings': 'Liste Kompetenzrating',
'List Conflicts': 'Liste Konflikte',
'List Contact Information': 'Liste Kontaktinformationen',
'List Contacts': 'Liste Kontakte',
'List Counseling Themes': 'Liste Beratungsthemen',
'List Course Certificates': 'Liste Kurszertifikate',
'List Courses': 'Liste Kurse',
'List Credentials': 'Liste von Qualifikationen',
'List Current': 'Aktuelle Liste',
'List Defects': 'Liste Mängel',
'List Depositories': 'Verwahrungsorte auflisten',
'List Documents': 'Liste Dokumente',
'List Donors': 'Liste Spender',
'List Event Types': 'Liste der Ereignistypen',
'List Events': 'Liste Ereignisse',
'List Facilities': 'Liste Einrichtungen',
'List Family Members': 'Liste Familienmitglieder',
'List Feature Layers': 'Liste Objekt-Layer',
'List Flood Reports': 'Liste Flutberichte',
'List Groups': 'Liste Gruppen',
'List Groups/View Members': 'Liste Gruppen/Anzeige der Mitglieder',
'List Hospitals': 'Liste Krankenhäuser',
'List Human Resources': 'Liste der personellen Ressourcen',
'List Identities': 'Identitäten auflisten',
'List Images': 'Bilder auflisten',
'List Impact Assessments': 'Folgenabschätzung auflisten',
'List Impact Types': 'Auswirkungsarten auflisten',
'List Impacts': 'Auswirkungen auflisten',
'List Incident Reports': 'Vorfallberichte auflisten',
'List Item Categories': 'Liste Artikelkategorien',
'List Item Packs': 'Liste der Artikelpakete',
'List Item Types': 'Liste der Gegenstandsarten',
'List Items in Inventory': 'Liste der Artikel im Bestand',
'List Items': 'Liste der Artikel',
'List Job Roles': 'Liste der Tätigkeiten',
'List Keys': 'Schlüssel auflisten',
'List Kits': 'Liste Ausstattungen (Kits)',
'List Layers': 'Liste Layer',
'List Level 1 Assessments': 'Liste Stufe 1 Beurteilungen',
'List Level 1 assessments': 'Liste Stufe 1 Beurteilungen',
'List Level 2 Assessments': 'Liste Stufe 2 Beurteilungen',
'List Level 2 assessments': 'Liste Stufe 2 Beurteilungen',
'List Locations': 'Standorte auflisten',
'List Log Entries': 'Protokolleinträge auflisten',
'List Map Profiles': 'Liste der Kartenkonfigurationen',
'List Markers': 'Marker/Symbole auflisten',
'List Members': 'Mitglieder auflisten',
'List Memberships': 'Mitgliedschaften auflisten',
'List Messages': 'Nachrichten auflisten',
'List Missing Persons': 'Vermisste Personen auflisten',
'List Missions': 'Liste Aufträge',
'List Need Types': 'Bedarfstypen auflisten',
'List Needs': 'Bedarf auflisten',
'List Notes': 'Notizen auflisten',
'List Offices': 'Liste der Büros',
'List Organizations': 'Liste der Organisationen',
'List Peers': 'Liste der Peers',
'List Personal Effects': 'Liste der persönlichen Habe',
'List Persons': 'Liste der Personen',
'List Photos': 'Liste der Bilder',
'List Population Statistics': 'Liste Bevölkerungsstatistiken',
'List Positions': 'Liste der Positionen',
'List Problems': 'Liste der Probleme',
'List Projections': 'Liste der Kartenprojektionen',
'List Projects': 'Liste Projekte',
'List Rapid Assessments': 'Liste Schnell-Beurteilungen',
'List Received Items': 'Liste empfangene Artikel',
'List Received Shipments': 'Liste empfangene Lieferungen',
'List Records': 'Liste Datensätze',
'List Recurring Requests': 'Liste wiederkehrender Anfragen',
'List Registrations': 'Liste Registrierungen',
'List Reports': 'Liste Berichte',
'List Request Items': 'Angefragte Artikel auflisten',
'List Requests': 'Anfragen auflisten',
'List Residence Permit Types': 'Liste der Aufenthaltserlaubnistypen',
'List Residence Status Types': 'Liste der Aufenthaltsstatustypen',
'List Residence Statuses': 'Liste der Aufenthaltsstatus',
'List Residents Reports': 'Übersicht Bewohnerlisten',
'List Residents': 'Bewohnerliste',
'List Resources': 'Ressourcen auflisten',
'List Rivers': 'Flüsse auflisten',
'List Roles': 'Rollen auflisten',
'List Rooms': 'Liste Räume',
'List Scenarios': 'Liste Szenarien',
'List Sections': 'Abschnitte auflisten',
'List Sectors': 'Bereiche auflisten',
'List Seized Items': 'Liste beschlagnahmter Gegenstände',
'List Sent Items': 'Gesendete Artikel auflisten',
'List Sent Shipments': 'Liste verschickte Lieferungen',
'List Service Contact Types': 'Liste Leistungsträgerarten',
'List Service Contacts': 'Liste Leistungsträger',
'List Service Profiles': 'Leistungsprofile auflisten',
'List Settings': 'Einstellungen auflisten',
'List Shelter Flags': 'Liste Unterkunftsflaggen',
'List Shelter Inspections': 'Liste Unterkunftsinspektionen',
'List Shelter Services': 'Leistungen der Unterkunft auflisten',
'List Shelter Types': 'Typen der Unterkunft auflisten',
'List Shelters': 'Unterkünfte auflisten',
'List Site Needs': 'Alle Bedarfe',
'List Skill Equivalences': 'Liste Fähigkeits-Vergleichbarkeiten',
'List Skill Provisions': 'Fähigkeits-Bereitstellungen auflisten',
'List Skill Types': 'Liste der Typen von Fähigkeiten',
'List Skills': 'Liste Fähigkeiten',
'List Solutions': 'Liste Lösungen',
'List Staff Types': 'Mitarbeitertypen auflisten',
'List Status': 'Status auflisten',
'List Subscriptions': 'Abonnements anzeigen',
'List Subsectors': 'Teilbereiche auflisten',
'List Support Requests': 'Liste der Anfragen nach Unterstützung',
'List Survey Answers': 'Liste Umfrage-Antworten',
'List Survey Questions': 'Liste Umfrage-Fragen',
'List Survey Series': 'Liste Umfrage-Serien',
'List Survey Templates': 'Liste Umfrage-Vorlagen',
'List Tasks': 'Aufgaben auflisten',
'List Teams': 'Teams auflisten',
'List Themes': 'Themen auflisten',
'List Tickets': 'Tickets auflisten',
'List Tracks': 'Tracks auflisten',
'List Trainings': 'Schulungen/Ausbildung auflisten',
'List Units': 'Einheiten auflisten',
'List Users': 'Liste Benutzer',
'List Warehouses': 'Liste Warenlager',
'List all': 'Alle auflisten',
'List available Scenarios': 'Liste verfügbarer Szenarien',
'List of Items': 'Liste der Artikel',
'List of Missing Persons': 'Liste der vermissten Personen',
'List of Peers': 'Liste der Peers',
'List of Reports': 'Liste der Berichte',
'List of Requests': 'Liste der Anfragen',
'List of Spreadsheets uploaded': 'Liste der hochgeladenen Tabellen',
'List of Spreadsheets': 'Liste der Tabellen',
'List of Volunteers for this skill set': 'Liste der Freiwilligen für dieses Fachgebiet',
'List of Volunteers': 'Liste der Freiwilligen',
'List of addresses': 'Liste der Adressen',
'List unidentified': 'Nicht identifizierte Objekte auflisten',
'List': 'Liste',
'List/Add': 'Auflisten/Hinzufügen',
'Lists "who is doing what & where". Allows relief agencies to coordinate their activities': 'Liste "Wer macht was & wo". Ermöglicht Hilfsorganizationen, ihre Aktivitäten zu koordinieren',
'Literacy': 'Schriftkundigkeit',
'Live Help': 'Aktuelle Hilfe',
'Livelihood': 'Lebensgrundlage',
'Load Cleaned Data into Database': 'Bereinigte Daten in die Datenbank laden',
'Load Raw File into Grid': 'Unformatierte Datei ins Grid laden',
'Loading Equipment': 'Be-/Entladeausstattung',
'Loading': 'Wird geladen',
'Local Name': 'Lokaler Name',
'Local Names': 'Lokale Namen',
'Location 1': 'Standort 1',
'Location 2': 'Standort 2',
'Location Detail': 'Details zum Gebiet/Standort',
'Location Details': 'Standortdetails',
'Location Hierarchies': 'Standort-Hierarchien',
'Location Hierarchy Level 0 Name': 'Standort-Hierarchie Level 0 Name',
'Location Hierarchy Level 1 Name': 'Standort-Hierarchie Level 1 Name',
'Location Hierarchy Level 2 Name': 'Standort-Hierarchie Level 2 Name',
'Location Hierarchy Level 3 Name': 'Standort-Hierarchie Level 3 Name',
'Location Hierarchy Level 4 Name': 'Standort-Hierarchie Level 4 Name',
'Location Hierarchy Level 5 Name': 'Standort-Hierarchie Level 5 Name',
'Location added': 'Standort hinzugefügt',
'Location deleted': 'Standort gelöscht',
'Location group cannot be a parent.': 'Standortgruppe kann kein übergeordnetes Element sein.',
'Location group cannot have a parent.': 'Standortgruppe kann kein übergeordnetes Element haben.',
'Location groups can be used in the Regions menu.': 'Standortgruppen können im Gebietsmenü verwendet werden.',
'Location groups may be used to filter what is shown on the map and in search results to only entities covered by locations in the group.': 'Standortgruppen können genutzt werden, um die Anzeige auf der Karte und in den Suchergebnissen auf Objekte zu beschränken, die von Standorten in der Gruppe abgedeckt werden.',
'Location updated': 'Standort aktualisiert',
'Location': 'Standort',
'Locations of this level need to have a parent of level': 'Standorte dieser Ebene müssen ein übergeordnetes Element der folgenden Ebene haben',
'Locations': 'Standorte',
'Lockdown': 'Sperrung',
'Log Entry Details': 'Details zum Protokolleintrag',
'Log entry added': 'Protokolleintrag hinzugefügt',
'Log entry deleted': 'Protokolleintrag gelöscht',
'Log entry updated': 'Protokolleintrag aktualisiert',
'Log': 'Protokoll',
'Logged By': 'Protokolliert durch',
'Logged in': 'Eingeloggt',
'Logged out': 'Ausgeloggt',
'Logged-in Last 30 Days': 'Eingeloggt in den letzten 30 Tagen',
'Login': 'Anmeldung',
'Logistics Management System': 'Logistik Managementsystem',
'Logistics': 'Logistik',
'Logo file %s missing!': 'Datei mit Logo %s fehlt!',
'Logout': 'Abmelden',
'Long Name': 'Langschriftlicher Name',
'Long Text': 'Langer Text',
'Longitude %(lon)s is invalid, should be between %(lon_min)s & %(lon_max)s': 'Längengrad %(lon)s ist ungültig, muss zwischen %(lon_min)s & %(lon_max)s liegen',
'Longitude is West - East (sideways).': 'Die geographische Länge ist West-Ost (seitlich).',
'Longitude is West-East (sideways).': 'Die geographische Länge ist West-Ost (seitlich).',
'Longitude is zero on the prime meridian (Greenwich Mean Time) and is positive to the east, across Europe and Asia. Longitude is negative to the west, across the Atlantic and the Americas.': 'Die geographische Länge ist null am Nullmeridian (Greenwich Mean Time) und positiv in Richtung Osten, über Europa und Asien. In Richtung Westen, über den Atlantik und Amerika, ist sie negativ.',
'Longitude is zero on the prime meridian (through Greenwich, United Kingdom) and is positive to the east, across Europe and Asia. Longitude is negative to the west, across the Atlantic and the Americas.': 'Die geographische Länge ist null am Nullmeridian (durch Greenwich, Vereinigtes Königreich) und positiv in Richtung Osten, über Europa und Asien. In Richtung Westen, über den Atlantik und Amerika, ist sie negativ.',
'Longitude of Map Center': 'Geographische Länge des Kartenmittelpunktes',
'Longitude of far eastern end of the region of interest.': 'Geographische Länge des östlichen Endes des Interessensgebietes.',
'Longitude of far western end of the region of interest.': 'Geographische Länge des westlichen Endes des Interessensgebietes.',
'Longitude': 'Geographische Länge',
'Looting': 'Plünderung',
'Lost Password': 'Kennwort vergessen',
'Lost': 'Verloren',
'Low Tide Depth': 'Tiefe bei minimaler Tide',
'Low': 'Niedrig',
'Lunch': 'Mittagessen',
'Magnetic Storm': 'Magnetischer Sturm',
'Mail': 'Post',
'Main Facility': 'Haupteinrichtung',
'Major Damage': 'Großer Schaden',
'Major expenses': 'Hauptausgaben',
'Major outward damage': 'Größter nach außen gerichteter Schaden',
'Major': 'Maßgeblich',
'Make Commitment': 'Eine Zusage machen',
'Make New Commitment': 'Neue Zusage machen',
'Make Request': 'Anfrage erstellen',
'Make Supplies Request': 'Artikelanfrage stellen',
'Make preparations per the <instruction>': 'Vorbereitungen treffen für <instruction>',
'Male': 'Männlich',
'Manage Appointments': 'Terminverwaltung',
'Manage Layers in Catalog': 'Kartenebenen im Katalog verwalten',
'Manage Relief Item Catalogue': 'Katalog der Unterstützungselemente verwalten',
'Manage Users & Roles': 'Benutzer- und Rollenverwaltung',
'Manage Warehouses/Sites': 'Warenlager/Orte verwalten',
'Manage Your Facilities': 'Eigene Einrichtungen verwalten',
'Manage requests for supplies, assets, staff or other resources. Matches against Inventories where supplies are requested.': 'Verwaltung der Anfragen nach Vorräten, Anlagen, Mitarbeitern oder anderen Ressourcen. Abgleich mit den Beständen, in denen Vorräte angefordert werden.',
'Manage requests of hospitals for assistance.': 'Verwaltung der Anfragen von Krankenhäusern nach Unterstützung.',
'Manage volunteers by capturing their skills, availability and allocation': 'Verwaltung der freiwilligen Helfer anhand ihrer Fähigkeiten, Verfügbarkeit und Zuordnung',
'Managed by me': 'Von mir verwaltet',
'Managing Office': 'Verwaltungsbüro',
'Mandatory Appointment': 'Obligatorischer Termin',
'Mandatory for Adolescents': 'Obligatorisch für Jugendliche',
'Mandatory for Adults': 'Obligatorisch für Erwachsene',
'Mandatory for Children': 'Obligatorisch für Kinder',
'Mandatory. In GeoServer, this is the Layer Name. Within the WFS getCapabilities, this is the FeatureType Name part after the colon(:).': 'Zwingend erforderlich. Beim GeoServer ist das der Name des Layers. In den WFS getCapabilities ist es der Teil des FeatureType-Namens hinter dem Doppelpunkt (:).',
'Mandatory. The URL to access the service.': 'Zwingend erforderlich. Die URL, um auf den Dienst zuzugreifen.',
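# A minimal sketch (hypothetical helper, not part of this file) of how
# the WFS layer name described above relates to a namespaced FeatureType
# name -- the expected value is the part after the colon:
#
#     def wfs_layer_name(feature_type):
#         # 'topp:states' -> 'states'; names without a namespace pass through
#         return feature_type.split(':')[-1]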
'Manual Synchronization': 'Manuelle Synchronisation',
'Manual': 'Anleitung',
'Many': 'Viele',
'Map Center Latitude': 'Geographische Breite des Kartenmittelpunktes',
'Map Center Longitude': 'Geographische Länge des Kartenmittelpunktes',
'Map Height': 'Höhe des Kartenfensters',
'Map Profile Details': 'Details zur Kartenkonfiguration',
'Map Profile added': 'Kartenkonfiguration hinzugefügt',
'Map Profile deleted': 'Kartenkonfiguration gelöscht',
'Map Profile removed': 'Kartenkonfiguration entfernt',
'Map Profile updated': 'Kartenkonfiguration aktualisiert',
'Map Profile': 'Kartenkonfiguration',
'Map Profiles': 'Kartenkonfigurationen',
'Map Service Catalog': 'Karten Service-Katalog',
'Map Settings': 'Karteneinstellungen',
'Map Styles': 'Kartensymbolisierungen',
'Map Viewing Client': 'Kartenviewer',
'Map Width': 'Breite des Kartenfensters',
'Map Zoom': 'Kartenvergrößerung',
'Map of Hospitals': 'Karte der Krankenhäuser',
'Map of Offices': 'Karte der Büros',
'Map of Requests': 'Karte der Anfragen',
'Map of Vehicles': 'Karte der Fahrzeuge',
'Map': 'Karte',
'Marine Security': 'Hafensicherheit',
'Marital Status': 'Familienstand',
'Mark as duplicate': 'Als Duplikat markieren',
'Marker Details': 'Details zum Marker/Symbol',
'Marker added': 'Marker/Symbol hinzugefügt',
'Marker deleted': 'Marker/Symbol gelöscht',
'Marker updated': 'Marker/Symbol aktualisiert',
'Marker': 'Marker/Symbol',
'Markers': 'Marker/Symbole',
'Master Message Log to process incoming reports & requests': 'Haupt-Nachrichtenprotokoll um eingehende Berichte und Anfragen zu bearbeiten',
'Master Message Log': 'Haupt-Nachrichtenprotokoll',
'Match Percentage': 'Grad der Übereinstimmung',
'Match Requests': 'Passende Anfrage',
'Match percentage indicates the % match between these two records': 'Der Grad der Übereinstimmung gibt die prozentuale Übereinstimmung zwischen zwei Datensätzen an',
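# A minimal sketch (using Python's standard difflib; the inputs are
# hypothetical record strings) of computing the match percentage that
# the de-duplication strings above refer to:
#
#     from difflib import SequenceMatcher
#
#     def match_percentage(a, b):
#         # ratio() yields a 0..1 similarity; scale to the % shown in the UI
#         return int(SequenceMatcher(None, a, b).ratio() * 100)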
'Match': 'Übereinstimmung',
'Match?': 'Übereinstimmung?',
'Matching Catalog Items': 'Übereinstimmende Katalogelemente',
'Matching Items': 'Übereinstimmende Artikel',
'Matching Records': 'Übereinstimmende Datensätze',
'Max Height': 'Max Höhe',
'Maximum Extent': 'Maximale Ausdehnung',
'Maximum Location Latitude': 'Maximale Geographische Breite des Gebietes',
'Maximum Location Longitude': 'Maximale Geographische Länge des Gebietes',
'Maximum Number per Day': 'Maximale Anzahl pro Tag',
'Maximum number of occurences of this event type for the same person on the same day': 'Maximale Anzahl von Ereignissen dieses Typs für dieselbe Person am gleichen Tag',
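# A minimal sketch (illustrative names only) of enforcing the per-day
# cap described above when registering another event of the same type:
#
#     def under_daily_limit(count_today, maximum_per_day):
#         # None/0 means unlimited; otherwise today's count must stay below the cap
#         return not maximum_per_day or count_today < maximum_per_day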
'Measure Length: Click the points along the path & end with a double-click': 'Längenmessung: Punkte entlang eines Verlaufs anklicken und mit Doppelklick abschließen',
'Medical Treatment': 'Medizinische Behandlung',
'Medical and public health': 'Medizinische Betreuung und öffentliches Gesundheitswesen',
'Medical': 'Medizin',
'Medication / Treatment': 'Medikamente / Behandlung',
'Medium': 'Mittel',
'Megabytes per Month': 'Megabytes pro Monat',
'Member removed from Group': 'Mitglied aus Gruppe entfernt',
'Members': 'Mitglieder',
'Membership Details': 'Details zur Mitgliedschaft',
'Membership Fee': 'Mitgliedsbeitrag',
'Membership Paid': 'Mitgliedschaft bezahlt',
'Membership Types': 'Mitgliedschaftstypen',
'Membership updated': 'Mitgliedschaft aktualisiert',
'Membership': 'Mitgliedschaft',
'Memberships': 'Mitgliedschaften',
'Message Details': 'Details zur Nachricht',
'Message Log': 'Nachrichtenprotokoll',
'Message Variable': 'Nachrichtenvariable',
'Message added': 'Nachricht hinzugefügt',
'Message deleted': 'Nachricht gelöscht',
'Message updated': 'Nachricht aktualisiert',
'Message variable': 'Nachrichtenvariable',
'Message': 'Nachricht',
'Messages': 'Nachrichten',
'Messaging settings updated': 'Einstellungen zur Nachrichtenübertragung aktualisiert',
'Messaging': 'Nachrichtenübertragung',
'Meteorite': 'Meteorit',
'Meteorological (inc. flood)': 'Meteorologisch (auch Flut)',
'Method used': 'Verwendete Methode',
'Middle Name': 'Zweiter Vorname',
'Migrants or ethnic minorities': 'Migranten oder ethnische Minderheiten',
'Military Grid Reference System PDFs': 'Military Grid Reference System PDFs',
'Military': 'Militär',
'Minimum Interval (Hours)': 'Mindestzeitabstand (Stunden)',
'Minimum Location Latitude': 'Minimale Geographische Breite des Gebietes',
'Minimum Location Longitude': 'Minimale Geographische Länge des Gebietes',
'Minimum interval between two consecutive registrations of this event type for the same person': 'Mindestzeitabstand zwischen zwei aufeinanderfolgenden Registrierungen dieses Ereignistyps für dieselbe Person',
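# A minimal sketch (illustrative, assuming datetime values) of the
# minimum-interval rule described above:
#
#     from datetime import timedelta
#
#     def interval_ok(last_registration, now, minimum_hours):
#         # no previous registration or no interval configured -> allowed
#         if last_registration is None or not minimum_hours:
#             return True
#         return now - last_registration >= timedelta(hours=minimum_hours)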
'Minimum shift time is 6 hours': 'Die Mindestdienstzeit beträgt 6 Stunden',
'Minor Damage': 'Kleinere Schäden',
'Minor/None': 'Gering / Keine',
'Minorities participating in coping activities': 'Minderheiten, die sich an Bewältigungsaktivitäten beteiligen',
'Minutes must be a number between 0 and 60': 'Minuten muss eine Zahl zwischen 0 und 60 sein',
'Minutes per Month': 'Minuten pro Monat',
'Minutes should be a number greater than 0 and less than 60': 'Minuten muss eine Zahl größer als 0 und kleiner als 60 sein',
'Miscellaneous': 'Verschiedenes',
'Missed': 'Verpasst',
'Missing Person Details': 'Nähere Angaben zur vermissten Person',
'Missing Person Registry': 'Register der vermissten Personen',
'Missing Person': 'Vermisste Person',
'Missing Persons Registry': 'Register der vermissten Personen',
'Missing Persons Report': 'Bericht über vermisste Personen',
'Missing Persons': 'Vermisste Personen',
'Missing Report': 'Bericht über Vermisste',
'Missing Senior Citizen': 'Vermisster älterer Bürger',
'Missing Vulnerable Person': 'Vermisste gefährdete Person',
'Missing': 'Fehlend',
'Mission Record': 'Auftragsbericht',
'Mission added': 'Auftrag hinzugefügt',
'Mission deleted': 'Auftrag gelöscht',
'Mission updated': 'Auftrag aktualisiert',
'Missions': 'Aufträge',
'Mobile Basic Assessment': 'Mobile Grundlegende Beurteilung',
'Mobile Commons Channels': 'Mobile Commons Kanäle',
'Mobile Phone': 'Mobiltelefon',
'Mobile': 'Handy',
'Mode': 'Modus',
'Model/Type': 'Modell/Typ',
'Modem Settings': 'Modemeinstellungen',
'Modem settings updated': 'Modemeinstellungen aktualisiert',
'Moderate': 'Moderat',
'Modified by': 'Geändert von',
'Modify Information on groups and individuals': 'Anpassen der Information über Gruppen und Einzelpersonen',
'Modifying data in spreadsheet before importing it to the database': 'Anpassen von Daten in der Tabelle vor dem Import in die Datenbank',
'Module provides access to information on current Flood Levels.': 'Modul bietet Zugriff auf Informationen zu aktuellen Hochwasserpegeln.',
'Module': 'Modul',
'Monday': 'Montag',
'Monetization Report': 'Monetarisierungsbericht',
'Monitoring Frequency': 'Monitoring Frequenz',
'Month': 'Monat',
'Monthly Cost': 'Monatliche Kosten',
'Monthly Salary': 'Monatliches Gehalt',
'Monthly': 'Monatlich',
'Months': 'Monate',
'More Options': 'Mehr Optionen',
'More': 'Mehr',
'Morgue Status': 'Status der Leichenhalle',
'Morgue Units Available': 'Leichenhallenplätze verfügbar',
'Mosque': 'Moschee',
'Mother': 'Mutter',
'Motorcycle': 'Motorrad',
'Moustache': 'Schnurrbart',
'Moving-in Date': 'Einzugsdatum',
'Moving-out Date': 'Auszugsdatum',
'MultiPolygon': 'MultiPolygon',
'Multiple Matches': 'Mehrere Übereinstimmungen',
'Multiple': 'Mehrere',
'Muslim': 'Moslem',
'Must a location have a parent location?': 'Muss ein Standort einen übergeordneten Standort haben?',
'My Activities to follow-up': 'Meine fälligen Wiedervorlagen',
'My Activities': 'Meine Aktivitäten',
'My Cases': 'Meine Fälle',
'My Current Cases': 'Meine aktuellen Fälle',
'My Current function': 'Meine aktuelle Funktion',
'My Open Tasks': 'Meine unerledigten Aufgaben',
'My Tasks': 'Meine Aufgaben',
'N/A': 'Nicht zutreffend',
'NO': 'NEIN',
'NZSEE Level 1': 'NZSEE Stufe 1',
'NZSEE Level 2': 'NZSEE Stufe 2',
'Name and/or ID': 'Name und/oder ID',
'Name of Award': 'Name der Auszeichnung',
'Name of Driver': 'Name des Fahrers',
'Name of Institute': 'Name der Institution',
'Name of the file (& optional sub-path) located in static which should be used for the background of the header.': 'Name der Datei (& optionales Unterverzeichnis), die sich in static befindet und die für den Hintergrund des Headers benutzt werden soll.',
'Name of the file (& optional sub-path) located in static which should be used for the top-left image.': 'Name der Datei (& optionales Unterverzeichnis), die sich in static befindet und für das obere linke Bild verwendet werden soll.',
'Name of the file (& optional sub-path) located in views which should be used for footer.': 'Name der Datei (& optionales Unterverzeichnis), die sich in views befindet und für die Fußzeile verwendet werden soll.',
'Name of the person in local language and script (optional).': 'Name der Person in lokaler Sprache und Schreibweise (optional).',
'Name': 'Name',
'Name, Org and/or ID': 'Name, Org und/oder ID',
'Names can be added in multiple languages': 'Namen können in mehreren Sprachen hinzugefügt werden',
'Names, IDs, Reference Numbers, Contact Information, Addresses': 'Namen, ID, Referenznummern, Kontaktinformationen, Addressen',
'National ID Card': 'Nationaler Identitätsnachweis',
'National NGO': 'Nationale NGO',
'Nationality of the person.': 'Nationalität der Person.',
'Nationality': 'Nationalität',
'Nautical Accident': 'See-Unfall',
'Nautical Hijacking': 'See-Entführung',
'Need Details': 'Details zum Bedarf',
'Need Type Details': 'Details zum Bedarfstyp',
'Need Type added': 'Bedarfstyp hinzugefügt',
'Need Type deleted': 'Bedarfstyp gelöscht',
'Need Type updated': 'Bedarfstyp aktualisiert',
'Need Type': 'Bedarfstyp',
'Need Types': 'Bedarfstypen',
'Need added': 'Bedarf hinzugefügt',
'Need deleted': 'Bedarf gelöscht',
'Need established on': 'Bedarf festgestellt am',
'Need to be logged-in to be able to submit assessments': 'Sie müssen eingeloggt sein, um Beurteilungen einreichen zu können',
'Need to configure Twitter Authentication': 'Die Twitter Authentifizierungsdaten müssen konfiguriert sein',
'Need to specify a Budget!': 'Sie müssen ein Budget angeben!',
'Need to specify a Kit!': 'Sie müssen eine Ausstattung (Kit) angeben!',
'Need to specify a Resource!': 'Sie müssen eine Ressource angeben!',
'Need to specify a bundle!': 'Sie müssen ein Produktpaket angeben!',
'Need to specify a group!': 'Sie müssen eine Gruppe angeben!',
'Need to specify a location to search for.': 'Sie müssen ein Gebiet/Position für die Suche angeben.',
'Need to specify a role!': 'Sie müssen eine Rolle definieren!',
'Need to specify a table!': 'Sie müssen einen Tabellennamen angeben!',
'Need to specify a user!': 'Ein Benutzer muss angegeben werden!',
'Need updated': 'Bedarf aktualisiert',
'Need': 'Bedarf',
'Needs Assessment': 'Bedarfseinschätzung',
'Needs Details': 'Details zum Bedarf',
'Needs Maintenance': 'Braucht Wartung',
'Needs to reduce vulnerability to violence': 'Handlungsbedarf um die Anfälligkeit für Gewalt zu verringern',
'Needs': 'Bedarf',
'Neighborhood': 'Nachbarschaft',
'Neighbouring building hazard': 'Risiko durch benachbarte Gebäude',
'Neonatal ICU': 'Neugeborenen-Intensivstation',
'Neonatology': 'Neonatologie',
'Network': 'Netzwerk',
'Neurology': 'Neurologie',
'New Assessment reported from': 'Neue Beurteilung erstellt durch',
'New Certificate': 'Neues Zertifikat',
'New Checklist': 'Neue Prüfliste',
'New Entry': 'Neuer Eintrag',
'New Event': 'Neues Ereignis',
'New Item Category': 'Neue Kategorie für Artikel',
'New Job Role': 'Neue Tätigkeit',
'New Location Group': 'Neue Standortgruppe',
'New Location': 'Neuer Standort/Gebiet',
'New Peer': 'Neuer Peer',
'New Record': 'Neuer Datensatz',
'New Request': 'Neue Anfrage',
'New Role': 'Neue Rolle',
'New Scenario': 'Neues Szenario',
'New Skill': 'Neue Fähigkeit',
'New Solution Choice': 'Neue Lösungswahl',
'New Staff Member': 'Neuer Mitarbeiter',
'New Status': 'Neuer Status',
'New Stock Count': 'Neue Anzahl des Lagerbestands',
'New Support Request': 'Neue Unterstützunganfrage',
'New Synchronization Peer': 'Neuer Synchronisations Peer',
'New Team': 'Neues Team',
'New Training Course': 'Neuer Schulungskurs',
'New Volunteer': 'Neuer Freiwilliger',
'New cases in the past 24h': 'Neue Fälle in den letzten 24h',
'New': 'Neu',
'Next': 'Nächste',
'No Actions currently registered': 'Zurzeit sind keine Maßnahmen registriert',
'No Activities Found': 'Keine Aktivitäten gefunden',
'No Alternative Items currently registered': 'Zurzeit sind keine alternativen Artikel registriert',
'No Assessment Summaries currently registered': 'Zurzeit sind keine Beurteilungszusammenfassungen registriert',
'No Assessments currently registered': 'Zurzeit sind keine Beurteilungen registriert.',
'No Assets currently registered in this event': 'Zurzeit sind keine Anlagen zu diesem Ereignis registriert',
'No Assets currently registered in this scenario': 'Zurzeit sind keine Anlagen zu diesem Szenario registriert',
'No Assets currently registered': 'Zurzeit sind keine Anlagen registriert',
'No Baseline Types currently registered': 'Zurzeit sind keine Referenzdatumstypen registriert',
'No Baselines currently registered': 'Zurzeit sind keine Referenzdaten registriert',
'No Branch Organizations currently registered': 'Zurzeit sind keine Zweigorganisationen registriert',
'No Brands currently registered': 'Zurzeit sind keine Marken registriert',
'No Budgets currently registered': 'Zurzeit sind keine Budgets registriert',
'No Bundles currently registered': 'Zurzeit sind keine Produktpakete registriert',
'No Camp Services currently registered': 'Zurzeit sind keine Camp-Leistungen registriert',
'No Camp Types currently registered': 'Zurzeit sind keine Typen von Camps registriert',
'No Camps currently registered': 'Zurzeit sind keine Camps registriert',
'No Catalog Items currently registered': 'Zurzeit sind keine Katalogeinträge registriert',
'No Catalogs currently registered': 'Zurzeit sind keine Kataloge registriert',
'No Checklist available': 'Zurzeit sind keine Checklisten verfügbar',
'No Cluster Subsectors currently registered': 'Zurzeit sind keine Cluster Teilbereiche registriert',
'No Clusters currently registered': 'Zurzeit sind keine Cluster registriert',
'No Commitment Items currently registered': 'Zurzeit sind keine zugesagten Artikel registriert',
'No Commitments': 'Keine Zusagen',
'No Counseling Themes currently defined': 'Zurzeit keine Beratungsthemen definiert',
'No Credentials currently set': 'Derzeit keine Berechtigungen hinterlegt',
'No Depositories currently registered': 'Keine Verwahrungsorte registriert',
'No Details currently registered': 'Zurzeit sind keine Details registriert',
'No Documents found': 'Keine Dokumente gefunden',
'No Donors currently registered': 'Zurzeit sind keine Spender registriert',
'No Events currently registered': 'Zurzeit sind keine Ereignisse registriert',
'No Facilities currently registered in this event': 'Für dieses Ereignis ist zurzeit keine Einrichtung registriert',
'No Facilities currently registered in this scenario': 'Für dieses Szenario ist zurzeit keine Einrichtung registriert.',
'No Family Members currently registered': 'Zurzeit keine Familienmitglieder registriert',
'No Feature Layers currently defined': 'Zurzeit sind keine Objekt-Layer definiert',
'No Flood Reports currently registered': 'Zurzeit sind keine Flutberichte registriert',
'No Groups currently defined': 'Zurzeit sind keine Gruppen definiert',
'No Groups currently registered': 'Zurzeit sind keine Gruppen registriert',
'No Hospitals currently registered': 'Zurzeit sind keine Krankenhäuser registriert',
'No Human Resources currently registered in this event': 'Für dieses Ereignis sind zurzeit keine personellen Ressourcen registriert.',
'No Human Resources currently registered in this scenario': 'Für dieses Szenario sind zurzeit keine personellen Ressourcen registriert.',
'No Identification Report Available': 'Kein Identifizierungsbericht verfügbar',
'No Identities currently registered': 'Zurzeit sind keine Identitäten registriert',
'No Image': 'Kein Bild',
'No Images currently registered': 'Zurzeit sind keine Bilder registriert',
'No Impact Types currently registered': 'Zurzeit sind keine Auswirkungsarten registriert',
'No Impacts currently registered': 'Zurzeit sind keine Auswirkungen registriert',
'No Incident Reports currently registered': 'Zurzeit sind keine Vorfallberichte registriert',
'No Incoming Shipments': 'Keine eingehenden Lieferungen',
'No Item Categories currently registered': 'Zurzeit sind keine Artikelkategorien registriert',
'No Item Packs currently registered': 'Zurzeit sind keine Artikelpakete registriert',
'No Item Types currently defined': 'Keine Gegenstandsarten definiert',
'No Items currently registered in this Inventory': 'Für diesen Bestand sind zurzeit keine Artikel registriert',
'No Items currently registered': 'Zurzeit sind keine Artikel registriert',
'No Keys currently defined': 'Zurzeit sind keine Schlüssel definiert',
'No Kits currently registered': 'Zurzeit sind keine Ausstattungen (Kits) registriert',
'No Level 1 Assessments currently registered': 'Zurzeit keine Stufe 1 Beurteilungen registriert',
'No Level 2 Assessments currently registered': 'Zurzeit keine Stufe 2 Beurteilungen registriert',
'No Locations currently available': 'Keine Standorte/Gebiete verfügbar',
'No Locations currently registered': 'Zurzeit sind keine Standorte/Gebiete registriert',
'No Map Profiles currently defined': 'Zurzeit sind keine Kartenkonfigurationen definiert',
'No Map Profiles currently registered in this event': 'Für dieses Ereignis sind zurzeit keine Kartenkonfigurationen registriert',
'No Map Profiles currently registered in this scenario': 'Für dieses Szenario sind zurzeit keine Kartenkonfigurationen registriert',
'No Markers currently available': 'Zurzeit sind keine Marker/Symbole verfügbar',
'No Match': 'Keine Übereinstimmung',
'No Matching Catalog Items': 'Keine passenden Katalogelemente',
'No Matching Items': 'Keine passenden Artikel',
'No Matching Records': 'Keine passenden Datensätze',
'No Members currently registered': 'Zurzeit sind keine Mitglieder registriert',
'No Memberships currently defined': 'Zurzeit sind keine Mitgliedschaften definiert',
'No Messages currently in Outbox': 'Zurzeit sind keine Nachrichten im Postausgang',
'No Need Types currently registered': 'Zurzeit sind keine Bedarfstypen registriert',
'No Needs currently registered': 'Zurzeit ist kein Bedarf registriert',
'No Offices currently registered': 'Zurzeit sind keine Büros registriert',
'No Offices found!': 'Keine Büros gefunden!',
'No Organizations currently registered': 'Zurzeit sind keine Organisationen registriert',
'No People currently registered in this camp': 'Zurzeit sind in diesem Camp keine Personen registriert',
'No People currently registered in this shelter': 'Zurzeit sind in dieser Unterkunft keine Personen registriert',
'No Persons currently registered': 'Zurzeit sind keine Personen registriert',
'No Persons currently reported missing': 'Zurzeit sind keine Personen vermisst gemeldet',
'No Persons found': 'Keine Personen gefunden',
'No Photos found': 'Keine Fotos gefunden',
'No Picture': 'Kein Bild',
'No Population Statistics currently registered': 'Zurzeit sind keine Bevölkerungsstatistiken registriert',
'No Presence Log Entries currently registered': 'Zurzeit gibt es keine Anwesenheitsprotokolleinträge',
'No Problems currently defined': 'Zurzeit sind keine Probleme definiert',
'No Projections currently defined': 'Zurzeit sind keine Kartenprojektionen definiert',
'No Projects currently registered': 'Zurzeit sind keine Projekte registriert',
'No Rapid Assessments currently registered': 'Zurzeit sind keine Schnell-Beurteilungen registriert',
'No Received Items currently registered': 'Zurzeit sind keine erhaltenen Lieferungen registriert',
'No Received Shipments': 'Keine erhaltene Lieferungen',
'No Records currently available': 'Zurzeit sind keine Datensätze verfügbar',
'No Request Items currently registered': 'Zurzeit sind keine angefragten Artikel registriert',
'No Requests': 'Keine Anfragen',
'No Residence Permit Types currently defined': 'Zurzeit sind keine Aufenthaltserlaubnistypen definiert',
'No Residence Status Types currently defined': 'Zurzeit sind keine Aufenthaltsstatustypen definiert',
'No Residence Statuses currently defined': 'Zurzeit sind keine Aufenthaltsstatus definiert',
'No Residents Reports found': 'Keine Bewohnerliste gefunden',
'No Rivers currently registered': 'Zurzeit sind keine Flüsse registriert',
'No Roles currently defined': 'Zurzeit sind keine Rollen definiert',
'No Rooms currently registered': 'Zurzeit sind keine Räume registriert',
'No Scenarios currently registered': 'Zurzeit sind keine Szenarien registriert',
'No Sections currently registered': 'Zurzeit sind keine Abschnitte registriert',
'No Sectors currently registered': 'Zurzeit sind keine Bereiche registriert',
'No Seized Items currently registered': 'Zurzeit sind keine beschlagnahmten Gegenstände registriert',
'No Sent Items currently registered': 'Zurzeit sind keine gesendeten Artikel registriert',
'No Sent Shipments': 'Keine versandten Lieferungen',
'No Service Contact Types currently defined': 'Zurzeit sind keine Leistungsträgerarten definiert',
'No Service Contacts currently registered': 'Zurzeit sind keine Leistungsträger registriert',
'No Settings currently defined': 'Zurzeit sind keine Einstellungen definiert',
'No Shelter Services currently registered': 'Zurzeit sind keine Unterkunftsleistungen registriert',
'No Shelter Types currently registered': 'Zurzeit sind keine Unterkunfttypen registriert',
'No Shelters currently registered': 'Zurzeit sind keine Unterkünfte registriert',
'No Solutions currently defined': 'Zurzeit sind keine Lösungen definiert',
'No Staff Types currently registered': 'Zurzeit sind keine Mitarbeitertypen registriert',
'No Subscription available': 'Kein Abonnement verfügbar',
'No Subsectors currently registered': 'Zurzeit sind keine Teilbereiche registriert',
'No Support Requests currently registered': 'Zurzeit sind keine Unterstützungsanfragen registriert',
'No Survey Answers currently entered.': 'Zurzeit wurden noch keine Antworten auf Umfragen eingegeben.',
'No Survey Questions currently registered': 'Zurzeit wurden noch keine Umfrage-Fragen registriert',
'No Survey Series currently registered': 'Zurzeit wurden noch keine Umfrage-Serien registriert',
'No Survey Template currently registered': 'Zurzeit wurde noch keine Umfrage-Vorlage registriert',
'No Tasks with Location Data': 'Keine Aufgaben mit Standortdaten',
'No Teams currently registered': 'Zurzeit wurden noch keine Teams registriert',
'No Themes currently defined': 'Zurzeit sind keine Themen definiert',
'No Tickets currently registered': 'Zurzeit wurden noch keine Tickets registriert',
'No Tracks currently available': 'Zurzeit sind noch keine Tracks verfügbar',
'No Users currently registered': 'Zurzeit wurden noch keine Benutzer registriert',
'No Volunteers currently registered': 'Zurzeit sind noch keine Freiwilligen registriert',
'No Warehouses currently registered': 'Zurzeit sind noch keine Warenlager registriert',
'No access at all': 'Kein Zugriff',
'No access to this record!': 'Kein Zugriff auf diesen Datensatz!',
'No action recommended': 'Keine Aktion empfohlen',
'No conflicts logged': 'Keine Konflikte protokolliert',
'No contact information available': 'Keine Kontaktinformation verfügbar',
'No contacts currently registered': 'Zurzeit sind noch keine Kontakte registriert',
'No data available': 'Keine Daten verfügbar',
'No data in this table - cannot create PDF!': 'Keine Daten in dieser Tabelle - PDF kann nicht erstellt werden!',
'No databases in this application': 'Keine Datenbanken in dieser Anwendung',
'No dead body reports available': 'Keine Leichenberichte verfügbar',
'No entries found': 'Keine Einträge gefunden',
'No entries matching the query': 'Die Abfrage lieferte keine Einträge',
'No entry available': 'Kein Eintrag verfügbar',
'No instructions for this flag': 'Keine Anweisungen zu dieser Markierung',
'No location known for this person': 'Für diese Person ist kein Gebiet/Standort bekannt',
'No locations found for members of this team': 'Für Mitglieder dieses Teams ist kein Gebiet/Standort bekannt',
'No log entries matching the query': 'Die Abfrage lieferte keine Protokolleinträge',
'No matching records found': 'Keine Treffer gefunden',
'No messages in the system': 'Keine Nachrichten im System',
'No options available': 'Keine Optionen verfügbar',
'No payments specified': 'Keine Auszahlungen angegeben',
'No peers currently registered': 'Zurzeit sind keine Peers registriert',
'No pending payments': 'Keine anstehenden Auszahlungen',
'No pending registrations found': 'Keine anstehenden Registrierungen gefunden',
'No pending registrations matching the query': 'Die Abfrage lieferte keine anstehenden Registrierungen',
'No person found with this ID number': 'Keine Person mit dieser ID Nummer gefunden',
'No person record found for current user.': 'Kein Personendatensatz für den aktuellen Benutzer gefunden.',
'No picture available': 'Kein Bild verfügbar',
'No problem group defined yet': 'Noch keine Problem-Gruppe definiert',
'No records found': 'Keine Datensätze gefunden',
'No records matching the query': 'Die Abfrage lieferte keine Datensätze',
'No reports available.': 'Keine Berichte verfügbar.',
'No reports currently available': 'Zurzeit sind keine Berichte verfügbar',
'No requests found': 'Keine Anfragen gefunden',
'No resources currently reported': 'Zurzeit sind keine Ressourcen gemeldet',
'No service profile available': 'Kein Leistungsprofil verfügbar',
'No skills currently set': 'Zurzeit sind keine Fähigkeiten festgelegt',
'No staff or volunteers currently registered': 'Zurzeit sind weder Mitarbeiter noch Freiwillige registriert',
'No status information available': 'Keine Statusinformation verfügbar',
'No synchronization': 'Keine Synchronisation',
'No tasks currently registered': 'Zurzeit sind keine Aufgaben registriert',
'No template found!': 'Keine Vorlage gefunden!',
'No transferable cases found': 'Keine transferierbaren Fälle gefunden',
'No units currently registered': 'Zurzeit sind keine Einheiten registriert',
'No volunteer availability registered': 'Zurzeit ist keine Verfügbarkeit von Freiwilligen registriert',
'No': 'Nein',
'Non-structural Hazards': 'Nicht-strukturelle Gefahren',
'None (no such record)': 'Nichts (kein entsprechender Datensatz)',
'None of the above': 'Keine(r) der oben genannten',
'None': '-',
'Noodles': 'Nudeln',
'Normal Address': 'Normale Adresse',
'Normal Job': 'Normaler Beruf',
'Not Applicable': 'Nicht zutreffend',
'Not Authorised!': 'Nicht berechtigt!',
'Not Authorized': 'Nicht berechtigt',
'Not Available': 'Nicht verfügbar/vorhanden',
'Not Combinable With': 'Nicht kombinierbar mit',
'Not Possible': 'Nicht möglich',
'Not Required': 'Nicht erforderlich',
'Not Set': 'Nicht festgelegt',
'Not Transferable': 'Nicht Transferierbar',
'Not currently a resident': 'Kein aktueller Bewohner',
'Not installed or incorrectly configured.': 'Nicht installiert oder nicht korrekt konfiguriert.',
'Not yet a Member of any Group': 'Bis jetzt kein Mitglied irgendeiner Gruppe',
'Note Details': 'Details zur Notiz',
'Note Type': 'Notizart',
'Note added': 'Notiz erstellt',
'Note deleted': 'Notiz gelöscht',
'Note that this list only shows active volunteers. To see all people registered in the system, search from this screen instead': 'Beachten Sie, dass diese Liste nur aktive Freiwillige zeigt. Um alle im System registrierten Personen zu sehen, suchen Sie stattdessen über diese Ansicht',
'Note updated': 'Notiz aktualisiert',
'Note': 'Notiz',
'Notes': 'Notizen',
'Notice to Airmen': 'Hinweis für Flieger',
'Notify': 'Benachrichtigen',
'Number of Actions': 'Anzahl Maßnahmen',
'Number of Activities per': 'Zahl der Aktivitäten pro',
'Number of Activities': 'Anzahl Aktivitäten',
'Number of Barges': 'Zahl der Lastschiffe',
'Number of Cases': 'Anzahl Fälle',
'Number of Children': 'Anzahl Kinder',
'Number of Clients': 'Anzahl Klienten',
'Number of Columns': 'Anzahl der Spalten',
'Number of Families': 'Anzahl Familien',
'Number of Patients': 'Anzahl Patienten',
'Number of People Required': 'Anzahl der benötigten Personen',
'Number of Rows': 'Anzahl der Reihen',
'Number of Tugboats': 'Zahl der Schlepper',
'Number of additional beds of that type expected to become available in this unit within the next 24 hours.': 'Anzahl von zusätzlichen Betten dieses Typs, die voraussichtlich in den nächsten 24 Stunden in dieser Einheit zur Verfügung stehen werden.',
'Number of alternative places for studying': 'Anzahl alternativer Orte zum Lernen',
'Number of available/vacant beds of that type in this unit at the time of reporting.': 'Anzahl von verfügbaren/freien Betten dieses Typs in dieser Einheit zum Zeitpunkt des Berichtes.',
'Number of deaths during the past 24 hours.': 'Anzahl der Toten in den letzten 24 Stunden.',
'Number of discharged patients during the past 24 hours.': 'Anzahl der entlassenen Patienten in den vergangenen 24 Stunden.',
'Number of doctors': 'Anzahl der Ärzte',
'Number of evacuees registered in the shelter for day and night': 'Zahl der in der Unterkunft für Tag und Nacht registrierten Personen',
'Number of in-patients at the time of reporting.': 'Anzahl der stationären Patienten zum Zeitpunkt der Berichterstellung.',
'Number of newly admitted patients during the past 24 hours.': 'Anzahl der neu aufgenommenen Patienten innerhalb der letzten 24 Stunden.',
'Number of non-medical staff': 'Anzahl des nicht-medizinischen Personals',
'Number of nurses': 'Anzahl der Krankenschwestern',
'Number of private schools': 'Anzahl der privaten Schulen',
'Number of public schools': 'Anzahl der öffentlichen Schulen',
'Number of religious schools': 'Anzahl der religiösen Schulen',
'Number of residential units not habitable': 'Anzahl der nicht bewohnbaren Wohneinheiten',
'Number of residential units': 'Anzahl der Wohneinheiten',
'Number of vacant/available beds in this hospital. Automatically updated from daily reports.': 'Anzahl der freien/verfügbaren Betten in diesem Krankenhaus. Automatisch aktualisiert aus täglichen Berichten.',
'Number of vacant/available units to which victims can be transported immediately.': 'Anzahl der freien/verfügbaren Einheiten zu denen die Opfer sofort transportiert werden können.',
'Number or Address': 'Nummer oder Adresse',
'Number or Label on the identification tag this person is wearing (if any).': 'Nummer oder Beschriftung auf der Identifikationsmarke, die diese Person trägt (falls vorhanden).',
'Number or code used to mark the place of find, e.g. flag code, grid coordinates, site reference number or similar (if available)': 'Nummer oder Code, der den Fundort markiert, z.B. Flaggencode, Koordinaten, Standortnummer oder ähnliches (falls verfügbar)',
'Number': 'Nummer',
'Number/Percentage of affected population that is Female & Aged 0-5': 'Anzahl/Prozentsatz der betroffenen weiblichen Bevölkerung im Alter zwischen 0-5 Jahren',
'Number/Percentage of affected population that is Female & Aged 13-17': 'Anzahl/Prozentsatz der betroffenen weiblichen Bevölkerung im Alter zwischen 13-17 Jahren',
'Number/Percentage of affected population that is Female & Aged 18-25': 'Anzahl/Prozentsatz der betroffenen weiblichen Bevölkerung im Alter zwischen 18-25 Jahren',
'Number/Percentage of affected population that is Female & Aged 26-60': 'Anzahl/Prozentsatz der betroffenen weiblichen Bevölkerung im Alter zwischen 26-60 Jahren',
'Number/Percentage of affected population that is Female & Aged 6-12': 'Anzahl/Prozentsatz der betroffenen weiblichen Bevölkerung im Alter zwischen 6-12 Jahren',
'Number/Percentage of affected population that is Female & Aged 61+': 'Anzahl/Prozentsatz der betroffenen weiblichen Bevölkerung über 61',
'Number/Percentage of affected population that is Male & Aged 0-5': 'Anzahl/Prozentsatz der betroffenen männlichen Bevölkerung im Alter zwischen 0-5 Jahren',
'Number/Percentage of affected population that is Male & Aged 13-17': 'Anzahl/Prozentsatz der betroffenen männlichen Bevölkerung im Alter zwischen 13-17 Jahren',
'Number/Percentage of affected population that is Male & Aged 18-25': 'Anzahl/Prozentsatz der betroffenen männlichen Bevölkerung im Alter zwischen 18-25 Jahren',
'Number/Percentage of affected population that is Male & Aged 26-60': 'Anzahl/Prozentsatz der betroffenen männlichen Bevölkerung im Alter zwischen 26-60 Jahren',
'Number/Percentage of affected population that is Male & Aged 6-12': 'Anzahl/Prozentsatz der betroffenen männlichen Bevölkerung im Alter zwischen 6-12 Jahren',
'Number/Percentage of affected population that is Male & Aged 61+': 'Anzahl/Prozentsatz der betroffenen männlichen Bevölkerung über 61',
'Nursery Beds': 'Betten der Säuglingsstation',
'Nutrition problems': 'Ernährungsprobleme',
'Nutrition': 'Nahrung',
'OR Reason': 'oder Grund',
'OR Status Reason': 'oder Statusgrund',
'OR Status': 'oder Status',
'Observer': 'Beobachter',
'Obsolete': 'Hinfällig',
'Obstetrics/Gynecology': 'Geburtshilfe/Gynäkologie',
'Occasion': 'Anlass',
'Office Address': 'Büroadresse',
'Office Details': 'Bürodetails',
'Office Phone': 'Telefon im Büro',
'Office Type': 'Bürotyp',
'Office Types': 'Bürotypen',
'Office added': 'Büro hinzugefügt',
'Office deleted': 'Büro gelöscht',
'Office updated': 'Büro aktualisiert',
'Office': 'Büro',
'Offices & Warehouses': 'Büros & Warenlager',
'Offices': 'Büros',
'Official Note': 'Amtliche Mitteilung',
'Offline Sync (from USB/File Backup)': 'Offline-Synchronisation (von USB/Dateisicherung)',
'Offline Sync': 'Offline-Synchronisation',
'Oil Terminal Depth': 'Tiefe des Ölterminals',
'Older people as primary caregivers of children': 'Ältere Menschen als primäre Pfleger von Kindern',
'Older people in care homes': 'Ältere Menschen in Pflegeheimen',
'Older people participating in coping activities': 'Ältere Menschen die sich an Krisenbewältigungsaktivitäten beteiligen',
'Older person (>60 yrs)': 'Ältere Personen (> 60 Jahre)',
'On Hold': 'Abwarten',
'On by default? (only applicable to Overlays)': 'Standardmäßig an? (gilt nur für Overlays)',
'On by default?': 'Standardmäßig an?',
'One Time Cost': 'Einmalige Kosten',
'One time cost': 'Einmalige Kosten',
'One-time costs': 'Einmalige Kosten',
'One-time': 'Einmalig',
'Oops! Something went wrong...': 'Hoppla! Etwas ging schief...',
'Oops! something went wrong on our side.': 'Hoppla! Etwas ging auf unserer Seite schief.',
'Opacity (1 for opaque, 0 for fully-transparent)': 'Opazität (1 für opaque - undurchsichtig, 0 für vollständig transparent)',
'Opacity': 'Opazität (Undurchsichtigkeit)',
'Open area': 'Offener Bereich',
'Open recent': 'Kürzlich Bearbeitetes öffnen',
'Open##the_shelter_is': 'Offen',
'Open': 'Öffnen',
'OpenStreetMap Tiles': 'OpenStreetMap Tiles',
'OpenWeatherMap data': 'OpenWeatherMap Daten',
'Opening Times': 'Öffnungszeiten',
'Operating Rooms': 'Operationssäle',
'Opportunities to Volunteer On-Site?': 'Möglichkeiten für Freiwillige vor Ort?',
'Optional link to an Incident which this Assessment was triggered by.': 'Optionaler Link zu einem Vorfall, der diese Beurteilung ausgelöst hat.',
'Optional': 'Optional',
'Optional. If you wish to style the features based on values of an attribute, select the attribute to use here.': 'Optional. Wenn Sie die Darstellung der Objekte auf der Basis von Werten eines Attributs festlegen möchten, wählen Sie das zu verwendende Attribut hier aus.',
'Optional. In GeoServer, this is the Workspace Namespace URI (not the name!). Within the WFS getCapabilities, this is the FeatureType Name part before the colon(:).': 'Optional. Bei GeoServer ist dies die Namespace-URI des Arbeitsbereichs (nicht der Name!). Im WFS getCapabilities ist dies der Teil des FeatureType-Namens vor dem Doppelpunkt (:).',
'Optional. The name of an element whose contents should be a URL of an Image file put into Popups.': 'Optional. Der Name eines Elements, dessen Inhalt eine URL zu einer Bilddatei ist, die in Popups angezeigt wird.',
'Optional. The name of an element whose contents should be put into Popups.': 'Optional. Name eines Elements, dessen Inhalt in Dialogfenstern angezeigt wird.',
'Optional. The name of the schema. In Geoserver this has the form http://host_name/geoserver/wfs/DescribeFeatureType?version=1.1.0&;typename=workspace_name:layer_name.': 'Optional. Name des Schemas. Bei Geoserver wird das Format http://host_name/geoserver/wfs/DescribeFeatureType?version=1.1.0&;typename=workspace_name:layer_name verwendet.',
'Options': 'Optionen',
'Organization Details': 'Details zur Organisation',
'Organization Domains': 'Organisationsdomains',
'Organization Registry': 'Organisationsdatenbank',
'Organization Type': 'Organisationstyp',
'Organization Types': 'Organisationstypen',
'Organization added': 'Organisation hinzugefügt',
'Organization deleted': 'Organisation gelöscht',
'Organization updated': 'Organisation aktualisiert',
'Organization': 'Organisation',
'Organization/Supplier': 'Organisation/Anbieter',
'Organizations': 'Organisationen',
'Organized By': 'Organisiert durch',
'Origin of the separated children': 'Ursprung der getrennten Kinder',
'Origin': 'Ursprung',
'Other (describe)': 'Andere (näher beschreiben)',
'Other (specify)': 'Sonstige (näher spezifizieren)',
'Other Address': 'Andere Adresse',
'Other Communication': 'Sonstige Mitteilung',
'Other Evidence': 'Anderer Nachweis',
'Other Faucet/Piped Water': 'Andere Wasserrohre/-hähne',
'Other Isolation': 'Andere Isolierung',
'Other Name': 'Sonstiger Name',
'Other activities of boys 13-17yrs before disaster': 'Andere Aktivitäten von Jungen 13-17 Jahre vor der Katastrophe',
'Other activities of boys 13-17yrs': 'Andere Aktivitäten der Jungen 13-17 Jahre',
'Other activities of boys <12yrs before disaster': 'Andere Aktivitäten von Jungen <12 Jahre vor der Katastrophe',
'Other activities of boys <12yrs': 'Andere Aktivitäten von Jungen <12 Jahren',
'Other activities of girls 13-17yrs before disaster': 'Andere Aktivitäten von Mädchen 13-17 Jahre vor der Katastrophe',
'Other activities of girls 13-17yrs': 'Andere Aktivitäten von Mädchen 13-17 Jahre',
'Other activities of girls<12yrs before disaster': 'Andere Aktivitäten von Mädchen <12 Jahre vor der Katastrophe',
'Other activities of girls<12yrs': 'Andere Aktivitäten von Mädchen <12 Jahre',
'Other alternative infant nutrition in use': 'Andere alternative Säuglingsnahrung, die verwendet wird',
'Other alternative places for study': 'Andere alternative Orte zum Lernen',
'Other assistance needed': 'Andere Unterstützung benötigt',
'Other assistance, Rank': 'Andere Unterstützung, Rang',
'Other current health problems, adults': 'Andere aktuelle gesundheitliche Probleme, Erwachsene',
'Other current health problems, children': 'Andere aktuelle gesundheitliche Probleme, Kinder',
'Other events': 'Sonstige Ereignisse',
'Other factors affecting school attendance': 'Andere Faktoren mit Einfluss auf den Schulbesuch',
'Other major expenses': 'Andere große Ausgaben',
'Other non-food items': 'Andere Non-Food-Artikel',
'Other recommendations': 'Andere Empfehlungen',
'Other residential': 'Andere Bewohner/innen',
'Other school assistance received': 'Andere erhaltene Schulunterstützung',
'Other school assistance, details': 'Andere Schulhilfe, Einzelheiten',
'Other school assistance, source': 'Herkunft anderer Schulhilfen',
'Other settings can only be set by editing a file on the server': 'Andere Einstellungen können nur durch Bearbeiten einer Datei auf dem Server festgelegt werden',
'Other side dishes in stock': 'Andere Speisen auf Lager',
'Other types of water storage containers': 'Andere Arten von Wassertanks',
'Other ways to obtain food': 'Weitere Möglichkeiten, um an Nahrungsmittel zu gelangen',
'Other': 'Sonstige',
'Outbound Mail settings are configured in models/000_config.py.': 'Abgehende Mail-Einstellungen werden in der Datei models/000_config.py konfiguriert.',
'Outbox': 'Ausgang',
'Outcome': 'Ergebnis',
'Outgoing SMS Handler': 'SMS-Handler für ausgehende Informationen',
'Outgoing SMS handler': 'SMS-Handler für ausgehende Informationen',
'Overall Hazards': 'Gefahren insgesamt',
'Overhead falling hazard': 'Gefahr fallender Objekte',
'Overland Flow Flood': 'Überflutung',
'Overview': 'Übersicht',
'Overviews': 'Übersichten',
'Owned By (Organization/Branch)': 'Gehört (Organisation/Niederlassung)',
'Owned Records': 'Eigene Datensätze',
'Owned Resources': 'Eigene Ressourcen',
'Owner': 'Eigentümer',
'Ownership': 'Eigentum',
'Owning Organization': 'In Eigentum von',
'P0 Number': 'P0 Nummer',
'PIN number': 'PIN Nummer',
'PIN': 'PIN',
'PL Women': 'PL Frauen',
'PO Number': 'PO Nummer',
'PO': 'PO',
'POIS': 'PoIs',
'Pack': 'Packung',
'Packs': 'Packungen',
'Paid on': 'Ausgezahlt am',
'Paid': 'Ausgezahlt',
'Parameters': 'Parameter',
'Parapets, ornamentation': 'Brüstungen, Verzierungen',
'Parent Office': 'Übergeordnetes Büro',
'Parent needs to be of the correct level': 'Übergeordnetes Element muss auf der richtigen Stufe sein',
'Parent needs to be set for locations of level': 'Ein übergeordnetes Element muss für Gebiete/Standorte dieser Stufe existieren',
'Parent needs to be set': 'Ein übergeordnetes Element muss definiert werden',
'Parent': 'Übergeordnetes Element',
'Parents/Caregivers missing children': 'Eltern/Pfleger vermissen Kinder',
'Parser Connections': 'Parser Verbindungen',
'Parsers': 'Parser',
'Partial': 'Partiell',
'Participant': 'Teilnehmer',
'Pashto': 'Paschtu',
'Pass': 'Übergeben',
'Passport': 'Reisepass',
'Password': 'Passwort',
'Path': 'Pfad',
'Pathology': 'Pathologie',
'Patients': 'Patienten',
'Payload Height (m)': 'Ladekapazität Höhe (m)',
'Payload Length (m)': 'Ladekapazität Länge (m)',
'Payload Volume (m3)': 'Ladekapazität Volumen (m3)',
'Payload Weight (kg)': 'Ladekapazität Gewicht (kg)',
'Payload Width (m)': 'Ladekapazität Breite (m)',
'Payment Date': 'Auszahlungsdatum',
'Payment Registration': 'Auszahlungsregistrierung',
'Payment registration not permitted': 'Auszahlungsregistrierung nicht erlaubt',
'Pediatric ICU': 'Kinderklinik ICU',
'Pediatric Psychiatric': 'Kinderpsychiatrie',
'Pediatrics': 'Kinderheilkunde',
'Peer Details': 'Details zu Peers',
'Peer Registration Details': 'Details zur Peer-Registrierung',
'Peer Registration Request': 'Anfrage zu Peer-Registrierung',
'Peer Registration': 'Peer-Registrierung',
'Peer Type': 'Peer Typ',
'Peer UID': 'Peer UID',
'Peer added': 'Peer hinzugefügt',
'Peer deleted': 'Peer gelöscht',
'Peer not allowed to push': 'Peer ist nicht für das Pushen von Daten zugelassen',
'Peer registration request added': 'Anfrage zu Peer-Registrierung hinzugefügt',
'Peer registration request deleted': 'Anfrage zu Peer-Registrierung gelöscht',
'Peer registration request updated': 'Anfrage zu Peer-Registrierung aktualisiert',
'Peer updated': 'Peer aktualisiert',
'Peer': 'Peer',
'Pending Payments': 'Anstehende Auszahlungen',
'Pending Requests': 'Anstehende Anfragen',
'Pending': 'Anstehend',
'People Needing Food': 'Personen die Nahrungsmittel brauchen',
'People Needing Shelter': 'Personen die Unterkünfte brauchen',
'People Needing Water': 'Personen die Wasser brauchen',
'People Registration': 'Person registrieren',
'People Reservation': 'Gruppe reservieren',
'People Trapped': 'Eingeschlossene Personen',
'People': 'Personen',
'Performance Rating': 'Ergebnisbeurteilung',
'Permanent Home Address': 'Dauerhafte Heimatadresse',
'Person 1, Person 2 are the potentially duplicate records': 'Person 1 und Person 2 sind möglicherweise Duplikate',
'Person De-duplicator': 'Dubletten in Personen auflösen',
'Person Details': 'Details zur Person',
'Person Registry': 'Personendatenbank',
'Person added to Group': 'Person zur Gruppe hinzugefügt',
'Person added to Team': 'Person zum Team hinzugefügt',
'Person added': 'Person hinzugefügt',
'Person deleted': 'Person gelöscht',
'Person details updated': 'Details zur Person aktualisiert',
'Person interviewed': 'Person befragt',
'Person not found': 'Person nicht gefunden',
'Person or OU': 'Person oder Organisationseinheit',
'Person shall not receive allowance payments when this flag is set': 'Der Person soll kein Taschengeld ausgezahlt werden wenn diese Flagge gesetzt ist',
'Person who has actually seen the person/group.': 'Person, die die Person/Gruppe tatsächlich gesehen hat.',
'Person': 'Person',
'Person/Group': 'Person/Gruppe',
'Personal Call': 'Persönliches Gespräch',
'Personal Data': 'Persönliche Daten',
'Personal Effects Details': 'Details zur persönlichen Habe',
'Personal Effects': 'Persönliche Habe',
'Personal Map': 'Persönliche Karte',
'Personal Profile': 'Persönliches Profil',
'Personal impact of disaster': 'Persönliche Auswirkung der Katastrophe',
'Persons by Age Group': 'Personen nach Altersgruppen',
'Persons by Gender': 'Personen nach Geschlecht',
'Persons in institutions': 'Personen in Institutionen',
'Persons with disability (mental)': 'Personen mit Behinderungen (psychischen)',
'Persons with disability (physical)': 'Personen mit Behinderungen (körperlichen)',
'Persons': 'Personen',
'Phone #': 'Telefon #',
'Phone 1': 'Telefon 1',
'Phone 2': 'Telefon 2',
'Phone Call': 'Telefonat',
'Phone': 'Telefon',
'Phone/Business': 'Telefon/Geschäftlich',
'Phone/Emergency': 'Telefon/Notfall',
'Phone/Exchange (Switchboard)': 'Telefon/Exchange (Hauptschalttafel)',
'Photo Details': 'Foto Details',
'Photo Taken?': 'Foto gemacht?',
'Photo added': 'Foto hinzugefügt',
'Photo deleted': 'Foto gelöscht',
'Photo updated': 'Foto aktualisiert',
'Photo': 'Foto',
'Photograph': 'Fotografie',
'Photos and Documents': 'Fotos und Dokumente',
'Photos': 'Fotos',
'Physical Description': 'Physische Beschreibung',
'Physical Safety': 'Physische Sicherheit',
'Picture upload and finger print upload facility': 'Einrichtung um Foto und Fingerabdruck hochzuladen',
'Picture': 'Bild',
'Place of Recovery': 'Ort der Bergung',
'Place on Map': 'Auf Karte plazieren',
'Places for defecation': 'Plätze zur Verrichtung der Notdurft',
'Places the children have been sent to': 'Orte an die Kinder geschickt wurden',
'Planned From': 'Geplant ab',
'Planned Until': 'Geplant bis',
'Planned on': 'Geplant am',
'Planned': 'Geplant',
'Planning': 'In Planung',
'Playing': 'Wiedergabe',
'Please correct all errors.': 'Korrigieren Sie bitte alle Fehler.',
'Please enter a first name': 'Bitte geben Sie den Vornamen ein',
'Please enter a site OR a location': 'Bitte geben Sie eine Stelle oder einen Standort/Gebiet an',
'Please enter the first few letters of the Person/Group for the autocomplete.': 'Bitte geben Sie die ersten Buchstaben der Person/Gruppe für die Autovervollständigung ein.',
'Please enter the recipient': 'Bitte geben Sie den Empfänger ein',
'Please fill this!': 'Bitte ausfüllen!',
'Please provide the URL of the page you are referring to, a description of what you expected to happen & what actually happened.': 'Bitte geben Sie die URL der Seite an, auf die Sie sich beziehen, eine Beschreibung dessen, was Sie erwartet haben & was tatsächlich passiert ist.',
'Please report here where you are:': 'Bitte hier angeben, wo Sie sich befinden:',
'Please select a valid image!': 'Bitte ein gültiges Bild auswählen!',
'Please select an event type': 'Bitte wählen Sie einen Ereignistyp',
'Please select another level': 'Bitte wählen Sie eine andere Ebene',
'Please select': 'Treffen Sie eine Auswahl',
'Please sign-up with your Cell Phone as this allows us to send you Text messages. Please include full Area code.': 'Bitte melden Sie sich unter Angabe Ihrer Mobilfunknummer an. Das erlaubt uns, Ihnen Textnachrichten zu senden. Bitte verwenden Sie die internationale Nummer (Deutschland: 0049.... - ohne führende 0).',
'Please specify any problems and obstacles with the proper handling of the disease, in detail (in numbers, where appropriate). You may also add suggestions the situation could be improved.': 'Bitte geben Sie alle Probleme und Hindernisse bei der korrekten Behandlung der Krankheit im Detail an (in Zahlen, falls zutreffend). Sie können auch Vorschläge machen, wie die Situation verbessert werden kann.',
'Please use this field to record any additional information, including a history of the record if it is updated.': 'Bitte dieses Feld verwenden um zusätzliche Informationen zu hinterlegen, einschließlich der Datensatzhistorie, falls dieser aktualisiert wurde.',
'Please use this field to record any additional information, including any Special Needs.': 'Bitte dieses Feld verwenden um zusätzliche Informationen, einschließlich besonderer Anforderungen, zu hinterlegen.',
'Please use this field to record any additional information, such as Ushahidi instance IDs. Include a history of the record if it is updated.': 'Bitte dieses Feld verwenden um zusätzliche Informationen, wie die Ushahidi Vorgangs-ID, zu hinterlegen, einschließlich der Datensatzhistorie, falls dieser aktualisiert wurde.',
'Pledge Support': 'Zusage von Unterstützung',
'PoI Types': 'PoI Typen',
'Point': 'Point',
'Points of Interest': 'Points of Interest',
'Poisoning': 'Vergiftung',
'Poisonous Gas': 'Gasvergiftung',
'Police': 'Polizei',
'Pollution and other environmental': 'Verschmutzung und andere Umwelt',
'Polygon reference of the rating unit': 'Polygonale Abgrenzung der Bewertungseinheit',
'Poor': 'Arm',
'Population (Day)': 'Belegungszahl (Tag)',
'Population (Night)': 'Belegungszahl (Nacht)',
'Population BEA': 'Belegung BEA',
'Population Other': 'Belegung Sonstige',
'Population Statistic Details': 'Details zur Bevölkerungsstatistik',
'Population Statistic added': 'Bevölkerungsstatistik hinzugefügt',
'Population Statistic deleted': 'Bevölkerungsstatistik gelöscht',
'Population Statistic updated': 'Bevölkerungsstatistik aktualisiert',
'Population Statistics': 'Bevölkerungsstatistiken',
'Population and number of households': 'Bevölkerungs- und Haushaltsanzahl',
'Population': 'Belegung',
'Popup Fields': 'Popup Felder',
'Popup Label': 'Popup Beschriftung',
'Porridge': 'Haferbrei',
'Port Closure': 'Hafenschließung',
'Port': 'Port',
'Portable App': 'Portable App',
'Position Catalog': 'Standpunktkatalog',
'Position added': 'Standpunkt hinzugefügt',
'Position deleted': 'Standpunkt gelöscht',
'Position updated': 'Standpunkt aktualisiert',
'Positions': 'Positionen',
'Postcode': 'PLZ',
'Posted on': 'Gepostet am',
'Posts can be either full pages, embedded within other pages or part of a series (for use as news items or blog posts)': 'Posts können entweder vollständige Seiten sein, in andere Seiten eingebettet werden oder Teil einer Serie sein (z.B. zur Nutzung als Newseintrag oder Blog Post)',
'Poultry restocking, Rank': 'Geflügel auffüllen, Rang',
'Poultry': 'Geflügel',
'Pounds': 'Pfund',
'Power Failure': 'Netzausfall',
'Power': 'Stromversorgung',
'Powered by Sahana': 'Powered by Sahana',
'Pre-cast connections': 'Betonfertigteil-Verbindungen',
'Preferred Name': 'Bevorzugter Name',
'Pregnant women': 'Schwangere Frauen',
'Preliminary Residence Permit until': 'Aufenthaltsgestattung bis',
'Preliminary Residence Permit': 'Aufenthaltsgestattung',
'Preliminary': 'Vorläufig',
'Presence Condition': 'Anwesenheitsbedingung',
'Presence Log': 'Anwesenheitsprotokollierung',
'Presence in the shelter': 'Anwesend in Unterkunft',
'Presence required': 'Anwesenheit erforderlich',
'Presence': 'Anwesenheit',
'Previous Total': 'Vorherige Summe',
'Previous': 'Vorherige',
'Primary Occupancy': 'Primäre Belegung',
'Priority from 1 to 9. 1 is most preferred.': 'Priorität von 1 bis 9. 1 ist die am meisten bevorzugte.',
'Priority': 'Priorität',
'Privacy': 'Datenschutz',
'Private': 'Privat',
'Problem Administration': 'Verwaltung von Problemen',
'Problem Details': 'Problemdetails',
'Problem Group': 'Problemgruppe',
'Problem Title': 'Problemtitel',
'Problem added': 'Problem hinzugefügt',
'Problem connecting to twitter.com - please refresh': 'Verbindungsproblem zu twitter.com - bitte neu laden',
'Problem deleted': 'Problem gelöscht',
'Problem updated': 'Problem aktualisiert',
'Problem': 'Problem',
'Problems': 'Probleme',
'Procedure': 'Vorgehensweise',
'Process Received Shipment': 'Bearbeiten der erhaltenen Lieferung',
'Process Shipment to Send': 'Vorbereiten der Lieferung zum Versenden',
'Procurement & Logistics cost': 'Kosten für Beschaffung & Logistik',
'Profession': 'Beruf',
'Profile Details': 'Details zum Profil',
'Profile Picture?': 'Profilbild?',
'Profile': 'Profil',
'Program Hours (Month)': 'Programmstunden (Monat)',
'Program Hours (Year)': 'Programmstunden (Jahr)',
'Program': 'Programm',
'Programs': 'Programme',
'Progress': 'Verlauf',
'Proj4js definition': 'Proj4js Definition',
'Project Details': 'Details zum Projekt',
'Project Name': 'Name des Projekts',
'Project Status': 'Projektstatus',
'Project added': 'Projekt hinzugefügt',
'Project deleted': 'Projekt gelöscht',
'Project has no Lat/Lon': 'Projekt hat keine Geographische Koordinate (lat/lon)',
'Project updated': 'Projekt aktualisiert',
'Project': 'Projekt',
'Projection Details': 'Details zur Kartenprojektion',
'Projection added': 'Kartenprojektion hinzugefügt',
'Projection deleted': 'Kartenprojektion gelöscht',
'Projection updated': 'Kartenprojektion aktualisiert',
'Projection': 'Kartenprojektion',
'Projections': 'Kartenprojektionen',
'Projects': 'Projekte',
'Property reference in the council system': 'Grundstücksreferenz im Behördensystem',
'Proposed': 'Vorgeschlagen',
'Protected resource': 'Geschützte Ressource',
'Protection': 'Schutz',
'Provide Metadata for your media files': 'Stellen Sie Metadaten für Ihre Mediendateien zur Verfügung',
'Provide an optional sketch of the entire building or damage points. Indicate damage points.': 'Stellen Sie optional eine Skizze des gesamten Gebäudes oder der beschädigten Objekte bereit. Markieren Sie dabei die beschädigten Stellen.',
'Providing Agency': 'Leistungsträger',
'Psychiatrics/Adult': 'Psychiatrie/Erwachsene',
'Psychiatrics/Pediatric': 'Psychiatrie/Kinder',
'Psychosocial Care': 'Psychosoziale Betreuung',
'Psychosocial Support': 'Psychosoziale Unterstützung',
'Public Event': 'Öffentliches Ereignis',
'Public and private transportation': 'Öffentlicher und privater Transport',
'Public assembly': 'Öffentliche Versammlung',
'Public': 'Öffentlich',
'Publish': 'Veröffentlichen',
'Published On': 'Veröffentlicht am',
'Pull tickets from external feed': 'Tickets von externen Feeds laden',
'Purchase Date': 'Kaufdatum',
'Purchase Price': 'Kaufpreis',
'Purchase': 'Kauf',
'Purpose': 'Zweck',
'Push tickets to external system': 'Tickets an externes System übertragen',
'Pyroclastic Flow': 'Pyroklastischer Strom',
'Pyroclastic Surge': 'Pyroklastische Welle',
'Python Serial module not available within the running Python - this needs installing to activate the Modem': 'Python-Serial-Modul ist innerhalb der laufenden Python-Umgebung nicht verfügbar - dieses muss installiert werden, um das Modem zu aktivieren.',
'Python needs the ReportLab module installed for PDF export': 'Python benötigt das installierte ReportLab-Modul für den PDF-Export',
'Quality/Mode': 'Qualität/Modus',
'Quantity Committed': 'Menge bestätigt',
'Quantity Fulfilled': 'Menge erfüllt',
'Quantity Received': 'Erhaltene Menge',
'Quantity Returned': 'Zurückgegebene Menge',
'Quantity Sent': 'Gesendete Menge',
'Quantity in Transit': 'Menge in Transit',
'Quantity range': 'Mengenumfang',
'Quantity': 'Menge',
'Quarantine': 'Quarantäne',
'Queries': 'Abfragen',
'Query': 'Abfrage',
'Queryable?': 'Abfragbar?',
'RC frame with masonry infill': 'RC Rahmen mit Mauerwerkfüllung',
'RECORD A': 'DATENSATZ A',
'RECORD B': 'DATENSATZ B',
'REQ Number': 'Anfragenummer',
'REQ': 'Anfrage',
'RSS Channels': 'RSS Kanäle',
'RSS Posts': 'RSS Posts',
'Race': 'Rasse',
'Radio Callsign': 'Radio Rufzeichen',
'Radiological Hazard': 'Strahlungsgefahr',
'Radiology': 'Radiologie',
'Railway Accident': 'Eisenbahnunfall',
'Railway Hijacking': 'Eisenbahnentführung',
'Rain Fall': 'Niederschlag',
'Rank when ordering cases by status': 'Rang beim Sortieren von Fällen nach Status',
'Rapid Assessment Details': 'Details zur Schnell-Beurteilung',
'Rapid Assessment added': 'Schnell-Beurteilung hinzugefügt',
'Rapid Assessment deleted': 'Schnell-Beurteilung gelöscht',
'Rapid Assessment updated': 'Schnell-Beurteilung aktualisiert',
'Rapid Assessment': 'Schnell-Beurteilung',
'Rapid Assessments & Flexible Impact Assessments': 'Schnell-Beurteilungen & flexible Abschätzungen der Auswirkungen',
'Rapid Assessments': 'Schnell-Beurteilungen',
'Rapid Close Lead': 'Lead schnell schließen',
'Rapid Data Entry': 'Schnelle Dateneingabe',
'Raw Database access': 'Direkter Datenbankzugriff',
'Ready for Transfer': 'Transferbereit',
'Receive New Shipment': 'Neue Lieferung erhalten',
'Receive Shipment': 'Lieferung erhalten',
'Receive this shipment?': 'Lieferung erhalten?',
'Receive': 'Erhalten',
'Received By Person': 'Erhalten von einer Person',
'Received By': 'Erhalten von',
'Received Item Details': 'Details zum erhaltenen Artikel',
'Received Item deleted': 'Erhaltener Artikel gelöscht',
'Received Item updated': 'Erhaltener Artikel aktualisiert',
'Received Shipment Details': 'Details zur erhaltenen Lieferung',
'Received Shipment canceled and items removed from Inventory': 'Erhaltene Lieferung abgebrochen und Artikel aus dem Bestand entfernt',
'Received Shipment canceled': 'Erhaltene Lieferung abgebrochen',
'Received Shipment updated': 'Erhaltene Lieferung aktualisiert',
'Received Shipments': 'Erhaltene Lieferungen',
'Received date': 'Eingangsdatum',
'Received': 'Erhalten',
'Received/Incoming Shipments': 'Erhaltene/Einkommende Lieferungen',
'Receiving and Sending Items': 'Erhalten und Versenden von Artikeln',
'Recipient': 'Empfänger',
'Recipient(s)': 'Empfänger',
'Recipients': 'Empfänger',
'Recommendations for Repair and Reconstruction or Demolition': 'Empfehlungen für Reparatur und Wiederherstellung oder Abriss',
'Record Details': 'Details zum Datensatz',
'Record Saved': 'Datensatz gesichert',
'Record added': 'Datensatz hinzugefügt',
'Record any restriction on use or entry': 'Erfassen Sie jegliche Einschränkung bei Nutzung oder Zutritt',
'Record deleted': 'Datensatz gelöscht',
'Record last updated': 'Datensatz zuletzt aktualisiert',
'Record not found!': 'Datensatz nicht gefunden!',
'Record not found': 'Datensatz nicht gefunden',
'Record updated': 'Datensatz aktualisiert',
'Record': 'Datensatz',
'Recording and Assigning Assets': 'Aufzeichnen und Zuweisen von Anlagen',
'Records': 'Datensätze',
'Recovery Request added': 'Bergungsanfrage hinzugefügt',
'Recovery Request deleted': 'Bergungsanfrage gelöscht',
'Recovery Request updated': 'Bergungsanfrage aktualisiert',
'Recovery Request': 'Bergungsanfrage',
'Recovery Requests': 'Bergungsanfragen',
'Recovery': 'Bergung',
'Recurring Cost': 'Wiederkehrende Kosten',
'Recurring Request?': 'Wiederkehrende Anfrage?',
'Recurring cost': 'Wiederkehrende Kosten',
'Recurring costs': 'Wiederkehrende Kosten',
'Recurring': 'Wiederkehrend',
'Red Cross / Red Crescent': 'Rotes Kreuz / Roter Halbmond',
'Red': 'Rot',
'Ref.No.': 'Ref.Nr.',
'Reference Document': 'Referenzdokument',
'Refresh Rate (seconds)': 'Aktualisierungsrate (Sekunden)',
'Refrigerator': 'Kühlschrank',
'Refugee Support Database': 'Flüchtlingshilfe-Datenbank',
'Refugees': 'Flüchtlinge',
'Region Location': 'Standort Region',
'Region': 'Regierungsbezirk',
'Regional': 'Regional',
'Regions': 'Regionen',
'Register As': 'Registrieren als',
'Register Person into this Camp': 'Registrieren der Person in dieses Camp',
'Register Person into this Shelter': 'Registrieren der Person in diese Unterkunft',
'Register Person': 'Registrieren einer Person',
'Register Surplus Meals Quantity': 'Anzahl überzähliger Essen registrieren',
'Register them as a volunteer': 'Als Freiwillige registrieren',
'Register': 'Registrieren',
'Registered People': 'Registrierte Personen',
'Registered by': 'Registriert von',
'Registered on': 'Registriert am',
'Registered users can <b>login</b> to access the system': 'Registrierte Benutzer können sich <b>anmelden</b> um auf das System zuzugreifen',
'Registration Date': 'Registriert am',
'Registration Details': 'Details zur Registrierung',
'Registration added': 'Registrierung hinzugefügt',
'Registration entry deleted': 'Anmeldungseintrag gelöscht',
'Registration is still pending approval from Approver (%s) - please wait until confirmation received.': 'Die Registrierung wartet noch auf die Genehmigung von der Qualifizierenden Stelle (%s) - bitte warten Sie bis Sie eine Bestätigung erhalten',
'Registration not found': 'Registrierung nicht gefunden',
'Registration updated': 'Anmeldung aktualisiert',
'Registration': 'Registrierung',
'Rehabilitation/Long Term Care': 'Rehabilitation/Langfristige Pflege',
'Reinforced masonry': 'Verstärktes Mauerwerk',
'Rejected': 'Zurückgewiesen',
'Relationship': 'Beziehung',
'Relief Team': 'Unterstützungsteam',
'Relief': 'Unterstützung',
'Religious Leader': 'Religiöser Führer',
'Religious': 'Religiös',
'Relocate as instructed in the <instruction>': 'Verlagern wie in der <instruction> angewiesen',
'Remarks': 'Bemerkungen',
'Remember Me': 'Eingeloggt bleiben',
'Remove Asset from this event': 'Anlage von diesem Ereignis entfernen',
'Remove Asset from this scenario': 'Anlage von diesem Szenario entfernen',
'Remove Facility from this event': 'Einrichtung von diesem Ereignis entfernen',
'Remove Facility from this scenario': 'Einrichtung von diesem Szenario entfernen',
'Remove Family Member': 'Familienmitglied entfernen',
'Remove Human Resource from this event': 'Personelle Ressource von diesem Ereignis entfernen',
'Remove Human Resource from this scenario': 'Personelle Ressource von diesem Szenario entfernen',
'Remove Incident Type from this event': 'Vorfallstyp von diesem Ereignis entfernen',
'Remove Item from Inventory': 'Artikel aus Bestand entfernen',
'Remove Layer from Profile': 'Löschen der Kartenebene aus dem Profil',
'Remove Map Profile from this event': 'Kartenkonfiguration von diesem Ereignis entfernen',
'Remove Map Profile from this scenario': 'Kartenkonfiguration von diesem Szenario entfernen',
'Remove Person from Group': 'Person aus Gruppe entfernen',
'Remove Person from Team': 'Person aus Team entfernen',
'Remove existing data before import': 'Löschen der existierenden Daten vor dem Import',
'Remove this asset from this event': 'Diese Anlage vom Ereignis entfernen',
'Remove this asset from this scenario': 'Diese Anlage vom Szenario entfernen',
'Remove': 'Entfernen',
'Removed from Group': 'Aus Gruppe entfernt',
'Removed from Team': 'Aus Team entfernt',
'Repacked By': 'Umgepackt von',
'Repair': 'Reparieren',
'Repaired': 'Repariert',
'Repairs': 'Reparaturen',
'Repeat your password': 'Kennwort wiederholen',
'Replace if Master': 'Ersetzen wenn Master',
'Replace if Newer': 'Ersetze, falls neuer',
'Replace': 'Ersetzen',
'Report Another Assessment...': 'Weitere Beurteilung melden...',
'Report Details': 'Details zum Bericht',
'Report Options': 'Optionen zum Bericht',
'Report To': 'Melden bei',
'Report Types Include': 'Berichtstypen beinhalten',
'Report added': 'Bericht hinzugefügt',
'Report created': 'Bericht angelegt',
'Report deleted': 'Bericht gelöscht',
'Report my location': 'Meinen Standort melden',
'Report of': 'Bericht von',
'Report the contributing factors for the current EMS status.': 'Melde die beitragenden Faktoren für den aktuellen EMS Status.',
'Report the contributing factors for the current OR status.': 'Melde die beitragenden Faktoren für den aktuellen OR Status.',
'Report them as found': 'Als gefunden melden',
'Report them missing': 'Als vermisst melden',
'Report updated': 'Bericht aktualisiert',
'Report': 'Bericht',
'Reported To': 'Gemeldet bei',
'Reported Transferable': 'Transferierbar gemeldet',
'Reporter Name': 'Name des Meldenden',
'Reporter': 'Meldender',
'Reporting on the projects in the region': 'Berichterstattung über die Projekte in der Region',
'Reports': 'Berichte',
'Repositories': 'Repositories',
'Request Added': 'Anfrage hinzugefügt',
'Request Canceled': 'Anfrage storniert',
'Request Details': 'Details zur Anfrage',
'Request From': 'Anfrage von',
'Request Item Details': 'Details zur Anfrage nach Artikel',
'Request Item added': 'Anfrage nach Artikel hinzugefügt',
'Request Item deleted': 'Anfrage nach Artikel entfernt',
'Request Item from Available Inventory': 'Anfrage nach Artikel aus verfügbarem Bestand',
'Request Item updated': 'Anfrage nach Artikel aktualisiert',
'Request Item': 'Angefragter Artikel',
'Request Items': 'Angefragte Artikel',
'Request Status': 'Anfragestatus',
'Request Templates': 'Anfragevorlagen',
'Request Type': 'Anfragetyp',
'Request Updated': 'Anfrage aktualisiert',
'Request added': 'Anfrage hinzugefügt',
'Request deleted': 'Anfrage gelöscht',
'Request for Role Upgrade': 'Rollenupgrade anfordern',
'Request updated': 'Anfrage aktualisiert',
'Request': 'Anfrage',
'Request, Response & Session': 'Anfrage, Antwort & Sitzung',
'Requested By Facility': 'Angefragt von Einrichtung',
'Requested By': 'Angefragt durch',
'Requested For Facility': 'Angefragt für Einrichtung',
'Requested From': 'Angefragt von',
'Requested Items': 'Angefragte Artikel',
'Requested Skills': 'Angefragte Fähigkeiten',
'Requested by': 'Angefragt durch',
'Requested on': 'Angefragt am',
'Requested': 'Angefragt',
'Requester': 'Anfragender',
'Requests Management': 'Anfragenverwaltung',
'Requests': 'Anfragen',
'Required Skills': 'Benötigte Fähigkeiten',
'Requires Login!': 'Anmeldung erforderlich!',
'Rescue and recovery': 'Rettung und Bergung (SAR)',
'Reset Password': 'Kennwort zurücksetzen',
'Reset': 'Zurücksetzen',
'Residence Permit Type Details': 'Details zum Aufenthaltserlaubnistyp',
'Residence Permit Type created': 'Aufenthaltserlaubnistyp angelegt',
'Residence Permit Type deleted': 'Aufenthaltserlaubnistyp gelöscht',
'Residence Permit Type updated': 'Aufenthaltserlaubnistyp aktualisiert',
'Residence Permit Type': 'Aufenthaltserlaubnistyp',
'Residence Permit Types': 'Aufenthaltserlaubnistypen',
'Residence Status Details': 'Details zum Aufenthaltsstatus',
'Residence Status Type Details': 'Details zum Aufenthaltsstatustyp',
'Residence Status Type created': 'Aufenthaltsstatustyp angelegt',
'Residence Status Type deleted': 'Aufenthaltsstatustyp gelöscht',
'Residence Status Type updated': 'Aufenthaltsstatustyp aktualisiert',
'Residence Status Types': 'Aufenthaltsstatustypen',
'Residence Status created': 'Aufenthaltsstatus angelegt',
'Residence Status deleted': 'Aufenthaltsstatus gelöscht',
'Residence Status updated': 'Aufenthaltsstatus aktualisiert',
'Residence Status': 'Aufenthaltsstatus',
'Residence Statuses': 'Aufenthaltsstatus',
'Residents Report created': 'Bewohnerliste angelegt',
'Residents Report deleted': 'Bewohnerliste gelöscht',
'Residents Report updated': 'Bewohnerliste aktualisiert',
'Residents Report': 'Bewohnerliste',
'Residents Reports': 'Bewohnerlisten',
'Residents': 'Bewohner',
'Resolve Conflict': 'Konflikt lösen',
'Resolve link brings up a new screen which helps to resolve these duplicate records and update the database.': 'Der Link "Auflösen" öffnet eine neue Anzeige, die dabei hilft, diese doppelten Datensätze aufzulösen und die Datenbank zu aktualisieren.',
'Resolve': 'Auflösen',
'Resolved': 'Gelöst',
'Resource Details': 'Details zur Ressource',
'Resource Inventory': 'Ressourcenbestand',
'Resource Type': 'Ressourcentyp',
'Resource added': 'Ressource hinzugefügt',
'Resource deleted': 'Ressource gelöscht',
'Resource updated': 'Ressource aktualisiert',
'Resource': 'Ressource',
'Resources': 'Ressourcen',
'Respiratory Infections': 'Atemwegsinfektionen',
'Response': 'Antwort',
'Restricted Access': 'Eingeschränkter Zugriff',
'Restricted Use': 'Eingeschränkte Verwendung',
'Result': 'Ergebnis',
'Results': 'Ergebnisse',
'Retail Crime': 'Einzelhandelskriminalität',
'Retrieve Password': 'Kennwort abrufen',
'Return to Request': 'Zurück zur Anfrage',
'Return': 'Zurück',
'Returned From': 'Zurückgegeben von',
'Returned by': 'Zurückgegeben von',
'Returned on': 'Zurückgegeben am',
'Returned': 'Zurückgegeben',
'Review Incoming Shipment to Receive': 'Überprüfung der eingehenden Lieferung für die Annahme',
'Rice': 'Reis',
'Rich Text?': 'Rich Text?',
'Riot': 'Aufruhr',
'River Details': 'Details zum Fluss',
'River added': 'Fluss hinzugefügt',
'River deleted': 'Fluss gelöscht',
'River updated': 'Fluss aktualisiert',
'River': 'Fluss',
'Rivers': 'Flüsse',
'Road Accident': 'Verkehrsunfall',
'Road Closed': 'Straße gesperrt',
'Road Conditions': 'Zustand der Straßen',
'Road Delay': 'Verkehrsverzögerung',
'Road Hijacking': 'Straßenentführung',
'Road Usage Condition': 'Straßennutzungszustand',
'Role Details': 'Details zur Rolle',
'Role Name': 'Name der Rolle',
'Role Required': 'Erforderliche Rolle',
'Role Updated': 'Rolle aktualisiert',
'Role added': 'Rolle hinzugefügt',
'Role deleted': 'Rolle gelöscht',
'Role updated': 'Rolle aktualisiert',
'Role': 'Rolle',
'Role-based': 'Rollenbasiert',
'Roles Permitted': 'Zulässige Rollen',
'Roles currently assigned': 'Zurzeit zugewiesene Rollen',
'Roles of User': 'Rollen für Benutzer',
'Roles': 'Rollen',
'Roll On Roll Off Berth': 'RoRo-Anlegeplatz',
'Roof tile': 'Dachziegel',
'Roofs, floors (vertical load)': 'Dächer, Böden (vertikale Belastung)',
'Room Details': 'Details zum Raum',
'Room Inspection': 'Zimmerkontrolle',
'Room No.': 'Raum-Nr.',
'Room added': 'Raum hinzugefügt',
'Room deleted': 'Raum gelöscht',
'Room updated': 'Raum aktualisiert',
'Room': 'Raum',
'Rooms': 'Räume',
'Rows in table': 'Zeilen in der Tabelle',
'Rows selected': 'Ausgewählte Zeilen',
'Run Interval': 'Intervall der Läufe',
'Running Cost': 'Laufzeitkosten',
'Runway Length (m)': 'Länge der Landebahn (m)',
'Runway Surface': 'Oberfläche der Landebahn',
'Runway Width (m)': 'Breite der Landebahn (m)',
'Rural District / District': 'Landkreis / Kreis',
'SMS Modem Channels': 'SMS Modem Kanäle',
'SMS Outbound Gateways': 'SMS Ausgangsgateways',
'SMS SMTP Channels': 'SMS SMTP Kanäle',
'SMS WebAPI Channels': 'SMS WebAPI Kanäle',
'Safe environment for vulnerable groups': 'Sichere Umgebung für gefährdete Gruppen',
'Safety Assessment Form': 'Formular für Sicherheitsbeurteilung',
'Safety of children and women affected by disaster?': 'Ist die Sicherheit von Kindern und Frauen durch die Katastrophe (resp. das Unglück) beeinträchtigt?',
'Sahana Blue': 'Sahana Blau',
'Sahana Community Chat': 'Sahana Gemeinschaft Chat',
'Sahana Eden <=> Other': 'Sahana Eden <=> Andere',
'Sahana Eden Humanitarian Management Platform': 'Sahana Eden - OpenSource Management-Plattform für humanitäre Notsituationen',
'Sahana Eden Website': 'Sahana Eden Internetseite',
'Sahana Steel': 'Sahana Stahl',
'Sahana access granted': 'Sahana Zugriff gewährt',
'Salted Fish': 'Gesalzener Fisch',
'Sanitation problems': 'Sanitäre Probleme',
'Satellite': 'Satellit',
'Saturday': 'Samstag',
'Save': 'Speichern',
'Save: Default Lat, Lon & Zoom for the Viewport': 'Speichern: Standardmäßig Länge/Breite und Zoomfaktor',
'Saved Filters': 'Gespeicherte Filter',
'Saved.': 'Gespeichert.',
'Saving...': 'Wird gespeichert...',
'Scale of Results': 'Umfang der Ergebnisse',
'Scan with Zxing': 'Scannen mit Zxing',
'Scenario Details': 'Details zum Szenario',
'Scenario added': 'Szenario hinzugefügt',
'Scenario deleted': 'Szenario gelöscht',
'Scenario updated': 'Szenario aktualisiert',
'Scenario': 'Szenario',
'Scenarios': 'Szenarios',
'Schedule': 'Zeitplan',
'School Closure': 'Schulschließung',
'School Lockdown': 'Abriegelung der Schule',
'School Teacher': 'Schullehrer',
'School activities': 'Schulaktivitäten',
'School assistance': 'Schulunterstützung',
'School attendance': 'Schulbesuch',
'School destroyed': 'Schule zerstört',
'School heavily damaged': 'Schule stark beschädigt',
'School tents received': 'Schulzelte erhalten',
'School tents, source': 'Herkunft der Schulzelte',
'School used for other purpose': 'Schule wird für andere Zwecke verwendet',
'School': 'Schule',
'School/studying': 'Schule/lernen',
'Schools': 'Schulen',
'Seaports': 'Seehäfen',
'Search Activities': 'Aktivitäten suchen',
'Search Activity Report': 'Aktivitätsbericht suchen',
'Search Addresses': 'Suche nach Adressen',
'Search All Requested Items': 'Alle angeforderten Artikel durchsuchen',
'Search All Requested Skills': 'Alle angefragten Fähigkeiten durchsuchen',
'Search Alternative Items': 'Suche nach alternativen Artikeln',
'Search Assessment Summaries': 'Suche Beurteilungszusammenfassungen',
'Search Assessments': 'Suche Beurteilungen',
'Search Asset Log': 'Suche Anlageprotokoll',
'Search Assets': 'Suche Anlagen',
'Search Baseline Type': 'Referenzdatumstyp suchen',
'Search Baselines': 'Referenzdatum suchen',
'Search Brands': 'Marken suchen',
'Search Budgets': 'Budgets suchen',
'Search Bundles': 'Produktpakete suchen',
'Search Camp Services': 'Camp Leistungen suchen',
'Search Camp Types': 'Camp Typen suchen',
'Search Camps': 'Camps suchen',
'Search Catalog Items': 'Katalog Einträge suchen',
'Search Catalogs': 'Kataloge suchen',
'Search Certificates': 'Zertifikate suchen',
'Search Certifications': 'Zertifizierungen suchen',
'Search Checklists': 'Checklisten suchen',
'Search Cluster Subsectors': 'Cluster Teilbereiche suchen',
'Search Clusters': 'Cluster suchen',
'Search Commitment Items': 'Zugesagte Artikel suchen',
'Search Commitments': 'Zusagen suchen',
'Search Competencies': 'Kompetenzen suchen',
'Search Competency Ratings': 'Kompetenzeinstufungen suchen',
'Search Contact Information': 'Nach Kontaktinformationen suchen',
'Search Contacts': 'Nach Kontakten suchen',
'Search Course Certificates': 'Suchen nach Kurszertifikaten',
'Search Courses': 'Kurse suchen',
'Search Credentials': 'Qualifikationen suchen',
'Search Documents': 'Dokumente suchen',
'Search Donors': 'Spender suchen',
'Search Entries': 'Einträge suchen',
'Search Events': 'Ereignisse suchen',
'Search Facilities': 'Einrichtungen suchen',
'Search Feature Layers': 'Objekt-Ebenen suchen',
'Search Flood Reports': 'Flutberichte suchen',
'Search Groups': 'Gruppen suchen',
'Search Human Resources': 'Personelle Ressourcen suchen',
'Search Identity': 'Identität suchen',
'Search Images': 'Bilder suchen',
'Search Impact Type': 'Auswirkungstypen suchen',
'Search Impacts': 'Auswirkungen suchen',
'Search Incident Reports': 'Vorfallberichte suchen',
'Search Inventory Items': 'Bestandsartikel suchen',
'Search Inventory items': 'Bestandsartikel suchen',
'Search Item Categories': 'Artikelkategorien suchen',
'Search Item Packs': 'Artikelpakete suchen',
'Search Items': 'Artikel suchen',
'Search Job Roles': 'Tätigkeiten suchen',
'Search Keys': 'Schlüssel suchen',
'Search Kits': 'Ausstattungen (Kits) suchen',
'Search Layers': 'Kartenebenen suchen',
'Search Level 1 Assessments': 'Suche Stufe 1 Beurteilungen',
'Search Level 2 Assessments': 'Suche Stufe 2 Beurteilungen',
'Search Locations': 'Gebiet/Standort suchen',
'Search Log Entry': 'Protokolleintrag suchen',
'Search Map Profiles': 'Kartenkonfiguration suchen',
'Search Markers': 'Marker/Symbol suchen',
'Search Members': 'Mitglied suchen',
'Search Membership': 'Mitgliedschaft suchen',
'Search Missions': 'Aufträge suchen',
'Search Need Type': 'Anforderungstyp suchen',
'Search Needs': 'Anforderungen suchen',
'Search Offices': 'Büros suchen',
'Search Organizations': 'Organisationen suchen',
'Search Peer': 'Peer suchen',
'Search Personal Effects': 'Persönliche Habe suchen',
'Search Persons': 'Personen suchen',
'Search Photos': 'Fotos suchen',
'Search Population Statistics': 'Bevölkerungsstatistiken suchen',
'Search Positions': 'Positionen suchen',
'Search Problems': 'Probleme suchen',
'Search Projections': 'Kartenprojektionen suchen',
'Search Projects': 'Projekte suchen',
'Search Queries': 'Suchabfragen',
'Search Rapid Assessments': 'Schnell-Beurteilung suchen',
'Search Received Items': 'Erhaltene Artikel suchen',
'Search Received Shipments': 'Erhaltene Lieferungen suchen',
'Search Records': 'Datensätze suchen',
'Search Registration Request': 'Registrierungsanfragen suchen',
'Search Registrations': 'Registrierungen suchen',
'Search Report': 'Berichte suchen',
'Search Request Items': 'Angefragte Artikel suchen',
'Search Request': 'Anfrage suchen',
'Search Requested Items': 'Angefragte Artikel suchen',
'Search Requests': 'Anfragen suchen',
'Search Resources': 'Ressourcen suchen',
'Search Rivers': 'Flüsse suchen',
'Search Roles': 'Rollen suchen',
'Search Rooms': 'Räume suchen',
'Search Scenarios': 'Szenarien suchen',
'Search Sections': 'Abschnitte suchen',
'Search Sectors': 'Bereiche suchen',
'Search Sent Items': 'Gesendete Artikel suchen',
'Search Sent Shipments': 'Gesendete Lieferungen suchen',
'Search Service Profiles': 'Leistungsprofile suchen',
'Search Settings': 'Sucheinstellungen',
'Search Shelter Services': 'Unterkunftsleistungen suchen',
'Search Shelter Types': 'Unterkunftsarten suchen',
'Search Shelters': 'Unterkünfte suchen',
'Search Shipped Items': 'Suche über gelieferte Artikel',
'Search Skill Equivalences': 'Fähigkeits-Vergleichbarkeiten suchen',
'Search Skill Provisions': 'Fähigkeits-Bereitstellungen suchen',
'Search Skill Types': 'Fähigkeitstypen suchen',
'Search Skills': 'Fähigkeiten suchen',
'Search Solutions': 'Lösungen suchen',
'Search Staff Types': 'Mitarbeitertypen suchen',
'Search Staff or Volunteer': 'Suche Mitarbeiter oder Freiwillige',
'Search Status': 'Status suchen',
'Search Subscriptions': 'Abonnement suchen',
'Search Subsectors': 'Teilbereiche suchen',
'Search Support Requests': 'Unterstützungsanfragen suchen',
'Search Tasks': 'Aufgaben suchen',
'Search Teams': 'Teams suchen',
'Search Themes': 'Themen suchen',
'Search Tickets': 'Tickets suchen',
'Search Tracks': 'Tracks suchen',
'Search Training Participants': 'Suche Kursteilnehmer',
'Search Trainings': 'Schulung suchen',
'Search Twitter Tags': 'Twitter-Tags suchen',
'Search Units': 'Einheiten suchen',
'Search Users': 'Benutzer suchen',
'Search Volunteer Availability': 'Verfügbarkeit von Freiwilligen suchen',
'Search Volunteers': 'Freiwillige suchen',
'Search Warehouses': 'Warenlager suchen',
'Search and Edit Group': 'Suchen und Bearbeiten von Gruppen',
'Search and Edit Individual': 'Suchen und Bearbeiten von einzelnen Personen',
'Search by Skills': 'Suche nach Fähigkeiten',
'Search by owner ID, name or comments': 'Nach Eigentümer-ID, -Name oder Kommentaren suchen',
'Search by service contact reference number': 'Suche nach Leistungsträger-Referenznummer',
'Search by skills': 'Suche nach Fähigkeiten',
'Search for Persons': 'Suche nach Personen',
'Search for Staff or Volunteers': 'Suche nach Mitarbeitern oder Freiwilligen',
'Search for a Location by name, including local names.': 'Suchen nach Standortnamen, einschließlich lokaler Namen.',
'Search for a Person': 'Suche nach einer Person',
'Search for a Project': 'Suche nach einem Projekt',
'Search for a shipment by looking for text in any field.': 'Suche nach einer Lieferung (Volltextsuche)',
'Search for a shipment received between these dates': 'Suche nach einer erhaltenen Lieferung im Zeitraum',
'Search for an Organization by name or acronym': 'Suche nach einer Organisation nach Namen oder Abkürzung',
'Search for an Organization by name or acronym.': 'Suche nach einer Organisation nach Namen oder Abkürzung.',
'Search for an asset by text.': 'Suche Anlage über Text.',
'Search for an item by category.': 'Suche Artikel nach Kategorie.',
'Search for an item by text.': 'Suche Artikel über Text.',
'Search for asset by country.': 'Suche Anlage nach Ländern.',
'Search for multiple IDs (separated by blanks)': 'Suche nach mehreren IDs (durch Leerzeichen getrennt)',
'Search for office by country.': 'Suche Büro nach Ländern.',
'Search for office by organization.': 'Suche Büro nach Organisation.',
'Search for office by text.': 'Suche Büro über Text.',
'Search for warehouse by country.': 'Suche Warenlager nach Ländern.',
'Search for warehouse by organization.': 'Suche Warenlager nach Organisation.',
'Search for warehouse by text.': 'Suche Warenlager über Text.',
'Search here for a person record in order to:': 'Hier nach einem Personendatensatz suchen, um zu:',
'Search location in Geonames': 'Ortssuche in Geonames',
'Search messages': 'Suche Nachrichten',
'Search': 'Suchen',
'Searching for different groups and individuals': 'Suche nach verschiedenen Gruppen und Einzelpersonen',
'Secondary Server (Optional)': 'Sekundärer Server (optional)',
'Seconds must be a number between 0 and 60': 'Sekunden müssen eine Zahl zwischen 0 und 60 sein',
'Section Details': 'Details zum Abschnitt',
'Section deleted': 'Abschnitt gelöscht',
'Section updated': 'Abschnitt aktualisiert',
'Sections': 'Abschnitte',
'Sector Details': 'Details zum Bereich',
'Sector added': 'Bereich hinzugefügt',
'Sector deleted': 'Bereich gelöscht',
'Sector updated': 'Bereich aktualisiert',
'Sector': 'Bereich',
'Sector(s)': 'Bereich(e)',
'Sectors': 'Bereiche',
'Secure Storage Capacity': 'Sichere Lagerkapazität',
'Security Notes': 'Sicherheitsnotizen',
'Security Status': 'Sicherheitsstatus',
'Security problems': 'Sicherheitsprobleme',
'Security': 'Sicherheit',
'See All Entries': 'Siehe alle Einträge',
'See all': 'Alles anzeigen',
'See unassigned recovery requests': 'Siehe nicht zugeordnete Bergungsanfragen.',
'Seized Item Details': 'Details zum beschlagnahmten Gegenstand',
'Seized Item created': 'Beschlagnahmung angelegt',
'Seized Item deleted': 'Beschlagnahmung gelöscht',
'Seized Item updated': 'Beschlagnahmung aktualisiert',
'Seized Items': 'Beschlagnahmte Gegenstände',
'Select All': 'Alles auswählen',
'Select Items from the Request': 'Wählen sie Artikel aus der Anfrage',
'Select Items from this Inventory': 'Wählen sie Artikel aus diesem Bestand',
'Select Land': 'Land auswählen',
'Select Modules for translation': 'Auswahl der Module zum Übersetzen',
'Select a location': 'Wählen Sie einen Ort aus',
'Select a question from the list': 'Wählen sie eine Frage aus der Liste aus',
'Select a range for the number of total beds': 'Wählen sie einen Bereich für die Gesamtanzahl von Betten',
'Select all that apply': 'Wählen Sie alles Zutreffende aus',
'Select an Organization to see a list of offices': 'Wählen Sie eine Organisation aus, um eine Liste der zugehörigen Büros anzuzeigen.',
'Select an image to upload. You can crop this later by opening this record.': 'Wählen Sie ein Bild zum Hochladen aus. Sie können es später zuschneiden, indem Sie diesen Datensatz öffnen.',
'Select resources to import': 'Wählen Sie Ressourcen zum Importieren aus',
'Select the overlays for Assessments and Activities relating to each Need to identify the gap.': 'Wählen Sie die Overlays für Beurteilungen und Aktivitäten zu jeder Anforderung aus, um die Lücke zu identifizieren.',
'Select the person assigned to this role for this project.': 'Wählen Sie die Person aus, die mit dieser Rolle dem Projekt zugeordnet werden soll.',
'Select to show this configuration in the Regions menu.': "Auswählen, um diese Konfiguration im Menü 'Regionen' anzuzeigen.",
'Select': 'Auswahl',
'Selects whether to use a Modem, Tropo or other Gateway for sending out SMS': 'Auswahl, ob ein Modem, Tropo oder eine andere Schnittstelle zum Versand von SMS verwendet werden soll.',
'Send Alerts using Email &/or SMS': 'Senden von Alarmen unter Nutzung von E-Mail und/oder SMS',
'Send Commitment as Shipment': 'Zusage als Lieferung senden',
'Send Message': 'Nachricht senden',
'Send New Shipment': 'Neue Lieferung senden',
'Send Notification': 'Benachrichtigung senden',
'Send Shipment': 'Lieferung senden',
'Send Task Notification': 'Auftragsbenachrichtigung senden',
'Send a message to this person': 'Dieser Person eine Nachricht senden',
'Send a message to this team': 'Diesem Team eine Nachricht senden',
'Send from %s': 'Senden von %s',
'Send message': 'Nachricht senden',
'Send new message': 'Neue Nachricht senden',
'Send': 'Senden',
'Sends & Receives Alerts via Email & SMS': 'Schickt & empfängt Benachrichtigungen über Email und SMS',
'Sent By Person': 'Gesendet von einer Person',
'Sent By': 'Gesendet von',
'Sent Emails': 'Gesendete E-Mails',
'Sent Item Details': 'Details zum versendeten Artikel',
'Sent Item deleted': 'Gesendeter Artikel gelöscht',
'Sent Item updated': 'Gesendeter Artikel aktualisiert',
'Sent Posts': 'Gesendete Posts',
'Sent SMS': 'Gesendete SMS',
'Sent Shipment Details': 'Details zur gesendeten Lieferung',
'Sent Shipment canceled and items returned to Inventory': 'Gesendete Lieferung storniert und Artikel in den Bestand zurückgebracht',
'Sent Shipment canceled': 'Gesendete Lieferung storniert',
'Sent Shipment updated': 'Gesendete Lieferung aktualisiert',
'Sent Shipments': 'Gesendete Lieferungen',
'Sent date': 'Versanddatum',
'Sent to RP': 'Zu RP geschickt',
'Sent': 'Gesendet',
'Separated children, caregiving arrangements': 'von Eltern getrennte Kinder, Pflegevereinbarungen',
'Serial Number': 'Seriennummer',
'Series': 'Serie',
'Server': 'Server',
'Service Catalog': 'Leistungskatalog',
'Service Contact Details': 'Details zum Leistungsträger',
'Service Contact Type added': 'Leistungsträgerart hinzugefügt',
'Service Contact Type deleted': 'Leistungsträgerart gelöscht',
'Service Contact Type updated': 'Leistungsträgerart aktualisiert',
'Service Contact Type': 'Leistungsträgerart',
'Service Contact Types': 'Leistungsträgerarten',
'Service Contact added': 'Leistungsträger hinzugefügt',
'Service Contact deleted': 'Leistungsträger gelöscht',
'Service Contact updated': 'Leistungsträger aktualisiert',
'Service Contacts': 'Leistungsträger',
'Service Record': 'Leistungseintrag',
'Service or Facility': 'Leistung oder Einrichtung',
'Service profile added': 'Leistungsprofil hinzugefügt',
'Service profile deleted': 'Leistungsprofil gelöscht',
'Service profile updated': 'Leistungsprofil aktualisiert',
'Service': 'Leistung',
'Services Available': 'Verfügbare Leistungen',
'Services': 'Leistungen',
'Set Base Site': 'Basisstandort festlegen',
'Set By': 'Definiert durch',
'Set True to allow editing this level of the location hierarchy by users who are not MapAdmins.': "Wählen Sie 'Wahr', um Benutzern, die keine Karten-Admins sind, das Bearbeiten dieser Ebene der Gebietshierarchie zu erlauben.",
'Setting Details': 'Details zur Einstellung',
'Setting added': 'Einstellung hinzugefügt',
'Setting deleted': 'Einstellungen gelöscht',
'Setting updated': 'Einstellung aktualisiert',
'Settings updated': 'Einstellungen aktualisiert',
'Settings were reset because authenticating with Twitter failed': 'Einstellungen wurden zurückgesetzt da die Authentifizierung mit Twitter fehlgeschlagen ist',
'Settings which can be configured through the web interface are available here.': 'Die Einstellungen, die über das Webinterface konfiguriert werden können, sind hier verfügbar.',
'Settings': 'Einstellungen',
'Severe': 'Ernsthaft',
'Severity': 'Wertigkeit',
'Sex': 'Geschlecht',
'Share a common Marker (unless over-ridden at the Feature level)': 'Definiere einen allgemeinen Marker/Symbol (kann auf Objekt-Ebene überschrieben werden)',
'Shelter & Essential NFIs': 'Unterkünfte & Essentielle NFIs',
'Shelter Details': 'Details zur Unterkunft',
'Shelter Flag Details': 'Details zur Unterkunftsflagge',
'Shelter Flag created': 'Unterkunftsflagge hinzugefügt',
'Shelter Flag deleted': 'Unterkunftsflagge gelöscht',
'Shelter Flag updated': 'Unterkunftsflagge aktualisiert',
'Shelter Flags': 'Unterkunftsflaggen',
'Shelter Inspection Details': 'Details zur Unterkunftsinspektion',
'Shelter Inspection created': 'Unterkunftsinspektion erstellt',
'Shelter Inspection deleted': 'Unterkunftsinspektion gelöscht',
'Shelter Inspection updated': 'Unterkunftsinspektion aktualisiert',
'Shelter Inspection': 'Unterkunftsinspektion',
'Shelter Inspections': 'Unterkunftsinspektionen',
'Shelter Name': 'Name der Unterkunft',
'Shelter Registration Status': 'Registrierungsstatus',
'Shelter Registry': 'Unterkunftsregister',
'Shelter Service Details': 'Details zur Unterkunftsleistung',
'Shelter Service added': 'Unterkunftsleistung hinzugefügt',
'Shelter Service deleted': 'Unterkunftsleistung gelöscht',
'Shelter Service updated': 'Unterkunftsleistung aktualisiert',
'Shelter Service': 'Unterkunftsleistung',
'Shelter Services': 'Unterkunftsleistungen',
'Shelter Settings': 'Eigenschaften der Unterkunft',
'Shelter Type Details': 'Details zum Unterkunftstyp',
'Shelter Type added': 'Unterkunftstyp hinzugefügt',
'Shelter Type deleted': 'Unterkunftstyp gelöscht',
'Shelter Type updated': 'Unterkunftstyp aktualisiert',
'Shelter Type': 'Unterkunftstyp',
'Shelter Types and Services': 'Unterkunftstypen und -leistungen',
'Shelter Types': 'Unterkunftstypen',
'Shelter added': 'Unterkunft hinzugefügt',
'Shelter deleted': 'Unterkunft gelöscht',
'Shelter updated': 'Unterkunft aktualisiert',
'Shelter': 'Unterkunft',
'Shelter/NFI Assistance': 'Unterkunft/NFI-Hilfe',
'Shelters': 'Unterkünfte',
'Shipment Created': 'Lieferung erstellt',
'Shipment Items received by Inventory': 'Lieferungsartikel aus Bestand empfangen',
'Shipment Items sent from Inventory': 'Lieferungsartikel von Bestand gesendet',
'Shipment Items': 'Lieferungsartikel',
'Shipment Type': 'Typ der Lieferung',
'Shipment to Send': 'Zu sendende Lieferung',
'Shipments To': 'Lieferungen nach',
'Shipments': 'Lieferungen',
'Shipping cost': 'Lieferkosten',
'Shooting': 'Schießerei',
'Short Assessment': 'Kurzbeurteilung',
'Short Description': 'Kurzbeschreibung',
'Show %(number)s entries': 'Zeige %(number)s Einträge',
'Show Branch Hierarchy': 'Organisationshierarchie anzeigen',
'Show Checklist': 'Checkliste anzeigen',
'Show Details': 'Details anzeigen',
'Show Location?': 'Gebiet/Standort anzeigen?',
'Show Map': 'Karte anzeigen',
'Show Picture': 'Bild anzeigen',
'Show Region in Menu?': 'Region im Menu anzeigen?',
'Show Table': 'Tabelle anzeigen',
'Show author picture?': 'Bild des Autors anzeigen?',
'Show handling instructions at ID checks (e.g. for event registration, payments)': 'Handhabungshinweise bei ID-Prüfungen anzeigen (z.B. Ereignisregistrierung, Auszahlungen)',
'Show handling instructions at check-in': 'Handhabungshinweise bei Check-in anzeigen',
'Show handling instructions at check-out': 'Handhabungshinweise bei Check-out anzeigen',
'Show on Map': 'Auf Karte anzeigen',
'Show on map': 'Auf Karte anzeigen',
'Show totals': 'Summen anzeigen',
'Show': 'Anzeigen',
'Shower Availability': 'Verfügbarkeit von Duschen',
'Shower Handicap Facilities': 'Behindertengerechte Dusche',
'Shower with handicap facilities': 'Dusche mit behindertengerechter Einrichtung',
'Showing 0 to 0 of 0 entries': 'Keine Einträge',
'Showing _START_ to _END_ of _TOTAL_ entries': 'Einträge _START_ bis _END_ von _TOTAL_',
'Sign-up as a volunteer': 'Als Freiwilliger anmelden',
'Sign-up for Account': 'Für Benutzerkennung anmelden',
'Sign-up succesful - you should hear from us soon!': 'Registrierung erfolgreich - Sie werden in Kürze von uns hören!',
'Site Administration': 'Administration der Seite',
'Site Needs added': 'Standortbedarf hinzugefügt',
'Site Needs deleted': 'Standortbedarf gelöscht',
'Site Needs updated': 'Standortbedarf aktualisiert',
'Site Needs': 'Standortbedarf',
'Site': 'Standort',
'Situation Awareness & Geospatial Analysis': 'Situationseinschätzung & Räumliche Analyse',
'Size of Family': 'Grösse der Familie',
'Sketch': 'Skizze',
'Skill Catalog': 'Fähigkeitskatalog',
'Skill Details': 'Details zur Fähigkeit',
'Skill Equivalence Details': 'Details zur Fähigkeits-Vergleichbarkeit',
'Skill Equivalence added': 'Fähigkeits-Vergleichbarkeit hinzugefügt',
'Skill Equivalence deleted': 'Fähigkeits-Vergleichbarkeit gelöscht',
'Skill Equivalence updated': 'Fähigkeits-Vergleichbarkeit aktualisiert',
'Skill Equivalence': 'Fähigkeits-Vergleichbarkeit',
'Skill Equivalences': 'Fähigkeits-Vergleichbarkeiten',
'Skill Provision Catalog': 'Katalog der Fähigkeits-Bereitstellungen',
'Skill Provision Details': 'Details zur Fähigkeits-Bereitstellung',
'Skill Provision added': 'Fähigkeits-Bereitstellung hinzugefügt',
'Skill Provision deleted': 'Fähigkeits-Bereitstellung gelöscht',
'Skill Provision updated': 'Fähigkeits-Bereitstellung aktualisiert',
'Skill Provision': 'Fähigkeits-Bereitstellung',
'Skill Provisions': 'Fähigkeits-Bereitstellungen',
'Skill Status': 'Fähigkeitsstatus',
'Skill TYpe': 'Art der Fähigkeit',
'Skill Type Catalog': 'Fähigkeitstypen-Katalog',
'Skill Type Details': 'Details zum Fähigkeitstyp',
'Skill Type added': 'Fähigkeitstyp hinzugefügt',
'Skill Type deleted': 'Fähigkeitstyp gelöscht',
'Skill Type updated': 'Fähigkeitstyp aktualisiert',
'Skill Types': 'Fähigkeitstypen',
'Skill added': 'Fähigkeit hinzugefügt',
'Skill deleted': 'Fähigkeit gelöscht',
'Skill updated': 'Fähigkeit aktualisiert',
'Skill': 'Fähigkeit',
'Skills Catalog': 'Fähigkeitenkatalog',
'Skills Management': 'Fähigkeitenmanagement',
'Skills': 'Fähigkeiten',
'Skype ID': 'Skype ID',
'Slope failure, debris': 'Hangrutschung, Geröll',
'Small Trade': 'Kleiner Handel',
'Smoke': 'Rauch',
'Snapshot Report': 'Bericht zur aktuellen Lage',
'Snapshot': 'Momentaufnahme',
'Snow Fall': 'Schneefall',
'Snow Squall': 'Schneeschauer',
'Soil bulging, liquefaction': 'Boden aufgequollen, Verflüssigung',
'Solid waste': 'Feste Abfälle',
'Solution Details': 'Details zur Lösung',
'Solution Item': 'Lösungselement',
'Solution added': 'Lösung hinzugefügt',
'Solution deleted': 'Lösung gelöscht',
'Solution updated': 'Lösung aktualisiert',
'Solution': 'Lösung',
'Solutions': 'Lösungen',
'Some': 'Einige',
'Sorry that location appears to be outside the area of the Parent.': 'Entschuldigung, diese Position scheint ausserhalb des Bereichs des übergeordneten Elements zu liegen.',
'Sorry that location appears to be outside the area supported by this deployment.': 'Entschuldigung, diese Position scheint ausserhalb des Bereichs zu liegen, der von dieser Anwendung unterstützt wird.',
'Sorry, I could not understand your request': 'Entschuldigung, leider konnte ich ihre Anfrage nicht verstehen',
'Sorry, only users with the MapAdmin role are allowed to create location groups.': 'Entschuldigung, nur Benutzer mit der Kartenadministrator-Rolle sind berechtigt Gruppen von Standorten/Gebieten zu erstellen.',
'Sorry, only users with the MapAdmin role are allowed to edit these locations': 'Entschuldigung, nur Benutzer mit der Kartenadministrator-Rolle sind berechtigt diese Standorte/Gebiete zu bearbeiten',
'Sorry, something went wrong.': 'Entschuldigung, leider ist etwas schiefgelaufen.',
'Sorry, that page is forbidden for some reason.': 'Entschuldigung, leider ist der Besuch dieser Seite aus einem bestimmten Grund nicht zulässig.',
'Sorry, that service is temporary unavailable.': 'Entschuldigung, leider steht dieser Service vorübergehend nicht zur Verfügung.',
'Sorry, there are no addresses to display': 'Entschuldigung, leider sind keine Adressen zum Anzeigen vorhanden',
'Sought': 'Gesucht',
'Source ID': 'Quellen-ID',
'Source Time': 'Zeit der Quelle',
'Source': 'Quelle',
'Sources of income': 'Einkommensquellen',
'Space Debris': 'Weltraumschrott',
'Spanish': 'Spanisch',
'Special Ice': 'Besonderes Eis',
'Special Marine': 'Spezielles Wasserfahrzeug',
'Special Protection Needs': 'Besonderer Schutzbedarf',
'Specialized Hospital': 'Spezialisiertes Krankenhaus',
'Specific Area (e.g. Building/Room) within the Location that this Person/Group is seen.': 'Bestimmter Bereich (z.B. Gebäude/Raum) innerhalb eines Ortes in der diese Person/Gruppe gefunden werden kann.',
'Specific locations need to have a parent of level': 'Bestimmte Orte benötigen ein übergeordnetes Element der Stufe',
'Specify a descriptive title for the image.': 'Geben Sie einen beschreibenden Titel für das Bild an.',
'Specify the bed type of this unit.': 'Geben Sie den Bettentypen an für diese Einheit an.',
'Specify the number of available sets': 'Geben Sie die Anzahl der verfügbaren Sätze an',
'Specify the number of available units (adult doses)': 'Geben Sie die Anzahl der verfügbaren Einheiten ein (Dosis für Erwachsene)',
'Specify the number of available units (litres) of Ringer-Lactate or equivalent solutions': 'Geben Sie die Anzahl der verfügbaren Einheiten (in Liter) von Ringer-Lactat oder gleichwertige Lösungen ein',
'Specify the number of sets needed per 24h': 'Geben Sie die Anzahl der erforderlichen Sätze pro 24h ein',
'Specify the number of units (Erwachsenendosen) needed per 24h': 'Geben Sie die Anzahl der Einheiten ein (Dosis für Erwachsene) die pro 24h benötigt werden.',
'Specify the number of units (litres) of Ringer-Lactate or equivalent solutions needed per 24h': 'Geben Sie die Anzahl der Einheiten (in Liter) von Ringer-Lactat oder gleichwertigen Lösungen ein, die man pro 24h braucht.',
'Spherical Mercator?': 'Sphärische Mercator-Projektion?',
'Spouse': 'Ehegatte',
'Spreadsheet Importer': 'Import von Tabellendokumenten',
'Spreadsheet uploaded': 'Tabellendokument hochgeladen',
'Squall': 'Bö',
'Staff & Volunteers (Combined)': 'Mitarbeiter & Freiwillige (kombiniert)',
'Staff & Volunteers (combined)': 'Mitarbeiter & Freiwillige (kombiniert)',
'Staff & Volunteers': 'Mitarbeiter & Freiwillige',
'Staff ID': 'Mitarbeiter-ID',
'Staff Management': 'Mitarbeitermanagement',
'Staff Member Details': 'Details zum Mitarbeiter',
'Staff Member added': 'Mitarbeiter hinzugefügt',
'Staff Members': 'Mitarbeiter',
'Staff Record': 'Mitarbeiterakte',
'Staff Report': 'Mitarbeiterbericht',
'Staff Type Details': 'Details zum Mitarbeitertyp',
'Staff Type added': 'Mitarbeitertyp hinzugefügt',
'Staff Type deleted': 'Mitarbeitertyp gelöscht',
'Staff Type updated': 'Mitarbeitertyp aktualisiert',
'Staff Types': 'Mitarbeitertypen',
'Staff and Volunteers': 'Mitarbeiter und Freiwillige',
'Staff member added': 'Mitarbeiter hinzugefügt',
'Staff present and caring for residents': 'Mitarbeiter sind anwesend und versorgen die Bewohner',
'Staff with Contracts Expiring in the next Month': 'Mitarbeiter, deren Verträge im Laufe des nächsten Monats ablaufen',
'Staff': 'Mitarbeiter',
'Staffing': 'Mitarbeiterausstattung',
'Stairs': 'Treppen',
'Start Date': 'Startdatum',
'Start date': 'Startdatum',
'Start of Period': 'Beginn des Zeitraums',
'State / Province': 'Staat / Bundesland',
'State /Province': 'Staat / Bundesland',
'State': 'Bundesland',
'Stationery': 'Büromaterial',
'Statistics': 'Statistiken',
'Status Code': 'Statuscode',
'Status Comment': 'Kommentar zum Status',
'Status Report': 'Statusbericht',
'Status Reports': 'Statusberichte',
'Status Update': 'Statusaktualisierung',
'Status Updated': 'Status aktualisiert',
'Status added': 'Status hinzugefügt',
'Status deleted': 'Status gelöscht',
'Status of clinical operation of the facility.': 'Status von klinischen Möglichkeiten dieser Einrichtung.',
'Status of general operation of the facility.': 'Status von allgemeinen Möglichkeiten dieser Einrichtung.',
'Status of morgue capacity.': 'Status der Leichenhallenkapazität.',
'Status of operations of the emergency department of this hospital.': 'Status von Möglichkeiten der Notaufnahme dieses Krankenhauses.',
'Status of security procedures/access restrictions in the hospital.': 'Status von Sicherheitsverfahren/Zugriffsbeschränkung in diesem Krankenhaus.',
'Status of the operating rooms of this hospital.': 'Der Status der Operationssäle dieses Krankenhauses.',
'Status updated': 'Status aktualisiert',
'Status': 'Status',
'Stay Permit until': 'Aufenthaltserlaubnis bis',
'Steel frame': 'Stahlrahmen',
'Stock Counts': 'Bestandszahlen',
'Stock in Warehouse': 'Bestand im Warenlager',
'Stock': 'Bestand',
'Stolen': 'Gestohlen',
'Store spreadsheets in the Eden database': 'Speichere Tabellendokument in die Eden Datenbank',
'Storeys at and above ground level': 'Stockwerke auf und über der Erdoberfläche',
'Storm Force Wind': 'Wind in Sturmstärke',
'Storm Surge': 'Sturmflut',
'Stowaway': 'Blinder Passagier',
'Street Address': 'Adresse',
'Strong Wind': 'Starker Wind',
'Structural Hazards': 'Strukturelle Gefahren',
'Structural': 'Strukturell',
'Style Field': 'Style-Feld',
'Style Values': 'Style-Werte',
'Styles': 'Styles/Symbolisierungen',
'Sub-type': 'Unterart',
'Subject / Occasion': 'Betreff / Anlass',
'Subject': 'Betreff',
'Submission successful - please wait': 'Absenden erfolgreich - bitte warten',
'Submission successful - please wait...': 'Absenden erfolgreich - bitte warten ...',
'Submit New (full form)': 'Neue Daten absenden (vollständiges Formular)',
'Submit New (triage)': 'Neue Daten absenden (Auswahl)',
'Submit New': 'Neue Daten absenden',
'Submit a request for recovery': 'Registrieren einer Bergungsanfrage',
'Submit new Level 1 assessment (full form)': 'Absenden einer neuen Stufe 1 Beurteilung (vollständiges Formular)',
'Submit new Level 1 assessment (triage)': 'Absenden einer neuen Stufe 1 Beurteilung (Auswahl)',
'Submit new Level 2 assessment': 'Absenden einer neuen Stufe 2 Beurteilung',
'Submit': 'Abschicken',
'Subscription Details': 'Details zum Abo',
'Subscription added': 'Abo hinzugefügt',
'Subscription deleted': 'Abo gelöscht',
'Subscription updated': 'Abo aktualisiert',
'Subscriptions': 'Abonnements',
'Subsector Details': 'Details zum Teilbereich',
'Subsector added': 'Teilbereich hinzugefügt',
'Subsector deleted': 'Teilbereich gelöscht',
'Subsector updated': 'Teilbereich aktualisiert',
'Subsector': 'Teilbereich',
'Subsectors': 'Teilbereiche',
'Subsistence Cost': 'Verpflegungskosten',
'Suburb': 'Vorort',
'Suggest not changing this field unless you know what you are doing.': 'Es wird empfohlen, dieses Feld nur zu ändern, wenn Sie genau wissen, was Sie tun.',
'Suitable': 'Geeignet',
'Summary by Administration Level': 'Zusammenfassung nach Verwaltungsstufe',
'Summary of Incoming Supplies': 'Zusammenfassung der eingehenden Vorräte',
'Summary of Releases': 'Zusammenfassung der Releases',
'Summary': 'Zusammenfassung',
'Sunday': 'Sonntag',
'Supermarket': 'Supermarkt',
'Supper': 'Abendessen',
'Supplier/Donor': 'Lieferant/Spender',
'Suppliers': 'Lieferanten',
'Supply Chain Management': 'Versorgungsketten-Management',
'Support Request': 'Unterstützungsanforderung',
'Support Requests': 'Unterstützungsanforderungen',
'Support provided': 'Durchgeführte Maßnahmen',
'Support with Paperwork': 'Unterstützung bei Formalitäten',
'Supported formats': 'Unterstützte Formate',
'Supports the decision making of large groups of Crisis Management Experts by helping the groups create ranked list.': 'Unterstützt den Entscheidungsprozess großer Gruppen von Krisenmanagement-Experten, indem es den Gruppen ermöglicht, Prioritätenlisten aufzustellen.',
'Surgery': 'Chirurgie',
'Surplus Meals Quantity': 'Anzahl überzähliger Essen',
'Surplus Meals': 'Überzählige Essen',
'Survey Answer Details': 'Details zur Umfrage-Antwort',
'Survey Answer added': 'Umfrage-Antwort hinzugefügt',
'Survey Answer deleted': 'Umfrage-Antwort gelöscht',
'Survey Answer updated': 'Umfrage-Antwort aktualisiert',
'Survey Answer': 'Umfrage-Antwort',
'Survey Module': 'Umfragemodul',
'Survey Name': 'Name der Umfrage',
'Survey Question Details': 'Details zur Umfrage-Frage',
'Survey Question Display Name': 'Angezeigter Name der Umfrage-Frage',
'Survey Question added': 'Umfrage-Frage hinzugefügt',
'Survey Question deleted': 'Umfrage-Frage gelöscht',
'Survey Question updated': 'Umfrage-Frage aktualisiert',
'Survey Question': 'Umfrage-Frage',
'Survey Series Details': 'Details zur Umfragenserie',
'Survey Series Name': 'Name der Umfrageserie',
'Survey Series added': 'Umfrageserie hinzugefügt',
'Survey Series deleted': 'Umfrageserie gelöscht',
'Survey Series updated': 'Umfrageserie aktualisiert',
'Survey Series': 'Umfrageserien',
'Survey Template Details': 'Details zur Umfragenvorlage',
'Survey Template added': 'Umfragenvorlage hinzugefügt',
'Survey Template deleted': 'Umfragenvorlage gelöscht',
'Survey Template updated': 'Umfragevorlage aktualisiert',
'Survey Template': 'Umfragenvorlage',
'Survey Templates': 'Umfragenvorlagen',
'Surveys': 'Umfragen',
'Suspected Stolen Goods': 'Mutmassliches Diebesgut',
'Suspended Cases': 'Gesperrte Fälle',
'Suspended': 'Gesperrt',
'Switch to 3D': 'In Google Earth anzeigen',
'Symbology': 'Symbolisierung',
'Sync Conflicts': 'Synchronisierungskonflikte',
'Sync History': 'Synchronisierungshistorie',
'Sync Now': 'Jetzt synchronisieren',
'Sync Partners are instances or peers (SahanaEden, SahanaAgasti, Ushahidi, etc.) that you want to sync information with. Click on the link on the right to go the page where you can add sync partners, search for sync partners and modify them.': 'Partner für die Synchronisation sind Instanzen oder Peers (SahanaEden, SahanaAgasti, Ushahidi, etc.), mit denen die aktuelle Instanz synchronisiert werden soll. Ein Klick auf den Link rechts bringt Sie zur Seite, auf der Sie diese hinzufügen, suchen und ändern können.',
'Sync Partners': 'Partner für die Synchronisation',
'Sync Pools': 'Synchronisierungspools',
'Sync Schedule': 'Synchronisierungszeitplan',
'Sync Settings': 'Synchronisierungseinstellungen',
'Sync process already started on': 'Sync-Prozess bereits gestartet am',
'Synchronisation': 'Synchronisierung',
'Synchronization Conflicts': 'Synchronisierungskonflikte',
'Synchronization Details': 'Synchronisierung - Details',
'Synchronization History': 'Synchronisierungshistorie',
'Synchronization Peers': 'Synchronisierung von Peers',
'Synchronization Settings': 'Synchronisierungseinstellungen',
'Synchronization allows you to share data that you have with others and update your own database with latest data from other peers. This page provides you with information about how to use the synchronization features of Sahana Eden': 'Die Synchronisation erlaubt es Ihnen, Daten gemeinsam zu nutzen, indem Sie Ihre eigene Datenbank mit aktuellen Daten anderer Peers aktualisieren und umgekehrt. Diese Seite informiert Sie darüber, wie Sie die Synchronisationsfunktionen von Sahana Eden verwenden.',
'Synchronization not configured.': 'Synchronisierung nicht konfiguriert.',
'Synchronization settings updated': 'Synchronisierungseinstellungen wurden aktualisiert',
'Synchronization': 'Synchronisierung',
'Syncronisation History': 'Synchronisierungshistorie',
'TV Set': 'Fernseher',
'Table': 'Tabelle',
'Tags': 'Tags',
'Take shelter in place or per <instruction>': 'Vor Ort Schutz suchen oder gemäß <instruction>',
'Task Description': 'Beschreibung der Aufgabe',
'Task Details': 'Details zur Aufgabe',
'Task List': 'Aufgabenliste',
'Task Status': 'Aufgabenstatus',
'Task added': 'Aufgabe hinzugefügt',
'Task deleted': 'Aufgabe gelöscht',
'Task updated': 'Aufgabe aktualisiert',
'Tasks': 'Aufgaben',
'Team Description': 'Teambeschreibung',
'Team Details': 'Details zum Team',
'Team Id': 'Team ID',
'Team Leader': 'Teamleiter',
'Team Member added': 'Teammitglied hinzugefügt',
'Team Members': 'Teammitglieder',
'Team Name': 'Name des Teams',
'Team Type': 'Typ des Teams',
'Team added': 'Team hinzugefügt',
'Team deleted': 'Team gelöscht',
'Team updated': 'Team aktualisiert',
'Technical testing only, all recipients disregard': 'Diese Benachrichtigung ist ein technischer Test, bitte ignorieren',
'Telecommunications': 'Telekommunikation',
'Telephone': 'Telefon',
'Telephony': 'Telefonie',
'Temp folder %s not writable - unable to apply theme!': 'Temporärer Ordner %s nicht beschreibbar - Layout (theme) kann nicht angewandt werden!',
'Template Name': 'Name der Vorlage',
'Template file %s not readable - unable to apply theme!': 'Template Datei %s nicht lesbar - Layout (theme) kann nicht angewandt werden!',
'Templates': 'Vorlagen',
'Term for the fifth-level within-country administrative division (e.g. a voting or postcode subdivision). This level is not often used.': 'Begriff für die 5. Ebene der Verwaltungshierarchie eines Landes (z.B. eine Wahl- oder Postleitzahlenbereich). Diese Stufe wird nicht oft verwendet.',
'Term for the fourth-level within-country administrative division (e.g. Village, Neighborhood or Precinct).': 'Begriff für die 4. Ebene der Verwaltungshierarchie eines Landes (z.B. Dorf, Stadtteil).',
'Term for the primary within-country administrative division (e.g. State or Province).': 'Begriff für die 1. Ebene der Verwaltungshierarchie eines Landes (z. B. Staat oder Bundesland).',
'Term for the secondary within-country administrative division (e.g. District or County).': 'Begriff für die 2. Ebene der Verwaltungshierarchie eines Landes (z. B. Regierungsbezirk oder Landkreis).',
'Term for the third-level within-country administrative division (e.g. City or Town).': 'Begriff für die 3. Ebene der Verwaltungshierarchie eines Landes (z. B. Ort oder Stadt).',
'Term for the top-level administrative division (i.e. Country).': 'Begriff für die Verwaltung der höchsten Ebene (d. h. Land).',
'Territorial Authority': 'Territoriale Behörde',
'Terrorism': 'Terrorismus',
'Tertiary Server (Optional)': 'Tertiärer Server (Optional)',
'Test Results': 'Testergebnisse',
'Text Color for Text blocks': 'Textfarbe für Textblöcke',
'Thank you for validating your email. Your user account is still pending for approval by the system administator (%s).You will get a notification by email when your account is activated.': 'Danke für die Validierung Ihrer E-Mail. Ihr Benutzerkonto wartet noch auf die Genehmigung durch den Systemadministrator (%s). Sie erhalten eine Benachrichtigung per E-Mail, wenn Ihr Konto aktiviert wurde.',
'Thanks for your assistance': 'Danke für Ihre Hilfe',
'The "query" is a condition like "db.table1.field1==\'value\'". Something like "db.table1.field1 == db.table2.field2" results in a SQL JOIN.': 'Die "query" ist eine Bedingung für "db.table1.field1==\'value\'". Irgendetwas wie "db.table1.field1 == db.table2.field2" führt zu einem SQL JOIN.',
'The Area which this Site is located within.': 'Der Bereich, in dem sich dieser Ort befindet.',
'The Assessment Module stores assessment templates and allows responses to assessments for specific events to be collected and analyze': 'Das Beurteilungsmodul speichert Beurteilungsvorlagen und erlaubt Antworten auf Beurteilungen spezieller Ereignisse zu sammeln und auszuwerten',
'The Assessment Module stores assessment templates and allows responses to assessments for specific events to be collected and analyzed': 'Das Beurteilungsmodul speichert Beurteilungsvorlagen und erlaubt es Antworten zu speziellen Ereignissen zu sammeln und zu analysieren',
'The Assessments module allows field workers to send in assessments.': 'Das Beurteilungsmodul erlaubt allen Aussendienstmitarbeitern ihre Beurteilungen einzusenden.',
'The Author of this Document (optional)': 'Der Autor dieses Dokuments (optional)',
'The Building Asssesments module allows building safety to be assessed, e.g. after an Earthquake.': 'Das Gebäudebeurteilungsmodul erlaubt es, die Sicherheit eines Gebäudes zu beurteilen, z. B. nach einem Erdbeben.',
'The Camp this Request is from': 'Das Camp von dem diese Anfrage stammt',
'The Camp this person is checking into.': 'Das Camp, in das diese Person eincheckt.',
'The Current Location of the Person/Group, which can be general (for Reporting) or precise (for displaying on a Map). Enter a few characters to search from available locations.': 'Die aktuelle Position der Person/Gruppe, welche ungenau (für die Berichterstellung) oder genau (zur Anzeige auf einer Karte) sein kann. Geben Sie einige Zeichen ein, um aus verfügbaren Standorten auszuwählen.',
'The Email Address to which approval requests are sent (normally this would be a Group mail rather than an individual). If the field is blank then requests are approved automatically if the domain matches.': 'Die E-Mail-Adresse, an welche die Genehmigungsanfragen gesendet werden (normalerweise ist das eine Gruppen-Mail, keine Adresse einer Einzelperson). Wenn das Feld leer ist, werden Anforderungen automatisch genehmigt, wenn die Domänennamen übereinstimmen.',
'The Incident Reporting System allows the General Public to Report Incidents & have these Tracked.': 'Das Vorfall-Berichtssystem ermöglicht der Allgemeinheit, Vorfälle zu melden und diese verfolgen zu lassen.',
'The Location the Person has come from, which can be general (for Reporting) or precise (for displaying on a Map). Enter a few characters to search from available locations.': 'Der Herkunftsort der Person kann ungenau (für die Berichterstellung) oder genau (zur Anzeige auf einer Karte) sein. Geben Sie einige Zeichen ein, um aus verfügbaren Standorten auszuwählen.',
'The Location the Person is going to, which can be general (for Reporting) or precise (for displaying on a Map). Enter a few characters to search from available locations.': 'Der Ort, zu dem die Person gehen wird, welcher ungenau (für Berichte) oder genau (für die Darstellung auf einer Karte) sein kann. Geben Sie einige Zeichen ein um aus verfügbaren Standorten auszuwählen.',
'The Media Library provides a catalog of digital media.': 'Das Medienverzeichnis bietet einen Katalog digitaler Medien',
'The Messaging Module is the main communications hub of the Sahana system. It is used to send alerts and/or messages using SMS & Email to various groups and individuals before, during and after a disaster.': 'Das Nachrichtenmodul ist der Hauptknotenpunkt der Kommunikation des Sahana Systems. Es wird verwendet, um Warnungen und/oder andere Nachrichten mit Hilfe von SMS & E-Mail an unterschiedliche Gruppen und Einzelpersonen vor, während und nach einem Katastrophenfall zu schicken.',
'The Organization Registry keeps track of all the relief organizations working in the area.': 'Das Organisationsregister gibt einen Überblick über alle Hilfsorganisationen, die in der Region arbeiten.',
'The Project Tracking module allows the creation of Activities to meet Gaps in Needs Assessments.': 'Das Projektüberwachungsmodul ermöglicht die Erstellung von Aktivitäten um Lücken in Anforderungsbewertungen zu füllen.',
'The Role this person plays within this hospital.': 'Die Rolle die diese Person im Krankenhaus übernimmt.',
'The Shelter Registry tracks all shelters and stores basic details regarding them. It collaborates with other modules to track people associated with a shelter, the services available etc.': 'Das Unterkunftsregister protokolliert alle Unterkünfte und speichert allgemeine Details. Es arbeitet mit anderen Modulen zusammen, um Menschen die sich in einer Unterkunft befinden, sowie die dort zur Verfügung stehenden Leistungen etc. zu dokumentieren.',
'The Shelter this Request is from': 'Die Unterkunft aus welcher diese Anforderung stammt',
'The Shelter this person is checking into.': 'Die Unterkunft in die diese Person eincheckt.',
'The URL for the GetCapabilities page of a Web Map Service (WMS) whose layers you want available via the Browser panel on the Map.': 'Die URL zur "GetCapabilities" Operation eines MapWebService (WMS), dessen Kartenbenen über die Anzeige verfügbar sein sollen.',
'The URL of your web gateway without the post parameters': 'Die URL Ihres Web-Gateways ohne die POST-Parameter',
'The URL to access the service.': 'Die URL für den Zugriff zum Service.',
'The Unique Identifier (UUID) as assigned to this facility by the government.': 'Die eindeutige Kennung (UUID) die dieser Einrichtung von der Regierung zugeordnet wurde.',
'The asset must be assigned to a site OR location.': 'Die Anlage muss einem Standort oder einem Gelände zugeordnet werden',
'The attribute which is used for the title of popups.': 'Das Attribut, welches für den Titel von Dialogfenstern verwendet wird.',
'The attribute within the KML which is used for the title of popups.': 'Das Attribut in der KML das für den Titel der Dialogfenster verwendet wird.',
'The attribute(s) within the KML which are used for the body of popups. (Use a space between attributes)': 'Die Attribute innerhalb der KML, die für den body des Dialogfenster verwendet werden sollen. (Verwenden Sie ein Leerzeichen zwischen Attributen)',
'The body height (crown to heel) in cm.': 'Die Körpergrösse (Kopf bis Fuss) in cm.',
'The country the person usually lives in.': 'Das Land, in dem die Person normalerweise lebt.',
'The default Organization for whom this person is acting.': 'Die Standardorganisation, für die diese Person agiert',
'The default Organization for whom you are acting.': 'Die Standardorganisation für welche Sie agieren',
'The duplicate record will be deleted': 'Der doppelte Datensatz wird gelöscht.',
'The first or only name of the person (mandatory).': 'Der erste oder einzige Name der Person (erforderlich)',
'The following information will be deleted from the record': 'Die folgenden Informationen werden aus dem Datensatz gelöscht',
'The form of the URL is http://your/web/map/service?service=WMS&request=GetCapabilities where your/web/map/service stands for the URL path to the WMS.': 'Das Format der URL ist http://your/web/map/service?service=WMS&request=GetCapabilities wobei your/web/map/service für den Pfad der URL zum WMS steht',
'The language you wish the site to be displayed in.': 'Die Sprache in der die Seite angezeigt werden soll.',
'The list of Brands are maintained by the Administrators.': 'Die Liste der Marken wird von den Administratoren verwaltet.',
'The list of Catalogs are maintained by the Administrators.': 'Die Liste der Kataloge wird vom Administrator verwaltet.',
'The map will be displayed initially with this latitude at the center.': 'Die Karte wird zunächst auf diese Geographische Breite zentriert.',
'The map will be displayed initially with this longitude at the center.': 'Die Karte wird zunächst auf diese Geographische Länge zentriert.',
'The minimum number of features to form a cluster.': 'Die minimale Anzahl von Objekten, die als Cluster angezeigt werden.',
'The name to be used when calling for or directly addressing the person (optional).': 'Der zu verwendende Name beim Anfragen oder direkten Ansprechen der Person (optional).',
'The next screen will allow you to detail the number of people here & their needs.': 'Der nächste Bildschirm erlaubt es, nähere Angaben zur Anzahl Menschen hier & ihrer Bedürfnisse zu machen.',
'The number of Units of Measure of the Alternative Items which is equal to One Unit of Measure of the Item': 'Die Anzahl der Maßeinheiten eines alternativen Artikels, welcher einer Maßeinheit von diesem Artikel entspricht',
'The number of pixels apart that features need to be before they are clustered.': 'Der Abstand in Pixeln, den Objekte mindestens voneinander haben müssen, um nicht zu einem Cluster zusammengefasst zu werden.',
'The number of tiles around the visible map to download. Zero means that the 1st page loads faster, higher numbers mean subsequent panning is faster.': 'Die Anzahl der Teilbilder rund um den sichtbaren Kartenausschnitt die heruntergeladen werden. Null bedeutet, dass die erste Seite schneller geladen wird, höhere Zahlen bedeuten dass nachfolgendes Schwenken schneller ist.',
'The person at the location who is reporting this incident (optional)': 'Die Person vor Ort welche das Ereignis meldet (optional)',
'The post variable containing the phone number': 'Der POST Parameter, der die Telefonnummer beinhaltet',
'The post variable on the URL used for sending messages': 'Der POST Parameter, der die Nachricht beinhaltet.',
'The post variables other than the ones containing the message and the phone number': 'Die POST Parameter, die nicht die Nachricht oder Telefonnummer beinhalten',
'The serial port at which the modem is connected - /dev/ttyUSB0, etc on linux and com1, com2, etc on Windows': 'Der serielle Anschluss, mit dem das Modem verbunden ist - /dev/ttyUSB0 etc. unter Linux und com1, com2 etc. unter Windows',
'The server did not receive a timely response from another server that it was accessing to fill the request by the browser.': 'Der Server hat keine rechtzeitige Antwort von einem anderen Server erhalten, um die Anfrage des Clients beantworten zu können.',
'The server received an incorrect response from another server that it was accessing to fill the request by the browser.': 'Der Server hat eine ungültige Antwort von einem anderen Server erhalten, auf den er zugegriffen hat, um die Anfrage des Browsers zu erfüllen.',
'The site where this position is based.': 'Das Gelände auf dem dieser Standort/Gebiet liegt.',
'The staff responsibile for Facilities can make Requests for assistance. Commitments can be made against these Requests however the requests remain open until the requestor confirms that the request is complete.': 'Die zuständigen Mitarbeiter für Anlagen können Hilfe anfordern. Bezüglich dieser Anfragen können Zusagen gemacht werden. Diese bleiben solange offen, bis der Anforderer bestätigt, dass die Anfrage erfüllt ist.',
'The subject event no longer poses a threat or concern and any follow on action is described in <instruction>': 'Das genannte Ereignis stellt keine Bedrohung oder Sorge mehr dar und jede nachfolgende Aktion ist unter <instruction> beschrieben.',
'The time at which the Event started.': 'Die Zeit zu der das Ereignis startete.',
'The token associated with this application on': 'Das token welches mit dieser Anwendung verbunden ist',
'The total number of family members including this person.': 'Die Gesamtanzahl der Familienmitglieder einschliesslich dieser Person.',
'The type of appointments which are completed with this type of event': 'Die Art von Terminen die mit Ereignissen dieses Typs abgeschlossen werden',
'The unique identifier which identifies this instance to other instances.': 'Die eindeutige Kennung (UUID), die diese Instanz bei der Kommunikation mit anderen Instanzen identifiziert.',
'The way in which an item is normally distributed': 'Die Art, in der ein Artikel normalerweise verteilt wird',
'The weight in kg.': 'Das Gewicht in kg.',
'The': 'Das',
'Thematic Mapping': 'Thematische Kartendarstellung',
'Theme Details': 'Details zum Thema',
'Theme added': 'Thema hinzugefügt',
'Theme deleted': 'Thema gelöscht',
'Theme updated': 'Thema aktualisiert',
'Theme': 'Thema',
'Themes': 'Themen',
'There are errors': 'Es sind Fehler aufgetreten',
'There are insufficient items in the Inventory to send this shipment': 'Es sind nicht genügend Artikel im Bestand, um diese Lieferung abzusenden.',
'There are more than %(max)s results, please input more characters.': 'Es gibt mehr als %(max)s Treffer, bitte geben Sie weitere Zeichen ein.',
'There are multiple records at this location': 'An dieser Stelle gibt es mehrere Datensätze',
'There is no address for this person yet. Add new address.': 'Für diese Person gibt es noch keine Adresse. Fügen Sie eine neue Adresse hinzu.',
'These are settings for Inbound Mail.': 'Dies sind Einstellungen für eingehende Mail.',
'These are the Incident Categories visible to normal End-Users': 'Dies sind die für alle Endbenutzer sichtbaren Kategorien von Vorfällen',
'These need to be added in Decimal Degrees.': 'Diese müssen in Dezimalgrad hinzugefügt werden.',
'They': 'Sie',
'This Group has no Members yet': 'Diese Gruppe hat noch keine Mitglieder',
'This Team has no Members yet': 'Dieses Team hat noch keine Mitglieder',
'This appears to be a duplicate of': 'Dies scheint ein Duplikat zu sein von',
'This appointment is mandatory before transfer': 'Dieser Termin ist zwingend erforderlich vor Transfer',
'This appointment requires the presence of the person concerned': 'Dieser Termin erfordert die Anwesenheit der betroffenen Person',
'This event type requires the presence of the person concerned': 'Dieser Ereignistyp erfordert die Anwesenheit der betroffenen Person',
'This file already exists on the server as': 'Diese Datei existiert bereits auf dem Server als',
'This flag indicates that the person is currently accommodated/being held externally (e.g. in Hospital or with Police)': 'Dieses Flag zeigt an dass die Person momentan extern untergebracht ist oder festgehalten wird (z.B. im Krankenhaus, oder bei der Polizei)',
'This is appropriate if this level is under construction. To prevent accidental modification after this level is complete, this can be set to False.': "Dies ist zulässig, wenn sich die Stufe noch im Aufbau befindet. Um unbeabsichtigte Änderungen zu verhindern, nachdem dieses Level abgeschlossen ist, kann dies auf 'False' gesetzt werden.",
'This is the way to transfer data between machines as it maintains referential integrity.': 'Auf diese Weise werden Daten zwischen Maschinen übertragen, um die referenzielle Integrität aufrechtzuerhalten.',
'This is the way to transfer data between machines as it maintains referential integrity...duplicate data should be removed manually 1st!': 'Auf diese Weise werden Daten zwischen Maschinen übertragen, um die referenzielle Integrität aufrechtzuerhalten. Doppelte Daten sollten vorher manuell entfernt werden!',
'This level is not open for editing.': 'Diese Stufe ist nicht zum Bearbeiten freigegeben.',
'This might be due to a temporary overloading or maintenance of the server.': 'Dies wurde möglicherweise durch eine vorübergehende Überlastung oder Wartung des Servers ausgelöst.',
'This module allows Inventory Items to be Requested & Shipped between the Inventories of Facilities.': 'Dieses Modul ermöglicht es, Bestandsartikel zwischen Beständen verschiedener Anlagen Anzufragen und zu liefern.',
'This module allows the editing of page content using a web browser.': 'Dieses Modul ermöglicht das Editieren der Webseite unter Verwendung des Browsers.',
'This module allows you to plan scenarios for both Exercises & Events. You can allocate appropriate Resources (Human, Assets & Facilities) so that these can be mobilized easily.': 'Mit diesem Modul können Sie Szenarien sowohl für Übungen als auch für Ereignisse planen. Sie können geeignete Ressourcen (Menschen, Anlagen & Einrichtungen) zuordnen, damit diese leicht mobilisiert werden können.',
'This page shows you logs of past syncs. Click on the link below to go to this page.': 'Diese Seite zeigt Ihnen die Protokolle vorheriger Synchronisierungen. Klicken Sie auf den Link unten, um auf diese Seite zu gelangen.',
'This person already belongs to another case group': 'Diese Person gehört bereits zu einer anderen Fallgruppe',
'This person already belongs to this group': 'Diese Person gehört bereits zu dieser Gruppe',
'This process can take a couple of minutes': 'Dieser Vorgang kann einige Minuten dauern',
'This screen allows you to upload a collection of photos to the server.': 'Diese Seite ermöglicht es Ihnen, eine Sammlung von Fotos auf den Server hochzuladen.',
'This setting can only be controlled by the Administrator.': 'Diese Einstellung kann nur vom Systemverwalter vorgenommen werden.',
'This shipment has already been received.': 'Diese Lieferung wurde bereits empfangen.',
'This shipment has already been sent.': 'Diese Lieferung wurde bereits abgeschickt.',
'This shipment has not been received - it has NOT been canceled because it can still be edited.': 'Diese Lieferung wurde noch nicht empfangen - sie wurde NICHT storniert, da sie noch bearbeitet werden kann.',
'This shipment has not been sent - it has NOT been canceled because it can still be edited.': 'Diese Lieferung wurde noch nicht gesendet - sie wurde NICHT storniert, da sie noch bearbeitet werden kann.',
'This shipment will be confirmed as received.': 'Der Empfang dieser Lieferung wird bestätigt.',
'This status applies for new cases unless specified otherwise': 'Dieser Status gilt für neue Fälle wenn nicht anders angegeben',
'This unit is for transitory accommodation upon arrival.': 'Diese Einheit dient zur kurzfristigen Unterbringung bei Ankunft.',
'Thunderstorm': 'Gewitter',
'Thursday': 'Donnerstag',
'Ticket Details': 'Details zum Ticket',
'Ticket ID': 'Ticket-ID',
'Ticket added': 'Ticket hinzugefügt',
'Ticket deleted': 'Ticket gelöscht',
'Ticket updated': 'Ticket aktualisiert',
'Ticketing Module': 'Ticket Modul',
'Tile Mapping Service': 'TileMapService',
'Tilt-up concrete': 'Betonfertigteilbauweise (Tilt-up)',
'Timber frame': 'Holzrahmen',
'Time Out': 'Ausgangszeit',
'Time Question': 'Zeit Frage',
'Time of Day': 'Uhrzeit',
'Timeline Report': 'Bericht zum Zeitplan',
'Timeline': 'Zeitplan',
'Title to show for the Web Map Service panel in the Tools panel.': 'Titel, mit dem die WebMapService-Leiste in der Werkzeugleiste angezeigt wird.',
'Title': 'Titel',
'To Address': 'Empfängeradresse',
'To Location': 'Zum Standort',
'To Organization': 'Zur Organisation',
'To Person': 'Zu Händen von',
'To begin the sync process, click the button on the right =>': 'Zum Starten der Synchronisierung, klicken Sie auf die Schaltfläche auf der rechten Seite =>',
'To begin the sync process, click this button =>': 'Um den Synchronisierungsprozess zu starten, klicken Sie diese Schaltfläche =>',
'To create a personal map configuration, click': 'Um eine persönliche Kartenkonfiguration zu erstellen, klicken Sie auf',
'To edit OpenStreetMap, you need to edit the OpenStreetMap settings in models/000_config.py': 'Zum Bearbeiten von OpenStreetMap müssen Sie die Einstellungen in models/000_config.py anpassen',
'To move the Timeline: use the mouse scroll wheel, the arrow keys or grab and drag the Timeline.': "Um die Zeitachse zu verschieben nutzen Sie bitte das Mausrad, die Pfeiltasten oder verschieben Sie sie per Drag'n Drop",
'To search by job title, enter any portion of the title. You may use % as wildcard.': 'Um nach einer Jobbezeichnung zu suchen, geben sie einen beliebigen Teil des Namens ein. Sie können % als Wildcard verwenden.',
'To variable': '"To"-Variable',
'To': 'Bis',
'Tools': 'Arbeitsmittel',
'Tornado': 'Wirbelsturm',
'Total # of Target Beneficiaries': 'Gesamtzahl der Nutznießer',
'Total # of households of site visited': 'Gesamtzahl der Haushalte des besuchten Geländes',
'Total Beds': 'Betten insgesamt',
'Total Beneficiaries': 'Gesamtzahl Nutznießer',
'Total Budget': 'Gesamtbudget',
'Total Capacity (Night)': 'Gesamtkapazität (Nacht)',
'Total Capacity': 'Gesamtkapazität',
'Total Cost per Megabyte': 'Gesamtkosten pro Megabyte',
'Total Cost per Minute': 'Gesamtkosten pro Minute',
'Total Cost': 'Gesamtkosten',
'Total Monthly Cost': 'Gesamte monatliche Kosten',
'Total Monthly': 'Insgesamt Monatlich',
'Total One-time Costs': 'Summe einmaliger Kosten',
'Total Persons': 'Gesamtzahl an Personen',
'Total Quantity': 'Gesamtmenge',
'Total Records: %(numrows)s': 'Gesamtzahl an Datensätzen %(numrows)s',
'Total Recurring Costs': 'Gesamte wiederkehrende Kosten',
'Total Unit Cost': 'Gesamtstückkosten',
'Total Units': 'Summe Einheiten',
'Total User Accounts': 'Gesamtzahl Benutzerkonten',
'Total Value': 'Gesamtwert',
'Total Volume (m3)': 'Gesamtvolumen (m3)',
'Total Weight (kg)': 'Gesamtgewicht (kg)',
'Total gross floor area (square meters)': 'Gesamte Bruttogeschossfläche (Quadratmeter)',
'Total number of beds in this hospital. Automatically updated from daily reports.': 'Gesamtzahl der Betten in diesem Krankenhaus. Automatisch aktualisiert über die täglichen Berichte.',
'Total number of houses in the area': 'Gesamtzahl der Häuser im Gebiet',
'Total number of schools in affected area': 'Gesamtzahl der Schulen im betroffenen Gebiet',
'Total population of site visited': 'Gesamtzahl der Bevölkerung des besuchten Gebietes',
'Total': 'Summe',
'Tourist Group': 'Touristengruppe',
'Town / Municipality': 'Stadt / Gemeinde',
'Town': 'Stadt',
'Traces internally displaced people (IDPs) and their needs': 'Verfolgung von Binnenflüchtlingen (IDPs) und deren Bedürfnissen',
'Tracing': 'Verfolgung',
'Track Details': 'Details zum Track',
'Track deleted': 'Track gelöscht',
'Track updated': 'Track aktualisiert',
'Track uploaded': 'Track hochgeladen',
'Track with this Person?': 'Diese Person verfolgen?',
'Track': 'Track',
'Tracking of Projects, Activities and Tasks': 'Verfolgen von Projekten, Aktivitäten und Aufgaben',
'Tracking of basic information on the location, facilities and size of the Shelters': 'Verfolgung von Basisinformationen über Ort, Einrichtungen und Größe von Unterkünften',
'Tracks the location, distibution, capacity and breakdown of victims in Shelters': 'Verfolgung der Position, Verteilung, Kapazität und Aufteilung der Opfer auf Unterkünfte',
'Tracks': 'Verfolgungen',
'Traffic Report': 'Datenverkehrsbericht',
'Training Course Catalog': 'Schulungskurs-Katalog',
'Training Details': 'Details zur Schulung',
'Training Event': 'Schulungskurs',
'Training Events': 'Schulungskurse',
'Training Facility': 'Schulungseinrichtung',
'Training Hours (Month)': 'Trainingsstunden (Monat)',
'Training Hours (Year)': 'Trainingsstunden (Jahr)',
'Training Report': 'Schulungsbericht',
'Training added': 'Schulung hinzugefügt',
'Training deleted': 'Schulung gelöscht',
'Training updated': 'Schulung aktualisiert',
'Training': 'Schulung',
'Trainings': 'Weiterbildungen / Übungen',
'Transfer Completed': 'Transfer Erledigt',
'Transfer to': 'Transfer nach',
'Transferable': 'Transferierbar',
'Transferred': 'Transferiert',
'Transit Status': 'Transitstatus',
'Transition Effect': 'Übergangseffekt',
'Transitory Accommodation': 'Durchgangsunterkunft',
'Translation': 'Übersetzung',
'Transportation assistance, Rank': 'Transport-Unterstützung, Rang',
'Trauma Center': 'Trauma Zentrum',
'Travel Cost': 'Reisekosten',
'Tropical Storm': 'Tropischer Sturm',
'Tropo Messaging Token': 'Tropo Nachrichten Token',
'Tropo Settings': 'Tropo Einstellungen',
'Tropo settings updated': 'Tropo Einstellungen aktualisiert',
'Truck': 'Lastwagen',
'Try checking the URL for errors, maybe it was mistyped.': 'Untersuchen Sie die URL auf Fehler, vielleicht war sie falsch geschrieben.',
'Try hitting refresh/reload button or trying the URL from the address bar again.': "Versuchen Sie den Knopf 'Aktualisieren/Erneut Laden' oder versuchen Sie nochmals die URL aus der Adresszeile.",
'Try refreshing the page or hitting the back button on your browser.': "Versuchen Sie die Seite zu aktualisieren oder den 'Zurück'-Knopf im Browser zu nutzen.",
'Tuesday': 'Dienstag',
'Tugboat Capacity': 'Schlepperkapazität',
'Tweeted by': 'Getwittert von',
'Tweeted on': 'Getwittert am',
'Twilio Channels': 'Twilio Kanäle',
'Twitter Channels': 'Twitter Kanäle',
'Twitter ID or #hashtag': 'Twitter-ID oder #hashtag',
'Twitter InBox': 'Twitter Eingang',
'Twitter Search Results': 'Twitter Suchergebnisse',
'Twitter Search': 'Twitter Suche',
'Twitter Settings': 'Einstellungen für Twitter',
'Type of Construction': 'Bautyp',
'Type of water source before the disaster': 'Typ der Wasserquelle vor der Katastrophe',
'Type': 'Typ',
'Types': 'Typen',
'UN': 'UN',
'Un-Repairable': 'Nicht zu reparieren',
'Unable to parse CSV file!': 'CSV Datei kann nicht analysiert werden!',
'Understaffed': 'Unterbesetzt',
'Unidentified': 'Nicht identifiziert',
'Unit Cost': 'Kosten für Einheit',
'Unit Value': 'Einheitswert',
'Unit added': 'Einheit hinzugefügt',
'Unit deleted': 'Einheit gelöscht',
'Unit of Measure': 'Maßeinheit',
'Unit updated': 'Einheit aktualisiert',
'Unit': 'Einheit',
'Units': 'Einheiten',
'Unknown Peer': 'Unbekannter Peer',
'Unknown type of facility': 'Unbekannter Einrichtungstyp',
'Unknown': 'Unbekannt',
'Unmark as duplicate': 'Duplikatsmarkierung entfernen',
'Unreinforced masonry': 'Nicht verstärktes Mauerwerk',
'Unresolved Conflicts': 'Ungelöste Konflikte',
'Unsafe': 'Unsicher',
'Unselect to disable the modem': 'Abwählen um das Modem zu deaktivieren',
'Unsent': 'Nicht gesendet',
'Unspecified': 'Unspezifiziert',
'Unsupported data format!': 'Nicht unterstütztes Datenformat!',
'Unsupported method!': 'Nicht unterstützte Methode!',
'Update Activity Report': 'Aktivitätsbericht aktualisieren',
'Update Allowance Status': 'Taschengeldstatus aktualisieren',
'Update Cholera Treatment Capability Information': 'Aktualisieren der Informationen zu den Cholera Behandlungsmöglichkeiten',
'Update Request': 'Anfrage aktualisieren',
'Update Service Profile': 'Leistungsprofil aktualisieren',
'Update Status': 'Status aktualisieren',
'Update Task Status': 'Status der Aufgabe aktualisieren',
'Update Unit': 'Einheit aktualisieren',
'Update if Master': 'Aktualisiere wenn Master',
'Update if Newer': 'Aktualisiere falls neuer',
'Update now': 'Jetzt aktualisieren',
'Update your current ordered list': 'Aktualisieren Sie Ihre aktuelle geordnete Liste',
'Update': 'Aktualisieren',
'Updated By': 'Aktualisiert von',
'Upload Image': 'Bild hochladen',
'Upload Photos': 'Fotos hochladen',
'Upload Spreadsheet': 'Tabellendokument hochladen',
'Upload Track': 'Track hochladen',
'Upload a Spreadsheet': 'Ein Tabellendokument hochladen',
'Upload a file formatted according to the Template.': 'Laden Sie eine entsprechend der Vorlage formatierte Datei hoch.',
'Upload an Assessment Template import file': 'Importdatei für eine Beurteilungsvorlage hochladen',
'Upload an image file (bmp, gif, jpeg or png), max. 300x300 pixels!': 'Grafikdatei hochladen (bmp, gif, jpeg oder png), max. 300x300 Pixel!',
'Upload an image file here.': 'Laden Sie hier die Grafikdatei hoch.',
'Upload an image, such as a photo': 'Laden Sie eine Grafikdatei hoch, wie beispielsweise ein Foto',
'Upload different Image': 'Anderes Bild hochladen',
'Upload translated files': 'Übersetzte Dateien hochladen',
'Uploaded Image': 'Hochgeladenes Bild',
'Uploaded on': 'Hochgeladen am',
'Upon Request': 'Auf Anfrage',
'Urban Fire': 'Siedlungsfeuer',
'Urban area': 'Stadtgebiet / Ballungsgebiet',
'Urgent': 'Dringend',
'Use (...)&(...) for AND, (...)|(...) for OR, and ~(...) for NOT to build more complex queries.': 'Verwende (...)&(...) für UND, (...)|(...) für ODER und ~(...) für NICHT um komplexere Abfragen zu erstellen.',
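# A minimal sketch of composing queries with &, | and ~ as described in the string
# above, assuming web2py DAL operators; table/field names are hypothetical:
#   q = (db.table1.field1 == 'a') & (db.table1.field2 > 0)     # AND
#   q = (db.table1.field1 == 'a') | (db.table1.field1 == 'b')  # OR
#   q = ~(db.table1.field1 == 'a')                             # NOT
#   rows = db(q).select()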
'Use Geocoder for address lookups?': "Verwendung von 'Geocoder' für Adressenüberprüfung?",
'Use decimal': 'Nutze Dezimalgrad',
'Use default': 'Standardwert verwenden',
'Use deg, min, sec': 'Nutze Grad, Minuten, Sekunden',
'Use for Login?': 'Für Login verwenden?',
'Use these links to download data that is currently in the database.': 'Verwenden Sie diese Links um Daten, die derzeit in der Datenbank liegen herunterzuladen.',
'Used by IRS & Assess': 'Verwendet vom IRS & Assess',
'Used in onHover Tooltip & Cluster Popups to differentiate between types.': 'Verwendet in onHover Tooltip & Cluster Popups um verschiedene Typen zu unterscheiden.',
'Used to build onHover Tooltip & 1st field also used in Cluster Popups to differentiate between records.': 'Verwendet um onHover Tooltip zu erstellen & das 1. Feld wird ebenfalls im Cluster Dialogfeld benutzt um zwischen verschiedenen Datensätzen zu unterscheiden.',
'Used to check that latitude of entered locations is reasonable. May be used to filter lists of resources that have locations.': 'Wird zur Überprüfung genutzt, dass die eingegebene Geographische Breite für den Ort sinnvoll ist. Kann verwendet werden, um Listen von Ressourcen zu filtern, die Standorte haben.',
'Used to check that longitude of entered locations is reasonable. May be used to filter lists of resources that have locations.': 'Wird zur Überprüfung genutzt, dass die eingegebene Geographische Länge für den Ort sinnvoll ist. Kann verwendet werden, um Listen von Ressourcen zu filtern, die Standorte haben.',
'Used to import data from spreadsheets into the database': 'Dient dazu Daten aus Tabellendokumenten in die Datenbank zu übertragen.',
'Used within Inventory Management, Request Management and Asset Management': 'Verwendung bei der Bestands-, Anfrage- und Anlagenverwaltung',
'User Account has been Disabled': 'Das Benutzerkonto wurde deaktiviert',
'User Account': 'Benutzerkonto',
'User Details': 'Details zum Benutzer',
'User Management': 'Benutzerverwaltung',
'User Profile': 'Benutzerprofil',
'User Requests': 'Benutzeranfragen',
'User Role Required': 'Erforderliche Benutzerrolle',
'User Statistics': 'Benutzerstatistik',
'User Updated': 'Benutzer aktualisiert',
'User added': 'Benutzer hinzugefügt',
'User already has this role': 'Der Benutzer hat bereits diese Rolle',
'User deleted': 'Benutzer gelöscht',
'User role required to register events of this type': 'Erforderliche Benutzerrolle um Ereignisse dieses Typs registrieren zu dürfen',
'User updated': 'Benutzer aktualisiert',
'User': 'Benutzer',
'Username': 'Benutzername',
'Users removed': 'Benutzer entfernt',
'Users': 'Benutzer',
'Uses the REST Query Format defined in': 'Verwendet das REST-Abfrageformat das definiert ist in',
'Utilities': 'Dienstprogramme',
'Utility, telecommunication, other non-transport infrastructure': 'Versorgung, Telekommunikation, sonstige Nicht-Verkehrsinfrastruktur',
'Utilization Report': 'Verwendungsbericht',
'Valid From': 'Gültig ab',
'Valid Until': 'Gültig bis',
'Valid until': 'Gültig bis',
'Value per Pack': 'Wert pro Paket',
'Value': 'Wert',
'Various Reporting functionalities': 'Verschiedene Funktionalitäten für das Berichtswesen',
'Vehicle Categories': 'Fahrzeugkategorien',
'Vehicle Crime': 'Fahrzeugkriminalität',
'Vehicle Height (m)': 'Höhe des Fahrzeugs (m)',
'Vehicle Management': 'Fahrzeugmanagement',
'Vehicle Plate Number': 'Fahrzeugnummernschild',
'Vehicle Type': 'Fahrzeugtyp',
'Vehicle Types': 'Fahrzeugtypen',
'Vehicle Weight (kg)': 'Gewicht des Fahrzeugs (kg)',
'Vehicle': 'Fahrzeug',
'Vehicles are assets with some extra details.': 'Fahrzeuge sind Anlagen mit einigen zusätzlichen Details.',
'Vehicles': 'Fahrzeuge',
'Venue': 'Örtlichkeit',
'Verification Status': 'Prüfstatus',
'Verified?': 'Geprüft?',
'Verify password': 'Passwortprüfung',
'Very Good': 'Sehr gut',
'Very High': 'Sehr hoch',
'Vessel Max Length': 'Maximale Länge des Wasserfahrzeugs',
'View Alerts received using either Email or SMS': 'Über E-Mail oder SMS empfangene Warnungen anzeigen',
'View All': 'Alles anzeigen',
'View Error Tickets': 'Fehler Tickets ansehen',
'View Fullscreen Map': 'Vollbild Karte anzeigen',
'View Image': 'Bild anzeigen',
'View Items': 'Artikel anzeigen',
'View On Map': 'Auf Karte anzeigen',
'View Outbox': 'Postausgang anzeigen',
'View Picture': 'Bild anzeigen',
'View Settings': 'Einstellungen anzeigen',
'View Test Result Reports': 'Zeige Berichte der Testergebnisse',
'View Tickets': 'Tickets anzeigen',
'View Translation Percentage': 'Zeige Übersetzungsstatistik',
'View and/or update their details': 'Anzeige und/oder Aktualisieren ihrer Detailinformationen',
'View as Pages': 'Anzeige als Seiten',
'View or update the status of a hospital.': 'Anzeige oder Aktualisieren des Status eines Krankenhauses.',
'View pending requests and pledge support.': 'Anstehende Anforderungen anzeigen und Unterstützung zusagen.',
'View the hospitals on a map.': 'Krankenhäuser auf einer Karte anzeigen',
'View/Edit the Database directly': 'Die Datenbank direkt anzeigen/bearbeiten',
'Village / Suburb': 'Ortschaft / Vorort',
'Village Leader': 'Dorfvorsteher',
'Village': 'Dorf',
'Visible?': 'Sichtbar?',
'Visual Recognition': 'Visuelle Erkennung',
'Volcanic Ash Cloud': 'Wolke vulkanischer Asche',
'Volcanic Event': 'Vulkanisches Ereignis',
'Volume (m3)': 'Volumen (m3)',
'Volunteer Availability': 'Verfügbarkeit von Freiwilligen',
'Volunteer Contact': 'Kontaktdaten des Freiwilligen',
'Volunteer Details': 'Details zu Freiwilligen',
'Volunteer Information': 'Freiwilligeninformation',
'Volunteer Management': 'Management von Freiwilligen',
'Volunteer Project': 'Freiwilligen Projekt',
'Volunteer Record': 'Freiwilligen Datensatz',
'Volunteer Report': 'Freiwilligen Bericht',
'Volunteer Request': 'Freiwilligen Anforderung',
'Volunteer Role Catalog': 'Rollenkatalog für Freiwillige',
'Volunteer Role': 'Rolle des Freiwilligen',
'Volunteer added': 'Freiwilliger hinzugefügt',
'Volunteer availability added': 'Freiwilligen-Verfügbarkeit hinzugefügt',
'Volunteer availability deleted': 'Freiwilligen-Verfügbarkeit gelöscht',
'Volunteer availability updated': 'Freiwilligen-Verfügbarkeit aktualisiert',
'Volunteer deleted': 'Freiwilliger gelöscht',
'Volunteer details updated': 'Details zu Freiwilligen aktualisiert',
'Volunteer': 'Freiwilliger',
'Volunteers were notified!': 'Freiwillige wurden benachrichtigt!',
'Volunteers': 'Freiwillige',
'Vote': 'Abstimmung',
'Votes': 'Abstimmungen',
'WASH': 'WASH',
'WB': 'Frachtbriefnr.',
'Waiting': 'Warten',
'Walking Only': 'Nur zu Fuß',
'Wall or other structural damage': 'Wand oder andere Gebäudeschäden',
'Warehouse Details': 'Details zu Warenlager',
'Warehouse Stock Report': 'Bericht zum Warenlagerbestand',
'Warehouse Stock': 'Lagerbestand',
'Warehouse Type': 'Warenlagertyp',
'Warehouse Types': 'Warenlagertypen',
'Warehouse added': 'Warenlager hinzugefügt',
'Warehouse deleted': 'Warenlager gelöscht',
'Warehouse updated': 'Warenlager aktualisiert',
'Warehouse': 'Warenlager',
'Warehouses': 'Warenlager',
'Warehousing Storage Capacity': 'Warenlager Ablagekapazität',
'Water Sanitation Hygiene': 'Wasser Abwasserentsorgung Hygiene',
'Water collection': 'Wassersammlung',
'Water gallon': 'Wasser Gallonen',
'Water storage containers in households': 'Wasser-Behälter in Haushalten',
'Water supply': 'Wasserversorgung',
'Waybill Number': 'Frachtbriefnummer',
'Web Feature Service': 'WebFeatureService',
'Web Map Service Browser Name': 'WebMapService Browser Name',
'Web Map Service Browser URL': 'WebMapService Browser URL',
'Web Map Service': 'WebMapService',
'Website': 'Webseite',
'Wednesday': 'Mittwoch',
'Weight (kg)': 'Gewicht (kg)',
'Weight': 'Gewicht',
'Welcome to the Sahana Portal at': 'Willkommen beim Sahana Portal',
'Well-Known Text': 'WellKnownText (OGC-WKT)',
'What the Items will be used for': 'Beabsichtigte Verwendung der Artikel',
'Wheat': 'Weizen',
'When reports were entered': 'Wann die Berichte eingegeben wurden',
'Whiskers': 'Barthaare',
'Who is doing what and where': 'Wer macht was und wo',
'Who usually collects water for the family?': 'Wer sammelt normalerweise Wasser für die Familie?',
'Width (m)': 'Breite (m)',
'Width': 'Breite',
'Wild Fire': 'Wildfeuer',
'Wind Chill': 'Kälte vom Wind',
'Window frame': 'Fensterrahmen',
'Winter Storm': 'Wintersturm',
'Women of Child Bearing Age': 'Frauen im gebärfähigen Alter',
'Women participating in coping activities': 'Frauen die sich an den Hilfsaktivitäten beteiligen',
'Women who are Pregnant or in Labour': 'Frauen die schwanger sind oder in den Wehen',
'Womens Focus Groups': 'Focus Gruppen für Frauen',
'Wooden plank': 'Hölzerne Planke',
'Wooden poles': 'Holzmasten',
'Workflow Position': 'Position im Ablauf',
'Working hours end': 'Arbeitszeit Ende',
'Working hours start': 'Arbeitszeit Beginn',
'Working or other to provide money/food': 'Arbeiten oder etwas anderes um Geld/Lebensmittel zur Verfügung zu stellen.',
'X-Ray Done': 'Röntgen erledigt',
'X-Ray Place': 'Röntgen Ort',
'X-Ray': 'Röntgen',
'XYZ Tiles': 'XYZ Tiles',
'YES': 'JA',
'Year built': 'Baujahr',
'Year of Manufacture': 'Herstellungsjahr',
'Year': 'Jahr',
'Years': 'Jahre',
'Yellow': 'Gelb',
'Yes': 'Ja',
'Yes, delete the selected details': 'Ja, die ausgewählten Details löschen',
'You are a recovery team?': 'Sind Sie ein Bergungsteam?',
'You are attempting to delete your own account - are you sure you want to proceed?': 'Sie versuchen Ihr eigenes Konto zu löschen - sind Sie sicher, dass Sie fortfahren möchten?',
'You are currently reported missing!': 'Sie sind derzeit als vermisst gemeldet!',
'You can change the configuration of synchronization module in the Settings section. This configuration includes your UUID (unique identification number), sync schedules, beacon service and so on. Click the following link to go to the Sync Settings page.': 'Sie können die Konfiguration des Synchronisierungsmodules unter Einstellungen anpassen. Diese Konfiguration enthält ihre UUID (unique identification number), Synchronisierungszeitpläne, Beacon-Service, usw. . Klicken sie auf den folgenden Link um zu den Einstellungen für die Synchronisierung zu gelangen.',
'You can click on the map below to select the Lat/Lon fields': 'Sie können auf die untere Karte klicken, um die geographische Länge und Breite (Lat/Lon) auszuwählen.',
'You can search by name, ID or case number': 'Sie können nach Namen, ID oder Fallnummer recherchieren',
'You can search by name, ID or comments': 'Sie können nach Name, ID oder Kommentaren suchen',
'You can search by name, ID, EasyOpt number and comments': 'Sie können nach Namen, ID, EasyOpt Nummer oder Kommentaren recherchieren',
'You can select the Draw tool': 'Sie können das Zeichen Tool verwenden',
'You can set the modem settings for SMS here.': 'Sie können die Modemeinstellungen für SMS hier festlegen.',
'You can use the Conversion Tool to convert from either GPS coordinates or Degrees/Minutes/Seconds.': 'Sie können das Konvertierungsprogramm verwenden, um GPS-Koordinaten oder Grad/Minuten/Sekunden umzuwandeln.',
'You do not have permission for any facility to make a commitment.': 'Sie haben keine Berechtigung für irgendeine Einrichtung eine Zusage zu machen.',
'You do not have permission for any facility to make a request.': 'Sie haben keine Berechtigung für irgendeine Einrichtung eine Anfrage zu starten.',
'You do not have permission for any site to add an inventory item.': 'Sie haben keine Berechtigung für irgendein Gelände einen Bestandsartikel hinzuzufügen.',
'You do not have permission for any site to receive a shipment.': 'Sie haben keine Berechtigung für irgendein Gelände eine Lieferung anzunehmen.',
'You do not have permission for any site to send a shipment.': 'Sie haben keine Berechtigung für irgendein Gelände eine Lieferung abzusenden.',
'You do not have permission to cancel this received shipment.': 'Sie haben keine Berechtigung diese erhaltene Lieferung zu löschen.',
'You do not have permission to cancel this sent shipment.': 'Sie haben keine Berechtigung diese gesendete Lieferung zu löschen.',
'You do not have permission to make this commitment.': 'Sie haben keine Berechtigung diese Zusage zu machen.',
'You do not have permission to receive this shipment.': 'Sie haben keine Berechtigung diese Lieferung entgegenzunehmen.',
'You do not have permission to send a shipment from this site.': 'Sie haben keine Berechtigung Lieferungen von diesem Gelände zu senden.',
'You do not have permission to send messages': 'Sie haben keine Berechtigung, Nachrichten zu versenden',
'You do not have permission to send this shipment.': 'Sie haben keine Berechtigung diese Lieferung zu senden.',
'You have a personal map configuration. To change your personal configuration, click': 'Sie haben eine persönliche Kartenkonfiguration. Um ihre persönliche Konfiguration zu ändern, klicken Sie hier',
'You have found a dead body?': 'Sie haben eine Leiche gefunden?',
'You must be logged in to register volunteers.': 'Sie müssen angemeldet sein, um Freiwillige zu registrieren.',
'You must be logged in to report persons missing or found.': 'Sie müssen angemeldet sein, um fehlende oder gefundene Personen zu melden.',
'You must provide a series id to proceed.': 'Sie müssen eine serien-id vorweisen, um fortzufahren.',
'You should edit Twitter settings in models/000_config.py': 'Sie sollten die Twitter Einstellungen unter models/000_config.py bearbeiten',
'Your current ordered list of solution items is shown below. You can change it by voting again.': 'Ihre aktuelle, geordnete Liste der Lösungselemente wird unten angezeigt. Sie können es durch Abstimmen erneut verändern.',
'Your post was added successfully.': 'Der Eintrag wurde erfolgreich hinzugefügt.',
'Your system has been assigned a unique identification (UUID), which other computers around you can use to identify you. To view your UUID, you may go to Synchronization -> Sync Settings. You can also see other settings on that page.': 'Ihr System verfügt über eine eindeutige ID (UUID), die andere Computer nutzen können, um Sie zu identifizieren. Zum Anzeigen Ihrer UUID gehen Sie zu Synchronisierung -> Sync Einstellungen. Sie können auf dieser Seite auch andere Einstellungen einsehen.',
'Zero Hour': 'Stunde null',
'Zinc roof': 'Zinkdach',
'Zoom Levels': 'Zoomebenen',
'Zoom in': 'Hineinzoomen',
'Zoom to Current Location': 'Auf aktuelles Gebiet/Standort fokussieren',
'Zoom to maximum map extent': 'Auf maximale Kartenausdehung fokussieren',
'Zoom': 'Zoomen',
'active': 'aktiv',
'added': 'hinzugefügt',
'all records': 'Alle Datensätze',
'allows a budget to be developed based on staff & equipment costs, including any admin overheads.': 'Ermöglicht ein Budget zu entwickeln, basierend auf Mitarbeiter- und Gerätekosten, einschließlich aller administrativen Gemeinkosten.',
'allows for creation and management of surveys to assess the damage following a natural disaster.': 'Ermöglicht die Erstellung und Verwaltung von Umfragen zur Beurteilung von Schäden nach einer Naturkatastrophe.',
'an individual/team to do in 1-2 days': 'Eine Aufwand von 1-2 Tagen für ein einzelnes Team',
'and': 'und',
'assigned': 'zugewiesen',
'average': 'Durchschnitt',
'black': 'schwarz',
'blue': 'blau',
'brown': 'braun',
'business_damaged': 'Business_beschädigt',
'by': 'durch',
'can be used to extract data from spreadsheets and put them into database tables.': 'Kann verwendet werden um Daten von einer Tabelle zu extrahieren und diese in Datenbanktabellen einzutragen.',
'check all': 'Alles markieren',
'checked-in': 'eingecheckt',
'checked-out': 'ausgecheckt',
'click for more details': 'hier klicken, um mehr Details zu erhalten',
'consider': 'Berücksichtigen',
'curly': 'lockig',
'currently registered': 'derzeitig registriert',
'daily': 'täglich',
'dark': 'dunkel',
'data uploaded': 'hochgeladene Daten',
'database %s select': 'Datenbank %s ausgewählt',
'database': 'Datenbank',
'deceased': 'Verstorbene',
'delete all checked': 'Alle Ausgewählten löschen',
'deleted': 'gelöscht',
'deposited': 'in Verwahrung',
'design': 'Design',
'diseased': 'erkrankt',
'displaced': 'vertrieben',
'disposed of/destroyed': 'entsorgt/vernichtet',
'divorced': 'geschieden',
'done!': 'fertig!',
'duplicate': 'Dublette',
'eg. gas, electricity, water': 'zum Beispiel Gas, Strom, Wasser',
'enclosed area': 'eingeschlossener Bereich',
'export as csv file': 'Exportieren als CSV-Datei',
'fat': 'fett',
'feedback': 'Rückmeldung',
'female': 'weiblich',
'flag': 'Flagge',
'flags': 'Flaggen',
'fluent': 'fliessend',
'flush latrine with septic tank': 'Spültoilette mit Klärgrube',
'food_sources': 'lebensmittel_quellen',
'forehead': 'Stirn',
'forwarded to RP': 'weitergeleitet an RP',
'forwarded': 'weitergeleitet',
'found': 'gefunden',
'from Twitter': 'aus Twitter',
'green': 'grün',
'grey': 'grau',
'here': 'hier',
'high': 'hoch',
'hourly': 'stündlich',
'households': 'Haushalte',
'identified': 'identifiziert',
'ignore': 'ignorieren',
'illiterate': 'Analphabet',
'in Deg Min Sec format': 'im Format Grad Minuten Sekunden',
'in housing units': 'in Unterkunftseinheiten',
'in staging area (PX)': 'im Sammelbereich (PX)',
'inactive': 'inaktiv',
'injured': 'verletzt',
'insert new %s': 'neue %s hinzufügen',
'insert new': 'neu einfügen',
'interpreter required': 'Dolmetscher erforderlich',
'inv Home Page': 'inv Homepage',
'invalid request': 'Ungültige Anfrage',
'invalid': 'ungültig',
'is a central online repository where information on all the disaster victims and families, especially identified casualties, evacuees and displaced people can be stored. Information like name, age, contact number, identity card number, displaced location, and other details are captured. Picture and finger print details of the people can be uploaded to the system. People can also be captured by group for efficiency and convenience.': 'ist ein zentrales online Verzeichnis, in dem Informationen zu allen Opfern und Familien der Katastrophe gespeichert werden können, insbesondere identifizierte Verluste, Evakuierte, Flüchtlinge, Heimatlose. Informationen wie Name, Alter, Kontaktnummer, Ausweisnummer, Vertriebenen-Ort und andere Details werden erfasst. Fotos und Fingerabdrücke der Leute können auf das System hochgeladen werden. Personen können zum Zweck der Effizienz und Einfachheit auch in Gruppen zusammengefasst werden',
'is envisioned to be composed of several sub-modules that work together to provide complex functionality for the management of relief and project items by an organization. This includes an intake system, a warehouse management system, commodity tracking, supply chain management, fleet management, procurement, financial tracking and other asset and resource management capabilities': 'ist so konzipiert, dass es aus mehreren Untermodulen besteht. Diese arbeiten zusammen, um Organisationen komplexe Funktionalitäten zur Unterstützung von Hilfen und Durchführung von Projekten zur Verfügung zu stellen. Dies beinhaltet ein Aufnahmesystem, ein Warenlager Management System, Produkt-Tracking, Versorgungsketten-Management, Fahrzeugbestand Management, Beschaffungswesen, Finanz-Tracking und andere Bestands- und Resource Management Einsatzmöglichkeiten.',
'keeps track of all incoming tickets allowing them to be categorised & routed to the appropriate place for actioning.': 'Überwacht alle eingehenden Tickets, so dass diese entsprechend eingestuft und an die entsprechende Stelle zur Bearbeitung geleitet werden können.',
'latrines': 'Toiletten',
'leave empty to detach account': 'Leerlassen um das Konto zu entfernen/aufzuheben.',
'legend URL': 'URL zur Legende',
'less': 'weniger',
'light': 'hell',
'literate': 'schriftkundig',
'loading': 'wird geladen',
'login': 'Anmeldung',
'long': 'lang',
'long>12cm': 'lang > 12cm',
'low': 'niedrig',
'male': 'männlich',
'manual': 'manuell',
'married': 'verheiratet',
'medium': 'mittel',
'medium<12cm': 'mittel < 12 cm',
'meters': 'meter',
'missed': 'verpasst',
'missing': 'fehlend',
'module allows the site administrator to configure various options.': 'Modul das dem Seitenadministrator ermöglicht verschiedene Optionen zu konfigurieren.',
'module helps monitoring the status of hospitals.': 'Modul das hilft den Status von Krankenhäusern zu überwachen',
'module provides a mechanism to collaboratively provide an overview of the developing disaster, using online mapping (GIS).': 'Modul das gemeinschaftlich einen Mechanismus bietet einen GIS-gestützen Überblick über die sich entwickelnde Lage zu erhalten.',
'more': 'mehr',
'n/a': 'nicht zutreffend',
'native': 'Muttersprache',
'negroid': 'Negroid',
'never': 'nie',
'new record inserted': 'Neuen Datensatz eingefügt',
'new': 'neu',
'next 100 rows': 'Nächste 100 Zeilen',
'no': 'nein',
'none': 'nichts',
'not accessible - no cached version available!': 'Nicht verfügbar - keine zwischengespeicherte Version verfügbar!',
'not accessible - using cached version from': 'Nicht verfügbar - benutze zwischengespeicherte Version von',
'not checked-in!': 'nicht eingecheckt!',
'not specified': 'nicht angegeben',
'obsolete': 'obsolet',
'on': 'ein',
'once': 'einmal',
'open defecation': 'Verrichtung der Bedürfnisse im Freien',
'or Drop here': 'oder hier ablegen',
'or drop here': "oder hier per Drag'n Drop ablegen",
'or import from csv file': 'oder aus CSV-Datei importieren',
'other': 'sonstige',
'over one hour': 'über eine Stunde',
'paid': 'bezahlt',
'pending': 'anstehend',
'people': 'Personen',
'per': 'nach',
'piece': 'Stück',
'pit latrine': 'Grubenlatrine',
'pit': 'Grube',
'postponed': 'zurückgestellt',
'preliminary template or draft, not actionable in its current form': 'vorläufige Vorlage oder Entwurf, nicht aussagekräftig in seiner jetzigen Form',
'previous 100 rows': 'Vorherige 100 Zeilen',
'record does not exist': 'Datensatz ist nicht vorhanden',
'record id': 'Datensatz ID',
'red': 'rot',
'refused': 'zurückgewiesen',
'reports successfully imported.': 'Berichte erfolgreich importiert.',
'representation of the Polygon/Line.': 'Darstellung der Fläche/Linie.',
'retired': 'Außer Dienst',
'returned to owner': 'an Eigentümer zurückgegeben',
'river': 'Fluss',
'see comment': 'siehe Kommentar',
'selected': 'ausgewählt',
'separated from family': 'von Familie getrennt',
'separated': 'getrennt',
'shaved': 'rasiert',
'short': 'kurz',
'short<6cm': 'kurz < 6cm',
'sides': 'Seiten',
'sign-up now': 'Jetzt Registrieren',
'simplified/slow': 'vereinfacht/langsam',
'single': 'alleinstehend',
'slim': 'dünn',
'specify': 'genauer beschreiben',
'staff members': 'Mitarbeiter',
'staff': 'Personal',
'state location': 'Beschaffenheit des Standorts',
'state': 'Zustand',
'straight': 'gerade',
'suffered financial losses': 'Finanzielle Verluste erlitten',
'table': 'Tabelle',
'tall': 'groß',
'this': 'Dieses',
'to access the system': 'um auf das System zuzugreifen',
'tonsure': 'Tonsur',
'total': 'Summe',
'tweepy module not available within the running Python - this needs installing for non-Tropo Twitter support!': 'Tweepy-Modul ist in der laufenden Python-Umgebung nicht verfügbar - es muss für die Twitter-Unterstützung ohne Tropo installiert werden!',
'unable to parse csv file': 'CSV Datei kann nicht analysiert werden',
'uncheck all': 'Alles deselektieren',
'unidentified': 'nicht identifiziert',
'unknown': 'unbekannt',
'unspecified': 'unspezifiziert',
'unverified': 'ungeprüft',
'updated': 'aktualisiert',
'updates only': 'nur Aktualisierungen',
'verified': 'verifiziert',
'volunteer': 'Freiwilliger',
'volunteers': 'Freiwillige',
'wavy': 'wellig',
'weekly': 'wöchentlich',
'white': 'weiß',
'wider area, longer term, usually contain multiple Activities': 'Größerer Bereich, längere Sicht, enthält normalerweise mehrere Aktivitäten',
'widowed': 'verwitwet',
'within human habitat': 'Im menschlichen Lebensraum',
'written-only': 'nur schriftlich',
'xlwt module not available within the running Python - this needs installing for XLS output!': 'xlwt-Modul ist in der laufenden Python-Umgebung nicht verfügbar - es muss für die XLS-Ausgabe installiert werden!',
'yes': 'ja',
}
| 59.666232 | 931 | 0.767347 |
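A note on the record above: it is the tail of a web2py-style language file, i.e. a single Python dict literal mapping English UI strings to their German translations. A minimal, hypothetical loader for such a file — assuming it really contains nothing but one dict literal, and the path "languages/de.py" is only illustrative — could look like this:

import ast

def load_translations(path):
    # ast.literal_eval safely parses the dict literal without executing code
    with open(path, encoding="utf-8") as fh:
        return ast.literal_eval(fh.read())

# translations = load_translations("languages/de.py")
# translations.get("Warehouse")  # -> 'Warenlager'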
4a2043539a8beda8629c2d1d8f7829b65703f448 | 1,092 | py | Python | work_at_olist/authors/viewsets.py | diego-marcelino/work-at-olist | e0d7de566969c0b8ae555732ab52248e0f7e0481 | [
"MIT"
] | null | null | null | work_at_olist/authors/viewsets.py | diego-marcelino/work-at-olist | e0d7de566969c0b8ae555732ab52248e0f7e0481 | [
"MIT"
] | 16 | 2021-02-08T07:22:24.000Z | 2022-03-30T12:07:05.000Z | work_at_olist/authors/viewsets.py | diego-marcelino/work-at-olist | e0d7de566969c0b8ae555732ab52248e0f7e0481 | [
"MIT"
] | 1 | 2020-08-12T14:30:07.000Z | 2020-08-12T14:30:07.000Z | from django.utils.translation import gettext_lazy as _
from django_filters import rest_framework as filters
from rest_framework.viewsets import ModelViewSet
from .models import Author
from .serializers import AuthorSerializer
class AuthorFilter(filters.FilterSet):
"""Filter for authors."""
name = filters.CharFilter(field_name='name', lookup_expr='icontains',
help_text=_('Filter by any part of author name'))
class Meta:
"""Meta info for author filter."""
model = Author
fields = ('name',)
class AuthorViewSet(ModelViewSet):
"""Author view set.
create:
Create a new author.
retrieve:
Retrieve a specific author's information by its id.
list:
Retrieve a paginated list of authors. Filter by any part of the author name.
update:
Update an author's information.
partial_update:
Partially update an author's information.
destroy:
Delete an author.
"""
serializer_class = AuthorSerializer
queryset = Author.objects.all()
filterset_class = AuthorFilter
| 23.73913 | 79 | 0.690476 |
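A hedged usage sketch for the viewset above: wiring it into a DRF router. The import path mirrors the repo layout shown in the record metadata, and the "authors" route prefix is an assumption, not something the source defines.

from rest_framework.routers import DefaultRouter
from work_at_olist.authors.viewsets import AuthorViewSet  # path assumed from metadata

router = DefaultRouter()
router.register(r"authors", AuthorViewSet, basename="author")
urlpatterns = router.urls
# GET /authors/?name=tolk then exercises AuthorFilter's icontains lookup on `name`.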
4a2043a130044ec36817c839ea040210d94c89d5 | 1,058 | py | Python | script/preprocessing/tokenizer.py | henriettekhn/MLinPractice | ad05f0137a407a17e75ffca08706c5250bcd0d49 | [
"MIT"
] | null | null | null | script/preprocessing/tokenizer.py | henriettekhn/MLinPractice | ad05f0137a407a17e75ffca08706c5250bcd0d49 | [
"MIT"
] | null | null | null | script/preprocessing/tokenizer.py | henriettekhn/MLinPractice | ad05f0137a407a17e75ffca08706c5250bcd0d49 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Tokenize the tweet into individual words.
Created on Wed Oct 6 13:59:54 2021
@author: lbechberger
"""
from script.preprocessing.preprocessor import Preprocessor
import nltk
class Tokenizer(Preprocessor):
"""Tokenizes the given input column into individual words."""
def __init__(self, input_column, output_column):
"""Initialize the Tokenizer with the given input and output column."""
super().__init__([input_column], output_column)
# don't need to implement _set_variables(), since no variables to set
def _get_values(self, inputs):
"""Tokenize the tweet."""
tokenized = []
for tweet in inputs[0]:
sentences = nltk.sent_tokenize(tweet)
tokenized_tweet = []
for sentence in sentences:
words = nltk.word_tokenize(sentence)
tokenized_tweet += words
            # store the token list as its string representation - presumably so the
            # result fits a plain string column in the downstream pipeline
            tokenized.append(str(tokenized_tweet))
return tokenized | 28.594595 | 78 | 0.621928 |
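A minimal sketch of driving the Tokenizer above directly. The column names are assumptions, the Preprocessor base class may expose a different public entry point, and NLTK's "punkt" models must have been downloaded once beforehand.

import nltk
nltk.download("punkt")  # one-time download of the sentence tokenizer models

from script.preprocessing.tokenizer import Tokenizer  # path assumed from metadata

tok = Tokenizer(input_column="tweet", output_column="tweet_tokenized")
# _get_values takes a list of input columns; here one column holding a single tweet
tok._get_values([["Hello world. This is a tweet!"]])
# -> ["['Hello', 'world', '.', 'This', 'is', 'a', 'tweet', '!']"]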
4a2043bba8bfb188f86d2595d6fa097381cf61ec | 794 | py | Python | setup.py | steveWinter/python-fmrest | 2a3243b0b22021d435ae4a239fecc5cc2fc546bb | [
"MIT"
] | null | null | null | setup.py | steveWinter/python-fmrest | 2a3243b0b22021d435ae4a239fecc5cc2fc546bb | [
"MIT"
] | null | null | null | setup.py | steveWinter/python-fmrest | 2a3243b0b22021d435ae4a239fecc5cc2fc546bb | [
"MIT"
] | null | null | null | from setuptools import setup
with open('README.md', 'r', encoding='utf-8') as ld:
long_description = ld.read()
setup(
name='python-fmrest',
version='1.2.1',
python_requires='>=3.6',
author='David Hamann',
author_email='[email protected]',
description='python-fmrest is a wrapper around the FileMaker Data API.',
long_description=long_description,
long_description_content_type='text/markdown',
url='https://github.com/davidhamann/python-fmrest',
packages=['fmrest'],
include_package_data=True,
install_requires=['requests>=2'],
classifiers=(
'Programming Language :: Python',
'Programming Language :: Python :: 3.6',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent'
)
)
| 30.538462 | 76 | 0.661209 |
4a2044139ce9399210988dce1bd54b2c48e21aa2 | 3,389 | py | Python | asyncpokepy/ahttp.py | DJJ05/asyncpokepy | 8c2b05a3427bacf2335bf1e379a2dd6d92364b68 | [
"MIT"
] | 1 | 2021-11-08T11:06:29.000Z | 2021-11-08T11:06:29.000Z | asyncpokepy/ahttp.py | DevilJamJar/asyncpokepy | 8c2b05a3427bacf2335bf1e379a2dd6d92364b68 | [
"MIT"
] | null | null | null | asyncpokepy/ahttp.py | DevilJamJar/asyncpokepy | 8c2b05a3427bacf2335bf1e379a2dd6d92364b68 | [
"MIT"
] | 1 | 2021-09-28T18:17:35.000Z | 2021-09-28T18:17:35.000Z | """
The MIT License (MIT)
Copyright (c) 2020 Raj Sharma
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import aiohttp
from asyncpokepy.exceptions import *
class ahttp:
    """HTTP helper class. The aiohttp client session is created lazily on the
    first request, so the ``session`` attribute starts out as ``None``.
    """
    def __init__(self):
        self.session = None
self.baseurl = 'https://pokeapi.co/api/v2/'
self.codes = {
301:URLMoved,
302:URLMoved,
401:AuthorisationError,
404:UnknownPokemon,
405:MethodNotAllowed,
408:RequestTimeout,
429:RateLimitation,
500:InternalServerError
}
async def makesession(self):
"""Creates a client session
"""
if not self.session:
self.session = aiohttp.ClientSession()
async def killsession(self):
"""Kills a client session
"""
if self.session:
await self.session.close()
self.session = None
async def pokeget(self, poke: str):
"""Base pokemon get request
Args:
poke (str): The pokéapi endpoint
"""
await self.makesession()
url = self.baseurl + 'pokemon/' + poke
async with self.session.get(url) as cs:
if cs.status == 200:
try:
return await cs.json()
except aiohttp.ContentTypeError:
return await cs.text()
if cs.status in self.codes:
exception = self.codes[cs.status]
raise exception()
else:
raise UnCaughtError(code=cs.status, error=cs.reason)
async def typeget(self, type: str):
"""Base type get request
Args:
type (str): The pokéapi endpoint
"""
await self.makesession()
url = self.baseurl + 'type/' + type
async with self.session.get(url) as cs:
if cs.status == 200:
try:
return await cs.json()
except aiohttp.ContentTypeError:
return await cs.text()
if cs.status in self.codes:
exception = self.codes[cs.status]
raise exception()
else:
raise UnCaughtError(code=cs.status, error=cs.reason)
| 33.89 | 115 | 0.614636 |
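A hedged usage sketch for the ahttp helper above. It performs real network requests against PokéAPI, so the concrete response fields shown are illustrative only.

import asyncio
from asyncpokepy.ahttp import ahttp  # import path assumed from the record's layout

async def main():
    client = ahttp()
    data = await client.pokeget("pikachu")  # GET .../pokemon/pikachu
    print(data["name"], data["id"])
    await client.killsession()              # always close the aiohttp session

asyncio.run(main())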
4a20447f2063d722bdf287ef1b211bc2d5c59ac3 | 3,624 | py | Python | qcdb/__init__.py | nuwandesilva/qcdb | b47fb2ed550fc4176198ddb1dbea3724d6704d23 | [
"BSD-3-Clause"
] | null | null | null | qcdb/__init__.py | nuwandesilva/qcdb | b47fb2ed550fc4176198ddb1dbea3724d6704d23 | [
"BSD-3-Clause"
] | null | null | null | qcdb/__init__.py | nuwandesilva/qcdb | b47fb2ed550fc4176198ddb1dbea3724d6704d23 | [
"BSD-3-Clause"
] | null | null | null | #
# @BEGIN LICENSE
#
# QCDB: quantum chemistry common driver and databases
#
# Copyright (c) 2007-2017 The QCDB Developers.
#
# The copyrights for code used from other parties are included in
# the corresponding files.
#
# This file is part of QCDB.
#
# QCDB is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, version 3.
#
# QCDB is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with QCDB; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# @END LICENSE
#
"""Module to facilitate quantum chemical computations on chemical
databases. Contains Molecule class and physical constants from psi4 suite.
"""
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
#__version__ = '0.4'
__author__ = 'Lori A. Burns'
# Figure out psidatadir: envvar trumps staged/installed
import os
qcdb_module_loc = os.path.dirname(os.path.abspath(__file__))
pymod = os.path.normpath(os.sep.join(['@PYMOD_INSTALL_LIBDIR@', '@CMAKE_INSTALL_LIBDIR@', 'qcdb']))
if pymod.startswith(os.sep + os.sep):
pymod = pymod[1:]
pymod_dir_step = os.sep.join(['..'] * pymod.count(os.sep))
data_dir = os.sep.join([qcdb_module_loc, pymod_dir_step, '@CMAKE_INSTALL_DATADIR@', 'qcdb'])
if 'PSIDATADIR' in os.environ.keys():
data_dir = os.path.expanduser(os.environ['PSIDATADIR'])
elif 'CMAKE_INSTALL_DATADIR' in data_dir:
data_dir = os.sep.join([os.path.abspath(os.path.dirname(__file__)), '..', 'share', 'qcdb'])
data_dir = os.path.abspath(data_dir)
if not os.path.isdir(data_dir):
raise KeyError('Unable to read the data folder - check the PSIDATADIR environment variable'
' Current value of PSIDATADIR is {}'.format(data_dir))
from .metadata import __version__, version_formatter
#from .driver import *
from .driver import energy, properties, hessian, gradient, frequency
from .driver import optking, geometric
from .driver import vpt2
from .driver.cbs_driver import cbs
from .driver.cbs_helpers import *
from .driver.driver_helpers import get_variable, print_variables
from .qcvars import get_variable_details
from .driver.driver_helpers import set_options, get_active_options
from .driver.driver_helpers import set_molecule, activate
from .driver.yaml import yaml_run
#from .header import print_header
## Load Python modules
#import sys
from .molecule import Molecule #, compute_atom_map
#from .dbproc import *
#from .options import *
#from . import moptions
#from .qcformat import *
#from . import cfour
#from . import jajo
#from . import orca
#from .orient import OrientMols
#from .dbwrap import Database, DB4 #DatabaseWrapper, ReactionDatum, Reagent, Reaction
#from .libmintspointgrp import SymmetryOperation, PointGroup
from .libmintsbasisset import BasisSet, basishorde
#from .libmintsmolecule import LibmintsMolecule
#from .basislist import *
#from . import align
from . import vib
#from . import molparse
#
## Load items that are useful to access from an input file
#from .psiutil import *
from .vib import compare_vibinfos
from .testing import *
#from .physconst import *
from .exceptions import *
#from .util import *
#from .driver.endorsed_plugins import *
#from .datastructures import QCAspect
| 34.514286 | 99 | 0.767108 |
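As the resolution logic above shows, PSIDATADIR (when set) overrides both the staged and the installed data directory. A small sketch of pointing the package at a custom data folder before import; the path is purely hypothetical:

import os
os.environ["PSIDATADIR"] = "/opt/qcdb/share/qcdb"  # hypothetical location
import qcdb  # raises KeyError at import time if that folder does not exist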
4a20449440a810d3962802b75f54f3c6a9141590 | 920 | py | Python | ecommerce/products/models.py | starboi02/e-commerce-CMS | 9307854521a44d4277a0045541c0754201f3c1ff | [
"Apache-2.0"
] | 2 | 2020-04-21T19:49:48.000Z | 2022-03-26T10:24:15.000Z | ecommerce/products/models.py | starboi02/e-commerce-CMS | 9307854521a44d4277a0045541c0754201f3c1ff | [
"Apache-2.0"
] | 8 | 2020-02-02T05:45:39.000Z | 2021-06-10T19:43:24.000Z | ecommerce/products/models.py | starboi02/e-commerce-CMS | 9307854521a44d4277a0045541c0754201f3c1ff | [
"Apache-2.0"
] | 6 | 2020-02-02T13:50:58.000Z | 2021-10-04T17:43:41.000Z | from django.db import models
from django.urls import reverse
# Category Model
# TODO- Add model for sub-category inside the categories
class Category(models.Model):
title = models.CharField(max_length=300)
def __str__(self):
return self.title
# Product Model
class Product(models.Model):
mainimage = models.ImageField(upload_to='products/', blank=True)
name = models.CharField(max_length=300)
slug = models.SlugField()
category = models.ForeignKey(Category, on_delete=models.CASCADE)
preview_text = models.TextField(max_length=200, verbose_name='Preview Text')
detail_text = models.TextField(max_length=1000, verbose_name='Detail Text')
price = models.FloatField()
old = models.FloatField()
def __str__(self):
return self.name
def get_absolute_url(self):
return reverse("mainapp:product", kwargs={
'slug': self.slug
}) | 30.666667 | 80 | 0.703261 |
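An illustrative sketch of the models above, to be run inside a configured Django project; it assumes a URLconf that defines the "mainapp:product" route expected by get_absolute_url, which the record itself does not show.

cat = Category.objects.create(title="Shoes")
prod = Product.objects.create(
    name="Runner", slug="runner", category=cat,
    preview_text="Fast.", detail_text="Very fast.", price=49.9, old=59.9,
)
prod.get_absolute_url()  # -> reverse("mainapp:product", kwargs={"slug": "runner"})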
4a2044aea5c663dd5092b7d7dd822749d6d8374c | 13,638 | py | Python | medimg/dcm/mr/ge.py | henrymj/nimsdata | 7009c3316d9cb4255de44bd46258979a18a1e224 | [
"MIT"
] | 2 | 2016-03-26T09:43:55.000Z | 2016-11-15T11:22:06.000Z | medimg/dcm/mr/ge.py | henrymj/nimsdata | 7009c3316d9cb4255de44bd46258979a18a1e224 | [
"MIT"
] | null | null | null | medimg/dcm/mr/ge.py | henrymj/nimsdata | 7009c3316d9cb4255de44bd46258979a18a1e224 | [
"MIT"
] | 1 | 2017-11-18T10:06:57.000Z | 2017-11-18T10:06:57.000Z | # @author: Kevin S. Hahn
"""
nimsdata.medimg.dcm.mr.ge
=========================
load all data from a set of GE Dicoms, almost all information is available via non-private tags.
Cannot be instantiated. This object is merely a container for GE specific processing functions.
GE saves screenshot of the graphical prescription for each scan, the image contains some useful metadata.
therefore, any missing values should remain MISSING, regardless if they are needed in calculations, to
attempt to ensure correctness.
"""
import logging
import dcmstack
import numpy as np
import generic_mr
from ... import nimsdicom
log = logging.getLogger(__name__)
GEMS_TYPE_ORIG = ['ORIGINAL', 'PRIMARY', 'OTHER']
GEMS_TYPE_DERIVED_RFMT = ['DERIVED', 'SECONDARY', 'REFORMATTED', 'AVERAGE']
TAG_RECON_FLAG = (0x0043, 0x107d)
TAG_BVALUE = (0x0043, 0x1039) # CSA_BVALUE = 'Slop_int_6...Slop_int_9'
TAG_BVEC = [(0x0019, 0x10bb), (0x0019, 0x10bc), (0x0019, 0x10bd)] # CSA_BVEC = ['UserData20', 'UserData21', 'UserData22']
TAG_MUX_NUM_BANDS = (0x0019, 0x10bd)
MAX_LOC_DCMS = nimsdicom.MAX_LOC_DCMS
MetaExtractor = nimsdicom.MetaExtractor
NIMSDicomError = nimsdicom.NIMSDicomError
def infer_psd_type(self):
"""
Infer the psd type based on the manufacturer and psd name.
This heuristic is entirely based on parsing the name.
Parameters
----------
self : NIMSDicom instance
uses self.psd_type
Returns
-------
None : NoneType
sets self.psd_type
"""
if not self.psd_name:
self.psd_type = 'unknown'
else:
if 'service' in self.psd_name:
self.psd_type = 'service'
elif self.psd_name == 'sprt':
self.psd_type = 'spiral'
elif self.psd_name == 'sprl_hos':
self.psd_type = 'hoshim'
elif self.psd_name == 'basic':
self.psd_type = 'basic'
elif 'mux' in self.psd_name or 'mb_' in self.psd_name:
self.psd_type = 'muxepi'
elif 'epi' in self.psd_name:
self.psd_type = 'epi'
elif self.psd_name in ['probe-mega', 'gaba_ss_cni', 'gaba_special']:
self.psd_type = 'mrs'
elif self.psd_name.startswith('special_siam') or self.psd_name.startswith('mega_special'):
self.psd_type = 'mrs'
elif self.psd_name == 'asl':
self.psd_type = 'asl'
elif self.psd_name in ['bravo', '3dgrass']:
self.psd_type = 'spgr'
elif 'fgre' in self.psd_name: # also want to catch efgre3d
self.psd_type = 'gre'
elif self.psd_name == 'ssfse':
self.psd_type = 'fse'
elif self.psd_name == 'cube':
self.psd_type = 'cube'
elif self.psd_name.endswith('b1map'): # XXX general enough?
self.psd_type = 'fieldmap' # XXX is this correct "type"?
else:
self.psd_type = 'unknown'
log.debug(self.psd_type)
def parse_one(self):
"""
Composer function, parses all metadata that can be parsed from a single dicom.
Called by NIMSData init, if dicom manufacturer is GE Medical Sytems.
"""
generic_mr.parse_standard_mr_tags(self)
self.psd_name = self.getelem(self._hdr, 'PulseSequenceName', str, '').lower()
self.psd_iname = self.getelem(self._hdr, 'InternalPulseSequenceName')
self.fov_x, self.fov_y = 2 * [self.getelem(self._hdr, 'ReconstructionDiameter', float)]
self.receive_coil_name = self.getelem(self._hdr, 'ReceiveCoilName')
self.mt_offset_hz = self.getelem(self._hdr, 'OffsetFrequency', float)
effective_echo_spacing = self.getelem(self._hdr, 'EffectiveEchoSpacing', float)
self.effective_echo_spacing = effective_echo_spacing / 1e6 if effective_echo_spacing else None
asset_r = self.getelem(self._hdr, 'AssetRFactors', None, [None, None])
if isinstance(asset_r, unicode) and '\\' in asset_r: # GE Signa HDxt stores asset as string '1\1'
asset_r = map(int, asset_r.split('\\')) # reformat to [1, 1] for consistency
elif isinstance(asset_r, float): # asset_r can be single item float
asset_r = [None, None]
self.phase_encode_undersample, self.slice_encode_undersample = asset_r
    # some very old GE systems output dicoms that don't define Locations in Acquisition, or define it in an
    # unusual way. The value type may be mislabeled as OB without a translatable value, which makes the
    # MetaExtractor exclude it from its output metadata.
self.num_slices = self.getelem(self._hdr, 'LocationsInAcquisition', int)
self.total_num_slices = self.getelem(self._hdr, 'ImagesInAcquisition', int)
self.num_timepoints = self.getelem(self._hdr, 'NumberOfTemporalPositions', int)
# slice check could end up wrong, if both total_num_slices and num_slices are None
# could force num_slices and total_num_slices into different ORs, to prevent matching if both are None
# thus only when they are both defined, AND not equal, can this test pass
if (self.total_num_slices or 1) == (self.num_slices or 0):
self.total_num_slices = (self.num_slices or 1) * (self.num_timepoints or 1)
log.debug('adjusted total_num_slices from %3d to %3d' % (self.num_slices, self.total_num_slices)) # num_slices == 'old' total_num
    # some localizers don't have a header field indicating the number of slices per
    # acquisition. If the total number of slices is set and num_timepoints is 1,
    # then num_slices should equal the total number of slices
if not self.num_slices and (self.num_timepoints or 1) == 1:
self.num_slices = self.total_num_slices
prescribed_duration = (self.tr or 0) * (self.num_timepoints or 0) * (self.num_averages or 1) # FIXME: only works for fMRI, not anatomical
if prescribed_duration != 0:
self.prescribed_duration = prescribed_duration
self.duration = prescribed_duration
else:
self.prescribed_duration = None
self.duration = None
dwi_dirs = self.getelem(self._hdr, 'UserData24{#DTIDiffusionDir.,Release10.0&Above}', float)
self.dwi_dirs = int(dwi_dirs) if dwi_dirs else None
if self.image_type == GEMS_TYPE_ORIG and (self.dwi_dirs or 0) >= 6:
self.is_dwi = True
self.num_timepoints = 1
if self.image_type == GEMS_TYPE_DERIVED_RFMT:
self.is_non_image = True
infer_psd_type(self)
    if self.psd_type == 'muxepi':
self.num_bands = self.getelem(self._hdr, TAG_MUX_NUM_BANDS)
generic_mr.adjust_fov_acqmat(self)
generic_mr.infer_scan_type(self)
def parse_all(self):
"""
Parse all metadata that requires all dicoms.
Called by NIMSDicom load_data, if dicom manufacturer is GE Medical System.
"""
if self.total_num_slices < MAX_LOC_DCMS:
slice_norms = [np.cross(np.matrix(d.get('ImageOrientationPatient')[0:3]), np.matrix(d.get('ImageOrientationPatient')[3:6]))[0] for d in self._dcm_list]
norm_diff = [np.abs(np.dot(slice_norms[0], n)).round(2) for n in slice_norms]
self.is_localizer = bool(len(set(norm_diff)) > 1)
if self.is_dwi:
# DTI scans could have 1+ non-DTI volume. num vols will be >= dwi_dirs + 1
self.bvals = np.array([float(self.getelem(d, TAG_BVALUE)[0]) for d in self._dcm_list[0::self.num_slices]])
self.bvecs = np.array([[self.getelem(d, TAG_BVEC[i], float) for i in range(3)] for d in self._dcm_list[0::self.num_slices]]).transpose()
# TODO separatation of concern; identification vs dicom grouping
recon_mode_flag = np.unique([self.getelem(d, TAG_RECON_FLAG, int, 0) for d in self._dcm_list])
if recon_mode_flag == [1] and self.psd_type not in ['fieldmap']:
log.debug('recon_mode: 1, might be multicoil')
vol_counter = 0
ref_position = self.getelem(self._hdr, 'ImagePositionPatient')
for d in self._dcm_list:
if np.allclose(self.getelem(d, 'ImagePositionPatient'), ref_position):
vol_counter += 1
log.debug('found %d volumes; expected %d' % (vol_counter, (self.num_timepoints or 1) * (self.num_echos or 1)))
if vol_counter > (self.num_timepoints or 1) * (self.num_echos or 1):
self.is_multicoil = True
self.num_receivers = (self.total_num_slices / self.num_slices) - 1 # actual #recv = -1 of num volumes
self._dcm_groups = [self._dcm_list[x::self.num_receivers + 1] for x in xrange(0, self.num_receivers + 1)]
log.debug('groups: %3d; %3d coils + 1 combined' % (len(self._dcm_groups), self.num_receivers))
# attempt to calculate trigger times and slice duration, if the first dicom reports trigger time
self.slice_duration = None
if self.total_num_slices >= self.num_slices and self.getelem(self._dcm_list[0], 'TriggerTime', float) is not None:
log.debug('using trigger times to calculate slice order and slice duration')
trigger_times = np.array([self.getelem(d, 'TriggerTime', float) for d in self._dcm_list[0:self.num_slices]])
if self.reverse_slice_order:
trigger_times = trigger_times[::-1]
# if trigger_times is mixed float and None, then dtype = 'O'
if self.num_slices > 2 and len(trigger_times) > 2 and trigger_times.dtype == 'float64':
trigger_times_from_first_slice = trigger_times[0] - trigger_times
self.slice_duration = float(min(abs(trigger_times_from_first_slice[1:]))) / 1000 # msec to sec
if trigger_times_from_first_slice[1] < 0:
self.slice_order = generic_mr.SLICE_ORDER_SEQ_INC if trigger_times[2] > trigger_times[1] else generic_mr.SLICE_ORDER_ALT_INC
else:
self.slice_order = generic_mr.SLICE_ORDER_ALT_DEC if trigger_times[2] > trigger_times[1] else generic_mr.SLICE_ORDER_SEQ_DEC
else:
self.slice_duration = trigger_times[0]
self.slice_order = generic_mr.SLICE_ORDER_SEQ_INC
# identify fastcard by looking at number of cardiac images as timepoints, with 5 dicoms per slice
cardiac_images = self.getelem(self._hdr, 'CardiacNumberOfImages', int, 0)
if self.total_num_slices == (cardiac_images * self.num_slices * 5): # fastcard: 5 value per temporal X spatial location
self.num_timepoints = cardiac_images
self.is_fastcard = True
self.velocity_encode_scale = self.getelem(self._hdr, 'VelocityEncodeScale', float)
self.velocity_encoding = self.getelem(self._hdr, 'VelocityEncoding', int)
log.debug(self.psd_name)
log.debug(self.psd_type)
def fastcard_convert(self):
"""GE fast card conversion."""
log.debug('fast card')
stacks = []
group_id = 0
def _split_list(l, size):
return [l[i:i+size] for i in range(0, len(l), size)]
dcm_groups = _split_list(self._dcm_list, self.total_num_slices / 5)
for group in dcm_groups:
group_id += 1
num_positions = len(set([d.SliceLocation for d in group]))
if num_positions != self.num_slices:
raise NIMSDicomError('volume %s has %s unique positions; expected %s' % (group_id, num_positions, self.num_slices))
stack = dcmstack.DicomStack()
for d in group:
meta = MetaExtractor(d)
stack.add_dcm(d, meta)
nii_wrp = stack.to_nifti_wrapper()
stacks.append(nii_wrp)
try:
nii_wrp = dcmstack.dcmmeta.NiftiWrapper.from_sequence(stacks)
except dcmstack.InvalidStackError as e:
raise NIMSDicomError('cannot reconstruct %s: %s' % (self.filepath, str(e)))
nii = nii_wrp.nii_img
self.data = {'': nii.get_data()}
self.qto_xyz = nii.get_affine()
generic_mr.post_convert(self)
def multicoil_convert(self):
    """GE-specific multicoil conversion; requires partial volume and missing slice checks."""
log.debug('multicoil recon')
generic_mr.partial_vol_check(self)
stacks = []
group_id = 0
for group in self._dcm_groups:
group_id += 1
log.debug('multicoil - %2s, %s dicom' % (str(group_id), str(len(group))))
num_positions = len(set([d.SliceLocation for d in group]))
if num_positions != self.num_slices:
raise NIMSDicomError('coil %s has %s unique positions; expected %s' % (group_id, num_positions, self.num_slices))
stack = dcmstack.DicomStack()
for dcm in group:
meta = MetaExtractor(dcm)
stack.add_dcm(dcm, meta)
nii_wrp = stack.to_nifti_wrapper()
stacks.append(nii_wrp)
try:
nii_wrp = dcmstack.dcmmeta.NiftiWrapper.from_sequence(stacks)
except dcmstack.InvalidStackError as e:
raise NIMSDicomError('cannot reconstruct %s: %s' % (self.filepath, e)) # XXX FAIL! unexpected for recon to fail
# raise NIMSDicomError('cannot reconstruct %s: %s' % (self.filepath, e), log_level=logging.ERROR)
del self._dcm_groups, self._dcm_list, stacks, stack, dcm
nii = nii_wrp.nii_img
self.data = {'': nii.get_data()}
self.qto_xyz = nii.get_affine()
del nii_wrp, nii
generic_mr.post_convert(self)
def convert(self):
"""
Composer function, determines which convert function to use.
Called by NIMSDicom load_data if dicom manufacturer is GE Medical Systems.
"""
if self.is_non_image:
generic_mr.non_image_handler(self)
elif self.is_localizer:
generic_mr.localizer_convert(self)
elif self.is_multicoil:
multicoil_convert(self)
elif self.is_fastcard:
fastcard_convert(self)
else:
generic_mr.standard_convert(self)
| 45.61204 | 159 | 0.673779 |
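Because infer_psd_type above only inspects self.psd_name, it can be exercised with a stub object. A toy sketch; the import path is taken from the module docstring and may differ in an installed package:

from types import SimpleNamespace
from nimsdata.medimg.dcm.mr import ge  # path per the module docstring

stub = SimpleNamespace(psd_name="muxepi2", psd_type=None)
ge.infer_psd_type(stub)
stub.psd_type  # -> 'muxepi', matched by the 'mux' substring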
4a2044dbb84ac3ca5d4a3fa65ef46bcc3204d88d | 26,719 | py | Python | pennylane/plugins/default_gaussian.py | shashanka300/pennylane | 194ccd00a9a7f7075c37680c970e56bab4808e60 | [
"Apache-2.0"
] | 1 | 2019-02-28T09:16:33.000Z | 2019-02-28T09:16:33.000Z | pennylane/plugins/default_gaussian.py | shashanka300/pennylane | 194ccd00a9a7f7075c37680c970e56bab4808e60 | [
"Apache-2.0"
] | null | null | null | pennylane/plugins/default_gaussian.py | shashanka300/pennylane | 194ccd00a9a7f7075c37680c970e56bab4808e60 | [
"Apache-2.0"
] | null | null | null | # Copyright 2018 Xanadu Quantum Technologies Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=inconsistent-return-statements
"""
Default Gaussian plugin
=======================
**Module name:** :mod:`pennylane.plugins.default_gaussian`
**Short name:** ``"default.gaussian"``
.. currentmodule:: pennylane.plugins.default_gaussian
The :code:`default.gaussian` plugin is meant to be used as a template for writing PennyLane
device plugins for new CV backends.
It implements the necessary :class:`~pennylane._device.Device` methods as well as all built-in
:mod:`continuous-variable Gaussian operations <pennylane.ops.cv>` and
:mod:`expectations <pennylane.expval.cv>`, and provides a very simple simulation of a
Gaussian-based quantum circuit architecture.
The following is the technical documentation of the implementation of the plugin. You will
not need to read and understand this to use this plugin.
Auxiliary functions
-------------------
.. autosummary::
partitions
fock_prob
Gates and operations
--------------------
.. autosummary::
rotation
displacement
squeezing
quadratic_phase
beamsplitter
two_mode_squeezing
controlled_addition
controlled_phase
interferometer
State preparation
-----------------
.. autosummary::
squeezed_cov
vacuum_state
coherent_state
squeezed_state
displaced_squeezed_state
thermal_state
gaussian_state
set_state
Expectations
------------
.. autosummary::
photon_number
homodyne
poly_quad_expectations
fock_expectation
Classes
-------
.. autosummary::
DefaultGaussian
Code details
^^^^^^^^^^^^
"""
# pylint: disable=attribute-defined-outside-init
import logging as log
import numpy as np
from scipy.special import factorial as fac
import pennylane as qml
from pennylane import Device
log.getLogger()
# tolerance for numerical errors
tolerance = 1e-10
#========================================================
# auxiliary functions
#========================================================
def partitions(s, include_singles=True):
"""Partitions a sequence into all groupings of pairs and singles of elements.
Args:
s (sequence): the sequence to partition
include_singles (bool): if False, only partitions into pairs
            are returned.
Returns:
        generator: yields each partition of the sequence as a nested tuple of groups.
"""
# pylint: disable=too-many-branches
if len(s) == 2:
if include_singles:
yield (s[0],), (s[1],)
yield tuple(s),
else:
# pull off a single item and partition the rest
if include_singles:
if len(s) > 1:
item_partition = (s[0],)
rest = s[1:]
rest_partitions = partitions(rest, include_singles)
for p in rest_partitions:
yield ((item_partition),) + p
else:
yield tuple(s),
# pull off a pair of items and partition the rest
for idx1 in range(1, len(s)):
item_partition = (s[0], s[idx1])
rest = s[1:idx1] + s[idx1+1:]
rest_partitions = partitions(rest, include_singles)
for p in rest_partitions:
yield ((item_partition),) + p
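# Illustrative example (not part of the original module): partitioning a
# 3-element tuple yields every grouping into singles and pairs.
#
#     >>> list(partitions((0, 1, 2)))
#     [((0,), (1,), (2,)), ((0,), (1, 2)), ((0, 1), (2,)), ((0, 2), (1,))]
#
# With include_singles=False only perfect pairings remain, which is what
# fock_prob below relies on for zero-displacement states.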
def fock_prob(mu, cov, event, hbar=2.):
    r"""Returns the probability of a particular PNR detection event.
For more details, see:
* Kruse, R., Hamilton, C. S., Sansoni, L., Barkhofen, S., Silberhorn, C., & Jex, I.
"A detailed study of Gaussian Boson Sampling." `arXiv:1801.07488. (2018).
<https://arxiv.org/abs/1801.07488>`_
* Hamilton, C. S., Kruse, R., Sansoni, L., Barkhofen, S., Silberhorn, C., & Jex, I.
"Gaussian boson sampling." `Physical review letters, 119(17), 170501. (2017).
<https://journals.aps.org/prl/abstract/10.1103/PhysRevLett.119.170501>`_
Args:
mu (array): length-:math:`2N` means vector
cov (array): :math:`2N\times 2N` covariance matrix
event (array): length-:math:`N` array of non-negative integers representing the
PNR detection event of the multi-mode system.
hbar (float): (default 2) the value of :math:`\hbar` in the commutation
relation :math:`[\x,\p]=i\hbar`.
Returns:
float: probability of detecting the event
"""
# number of modes
N = len(mu)//2
I = np.identity(N)
# mean displacement of each mode
alpha = (mu[:N] + 1j*mu[N:])/np.sqrt(2*hbar)
# the expectation values (<a_1>, <a_2>,...,<a_N>, <a^\dagger_1>, ..., <a^\dagger_N>)
beta = np.concatenate([alpha, alpha.conj()])
x = cov[:N, :N]*2/hbar
xp = cov[:N, N:]*2/hbar
p = cov[N:, N:]*2/hbar
# the (Hermitian) matrix elements <a_i^\dagger a_j>
aidaj = (x+p+1j*(xp-xp.T)-2*I)/4
# the (symmetric) matrix elements <a_i a_j>
aiaj = (x-p+1j*(xp+xp.T))/4
# calculate the covariance matrix sigma_Q appearing in the Q function:
# Q(alpha) = exp[-(alpha-beta).sigma_Q^{-1}.(alpha-beta)/2]/|sigma_Q|
Q = np.block([[aidaj, aiaj.conj()], [aiaj, aidaj.conj()]]) + np.identity(2*N)
# inverse Q matrix
Qinv = np.linalg.inv(Q)
# 1/sqrt(|Q|)
sqrt_Qdet = 1/np.sqrt(np.linalg.det(Q).real)
prefactor = np.exp(-beta @ Qinv @ beta.conj()/2)
if np.all(np.array(event) == 0):
# all PNRs detect the vacuum state
return (prefactor*sqrt_Qdet).real/np.prod(fac(event))
# the matrix X_n = [[0, I_n], [I_n, 0]]
O = np.zeros_like(I)
X = np.block([[O, I], [I, O]])
gamma = X @ Qinv.conj() @ beta
# For each mode, repeat the mode number event[i] times
ind = [i for sublist in [[idx]*j for idx, j in enumerate(event)] for i in sublist]
# extend the indices for xp-ordering of the Gaussian state
ind += [i+N for i in ind]
if np.linalg.norm(beta) < tolerance:
# state has no displacement
part = partitions(ind, include_singles=False)
else:
part = partitions(ind, include_singles=True)
# calculate Hamilton's A matrix: A = X.(I-Q^{-1})*
A = X @ (np.identity(2*N)-Qinv).conj()
summation = np.sum([np.prod([gamma[i[0]] if len(i) == 1 else A[i] for i in p]) for p in part])
return (prefactor*sqrt_Qdet*summation).real/np.prod(fac(event))
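# Illustrative sanity check (not part of the original module): the single-mode
# vacuum is detected in the Fock state |0> with certainty.
#
#     >>> mu, cov = np.zeros(2), np.identity(2)   # hbar=2 single-mode vacuum
#     >>> np.allclose(fock_prob(mu, cov, [0]), 1.0)
#     True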
#========================================================
# parametrized gates
#========================================================
def rotation(phi):
"""Rotation in the phase space.
Args:
phi (float): rotation parameter
Returns:
array: symplectic transformation matrix
"""
return np.array([[np.cos(phi), -np.sin(phi)],
[np.sin(phi), np.cos(phi)]])
def displacement(state, wire, alpha, hbar=2):
"""Displacement in the phase space.
Args:
state (tuple): contains means vector and covariance matrix
wire (int): wire that the displacement acts on
        alpha (complex): displacement parameter
        hbar (float): (default 2) the value of :math:`\hbar` in the commutation
            relation :math:`[\x,\p]=i\hbar`
Returns:
tuple: contains the vector of means and covariance matrix
"""
mu = state[0]
mu[wire] += alpha.real*np.sqrt(2*hbar)
mu[wire+len(mu)//2] += alpha.imag*np.sqrt(2*hbar)
return mu, state[1]
def squeezing(r, phi):
"""Squeezing in the phase space.
Args:
r (float): squeezing magnitude
phi (float): rotation parameter
Returns:
array: symplectic transformation matrix
"""
cp = np.cos(phi)
sp = np.sin(phi)
ch = np.cosh(r)
sh = np.sinh(r)
return np.array([[ch-cp*sh, -sp*sh],
[-sp*sh, ch+cp*sh]])
def quadratic_phase(s):
"""Quadratic phase shift.
Args:
s (float): gate parameter
Returns:
array: symplectic transformation matrix
"""
return np.array([[1, 0],
[s, 1]])
def beamsplitter(theta, phi):
r"""Beamsplitter.
Args:
theta (float): transmittivity angle (:math:`t=\cos\theta`)
phi (float): phase angle (:math:`r=e^{i\phi}\sin\theta`)
Returns:
array: symplectic transformation matrix
"""
cp = np.cos(phi)
sp = np.sin(phi)
ct = np.cos(theta)
st = np.sin(theta)
S = np.array([[ct, -cp*st, 0, -st*sp],
[cp*st, ct, -st*sp, 0],
[0, st*sp, ct, -cp*st],
[st*sp, 0, cp*st, ct]])
return S
def two_mode_squeezing(r, phi):
"""Two-mode squeezing.
Args:
r (float): squeezing magnitude
phi (float): rotation parameter
Returns:
array: symplectic transformation matrix
"""
cp = np.cos(phi)
sp = np.sin(phi)
ch = np.cosh(r)
sh = np.sinh(r)
S = np.array([[ch, cp*sh, 0, sp*sh],
[cp*sh, ch, sp*sh, 0],
[0, sp*sh, ch, -cp*sh],
[sp*sh, 0, -cp*sh, ch]])
return S
def controlled_addition(s):
"""CX gate.
Args:
s (float): gate parameter
Returns:
array: symplectic transformation matrix
"""
S = np.array([[1, 0, 0, 0],
[s, 1, 0, 0],
[0, 0, 1, -s],
[0, 0, 0, 1]])
return S
def controlled_phase(s):
"""CZ gate.
Args:
s (float): gate parameter
Returns:
array: symplectic transformation matrix
"""
S = np.array([[1, 0, 0, 0],
[0, 1, 0, 0],
[0, s, 1, 0],
[s, 0, 0, 1]])
return S
def interferometer(U):
"""Interferometer
Args:
U (array): unitary matrix
Returns:
array: symplectic transformation matrix
"""
N = 2*len(U)
X = U.real
Y = U.imag
rows = np.arange(N).reshape(2, -1).T.flatten()
S = np.vstack([np.hstack([X, -Y]),
np.hstack([Y, X])])[:, rows][rows]
return S
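# Illustrative check (not part of the original module): the identity
# interferometer maps to the identity symplectic transformation.
#
#     >>> np.allclose(interferometer(np.identity(3)), np.identity(6))
#     True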
#========================================================
# Arbitrary states and operators
#========================================================
def squeezed_cov(r, phi, hbar=2):
r"""Returns the squeezed covariance matrix of a squeezed state.
Args:
r (float): the squeezing magnitude
        phi (float): the squeezing phase :math:`\phi`
hbar (float): (default 2) the value of :math:`\hbar` in the commutation
relation :math:`[\x,\p]=i\hbar`
Returns:
        array: the squeezed covariance matrix
"""
cov = np.array([[np.exp(-2*r), 0],
[0, np.exp(2*r)]]) * hbar/2
R = rotation(phi/2)
return R @ cov @ R.T
def vacuum_state(wires, hbar=2.):
r"""Returns the vacuum state.
Args:
        wires (int): the number of modes
hbar (float): (default 2) the value of :math:`\hbar` in the commutation
relation :math:`[\x,\p]=i\hbar`
Returns:
array: the vacuum state
"""
means = np.zeros((2*wires))
cov = np.identity(2*wires) * hbar/2
state = [means, cov]
return state
def coherent_state(a, phi=0, hbar=2.):
r"""Returns a coherent state.
Args:
        a (float): the displacement magnitude
phi (float): the phase
hbar (float): (default 2) the value of :math:`\hbar` in the commutation
relation :math:`[\x,\p]=i\hbar`
Returns:
array: the coherent state
"""
alpha = a*np.exp(1j*phi)
means = np.array([alpha.real, alpha.imag]) * np.sqrt(2*hbar)
cov = np.identity(2) * hbar/2
state = [means, cov]
return state
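# Illustrative check (not part of the original module): a coherent state with
# displacement alpha has mean photon number |alpha|^2. photon_number is defined
# further below in this module.
#
#     >>> mu, cov = coherent_state(0.5)
#     >>> ex, var = photon_number(mu, cov, wires=[0], params=None)
#     >>> np.isclose(ex, 0.25)
#     True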
def squeezed_state(r, phi, hbar=2.):
r"""Returns a squeezed state.
Args:
r (float): the squeezing magnitude
phi (float): the squeezing phase :math:`\phi`
hbar (float): (default 2) the value of :math:`\hbar` in the commutation
relation :math:`[\x,\p]=i\hbar`
Returns:
array: the squeezed state
"""
means = np.zeros((2))
state = [means, squeezed_cov(r, phi, hbar)]
return state
def displaced_squeezed_state(a, phi_a, r, phi_r, hbar=2.):
r"""Returns a squeezed coherent state
Args:
a (real): the displacement magnitude
phi_a (real): the displacement phase
r (float): the squeezing magnitude
phi_r (float): the squeezing phase :math:`\phi_r`
hbar (float): (default 2) the value of :math:`\hbar` in the commutation
relation :math:`[\x,\p]=i\hbar`
Returns:
array: the squeezed coherent state
"""
alpha = a * np.exp(1j*phi_a)
means = np.array([alpha.real, alpha.imag]) * np.sqrt(2*hbar)
state = [means, squeezed_cov(r, phi_r, hbar)]
return state
def thermal_state(nbar, hbar=2.):
r"""Returns a thermal state.
Args:
nbar (float): the mean photon number
hbar (float): (default 2) the value of :math:`\hbar` in the commutation
relation :math:`[\x,\p]=i\hbar`
Returns:
array: the thermal state
"""
means = np.zeros([2])
state = [means, (2*nbar+1)*np.identity(2)*hbar/2]
return state
def gaussian_state(mu, cov, hbar=2.):
r"""Returns a Gaussian state.
This is simply a bare wrapper function,
since the means vector and covariance matrix
can be passed via the parameters unchanged.
Note that both the means vector and covariance
matrix should be in :math:`(\x_1,\dots, \x_N, \p_1, \dots, \p_N)`
ordering.
Args:
mu (array): vector means. Must be length-:math:`2N`,
where N is the number of modes
cov (array): covariance matrix. Must be dimension :math:`2N\times 2N`,
where N is the number of modes
hbar (float): (default 2) the value of :math:`\hbar` in the commutation
relation :math:`[\x,\p]=i\hbar`
Returns:
        tuple: the mean vector and covariance matrix of the Gaussian state
"""
# pylint: disable=unused-argument
return mu, cov
def set_state(state, wire, mu, cov):
r"""Inserts a single mode Gaussian into the
state representation of the complete system.
Args:
state (tuple): contains means vector
and covariance matrix of existing state
wire (int): wire corresponding to the new Gaussian state
mu (array): vector of means to insert
cov (array): covariance matrix to insert
Returns:
tuple: contains the vector of means and covariance matrix.
"""
mu0 = state[0]
cov0 = state[1]
N = len(mu0)//2
# insert the new state into the means vector
mu0[[wire, wire+N]] = mu
# insert the new state into the covariance matrix
ind = np.concatenate([np.array([wire]), np.array([wire])+N])
rows = ind.reshape(-1, 1)
cols = ind.reshape(1, -1)
cov0[rows, cols] = cov
return mu0, cov0
#========================================================
# expectations
#========================================================
def photon_number(mu, cov, wires, params, hbar=2.):
r"""Calculates the mean photon number for a given one-mode state.
Args:
mu (array): length-2 vector of means
cov (array): :math:`2\times 2` covariance matrix
wires (Sequence[int]): wires to calculate the expectation for
params (None): no parameters are used for this expectation value
hbar (float): (default 2) the value of :math:`\hbar` in the commutation
relation :math:`[\x,\p]=i\hbar`
Returns:
tuple: contains the photon number expectation and variance
"""
# pylint: disable=unused-argument
ex = (np.trace(cov) + mu.T @ mu)/(2*hbar) - 1/2
var = (np.trace(cov @ cov) + 2*mu.T @ cov @ mu)/(2*hbar**2) - 1/4
return ex, var
def homodyne(phi=None):
"""Function factory that returns the Homodyne expectation of a one mode state.
Args:
phi (float): the default phase space axis to perform the Homodyne measurement
Returns:
function: A function that accepts a single mode means vector, covariance matrix,
and phase space angle phi, and returns the quadrature expectation
value and variance.
"""
if phi is not None:
def _homodyne(mu, cov, wires, params, hbar=2.):
"""Arbitrary angle homodyne expectation."""
# pylint: disable=unused-argument
rot = rotation(phi)
muphi = rot.T @ mu
covphi = rot.T @ cov @ rot
return muphi[0], covphi[0, 0]
return _homodyne
def _homodyne(mu, cov, wires, params, hbar=2.):
"""Arbitrary angle homodyne expectation."""
# pylint: disable=unused-argument
rot = rotation(params[0])
muphi = rot.T @ mu
covphi = rot.T @ cov @ rot
return muphi[0], covphi[0, 0]
return _homodyne
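# usage: homodyne(0) measures the x quadrature and homodyne(np.pi/2) the p
# quadrature; homodyne(None) instead reads the angle from params[0] at call time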
def poly_quad_expectations(mu, cov, wires, params, hbar=2.):
r"""Calculates the expectation and variance for an arbitrary
polynomial of quadrature operators.
Args:
mu (array): length-2 vector of means
cov (array): :math:`2\times 2` covariance matrix
wires (Sequence[int]): wires to calculate the expectation for
params (array): a :math:`(2N+1)\times (2N+1)` array containing the linear
and quadratic coefficients of the quadrature operators
:math:`(\I, \x_0, \p_0, \x_1, \p_1,\dots)`
hbar (float): (default 2) the value of :math:`\hbar` in the commutation
relation :math:`[\x,\p]=i\hbar`
Returns:
tuple: the mean and variance of the quadrature-polynomial observable
"""
Q = params[0]
N = len(mu)//2
# HACK, we need access to the Poly instance in order to expand the matrix!
op = qml.expval.PolyXP(Q, wires=wires, do_queue=False)
Q = op.heisenberg_obs(N)
if Q.ndim == 1:
d = np.r_[Q[1::2], Q[2::2]]
return d.T @ mu + Q[0], d.T @ cov @ d
# convert to the (I, x1,x2,..., p1,p2...) ordering
M = np.vstack((Q[0:1, :], Q[1::2, :], Q[2::2, :]))
M = np.hstack((M[:, 0:1], M[:, 1::2], M[:, 2::2]))
d1 = M[1:, 0]
d2 = M[0, 1:]
A = M[1:, 1:]
d = d1 + d2
k = M[0, 0]
d2 = 2*A @ mu + d
k2 = mu.T @ A @ mu + mu.T @ d + k
ex = np.trace(A @ cov) + k2
var = 2*np.trace(A @ cov @ A @ cov) + d2.T @ cov @ d2
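    # the quadratic part also requires an operator-ordering (Groenewold)
    # correction to the variance, subtracted below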
modes = np.arange(2*N).reshape(2, -1).T
groenewald_correction = np.sum([np.linalg.det(hbar*A[:, m][n]) for m in modes for n in modes])
var -= groenewald_correction
return ex, var
def fock_expectation(mu, cov, wires, params, hbar=2.):
r"""Calculates the expectation and variance of a Fock state probability.
Args:
mu (array): length-:math:`2N` vector of means
cov (array): :math:`2N\times 2N` covariance matrix
wires (Sequence[int]): wires to calculate the expectation for
params (Sequence[int]): the Fock state to return the expectation value for
hbar (float): (default 2) the value of :math:`\hbar` in the commutation
relation :math:`[\x,\p]=i\hbar`
Returns:
tuple: the Fock state expectation and variance
"""
# pylint: disable=unused-argument
ex = fock_prob(mu, cov, params[0], hbar=hbar)
# var[|n><n|] = E[|n><n|^2] - E[|n><n|]^2 = E[|n><n|] - E[|n><n|]^2
var = ex - ex**2
return ex, var
def identity(*_, **__):
r"""Returns 1.
Returns:
tuple: the Fock state expectation and variance
"""
return 1, 0
#========================================================
# device
#========================================================
class DefaultGaussian(Device):
r"""Default Gaussian device for PennyLane.
Args:
wires (int): the number of modes to initialize the device in
shots (int): How many times should the circuit be evaluated (or sampled) to estimate
the expectation values. 0 yields the exact result.
hbar (float): (default 2) the value of :math:`\hbar` in the commutation
relation :math:`[\x,\p]=i\hbar`
"""
name = 'Default Gaussian PennyLane plugin'
short_name = 'default.gaussian'
pennylane_requires = '0.3.0'
version = '0.3.0'
author = 'Xanadu Inc.'
_operation_map = {
'Beamsplitter': beamsplitter,
'ControlledAddition': controlled_addition,
'ControlledPhase': controlled_phase,
'Displacement': displacement,
'QuadraticPhase': quadratic_phase,
'Rotation': rotation,
'Squeezing': squeezing,
'TwoModeSqueezing': two_mode_squeezing,
'CoherentState': coherent_state,
'DisplacedSqueezedState': displaced_squeezed_state,
'SqueezedState': squeezed_state,
'ThermalState': thermal_state,
'GaussianState': gaussian_state,
'Interferometer': interferometer
}
_expectation_map = {
'MeanPhoton': photon_number,
'X': homodyne(0),
'P': homodyne(np.pi/2),
'Homodyne': homodyne(None),
'PolyXP': poly_quad_expectations,
'NumberState': fock_expectation,
'Identity': identity
}
_circuits = {}
def __init__(self, wires, *, shots=0, hbar=2):
super().__init__(wires, shots)
self.eng = None
self.hbar = hbar
self.reset()
def pre_apply(self):
self.reset()
def apply(self, operation, wires, par):
if operation == 'Displacement':
self._state = displacement(self._state, wires[0], par[0]*np.exp(1j*par[1]))
return # we are done here
if operation == 'GaussianState':
if wires != list(range(self.num_wires)):
raise ValueError("GaussianState means vector or covariance matrix is "
"the incorrect size for the number of subsystems.")
self._state = self._operation_map[operation](*par, hbar=self.hbar)
return # we are done here
if operation == 'Interferometer':
if par[0].shape[0] != len(wires):
raise ValueError("Interferomer unitary matrix applied to the incorrect "
"number of subsystems.")
if len(wires) > 2:
raise ValueError("Only 2-mode interferometers are currently supported.")
if 'State' in operation:
# set the new device state
mu, cov = self._operation_map[operation](*par, hbar=self.hbar)
# state preparations only act on at most 1 subsystem
self._state = set_state(self._state, wires[0], mu, cov)
return # we are done here
# get the symplectic matrix
S = self._operation_map[operation](*par)
# expand the symplectic to act on the proper subsystem
if len(wires) == 1:
S = self.expand_one(S, wires[0])
elif len(wires) == 2:
S = self.expand_two(S, wires)
# apply symplectic matrix to the means vector
means = S @ self._state[0]
# apply symplectic matrix to the covariance matrix
cov = S @ self._state[1] @ S.T
self._state = [means, cov]
def expand_one(self, S, wire):
r"""Expands a one-mode Symplectic matrix S to act on the entire subsystem.
Args:
S (array): :math:`2\times 2` Symplectic matrix
wire (int): the wire S acts on
Returns:
array: the resulting :math:`2N\times 2N` Symplectic matrix
"""
S2 = np.identity(2*self.num_wires)
ind = np.concatenate([np.array([wire]), np.array([wire])+self.num_wires])
rows = ind.reshape(-1, 1)
cols = ind.reshape(1, -1)
S2[rows, cols] = S.copy()
return S2
def expand_two(self, S, wires):
r"""Expands a two-mode Symplectic matrix S to act on the entire subsystem.
Args:
S (array): :math:`4\times 4` Symplectic matrix
wires (Sequence[int]): the list of two wires S acts on
Returns:
array: the resulting :math:`2N\times 2N` Symplectic matrix
"""
S2 = np.identity(2*self.num_wires)
w = np.array(wires)
S2[w.reshape(-1, 1), w.reshape(1, -1)] = S[:2, :2].copy() #X
S2[(w+self.num_wires).reshape(-1, 1), (w+self.num_wires).reshape(1, -1)] = S[2:, 2:].copy() #P
S2[w.reshape(-1, 1), (w+self.num_wires).reshape(1, -1)] = S[:2, 2:].copy() #XP
S2[(w+self.num_wires).reshape(-1, 1), w.reshape(1, -1)] = S[2:, :2].copy() #PX
return S2
def expval(self, expectation, wires, par):
mu, cov = self.reduced_state(wires)
ev, var = self._expectation_map[expectation](mu, cov, wires, par, hbar=self.hbar)
if self.shots != 0:
# estimate the ev
# use central limit theorem, sample normal distribution once, only ok if n_eval is large
# (see https://en.wikipedia.org/wiki/Berry%E2%80%93Esseen_theorem)
ev = np.random.normal(ev, np.sqrt(var / self.shots))
return ev
def reset(self):
"""Reset the device"""
# init the state vector to |00..0>
self._state = vacuum_state(self.num_wires, self.hbar)
def reduced_state(self, wires):
r""" Returns the vector of means and the covariance matrix of the specified wires.
Args:
            wires (int or Sequence[int]): indices of the requested wires
Returns:
tuple (means, cov): means is an array containing the vector of means,
and cov is a square array containing the covariance matrix
"""
if wires == list(range(self.num_wires)):
# reduced state is full state
return self._state
# reduce rho down to specified subsystems
if isinstance(wires, int):
wires = [wires]
        if np.any(np.array(wires) >= self.num_wires):
            raise ValueError("The specified wire indices must be "
                             "smaller than the number of subsystems.")
ind = np.concatenate([np.array(wires), np.array(wires)+self.num_wires])
rows = ind.reshape(-1, 1)
cols = ind.reshape(1, -1)
return self._state[0][ind], self._state[1][rows, cols]
@property
def operations(self):
return set(self._operation_map.keys())
@property
def expectations(self):
return set(self._expectation_map.keys())
| 30.021348 | 102 | 0.580224 |
4a2045f366e4b97ae82b723ce264a0c2350500ec | 2,003 | py | Python | uasyncio.core/test_cancel.py | MaxTurchin/pycopy-lib | d7a69fc2a28031e2ca475c29239f715c1809d8cc | [
"PSF-2.0"
] | 126 | 2019-07-19T14:42:41.000Z | 2022-03-21T22:22:19.000Z | uasyncio.core/test_cancel.py | MaxTurchin/pycopy-lib | d7a69fc2a28031e2ca475c29239f715c1809d8cc | [
"PSF-2.0"
] | 38 | 2019-08-28T01:46:31.000Z | 2022-03-17T05:46:51.000Z | uasyncio.core/test_cancel.py | MaxTurchin/pycopy-lib | d7a69fc2a28031e2ca475c29239f715c1809d8cc | [
"PSF-2.0"
] | 55 | 2019-08-02T09:32:33.000Z | 2021-12-22T11:25:51.000Z | # Runs both in Pycopy and CPython.
import time
try:
import uasyncio.core as asyncio
is_uasyncio = True
except ImportError:
import asyncio
is_uasyncio = False
import logging
#logging.basicConfig(level=logging.DEBUG)
#asyncio.set_debug(True)
BUSY_SLEEP = 0
output = []
cancelled = False
# Can be used to make test run faster/slower and/or avoid floating point
# rounding errors.
def delay(n):
return n / 10
def print1(msg):
print(msg)
output.append(msg)
def looper1(iters):
global cancelled
try:
for i in range(iters):
print1("ping1")
if BUSY_SLEEP:
t = time.time()
while time.time() - t < delay(1):
yield from asyncio.sleep(0)
else:
yield from asyncio.sleep(delay(1))
return 10
except asyncio.CancelledError:
print1("cancelled")
cancelled = True
def looper2(iters):
for i in range(iters):
print1("ping2")
if BUSY_SLEEP:
t = time.time()
while time.time() - t < delay(1):
yield from asyncio.sleep(0)
else:
yield from asyncio.sleep(delay(1))
return 10
def run_to():
coro = looper1(10)
task = loop.create_task(coro)
yield from asyncio.sleep(delay(3))
if is_uasyncio:
asyncio.cancel(coro)
else:
task.cancel()
# Need another eventloop iteration for cancellation to be actually
# processed and to see side effects of the cancellation.
yield from asyncio.sleep(0)
assert cancelled
coro = looper2(10)
task = loop.create_task(coro)
yield from asyncio.sleep(delay(2))
if is_uasyncio:
asyncio.cancel(coro)
else:
task.cancel()
yield from asyncio.sleep(0)
    # Once saw 3 ping2's in the output on CPython 3.5.2
assert output == ['ping1', 'ping1', 'ping1', 'cancelled', 'ping2', 'ping2']
loop = asyncio.get_event_loop()
loop.run_until_complete(run_to())
| 23.290698 | 79 | 0.611583 |
4a204651f96b81873673bb3b7f64210e8895edab | 1,656 | py | Python | migrations/extract-views.py | dreibh/planetlab-lxc-plcapi | 065dfc54a2b668e99eab343d113f1a31fb154b13 | [
"BSD-3-Clause"
] | null | null | null | migrations/extract-views.py | dreibh/planetlab-lxc-plcapi | 065dfc54a2b668e99eab343d113f1a31fb154b13 | [
"BSD-3-Clause"
] | null | null | null | migrations/extract-views.py | dreibh/planetlab-lxc-plcapi | 065dfc54a2b668e99eab343d113f1a31fb154b13 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python3
import sys
import re
class Schema:
    def __init__(self, input, output=None):
        self.input = input
        self.output = output
# left part is non-greedy
    comment = re.compile(r"(.*?)--.*")
    spaces = re.compile(r"^\s+(\S.*)")
    view = re.compile(r"(?i)\s*create\s+(or\s+replace\s+)?view.*")
    def parse(self):
if self.output:
outfile = open(self.output, "a")
else:
outfile = sys.stdout
with open(self.input) as feed:
contents = feed.read()
parts = contents.split(";")
for part in parts:
# normalize: remove comments, linebreaks, trailing spaces..
lines = part.split('\n')
out_lines = []
for line in lines:
# remove comment
match = Schema.comment.match(line)
if match:
line = match.group(1)
out_lines.append(line)
# get them together
out_line = " ".join(out_lines)
# remove trailing spaces
match = Schema.spaces.match(out_line)
if match:
out_line = match.group(1)
match = Schema.view.match(out_line)
if match:
outfile.write("{};\n".format(out_line))
if outfile != sys.stdout:
outfile.close()
if __name__ == '__main__':
if len(sys.argv) not in [2, 3]:
print('Usage:', sys.argv[0], 'input [output]')
sys.exit(1)
input = sys.argv[1]
try:
output = sys.argv[2]
except Exception:
output = None
Schema(input, output).parse()
| 29.052632 | 71 | 0.513889 |
4a20466e0db5bda07794fb1433f7da4c5a959fe4 | 3,461 | py | Python | Final/dmfinal/court.py | may811204/EE380LDataMining | fea8470e51e2db3805979199562edaddf2f03f5b | [
"Apache-2.0"
] | null | null | null | Final/dmfinal/court.py | may811204/EE380LDataMining | fea8470e51e2db3805979199562edaddf2f03f5b | [
"Apache-2.0"
] | 1 | 2018-02-22T21:45:43.000Z | 2018-02-22T21:45:43.000Z | Final/dmfinal/court.py | may811204/EE380LDataMining | fea8470e51e2db3805979199562edaddf2f03f5b | [
"Apache-2.0"
] | 2 | 2018-04-25T01:39:47.000Z | 2018-05-09T23:34:31.000Z | def CourtShape():
court_shapes = []
outer_lines_shape = dict(
type='rect',
xref='x',
yref='y',
x0='-250',
y0='-47.5',
x1='250',
y1='422.5',
line=dict(
color='rgba(10, 10, 10, 1)',
width=1
)
)
court_shapes.append(outer_lines_shape)
backboard_shape = dict(
type='rect',
xref='x',
yref='y',
x0='-30',
y0='-7.5',
x1='30',
y1='-6.5',
line=dict(
color='rgba(10, 10, 10, 1)',
width=1
),
fillcolor='rgba(10, 10, 10, 1)'
)
court_shapes.append(backboard_shape)
outer_three_sec_shape = dict(
type='rect',
xref='x',
yref='y',
x0='-80',
y0='-47.5',
x1='80',
y1='143.5',
line=dict(
color='rgba(10, 10, 10, 1)',
width=1
)
)
court_shapes.append(outer_three_sec_shape)
inner_three_sec_shape = dict(
type='rect',
xref='x',
yref='y',
x0='-60',
y0='-47.5',
x1='60',
y1='143.5',
line=dict(
color='rgba(10, 10, 10, 1)',
width=1
)
)
court_shapes.append(inner_three_sec_shape)
left_line_shape = dict(
type='line',
xref='x',
yref='y',
x0='-220',
y0='-47.5',
x1='-220',
y1='92.5',
line=dict(
color='rgba(10, 10, 10, 1)',
width=1
)
)
court_shapes.append(left_line_shape)
right_line_shape = dict(
type='line',
xref='x',
yref='y',
x0='220',
y0='-47.5',
x1='220',
y1='92.5',
line=dict(
color='rgba(10, 10, 10, 1)',
width=1
)
)
court_shapes.append(right_line_shape)
three_point_arc_shape = dict(
type='path',
xref='x',
yref='y',
path='M -220 92.5 C -70 300, 70 300, 220 92.5',
line=dict(
color='rgba(10, 10, 10, 1)',
width=1
)
)
court_shapes.append(three_point_arc_shape)
center_circle_shape = dict(
type='circle',
xref='x',
yref='y',
x0='60',
y0='482.5',
x1='-60',
y1='362.5',
line=dict(
color='rgba(10, 10, 10, 1)',
width=1
)
)
court_shapes.append(center_circle_shape)
res_circle_shape = dict(
type='circle',
xref='x',
yref='y',
x0='20',
y0='442.5',
x1='-20',
y1='402.5',
line=dict(
color='rgba(10, 10, 10, 1)',
width=1
)
)
court_shapes.append(res_circle_shape)
free_throw_circle_shape = dict(
type='circle',
xref='x',
yref='y',
x0='60',
y0='200',
x1='-60',
y1='80',
line=dict(
color='rgba(10, 10, 10, 1)',
width=1
)
)
court_shapes.append(free_throw_circle_shape)
res_area_shape = dict(
type='circle',
xref='x',
yref='y',
x0='40',
y0='40',
x1='-40',
y1='-40',
line=dict(
color='rgba(10, 10, 10, 1)',
width=1,
dash='dot'
)
)
court_shapes.append(res_area_shape)
return court_shapes | 19.777143 | 55 | 0.421843 |
4a20472796ec9c8ed6cec91dd68817f17ae8da6b | 272 | py | Python | Elementary/Conditions/Conditionals.py | ArnobMahmud/Python-Series | 5ea451ceae25367c1439d9785cf9b45b3b658623 | [
"MIT"
] | null | null | null | Elementary/Conditions/Conditionals.py | ArnobMahmud/Python-Series | 5ea451ceae25367c1439d9785cf9b45b3b658623 | [
"MIT"
] | null | null | null | Elementary/Conditions/Conditionals.py | ArnobMahmud/Python-Series | 5ea451ceae25367c1439d9785cf9b45b3b658623 | [
"MIT"
] | null | null | null | '''
Author : Arnob Mahmud
Mail : arnob.tech.me @ gmail.com
'''
num1 = int(input("Enter 1st number :"))
num2 = int(input("Enter 2nd number :"))
if num1 > num2:
print("Your 1st number is the maximum value")
else:
print("Your 2nd number is the maximum value") | 22.666667 | 49 | 0.647059 |
4a2049014f133993d47156c88193a047bc2ccc2c | 502 | py | Python | sources/4chan.py | spmassot/FaceProject | 14ca44c05dfc570a21548ca56e075e550c2c9abc | [
"MIT"
] | 1 | 2018-03-14T12:38:43.000Z | 2018-03-14T12:38:43.000Z | sources/4chan.py | automatonymous/FaceProject | 14ca44c05dfc570a21548ca56e075e550c2c9abc | [
"MIT"
] | 2 | 2021-02-08T20:19:52.000Z | 2021-06-01T21:52:31.000Z | sources/4chan.py | automatonymous/FaceProject | 14ca44c05dfc570a21548ca56e075e550c2c9abc | [
"MIT"
] | null | null | null | import re
import basc_py4chan
from pandas import DataFrame
def get_board(board):
return [get_thread(x) for x in basc_py4chan.Board(board).get_threads()]
def get_thread(thread):
return [get_post(x) for x in thread.all_posts]
def get_post(post):
reply = re.compile('>>\d+')
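    # (currently unused) matches quote-links such as >>123456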
txt = post.text_comment.strip().replace('\n', ' ')
if not txt:
return None
    return (post.post_id, txt)
if __name__ == '__main__':
print(get_board('b'))
| 25.1 | 75 | 0.673307 |
4a20495f639c765495a4ba9bc3f445aea710132e | 1,811 | py | Python | src/setup.py | konung-yaropolk/pyABF | b5620e73ac5d060129b844da44f8b2611536ac56 | [
"MIT"
] | null | null | null | src/setup.py | konung-yaropolk/pyABF | b5620e73ac5d060129b844da44f8b2611536ac56 | [
"MIT"
] | null | null | null | src/setup.py | konung-yaropolk/pyABF | b5620e73ac5d060129b844da44f8b2611536ac56 | [
"MIT"
] | null | null | null | import os
from setuptools import setup
from setuptools import find_packages
import sys
if sys.version_info[:2] < (3, 6):
raise RuntimeError("Python version >= 3.6 required.")
# load the description
PATH_HERE = os.path.abspath(os.path.dirname(__file__))
with open(os.path.abspath(PATH_HERE+"/README.rst")) as f:
long_description = f.read()
# standard pypi stuff
setup(
name='pyabf',
version='2.3.1',
author='Scott W Harden',
author_email='[email protected]',
python_requires='>=3.6',
packages=find_packages(),
include_package_data=True,
url='http://swharden.com/pyabf',
license='MIT License',
platforms='any',
description='Python library for reading files in Axon Binary Format (ABF)',
long_description=long_description,
install_requires=[
'matplotlib>=2.1.0',
'numpy>=1.13.3',
'pytest>=3.0.7',
],
classifiers=[
'Programming Language :: Python :: 3',
'Natural Language :: English',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Intended Audience :: Developers',
'Intended Audience :: Science/Research',
'Topic :: Scientific/Engineering :: Bio-Informatics',
'Topic :: Scientific/Engineering :: Information Analysis',
'Topic :: Scientific/Engineering :: Medical Science Apps.',
'Topic :: Scientific/Engineering :: Visualization',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: Software Development :: Libraries :: Application Frameworks',
],
project_urls={
'Bug Reports': 'https://github.com/swharden/pyABF/issues',
'Source': 'https://github.com/swharden/pyABF',
'Documentation': 'https://swharden.com/pyabf/tutorial',
},
)
| 34.169811 | 79 | 0.6455 |
4a204961c896b47a81f45b332d5560563d610d75 | 579 | py | Python | oldp/apps/courts/migrations/0014_auto_20181203_1057.py | docsuleman/oldp | 8dcaa8e6e435794c872346b5014945ace885adb4 | [
"MIT"
] | 66 | 2018-05-07T12:34:39.000Z | 2022-02-23T20:14:24.000Z | oldp/apps/courts/migrations/0014_auto_20181203_1057.py | Justice-PLP-DHV/oldp | eadf235bb0925453d9a5b81963a0ce53afeb17fd | [
"MIT"
] | 68 | 2018-06-11T16:13:17.000Z | 2022-02-10T08:03:26.000Z | oldp/apps/courts/migrations/0014_auto_20181203_1057.py | Justice-PLP-DHV/oldp | eadf235bb0925453d9a5b81963a0ce53afeb17fd | [
"MIT"
] | 15 | 2018-06-23T19:41:13.000Z | 2021-08-18T08:21:49.000Z | # Generated by Django 2.1.2 on 2018-12-03 10:57
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('courts', '0013_auto_20181128_1054'),
]
operations = [
migrations.RenameField(
model_name='court',
old_name='updated',
new_name='updated_date',
),
migrations.AddField(
model_name='court',
name='aliases',
field=models.TextField(blank=True, help_text='List of aliases (one per line)', null=True),
),
]
| 24.125 | 102 | 0.580311 |
4a2049a3dc233dd76a08db94dcc57e0f5ef382e8 | 1,345 | py | Python | Dynamic Obstacle Simulation/State_Estimator.py | TSummersLab/Risk_Bounded_Nonlinear_Robot_Motion_Planning | 717b9f07f4ed625ee33ab8ec22ce78dc2907d759 | [
"MIT"
] | 3 | 2022-01-07T19:37:03.000Z | 2022-03-15T08:50:28.000Z | Dynamic Obstacle Simulation/State_Estimator.py | TSummersLab/Risk_Bounded_Nonlinear_Robot_Motion_Planning | 717b9f07f4ed625ee33ab8ec22ce78dc2907d759 | [
"MIT"
] | null | null | null | Dynamic Obstacle Simulation/State_Estimator.py | TSummersLab/Risk_Bounded_Nonlinear_Robot_Motion_Planning | 717b9f07f4ed625ee33ab8ec22ce78dc2907d759 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Tue Oct 27 12:26:45 2020
@author: vxr131730
2D state estimator class to be used with CARLA.
"""
###############################################################################
###############################################################################
import UKF_Estimator as UKF_Estimator
import EKF_Estimator as EKF_Estimator
import config
###############################################################################
###############################################################################
class State_Estimator:
def __init__(self, estimator_params):
self.params = estimator_params
self.estimates = None
# Plug in an Estimator based on config selection
if config.estimatorSelector:
self.estimator = UKF_Estimator.UKF()
else:
self.estimator = EKF_Estimator.EKF()
###########################################################################
def Get_Estimate(self):
estimates = self.estimator.Estimate(self.params)
return estimates
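# Minimal usage sketch (hypothetical keys -- the actual parameter dict is
# defined by the UKF/EKF estimator implementations in this project):
#   estimator = State_Estimator({"x0": x0, "P0": P0, "z": z_meas})
#   x_hat = estimator.Get_Estimate()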
###############################################################################
############################################################################### | 32.804878 | 80 | 0.346468 |
4a204a582373f7101ac773222bc18c61f01367f2 | 18,023 | py | Python | synapse/app/_base.py | afranke/synapse | f563676c097b830346acc7a4ce3e910c6b10c4c3 | [
"Apache-2.0"
] | null | null | null | synapse/app/_base.py | afranke/synapse | f563676c097b830346acc7a4ce3e910c6b10c4c3 | [
"Apache-2.0"
] | null | null | null | synapse/app/_base.py | afranke/synapse | f563676c097b830346acc7a4ce3e910c6b10c4c3 | [
"Apache-2.0"
] | null | null | null | # Copyright 2017 New Vector Ltd
# Copyright 2019-2021 The Matrix.org Foundation C.I.C
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import atexit
import gc
import logging
import os
import platform
import signal
import socket
import sys
import traceback
import warnings
from typing import TYPE_CHECKING, Awaitable, Callable, Iterable
from cryptography.utils import CryptographyDeprecationWarning
from typing_extensions import NoReturn
import twisted
from twisted.internet import defer, error, reactor
from twisted.logger import LoggingFile, LogLevel
from twisted.protocols.tls import TLSMemoryBIOFactory
import synapse
from synapse.api.constants import MAX_PDU_SIZE
from synapse.app import check_bind_error
from synapse.app.phone_stats_home import start_phone_stats_home
from synapse.config.homeserver import HomeServerConfig
from synapse.config.server import ManholeConfig
from synapse.crypto import context_factory
from synapse.events.presence_router import load_legacy_presence_router
from synapse.events.spamcheck import load_legacy_spam_checkers
from synapse.events.third_party_rules import load_legacy_third_party_event_rules
from synapse.logging.context import PreserveLoggingContext
from synapse.metrics.background_process_metrics import wrap_as_background_process
from synapse.metrics.jemalloc import setup_jemalloc_stats
from synapse.util.caches.lrucache import setup_expire_lru_cache_entries
from synapse.util.daemonize import daemonize_process
from synapse.util.rlimit import change_resource_limit
from synapse.util.versionstring import get_version_string
if TYPE_CHECKING:
from synapse.server import HomeServer
logger = logging.getLogger(__name__)
# list of tuples of function, args list, kwargs dict
_sighup_callbacks = []
def register_sighup(func, *args, **kwargs):
"""
Register a function to be called when a SIGHUP occurs.
Args:
func (function): Function to be called when sent a SIGHUP signal.
*args, **kwargs: args and kwargs to be passed to the target function.
"""
_sighup_callbacks.append((func, args, kwargs))
def start_worker_reactor(appname, config, run_command=reactor.run):
"""Run the reactor in the main process
Daemonizes if necessary, and then configures some resources, before starting
the reactor. Pulls configuration from the 'worker' settings in 'config'.
Args:
appname (str): application name which will be sent to syslog
config (synapse.config.Config): config object
run_command (Callable[]): callable that actually runs the reactor
"""
logger = logging.getLogger(config.worker.worker_app)
start_reactor(
appname,
soft_file_limit=config.server.soft_file_limit,
gc_thresholds=config.server.gc_thresholds,
pid_file=config.worker.worker_pid_file,
daemonize=config.worker.worker_daemonize,
print_pidfile=config.server.print_pidfile,
logger=logger,
run_command=run_command,
)
def start_reactor(
appname,
soft_file_limit,
gc_thresholds,
pid_file,
daemonize,
print_pidfile,
logger,
run_command=reactor.run,
):
"""Run the reactor in the main process
Daemonizes if necessary, and then configures some resources, before starting
the reactor
Args:
appname (str): application name which will be sent to syslog
soft_file_limit (int):
gc_thresholds:
pid_file (str): name of pid file to write to if daemonize is True
daemonize (bool): true to run the reactor in a background process
print_pidfile (bool): whether to print the pid file, if daemonize is True
logger (logging.Logger): logger instance to pass to Daemonize
run_command (Callable[]): callable that actually runs the reactor
"""
def run():
logger.info("Running")
setup_jemalloc_stats()
change_resource_limit(soft_file_limit)
if gc_thresholds:
gc.set_threshold(*gc_thresholds)
run_command()
# make sure that we run the reactor with the sentinel log context,
# otherwise other PreserveLoggingContext instances will get confused
# and complain when they see the logcontext arbitrarily swapping
# between the sentinel and `run` logcontexts.
#
# We also need to drop the logcontext before forking if we're daemonizing,
# otherwise the cputime metrics get confused about the per-thread resource usage
# appearing to go backwards.
with PreserveLoggingContext():
if daemonize:
if print_pidfile:
print(pid_file)
daemonize_process(pid_file, logger)
run()
def quit_with_error(error_string: str) -> NoReturn:
message_lines = error_string.split("\n")
line_length = min(max(len(line) for line in message_lines), 80) + 2
sys.stderr.write("*" * line_length + "\n")
for line in message_lines:
sys.stderr.write(" %s\n" % (line.rstrip(),))
sys.stderr.write("*" * line_length + "\n")
sys.exit(1)
def handle_startup_exception(e: Exception) -> NoReturn:
# Exceptions that occur between setting up the logging and forking or starting
# the reactor are written to the logs, followed by a summary to stderr.
logger.exception("Exception during startup")
quit_with_error(
f"Error during initialisation:\n {e}\nThere may be more information in the logs."
)
def redirect_stdio_to_logs() -> None:
streams = [("stdout", LogLevel.info), ("stderr", LogLevel.error)]
for (stream, level) in streams:
oldStream = getattr(sys, stream)
loggingFile = LoggingFile(
logger=twisted.logger.Logger(namespace=stream),
level=level,
encoding=getattr(oldStream, "encoding", None),
)
setattr(sys, stream, loggingFile)
print("Redirected stdout/stderr to logs")
def register_start(cb: Callable[..., Awaitable], *args, **kwargs) -> None:
"""Register a callback with the reactor, to be called once it is running
This can be used to initialise parts of the system which require an asynchronous
setup.
Any exception raised by the callback will be printed and logged, and the process
will exit.
"""
async def wrapper():
try:
await cb(*args, **kwargs)
except Exception:
# previously, we used Failure().printTraceback() here, in the hope that
# would give better tracebacks than traceback.print_exc(). However, that
# doesn't handle chained exceptions (with a __cause__ or __context__) well,
# and I *think* the need for Failure() is reduced now that we mostly use
# async/await.
# Write the exception to both the logs *and* the unredirected stderr,
# because people tend to get confused if it only goes to one or the other.
#
# One problem with this is that if people are using a logging config that
# logs to the console (as is common eg under docker), they will get two
# copies of the exception. We could maybe try to detect that, but it's
# probably a cost we can bear.
logger.fatal("Error during startup", exc_info=True)
print("Error during startup:", file=sys.__stderr__)
traceback.print_exc(file=sys.__stderr__)
# it's no use calling sys.exit here, since that just raises a SystemExit
# exception which is then caught by the reactor, and everything carries
# on as normal.
os._exit(1)
reactor.callWhenRunning(lambda: defer.ensureDeferred(wrapper()))
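# e.g. Synapse's entry points call ``register_start(_base.start, hs)`` so that
# the async start-up routine defined below runs once the reactor is up.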
def listen_metrics(bind_addresses, port):
"""
Start Prometheus metrics server.
"""
from synapse.metrics import RegistryProxy, start_http_server
for host in bind_addresses:
logger.info("Starting metrics listener on %s:%d", host, port)
start_http_server(port, addr=host, registry=RegistryProxy)
def listen_manhole(
bind_addresses: Iterable[str],
port: int,
manhole_settings: ManholeConfig,
manhole_globals: dict,
):
# twisted.conch.manhole 21.1.0 uses "int_from_bytes", which produces a confusing
    # warning. It's fixed by https://github.com/twisted/twisted/pull/1522, so
# suppress the warning for now.
warnings.filterwarnings(
action="ignore",
category=CryptographyDeprecationWarning,
message="int_from_bytes is deprecated",
)
from synapse.util.manhole import manhole
listen_tcp(
bind_addresses,
port,
manhole(settings=manhole_settings, globals=manhole_globals),
)
def listen_tcp(bind_addresses, port, factory, reactor=reactor, backlog=50):
"""
Create a TCP socket for a port and several addresses
Returns:
list[twisted.internet.tcp.Port]: listening for TCP connections
"""
r = []
for address in bind_addresses:
try:
r.append(reactor.listenTCP(port, factory, backlog, address))
except error.CannotListenError as e:
check_bind_error(e, address, bind_addresses)
return r
def listen_ssl(
bind_addresses, port, factory, context_factory, reactor=reactor, backlog=50
):
"""
    Create a TLS-over-TCP socket for a port and several addresses
Returns:
list of twisted.internet.tcp.Port listening for TLS connections
"""
r = []
for address in bind_addresses:
try:
r.append(
reactor.listenSSL(port, factory, context_factory, backlog, address)
)
except error.CannotListenError as e:
check_bind_error(e, address, bind_addresses)
return r
def refresh_certificate(hs):
"""
Refresh the TLS certificates that Synapse is using by re-reading them from
disk and updating the TLS context factories to use them.
"""
if not hs.config.server.has_tls_listener():
return
hs.config.tls.read_certificate_from_disk()
hs.tls_server_context_factory = context_factory.ServerContextFactory(hs.config)
if hs._listening_services:
logger.info("Updating context factories...")
for i in hs._listening_services:
# When you listenSSL, it doesn't make an SSL port but a TCP one with
# a TLS wrapping factory around the factory you actually want to get
# requests. This factory attribute is public but missing from
# Twisted's documentation.
if isinstance(i.factory, TLSMemoryBIOFactory):
addr = i.getHost()
logger.info(
"Replacing TLS context factory on [%s]:%i", addr.host, addr.port
)
# We want to replace TLS factories with a new one, with the new
# TLS configuration. We do this by reaching in and pulling out
# the wrappedFactory, and then re-wrapping it.
i.factory = TLSMemoryBIOFactory(
hs.tls_server_context_factory, False, i.factory.wrappedFactory
)
logger.info("Context factories updated.")
async def start(hs: "HomeServer"):
"""
Start a Synapse server or worker.
Should be called once the reactor is running.
Will start the main HTTP listeners and do some other startup tasks, and then
notify systemd.
Args:
hs: homeserver instance
"""
# Set up the SIGHUP machinery.
if hasattr(signal, "SIGHUP"):
reactor = hs.get_reactor()
@wrap_as_background_process("sighup")
def handle_sighup(*args, **kwargs):
# Tell systemd our state, if we're using it. This will silently fail if
# we're not using systemd.
sdnotify(b"RELOADING=1")
for i, args, kwargs in _sighup_callbacks:
i(*args, **kwargs)
sdnotify(b"READY=1")
# We defer running the sighup handlers until next reactor tick. This
# is so that we're in a sane state, e.g. flushing the logs may fail
# if the sighup happens in the middle of writing a log entry.
def run_sighup(*args, **kwargs):
# `callFromThread` should be "signal safe" as well as thread
# safe.
reactor.callFromThread(handle_sighup, *args, **kwargs)
signal.signal(signal.SIGHUP, run_sighup)
register_sighup(refresh_certificate, hs)
# Load the certificate from disk.
refresh_certificate(hs)
# Start the tracer
synapse.logging.opentracing.init_tracer(hs) # type: ignore[attr-defined] # noqa
# Instantiate the modules so they can register their web resources to the module API
# before we start the listeners.
module_api = hs.get_module_api()
for module, config in hs.config.modules.loaded_modules:
module(config=config, api=module_api)
load_legacy_spam_checkers(hs)
load_legacy_third_party_event_rules(hs)
load_legacy_presence_router(hs)
# If we've configured an expiry time for caches, start the background job now.
setup_expire_lru_cache_entries(hs)
# It is now safe to start your Synapse.
hs.start_listening()
hs.get_datastore().db_pool.start_profiling()
hs.get_pusherpool().start()
# Log when we start the shut down process.
hs.get_reactor().addSystemEventTrigger(
"before", "shutdown", logger.info, "Shutting down..."
)
setup_sentry(hs)
setup_sdnotify(hs)
# If background tasks are running on the main process, start collecting the
# phone home stats.
if hs.config.worker.run_background_tasks:
start_phone_stats_home(hs)
# We now freeze all allocated objects in the hopes that (almost)
# everything currently allocated are things that will be used for the
# rest of time. Doing so means less work each GC (hopefully).
#
    # This only works on Python 3.7+
if platform.python_implementation() == "CPython" and sys.version_info >= (3, 7):
gc.collect()
gc.freeze()
# Speed up shutdowns by freezing all allocated objects. This moves everything
# into the permanent generation and excludes them from the final GC.
    # Unfortunately this only works on Python 3.7+
if platform.python_implementation() == "CPython" and sys.version_info >= (3, 7):
atexit.register(gc.freeze)
def setup_sentry(hs):
"""Enable sentry integration, if enabled in configuration
Args:
hs (synapse.server.HomeServer)
"""
if not hs.config.metrics.sentry_enabled:
return
import sentry_sdk
sentry_sdk.init(
dsn=hs.config.metrics.sentry_dsn, release=get_version_string(synapse)
)
# We set some default tags that give some context to this instance
with sentry_sdk.configure_scope() as scope:
scope.set_tag("matrix_server_name", hs.config.server.server_name)
app = (
hs.config.worker.worker_app
if hs.config.worker.worker_app
else "synapse.app.homeserver"
)
name = hs.get_instance_name()
scope.set_tag("worker_app", app)
scope.set_tag("worker_name", name)
def setup_sdnotify(hs):
"""Adds process state hooks to tell systemd what we are up to."""
# Tell systemd our state, if we're using it. This will silently fail if
# we're not using systemd.
sdnotify(b"READY=1\nMAINPID=%i" % (os.getpid(),))
hs.get_reactor().addSystemEventTrigger(
"before", "shutdown", sdnotify, b"STOPPING=1"
)
sdnotify_sockaddr = os.getenv("NOTIFY_SOCKET")
def sdnotify(state):
"""
Send a notification to systemd, if the NOTIFY_SOCKET env var is set.
This function is based on the sdnotify python package, but since it's only a few
lines of code, it's easier to duplicate it here than to add a dependency on a
package which many OSes don't include as a matter of principle.
Args:
state (bytes): notification to send
"""
if not isinstance(state, bytes):
raise TypeError("sdnotify should be called with a bytes")
if not sdnotify_sockaddr:
return
addr = sdnotify_sockaddr
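    # systemd denotes abstract-namespace sockets with a leading '@', which the
    # kernel addresses with a leading NUL byte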
if addr[0] == "@":
addr = "\0" + addr[1:]
try:
with socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM) as sock:
sock.connect(addr)
sock.sendall(state)
except Exception as e:
# this is a bit surprising, since we don't expect to have a NOTIFY_SOCKET
# unless systemd is expecting us to notify it.
logger.warning("Unable to send notification to systemd: %s", e)
def max_request_body_size(config: HomeServerConfig) -> int:
"""Get a suitable maximum size for incoming HTTP requests"""
# Other than media uploads, the biggest request we expect to see is a fully-loaded
# /federation/v1/send request.
#
# The main thing in such a request is up to 50 PDUs, and up to 100 EDUs. PDUs are
# limited to 65536 bytes (possibly slightly more if the sender didn't use canonical
# json encoding); there is no specced limit to EDUs (see
# https://github.com/matrix-org/matrix-doc/issues/3121).
#
# in short, we somewhat arbitrarily limit requests to 200 * 64K (about 12.5M)
#
max_request_size = 200 * MAX_PDU_SIZE
# if we have a media repo enabled, we may need to allow larger uploads than that
if config.media.can_load_media_repo:
max_request_size = max(max_request_size, config.media.max_upload_size)
return max_request_size
| 35.064202 | 91 | 0.68579 |
4a204b80a32e22d344628ca9e5d06d94963eab6c | 2,343 | py | Python | backend/migrations/0026_auto_20180805_1216.py | ad-freiburg/qleverUI | 105ef23447eb691a5da8a4c92be3b4150aac72c6 | [
"Apache-2.0"
] | 4 | 2021-10-09T20:36:33.000Z | 2022-02-12T09:19:17.000Z | backend/migrations/0026_auto_20180805_1216.py | ad-freiburg/qleverUI | 105ef23447eb691a5da8a4c92be3b4150aac72c6 | [
"Apache-2.0"
] | 29 | 2019-09-01T08:48:12.000Z | 2021-06-01T15:57:24.000Z | backend/migrations/0026_auto_20180805_1216.py | ad-freiburg/qleverUI | 105ef23447eb691a5da8a4c92be3b4150aac72c6 | [
"Apache-2.0"
] | 4 | 2019-01-04T19:44:55.000Z | 2020-12-05T03:40:42.000Z | # -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2018-08-05 12:16
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('backend', '0025_auto_20180616_1246'),
]
operations = [
migrations.RemoveField(
model_name='object',
name='backend',
),
migrations.RemoveField(
model_name='pattern',
name='backend',
),
migrations.RemoveField(
model_name='predicate',
name='backend',
),
migrations.RemoveField(
model_name='subject',
name='backend',
),
migrations.DeleteModel(
name='Suggestion',
),
migrations.DeleteModel(
name='Synonym',
),
migrations.RemoveField(
model_name='backend',
name='getPatternsFromQLever',
),
migrations.RemoveField(
model_name='backend',
name='getPredicateNamesFromRelation',
),
migrations.RemoveField(
model_name='backend',
name='getSubjectNamesFromRelation',
),
migrations.RemoveField(
model_name='backend',
name='predicateNameRelation',
),
migrations.RemoveField(
model_name='backend',
name='subjectNameRelation',
),
migrations.RemoveField(
model_name='backend',
name='subjectOrderRelation',
),
migrations.AlterField(
model_name='backend',
name='dynamicSuggestions',
field=models.IntegerField(choices=[(2, '3. SPARQL & context sensitive entities'), (1, '2. SPARQL & context insensitive entities'), (0, '1. SPARQL syntax & keywords only')], default=True, help_text='If you want to disable the dynamic suggestions from QLever or QLever UI by default change this option.', verbose_name='Default suggestion mode'),
),
migrations.DeleteModel(
name='Object',
),
migrations.DeleteModel(
name='Pattern',
),
migrations.DeleteModel(
name='predicate',
),
migrations.DeleteModel(
name='Subject',
),
]
| 29.658228 | 355 | 0.554417 |
4a204e23f313194fe3fec32cba68c9c2c0664272 | 4,581 | py | Python | GAN/infogan/infogan_pytorch.py | nikhaas/generative-models | 566ac2e95971ab55d65169e565b9d53023ccc113 | [
"Unlicense"
] | 37 | 2018-08-18T16:05:39.000Z | 2019-06-05T19:01:19.000Z | GAN/infogan/infogan_pytorch.py | bimal08844/generative-models | 566ac2e95971ab55d65169e565b9d53023ccc113 | [
"Unlicense"
] | null | null | null | GAN/infogan/infogan_pytorch.py | bimal08844/generative-models | 566ac2e95971ab55d65169e565b9d53023ccc113 | [
"Unlicense"
] | 34 | 2018-05-12T14:26:29.000Z | 2018-08-25T14:56:41.000Z | import torch
import torch.nn.functional as nn
import torch.autograd as autograd
import torch.optim as optim
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
import os
from torch.autograd import Variable
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets('../../MNIST_data', one_hot=True)
mb_size = 32
Z_dim = 16
X_dim = mnist.train.images.shape[1]
y_dim = mnist.train.labels.shape[1]
h_dim = 128
cnt = 0
lr = 1e-3
def xavier_init(size):
in_dim = size[0]
xavier_stddev = 1. / np.sqrt(in_dim / 2.)
return Variable(torch.randn(*size) * xavier_stddev, requires_grad=True)
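# note: std = sqrt(2 / fan_in), i.e. the He-style variant of Xavier initialisation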
""" ==================== GENERATOR ======================== """
Wzh = xavier_init(size=[Z_dim + 10, h_dim])
bzh = Variable(torch.zeros(h_dim), requires_grad=True)
Whx = xavier_init(size=[h_dim, X_dim])
bhx = Variable(torch.zeros(X_dim), requires_grad=True)
def G(z, c):
inputs = torch.cat([z, c], 1)
h = nn.relu(inputs @ Wzh + bzh.repeat(inputs.size(0), 1))
X = nn.sigmoid(h @ Whx + bhx.repeat(h.size(0), 1))
return X
""" ==================== DISCRIMINATOR ======================== """
Wxh = xavier_init(size=[X_dim, h_dim])
bxh = Variable(torch.zeros(h_dim), requires_grad=True)
Why = xavier_init(size=[h_dim, 1])
bhy = Variable(torch.zeros(1), requires_grad=True)
def D(X):
h = nn.relu(X @ Wxh + bxh.repeat(X.size(0), 1))
y = nn.sigmoid(h @ Why + bhy.repeat(h.size(0), 1))
return y
""" ====================== Q(c|X) ========================== """
Wqxh = xavier_init(size=[X_dim, h_dim])
bqxh = Variable(torch.zeros(h_dim), requires_grad=True)
Whc = xavier_init(size=[h_dim, 10])
bhc = Variable(torch.zeros(10), requires_grad=True)
def Q(X):
h = nn.relu(X @ Wqxh + bqxh.repeat(X.size(0), 1))
c = nn.softmax(h @ Whc + bhc.repeat(h.size(0), 1))
return c
G_params = [Wzh, bzh, Whx, bhx]
D_params = [Wxh, bxh, Why, bhy]
Q_params = [Wqxh, bqxh, Whc, bhc]
params = G_params + D_params + Q_params
""" ===================== TRAINING ======================== """
def reset_grad():
    for p in params:
        if p.grad is not None:
            p.grad.data.zero_()
G_solver = optim.Adam(G_params, lr=1e-3)
D_solver = optim.Adam(D_params, lr=1e-3)
Q_solver = optim.Adam(G_params + Q_params, lr=1e-3)
def sample_c(size):
c = np.random.multinomial(1, 10*[0.1], size=size)
c = Variable(torch.from_numpy(c.astype('float32')))
return c
for it in range(100000):
# Sample data
X, _ = mnist.train.next_batch(mb_size)
X = Variable(torch.from_numpy(X))
z = Variable(torch.randn(mb_size, Z_dim))
c = sample_c(mb_size)
# Dicriminator forward-loss-backward-update
G_sample = G(z, c)
D_real = D(X)
D_fake = D(G_sample)
D_loss = -torch.mean(torch.log(D_real + 1e-8) + torch.log(1 - D_fake + 1e-8))
D_loss.backward()
D_solver.step()
# Housekeeping - reset gradient
reset_grad()
# Generator forward-loss-backward-update
G_sample = G(z, c)
D_fake = D(G_sample)
G_loss = -torch.mean(torch.log(D_fake + 1e-8))
G_loss.backward()
G_solver.step()
# Housekeeping - reset gradient
reset_grad()
# Q forward-loss-backward-update
G_sample = G(z, c)
Q_c_given_x = Q(G_sample)
crossent_loss = torch.mean(-torch.sum(c * torch.log(Q_c_given_x + 1e-8), dim=1))
ent_loss = torch.mean(-torch.sum(c * torch.log(c + 1e-8), dim=1))
mi_loss = crossent_loss + ent_loss
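    # ent_loss is constant w.r.t. the model parameters (c is a fixed one-hot
    # sample), so only crossent_loss contributes to the gradient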
mi_loss.backward()
Q_solver.step()
# Housekeeping - reset gradient
reset_grad()
# Print and plot every now and then
if it % 1000 == 0:
idx = np.random.randint(0, 10)
c = np.zeros([mb_size, 10])
c[range(mb_size), idx] = 1
c = Variable(torch.from_numpy(c.astype('float32')))
samples = G(z, c).data.numpy()[:16]
print('Iter-{}; D_loss: {}; G_loss: {}; Idx: {}'
.format(it, D_loss.data.numpy(), G_loss.data.numpy(), idx))
fig = plt.figure(figsize=(4, 4))
gs = gridspec.GridSpec(4, 4)
gs.update(wspace=0.05, hspace=0.05)
for i, sample in enumerate(samples):
ax = plt.subplot(gs[i])
plt.axis('off')
ax.set_xticklabels([])
ax.set_yticklabels([])
ax.set_aspect('equal')
plt.imshow(sample.reshape(28, 28), cmap='Greys_r')
if not os.path.exists('out/'):
os.makedirs('out/')
plt.savefig('out/{}.png'
.format(str(cnt).zfill(3)), bbox_inches='tight')
cnt += 1
plt.close(fig)
| 25.881356 | 84 | 0.599651 |
4a204ea1fc8b6e71fc0fed96fc78883601fb488f | 5,907 | py | Python | jina/executors/rankers/__init__.py | HarshCasper/jina | 81ab098b140b74ad1cfdfde9218cec7a40923749 | [
"Apache-2.0"
] | 1 | 2021-02-25T19:28:50.000Z | 2021-02-25T19:28:50.000Z | jina/executors/rankers/__init__.py | HarshCasper/jina | 81ab098b140b74ad1cfdfde9218cec7a40923749 | [
"Apache-2.0"
] | 1 | 2021-02-27T05:56:45.000Z | 2021-02-27T05:57:03.000Z | jina/executors/rankers/__init__.py | deepampatel/jina | 97f9e97a4a678a28bdeacbc7346eaf7bbd2aeb89 | [
"Apache-2.0"
] | null | null | null | __copyright__ = "Copyright (c) 2020 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
from typing import Dict
import numpy as np
from .. import BaseExecutor
class BaseRanker(BaseExecutor):
"""The base class for a `Ranker`"""
def score(self, *args, **kwargs):
raise NotImplementedError
class Chunk2DocRanker(BaseRanker):
""" A :class:`Chunk2DocRanker` translates the chunk-wise score (distance) to the doc-wise score.
    At query time, :class:`Chunk2DocRanker` is an almost-always required component,
    because in the end we want to retrieve the top-k documents for a given query document,
    not the top-k chunks for the query chunks. The purpose of :class:`Chunk2DocRanker`
    is to aggregate the already retrieved top-k chunks into documents.
The key function here is :func:`score`.
.. seealso::
:mod:`jina.drivers.handlers.score`
"""
    required_keys = {'text'}  #: a set of ``str``, key-values to be extracted from the chunk-level protobuf message
COL_MATCH_PARENT_ID = 'match_parent_id'
COL_MATCH_ID = 'match_id'
COL_DOC_CHUNK_ID = 'doc_chunk_id'
COL_SCORE = 'score'
def score(self, match_idx: 'np.ndarray', query_chunk_meta: Dict, match_chunk_meta: Dict) -> 'np.ndarray':
"""
Translate the chunk-level top-k results into doc-level top-k results. Some score functions may leverage the
meta information of the query, hence the meta info of the query chunks and matched chunks are given
as arguments.
:param match_idx: A [N x 4] numpy ``ndarray``, column-wise:
- ``match_idx[:, 0]``: ``doc_id`` of the matched chunks, integer
- ``match_idx[:, 1]``: ``chunk_id`` of the matched chunks, integer
- ``match_idx[:, 2]``: ``chunk_id`` of the query chunks, integer
- ``match_idx[:, 3]``: distance/metric/score between the query and matched chunks, float
:type match_idx: np.ndarray.
:param query_chunk_meta: The meta information of the query chunks, where the key is query chunks' ``chunk_id``,
the value is extracted by the ``required_keys``.
:type query_chunk_meta: Dict.
:param match_chunk_meta: The meta information of the matched chunks, where the key is matched chunks'
``chunk_id``, the value is extracted by the ``required_keys``.
        :type match_chunk_meta: Dict.
:return: A [N x 2] numpy ``ndarray``, where the first column is the matched documents' ``doc_id`` (integer)
the second column is the score/distance/metric between the matched doc and the query doc (float).
:rtype: np.ndarray.
"""
_groups = self.group_by_doc_id(match_idx)
r = []
for _g in _groups:
_doc_id, _doc_score = self._get_score(_g, query_chunk_meta, match_chunk_meta)
r.append((_doc_id, _doc_score))
return self.sort_doc_by_score(r)
def group_by_doc_id(self, match_idx):
"""
Group the ``match_idx`` by ``doc_id``.
:return: an iterator over the groups.
:rtype: :class:`Chunk2DocRanker`.
"""
return self._group_by(match_idx, self.COL_MATCH_PARENT_ID)
@staticmethod
def _group_by(match_idx, col_name):
        # sort by ``col_name``
_sorted_m = np.sort(match_idx, order=col_name)
_, _doc_counts = np.unique(_sorted_m[col_name], return_counts=True)
# group by ``col``
return np.split(_sorted_m, np.cumsum(_doc_counts))[:-1]
def _get_score(self, match_idx, query_chunk_meta, match_chunk_meta, *args, **kwargs):
raise NotImplementedError
@staticmethod
def sort_doc_by_score(r):
"""
Sort a list of (``doc_id``, ``score``) tuples by the ``score``.
:param r: List of Tuples with document id and score
:type r: List[Tuple[Any, Any]]
        :return: A `np.ndarray` in the shape of [N x 2], where `N` is the length of the input list.
:rtype: np.ndarray
"""
r = np.array(r, dtype=[
(Chunk2DocRanker.COL_MATCH_PARENT_ID, np.object),
(Chunk2DocRanker.COL_SCORE, np.float64)]
)
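        # e.g. [('d1', 0.3), ('d2', 0.9)] -> rows ordered ('d2', 0.9), ('d1', 0.3)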
return np.sort(r, order=Chunk2DocRanker.COL_SCORE)[::-1]
def get_doc_id(self, match_with_same_doc_id):
"""Return document id that matches with given id :param:`match_with_same_doc_id`"""
return match_with_same_doc_id[0][self.COL_MATCH_PARENT_ID]
class Match2DocRanker(BaseRanker):
"""
Re-scores the matches for a document. This Ranker is only responsible for
calculating new scores and not for the actual sorting. The sorting is handled
in the respective ``Matches2DocRankDriver``.
Possible implementations:
- ReverseRanker (reverse scores of all matches)
- BucketShuffleRanker (first buckets matches and then sort each bucket).
"""
COL_MATCH_ID = 'match_id'
COL_SCORE = 'score'
def score(self, query_meta: Dict, old_match_scores: Dict, match_meta: Dict) -> 'np.ndarray':
"""
Calculates the new scores for matches and returns them.
:param query_meta: Dictionary containing all the query meta information
requested by the `required_keys` class_variable.
:type query_meta: Dict
:param old_match_scores: Contains old scores in the format {match_id: score}
:type old_match_scores: Dict
:param match_meta: Dictionary containing all the matches meta information
requested by the `required_keys` class_variable.
            Format: {match_id: {attribute: attribute_value}}, e.g. {5: {"length": 3}}
:type match_meta: Dict
:return: A `np.ndarray` in the shape of [N x 2] where `N` is the length of
the `old_match_scores`. Semantic: [[match_id, new_score]]
:rtype: np.ndarray
"""
raise NotImplementedError
| 42.496403 | 119 | 0.654816 |
4a204fdeadb2233c723275eed47a581597ebab1e | 902 | py | Python | Facebook AutoPokeBack/pokeback.py | cclauss/Browser-Automation | 7baca74d40ac850f9570d7e40a47021dc0e8e387 | [
"Apache-2.0"
] | 35 | 2016-07-16T07:05:24.000Z | 2021-07-07T15:18:55.000Z | Facebook AutoPokeBack/pokeback.py | cclauss/Browser-Automation | 7baca74d40ac850f9570d7e40a47021dc0e8e387 | [
"Apache-2.0"
] | null | null | null | Facebook AutoPokeBack/pokeback.py | cclauss/Browser-Automation | 7baca74d40ac850f9570d7e40a47021dc0e8e387 | [
"Apache-2.0"
] | 7 | 2016-07-27T10:25:10.000Z | 2019-12-06T08:45:03.000Z | import time
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
import getpass
print "Enter your email ID"
email_id = raw_input(">>")
print "Enter your password"
#password = raw_input(">>")
password = getpass.getpass(">>")
driver = webdriver.Firefox()
driver.get("https://www.facebook.com/pokes")
driver.maximize_window()
email = driver.find_element_by_xpath("//label[@for = 'email']")
email.send_keys(email_id)
driver.find_element_by_xpath("//label[@for = 'pass']").click()
passwd = driver.find_element_by_xpath("//label[@for = 'pass']")
passwd.send_keys(password)
passwd.send_keys(Keys.ENTER)
pokes = driver.find_elements_by_link_text('Poke Back')
for poke in pokes:
poke.click()
driver.find_element_by_id('userNavigationLabel').click()
time.sleep(1)
logout = driver.find_element_by_class_name('_w0d')
logout.submit()
driver.implicitly_wait(2)
driver.close()
| 23.736842 | 63 | 0.756098 |
4a204fe29f92931ba1820c5dc9e7af35e05754a3 | 13,536 | py | Python | qchem/tests/test_two_particle.py | tehruhn/pennylane | a556c0bc3b56c32e034c2d647a0da1d44a07d9bd | [
"Apache-2.0"
] | null | null | null | qchem/tests/test_two_particle.py | tehruhn/pennylane | a556c0bc3b56c32e034c2d647a0da1d44a07d9bd | [
"Apache-2.0"
] | null | null | null | qchem/tests/test_two_particle.py | tehruhn/pennylane | a556c0bc3b56c32e034c2d647a0da1d44a07d9bd | [
"Apache-2.0"
] | null | null | null | import os
import numpy as np
import pytest
from pennylane import qchem
from openfermion.hamiltonians import MolecularData
ref_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), "test_ref_files")
v_op_1 = {
(): 0.0,
((0, 1), (0, 1), (0, 0), (0, 0)): 0.3411947665760211,
((0, 1), (1, 1), (1, 0), (0, 0)): 0.3411947665760211,
((1, 1), (0, 1), (0, 0), (1, 0)): 0.3411947665760211,
((1, 1), (1, 1), (1, 0), (1, 0)): 0.3411947665760211,
((0, 1), (0, 1), (2, 0), (2, 0)): 0.08950028803070323,
((0, 1), (1, 1), (3, 0), (2, 0)): 0.08950028803070323,
((1, 1), (0, 1), (2, 0), (3, 0)): 0.08950028803070323,
((1, 1), (1, 1), (3, 0), (3, 0)): 0.08950028803070323,
((0, 1), (2, 1), (0, 0), (2, 0)): 0.08950028803070323,
((0, 1), (3, 1), (1, 0), (2, 0)): 0.08950028803070323,
((1, 1), (2, 1), (0, 0), (3, 0)): 0.08950028803070323,
((1, 1), (3, 1), (1, 0), (3, 0)): 0.08950028803070323,
((0, 1), (2, 1), (2, 0), (0, 0)): 0.3353663891543792,
((0, 1), (3, 1), (3, 0), (0, 0)): 0.3353663891543792,
((1, 1), (2, 1), (2, 0), (1, 0)): 0.3353663891543792,
((1, 1), (3, 1), (3, 0), (1, 0)): 0.3353663891543792,
((2, 1), (0, 1), (0, 0), (2, 0)): 0.3353663891543792,
((2, 1), (1, 1), (1, 0), (2, 0)): 0.3353663891543792,
((3, 1), (0, 1), (0, 0), (3, 0)): 0.3353663891543792,
((3, 1), (1, 1), (1, 0), (3, 0)): 0.3353663891543792,
((2, 1), (0, 1), (2, 0), (0, 0)): 0.08950028803070323,
((2, 1), (1, 1), (3, 0), (0, 0)): 0.08950028803070323,
((3, 1), (0, 1), (2, 0), (1, 0)): 0.08950028803070323,
((3, 1), (1, 1), (3, 0), (1, 0)): 0.08950028803070323,
((2, 1), (2, 1), (0, 0), (0, 0)): 0.08950028803070323,
((2, 1), (3, 1), (1, 0), (0, 0)): 0.08950028803070323,
((3, 1), (2, 1), (0, 0), (1, 0)): 0.08950028803070323,
((3, 1), (3, 1), (1, 0), (1, 0)): 0.08950028803070323,
((2, 1), (2, 1), (2, 0), (2, 0)): 0.352552816086392,
((2, 1), (3, 1), (3, 0), (2, 0)): 0.352552816086392,
((3, 1), (2, 1), (2, 0), (3, 0)): 0.352552816086392,
((3, 1), (3, 1), (3, 0), (3, 0)): 0.352552816086392,
}
v_op_2 = {
(): 0.6823895331520422,
((0, 1), (0, 0)): 1.1624649805561105,
((1, 1), (1, 0)): 1.1624649805561105,
((0, 1), (0, 1), (0, 0), (0, 0)): 0.352552816086392,
((0, 1), (1, 1), (1, 0), (0, 0)): 0.352552816086392,
((1, 1), (0, 1), (0, 0), (1, 0)): 0.352552816086392,
((1, 1), (1, 1), (1, 0), (1, 0)): 0.352552816086392,
}
v_op_3 = {
(): 1.6585666870874103,
((0, 1), (0, 0)): 0.7200645027092623,
((1, 1), (1, 0)): 0.7200645027092623,
((0, 1), (2, 0)): 0.01568677271778423,
((1, 1), (3, 0)): 0.01568677271778423,
((2, 1), (0, 0)): 0.01568677271778432,
((3, 1), (1, 0)): 0.01568677271778432,
((2, 1), (2, 0)): 0.7696051973390092,
((3, 1), (3, 0)): 0.7696051973390092,
((0, 1), (0, 1), (0, 0), (0, 0)): 0.24365548437056841,
((0, 1), (1, 1), (1, 0), (0, 0)): 0.24365548437056841,
((1, 1), (0, 1), (0, 0), (1, 0)): 0.24365548437056841,
((1, 1), (1, 1), (1, 0), (1, 0)): 0.24365548437056841,
((0, 1), (0, 1), (0, 0), (2, 0)): -0.02428978770367371,
((0, 1), (1, 1), (1, 0), (2, 0)): -0.02428978770367371,
((1, 1), (0, 1), (0, 0), (3, 0)): -0.02428978770367371,
((1, 1), (1, 1), (1, 0), (3, 0)): -0.02428978770367371,
((0, 1), (0, 1), (2, 0), (0, 0)): -0.024289787703673717,
((0, 1), (1, 1), (3, 0), (0, 0)): -0.024289787703673717,
((1, 1), (0, 1), (2, 0), (1, 0)): -0.024289787703673717,
((1, 1), (1, 1), (3, 0), (1, 0)): -0.024289787703673717,
((0, 1), (0, 1), (2, 0), (2, 0)): 0.006531987971873417,
((0, 1), (1, 1), (3, 0), (2, 0)): 0.006531987971873417,
((1, 1), (0, 1), (2, 0), (3, 0)): 0.006531987971873417,
((1, 1), (1, 1), (3, 0), (3, 0)): 0.006531987971873417,
((0, 1), (2, 1), (0, 0), (0, 0)): -0.02428978770367371,
((0, 1), (3, 1), (1, 0), (0, 0)): -0.02428978770367371,
((1, 1), (2, 1), (0, 0), (1, 0)): -0.02428978770367371,
((1, 1), (3, 1), (1, 0), (1, 0)): -0.02428978770367371,
((0, 1), (2, 1), (0, 0), (2, 0)): 0.006531987971873418,
((0, 1), (3, 1), (1, 0), (2, 0)): 0.006531987971873418,
((1, 1), (2, 1), (0, 0), (3, 0)): 0.006531987971873418,
((1, 1), (3, 1), (1, 0), (3, 0)): 0.006531987971873418,
((0, 1), (2, 1), (2, 0), (0, 0)): 0.11180501845367194,
((0, 1), (3, 1), (3, 0), (0, 0)): 0.11180501845367194,
((1, 1), (2, 1), (2, 0), (1, 0)): 0.11180501845367194,
((1, 1), (3, 1), (3, 0), (1, 0)): 0.11180501845367194,
((0, 1), (2, 1), (2, 0), (2, 0)): 0.0037420836721733935,
((0, 1), (3, 1), (3, 0), (2, 0)): 0.0037420836721733935,
((1, 1), (2, 1), (2, 0), (3, 0)): 0.0037420836721733935,
((1, 1), (3, 1), (3, 0), (3, 0)): 0.0037420836721733935,
((2, 1), (0, 1), (0, 0), (0, 0)): -0.02428978770367371,
((2, 1), (1, 1), (1, 0), (0, 0)): -0.02428978770367371,
((3, 1), (0, 1), (0, 0), (1, 0)): -0.02428978770367371,
((3, 1), (1, 1), (1, 0), (1, 0)): -0.02428978770367371,
((2, 1), (0, 1), (0, 0), (2, 0)): 0.11180501845367194,
((2, 1), (1, 1), (1, 0), (2, 0)): 0.11180501845367194,
((3, 1), (0, 1), (0, 0), (3, 0)): 0.11180501845367194,
((3, 1), (1, 1), (1, 0), (3, 0)): 0.11180501845367194,
((2, 1), (0, 1), (2, 0), (0, 0)): 0.006531987971873418,
((2, 1), (1, 1), (3, 0), (0, 0)): 0.006531987971873418,
((3, 1), (0, 1), (2, 0), (1, 0)): 0.006531987971873418,
((3, 1), (1, 1), (3, 0), (1, 0)): 0.006531987971873418,
((2, 1), (0, 1), (2, 0), (2, 0)): 0.003742083672173376,
((2, 1), (1, 1), (3, 0), (2, 0)): 0.003742083672173376,
((3, 1), (0, 1), (2, 0), (3, 0)): 0.003742083672173376,
((3, 1), (1, 1), (3, 0), (3, 0)): 0.003742083672173376,
((2, 1), (2, 1), (0, 0), (0, 0)): 0.006531987971873423,
((2, 1), (3, 1), (1, 0), (0, 0)): 0.006531987971873423,
((3, 1), (2, 1), (0, 0), (1, 0)): 0.006531987971873423,
((3, 1), (3, 1), (1, 0), (1, 0)): 0.006531987971873423,
((2, 1), (2, 1), (0, 0), (2, 0)): 0.0037420836721733645,
((2, 1), (3, 1), (1, 0), (2, 0)): 0.0037420836721733645,
((3, 1), (2, 1), (0, 0), (3, 0)): 0.0037420836721733645,
((3, 1), (3, 1), (1, 0), (3, 0)): 0.0037420836721733645,
((2, 1), (2, 1), (2, 0), (0, 0)): 0.0037420836721733727,
((2, 1), (3, 1), (3, 0), (0, 0)): 0.0037420836721733727,
((3, 1), (2, 1), (2, 0), (1, 0)): 0.0037420836721733727,
((3, 1), (3, 1), (3, 0), (1, 0)): 0.0037420836721733727,
((2, 1), (2, 1), (2, 0), (2, 0)): 0.16894113834436625,
((2, 1), (3, 1), (3, 0), (2, 0)): 0.16894113834436625,
((3, 1), (2, 1), (2, 0), (3, 0)): 0.16894113834436625,
((3, 1), (3, 1), (3, 0), (3, 0)): 0.16894113834436625,
}
v_op_4 = {
(): 15.477079668766912,
((0, 1), (0, 0)): 4.348718720149925,
((1, 1), (1, 0)): 4.348718720149925,
((2, 1), (2, 0)): 4.7839298822632825,
((3, 1), (3, 0)): 4.7839298822632825,
((4, 1), (4, 0)): 3.9720173194427786,
((5, 1), (5, 0)): 3.9720173194427786,
((0, 1), (0, 1), (0, 0), (0, 0)): 0.3913181430816212,
((0, 1), (1, 1), (1, 0), (0, 0)): 0.3913181430816212,
((1, 1), (0, 1), (0, 0), (1, 0)): 0.3913181430816212,
((1, 1), (1, 1), (1, 0), (1, 0)): 0.3913181430816212,
((0, 1), (0, 1), (2, 0), (2, 0)): 0.02795519311163234,
((0, 1), (1, 1), (3, 0), (2, 0)): 0.02795519311163234,
((1, 1), (0, 1), (2, 0), (3, 0)): 0.02795519311163234,
((1, 1), (1, 1), (3, 0), (3, 0)): 0.02795519311163234,
((0, 1), (0, 1), (4, 0), (4, 0)): 0.03448849005311543,
((0, 1), (1, 1), (5, 0), (4, 0)): 0.03448849005311543,
((1, 1), (0, 1), (4, 0), (5, 0)): 0.03448849005311543,
((1, 1), (1, 1), (5, 0), (5, 0)): 0.03448849005311543,
((0, 1), (2, 1), (0, 0), (2, 0)): 0.02795519311163234,
((0, 1), (3, 1), (1, 0), (2, 0)): 0.02795519311163234,
((1, 1), (2, 1), (0, 0), (3, 0)): 0.02795519311163234,
((1, 1), (3, 1), (1, 0), (3, 0)): 0.02795519311163234,
((0, 1), (2, 1), (2, 0), (0, 0)): 0.36443868319762635,
((0, 1), (3, 1), (3, 0), (0, 0)): 0.36443868319762635,
((1, 1), (2, 1), (2, 0), (1, 0)): 0.36443868319762635,
((1, 1), (3, 1), (3, 0), (1, 0)): 0.36443868319762635,
((0, 1), (4, 1), (0, 0), (4, 0)): 0.03448849005311544,
((0, 1), (5, 1), (1, 0), (4, 0)): 0.03448849005311544,
((1, 1), (4, 1), (0, 0), (5, 0)): 0.03448849005311544,
((1, 1), (5, 1), (1, 0), (5, 0)): 0.03448849005311544,
((0, 1), (4, 1), (4, 0), (0, 0)): 0.3041058444913643,
((0, 1), (5, 1), (5, 0), (0, 0)): 0.3041058444913643,
((1, 1), (4, 1), (4, 0), (1, 0)): 0.3041058444913643,
((1, 1), (5, 1), (5, 0), (1, 0)): 0.3041058444913643,
((2, 1), (0, 1), (0, 0), (2, 0)): 0.36443868319762635,
((2, 1), (1, 1), (1, 0), (2, 0)): 0.36443868319762635,
((3, 1), (0, 1), (0, 0), (3, 0)): 0.36443868319762635,
((3, 1), (1, 1), (1, 0), (3, 0)): 0.36443868319762635,
((2, 1), (0, 1), (2, 0), (0, 0)): 0.02795519311163234,
((2, 1), (1, 1), (3, 0), (0, 0)): 0.02795519311163234,
((3, 1), (0, 1), (2, 0), (1, 0)): 0.02795519311163234,
((3, 1), (1, 1), (3, 0), (1, 0)): 0.02795519311163234,
((2, 1), (2, 1), (0, 0), (0, 0)): 0.02795519311163234,
((2, 1), (3, 1), (1, 0), (0, 0)): 0.02795519311163234,
((3, 1), (2, 1), (0, 0), (1, 0)): 0.02795519311163234,
((3, 1), (3, 1), (1, 0), (1, 0)): 0.02795519311163234,
((2, 1), (2, 1), (2, 0), (2, 0)): 0.44007954668752236,
((2, 1), (3, 1), (3, 0), (2, 0)): 0.44007954668752236,
((3, 1), (2, 1), (2, 0), (3, 0)): 0.44007954668752236,
((3, 1), (3, 1), (3, 0), (3, 0)): 0.44007954668752236,
((2, 1), (2, 1), (4, 0), (4, 0)): 0.012175350836228654,
((2, 1), (3, 1), (5, 0), (4, 0)): 0.012175350836228654,
((3, 1), (2, 1), (4, 0), (5, 0)): 0.012175350836228654,
((3, 1), (3, 1), (5, 0), (5, 0)): 0.012175350836228654,
((2, 1), (4, 1), (2, 0), (4, 0)): 0.012175350836228654,
((2, 1), (5, 1), (3, 0), (4, 0)): 0.012175350836228654,
((3, 1), (4, 1), (2, 0), (5, 0)): 0.012175350836228654,
((3, 1), (5, 1), (3, 0), (5, 0)): 0.012175350836228654,
((2, 1), (4, 1), (4, 0), (2, 0)): 0.3124924051344332,
((2, 1), (5, 1), (5, 0), (2, 0)): 0.3124924051344332,
((3, 1), (4, 1), (4, 0), (3, 0)): 0.3124924051344332,
((3, 1), (5, 1), (5, 0), (3, 0)): 0.3124924051344332,
((4, 1), (0, 1), (0, 0), (4, 0)): 0.3041058444913643,
((4, 1), (1, 1), (1, 0), (4, 0)): 0.3041058444913643,
((5, 1), (0, 1), (0, 0), (5, 0)): 0.3041058444913643,
((5, 1), (1, 1), (1, 0), (5, 0)): 0.3041058444913643,
((4, 1), (0, 1), (4, 0), (0, 0)): 0.034488490053115425,
((4, 1), (1, 1), (5, 0), (0, 0)): 0.034488490053115425,
((5, 1), (0, 1), (4, 0), (1, 0)): 0.034488490053115425,
((5, 1), (1, 1), (5, 0), (1, 0)): 0.034488490053115425,
((4, 1), (2, 1), (2, 0), (4, 0)): 0.3124924051344332,
((4, 1), (3, 1), (3, 0), (4, 0)): 0.3124924051344332,
((5, 1), (2, 1), (2, 0), (5, 0)): 0.3124924051344332,
((5, 1), (3, 1), (3, 0), (5, 0)): 0.3124924051344332,
((4, 1), (2, 1), (4, 0), (2, 0)): 0.012175350836228654,
((4, 1), (3, 1), (5, 0), (2, 0)): 0.012175350836228654,
((5, 1), (2, 1), (4, 0), (3, 0)): 0.012175350836228654,
((5, 1), (3, 1), (5, 0), (3, 0)): 0.012175350836228654,
((4, 1), (4, 1), (0, 0), (0, 0)): 0.03448849005311543,
((4, 1), (5, 1), (1, 0), (0, 0)): 0.03448849005311543,
((5, 1), (4, 1), (0, 0), (1, 0)): 0.03448849005311543,
((5, 1), (5, 1), (1, 0), (1, 0)): 0.03448849005311543,
((4, 1), (4, 1), (2, 0), (2, 0)): 0.012175350836228654,
((4, 1), (5, 1), (3, 0), (2, 0)): 0.012175350836228654,
((5, 1), (4, 1), (2, 0), (3, 0)): 0.012175350836228654,
((5, 1), (5, 1), (3, 0), (3, 0)): 0.012175350836228654,
((4, 1), (4, 1), (4, 0), (4, 0)): 0.3097576511847101,
((4, 1), (5, 1), (5, 0), (4, 0)): 0.3097576511847101,
((5, 1), (4, 1), (4, 0), (5, 0)): 0.3097576511847101,
((5, 1), (5, 1), (5, 0), (5, 0)): 0.3097576511847101,
}
@pytest.mark.parametrize(
("name", "core", "active", "v_op_exp"),
[
("h2_pyscf", None, None, v_op_1),
("h2_pyscf", [0], None, v_op_2),
("h2_pyscf", None, [0, 1], v_op_1),
("lih", [0], [1, 2], v_op_3),
("h2o_psi4", [0, 1, 2], [3, 4, 6], v_op_4),
],
)
def test_table_two_particle(name, core, active, v_op_exp):
r"""Test the FermionOperator built by the function `two_particle` of the `obs` module"""
hf_data = MolecularData(filename=os.path.join(ref_dir, name))
v_op = qchem.two_particle(hf_data.two_body_integrals, core=core, active=active)
assert v_op.terms == v_op_exp
v_me_1D = np.array([1, 2, 3, 4])
v_me_4D = np.full((2, 2, 2, 2), 0.5)
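# v_me_1D deliberately has the wrong rank to trigger the "must be a 4D array"
# error; v_me_4D is a valid 4D array used to exercise the core/active
# index-range checks below.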
@pytest.mark.parametrize(
("v_me", "core", "active", "msg_match"),
[
(v_me_1D, [0], None, "'matrix_elements' must be a 4D array"),
(v_me_4D, [-1, 0, 1, 2], None, "Indices of core orbitals must be between 0 and"),
(v_me_4D, [0, 1, 2, 3], None, "Indices of core orbitals must be between 0 and"),
(v_me_4D, None, [-1, 0], "Indices of active orbitals must be between 0 and"),
(v_me_4D, None, [2, 6], "Indices of active orbitals must be between 0 and"),
],
)
def test_exceptions_two_particle(v_me, core, active, msg_match):
"""Test that the function `'two_particle'` throws an exception
if the dimension of the matrix elements array is not a 4D array or
if the indices of core and/or active orbitals are out of range."""
with pytest.raises(ValueError, match=msg_match):
qchem.two_particle(v_me, core=core, active=active)
| 50.319703 | 92 | 0.472444 |
4a2050364f33480277b8f704e9fac86d472650f3 | 956 | py | Python | services/core-api/app/api/utils/include/user_info.py | bcgov/mds | 6c427a66a5edb4196222607291adef8fd6677038 | [
"Apache-2.0"
] | 25 | 2018-07-09T19:04:37.000Z | 2022-03-15T17:27:10.000Z | services/document-manager/backend/app/utils/include/user_info.py | bcgov/mds | 6c427a66a5edb4196222607291adef8fd6677038 | [
"Apache-2.0"
] | 983 | 2018-04-25T20:08:07.000Z | 2022-03-31T21:45:20.000Z | services/document-manager/backend/app/utils/include/user_info.py | bcgov/mds | 6c427a66a5edb4196222607291adef8fd6677038 | [
"Apache-2.0"
] | 58 | 2018-05-15T22:35:50.000Z | 2021-11-29T19:40:52.000Z | from app.extensions import jwt
from jose import jwt as jwt_jose
VALID_REALM = ['idir']
DUMMY_AUTH_CLAIMS = {
"iss": "test_issuer",
"typ": "Bearer",
"username": "mds",
"preferred_username": "mds",
"email": "test-email",
"given_name": "test-given-name",
"realm_access": {
"roles": []
}
}
class User:
_test_mode = False
def get_user_raw_info(self):
if self._test_mode:
return DUMMY_AUTH_CLAIMS
token = jwt.get_token_auth_header()
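        # Claims are decoded without verifying the signature; the token is
        # presumably validated upstream by the jwt extension.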
return jwt_jose.get_unverified_claims(token)
def get_user_email(self):
raw_info = self.get_user_raw_info()
return raw_info.get('email')
def get_user_username(self):
raw_info = self.get_user_raw_info()
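        # Intersect the recognised realms with the user's realm roles; when one
        # matches, the username is prefixed with it (e.g. idir\<username>).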
realms = list(set(VALID_REALM) & set(raw_info['realm_access']['roles']))
        if realms:
            return realms[0] + '\\' + raw_info['preferred_username']
        return raw_info['preferred_username']
| 25.837838 | 89 | 0.631799 |
4a2050802be200725063777ec46badc9f28cf3b6 | 165 | py | Python | bin/ominoes/hexominoes-parallelogram-35x6.py | tiwo/puzzler | 7ad3d9a792f0635f7ec59ffa85fb46b54fd77a7e | [
"Intel"
] | null | null | null | bin/ominoes/hexominoes-parallelogram-35x6.py | tiwo/puzzler | 7ad3d9a792f0635f7ec59ffa85fb46b54fd77a7e | [
"Intel"
] | null | null | null | bin/ominoes/hexominoes-parallelogram-35x6.py | tiwo/puzzler | 7ad3d9a792f0635f7ec59ffa85fb46b54fd77a7e | [
"Intel"
] | 1 | 2022-01-02T16:54:14.000Z | 2022-01-02T16:54:14.000Z | #!/usr/bin/env python
# $Id$
"""many solutions"""
import puzzler
from puzzler.puzzles.hexominoes import HexominoesParallelogram35x6 as puzzle
puzzler.run(puzzle)
| 16.5 | 76 | 0.775758 |
4a2050b96329efc6233b9c7195d7e8f69b01d37a | 1,342 | py | Python | commands/developer-utils/gitlab/gitlab-todos.template.py | daviddzhou/script-commands | 393ae1d4ec0a8f3ddf85c0d5ec83a48d6d0268cc | [
"MIT"
] | 3,305 | 2020-09-30T17:38:45.000Z | 2022-03-31T21:09:34.000Z | commands/developer-utils/gitlab/gitlab-todos.template.py | devrrior/script-commands | 90d69de3404d379eacaf0c07ecbffca7096b8db1 | [
"MIT"
] | 500 | 2020-10-01T07:23:54.000Z | 2022-03-31T13:31:00.000Z | commands/developer-utils/gitlab/gitlab-todos.template.py | devrrior/script-commands | 90d69de3404d379eacaf0c07ecbffca7096b8db1 | [
"MIT"
] | 787 | 2020-09-30T20:36:47.000Z | 2022-03-31T20:09:44.000Z | #!/usr/bin/env python3
# How to use this script?
# It's a template which needs further setup. Duplicate the file,
# remove `.template.` from the filename and set an Personal access token as
# well as the GitLab instance url if it is not gitlab.com in gitlabconfig.py
# You need to copy gitlabconfig.py and gitlabhelper.py next to the script command
# otherwise it won't work. gitlabconfig.py and gitlabhelper.py are shared between
# all gitlab script commands.
#
# API: https://docs.gitlab.com/ee/api
# Parameters
# Required parameters:
# @raycast.schemaVersion 1
# @raycast.title To-Dos
# @raycast.mode fullOutput
# Optional parameters:
# @raycast.packageName GitLab
# @raycast.icon images/gitlab.png
# Documentation:
# @raycast.author Michael Aigner
# @raycast.authorURL https://github.com/tonka3000
# @raycast.description Show todos from GitLab
# Configuration
# see gitlabconfig.py
# Main program
from gitlabhelper import GitLab
gitlab = GitLab()
data = gitlab.get_call("todos")
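# `get_call` is assumed to issue a GET against the /todos endpoint
# (https://docs.gitlab.com/ee/api/todos.html) and return the decoded JSON list.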
print(f"GitLab To-Do List on {gitlab.instance}:\n")
todo_count = len(data)
print(f"To Do {todo_count}")
for todo in data:
project_name = todo.get("project", {}).get("name_with_namespace")
title = todo.get("target", {}).get("title")
web_url = todo.get("target", {}).get("web_url")
print(f"* {title} at {project_name}")
print(f"{web_url}\n") | 28.553191 | 81 | 0.735469 |
4a205381388c96dc0b81d351c8b3305e872ae184 | 1,343 | py | Python | tests/device/test_read_measurement.py | Sensirion/python-i2c-svm4 | f0d408614eedab04042ca683ecda1e723a5908e6 | [
"BSD-3-Clause"
] | null | null | null | tests/device/test_read_measurement.py | Sensirion/python-i2c-svm4 | f0d408614eedab04042ca683ecda1e723a5908e6 | [
"BSD-3-Clause"
] | null | null | null | tests/device/test_read_measurement.py | Sensirion/python-i2c-svm4 | f0d408614eedab04042ca683ecda1e723a5908e6 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
# (c) Copyright 2021 Sensirion AG, Switzerland
from __future__ import absolute_import, division, print_function
from sensirion_i2c_svm41.response_types import Humidity, \
Temperature, AirQualityVoc, AirQualityNox
import pytest
import time
@pytest.mark.needs_device
def test(device):
"""
    Test that read_measured_values() returns values of the expected types.
"""
device.start_measurement()
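    # Give the sensor time to produce its first sample; 1.1 s presumably covers
    # one measurement interval.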
time.sleep(1.1)
# check the read values
humidity, temperature, air_quality_voc, air_quality_nox =\
device.read_measured_values()
assert type(humidity) is Humidity
assert type(humidity.ticks) is int
assert type(humidity.percent_rh) is float
assert type(temperature) is Temperature
assert type(temperature.ticks) is int
assert type(temperature.degrees_celsius) is float
assert type(temperature.degrees_fahrenheit) is float
assert type(air_quality_voc) is AirQualityVoc
assert type(air_quality_voc.voc_index) is float
assert type(air_quality_nox) is AirQualityNox
assert type(air_quality_nox.nox_index) is float
# use default formatting for printing output:
print("{}, {}, {}, {}".format(humidity,
temperature,
air_quality_voc,
air_quality_nox))
| 35.342105 | 64 | 0.691735 |
4a20560cdbe337e740fc5664fbd4b686034d2980 | 797 | py | Python | booksapi/booksapi/urls.py | lkry95/micropythonapi | 8900c7ffcdff4e824a10de95354908aff1b060f2 | [
"MIT"
] | null | null | null | booksapi/booksapi/urls.py | lkry95/micropythonapi | 8900c7ffcdff4e824a10de95354908aff1b060f2 | [
"MIT"
] | null | null | null | booksapi/booksapi/urls.py | lkry95/micropythonapi | 8900c7ffcdff4e824a10de95354908aff1b060f2 | [
"MIT"
] | null | null | null | """booksapi URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
urlpatterns = [
path('admin/', admin.site.urls),
path('', include('books.urls')),
]
| 33.208333 | 77 | 0.70138 |
4a2056aac112b01a68e643fd4ce8b2f516254768 | 7,043 | py | Python | phy/apps/template/gui.py | GiocomoLab/phy | b97d510b5414f65733e3d50ba205d736d42962cd | [
"BSD-3-Clause"
] | null | null | null | phy/apps/template/gui.py | GiocomoLab/phy | b97d510b5414f65733e3d50ba205d736d42962cd | [
"BSD-3-Clause"
] | null | null | null | phy/apps/template/gui.py | GiocomoLab/phy | b97d510b5414f65733e3d50ba205d736d42962cd | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
"""Template GUI."""
#------------------------------------------------------------------------------
# Imports
#------------------------------------------------------------------------------
import logging
from operator import itemgetter
from pathlib import Path
import numpy as np
from phylib import _add_log_file
from phylib.io.model import TemplateModel, get_template_params, load_model
from phylib.utils import Bunch, connect
from phy.cluster.views import ScatterView
from phy.gui import create_app, run_app
from ..base import WaveformMixin, FeatureMixin, TemplateMixin, TraceMixin, BaseController
logger = logging.getLogger(__name__)
#------------------------------------------------------------------------------
# Custom views
#------------------------------------------------------------------------------
class TemplateFeatureView(ScatterView):
"""Scatter view showing the template features."""
pass
#------------------------------------------------------------------------------
# Template Controller
#------------------------------------------------------------------------------
class TemplateController(WaveformMixin, FeatureMixin, TemplateMixin, TraceMixin, BaseController):
"""Controller for the Template GUI.
Constructor
-----------
dir_path : str or Path
Path to the data directory
config_dir : str or Path
Path to the configuration directory
model : Model
Model object, optional (it is automatically created otherwise)
plugins : list
List of plugins to manually activate, optional (the plugins are automatically loaded from
the user configuration directory).
clear_cache : boolean
Whether to clear the cache on startup.
enable_threading : boolean
Whether to enable threading in the views when selecting clusters.
"""
gui_name = 'TemplateGUI'
# Specific views implemented in this class.
_new_views = ('TemplateFeatureView',)
# Classes to load by default, in that order. The view refresh follows the same order
# when the cluster selection changes.
default_views = (
'WaveformView',
'CorrelogramView',
'ISIView',
'FeatureView',
'AmplitudeView',
'FiringRateView',
'TraceView',
'ProbeView',
'TemplateFeatureView',
)
# Internal methods
# -------------------------------------------------------------------------
def _create_model(self, dir_path=None, **kwargs):
return TemplateModel(dir_path=dir_path, **kwargs)
def _set_supervisor(self):
super(TemplateController, self)._set_supervisor()
supervisor = self.supervisor
@connect(sender=supervisor)
def on_attach_gui(sender):
@supervisor.actions.add(shortcut='shift+ctrl+k', set_busy=True)
def split_init(cluster_ids=None):
"""Split a cluster according to the original templates."""
if cluster_ids is None:
cluster_ids = supervisor.selected
s = supervisor.clustering.spikes_in_clusters(cluster_ids)
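                # model.spike_templates[s] holds each spike's original template
                # id, so the split follows the original template boundaries.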
supervisor.actions.split(s, self.model.spike_templates[s])
self.color_selector = supervisor.color_selector
def _set_similarity_functions(self):
super(TemplateController, self)._set_similarity_functions()
self.similarity_functions['template'] = self.template_similarity
self.similarity = 'template'
def _get_template_features(self, cluster_ids, load_all=False):
"""Get the template features of a pair of clusters."""
if len(cluster_ids) != 2:
return
assert len(cluster_ids) == 2
clu0, clu1 = cluster_ids
s0 = self._get_feature_view_spike_ids(clu0, load_all=load_all)
s1 = self._get_feature_view_spike_ids(clu1, load_all=load_all)
n0 = self.get_template_counts(clu0)
n1 = self.get_template_counts(clu1)
t0 = self.model.get_template_features(s0)
t1 = self.model.get_template_features(s1)
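        # Project the per-spike template features onto the two clusters'
        # template mixtures: weighting by n0 gives the x coordinates,
        # weighting by n1 the y coordinates.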
x0 = np.average(t0, weights=n0, axis=1)
y0 = np.average(t0, weights=n1, axis=1)
x1 = np.average(t1, weights=n0, axis=1)
y1 = np.average(t1, weights=n1, axis=1)
return [
Bunch(x=x0, y=y0, spike_ids=s0),
Bunch(x=x1, y=y1, spike_ids=s1),
]
def _set_view_creator(self):
super(TemplateController, self)._set_view_creator()
self.view_creator['TemplateFeatureView'] = self.create_template_feature_view
# Public methods
# -------------------------------------------------------------------------
def get_best_channels(self, cluster_id):
"""Return the best channels of a given cluster."""
template_id = self.get_template_for_cluster(cluster_id)
return self.model.get_template(template_id).channel_ids
def template_similarity(self, cluster_id):
"""Return the list of similar clusters to a given cluster."""
# Templates of the cluster.
temp_i = np.nonzero(self.get_template_counts(cluster_id))[0]
# The similarity of the cluster with each template.
sims = np.max(self.model.similar_templates[temp_i, :], axis=0)
def _sim_ij(cj):
# Templates of the cluster.
if cj < self.model.n_templates:
return float(sims[cj])
temp_j = np.nonzero(self.get_template_counts(cj))[0]
return float(np.max(sims[temp_j]))
out = [(cj, _sim_ij(cj))
for cj in self.supervisor.clustering.cluster_ids]
# NOTE: hard-limit to 100 for performance reasons.
return sorted(out, key=itemgetter(1), reverse=True)[:100]
def get_template_amplitude(self, template_id):
"""Return the maximum amplitude of a template's waveforms across all channels."""
waveforms = self.model.get_template_waveforms(template_id)
assert waveforms.ndim == 2 # shape: (n_samples, n_channels)
return (waveforms.max(axis=0) - waveforms.min(axis=0)).max()
def create_template_feature_view(self):
if self.model.template_features is None:
return
return TemplateFeatureView(coords=self._get_template_features)
#------------------------------------------------------------------------------
# Template commands
#------------------------------------------------------------------------------
def template_gui(params_path, **kwargs): # pragma: no cover
"""Launch the Template GUI."""
# Create a `phy.log` log file with DEBUG level.
_add_log_file(Path(params_path).parent / 'phy.log')
create_app()
controller = TemplateController(**get_template_params(params_path), **kwargs)
gui = controller.create_gui()
gui.show()
run_app()
gui.close()
controller.model.close()
def template_describe(params_path):
"""Describe a template dataset."""
model = load_model(params_path)
model.describe()
model.close()
| 35.215 | 97 | 0.592929 |
4a2057104e3be164c79bc3c79b23a052e5adcebe | 860 | py | Python | backend/student_profile/migrations/0004_instructorloadcoursemajor.py | yamamz/enrollment-system-vue-django | ab4627071a037881db0a968a52a34c5c57f3f698 | [
"MIT"
] | null | null | null | backend/student_profile/migrations/0004_instructorloadcoursemajor.py | yamamz/enrollment-system-vue-django | ab4627071a037881db0a968a52a34c5c57f3f698 | [
"MIT"
] | null | null | null | backend/student_profile/migrations/0004_instructorloadcoursemajor.py | yamamz/enrollment-system-vue-django | ab4627071a037881db0a968a52a34c5c57f3f698 | [
"MIT"
] | null | null | null | # Generated by Django 2.1.1 on 2020-08-25 10:24
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('student_profile', '0003_auto_20200817_0835'),
]
operations = [
migrations.CreateModel(
name='InstructorLoadCourseMajor',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('instructor_load_subject', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='instructor_load_subject', to='student_profile.InstructorLoadSubject')),
('major', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='student_profile.Major')),
],
),
]
| 37.391304 | 203 | 0.67093 |
4a2058173fbb3bb4dc5c3524932fe6fa962863b9 | 64 | py | Python | fishhook/__main__.py | evpeople/fish-hook | d4746d41b8c3a1d57ffd79907a8fb475579a7b03 | [
"MIT"
] | 33 | 2017-02-21T06:55:38.000Z | 2021-09-07T05:53:14.000Z | fishhook/__main__.py | evpeople/fish-hook | d4746d41b8c3a1d57ffd79907a8fb475579a7b03 | [
"MIT"
] | 36 | 2019-08-22T11:12:08.000Z | 2022-03-31T12:47:26.000Z | fishhook/__main__.py | evpeople/fish-hook | d4746d41b8c3a1d57ffd79907a8fb475579a7b03 | [
"MIT"
] | 2 | 2017-02-21T11:03:52.000Z | 2021-07-02T08:42:33.000Z | from .command import main
if __name__ == '__main__':
main() | 16 | 26 | 0.671875 |
4a205883625f05419f5cba56ecb0527dc45bfcec | 25,433 | py | Python | lib/streamlit/state/session_state.py | arraydude/streamlit-1 | ab7c339623b457c71e3b77e3162c86962ac167d0 | [
"Apache-2.0"
] | 1 | 2022-01-19T10:48:49.000Z | 2022-01-19T10:48:49.000Z | lib/streamlit/state/session_state.py | arraydude/streamlit-1 | ab7c339623b457c71e3b77e3162c86962ac167d0 | [
"Apache-2.0"
] | 52 | 2021-10-04T21:52:48.000Z | 2021-12-29T02:18:44.000Z | lib/streamlit/state/session_state.py | arraydude/streamlit-1 | ab7c339623b457c71e3b77e3162c86962ac167d0 | [
"Apache-2.0"
] | null | null | null | # Copyright 2018-2021 Streamlit Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from copy import deepcopy
import json
from streamlit.stats import CacheStat, CacheStatsProvider
from streamlit.type_util import Key
from typing import (
TYPE_CHECKING,
Any,
KeysView,
cast,
Dict,
Iterator,
MutableMapping,
Optional,
Union,
Tuple,
Callable,
Set,
List,
)
import attr
from pympler.asizeof import asizeof
import streamlit as st
from streamlit import logger as _logger
from streamlit.errors import StreamlitAPIException
from streamlit.proto.WidgetStates_pb2 import WidgetState as WidgetStateProto
from streamlit.proto.WidgetStates_pb2 import WidgetStates as WidgetStatesProto
if TYPE_CHECKING:
from streamlit.server.server import SessionInfo
logger = _logger.get_logger(__name__)
GENERATED_WIDGET_KEY_PREFIX = "$$GENERATED_WIDGET_KEY"
STREAMLIT_INTERNAL_KEY_PREFIX = "$$STREAMLIT_INTERNAL_KEY"
SCRIPT_RUN_WITHOUT_ERRORS_KEY = (
f"{STREAMLIT_INTERNAL_KEY_PREFIX}_SCRIPT_RUN_WITHOUT_ERRORS"
)
@attr.s(auto_attribs=True, slots=True, frozen=True)
class Serialized:
value: WidgetStateProto
@attr.s(auto_attribs=True, slots=True, frozen=True)
class Value:
value: Any
WState = Union[Serialized, Value]
WidgetArgs = Tuple[Any, ...]
WidgetCallback = Callable[..., None]
# A deserializer receives the value from whatever field is set on the
# WidgetState proto, and returns a regular python value. A serializer
# receives a regular python value, and returns something suitable for
# a value field on WidgetState proto. They should be inverses.
WidgetDeserializer = Callable[[Any, str], Any]
WidgetSerializer = Callable[[Any], Any]
WidgetKwargs = Dict[str, Any]
@attr.s(auto_attribs=True, slots=True, frozen=True)
class WidgetMetadata:
id: str
deserializer: WidgetDeserializer = attr.ib(repr=False)
serializer: WidgetSerializer = attr.ib(repr=False)
value_type: Any
callback: Optional[WidgetCallback] = None
callback_args: Optional[WidgetArgs] = None
callback_kwargs: Optional[WidgetKwargs] = None
@attr.s(auto_attribs=True, slots=True)
class WStates(MutableMapping[str, Any]):
states: Dict[str, WState] = attr.Factory(dict)
widget_metadata: Dict[str, WidgetMetadata] = attr.Factory(dict)
def __getitem__(self, k: str) -> Any:
item = self.states.get(k)
if item is not None:
if isinstance(item, Value):
return item.value
else:
metadata = self.widget_metadata.get(k)
if metadata is None:
                    # No deserializer, which should only happen if the state
                    # came from a reconnecting browser and the script is
# trying to access it. Pretend it doesn't exist.
raise KeyError(k)
value_type = cast(str, item.value.WhichOneof("value"))
value = item.value.__getattribute__(value_type)
# Array types are messages with data in a `data` field
if value_type in [
"double_array_value",
"int_array_value",
"string_array_value",
]:
value = value.data
elif value_type == "json_value":
value = json.loads(value)
deserialized = metadata.deserializer(value, metadata.id)
# Update metadata to reflect information from WidgetState proto
self.set_widget_metadata(attr.evolve(metadata, value_type=value_type))
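                # Cache the deserialized value so subsequent lookups skip
                # proto decoding.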
self.states[k] = Value(deserialized)
return deserialized
else:
raise KeyError(k)
def __setitem__(self, k: str, v: WState):
self.states[k] = v
def __delitem__(self, k: str) -> None:
del self.states[k]
def __len__(self) -> int:
return len(self.states)
def __iter__(self):
# For this and many other methods, we can't simply delegate to the
# states field, because we need to invoke `__getitem__` for any
# values, to handle deserialization and unwrapping of values.
for key in self.states:
yield key
def keys(self) -> KeysView[str]:
return KeysView(self.states)
def items(self) -> Set[Tuple[str, Any]]: # type: ignore
return {(k, self[k]) for k in self}
def values(self) -> Set[Any]: # type: ignore
return {self[wid] for wid in self}
def update(self, other: "WStates"): # type: ignore
self.states.update(other.states)
self.widget_metadata.update(other.widget_metadata)
def set_widget_from_proto(self, widget_state: WidgetStateProto):
self[widget_state.id] = Serialized(widget_state)
def set_from_value(self, k: str, v: Any):
self[k] = Value(v)
def set_widget_metadata(self, widget_meta: WidgetMetadata):
self.widget_metadata[widget_meta.id] = widget_meta
def cull_nonexistent(self, widget_ids: Set[str]) -> None:
"""Removes items in state that aren't present in a set of provided
widget_ids.
"""
self.states = {k: v for k, v in self.states.items() if k in widget_ids}
def get_serialized(
self, k: str, default: Optional[WidgetStateProto] = None
) -> Optional[WidgetStateProto]:
widget = WidgetStateProto()
widget.id = k
item = self.states.get(k)
if item is not None:
if isinstance(item, Value):
metadata = self.widget_metadata.get(k)
if metadata is None:
return default
else:
field = metadata.value_type
serialized = metadata.serializer(item.value)
if field in (
"double_array_value",
"int_array_value",
"string_array_value",
):
arr = getattr(widget, field)
arr.data.extend(serialized)
elif field == "json_value":
setattr(widget, field, json.dumps(serialized))
elif field == "file_uploader_state_value":
widget.file_uploader_state_value.CopyFrom(serialized)
else:
setattr(widget, field, serialized)
return widget
else:
return item.value
else:
return default
def as_widget_states(self) -> List[WidgetStateProto]:
states = [
self.get_serialized(widget_id)
for widget_id in self.states.keys()
if self.get_serialized(widget_id)
]
states = cast(List[WidgetStateProto], states)
return states
def call_callback(self, widget_id: str) -> None:
metadata = self.widget_metadata.get(widget_id)
assert metadata is not None
callback = metadata.callback
if callback is None:
return
args = metadata.callback_args or ()
kwargs = metadata.callback_kwargs or {}
callback(*args, **kwargs)
def _missing_key_error_message(key: str) -> str:
return (
f'st.session_state has no key "{key}". Did you forget to initialize it? '
f"More info: https://docs.streamlit.io/library/advanced-features/session-state#initialization"
)
def _missing_attr_error_message(attr_name: str) -> str:
return (
f'st.session_state has no attribute "{attr_name}". Did you forget to initialize it? '
f"More info: https://docs.streamlit.io/library/advanced-features/session-state#initialization"
)
@attr.s(auto_attribs=True, slots=True)
class SessionState(MutableMapping[str, Any]):
"""SessionState allows users to store values that persist between app
reruns.
SessionState objects are created lazily when a script accesses
st.session_state.
Example
-------
>>> if "num_script_runs" not in st.session_state:
... st.session_state.num_script_runs = 0
>>> st.session_state.num_script_runs += 1
>>> st.write(st.session_state.num_script_runs) # writes 1
The next time your script runs, the value of
st.session_state.num_script_runs will be preserved.
>>> st.session_state.num_script_runs += 1
>>> st.write(st.session_state.num_script_runs) # writes 2
"""
# All the values from previous script runs, squished together to save memory
_old_state: Dict[str, Any] = attr.Factory(dict)
# Values set in session state during the current script run, possibly for
# setting a widget's value. Keyed by a user provided string.
_new_session_state: Dict[str, Any] = attr.Factory(dict)
    # Widget values from the frontend; usually a change to one of these prompted the script rerun
_new_widget_state: WStates = attr.Factory(WStates)
# Keys used for widgets will be eagerly converted to the matching widget id
_key_id_mapping: Dict[str, str] = attr.Factory(dict)
# is it possible for a value to get through this without being deserialized?
def compact_state(self) -> None:
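        # Copy every live value into _old_state, then drop the per-run dicts;
        # between script runs the three-way state collapses into one dict.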
for key_or_wid in self:
self._old_state[key_or_wid] = self[key_or_wid]
self._new_session_state.clear()
self._new_widget_state.clear()
def _compact(self) -> "SessionState":
state: SessionState = self.copy()
state.compact_state()
return state
def clear_state(self) -> None:
self._old_state.clear()
self._new_session_state.clear()
self._new_widget_state.clear()
self._key_id_mapping.clear()
def _safe_widget_state(self) -> Dict[str, Any]:
"""Returns widget states for all widgets with deserializers registered.
On a browser tab reconnect, it's possible for widgets in
self._new_widget_state to not have deserializers registered, which will
result in trying to access them raising a KeyError. This results in
things exploding if we try to naively use the splat operator on
self._new_widget_state in _merged_state below.
"""
wstate = {}
for k in self._new_widget_state.keys():
try:
wstate[k] = self._new_widget_state[k]
except KeyError:
pass
return wstate
@property
def _merged_state(self) -> Dict[str, Any]:
return {k: self[k] for k in self}
@property
def filtered_state(self) -> Dict[str, Any]:
"""The combined session and widget state, excluding keyless widgets."""
wid_key_map = self.reverse_key_wid_map
state: Dict[str, Any] = {}
# We can't write `for k, v in self.items()` here because doing so will
# run into a `KeyError` if widget metadata has been cleared (which
# happens when the streamlit server restarted or the cache was cleared),
# then we receive a widget's state from a browser.
for k in self.keys():
if not is_widget_id(k) and not is_internal_key(k):
state[k] = self[k]
elif is_keyed_widget_id(k):
try:
key = wid_key_map[k]
state[key] = self[k]
except KeyError:
# Widget id no longer maps to a key, it is a not yet
# cleared value in old state for a reset widget
pass
return state
@property
def reverse_key_wid_map(self) -> Dict[str, str]:
wid_key_map = {v: k for k, v in self._key_id_mapping.items()}
return wid_key_map
def keys(self) -> Set[str]: # type: ignore
"""All keys active in Session State, with widget keys converted
to widget ids when one is known."""
old_keys = {self._get_widget_id(k) for k in self._old_state.keys()}
new_widget_keys = set(self._new_widget_state.keys())
new_session_state_keys = {
self._get_widget_id(k) for k in self._new_session_state.keys()
}
return old_keys | new_widget_keys | new_session_state_keys
def is_new_state_value(self, user_key: str) -> bool:
return user_key in self._new_session_state
def is_new_widget_value(self, widget_id: str) -> bool:
return widget_id in self._new_widget_state
def __iter__(self) -> Iterator[Any]:
return iter(self.keys())
def __len__(self) -> int:
return len(self.keys())
def __str__(self):
return str(self._merged_state)
def __getitem__(self, key: str) -> Any:
wid_key_map = self.reverse_key_wid_map
widget_id = self._get_widget_id(key)
if widget_id in wid_key_map and widget_id == key:
# the "key" is a raw widget id, so get its associated user key for lookup
key = wid_key_map[widget_id]
try:
return self._getitem(widget_id, key)
except KeyError:
raise KeyError(_missing_key_error_message(key))
def _getitem(self, widget_id: Optional[str], user_key: Optional[str]) -> Any:
"""Get the value of an entry in Session State, using either the
user-provided key or a widget id as appropriate for the internal dict
being accessed.
At least one of the arguments must have a value."""
assert user_key is not None or widget_id is not None
if user_key is not None:
try:
return self._new_session_state[user_key]
except KeyError:
pass
if widget_id is not None:
try:
return self._new_widget_state[widget_id]
except KeyError:
pass
# Typically, there won't be both a widget id and an associated state key in
# old state at the same time, so the order we check is arbitrary.
# The exception is if session state is set and then a later run has
# a widget created, so the widget id entry should be newer.
# The opposite case shouldn't happen, because setting the value of a widget
# through session state will result in the next widget state reflecting that
# value.
if widget_id is not None:
try:
return self._old_state[widget_id]
except KeyError:
pass
if user_key is not None:
try:
return self._old_state[user_key]
except KeyError:
pass
raise KeyError
def __setitem__(self, user_key: str, value: Any) -> None:
from streamlit.script_run_context import get_script_run_ctx
ctx = get_script_run_ctx()
if ctx is not None:
widget_id = self._key_id_mapping.get(user_key, None)
widget_ids = ctx.widget_ids_this_run
form_ids = ctx.form_ids_this_run
if widget_id in widget_ids or user_key in form_ids:
raise StreamlitAPIException(
f"`st.session_state.{user_key}` cannot be modified after the widget"
f" with key `{user_key}` is instantiated."
)
self._new_session_state[user_key] = value
def __delitem__(self, key: str) -> None:
widget_id = self._get_widget_id(key)
if not (key in self or widget_id in self):
raise KeyError(_missing_key_error_message(key))
if key in self._new_session_state:
del self._new_session_state[key]
if key in self._old_state:
del self._old_state[key]
if key in self._key_id_mapping:
del self._key_id_mapping[key]
if widget_id in self._new_widget_state:
del self._new_widget_state[widget_id]
if widget_id in self._old_state:
del self._old_state[widget_id]
def update(self, other: "SessionState"): # type: ignore
self._new_session_state.update(other._new_session_state)
self._new_widget_state.update(other._new_widget_state)
self._old_state.update(other._old_state)
self._key_id_mapping.update(other._key_id_mapping)
def set_widgets_from_proto(self, widget_states: WidgetStatesProto):
for state in widget_states.widgets:
self._new_widget_state.set_widget_from_proto(state)
def call_callbacks(self):
from streamlit.script_runner import RerunException
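        # Only widgets whose value differs from the previous script run fire
        # their callback.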
changed_widget_ids = [
wid for wid in self._new_widget_state if self._widget_changed(wid)
]
for wid in changed_widget_ids:
try:
self._new_widget_state.call_callback(wid)
except RerunException:
st.warning(
"Calling st.experimental_rerun() within a callback is a no-op."
)
def _widget_changed(self, widget_id: str) -> bool:
new_value = self._new_widget_state.get(widget_id)
old_value = self._old_state.get(widget_id)
changed: bool = new_value != old_value
return changed
def reset_triggers(self) -> None:
"""Sets all trigger values in our state dictionary to False."""
for state_id in self._new_widget_state:
metadata = self._new_widget_state.widget_metadata.get(state_id)
if metadata is not None:
if metadata.value_type == "trigger_value":
self._new_widget_state[state_id] = Value(False)
for state_id in self._old_state:
metadata = self._new_widget_state.widget_metadata.get(state_id)
if metadata is not None:
if metadata.value_type == "trigger_value":
self._old_state[state_id] = False
def cull_nonexistent(self, widget_ids: Set[str]):
self._new_widget_state.cull_nonexistent(widget_ids)
# Remove entries from _old_state corresponding to
# widgets not in widget_ids.
self._old_state = {
k: v
for k, v in self._old_state.items()
if (k in widget_ids or not is_widget_id(k))
}
def set_metadata(self, widget_metadata: WidgetMetadata) -> None:
widget_id = widget_metadata.id
self._new_widget_state.widget_metadata[widget_id] = widget_metadata
def maybe_set_new_widget_value(
self, widget_id: str, user_key: Optional[str] = None
) -> None:
"""Add the value of a new widget to session state."""
widget_metadata = self._new_widget_state.widget_metadata[widget_id]
deserializer = widget_metadata.deserializer
initial_widget_value = deepcopy(deserializer(None, widget_metadata.id))
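        # By convention, deserializing None yields the widget's initial
        # (default) value.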
if widget_id not in self and (user_key is None or user_key not in self):
# This is the first time this widget is being registered, so we save
# its value in widget state.
self._new_widget_state.set_from_value(widget_id, initial_widget_value)
def should_set_frontend_state_value(
self, widget_id: str, user_key: Optional[str]
) -> bool:
"""Keep widget_state and session_state in sync when a widget is registered.
This method returns whether the frontend needs to be updated with the
new value of this widget.
"""
if user_key is None:
return False
return self.is_new_state_value(user_key)
def get_value_for_registration(self, widget_id: str) -> Any:
"""Get the value of a widget, for use as its return value.
Returns a copy, so reference types can't be accidentally mutated by user code.
"""
value = self[widget_id]
return deepcopy(value)
def as_widget_states(self) -> List[WidgetStateProto]:
return self._new_widget_state.as_widget_states()
def _get_widget_id(self, k: str) -> str:
"""Turns a value that might be a widget id or a user provided key into
an appropriate widget id.
"""
return self._key_id_mapping.get(k, k)
def set_key_widget_mapping(self, widget_id: str, user_key: str) -> None:
self._key_id_mapping[user_key] = widget_id
def copy(self):
return deepcopy(self)
def set_keyed_widget(
self, metadata: WidgetMetadata, widget_id: str, user_key: str
) -> None:
self.set_metadata(metadata)
self.set_key_widget_mapping(widget_id, user_key)
self.maybe_set_new_widget_value(widget_id, user_key)
def set_unkeyed_widget(self, metadata: WidgetMetadata, widget_id: str) -> None:
self.set_metadata(metadata)
self.maybe_set_new_widget_value(widget_id)
def get_metadata_by_key(self, user_key: str) -> WidgetMetadata:
widget_id = self._key_id_mapping[user_key]
return self._new_widget_state.widget_metadata[widget_id]
def get_stats(self) -> List[CacheStat]:
stat = CacheStat("st_session_state", "", asizeof(self))
return [stat]
def is_widget_id(key: str) -> bool:
return key.startswith(GENERATED_WIDGET_KEY_PREFIX)
# TODO: It would be better to expose keyed vs. keyless widget ids through more principled means
def is_keyed_widget_id(key: str) -> bool:
return is_widget_id(key) and not key.endswith("-None")
def is_internal_key(key: str) -> bool:
return key.startswith(STREAMLIT_INTERNAL_KEY_PREFIX)
_state_use_warning_already_displayed = False
def get_session_state() -> SessionState:
"""Get the SessionState object for the current session.
Note that in streamlit scripts, this function should not be called
directly. Instead, SessionState objects should be accessed via
st.session_state.
"""
global _state_use_warning_already_displayed
from streamlit.script_run_context import get_script_run_ctx
ctx = get_script_run_ctx()
# If there is no report context because the script is run bare, have
# session state act as an always empty dictionary, and print a warning.
if ctx is None:
if not _state_use_warning_already_displayed:
_state_use_warning_already_displayed = True
if not st._is_running_with_streamlit:
logger.warning(
"Session state does not function when running a script without `streamlit run`"
)
return SessionState()
return ctx.session_state
class LazySessionState(MutableMapping[str, Any]):
"""A lazy wrapper around SessionState.
SessionState can't be instantiated normally in lib/streamlit/__init__.py
    because there may not be an AppSession yet. Instead we have this wrapper,
    which delegates to the SessionState for the active AppSession. This will
    only be interacted with within an app script, that is, when an AppSession
    is guaranteed to exist.
"""
def _validate_key(self, key) -> None:
if key.startswith(GENERATED_WIDGET_KEY_PREFIX):
raise StreamlitAPIException(
f"Keys beginning with {GENERATED_WIDGET_KEY_PREFIX} are reserved."
)
def __iter__(self) -> Iterator[Any]:
state = get_session_state()
return iter(state.filtered_state)
def __len__(self) -> int:
state = get_session_state()
return len(state.filtered_state)
def __str__(self):
state = get_session_state()
return str(state.filtered_state)
def __getitem__(self, key: Key) -> Any:
key = str(key)
self._validate_key(key)
state = get_session_state()
return state[key]
def __setitem__(self, key: Key, value: Any) -> None:
key = str(key)
self._validate_key(key)
state = get_session_state()
state[key] = value
def __delitem__(self, key: Key) -> None:
key = str(key)
self._validate_key(key)
state = get_session_state()
del state[key]
def __getattr__(self, key: str) -> Any:
self._validate_key(key)
try:
return self[key]
except KeyError:
raise AttributeError(_missing_attr_error_message(key))
def __setattr__(self, key: str, value: Any) -> None:
self._validate_key(key)
self[key] = value
def __delattr__(self, key: str) -> None:
self._validate_key(key)
try:
del self[key]
except KeyError:
raise AttributeError(_missing_attr_error_message(key))
def to_dict(self) -> Dict[str, Any]:
state = get_session_state()
return state.filtered_state
@attr.s(auto_attribs=True, slots=True)
class SessionStateStatProvider(CacheStatsProvider):
_session_info_by_id: Dict[str, "SessionInfo"]
def get_stats(self) -> List[CacheStat]:
stats: List[CacheStat] = []
for session_info in self._session_info_by_id.values():
session_state = session_info.session.session_state
stats.extend(session_state.get_stats())
return stats
| 35.620448 | 102 | 0.644202 |
4a2058dbb817841def23862858681b41a65d6a34 | 3,086 | py | Python | torchelastic/rendezvous/test/rendezvous/api_test.py | yifuwang/elastic | 250667dc77fd95a0dd60d144fb452f896d95682b | [
"BSD-3-Clause"
] | null | null | null | torchelastic/rendezvous/test/rendezvous/api_test.py | yifuwang/elastic | 250667dc77fd95a0dd60d144fb452f896d95682b | [
"BSD-3-Clause"
] | null | null | null | torchelastic/rendezvous/test/rendezvous/api_test.py | yifuwang/elastic | 250667dc77fd95a0dd60d144fb452f896d95682b | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import unittest
from typing import Tuple
from torchelastic.rendezvous import (
RendezvousHandler,
RendezvousHandlerFactory,
RendezvousParameters,
)
def create_mock_rdzv_handler(ignored: RendezvousParameters) -> RendezvousHandler:
return MockRendezvousHandler()
class MockRendezvousHandler(RendezvousHandler):
def next_rendezvous(
self,
# pyre-ignore[11]: Annotation `Store` is not defined as a type.
# pyre-ignore[10]: Name `torch` is used but not defined.
) -> Tuple["torch.distributed.Store", int, int]: # noqa F821
raise NotImplementedError()
def is_closed(self) -> bool:
return False
def set_closed(self):
pass
def num_nodes_waiting(self) -> int:
return -1
def get_run_id(self) -> str:
return ""
class RendezvousHandlerFactoryTest(unittest.TestCase):
def test_double_registration(self):
factory = RendezvousHandlerFactory()
factory.register("mock", create_mock_rdzv_handler)
with self.assertRaises(ValueError):
factory.register("mock", create_mock_rdzv_handler)
def test_no_factory_method_found(self):
factory = RendezvousHandlerFactory()
rdzv_params = RendezvousParameters(
backend="mock", endpoint="", run_id="foobar", min_nodes=1, max_nodes=2
)
with self.assertRaises(ValueError):
factory.create_rdzv_handler(rdzv_params)
def test_create_rdzv_handler(self):
rdzv_params = RendezvousParameters(
backend="mock", endpoint="", run_id="foobar", min_nodes=1, max_nodes=2
)
factory = RendezvousHandlerFactory()
factory.register("mock", create_mock_rdzv_handler)
mock_rdzv_handler = factory.create_rdzv_handler(rdzv_params)
self.assertTrue(isinstance(mock_rdzv_handler, MockRendezvousHandler))
class RendezvousParametersTest(unittest.TestCase):
def test_get_or_default(self):
params = RendezvousParameters(
backend="foobar",
endpoint="localhost",
run_id="1234",
min_nodes=1,
max_nodes=1,
timeout1=None,
timeout2=10,
)
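        # timeout1 was explicitly set to None, so get() falls back to the
        # default; timeout2 keeps its explicit value; timeout3 was never set.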
self.assertEqual(30, params.get("timeout1", 30))
self.assertEqual(10, params.get("timeout2", 20))
self.assertEqual(60, params.get("timeout3", 60))
def test_get(self):
params = RendezvousParameters(
backend="foobar",
endpoint="localhost",
run_id="1234",
min_nodes=1,
max_nodes=1,
timeout1=None,
timeout2=10,
)
with self.assertRaises(KeyError):
params.get("timeout3")
with self.assertRaises(KeyError):
params.get("timeout1")
self.assertEqual(10, params.get("timeout2"))
| 29.673077 | 82 | 0.649384 |
4a205a403cb396b7e6c9089fdd832a7771d25773 | 58,209 | bzl | Python | tensorflow/workspace.bzl | anonymous-313/tensorflow | b82785818b6b020d62340eaaece32b9c75858185 | [
"Apache-2.0"
] | null | null | null | tensorflow/workspace.bzl | anonymous-313/tensorflow | b82785818b6b020d62340eaaece32b9c75858185 | [
"Apache-2.0"
] | null | null | null | tensorflow/workspace.bzl | anonymous-313/tensorflow | b82785818b6b020d62340eaaece32b9c75858185 | [
"Apache-2.0"
] | null | null | null | # TensorFlow external dependencies that can be loaded in WORKSPACE files.
load("//third_party/gpus:cuda_configure.bzl", "cuda_configure")
load("//third_party/gpus:rocm_configure.bzl", "rocm_configure")
load("//third_party/tensorrt:tensorrt_configure.bzl", "tensorrt_configure")
load("//third_party/nccl:nccl_configure.bzl", "nccl_configure")
load("//third_party/git:git_configure.bzl", "git_configure")
load("//third_party/py:python_configure.bzl", "python_configure")
load("//third_party/systemlibs:syslibs_configure.bzl", "syslibs_configure")
load("//third_party/toolchains/remote:configure.bzl", "remote_execution_configure")
load("//third_party/toolchains/clang6:repo.bzl", "clang6_configure")
load("//third_party/toolchains/cpus/arm:arm_compiler_configure.bzl", "arm_compiler_configure")
load("//third_party/toolchains/embedded/arm-linux:arm_linux_toolchain_configure.bzl", "arm_linux_toolchain_configure")
load("//third_party:repo.bzl", "tf_http_archive")
load("//third_party/clang_toolchain:cc_configure_clang.bzl", "cc_download_clang_toolchain")
load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_file")
load("@bazel_tools//tools/build_defs/repo:java.bzl", "java_import_external")
load("@io_bazel_rules_closure//closure:defs.bzl", "filegroup_external")
load(
"//tensorflow/tools/def_file_filter:def_file_filter_configure.bzl",
"def_file_filter_configure",
)
load("//third_party/FP16:workspace.bzl", FP16 = "repo")
load("//third_party/aws:workspace.bzl", aws = "repo")
load("//third_party/clog:workspace.bzl", clog = "repo")
load("//third_party/cpuinfo:workspace.bzl", cpuinfo = "repo")
load("//third_party/dlpack:workspace.bzl", dlpack = "repo")
load("//third_party/flatbuffers:workspace.bzl", flatbuffers = "repo")
load("//third_party/hexagon:workspace.bzl", hexagon_nn = "repo")
load("//third_party/highwayhash:workspace.bzl", highwayhash = "repo")
load("//third_party/hwloc:workspace.bzl", hwloc = "repo")
load("//third_party/icu:workspace.bzl", icu = "repo")
load("//third_party/jpeg:workspace.bzl", jpeg = "repo")
load("//third_party/nasm:workspace.bzl", nasm = "repo")
load("//third_party/opencl_headers:workspace.bzl", opencl_headers = "repo")
load("//third_party/kissfft:workspace.bzl", kissfft = "repo")
load("//third_party/pasta:workspace.bzl", pasta = "repo")
load("//third_party/psimd:workspace.bzl", psimd = "repo")
load("//third_party/ruy:workspace.bzl", ruy = "repo")
load("//third_party/sobol_data:workspace.bzl", sobol_data = "repo")
load("//third_party/vulkan_headers:workspace.bzl", vulkan_headers = "repo")
load("//third_party/toolchains/remote_config:configs.bzl", "initialize_rbe_configs")
def initialize_third_party():
""" Load third party repositories. See above load() statements. """
FP16()
aws()
clog()
cpuinfo()
dlpack()
flatbuffers()
hexagon_nn()
highwayhash()
hwloc()
icu()
kissfft()
jpeg()
nasm()
opencl_headers()
pasta()
psimd()
sobol_data()
vulkan_headers()
ruy()
# Sanitize a dependency so that it works correctly from code that includes
# TensorFlow as a submodule.
def clean_dep(dep):
return str(Label(dep))
# If TensorFlow is linked as a submodule.
# path_prefix and tf_repo_name are no longer used.
def tf_workspace(path_prefix = "", tf_repo_name = ""):
tf_repositories(path_prefix, tf_repo_name)
tf_bind()
# Toolchains & platforms required by Tensorflow to build.
def tf_toolchains():
native.register_execution_platforms("@local_execution_config_platform//:platform")
native.register_toolchains("@local_execution_config_python//:py_toolchain")
# Define all external repositories required by TensorFlow
def tf_repositories(path_prefix = "", tf_repo_name = ""):
"""All external dependencies for TF builds."""
# Initialize toolchains and platforms.
tf_toolchains()
# Loads all external repos to configure RBE builds.
initialize_rbe_configs()
# Note that we check the minimum bazel version in WORKSPACE.
clang6_configure(name = "local_config_clang6")
cc_download_clang_toolchain(name = "local_config_download_clang")
cuda_configure(name = "local_config_cuda")
tensorrt_configure(name = "local_config_tensorrt")
nccl_configure(name = "local_config_nccl")
git_configure(name = "local_config_git")
syslibs_configure(name = "local_config_syslibs")
python_configure(name = "local_config_python")
rocm_configure(name = "local_config_rocm")
remote_execution_configure(name = "local_config_remote_execution")
initialize_third_party()
    # For the Windows Bazel build
# TODO: Remove def file filter when TensorFlow can export symbols properly on Windows.
def_file_filter_configure(name = "local_config_def_file_filter")
# Point //external/local_config_arm_compiler to //external/arm_compiler
arm_compiler_configure(
name = "local_config_arm_compiler",
build_file = clean_dep("//third_party/toolchains/cpus/arm:BUILD"),
remote_config_repo_arm = "../arm_compiler",
remote_config_repo_aarch64 = "../aarch64_compiler",
)
    # TFLite crossbuild toolchain for embedded Linux
arm_linux_toolchain_configure(
name = "local_config_embedded_arm",
build_file = clean_dep("//third_party/toolchains/embedded/arm-linux:BUILD"),
aarch64_repo = "../aarch64_linux_toolchain",
armhf_repo = "../armhf_linux_toolchain",
)
if path_prefix:
print("path_prefix was specified to tf_workspace but is no longer used " +
"and will be removed in the future.")
    # To update any of the dependencies below:
# a) update URL and strip_prefix to the new git commit hash
# b) get the sha256 hash of the commit by running:
# curl -L <url> | sha256sum
# and update the sha256 with the result.
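    #     c) update both the mirror.tensorflow.org URL and the upstream URL so
    #        they stay in sync.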
tf_http_archive(
name = "XNNPACK",
sha256 = "59ccf0c1c64899b511f8872a278e54c293970f57933b056492a364aa5ac709ec",
strip_prefix = "XNNPACK-094e692629d57ddb932fcc993193626f60daa61b",
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/google/XNNPACK/archive/094e692629d57ddb932fcc993193626f60daa61b.zip",
"https://github.com/google/XNNPACK/archive/094e692629d57ddb932fcc993193626f60daa61b.zip",
],
)
tf_http_archive(
name = "FXdiv",
sha256 = "3d7b0e9c4c658a84376a1086126be02f9b7f753caa95e009d9ac38d11da444db",
strip_prefix = "FXdiv-63058eff77e11aa15bf531df5dd34395ec3017c8",
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/Maratyszcza/FXdiv/archive/63058eff77e11aa15bf531df5dd34395ec3017c8.zip",
"https://github.com/Maratyszcza/FXdiv/archive/63058eff77e11aa15bf531df5dd34395ec3017c8.zip",
],
)
tf_http_archive(
name = "pthreadpool",
sha256 = "e576de3e2504018462a3ee2282c99c2d0d708f01d17cd2f71f9f1fe6d3ba8b9b",
strip_prefix = "pthreadpool-77f9d3bcfabd1bdb910dd33b549d5290b968ef05",
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/Maratyszcza/pthreadpool/archive/77f9d3bcfabd1bdb910dd33b549d5290b968ef05.zip",
"https://github.com/Maratyszcza/pthreadpool/archive/77f9d3bcfabd1bdb910dd33b549d5290b968ef05.zip",
],
)
tf_http_archive(
name = "mkl_dnn",
build_file = clean_dep("//third_party/mkl_dnn:mkldnn.BUILD"),
sha256 = "a0211aeb5e7dad50b97fa5dffc1a2fe2fe732572d4164e1ee8750a2ede43fbec",
strip_prefix = "oneDNN-0.21.3",
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/oneapi-src/oneDNN/archive/v0.21.3.tar.gz",
"https://github.com/oneapi-src/oneDNN/archive/v0.21.3.tar.gz",
],
)
tf_http_archive(
name = "mkl_dnn_v1",
build_file = clean_dep("//third_party/mkl_dnn:mkldnn_v1.BUILD"),
sha256 = "5369f7b2f0b52b40890da50c0632c3a5d1082d98325d0f2bff125d19d0dcaa1d",
strip_prefix = "oneDNN-1.6.4",
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/oneapi-src/oneDNN/archive/v1.6.4.tar.gz",
"https://github.com/oneapi-src/oneDNN/archive/v1.6.4.tar.gz",
],
)
tf_http_archive(
name = "com_google_absl",
build_file = clean_dep("//third_party:com_google_absl.BUILD"),
# TODO: Remove the patch when https://github.com/abseil/abseil-cpp/issues/326 is resolved
        # and when TensorFlow is built against CUDA 10.2
patch_file = clean_dep("//third_party:com_google_absl_fix_mac_and_nvcc_build.patch"),
sha256 = "f368a8476f4e2e0eccf8a7318b98dafbe30b2600f4e3cf52636e5eb145aba06a", # SHARED_ABSL_SHA
strip_prefix = "abseil-cpp-df3ea785d8c30a9503321a3d35ee7d35808f190d",
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/abseil/abseil-cpp/archive/df3ea785d8c30a9503321a3d35ee7d35808f190d.tar.gz",
"https://github.com/abseil/abseil-cpp/archive/df3ea785d8c30a9503321a3d35ee7d35808f190d.tar.gz",
],
)
tf_http_archive(
name = "eigen_archive",
build_file = clean_dep("//third_party:eigen.BUILD"),
patch_file = clean_dep("//third_party/eigen3:gpu_packet_math.patch"),
sha256 = "768b744d98505db4d73562b7813ee1e102dd185cf79a7ef1d5dbcc6e7e918eaf", # SHARED_EIGEN_SHA
strip_prefix = "eigen-352f1422d3ceea19a04cab297c6339e0870e1c6c",
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/gitlab.com/libeigen/eigen/-/archive/352f1422d3ceea19a04cab297c6339e0870e1c6c/eigen-352f1422d3ceea19a04cab297c6339e0870e1c6c.tar.gz",
"https://gitlab.com/libeigen/eigen/-/archive/352f1422d3ceea19a04cab297c6339e0870e1c6c/eigen-352f1422d3ceea19a04cab297c6339e0870e1c6c.tar.gz",
],
)
tf_http_archive(
name = "arm_compiler",
build_file = clean_dep("//:arm_compiler.BUILD"),
sha256 = "b9e7d50ffd9996ed18900d041d362c99473b382c0ae049b2fce3290632d2656f",
strip_prefix = "rpi-newer-crosstools-eb68350c5c8ec1663b7fe52c742ac4271e3217c5/x64-gcc-6.5.0/arm-rpi-linux-gnueabihf/",
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/rvagg/rpi-newer-crosstools/archive/eb68350c5c8ec1663b7fe52c742ac4271e3217c5.tar.gz",
"https://github.com/rvagg/rpi-newer-crosstools/archive/eb68350c5c8ec1663b7fe52c742ac4271e3217c5.tar.gz",
],
)
tf_http_archive(
# This is the latest `aarch64-none-linux-gnu` compiler provided by ARM
# See https://developer.arm.com/tools-and-software/open-source-software/developer-tools/gnu-toolchain/gnu-a/downloads
# The archive contains GCC version 9.2.1
name = "aarch64_compiler",
build_file = "//:arm_compiler.BUILD",
sha256 = "8dfe681531f0bd04fb9c53cf3c0a3368c616aa85d48938eebe2b516376e06a66",
strip_prefix = "gcc-arm-9.2-2019.12-x86_64-aarch64-none-linux-gnu",
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/developer.arm.com/-/media/Files/downloads/gnu-a/9.2-2019.12/binrel/gcc-arm-9.2-2019.12-x86_64-aarch64-none-linux-gnu.tar.xz",
"https://developer.arm.com/-/media/Files/downloads/gnu-a/9.2-2019.12/binrel/gcc-arm-9.2-2019.12-x86_64-aarch64-none-linux-gnu.tar.xz",
],
)
tf_http_archive(
name = "aarch64_linux_toolchain",
build_file = clean_dep("//third_party/toolchains/embedded/arm-linux:aarch64-linux-toolchain.BUILD"),
sha256 = "8ce3e7688a47d8cd2d8e8323f147104ae1c8139520eca50ccf8a7fa933002731",
strip_prefix = "gcc-arm-8.3-2019.03-x86_64-aarch64-linux-gnu",
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/developer.arm.com/media/Files/downloads/gnu-a/8.3-2019.03/binrel/gcc-arm-8.3-2019.03-x86_64-aarch64-linux-gnu.tar.xz",
"https://developer.arm.com/-/media/Files/downloads/gnu-a/8.3-2019.03/binrel/gcc-arm-8.3-2019.03-x86_64-aarch64-linux-gnu.tar.xz",
],
)
tf_http_archive(
name = "armhf_linux_toolchain",
build_file = clean_dep("//third_party/toolchains/embedded/arm-linux:armhf-linux-toolchain.BUILD"),
sha256 = "d4f6480ecaa99e977e3833cc8a8e1263f9eecd1ce2d022bb548a24c4f32670f5",
strip_prefix = "gcc-arm-8.3-2019.03-x86_64-arm-linux-gnueabihf",
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/developer.arm.com/media/Files/downloads/gnu-a/8.3-2019.03/binrel/gcc-arm-8.3-2019.03-x86_64-arm-linux-gnueabihf.tar.xz",
"https://developer.arm.com/-/media/Files/downloads/gnu-a/8.3-2019.03/binrel/gcc-arm-8.3-2019.03-x86_64-arm-linux-gnueabihf.tar.xz",
],
)
tf_http_archive(
name = "libxsmm_archive",
build_file = clean_dep("//third_party:libxsmm.BUILD"),
sha256 = "9c0af4509ea341d1ee2c6c19fc6f19289318c3bd4b17844efeb9e7f9691abf76",
strip_prefix = "libxsmm-1.14",
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/hfp/libxsmm/archive/1.14.tar.gz",
"https://github.com/hfp/libxsmm/archive/1.14.tar.gz",
],
)
tf_http_archive(
name = "com_googlesource_code_re2",
sha256 = "d070e2ffc5476c496a6a872a6f246bfddce8e7797d6ba605a7c8d72866743bf9",
strip_prefix = "re2-506cfa4bffd060c06ec338ce50ea3468daa6c814",
system_build_file = clean_dep("//third_party/systemlibs:re2.BUILD"),
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/google/re2/archive/506cfa4bffd060c06ec338ce50ea3468daa6c814.tar.gz",
"https://github.com/google/re2/archive/506cfa4bffd060c06ec338ce50ea3468daa6c814.tar.gz",
],
)
tf_http_archive(
name = "com_github_google_crc32c",
sha256 = "6b3b1d861bb8307658b2407bc7a4c59e566855ef5368a60b35c893551e4788e9",
build_file = "@com_github_googlecloudplatform_google_cloud_cpp//bazel:crc32c.BUILD",
strip_prefix = "crc32c-1.0.6",
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/google/crc32c/archive/1.0.6.tar.gz",
"https://github.com/google/crc32c/archive/1.0.6.tar.gz",
],
)
tf_http_archive(
name = "com_github_googlecloudplatform_google_cloud_cpp",
sha256 = "ff82045b9491f0d880fc8e5c83fd9542eafb156dcac9ff8c6209ced66ed2a7f0",
strip_prefix = "google-cloud-cpp-1.17.1",
repo_mapping = {
"@com_github_curl_curl": "@curl",
"@com_github_nlohmann_json": "@nlohmann_json_lib",
},
system_build_file = clean_dep("//third_party/systemlibs:google_cloud_cpp.BUILD"),
system_link_files = {
"//third_party/systemlibs:google_cloud_cpp.google.cloud.bigtable.BUILD": "google/cloud/bigtable/BUILD",
},
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/googleapis/google-cloud-cpp/archive/v1.17.1.tar.gz",
"https://github.com/googleapis/google-cloud-cpp/archive/v1.17.1.tar.gz",
],
)
tf_http_archive(
name = "com_github_googlecloudplatform_tensorflow_gcp_tools",
sha256 = "5e9ebe17eaa2895eb7f77fefbf52deeda7c4b63f5a616916b823eb74f3a0c542",
strip_prefix = "tensorflow-gcp-tools-2643d8caeba6ca2a6a0b46bb123953cb95b7e7d5",
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/GoogleCloudPlatform/tensorflow-gcp-tools/archive/2643d8caeba6ca2a6a0b46bb123953cb95b7e7d5.tar.gz",
"https://github.com/GoogleCloudPlatform/tensorflow-gcp-tools/archive/2643d8caeba6ca2a6a0b46bb123953cb95b7e7d5.tar.gz",
],
)
tf_http_archive(
name = "com_google_googleapis",
build_file = clean_dep("//third_party/googleapis:googleapis.BUILD"),
sha256 = "7ebab01b06c555f4b6514453dc3e1667f810ef91d1d4d2d3aa29bb9fcb40a900",
strip_prefix = "googleapis-541b1ded4abadcc38e8178680b0677f65594ea6f",
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/googleapis/googleapis/archive/541b1ded4abadcc38e8178680b0677f65594ea6f.zip",
"https://github.com/googleapis/googleapis/archive/541b1ded4abadcc38e8178680b0677f65594ea6f.zip",
],
)
tf_http_archive(
name = "gemmlowp",
sha256 = "43146e6f56cb5218a8caaab6b5d1601a083f1f31c06ff474a4378a7d35be9cfb", # SHARED_GEMMLOWP_SHA
strip_prefix = "gemmlowp-fda83bdc38b118cc6b56753bd540caa49e570745",
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/google/gemmlowp/archive/fda83bdc38b118cc6b56753bd540caa49e570745.zip",
"https://github.com/google/gemmlowp/archive/fda83bdc38b118cc6b56753bd540caa49e570745.zip",
],
)
tf_http_archive(
name = "farmhash_archive",
build_file = clean_dep("//third_party:farmhash.BUILD"),
sha256 = "6560547c63e4af82b0f202cb710ceabb3f21347a4b996db565a411da5b17aba0", # SHARED_FARMHASH_SHA
strip_prefix = "farmhash-816a4ae622e964763ca0862d9dbd19324a1eaf45",
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/google/farmhash/archive/816a4ae622e964763ca0862d9dbd19324a1eaf45.tar.gz",
"https://github.com/google/farmhash/archive/816a4ae622e964763ca0862d9dbd19324a1eaf45.tar.gz",
],
)
tf_http_archive(
name = "png",
build_file = clean_dep("//third_party:png.BUILD"),
patch_file = clean_dep("//third_party:png_fix_rpi.patch"),
sha256 = "ca74a0dace179a8422187671aee97dd3892b53e168627145271cad5b5ac81307",
strip_prefix = "libpng-1.6.37",
system_build_file = clean_dep("//third_party/systemlibs:png.BUILD"),
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/glennrp/libpng/archive/v1.6.37.tar.gz",
"https://github.com/glennrp/libpng/archive/v1.6.37.tar.gz",
],
)
tf_http_archive(
name = "org_sqlite",
build_file = clean_dep("//third_party:sqlite.BUILD"),
sha256 = "8ff0b79fd9118af7a760f1f6a98cac3e69daed325c8f9f0a581ecb62f797fd64",
strip_prefix = "sqlite-amalgamation-3340000",
system_build_file = clean_dep("//third_party/systemlibs:sqlite.BUILD"),
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/www.sqlite.org/2020/sqlite-amalgamation-3340000.zip",
"https://www.sqlite.org/2020/sqlite-amalgamation-3340000.zip",
],
)
tf_http_archive(
name = "gif",
build_file = clean_dep("//third_party:gif.BUILD"),
patch_file = clean_dep("//third_party:gif_fix_strtok_r.patch"),
sha256 = "31da5562f44c5f15d63340a09a4fd62b48c45620cd302f77a6d9acf0077879bd",
strip_prefix = "giflib-5.2.1",
system_build_file = clean_dep("//third_party/systemlibs:gif.BUILD"),
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/pilotfiber.dl.sourceforge.net/project/giflib/giflib-5.2.1.tar.gz",
"https://pilotfiber.dl.sourceforge.net/project/giflib/giflib-5.2.1.tar.gz",
],
)
tf_http_archive(
name = "six_archive",
build_file = clean_dep("//third_party:six.BUILD"),
sha256 = "30639c035cdb23534cd4aa2dd52c3bf48f06e5f4a941509c8bafd8ce11080259",
strip_prefix = "six-1.15.0",
system_build_file = clean_dep("//third_party/systemlibs:six.BUILD"),
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/pypi.python.org/packages/source/s/six/six-1.15.0.tar.gz",
"https://pypi.python.org/packages/source/s/six/six-1.15.0.tar.gz",
],
)
tf_http_archive(
name = "astor_archive",
build_file = clean_dep("//third_party:astor.BUILD"),
sha256 = "95c30d87a6c2cf89aa628b87398466840f0ad8652f88eb173125a6df8533fb8d",
strip_prefix = "astor-0.7.1",
system_build_file = clean_dep("//third_party/systemlibs:astor.BUILD"),
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/pypi.python.org/packages/99/80/f9482277c919d28bebd85813c0a70117214149a96b08981b72b63240b84c/astor-0.7.1.tar.gz",
"https://pypi.python.org/packages/99/80/f9482277c919d28bebd85813c0a70117214149a96b08981b72b63240b84c/astor-0.7.1.tar.gz",
],
)
tf_http_archive(
name = "astunparse_archive",
build_file = clean_dep("//third_party:astunparse.BUILD"),
sha256 = "5ad93a8456f0d084c3456d059fd9a92cce667963232cbf763eac3bc5b7940872",
strip_prefix = "astunparse-1.6.3/lib",
system_build_file = clean_dep("//third_party/systemlibs:astunparse.BUILD"),
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/files.pythonhosted.org/packages/f3/af/4182184d3c338792894f34a62672919db7ca008c89abee9b564dd34d8029/astunparse-1.6.3.tar.gz",
"https://files.pythonhosted.org/packages/f3/af/4182184d3c338792894f34a62672919db7ca008c89abee9b564dd34d8029/astunparse-1.6.3.tar.gz",
],
)
filegroup_external(
name = "astunparse_license",
licenses = ["notice"], # PSFL
sha256_urls = {
"92fc0e4f4fa9460558eedf3412b988d433a2dcbb3a9c45402a145a4fab8a6ac6": [
"https://storage.googleapis.com/mirror.tensorflow.org/raw.githubusercontent.com/simonpercivall/astunparse/v1.6.2/LICENSE",
"https://raw.githubusercontent.com/simonpercivall/astunparse/v1.6.2/LICENSE",
],
},
)
tf_http_archive(
name = "functools32_archive",
build_file = clean_dep("//third_party:functools32.BUILD"),
sha256 = "f6253dfbe0538ad2e387bd8fdfd9293c925d63553f5813c4e587745416501e6d",
strip_prefix = "functools32-3.2.3-2",
system_build_file = clean_dep("//third_party/systemlibs:functools32.BUILD"),
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/pypi.python.org/packages/c5/60/6ac26ad05857c601308d8fb9e87fa36d0ebf889423f47c3502ef034365db/functools32-3.2.3-2.tar.gz",
"https://pypi.python.org/packages/c5/60/6ac26ad05857c601308d8fb9e87fa36d0ebf889423f47c3502ef034365db/functools32-3.2.3-2.tar.gz",
],
)
tf_http_archive(
name = "gast_archive",
build_file = clean_dep("//third_party:gast.BUILD"),
sha256 = "40feb7b8b8434785585ab224d1568b857edb18297e5a3047f1ba012bc83b42c1",
strip_prefix = "gast-0.4.0",
system_build_file = clean_dep("//third_party/systemlibs:gast.BUILD"),
urls = [
"http://mirror.tensorflow.org/files.pythonhosted.org/packages/12/59/eaa15ab9710a20e22225efd042cd2d6a0b559a0656d5baba9641a2a4a921/gast-0.4.0.tar.gz",
"https://files.pythonhosted.org/packages/83/4a/07c7e59cef23fb147454663c3271c21da68ba2ab141427c20548ae5a8a4d/gast-0.4.0.tar.gz",
],
)
tf_http_archive(
name = "termcolor_archive",
build_file = clean_dep("//third_party:termcolor.BUILD"),
sha256 = "1d6d69ce66211143803fbc56652b41d73b4a400a2891d7bf7a1cdf4c02de613b",
strip_prefix = "termcolor-1.1.0",
system_build_file = clean_dep("//third_party/systemlibs:termcolor.BUILD"),
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/pypi.python.org/packages/8a/48/a76be51647d0eb9f10e2a4511bf3ffb8cc1e6b14e9e4fab46173aa79f981/termcolor-1.1.0.tar.gz",
"https://pypi.python.org/packages/8a/48/a76be51647d0eb9f10e2a4511bf3ffb8cc1e6b14e9e4fab46173aa79f981/termcolor-1.1.0.tar.gz",
],
)
tf_http_archive(
name = "typing_extensions_archive",
build_file = clean_dep("//third_party:typing_extensions.BUILD"),
sha256 = "79ee589a3caca649a9bfd2a8de4709837400dfa00b6cc81962a1e6a1815969ae",
strip_prefix = "typing_extensions-3.7.4.2/src_py3",
system_build_file = clean_dep("//third_party/systemlibs:typing_extensions.BUILD"),
urls = [
"http://mirror.tensorflow.org/files.pythonhosted.org/packages/6a/28/d32852f2af6b5ead85d396249d5bdf450833f3a69896d76eb480d9c5e406/typing_extensions-3.7.4.2.tar.gz",
"https://files.pythonhosted.org/packages/6a/28/d32852f2af6b5ead85d396249d5bdf450833f3a69896d76eb480d9c5e406/typing_extensions-3.7.4.2.tar.gz",
],
)
filegroup_external(
name = "typing_extensions_license",
licenses = ["notice"], # PSFL
sha256_urls = {
"ff17ce94e102024deb68773eb1cc74ca76da4e658f373531f0ac22d68a6bb1ad": [
"http://mirror.tensorflow.org/raw.githubusercontent.com/python/typing/master/typing_extensions/LICENSE",
"https://raw.githubusercontent.com/python/typing/master/typing_extensions/LICENSE",
],
},
)
tf_http_archive(
name = "opt_einsum_archive",
build_file = clean_dep("//third_party:opt_einsum.BUILD"),
sha256 = "d3d464b4da7ef09e444c30e4003a27def37f85ff10ff2671e5f7d7813adac35b",
strip_prefix = "opt_einsum-2.3.2",
system_build_file = clean_dep("//third_party/systemlibs:opt_einsum.BUILD"),
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/pypi.python.org/packages/f6/d6/44792ec668bcda7d91913c75237314e688f70415ab2acd7172c845f0b24f/opt_einsum-2.3.2.tar.gz",
"https://pypi.python.org/packages/f6/d6/44792ec668bcda7d91913c75237314e688f70415ab2acd7172c845f0b24f/opt_einsum-2.3.2.tar.gz",
],
)
tf_http_archive(
name = "absl_py",
sha256 = "603febc9b95a8f2979a7bdb77d2f5e4d9b30d4e0d59579f88eba67d4e4cc5462",
strip_prefix = "abseil-py-pypi-v0.9.0",
system_build_file = clean_dep("//third_party/systemlibs:absl_py.BUILD"),
system_link_files = {
"//third_party/systemlibs:absl_py.absl.BUILD": "absl/BUILD",
"//third_party/systemlibs:absl_py.absl.flags.BUILD": "absl/flags/BUILD",
"//third_party/systemlibs:absl_py.absl.testing.BUILD": "absl/testing/BUILD",
"//third_party/systemlibs:absl_py.absl.logging.BUILD": "absl/logging/BUILD",
},
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/abseil/abseil-py/archive/pypi-v0.9.0.tar.gz",
"https://github.com/abseil/abseil-py/archive/pypi-v0.9.0.tar.gz",
],
)
tf_http_archive(
name = "enum34_archive",
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/pypi.python.org/packages/bf/3e/31d502c25302814a7c2f1d3959d2a3b3f78e509002ba91aea64993936876/enum34-1.1.6.tar.gz",
"https://pypi.python.org/packages/bf/3e/31d502c25302814a7c2f1d3959d2a3b3f78e509002ba91aea64993936876/enum34-1.1.6.tar.gz",
],
sha256 = "8ad8c4783bf61ded74527bffb48ed9b54166685e4230386a9ed9b1279e2df5b1",
build_file = clean_dep("//third_party:enum34.BUILD"),
system_build_file = clean_dep("//third_party/systemlibs:enum34.BUILD"),
strip_prefix = "enum34-1.1.6/enum",
)
tf_http_archive(
name = "org_python_pypi_backports_weakref",
build_file = clean_dep("//third_party:backports_weakref.BUILD"),
sha256 = "8813bf712a66b3d8b85dc289e1104ed220f1878cf981e2fe756dfaabe9a82892",
strip_prefix = "backports.weakref-1.0rc1/src",
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/pypi.python.org/packages/bc/cc/3cdb0a02e7e96f6c70bd971bc8a90b8463fda83e264fa9c5c1c98ceabd81/backports.weakref-1.0rc1.tar.gz",
"https://pypi.python.org/packages/bc/cc/3cdb0a02e7e96f6c70bd971bc8a90b8463fda83e264fa9c5c1c98ceabd81/backports.weakref-1.0rc1.tar.gz",
],
)
tf_http_archive(
name = "dill_archive",
build_file = clean_dep("//third_party:dill.BUILD"),
system_build_file = clean_dep("//third_party/systemlibs:dill.BUILD"),
urls = [
"http://mirror.tensorflow.org/files.pythonhosted.org/packages/e2/96/518a8ea959a734b70d2e95fef98bcbfdc7adad1c1e5f5dd9148c835205a5/dill-0.3.2.zip",
"https://files.pythonhosted.org/packages/e2/96/518a8ea959a734b70d2e95fef98bcbfdc7adad1c1e5f5dd9148c835205a5/dill-0.3.2.zip",
],
sha256 = "6e12da0d8e49c220e8d6e97ee8882002e624f1160289ce85ec2cc0a5246b3a2e",
strip_prefix = "dill-0.3.2",
)
tf_http_archive(
name = "tblib_archive",
build_file = clean_dep("//third_party:tblib.BUILD"),
system_build_file = clean_dep("//third_party/systemlibs:tblib.BUILD"),
urls = [
"http://mirror.tensorflow.org/files.pythonhosted.org/packages/d3/41/901ef2e81d7b1e834b9870d416cb09479e175a2be1c4aa1a9dcd0a555293/tblib-1.7.0.tar.gz",
"https://files.pythonhosted.org/packages/d3/41/901ef2e81d7b1e834b9870d416cb09479e175a2be1c4aa1a9dcd0a555293/tblib-1.7.0.tar.gz",
],
sha256 = "059bd77306ea7b419d4f76016aef6d7027cc8a0785579b5aad198803435f882c",
strip_prefix = "tblib-1.7.0",
)
filegroup_external(
name = "org_python_license",
licenses = ["notice"], # Python 2.0
sha256_urls = {
"e76cacdf0bdd265ff074ccca03671c33126f597f39d0ed97bc3e5673d9170cf6": [
"https://storage.googleapis.com/mirror.tensorflow.org/docs.python.org/2.7/_sources/license.rst.txt",
"https://docs.python.org/2.7/_sources/license.rst.txt",
],
},
)
tf_http_archive(
name = "com_google_protobuf",
patch_file = clean_dep("//third_party/protobuf:protobuf.patch"),
sha256 = "cfcba2df10feec52a84208693937c17a4b5df7775e1635c1e3baffc487b24c9b",
strip_prefix = "protobuf-3.9.2",
system_build_file = clean_dep("//third_party/systemlibs:protobuf.BUILD"),
system_link_files = {
"//third_party/systemlibs:protobuf.bzl": "protobuf.bzl",
},
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/protocolbuffers/protobuf/archive/v3.9.2.zip",
"https://github.com/protocolbuffers/protobuf/archive/v3.9.2.zip",
],
)
tf_http_archive(
name = "nsync",
sha256 = "caf32e6b3d478b78cff6c2ba009c3400f8251f646804bcb65465666a9cea93c4",
strip_prefix = "nsync-1.22.0",
system_build_file = clean_dep("//third_party/systemlibs:nsync.BUILD"),
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/google/nsync/archive/1.22.0.tar.gz",
"https://github.com/google/nsync/archive/1.22.0.tar.gz",
],
)
tf_http_archive(
name = "com_google_googletest",
sha256 = "ff7a82736e158c077e76188232eac77913a15dac0b22508c390ab3f88e6d6d86",
strip_prefix = "googletest-b6cd405286ed8635ece71c72f118e659f4ade3fb",
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/google/googletest/archive/b6cd405286ed8635ece71c72f118e659f4ade3fb.zip",
"https://github.com/google/googletest/archive/b6cd405286ed8635ece71c72f118e659f4ade3fb.zip",
],
)
tf_http_archive(
name = "com_github_gflags_gflags",
sha256 = "ae27cdbcd6a2f935baa78e4f21f675649271634c092b1be01469440495609d0e",
strip_prefix = "gflags-2.2.1",
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/gflags/gflags/archive/v2.2.1.tar.gz",
"https://github.com/gflags/gflags/archive/v2.2.1.tar.gz",
],
)
tf_http_archive(
name = "pcre",
build_file = clean_dep("//third_party:pcre.BUILD"),
sha256 = "aecafd4af3bd0f3935721af77b889d9024b2e01d96b58471bd91a3063fb47728",
strip_prefix = "pcre-8.44",
system_build_file = clean_dep("//third_party/systemlibs:pcre.BUILD"),
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/ftp.exim.org/pub/pcre/pcre-8.44.tar.gz",
"https://ftp.exim.org/pub/pcre/pcre-8.44.tar.gz",
],
)
tf_http_archive(
name = "curl",
build_file = clean_dep("//third_party:curl.BUILD"),
sha256 = "01ae0c123dee45b01bbaef94c0bc00ed2aec89cb2ee0fd598e0d302a6b5e0a98",
strip_prefix = "curl-7.69.1",
system_build_file = clean_dep("//third_party/systemlibs:curl.BUILD"),
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/curl.haxx.se/download/curl-7.69.1.tar.gz",
"https://curl.haxx.se/download/curl-7.69.1.tar.gz",
],
)
# WARNING: make sure ncteisen@ and vpai@ are cc-ed on any CL to change the below rule
tf_http_archive(
name = "com_github_grpc_grpc",
sha256 = "b956598d8cbe168b5ee717b5dafa56563eb5201a947856a6688bbeac9cac4e1f",
strip_prefix = "grpc-b54a5b338637f92bfcf4b0bc05e0f57a5fd8fadd",
system_build_file = clean_dep("//third_party/systemlibs:grpc.BUILD"),
patch_file = clean_dep("//third_party/grpc:generate_cc_env_fix.patch"),
system_link_files = {
"//third_party/systemlibs:BUILD": "bazel/BUILD",
"//third_party/systemlibs:grpc.BUILD": "src/compiler/BUILD",
"//third_party/systemlibs:grpc.bazel.grpc_deps.bzl": "bazel/grpc_deps.bzl",
"//third_party/systemlibs:grpc.bazel.grpc_extra_deps.bzl": "bazel/grpc_extra_deps.bzl",
"//third_party/systemlibs:grpc.bazel.cc_grpc_library.bzl": "bazel/cc_grpc_library.bzl",
"//third_party/systemlibs:grpc.bazel.generate_cc.bzl": "bazel/generate_cc.bzl",
"//third_party/systemlibs:grpc.bazel.protobuf.bzl": "bazel/protobuf.bzl",
},
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/grpc/grpc/archive/b54a5b338637f92bfcf4b0bc05e0f57a5fd8fadd.tar.gz",
"https://github.com/grpc/grpc/archive/b54a5b338637f92bfcf4b0bc05e0f57a5fd8fadd.tar.gz",
],
)
tf_http_archive(
name = "linenoise",
build_file = clean_dep("//third_party:linenoise.BUILD"),
sha256 = "7f51f45887a3d31b4ce4fa5965210a5e64637ceac12720cfce7954d6a2e812f7",
strip_prefix = "linenoise-c894b9e59f02203dbe4e2be657572cf88c4230c3",
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/antirez/linenoise/archive/c894b9e59f02203dbe4e2be657572cf88c4230c3.tar.gz",
"https://github.com/antirez/linenoise/archive/c894b9e59f02203dbe4e2be657572cf88c4230c3.tar.gz",
],
)
# Check out LLVM and MLIR from llvm-project.
LLVM_COMMIT = "2fa4186d4e1c0c5ce05efb4275f94bb7c2538dda"
LLVM_SHA256 = "44d53acbed53c73cffd41281e053f86850ad7032ccf3ea68f238a010767b7260"
LLVM_URLS = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/llvm/llvm-project/archive/{commit}.tar.gz".format(commit = LLVM_COMMIT),
"https://github.com/llvm/llvm-project/archive/{commit}.tar.gz".format(commit = LLVM_COMMIT),
]
tf_http_archive(
name = "llvm-project",
sha256 = LLVM_SHA256,
strip_prefix = "llvm-project-" + LLVM_COMMIT,
urls = LLVM_URLS,
additional_build_files = {
clean_dep("//third_party/llvm:llvm.autogenerated.BUILD"): "llvm/BUILD",
"//third_party/mlir:BUILD": "mlir/BUILD",
"//third_party/mlir:test.BUILD": "mlir/test/BUILD",
},
)
    # Intel OpenMP runtime that is part of the LLVM sources.
tf_http_archive(
name = "llvm_openmp",
build_file = clean_dep("//third_party/llvm_openmp:BUILD"),
sha256 = "d19f728c8e04fb1e94566c8d76aef50ec926cd2f95ef3bf1e0a5de4909b28b44",
strip_prefix = "openmp-10.0.1.src",
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/llvm/llvm-project/releases/download/llvmorg-10.0.1/openmp-10.0.1.src.tar.xz",
"https://github.com/llvm/llvm-project/releases/download/llvmorg-10.0.1/openmp-10.0.1.src.tar.xz",
],
)
tf_http_archive(
name = "lmdb",
build_file = clean_dep("//third_party:lmdb.BUILD"),
sha256 = "f3927859882eb608868c8c31586bb7eb84562a40a6bf5cc3e13b6b564641ea28",
strip_prefix = "lmdb-LMDB_0.9.22/libraries/liblmdb",
system_build_file = clean_dep("//third_party/systemlibs:lmdb.BUILD"),
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/LMDB/lmdb/archive/LMDB_0.9.22.tar.gz",
"https://github.com/LMDB/lmdb/archive/LMDB_0.9.22.tar.gz",
],
)
tf_http_archive(
name = "jsoncpp_git",
build_file = clean_dep("//third_party:jsoncpp.BUILD"),
sha256 = "77a402fb577b2e0e5d0bdc1cf9c65278915cdb25171e3452c68b6da8a561f8f0",
strip_prefix = "jsoncpp-1.9.2",
system_build_file = clean_dep("//third_party/systemlibs:jsoncpp.BUILD"),
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/open-source-parsers/jsoncpp/archive/1.9.2.tar.gz",
"https://github.com/open-source-parsers/jsoncpp/archive/1.9.2.tar.gz",
],
)
tf_http_archive(
name = "boringssl",
sha256 = "a9c3b03657d507975a32732f04563132b4553c20747cec6dc04de475c8bdf29f",
strip_prefix = "boringssl-80ca9f9f6ece29ab132cce4cf807a9465a18cfac",
system_build_file = clean_dep("//third_party/systemlibs:boringssl.BUILD"),
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/google/boringssl/archive/80ca9f9f6ece29ab132cce4cf807a9465a18cfac.tar.gz",
"https://github.com/google/boringssl/archive/80ca9f9f6ece29ab132cce4cf807a9465a18cfac.tar.gz",
],
)
tf_http_archive(
name = "zlib",
build_file = clean_dep("//third_party:zlib.BUILD"),
sha256 = "c3e5e9fdd5004dcb542feda5ee4f0ff0744628baf8ed2dd5d66f8ca1197cb1a1",
strip_prefix = "zlib-1.2.11",
system_build_file = clean_dep("//third_party/systemlibs:zlib.BUILD"),
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/zlib.net/zlib-1.2.11.tar.gz",
"https://zlib.net/zlib-1.2.11.tar.gz",
],
)
tf_http_archive(
name = "fft2d",
build_file = clean_dep("//third_party/fft2d:fft2d.BUILD"),
sha256 = "5f4dabc2ae21e1f537425d58a49cdca1c49ea11db0d6271e2a4b27e9697548eb",
strip_prefix = "OouraFFT-1.0",
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/petewarden/OouraFFT/archive/v1.0.tar.gz",
"https://github.com/petewarden/OouraFFT/archive/v1.0.tar.gz",
],
)
tf_http_archive(
name = "snappy",
build_file = clean_dep("//third_party:snappy.BUILD"),
sha256 = "16b677f07832a612b0836178db7f374e414f94657c138e6993cbfc5dcc58651f",
strip_prefix = "snappy-1.1.8",
system_build_file = clean_dep("//third_party/systemlibs:snappy.BUILD"),
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/google/snappy/archive/1.1.8.tar.gz",
"https://github.com/google/snappy/archive/1.1.8.tar.gz",
],
)
tf_http_archive(
name = "nccl_archive",
build_file = clean_dep("//third_party:nccl/archive.BUILD"),
patch_file = clean_dep("//third_party/nccl:archive.patch"),
sha256 = "3ae89ddb2956fff081e406a94ff54ae5e52359f5d645ce977c7eba09b3b782e6",
strip_prefix = "nccl-2.8.3-1",
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/nvidia/nccl/archive/v2.8.3-1.tar.gz",
"https://github.com/nvidia/nccl/archive/v2.8.3-1.tar.gz",
],
)
java_import_external(
name = "junit",
jar_sha256 = "59721f0805e223d84b90677887d9ff567dc534d7c502ca903c0c2b17f05c116a",
jar_urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/repo1.maven.org/maven2/junit/junit/4.12/junit-4.12.jar",
"https://repo1.maven.org/maven2/junit/junit/4.12/junit-4.12.jar",
"https://maven.ibiblio.org/maven2/junit/junit/4.12/junit-4.12.jar",
],
licenses = ["reciprocal"], # Common Public License Version 1.0
testonly_ = True,
deps = ["@org_hamcrest_core"],
)
java_import_external(
name = "org_hamcrest_core",
jar_sha256 = "66fdef91e9739348df7a096aa384a5685f4e875584cce89386a7a47251c4d8e9",
jar_urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/repo1.maven.org/maven2/org/hamcrest/hamcrest-core/1.3/hamcrest-core-1.3.jar",
"https://repo1.maven.org/maven2/org/hamcrest/hamcrest-core/1.3/hamcrest-core-1.3.jar",
"https://maven.ibiblio.org/maven2/org/hamcrest/hamcrest-core/1.3/hamcrest-core-1.3.jar",
],
licenses = ["notice"], # New BSD License
testonly_ = True,
)
java_import_external(
name = "com_google_testing_compile",
jar_sha256 = "edc180fdcd9f740240da1a7a45673f46f59c5578d8cd3fbc912161f74b5aebb8",
jar_urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/repo1.maven.org/maven2/com/google/testing/compile/compile-testing/0.11/compile-testing-0.11.jar",
"https://repo1.maven.org/maven2/com/google/testing/compile/compile-testing/0.11/compile-testing-0.11.jar",
],
licenses = ["notice"], # New BSD License
testonly_ = True,
deps = ["@com_google_guava", "@com_google_truth"],
)
java_import_external(
name = "com_google_truth",
jar_sha256 = "032eddc69652b0a1f8d458f999b4a9534965c646b8b5de0eba48ee69407051df",
jar_urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/repo1.maven.org/maven2/com/google/truth/truth/0.32/truth-0.32.jar",
"https://repo1.maven.org/maven2/com/google/truth/truth/0.32/truth-0.32.jar",
],
licenses = ["notice"], # Apache 2.0
testonly_ = True,
deps = ["@com_google_guava"],
)
java_import_external(
name = "org_checkerframework_qual",
jar_sha256 = "d261fde25d590f6b69db7721d469ac1b0a19a17ccaaaa751c31f0d8b8260b894",
jar_urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/repo1.maven.org/maven2/org/checkerframework/checker-qual/2.10.0/checker-qual-2.10.0.jar",
"https://repo1.maven.org/maven2/org/checkerframework/checker-qual/2.10.0/checker-qual-2.10.0.jar",
],
licenses = ["notice"], # Apache 2.0
)
java_import_external(
name = "com_squareup_javapoet",
jar_sha256 = "5bb5abdfe4366c15c0da3332c57d484e238bd48260d6f9d6acf2b08fdde1efea",
jar_urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/repo1.maven.org/maven2/com/squareup/javapoet/1.9.0/javapoet-1.9.0.jar",
"https://repo1.maven.org/maven2/com/squareup/javapoet/1.9.0/javapoet-1.9.0.jar",
],
licenses = ["notice"], # Apache 2.0
)
tf_http_archive(
name = "com_google_pprof",
build_file = clean_dep("//third_party:pprof.BUILD"),
sha256 = "e0928ca4aa10ea1e0551e2d7ce4d1d7ea2d84b2abbdef082b0da84268791d0c4",
strip_prefix = "pprof-c0fb62ec88c411cc91194465e54db2632845b650",
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/google/pprof/archive/c0fb62ec88c411cc91194465e54db2632845b650.tar.gz",
"https://github.com/google/pprof/archive/c0fb62ec88c411cc91194465e54db2632845b650.tar.gz",
],
)
# The CUDA 11 toolkit ships with CUB. We should be able to delete this rule
# once TF drops support for CUDA 10.
tf_http_archive(
name = "cub_archive",
build_file = clean_dep("//third_party:cub.BUILD"),
sha256 = "162514b3cc264ac89d91898b58450190b8192e2af1142cf8ccac2d59aa160dda",
strip_prefix = "cub-1.9.9",
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/NVlabs/cub/archive/1.9.9.zip",
"https://github.com/NVlabs/cub/archive/1.9.9.zip",
],
)
tf_http_archive(
name = "cython",
build_file = clean_dep("//third_party:cython.BUILD"),
delete = ["BUILD.bazel"],
sha256 = "e2e38e1f0572ca54d6085df3dec8b607d20e81515fb80215aed19c81e8fe2079",
strip_prefix = "cython-0.29.21",
system_build_file = clean_dep("//third_party/systemlibs:cython.BUILD"),
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/cython/cython/archive/0.29.21.tar.gz",
"https://github.com/cython/cython/archive/0.29.21.tar.gz",
],
)
tf_http_archive(
name = "arm_neon_2_x86_sse",
build_file = clean_dep("//third_party:arm_neon_2_x86_sse.BUILD"),
sha256 = "213733991310b904b11b053ac224fee2d4e0179e46b52fe7f8735b8831e04dcc",
strip_prefix = "ARM_NEON_2_x86_SSE-1200fe90bb174a6224a525ee60148671a786a71f",
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/intel/ARM_NEON_2_x86_SSE/archive/1200fe90bb174a6224a525ee60148671a786a71f.tar.gz",
"https://github.com/intel/ARM_NEON_2_x86_SSE/archive/1200fe90bb174a6224a525ee60148671a786a71f.tar.gz",
],
)
tf_http_archive(
name = "double_conversion",
build_file = clean_dep("//third_party:double_conversion.BUILD"),
sha256 = "2f7fbffac0d98d201ad0586f686034371a6d152ca67508ab611adc2386ad30de",
strip_prefix = "double-conversion-3992066a95b823efc8ccc1baf82a1cfc73f6e9b8",
system_build_file = clean_dep("//third_party/systemlibs:double_conversion.BUILD"),
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/google/double-conversion/archive/3992066a95b823efc8ccc1baf82a1cfc73f6e9b8.zip",
"https://github.com/google/double-conversion/archive/3992066a95b823efc8ccc1baf82a1cfc73f6e9b8.zip",
],
)
tf_http_archive(
name = "tflite_mobilenet_float",
build_file = clean_dep("//third_party:tflite_mobilenet_float.BUILD"),
sha256 = "2fadeabb9968ec6833bee903900dda6e61b3947200535874ce2fe42a8493abc0",
urls = [
"https://storage.googleapis.com/download.tensorflow.org/models/mobilenet_v1_2018_08_02/mobilenet_v1_1.0_224.tgz",
"https://storage.googleapis.com/download.tensorflow.org/models/mobilenet_v1_2018_08_02/mobilenet_v1_1.0_224.tgz",
],
)
tf_http_archive(
name = "tflite_mobilenet_quant",
build_file = clean_dep("//third_party:tflite_mobilenet_quant.BUILD"),
sha256 = "d32432d28673a936b2d6281ab0600c71cf7226dfe4cdcef3012555f691744166",
urls = [
"https://storage.googleapis.com/download.tensorflow.org/models/mobilenet_v1_2018_08_02/mobilenet_v1_1.0_224_quant.tgz",
"https://storage.googleapis.com/download.tensorflow.org/models/mobilenet_v1_2018_08_02/mobilenet_v1_1.0_224_quant.tgz",
],
)
tf_http_archive(
name = "tflite_mobilenet_ssd",
build_file = str(Label("//third_party:tflite_mobilenet.BUILD")),
sha256 = "767057f2837a46d97882734b03428e8dd640b93236052b312b2f0e45613c1cf0",
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/storage.googleapis.com/download.tensorflow.org/models/tflite/mobilenet_ssd_tflite_v1.zip",
"https://storage.googleapis.com/download.tensorflow.org/models/tflite/mobilenet_ssd_tflite_v1.zip",
],
)
tf_http_archive(
name = "tflite_mobilenet_ssd_quant",
build_file = str(Label("//third_party:tflite_mobilenet.BUILD")),
sha256 = "a809cd290b4d6a2e8a9d5dad076e0bd695b8091974e0eed1052b480b2f21b6dc",
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/storage.googleapis.com/download.tensorflow.org/models/tflite/coco_ssd_mobilenet_v1_0.75_quant_2018_06_29.zip",
"https://storage.googleapis.com/download.tensorflow.org/models/tflite/coco_ssd_mobilenet_v1_0.75_quant_2018_06_29.zip",
],
)
tf_http_archive(
name = "tflite_mobilenet_ssd_quant_protobuf",
build_file = str(Label("//third_party:tflite_mobilenet.BUILD")),
sha256 = "09280972c5777f1aa775ef67cb4ac5d5ed21970acd8535aeca62450ef14f0d79",
strip_prefix = "ssd_mobilenet_v1_quantized_300x300_coco14_sync_2018_07_18",
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/storage.googleapis.com/download.tensorflow.org/models/object_detection/ssd_mobilenet_v1_quantized_300x300_coco14_sync_2018_07_18.tar.gz",
"https://storage.googleapis.com/download.tensorflow.org/models/object_detection/ssd_mobilenet_v1_quantized_300x300_coco14_sync_2018_07_18.tar.gz",
],
)
tf_http_archive(
name = "tflite_conv_actions_frozen",
build_file = str(Label("//third_party:tflite_mobilenet.BUILD")),
sha256 = "d947b38cba389b5e2d0bfc3ea6cc49c784e187b41a071387b3742d1acac7691e",
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/storage.googleapis.com/download.tensorflow.org/models/tflite/conv_actions_tflite.zip",
"https://storage.googleapis.com/download.tensorflow.org/models/tflite/conv_actions_tflite.zip",
],
)
tf_http_archive(
name = "tflite_ovic_testdata",
build_file = clean_dep("//third_party:tflite_ovic_testdata.BUILD"),
sha256 = "033c941b7829b05ca55a124a26a6a0581b1ececc154a2153cafcfdb54f80dca2",
strip_prefix = "ovic",
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/storage.googleapis.com/download.tensorflow.org/data/ovic_2019_04_30.zip",
"https://storage.googleapis.com/download.tensorflow.org/data/ovic_2019_04_30.zip",
],
)
tf_http_archive(
name = "rules_cc",
sha256 = "cf3b76a90c86c0554c5b10f4b160f05af71d252026b71362c4674e2fb9936cf9",
strip_prefix = "rules_cc-01d4a48911d5e7591ecb1c06d3b8af47fe872371",
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/bazelbuild/rules_cc/archive/01d4a48911d5e7591ecb1c06d3b8af47fe872371.zip",
"https://github.com/bazelbuild/rules_cc/archive/01d4a48911d5e7591ecb1c06d3b8af47fe872371.zip",
],
)
tf_http_archive(
name = "rules_python",
sha256 = "aa96a691d3a8177f3215b14b0edc9641787abaaa30363a080165d06ab65e1161",
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/bazelbuild/rules_python/releases/download/0.0.1/rules_python-0.0.1.tar.gz",
"https://github.com/bazelbuild/rules_python/releases/download/0.0.1/rules_python-0.0.1.tar.gz",
],
)
tf_http_archive(
name = "build_bazel_rules_android",
sha256 = "cd06d15dd8bb59926e4d65f9003bfc20f9da4b2519985c27e190cddc8b7a7806",
strip_prefix = "rules_android-0.1.1",
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/bazelbuild/rules_android/archive/v0.1.1.zip",
"https://github.com/bazelbuild/rules_android/archive/v0.1.1.zip",
],
)
# Apple and Swift rules.
# https://github.com/bazelbuild/rules_apple/releases
tf_http_archive(
name = "build_bazel_rules_apple",
sha256 = "ee9e6073aeb5a65c100cb9c44b0017c937706a4ae03176e14a7e78620a198079",
strip_prefix = "rules_apple-5131f3d46794bf227d296c82f30c2499c9de3c5b",
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/bazelbuild/rules_apple/archive/5131f3d46794bf227d296c82f30c2499c9de3c5b.tar.gz",
"https://github.com/bazelbuild/rules_apple/archive/5131f3d46794bf227d296c82f30c2499c9de3c5b.tar.gz",
],
)
# https://github.com/bazelbuild/rules_swift/releases
tf_http_archive(
name = "build_bazel_rules_swift",
sha256 = "d0833bc6dad817a367936a5f902a0c11318160b5e80a20ece35fb85a5675c886",
strip_prefix = "rules_swift-3eeeb53cebda55b349d64c9fc144e18c5f7c0eb8",
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/bazelbuild/rules_swift/archive/3eeeb53cebda55b349d64c9fc144e18c5f7c0eb8.tar.gz",
"https://github.com/bazelbuild/rules_swift/archive/3eeeb53cebda55b349d64c9fc144e18c5f7c0eb8.tar.gz",
],
)
# https://github.com/bazelbuild/apple_support/releases
tf_http_archive(
name = "build_bazel_apple_support",
sha256 = "ad8ae80e93612b8151019367a3d1604d7a51c14480dae1254e10252007e8260c",
strip_prefix = "apple_support-501b4afb27745c4813a88ffa28acd901408014e4",
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/bazelbuild/apple_support/archive/501b4afb27745c4813a88ffa28acd901408014e4.tar.gz",
"https://github.com/bazelbuild/apple_support/archive/501b4afb27745c4813a88ffa28acd901408014e4.tar.gz",
],
)
# https://github.com/bazelbuild/bazel-skylib/releases
tf_http_archive(
name = "bazel_skylib",
sha256 = "1c531376ac7e5a180e0237938a2536de0c54d93f5c278634818e0efc952dd56c",
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/bazelbuild/bazel-skylib/releases/download/1.0.3/bazel-skylib-1.0.3.tar.gz",
"https://github.com/bazelbuild/bazel-skylib/releases/download/1.0.3/bazel-skylib-1.0.3.tar.gz",
],
)
# https://github.com/apple/swift-protobuf/releases
tf_http_archive(
name = "com_github_apple_swift_swift_protobuf",
strip_prefix = "swift-protobuf-1.6.0/",
sha256 = "4ccf6e5ea558e8287bf6331f9f6e52b3c321fca5f1d181d03680f415c32a6bba",
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/apple/swift-protobuf/archive/1.6.0.zip",
"https://github.com/apple/swift-protobuf/archive/1.6.0.zip",
],
)
# https://github.com/google/xctestrunner/releases
http_file(
name = "xctestrunner",
executable = 1,
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/google/xctestrunner/releases/download/0.2.9/ios_test_runner.par",
"https://github.com/google/xctestrunner/releases/download/0.2.9/ios_test_runner.par",
],
)
tf_http_archive(
name = "nlohmann_json_lib",
build_file = clean_dep("//third_party:nlohmann_json.BUILD"),
sha256 = "c377963a95989270c943d522bfefe7b889ef5ed0e1e15d535fd6f6f16ed70732",
strip_prefix = "json-3.4.0",
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/nlohmann/json/archive/v3.4.0.tar.gz",
"https://github.com/nlohmann/json/archive/v3.4.0.tar.gz",
],
)
tf_http_archive(
name = "pybind11",
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/pybind/pybind11/archive/v2.6.0.tar.gz",
"https://github.com/pybind/pybind11/archive/v2.6.0.tar.gz",
],
sha256 = "90b705137b69ee3b5fc655eaca66d0dc9862ea1759226f7ccd3098425ae69571",
strip_prefix = "pybind11-2.6.0",
build_file = clean_dep("//third_party:pybind11.BUILD"),
system_build_file = clean_dep("//third_party/systemlibs:pybind11.BUILD"),
)
tf_http_archive(
name = "wrapt",
build_file = clean_dep("//third_party:wrapt.BUILD"),
sha256 = "8a6fb40e8f8b6a66b4ba81a4044c68e6a7b1782f21cfabc06fb765332b4c3e51",
strip_prefix = "wrapt-1.11.1/src/wrapt",
system_build_file = clean_dep("//third_party/systemlibs:wrapt.BUILD"),
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/GrahamDumpleton/wrapt/archive/1.11.1.tar.gz",
"https://github.com/GrahamDumpleton/wrapt/archive/1.11.1.tar.gz",
],
)
tf_http_archive(
name = "coremltools",
sha256 = "0d594a714e8a5fd5bd740ad112ef59155c0482e25fdc8f8efa5758f90abdcf1e",
strip_prefix = "coremltools-3.3",
build_file = clean_dep("//third_party:coremltools.BUILD"),
urls = [
"http://mirror.tensorflow.org/github.com/apple/coremltools/archive/3.3.zip",
"https://github.com/apple/coremltools/archive/3.3.zip",
],
)
tf_http_archive(
name = "tf_toolchains",
sha256 = "eb175afa73e5a33d2b5d2aabcfde6c8c3395fd7001eb5ba765a5cd98cce714ba",
strip_prefix = "toolchains-0.0.2",
build_file = clean_dep("//third_party:tf_toolchains.BUILD"),
urls = [
"http://mirror.tensorflow.org/github.com/tensorflow/toolchains/archive/v0.0.2.tar.gz",
"https://github.com/tensorflow/toolchains/archive/v0.0.2.tar.gz",
],
)
def tf_bind():
"""Bind targets for some external repositories"""
##############################################################################
# BIND DEFINITIONS
#
# Please do not add bind() definitions unless we have no other choice.
# If that ends up being the case, please leave a comment explaining
# why we can't depend on the canonical build target.
# Needed by Protobuf
native.bind(
name = "grpc_cpp_plugin",
actual = "@com_github_grpc_grpc//src/compiler:grpc_cpp_plugin",
)
native.bind(
name = "grpc_python_plugin",
actual = "@com_github_grpc_grpc//src/compiler:grpc_python_plugin",
)
native.bind(
name = "grpc_lib",
actual = "@com_github_grpc_grpc//:grpc++",
)
native.bind(
name = "grpc_lib_unsecure",
actual = "@com_github_grpc_grpc//:grpc++_unsecure",
)
# Needed by Protobuf
native.bind(
name = "python_headers",
actual = clean_dep("//third_party/python_runtime:headers"),
)
# Needed by Protobuf
native.bind(
name = "six",
actual = "@six_archive//:six",
)
| 48.874055 | 203 | 0.688674 |
4a205abf57199f5ee1e9192e034224e71b3c94da | 2,918 | py | Python | examples/tutorials/configuration.py | daroari/pygmt | e022851d62814a9255ed2bb63ae092b666b832b9 | [
"BSD-3-Clause"
] | 1 | 2021-05-11T03:55:25.000Z | 2021-05-11T03:55:25.000Z | examples/tutorials/configuration.py | daroari/pygmt | e022851d62814a9255ed2bb63ae092b666b832b9 | [
"BSD-3-Clause"
] | 22 | 2021-03-24T07:58:59.000Z | 2022-03-29T12:07:09.000Z | examples/tutorials/configuration.py | daroari/pygmt | e022851d62814a9255ed2bb63ae092b666b832b9 | [
"BSD-3-Clause"
] | 1 | 2021-11-03T07:47:18.000Z | 2021-11-03T07:47:18.000Z | """
Configuring PyGMT defaults
==========================
Default GMT parameters can be set globally or locally using :class:`pygmt.config`.
"""
# sphinx_gallery_thumbnail_number = 3
import pygmt
########################################################################################
# Configuring default GMT parameters
# ----------------------------------
#
# Users can override default parameters either temporarily (locally) or permanently
# (globally) using :class:`pygmt.config`. The full list of default parameters that can be
# changed can be found at :gmt-docs:`gmt.conf.html`.
#
# We demonstrate the usage of :class:`pygmt.config` by configuring a map plot.
# Start with a basic figure with the default style
fig = pygmt.Figure()
fig.basemap(region=[115, 119.5, 4, 7.5], projection="M10c", frame=True)
fig.coast(land="black", water="skyblue")
fig.show()
########################################################################################
# Globally overriding defaults
# ----------------------------
#
# The ``MAP_FRAME_TYPE`` parameter specifies the style of map frame to use, of which there
# are 5 options: ``fancy`` (default, seen above), ``fancy+``, ``plain``, ``graph``
# (which does not apply to geographical maps) and ``inside``.
#
# The ``FORMAT_GEO_MAP`` parameter controls the format of geographical tick annotations.
# The default uses degrees and minutes. Here we specify the ticks to be a decimal number
# of degrees.
fig = pygmt.Figure()
# Configuration for the 'current figure'.
pygmt.config(MAP_FRAME_TYPE="plain")
pygmt.config(FORMAT_GEO_MAP="ddd.xx")
fig.basemap(region=[115, 119.5, 4, 7.5], projection="M10c", frame=True)
fig.coast(land="black", water="skyblue")
fig.show()
########################################################################################
# Locally overriding defaults
# ---------------------------
#
# It is also possible to temporarily override the default parameters, which is very
# useful for limiting the scope of changes to a particular plot. :class:`pygmt.config` is
# implemented as a context manager, which handles the setup and teardown of a GMT
# session. Python users are likely familiar with the ``with open(...) as file:`` snippet,
# where ``open`` returns a ``file`` object that acts as a context manager. In the same way,
# :class:`pygmt.config` can be used to override a parameter for a single command, or for a
# sequence of commands. An application of :class:`pygmt.config`
# as a context manager is shown below:
fig = pygmt.Figure()
# This will have a fancy+ frame
with pygmt.config(MAP_FRAME_TYPE="fancy+"):
fig.basemap(region=[115, 119.5, 4, 7.5], projection="M10c", frame=True)
fig.coast(land="black", water="skyblue")
# Shift plot origin down by 10cm to plot another map
fig.shift_origin(yshift="-10c")
# This figure retains the default "fancy" frame
fig.basemap(region=[115, 119.5, 4, 7.5], projection="M10c", frame=True)
fig.coast(land="black", water="skyblue")
fig.show()
| 37.410256 | 95 | 0.637423 |
4a205b331b00eabe332c37d813710362792dafef | 1,616 | py | Python | src/lib/python/telemetry/build_info_inject.py | dashwood8691/f5-bigip-image-generator | 5a5b39a266a0762be5b908e6b47323efdf7d32c1 | [
"Apache-2.0"
] | 34 | 2019-08-21T01:28:27.000Z | 2021-10-05T07:21:58.000Z | src/lib/python/telemetry/build_info_inject.py | dashwood8691/f5-bigip-image-generator | 5a5b39a266a0762be5b908e6b47323efdf7d32c1 | [
"Apache-2.0"
] | 34 | 2019-09-13T10:17:31.000Z | 2022-03-09T00:01:00.000Z | src/lib/python/telemetry/build_info_inject.py | dashwood8691/f5-bigip-image-generator | 5a5b39a266a0762be5b908e6b47323efdf7d32c1 | [
"Apache-2.0"
] | 16 | 2019-08-21T20:06:17.000Z | 2022-03-25T11:59:00.000Z | """BuildInfo module for injecting info into bigip"""
# Copyright (C) 2020 F5 Networks, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
import json
from telemetry.build_info import BuildInfo
from telemetry.operation_info import OperationInfo
from telemetry.additional_info import AdditionalInfo
class BuildInfoInject(BuildInfo):
"""Class for capturing information about the environment where images are being built"""
def __init__(self):
"""
Gathers information to be injected into the iso
Info includes:
* Operation info
* Platform info
* Product info
* Environment info
* Additional info for debugging
"""
super().__init__()
operation = OperationInfo()
additional = AdditionalInfo()
self.build_info["Operation"] = operation.operation
self.build_info["additional"] = additional.additional
def to_json(self):
"""Output build info as pre-formatted JSON string"""
output = json.dumps(self.build_info, indent=4, sort_keys=True)
return output
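# Illustrative usage (the exact keys present in the JSON depend on what
# BuildInfo, OperationInfo and AdditionalInfo collect on the build host):
#
#     info = BuildInfoInject()
#     print(info.to_json())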
| 31.076923 | 92 | 0.697401 |
4a205d36131fa2d502530e039b812fa9fa872c14 | 434 | py | Python | tests/test_helpers.py | OliverHofkens/dephell | 6303f416018910668f1635b70cd828a2fd2b2d9e | [
"MIT"
] | 1,880 | 2019-03-21T10:08:25.000Z | 2022-03-31T12:41:55.000Z | tests/test_helpers.py | rachmadaniHaryono/dephell | 0ef500c8f2d5f05244bac191b1b1383f68464cd2 | [
"MIT"
] | 356 | 2019-03-21T19:08:56.000Z | 2021-01-08T17:45:43.000Z | tests/test_helpers.py | rachmadaniHaryono/dephell | 0ef500c8f2d5f05244bac191b1b1383f68464cd2 | [
"MIT"
] | 157 | 2019-04-23T01:13:37.000Z | 2022-03-24T22:41:18.000Z | # app
from .helpers import Fake, make_root
def test_make_deps():
root = make_root(
root=Fake('', 'a', 'b'),
a=(
Fake('1', 'b>5'),
Fake('2', 'b>5'),
Fake('3', 'b>7'),
),
b=(
Fake('4'),
Fake('5'),
Fake('6'),
),
)
names = [dep.name for dep in root.dependencies]
assert 'a' in names
assert 'b' in names
| 18.869565 | 51 | 0.403226 |
4a205dd3c32e4acee84630da7acfe2738862997b | 7,463 | py | Python | pythran/run.py | wizardxz/pythran | 9a1b1c08cf9d3478be3b6313ac8ebca9e5b88e65 | [
"BSD-3-Clause"
] | null | null | null | pythran/run.py | wizardxz/pythran | 9a1b1c08cf9d3478be3b6313ac8ebca9e5b88e65 | [
"BSD-3-Clause"
] | null | null | null | pythran/run.py | wizardxz/pythran | 9a1b1c08cf9d3478be3b6313ac8ebca9e5b88e65 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
""" Script to run Pythran file compilation with specified g++ like flags. """
import argparse
import logging
import os
import sys
import pythran
from distutils.errors import CompileError
logger = logging.getLogger("pythran")
def convert_arg_line_to_args(arg_line):
"""Read argument from file in a prettier way."""
for arg in arg_line.split():
if not arg.strip():
continue
yield arg
def compile_flags(args):
"""
Build a dictionnary with an entry for cppflags, ldflags, and cxxflags.
These options are filled according to the command line defined options
"""
compiler_options = {
'define_macros': args.defines,
'undef_macros': args.undefs,
'include_dirs': args.include_dirs,
'extra_compile_args': args.extra_flags,
'library_dirs': args.libraries_dir,
'extra_link_args': args.extra_flags,
}
for param in ('opts', ):
val = getattr(args, param, None)
if val:
compiler_options[param] = val
return compiler_options
def run():
parser = argparse.ArgumentParser(prog='pythran',
description='pythran: a python to C++ '
'compiler',
epilog="It's a megablast!",
fromfile_prefix_chars="@")
parser.add_argument('input_file', type=str,
help='the pythran module to compile, '
'either a .py or a .cpp file')
parser.add_argument('-o', dest='output_file', type=str,
help='path to generated file')
parser.add_argument('-P', dest='optimize_only', action='store_true',
help='only run the high-level optimizer, '
'do not compile')
parser.add_argument('-E', dest='translate_only', action='store_true',
help='only run the translator, do not compile')
parser.add_argument('-e', dest='raw_translate_only', action='store_true',
help='similar to -E, '
'but does not generate python glue')
parser.add_argument('-v', dest='verbose', action='store_true',
help='be more verbose')
parser.add_argument('-w', dest='warn_off', action='store_true',
help='be less verbose')
parser.add_argument('-V', '--version',
action='version',
version=pythran.version.__version__)
parser.add_argument('-p', dest='opts', metavar='pass',
action='append',
help='any pythran optimization to apply before code '
'generation',
default=list())
parser.add_argument('-I', dest='include_dirs', metavar='include_dir',
action='append',
help='any include dir relevant to the underlying C++ '
'compiler',
default=list())
parser.add_argument('-L', dest='libraries_dir', metavar='ldflags',
action='append',
help='any search dir relevant to the linker',
default=list())
parser.add_argument('-D', dest='defines', metavar='macro_definition',
action='append',
help='any macro definition relevant to '
'the underlying C++ compiler',
default=list())
parser.add_argument('-U', dest='undefs', metavar='macro_definition',
action='append',
help='any macro undef relevant to '
'the underlying C++ compiler',
default=list())
parser.convert_arg_line_to_args = convert_arg_line_to_args
args, extra = parser.parse_known_args(sys.argv[1:])
args.extra_flags = extra
if args.raw_translate_only:
args.translate_only = True
args.undefs.append('ENABLE_PYTHON_MODULE')
if args.verbose and args.warn_off:
logger.critical("Unexpected combination: -w and -v? Daoubennek?")
sys.exit(1)
if args.verbose:
logger.setLevel(logging.INFO)
if args.warn_off:
logger.setLevel(logging.ERROR)
if args.verbose and not args.warn_off:
pythran.config.lint_cfg(pythran.config.cfg)
try:
if not os.path.exists(args.input_file):
raise ValueError("input file `{0}' not found".format(
args.input_file))
module_name, ext = os.path.splitext(os.path.basename(args.input_file))
# FIXME: do we want to support other ext than .cpp?
if ext not in ['.cpp', '.py']:
raise SyntaxError("Unsupported file extension: '{0}'".format(ext))
if ext == '.cpp':
if args.optimize_only:
raise ValueError("Do you really ask for Python-to-Python "
"on this C++ input file: '{0}'?".format(
args.input_file))
if args.translate_only:
raise ValueError("Do you really ask for Python-to-C++ "
"on this C++ input file: '{0}'?".format(
args.input_file))
pythran.compile_cxxfile(module_name,
args.input_file, args.output_file,
**compile_flags(args))
else: # assume we have a .py input file here
pythran.compile_pythranfile(args.input_file,
output_file=args.output_file,
cpponly=args.translate_only,
pyonly=args.optimize_only,
**compile_flags(args))
except IOError as e:
logger.critical("I've got a bad feeling about this...\n"
"E: " + str(e))
sys.exit(1)
except ValueError as e:
logger.critical("Chair to keyboard interface error\n"
"E: " + str(e))
sys.exit(1)
except pythran.types.tog.PythranTypeError as e:
logger.critical("You shall not pass!\n"
"E: " + str(e))
sys.exit(1)
except pythran.syntax.PythranSyntaxError as e:
logger.critical("I am in trouble. Your input file does not seem "
"to match Pythran's constraints...\n"
"E: " + str(e))
sys.exit(1)
except CompileError as e:
logger.critical("Cover me Jack. Jack? Jaaaaack!!!!\n"
"E: " + str(e))
sys.exit(1)
except NotImplementedError as e:
logger.critical("MAYDAY, MAYDAY, MAYDAY; pythran compiler; "
"code area out of control\n"
"E: not implemented feature needed, "
"bash the developers")
raise # Why ? we may instead display the stacktrace and exit?
except EnvironmentError as e:
logger.critical("By Jove! Your environment does not seem "
"to provide all what we need\n"
"E: " + str(e))
sys.exit(1)
if __name__ == '__main__':
run()
| 36.583333 | 78 | 0.527938 |
4a205eb7995244e3484f3f399a6579ae0d12a610 | 9,348 | py | Python | deepa2/parsers.py | debatelab/deepa2 | 1a9e8c357d7e3924808c703ec9f4a6611a4b5f93 | [
"Apache-2.0"
] | null | null | null | deepa2/parsers.py | debatelab/deepa2 | 1a9e8c357d7e3924808c703ec9f4a6611a4b5f93 | [
"Apache-2.0"
] | null | null | null | deepa2/parsers.py | debatelab/deepa2 | 1a9e8c357d7e3924808c703ec9f4a6611a4b5f93 | [
"Apache-2.0"
] | null | null | null | """Parsers and formatters for DA2 data structures"""
import dataclasses
import logging
import re
from typing import Any, List, Dict, Tuple, Optional, Union
import jinja2
# import ttp
from deepa2 import DeepA2Item, QuotedStatement, ArgdownStatement, Formalization
class DeepA2Layouter: # pylint: disable=too-few-public-methods
"""formats DeepA2Items"""
_IGNORED_FIELDS = ["metadata", "distractors"]
_TEMPLATE_STRINGS = {
QuotedStatement: "{{ text }} (ref: ({{ ref_reco }}))",
ArgdownStatement: "{{ text }} (ref: ({{ ref_reco }}))",
Formalization: "{{ form }} (ref: ({{ ref_reco }}))",
}
_LIST_SEPARATOR = " | "
def __init__(self) -> None:
"""initialize DeepA2Parser"""
# compile templates
env = jinja2.Environment()
self._templates = {
k: env.from_string(v) for k, v in self._TEMPLATE_STRINGS.items()
}
def _format_field( # pylint: disable=too-many-return-statements
self, data: Any, field: dataclasses.Field
) -> Optional[str]:
"""formats field"""
if data is None:
return None
if not data:
return " "
if field.type == Union[str, None]:
return data
if field.type == Union[List[str], None]:
return self._format_list(data)
if field.type in [
List[Tuple[str, str]],
Union[List[Tuple[str, str]], None],
]:
return self._format_dict(dict(data))
if field.type in [
Union[List[QuotedStatement], None],
Union[List[ArgdownStatement], None],
Union[List[Formalization], None],
]:
template = self._get_template(data)
if template: # pylint: disable=no-else-return
da2list = [
template.render(**dataclasses.asdict(item))
for item in data
if dataclasses.asdict(item).get("text")
or dataclasses.asdict(item).get("form")
]
return self._format_list(da2list)
else:
logging.warning("DeepA2Layouter no template found.")
logging.warning("DeepA2Layouter couldn't format field %s", field)
return "not-formatted"
def _get_template(self, data: List) -> Optional[jinja2.Template]:
"""fetches template for DeepA2Item field"""
template = self._templates.get(data[0].__class__)
return template
def _format_list(self, da2list: List[str]) -> str:
"""formats a list of strings"""
formatted = " "
if da2list:
if len(da2list) == 1:
formatted = da2list[0]
else:
formatted = self._LIST_SEPARATOR.join(da2list)
return formatted
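    # e.g. (illustrative): _format_list(["a", "b"]) -> "a | b", while a
    # single-element list comes back as the bare string "a".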
def _format_dict(self, da2dict: Dict[str, str]) -> str:
"""formats a dict"""
da2list = [f"{k} : {v}" for k, v in da2dict.items()]
return self._format_list(da2list)
def format(self, da2_item: DeepA2Item) -> Dict[str, Optional[str]]:
"""formats DeepA2Item fields as strings"""
da2_formatted = {
field.name: self._format_field(
data=getattr(da2_item, field.name), field=field
)
for field in dataclasses.fields(da2_item)
if field.name not in self._IGNORED_FIELDS
}
return da2_formatted
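# Usage sketch (illustrative): DeepA2Layouter().format(da2_item) returns a
# flat {field_name: formatted_string_or_None} dict covering every
# DeepA2Item field except the ignored ones ("metadata", "distractors").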
@dataclasses.dataclass
class ArgumentStatement:
"""dataclass representing a statement in an argument
fields:
text: str - the text of the statement
label: int - the label of the statement
is_conclusion: bool - whether the statement is a conclusion
uses: List[int] - the ids of the statements the statement is inferred from
inference_info: str - information about the inference (not parsed)
schemes: List[str] - the schemes used to infer the statement
variants: List[str] - the variants of the schemes used to infer the statement
"""
text: Optional[str] = None
is_conclusion: bool = False
label: Optional[int] = None
uses: Optional[List[int]] = None
inference_info: Optional[str] = None
schemes: Optional[List[str]] = None
variants: Optional[List[str]] = None
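# Illustrative construction (hypothetical values): a conclusion "q" derived
# from premises (1) and (2) by modus ponens could be represented as
#   ArgumentStatement(text="q", is_conclusion=True, label=3, uses=[1, 2],
#                     schemes=["modus ponens"])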
@dataclasses.dataclass
class Argument:
"""dataclass representing an argument"""
statements: List[ArgumentStatement] = dataclasses.field(default_factory=list)
class DeepA2Parser:
"""parses text as DeepA2Items"""
@staticmethod
def parse_argdown(text: str) -> Optional[Argument]:
"""parses argdown text as Argument"""
parser = ArgdownParser()
statements = parser.parse_argdown_block(text)
if not statements:
return None
argument = Argument(statements=statements)
return argument
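    # Minimal usage sketch (hypothetical argdown input):
    #   ad = "(1) p (2) if p then q -- with modus ponens from (1), (2) -- (3) q"
    #   argument = DeepA2Parser.parse_argdown(ad)
    #   # argument.statements[2] is the conclusion ("q"), with uses == [1, 2]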
    @staticmethod
    def parse_list(text: str):
        """parses list of statements (stub, not implemented yet)"""

    @staticmethod
    def parse_formalization(text: str):
        """parses formalizations (stub, not implemented yet)"""

    @staticmethod
    def parse_keys(text: str):
        """parses keys of formalization (stub, not implemented yet)"""
class ArgdownParser:
"""parses text as Argdown"""
INFERENCE_PATTERN_REGEX = (
r" ---- |"
r" -- with (?P<scheme>[^\(\)]*)(?P<variant> \([^-\(\))]*\))?"
r" from (?P<uses>[\(\), 0-9]+) -- |"
r" -- (?P<info>[^-]*) -- "
)
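    # Inference lines this pattern is meant to match (illustrative):
    #   " ---- "                                   -> unannotated inference bar
    #   " -- with modus ponens from (1), (2) -- "  -> scheme plus used labels
    #   " -- some inference info -- "              -> free-form info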
@staticmethod
def preprocess_ad(ad_raw: str) -> str:
"""preprocess argdown text"""
ad_raw = ad_raw.replace("\n", " ")
ad_raw = re.sub(r"\s{2,}", " ", ad_raw)
ad_raw = ad_raw.replace("with?? ", "with ?? ")
return ad_raw
def parse_argdown_block(self, ad_raw: str) -> Optional[List[ArgumentStatement]]:
"""parses argdown block"""
# preprocess
ad_raw = self.preprocess_ad(ad_raw)
regex = self.INFERENCE_PATTERN_REGEX
argument_statements = []
# find all inferences
matches = re.finditer(regex, ad_raw, re.MULTILINE)
inf_args: Dict[str, Any] = {}
pointer = 0
# iterate over inferences
for match in matches:
            # parse all not-yet-parsed propositions that precede the matched inference
new_statements = self.parse_proposition_block(
ad_raw[pointer : match.start()], **inf_args
)
if not new_statements:
# if failed to parse proposition block return None
return None
argument_statements.extend(new_statements)
            # update the pointer and inf_args used when parsing the next proposition block
pointer = match.end()
schemes = match.group("scheme")
variants = match.group("variant")
inference_info = match.group(0)
inf_args = {
"schemes": re.split("; |, | and ", schemes) if schemes else None,
"variants": re.split("; |, | and ", variants) if variants else None,
"uses": self.parse_uses(match.group("uses")),
"inference_info": inference_info.strip("- ")
if inference_info
else None,
}
# parse remaining propositions
if pointer > 0:
new_statements = self.parse_proposition_block(ad_raw[pointer:], **inf_args)
argument_statements.extend(new_statements)
return argument_statements
@staticmethod
def parse_proposition_block(ad_raw: str, **inf_args) -> List[ArgumentStatement]:
"""parses proposition block"""
statement_list: List[ArgumentStatement] = []
if not ad_raw:
return statement_list
# preprocess
if ad_raw[0] != " ":
ad_raw = " " + ad_raw
# match labels
regex = r" \(([0-9]*)\) "
if not re.match(regex, ad_raw):
return statement_list
matches = re.finditer(regex, ad_raw, re.MULTILINE)
label = -1
pointer = -1
# iterate over matched labels
for match in matches:
            # each newly matched label closes out the previous statement
if label > -1:
statement = ArgumentStatement(
text=ad_raw[pointer : match.start()].strip(), label=label
)
statement_list.append(statement)
label = int(match.group(1)) # update label
pointer = match.end() # update pointer
if label > -1:
# add last statement
statement = ArgumentStatement(text=ad_raw[pointer:].strip(), label=label)
statement_list.append(statement)
if statement_list and "uses" in inf_args:
# update first statement with inference details
statement_list[0].is_conclusion = True
for key, value in inf_args.items():
if hasattr(statement_list[0], key):
setattr(statement_list[0], key, value)
return statement_list
@staticmethod
def parse_uses(uses_raw) -> List[int]:
"""parses list of labels used in an inference"""
if not uses_raw:
return []
regex = r"\(([0-9]+)\)"
matches = re.finditer(regex, str(uses_raw), re.MULTILINE)
return [int(match.group(1)) for match in matches]
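    # e.g. (illustrative): parse_uses("(1), (2)") -> [1, 2]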
| 33.992727 | 94 | 0.581943 |
4a2060b6aa98edcf8c508edf509416b029e661f6 | 486 | py | Python | todos_store.py | smithclay/tutorial-python | 841453e9e5cf9245d44bb205ffa22ef2b7d64620 | ["Python-2.0", "OLDAP-2.7"] | 7 | 2018-06-11T05:49:33.000Z | 2022-03-15T15:06:59.000Z | todos_store.py | smithclay/tutorial-python | 841453e9e5cf9245d44bb205ffa22ef2b7d64620 | ["Python-2.0", "OLDAP-2.7"] | 37 | 2018-11-06T14:20:07.000Z | 2022-01-13T07:58:52.000Z | todos_store.py | smithclay/tutorial-python | 841453e9e5cf9245d44bb205ffa22ef2b7d64620 | ["Python-2.0", "OLDAP-2.7"] | 11 | 2018-11-11T16:59:38.000Z | 2022-02-23T12:37:51.000Z |
class Store:
    # The single instance will be stored here.
__instance = None
@staticmethod
def getInstance():
""" Static access method. """
        if Store.__instance is None:
Store()
return Store.__instance
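    # Illustrative usage (not part of the original tutorial):
    #   first = Store.getInstance()    # lazily creates the singleton
    #   second = Store.getInstance()   # returns the very same object
    #   assert first is second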
def __init__(self):
""" Virtually private constructor. """
        if Store.__instance is not None:
raise Exception("This class is a singleton!")
else:
Store.__instance = self
                self.todos = []
| 25.578947 | 57 | 0.559671 |
4a2060c0011d622d663f03490ff708581810e5c4 | 6,449 | py | Python | alerter/test/data_store/test_starters.py | SimplyVC/panic | 2f5c327ea0d14b6a49dc8f4599a255048bc2ff6d | ["Apache-2.0"] | 41 | 2019-08-23T12:40:42.000Z | 2022-03-28T11:06:02.000Z | alerter/test/data_store/test_starters.py | SimplyVC/panic | 2f5c327ea0d14b6a49dc8f4599a255048bc2ff6d | ["Apache-2.0"] | 147 | 2019-08-30T22:09:48.000Z | 2022-03-30T08:46:26.000Z | alerter/test/data_store/test_starters.py | SimplyVC/panic | 2f5c327ea0d14b6a49dc8f4599a255048bc2ff6d | ["Apache-2.0"] | 3 | 2019-09-03T21:12:28.000Z | 2021-08-18T14:27:56.000Z |
import logging
import unittest
from datetime import timedelta
from unittest import mock
from parameterized import parameterized
from src.data_store.starters import (
_initialise_store_logger, _initialise_store, start_system_store,
start_github_store, start_alert_store, start_config_store,
start_chainlink_node_store)
from src.data_store.stores.alert import AlertStore
from src.data_store.stores.config import ConfigStore
from src.data_store.stores.github import GithubStore
from src.data_store.stores.node.chainlink import ChainlinkNodeStore
from src.data_store.stores.system import SystemStore
from src.message_broker.rabbitmq import RabbitMQApi
from src.utils import env
from src.utils.constants.names import (SYSTEM_STORE_NAME, GITHUB_STORE_NAME,
ALERT_STORE_NAME, CONFIG_STORE_NAME,
CL_NODE_STORE_NAME)
class TestAlertersStarters(unittest.TestCase):
def setUp(self) -> None:
self.dummy_logger = logging.getLogger('Dummy')
self.dummy_logger.disabled = True
self.github_store_name = 'test_github_store'
self.system_store_name = 'test_system_store'
self.alerter_store_name = 'alerter_store_name'
self.cl_node_store_name = 'cl_node_store_name'
self.connection_check_time_interval = timedelta(seconds=0)
self.rabbit_ip = env.RABBIT_IP
self.rabbitmq = RabbitMQApi(
self.dummy_logger, self.rabbit_ip,
connection_check_time_interval=self.connection_check_time_interval)
self.test_github_store = GithubStore(GITHUB_STORE_NAME,
self.dummy_logger,
self.rabbitmq)
self.test_system_store = SystemStore(SYSTEM_STORE_NAME,
self.dummy_logger,
self.rabbitmq)
self.test_alert_store = AlertStore(ALERT_STORE_NAME,
self.dummy_logger,
self.rabbitmq)
self.test_config_store = ConfigStore(CONFIG_STORE_NAME,
self.dummy_logger,
self.rabbitmq)
self.test_cl_node_store = ChainlinkNodeStore(CL_NODE_STORE_NAME,
self.dummy_logger,
self.rabbitmq)
def tearDown(self) -> None:
self.rabbitmq = None
self.dummy_logger = None
self.test_github_store = None
self.test_system_store = None
self.test_alert_store = None
self.test_config_store = None
self.test_cl_node_store = None
@parameterized.expand([
(GITHUB_STORE_NAME, GithubStore.__name__,),
(SYSTEM_STORE_NAME, SystemStore.__name__,),
(CL_NODE_STORE_NAME, ChainlinkNodeStore.__name__,),
(ALERT_STORE_NAME, AlertStore.__name__,),
(CONFIG_STORE_NAME, ConfigStore.__name__,),
])
@mock.patch("src.data_store.starters.create_logger")
def test_initialise_store_logger_initialises_logger_correctly(
self, store_display_name, store_module_name,
mock_create_logger) -> None:
mock_create_logger.return_value = None
_initialise_store_logger(store_display_name, store_module_name)
mock_create_logger.assert_called_once_with(
env.DATA_STORE_LOG_FILE_TEMPLATE.format(store_display_name),
store_module_name, env.LOGGING_LEVEL, rotating=True)
@parameterized.expand([
(GITHUB_STORE_NAME, GithubStore.__name__, 'mock_github_store',),
(SYSTEM_STORE_NAME, SystemStore.__name__, 'mock_system_store',),
(CL_NODE_STORE_NAME, ChainlinkNodeStore.__name__,
'mock_cl_node_store',),
(ALERT_STORE_NAME, AlertStore.__name__, 'mock_alert_store',),
(CONFIG_STORE_NAME, ConfigStore.__name__, 'mock_config_store',),
])
@mock.patch("src.data_store.starters.RabbitMQApi")
@mock.patch("src.data_store.starters.ChainlinkNodeStore")
@mock.patch("src.data_store.starters.SystemStore")
@mock.patch("src.data_store.starters.ConfigStore")
@mock.patch("src.data_store.starters.AlertStore")
@mock.patch("src.data_store.starters.GithubStore")
@mock.patch("src.data_store.starters._initialise_store_logger")
def test_initialise_store_initialises_store_correctly(
self, store_display_name, store_module_name, enabled_mock_variable,
mock_init_logger, mock_github_store, mock_alert_store,
mock_config_store, mock_system_store, mock_cl_node_store,
mock_rabbit) -> None:
mock_init_logger.return_value = self.dummy_logger
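        # 'enabled_mock_variable' carries the *name* of one of the mock
        # parameters injected above; eval() resolves it in this local scope.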
mock_var = eval(enabled_mock_variable)
mock_var.__name__ = store_module_name
mock_rabbit.__name__ = 'RabbitMQApi'
mock_rabbit.return_value = self.rabbitmq
_initialise_store(mock_var, store_display_name)
mock_init_logger.assert_called_once_with(store_display_name,
store_module_name)
mock_var.assert_called_once_with(store_display_name, self.dummy_logger,
self.rabbitmq)
@parameterized.expand([
('self.test_github_store', start_github_store, GithubStore,
GITHUB_STORE_NAME,),
('self.test_system_store', start_system_store, SystemStore,
SYSTEM_STORE_NAME,),
('self.test_cl_node_store', start_chainlink_node_store,
ChainlinkNodeStore, CL_NODE_STORE_NAME,),
('self.test_alert_store', start_alert_store, AlertStore,
ALERT_STORE_NAME,),
('self.test_config_store', start_config_store, ConfigStore,
CONFIG_STORE_NAME,),
])
@mock.patch("src.data_store.starters._initialise_store")
@mock.patch("src.data_store.starters.start_store")
def test_start_store_functions_call_sub_functions_correctly(
self, initialised_store, start_function, store_type,
store_display_name, mock_start_store, mock_init_store) -> None:
mock_init_store.return_value = eval(initialised_store)
start_function()
mock_start_store.assert_called_once_with(eval(initialised_store))
mock_init_store.assert_called_once_with(store_type, store_display_name)
| 47.419118 | 79 | 0.673438 |