code (string, lengths 75 to 104k) | docstring (string, lengths 1 to 46.9k)
---|---
def memory_usage(self):
"""
Get the combined memory usage of the field data and field values.
"""
data = super(Field, self).memory_usage()
values = 0
for value in self.field_values:
values += value.memory_usage()
data['field_values'] = values
return data | Get the combined memory usage of the field data and field values. |
def _query_nsot(url, headers, device=None):
'''
if a device is given, query nsot for that specific device, otherwise return
all devices
:param url: str
:param headers: dict
:param device: None or str
:return:
'''
url = urlparse.urljoin(url, 'devices')
ret = {}
if not device:
query = salt.utils.http.query(url, header_dict=headers, decode=True)
else:
url = urlparse.urljoin(url, device)
query = salt.utils.http.query(url, header_dict=headers,
decode=True)
error = query.get('error')
if error:
log.error('can\'t get device(s) from nsot! reason: %s', error)
else:
ret = query['dict']
return ret | if a device is given, query nsot for that specific device, otherwise return
all devices
:param url: str
:param headers: dict
:param device: None or str
:return: |
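A hypothetical call to this helper might look as follows; the NSoT endpoint, API token and device name are illustrative placeholders, not values from the source.
# Hedged sketch: endpoint, token and hostname are placeholders.
headers = {'Authorization': 'ApiToken admin@example.com:SECRET'}
all_devices = _query_nsot('https://nsot.example.com/api/', headers)
one_device = _query_nsot('https://nsot.example.com/api/', headers, device='rtr01')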
def error_perturbation(C, S):
r"""Error perturbation for given sensitivity matrix.
Parameters
----------
C : (M, M) ndarray
Count matrix
S : (M, M) ndarray or (K, M, M) ndarray
Sensitivity matrix (for scalar observable) or sensitivity
tensor for vector observable
Returns
-------
X : float or (K, K) ndarray
error-perturbation (for scalar observables) or covariance matrix
(for vector-valued observable)
Notes
-----
**Scalar observable**
The sensitivity matrix :math:`S=(s_{ij})` of a scalar observable
:math:`f(T)` is defined as
.. math:: S= \left(\left. \frac{\partial f(T)}{\partial t_{ij}} \right \rvert_{T_0} \right)
evaluated at a suitable transition matrix :math:`T_0`.
The sensitivity is the variance of the observable
.. math:: \mathbb{V}(f)=\sum_{i,j,k,l} s_{ij} \text{cov}[t_{ij}, t_{kl}] s_{kl}
**Vector valued observable**
The sensitivity tensor :math:`S=(s_{ijk})` for a vector
valued observable :math:`(f_1(T),\dots,f_K(T))` is defined as
.. math:: S= \left( \left. \frac{\partial f_i(T)}{\partial t_{jk}} \right\rvert_{T_0} \right)
evaluated at a suitable transition matrix :math:`T_0`.
The sensitivity is the covariance matrix for the observable
.. math:: \text{cov}[f_{\alpha}(T),f_{\beta}(T)] = \sum_{i,j,k,l} s_{\alpha i j}
\text{cov}[t_{ij}, t_{kl}] s_{\beta kl}
"""
if issparse(C):
warnings.warn("Error-perturbation will be dense for sparse input")
C = C.toarray()
return dense.covariance.error_perturbation(C, S) | r"""Error perturbation for given sensitivity matrix.
Parameters
----------
C : (M, M) ndarray
Count matrix
S : (M, M) ndarray or (K, M, M) ndarray
Sensitivity matrix (for scalar observable) or sensitivity
tensor for vector observable
Returns
-------
X : float or (K, K) ndarray
error-perturbation (for scalar observables) or covariance matrix
(for vector-valued observable)
Notes
-----
**Scalar observable**
The sensitivity matrix :math:`S=(s_{ij})` of a scalar observable
:math:`f(T)` is defined as
.. math:: S= \left(\left. \frac{\partial f(T)}{\partial t_{ij}} \right \rvert_{T_0} \right)
evaluated at a suitable transition matrix :math:`T_0`.
The sensitivity is the variance of the observable
.. math:: \mathbb{V}(f)=\sum_{i,j,k,l} s_{ij} \text{cov}[t_{ij}, t_{kl}] s_{kl}
**Vector valued observable**
The sensitivity tensor :math:`S=(s_{ijk})` for a vector
valued observable :math:`(f_1(T),\dots,f_K(T))` is defined as
.. math:: S= \left( \left. \frac{\partial f_i(T)}{\partial t_{jk}} \right\rvert_{T_0} \right)
evaluated at a suitable transition matrix :math:`T_0`.
The sensitivity is the covariance matrix for the observable
.. math:: \text{cov}[f_{\alpha}(T),f_{\beta}(T)] = \sum_{i,j,k,l} s_{\alpha i j}
\text{cov}[t_{ij}, t_{kl}] s_{\beta kl} |
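A minimal usage sketch for the scalar-observable case, assuming error_perturbation is importable from the surrounding analysis module; the count and sensitivity matrices are toy values.
import numpy as np
C = np.array([[90, 10],
              [15, 85]])          # toy (M, M) count matrix
S = np.array([[0.5, -0.5],
              [-0.5, 0.5]])       # toy (M, M) sensitivity matrix of a scalar observable
var_f = error_perturbation(C, S)  # variance of f(T) induced by counting noise
print(var_f)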
async def disable(self, reason=None):
"""Enters maintenance mode
Parameters:
reason (str): Reason of disabling
Returns:
bool: ``True`` on success
"""
params = {"enable": True, "reason": reason}
response = await self._api.put("/v1/agent/maintenance", params=params)
return response.status == 200 | Enters maintenance mode
Parameters:
reason (str): Reason of disabling
Returns:
bool: ``True`` on success |
def get_cookies_for_class(session, class_name,
cookies_file=None,
username=None,
password=None):
"""
Get the cookies for the given class.
We do not validate the cookies if they are loaded from a cookies file
because this is intended for debugging purposes or if the coursera
authentication process has changed.
"""
if cookies_file:
cookies = find_cookies_for_class(cookies_file, class_name)
session.cookies.update(cookies)
logging.info('Loaded cookies from %s', cookies_file)
else:
cookies = get_cookies_from_cache(username)
session.cookies.update(cookies)
if validate_cookies(session, class_name):
logging.info('Already authenticated.')
else:
get_authentication_cookies(session, class_name, username, password)
write_cookies_to_cache(session.cookies, username) | Get the cookies for the given class.
We do not validate the cookies if they are loaded from a cookies file
because this is intended for debugging purposes or if the coursera
authentication process has changed. |
def register_chooser(self, chooser, **kwargs):
"""Adds a model chooser definition to the registry."""
if not issubclass(chooser, Chooser):
return self.register_simple_chooser(chooser, **kwargs)
self.choosers[chooser.model] = chooser(**kwargs)
return chooser | Adds a model chooser definition to the registry. |
def tz_convert(dt, to_tz, from_tz=None) -> str:
"""
Convert to tz
Args:
dt: date time
to_tz: to tz
from_tz: from tz - will be ignored if tz from dt is given
Returns:
str: date & time
Examples:
>>> dt_1 = pd.Timestamp('2018-09-10 16:00', tz='Asia/Hong_Kong')
>>> tz_convert(dt_1, to_tz='NY')
'2018-09-10 04:00:00-04:00'
>>> dt_2 = pd.Timestamp('2018-01-10 16:00')
>>> tz_convert(dt_2, to_tz='HK', from_tz='NY')
'2018-01-11 05:00:00+08:00'
>>> dt_3 = '2018-09-10 15:00'
>>> tz_convert(dt_3, to_tz='NY', from_tz='JP')
'2018-09-10 02:00:00-04:00'
"""
logger = logs.get_logger(tz_convert, level='info')
f_tz, t_tz = get_tz(from_tz), get_tz(to_tz)
from_dt = pd.Timestamp(str(dt), tz=f_tz)
logger.debug(f'converting {str(from_dt)} from {f_tz} to {t_tz} ...')
return str(pd.Timestamp(str(from_dt), tz=t_tz)) | Convert to tz
Args:
dt: date time
to_tz: to tz
from_tz: from tz - will be ignored if tz from dt is given
Returns:
str: date & time
Examples:
>>> dt_1 = pd.Timestamp('2018-09-10 16:00', tz='Asia/Hong_Kong')
>>> tz_convert(dt_1, to_tz='NY')
'2018-09-10 04:00:00-04:00'
>>> dt_2 = pd.Timestamp('2018-01-10 16:00')
>>> tz_convert(dt_2, to_tz='HK', from_tz='NY')
'2018-01-11 05:00:00+08:00'
>>> dt_3 = '2018-09-10 15:00'
>>> tz_convert(dt_3, to_tz='NY', from_tz='JP')
'2018-09-10 02:00:00-04:00' |
def _set_size_code(self):
"""Set the code for a size operation.
"""
if not self._op.startswith(self.SIZE):
self._size_code = None
return
if len(self._op) == len(self.SIZE):
self._size_code = self.SZ_EQ
else:
suffix = self._op[len(self.SIZE):]
self._size_code = self.SZ_MAPPING.get(suffix, None)
if self._size_code is None:
raise ValueError('invalid "{}" suffix "{}"'.format(self.SIZE, suffix)) | Set the code for a size operation. |
def get(self, path):
""" Get a transform from the cache that maps along *path*, which must
be a list of Transforms to apply in reverse order (last transform is
applied first).
Accessed items have their age reset to 0.
"""
key = tuple(map(id, path))
item = self._cache.get(key, None)
if item is None:
logger.debug("Transform cache miss: %s", key)
item = [0, self._create(path)]
self._cache[key] = item
item[0] = 0 # reset age for this item
# make sure the chain is up to date
#tr = item[1]
#for i, node in enumerate(path[1:]):
# if tr.transforms[i] is not node.transform:
# tr[i] = node.transform
return item[1] | Get a transform from the cache that maps along *path*, which must
be a list of Transforms to apply in reverse order (last transform is
applied first).
Accessed items have their age reset to 0. |
def doMove(self, orgresource, dstresource, dummy = 56184, overwrite = 'F', bShareFireCopy = 'false'):
"""DoMove
Args:
dummy: ???
orgresource: Path for a file which you want to move
dstresource: Destination path
bShareFireCopy: ???
Returns:
True: Move success
False: Move failed
"""
url = nurls['doMove']
data = {'userid': self.user_id,
'useridx': self.useridx,
'dummy': dummy,
'orgresource': orgresource,
'dstresource': dstresource,
'overwrite': overwrite,
'bShareFireCopy': bShareFireCopy,
}
r = self.session.post(url = url, data = data)
try:
j = json.loads(r.text)
except ValueError:
print('[*] doMove: could not parse server response')
return False
return self.resultManager(r.text) | DoMove
Args:
dummy: ???
orgresource: Path for a file which you want to move
dstresource: Destination path
bShareFireCopy: ???
Returns:
True: Move success
False: Move failed |
def home_wins(self):
"""
Returns an ``int`` of the number of games the home team won after the
conclusion of the game.
"""
try:
wins, losses = re.findall(r'\d+', self._home_record)
return int(wins)
except ValueError:
return 0 | Returns an ``int`` of the number of games the home team won after the
conclusion of the game. |
def from_string(cls, s, name=None, modules=None, active=None):
"""
Instantiate a REPP from a string.
Args:
name (str, optional): the name of the REPP module
modules (dict, optional): a mapping from identifiers to
REPP modules
active (iterable, optional): an iterable of default module
activations
"""
r = cls(name=name, modules=modules, active=active)
_parse_repp(s.splitlines(), r, None)
return r | Instantiate a REPP from a string.
Args:
name (str, optional): the name of the REPP module
modules (dict, optional): a mapping from identifiers to
REPP modules
active (iterable, optional): an iterable of default module
activations |
def magic_write(ofile, Recs, file_type):
"""
Parameters
_________
ofile : path to output file
Recs : list of dictionaries in MagIC format
file_type : MagIC table type (e.g., specimens)
Return :
[True,False] : True if successful
ofile : same as input
Effects :
writes a MagIC formatted file from Recs
"""
if len(Recs) < 1:
print('No records to write to file {}'.format(ofile))
return False, ""
if os.path.split(ofile)[0] != "" and not os.path.isdir(os.path.split(ofile)[0]):
os.mkdir(os.path.split(ofile)[0])
pmag_out = open(ofile, 'w+', errors="backslashreplace")
outstring = "tab \t" + file_type
outstring = outstring.strip("\n").strip(
"\r") + "\n" # make sure it's clean for Windows
pmag_out.write(outstring)
keystring = ""
keylist = []
for key in list(Recs[0].keys()):
keylist.append(key)
keylist.sort()
for key in keylist:
keystring = keystring + '\t' + key.strip()
keystring = keystring + '\n'
pmag_out.write(keystring[1:])
for Rec in Recs:
outstring = ""
for key in keylist:
try:
outstring = outstring + '\t' + str(Rec[key]).strip()
except KeyError:
if 'er_specimen_name' in list(Rec.keys()):
print(Rec['er_specimen_name'])
elif 'specimen' in list(Rec.keys()):
print(Rec['specimen'])
elif 'er_specimen_names' in list(Rec.keys()):
print('specimen names:', Rec['er_specimen_names'])
print("No data for %s" % key)
# just skip it:
outstring = outstring + "\t"
# raw_input()
outstring = outstring + '\n'
pmag_out.write(outstring[1:])
pmag_out.close()
print(len(Recs), ' records written to file ', ofile)
return True, ofile | Parameters
_________
ofile : path to output file
Recs : list of dictionaries in MagIC format
file_type : MagIC table type (e.g., specimens)
Return :
[True,False] : True if successful
ofile : same as input
Effects :
writes a MagIC formatted file from Recs |
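A minimal usage sketch with made-up records; the column names are illustrative rather than a complete MagIC specimens table.
recs = [
    {'specimen': 'sp01', 'sample': 'sa01', 'int_abs': '4.5e-05'},
    {'specimen': 'sp02', 'sample': 'sa01', 'int_abs': '3.9e-05'},
]
ok, outfile = magic_write('specimens.txt', recs, 'specimens')  # writes a tab-delimited MagIC file
print(ok, outfile)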
def occupied_by_sort(self, address):
"""
Check if an address belongs to any segment, and if yes, returns the sort of the segment
:param int address: The address to check
:return: Sort of the segment that occupies this address
:rtype: str
"""
idx = self._search(address)
if len(self._list) <= idx:
return None
if self._list[idx].start <= address < self._list[idx].end:
return self._list[idx].sort
if idx > 0 and address < self._list[idx - 1].end:
# TODO: It seems that this branch is never reached. Should it be removed?
return self._list[idx - 1].sort
return None | Check if an address belongs to any segment, and if yes, returns the sort of the segment
:param int address: The address to check
:return: Sort of the segment that occupies this address
:rtype: str |
def simBirth(self,which_agents):
'''
Makes new consumers for the given indices. Initialized variables include aNrm and pLvl, as
well as time variables t_age and t_cycle. Normalized assets and persistent income levels
are drawn from lognormal distributions given by aNrmInitMean and aNrmInitStd (etc).
Parameters
----------
which_agents : np.array(Bool)
Boolean array of size self.AgentCount indicating which agents should be "born".
Returns
-------
None
'''
# Get and store states for newly born agents
N = np.sum(which_agents) # Number of new consumers to make
aNrmNow_new = drawLognormal(N,mu=self.aNrmInitMean,sigma=self.aNrmInitStd,seed=self.RNG.randint(0,2**31-1))
self.pLvlNow[which_agents] = drawLognormal(N,mu=self.pLvlInitMean,sigma=self.pLvlInitStd,seed=self.RNG.randint(0,2**31-1))
self.aLvlNow[which_agents] = aNrmNow_new*self.pLvlNow[which_agents]
self.t_age[which_agents] = 0 # How many periods since each agent was born
self.t_cycle[which_agents] = 0 | Makes new consumers for the given indices. Initialized variables include aNrm and pLvl, as
well as time variables t_age and t_cycle. Normalized assets and persistent income levels
are drawn from lognormal distributions given by aNrmInitMean and aNrmInitStd (etc).
Parameters
----------
which_agents : np.array(Bool)
Boolean array of size self.AgentCount indicating which agents should be "born".
Returns
-------
None |
def loads(cls, s: str) -> 'Money':
"""Parse from a string representation (repr)"""
try:
currency, amount = s.strip().split()
return cls(amount, currency)
except ValueError as err:
raise ValueError("failed to parse string "
" '{}': {}".format(s, err)) | Parse from a string representation (repr) |
def main(arguments=None):
"""
*The main function used when ``cl_utils.py`` is run as a single script from the cl, or when installed as a cl command*
"""
# setup the command-line util settings
su = tools(
arguments=arguments,
docString=__doc__,
logLevel="ERROR",
options_first=True
)
arguments, settings, log, dbConn = su.setup()
# tab completion for raw_input
readline.set_completer_delims(' \t\n;')
readline.parse_and_bind("tab: complete")
readline.set_completer(tab_complete)
# unpack remaining cl arguments using `exec` to setup the variable names
# automatically
for arg, val in arguments.iteritems():
if arg[0] == "-":
varname = arg.replace("-", "") + "Flag"
else:
varname = arg.replace("<", "").replace(">", "")
if isinstance(val, str) or isinstance(val, unicode):
exec(varname + " = '%s'" % (val,))
else:
exec(varname + " = %s" % (val,))
if arg == "--dbConn":
dbConn = val
log.debug('%s = %s' % (varname, val,))
## START LOGGING ##
startTime = times.get_now_sql_datetime()
log.info(
'--- STARTING TO RUN THE cl_utils.py AT %s' %
(startTime,))
# set options interactively if user requests
if "interactiveFlag" in locals() and interactiveFlag:
# load previous settings
moduleDirectory = os.path.dirname(__file__) + "/resources"
pathToPickleFile = "%(moduleDirectory)s/previousSettings.p" % locals()
try:
with open(pathToPickleFile):
pass
previousSettingsExist = True
except:
previousSettingsExist = False
previousSettings = {}
if previousSettingsExist:
previousSettings = pickle.load(open(pathToPickleFile, "rb"))
# x-raw-input
# x-boolean-raw-input
# x-raw-input-with-default-value-from-previous-settings
# save the most recently used requests
pickleMeObjects = []
pickleMe = {}
theseLocals = locals()
for k in pickleMeObjects:
pickleMe[k] = theseLocals[k]
pickle.dump(pickleMe, open(pathToPickleFile, "wb"))
# call the worker function
# x-if-settings-or-database-credientials
if cone and filelist:
import codecs
pathToReadFile = pathToCoordinateList
readFile = codecs.open(pathToReadFile, encoding='utf-8', mode='r')
listOfCoordinates = []
for line in readFile.readlines():
line = line.strip()
[ra, dec] = line.split()
listOfCoordinates.append(str(ra) + " " + str(dec))
search = conesearch(
log=log,
radiusArcsec=radiusArcsec,
nearestOnly=nearestFlag,
unclassified=unclassifiedFlag,
listOfCoordinates=listOfCoordinates,
outputFilePath=outPutFile,
verbose=verboseFlag,
redshift=redshiftFlag)
elif cone:
search = conesearch(
log=log,
ra=ra,
dec=dec,
radiusArcsec=radiusArcsec,
nearestOnly=nearestFlag,
unclassified=unclassifiedFlag,
outputFilePath=outPutFile,
verbose=verboseFlag,
redshift=redshiftFlag
)
elif obj:
search = namesearch(
log=log,
names=objectName,
verbose=verboseFlag,
outputFilePath=outPutFile
)
search.get()
if "dbConn" in locals() and dbConn:
dbConn.commit()
dbConn.close()
## FINISH LOGGING ##
endTime = times.get_now_sql_datetime()
runningTime = times.calculate_time_difference(startTime, endTime)
log.info('-- FINISHED ATTEMPT TO RUN THE cl_utils.py AT %s (RUNTIME: %s) --' %
(endTime, runningTime, ))
return | *The main function used when ``cl_utils.py`` is run as a single script from the cl, or when installed as a cl command* |
def create_response_object(self, service_id, version_number, name, status="200", response="OK", content="", request_condition=None, cache_condition=None):
"""Creates a new Response Object."""
body = self._formdata({
"name": name,
"status": status,
"response": response,
"content": content,
"request_condition": request_condition,
"cache_condition": cache_condition,
}, FastlyResponseObject.FIELDS)
content = self._fetch("/service/%s/version/%d/response_object" % (service_id, version_number), method="POST", body=body)
return FastlyResponseObject(self, content) | Creates a new Response Object. |
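A hypothetical call against this Fastly API wrapper; the client instance, service ID and version number are placeholders.
robots = api.create_response_object(
    service_id='SU1Z0isxPaozGVKXdv0eY',
    version_number=3,
    name='robots-ok',
    status='200',
    response='OK',
    content='User-agent: *\nDisallow:',
)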
def guessFormat(self):
'''return quality score format -
might return several if ambiguous.'''
c = [ord(x) for x in self.quals]
mi, ma = min(c), max(c)
r = []
for entry_format, v in iteritems(RANGES):
m1, m2 = v
if mi >= m1 and ma < m2:
r.append(entry_format)
return r | return quality score format -
might return several if ambiguous. |
def export_sleep_stats(self, filename, lights_off, lights_on):
"""Create CSV with sleep statistics.
Parameters
----------
filename: str
Filename for csv export
lights_off: float
Initial time when sleeper turns off the light (or their phone) to
go to sleep, in seconds from recording start
lights_on: float
Final time when sleeper rises from bed after sleep, in seconds from
recording start
Returns
-------
tuple of float, or None
If there are no epochs scored as sleep, returns None. Otherwise,
returns the sleep onset latency, WASO and total sleep time, for
testing purposes.
Note
----
Total dark time and sleep efficiency do NOT subtract epochs marked as
Undefined or Unknown.
"""
epochs = self.get_epochs()
ep_starts = [i['start'] for i in epochs]
hypno = [i['stage'] for i in epochs]
n_ep_per_min = 60 / self.epoch_length
first = {}
latency = {}
for stage in ['NREM1', 'NREM2', 'NREM3', 'REM']:
first[stage] = next(((i, j) for i, j in enumerate(epochs) if \
j['stage'] == stage), None)
if first[stage] is not None:
latency[stage] = (first[stage][1]['start'] -
lights_off) / 60
else:
first[stage] = nan
latency[stage] = nan
idx_loff = asarray([abs(x - lights_off) for x in ep_starts]).argmin()
idx_lon = asarray([abs(x - lights_on) for x in ep_starts]).argmin()
duration = {}
for stage in ['NREM1', 'NREM2', 'NREM3', 'REM', 'Wake', 'Movement',
'Artefact']:
duration[stage] = hypno[idx_loff:idx_lon].count(
stage) / n_ep_per_min
slp_onset = sorted(first.values(), key=lambda x: x[1]['start'])[0]
wake_up = next((len(epochs) - i, j) for i, j in enumerate(
epochs[::-1]) if j['stage'] in ['NREM1', 'NREM2', 'NREM3',
'REM'])
total_dark_time = (lights_on - lights_off) / 60
#slp_period_time = (wake_up[1]['start'] - slp_onset[1]['start']) / 60
slp_onset_lat = (slp_onset[1]['start'] - lights_off) / 60
waso = hypno[slp_onset[0]:wake_up[0]].count('Wake') / n_ep_per_min
wake = waso + slp_onset_lat
total_slp_period = sum((waso, duration['NREM1'], duration['NREM2'],
duration['NREM3'], duration['REM']))
total_slp_time = total_slp_period - waso
slp_eff = total_slp_time / total_dark_time
switch = self.switch()
slp_frag = self.slp_frag()
dt_format = '%d/%m/%Y %H:%M:%S'
loff_str = (self.start_time + timedelta(seconds=lights_off)).strftime(
dt_format)
lon_str = (self.start_time + timedelta(seconds=lights_on)).strftime(
dt_format)
slp_onset_str = (self.start_time + timedelta(
seconds=slp_onset[1]['start'])).strftime(dt_format)
wake_up_str = (self.start_time + timedelta(
seconds=wake_up[1]['start'])).strftime(dt_format)
slcnrem5 = self.latency_to_consolidated(lights_off, duration=5,
stage=['NREM2', 'NREM3'])
slcnrem10 = self.latency_to_consolidated(lights_off, duration=10,
stage=['NREM2', 'NREM3'])
slcn35 = self.latency_to_consolidated(lights_off, duration=5,
stage=['NREM3'])
slcn310 = self.latency_to_consolidated(lights_off, duration=10,
stage=['NREM3'])
cycles = self.get_cycles() if self.get_cycles() else []
cyc_stats = []
for i, cyc in enumerate(cycles):
one_cyc = {}
cyc_hypno = [x['stage'] for x in self.get_epochs(time=cyc)]
one_cyc['duration'] = {}
for stage in ['NREM1', 'NREM2', 'NREM3', 'REM', 'Wake', 'Movement',
'Artefact']:
one_cyc['duration'][stage] = cyc_hypno.count(stage) # in epochs
one_cyc['tst'] = sum([one_cyc['duration'][stage] for stage in [
'NREM1', 'NREM2', 'NREM3', 'REM']])
one_cyc['tsp'] = one_cyc['tst'] + one_cyc['duration']['Wake']
one_cyc['slp_eff'] = one_cyc['tst'] / one_cyc['tsp']
one_cyc['switch'] = self.switch(time=cyc)
one_cyc['slp_frag'] = self.slp_frag(time=cyc)
cyc_stats.append(one_cyc)
with open(filename, 'w', newline='') as f:
lg.info('Writing to ' + str(filename))
cf = writer(f)
cf.writerow(['Wonambi v{}'.format(__version__)])
cf.writerow(['Variable', 'Acronym',
'Unit 1', 'Value 1',
'Unit 2', 'Value 2',
'Formula'])
cf.writerow(['Lights off', 'LOFF',
'dd/mm/yyyy HH:MM:SS', loff_str,
'seconds from recording start', lights_off,
'marker'])
cf.writerow(['Lights on', 'LON',
'dd/mm/yyyy HH:MM:SS', lon_str,
'seconds from recording start', lights_on,
'marker'])
cf.writerow(['Sleep onset', 'SO',
'dd/mm/yyyy HH:MM:SS', slp_onset_str,
'seconds from recording start', slp_onset[1]['start'],
'first sleep epoch (N1 or N2) - LOFF'])
cf.writerow(['Time of last awakening', '',
'dd/mm/yyyy HH:MM:SS', wake_up_str,
'seconds from recording start', wake_up[1]['start'],
'end time of last epoch of N1, N2, N3 or REM'])
cf.writerow(['Total dark time (Time in bed)', 'TDT (TIB)',
'Epochs', total_dark_time * n_ep_per_min,
'Minutes', total_dark_time,
'LON - LOFF'])
cf.writerow(['Sleep latency', 'SL',
'Epochs', slp_onset_lat * n_ep_per_min,
'Minutes', slp_onset_lat,
'LON - SO'])
cf.writerow(['Wake', 'W',
'Epochs', wake * n_ep_per_min,
'Minutes', wake,
'total wake duration between LOFF and LON'])
cf.writerow(['Wake after sleep onset', 'WASO',
'Epochs', waso * n_ep_per_min,
'Minutes', waso,
'W - SL'])
cf.writerow(['N1 duration', '',
'Epochs', duration['NREM1'] * n_ep_per_min,
'Minutes', duration['NREM1'],
'total N1 duration between LOFF and LON'])
cf.writerow(['N2 duration', '',
'Epochs', duration['NREM2'] * n_ep_per_min,
'Minutes', duration['NREM2'],
'total N2 duration between LOFF and LON'])
cf.writerow(['N3 duration', '',
'Epochs', duration['NREM3'] * n_ep_per_min,
'Minutes', duration['NREM3'],
'total N3 duration between LOFF and LON'])
cf.writerow(['REM duration', '',
'Epochs', duration['REM'] * n_ep_per_min,
'Minutes', duration['REM'],
'total REM duration between LOFF and LON'])
cf.writerow(['Artefact duration', '',
'Epochs',
duration['Artefact'] * n_ep_per_min,
'Minutes', duration['Artefact'],
'total Artefact duration between LOFF and LON'])
cf.writerow(['Movement duration', '',
'Epochs',
duration['Movement'] * n_ep_per_min,
'Minutes', duration['Movement'],
'total Movement duration between LOFF and LON'])
cf.writerow(['Total sleep period', 'TSP',
'Epochs', total_slp_period * n_ep_per_min,
'Minutes', total_slp_period,
'WASO + N1 + N2 + N3 + REM'])
cf.writerow(['Total sleep time', 'TST',
'Epochs', total_slp_time * n_ep_per_min,
'Minutes', total_slp_time,
'N1 + N2 + N3 + REM'])
cf.writerow(['Sleep efficiency', 'SE',
'%', slp_eff * 100,
'', '',
'TST / TDT'])
cf.writerow(['W % TSP', '',
'%', waso * 100 / total_slp_period,
'', '',
'WASO / TSP'])
cf.writerow(['N1 % TSP', '',
'%', duration['NREM1'] * 100 / total_slp_period,
'', '',
'N1 / TSP'])
cf.writerow(['N2 % TSP', '',
'%', duration['NREM2'] * 100 / total_slp_period,
'', '',
'N2 / TSP'])
cf.writerow(['N3 % TSP', '',
'%', duration['NREM3'] * 100 / total_slp_period,
'', '',
'N3 / TSP'])
cf.writerow(['REM % TSP', '',
'%', duration['REM'] * 100 / total_slp_period,
'', '',
'REM / TSP'])
cf.writerow(['N1 % TST', '',
'%', duration['NREM1'] * 100 / total_slp_time,
'', '',
'N1 / TST'])
cf.writerow(['N2 % TST', '',
'%', duration['NREM2'] * 100 / total_slp_time,
'', '',
'N2 / TST'])
cf.writerow(['N3 % TST', '',
'%', duration['NREM3'] * 100 / total_slp_time,
'', '',
'N3 / TST'])
cf.writerow(['REM % TST', '',
'%', duration['REM'] * 100 / total_slp_time,
'', '',
'REM / TST'])
cf.writerow(['Switch', '',
'N', switch,
'', '',
'number of stage shifts'])
cf.writerow(['Switch %', '',
'% epochs',
switch * 100 / total_slp_period / n_ep_per_min,
'% minutes', switch * 100 / total_slp_period,
'switch / TSP'])
cf.writerow(['Sleep fragmentation', '',
'N', slp_frag,
'', '',
('number of shifts to a lighter stage '
'(W > N1 > N2 > N3; W > N1 > REM)')])
cf.writerow(['Sleep fragmentation index', 'SFI',
'% epochs',
slp_frag * 100 / total_slp_time / n_ep_per_min,
'% minutes', slp_frag * 100 / total_slp_time,
'sleep fragmentation / TST'])
cf.writerow(['Sleep latency to N1', 'SLN1',
'Epochs', latency['NREM1'] * n_ep_per_min,
'Minutes', latency['NREM1'],
'first N1 epoch - LOFF'])
cf.writerow(['Sleep latency to N2', 'SLN2',
'Epochs', latency['NREM2'] * n_ep_per_min,
'Minutes', latency['NREM2'],
'first N2 epoch - LOFF'])
cf.writerow(['Sleep latency to N3', 'SLN3',
'Epochs', latency['NREM3'] * n_ep_per_min,
'Minutes', latency['NREM3'],
'first N3 epoch - LOFF'])
cf.writerow(['Sleep latency to REM', 'SLREM',
'Epochs', latency['REM'] * n_ep_per_min,
'Minutes', latency['REM'],
'first REM epoch - LOFF'])
cf.writerow(['Sleep latency to consolidated NREM, 5 min',
'SLCNREM5',
'Epochs', slcnrem5 * n_ep_per_min,
'Minutes', slcnrem5,
('start of first uninterrupted 5-minute period of '
'N2 and/or N3 - LOFF')])
cf.writerow(['Sleep latency to consolidated NREM, 10 min',
'SLCNREM10',
'Epochs', slcnrem10 * n_ep_per_min,
'Minutes', slcnrem10,
('start of first uninterrupted 10-minute period of '
'N2 and/or N3 - LOFF')])
cf.writerow(['Sleep latency to consolidated N3, 5 min', 'SLCN35',
'Epochs', slcn35 * n_ep_per_min,
'Minutes', slcn35,
('start of first uninterrupted 5-minute period of '
'N3 - LOFF')])
cf.writerow(['Sleep latency to consolidated N3, 10 min', 'SLCN310',
'Epochs', slcn310 * n_ep_per_min,
'Minutes', slcn310,
('start of first uninterrupted 10-minute period of '
'N3 - LOFF')])
for i in range(len(cycles)):
one_cyc = cyc_stats[i]
cf.writerow([''])
cf.writerow([f'Cycle {i + 1}'])
cf.writerow(['Cycle % duration', '',
'%', (one_cyc['tsp'] * 100 /
total_slp_period / n_ep_per_min),
'', '',
'cycle TSP / night TSP'])
for stage in ['Wake', 'NREM1', 'NREM2', 'NREM3', 'REM',
'Artefact', 'Movement']:
cf.writerow([f'{stage} (c{i + 1})', '',
'Epochs', one_cyc['duration'][stage],
'Minutes',
one_cyc['duration'][stage] / n_ep_per_min,
f'total {stage} duration in cycle {i + 1}'])
cf.writerow([f'Total sleep period (c{i + 1})',
f'TSP (c{i + 1})',
'Epochs', one_cyc['tsp'],
'Minutes', one_cyc['tsp'] / n_ep_per_min,
f'Wake + N1 + N2 + N3 + REM in cycle {i + 1}'])
cf.writerow([f'Total sleep time (c{i + 1})', f'TST (c{i + 1})',
'Epochs', one_cyc['tst'],
'Minutes', one_cyc['tst'] / n_ep_per_min,
f'N1 + N2 + N3 + REM in cycle {i + 1}'])
cf.writerow([f'Sleep efficiency (c{i + 1})', f'SE (c{i + 1})',
'%', one_cyc['slp_eff'] * 100,
'', '',
f'TST / TSP in cycle {i + 1}'])
for denom in ['TSP', 'TST']:
for stage in ['Wake', 'NREM1', 'NREM2', 'NREM3', 'REM']:
cf.writerow([f'{stage} % {denom} (c{i + 1})', '',
'%', (one_cyc['duration'][stage] /
one_cyc[denom.lower()]) * 100,
'', '',
f'{stage} / {denom} in cycle {i + 1}'])
cf.writerow([f'Switch (c{i + 1})', '',
'N', one_cyc['switch'], '', '',
f'number of stage shifts in cycle {i + 1}'])
cf.writerow([f'Switch % (c{i + 1})', '',
'% epochs', (one_cyc['switch'] * 100 /
one_cyc['tsp']),
'% minutes', (one_cyc['switch'] * 100 *
n_ep_per_min / one_cyc['tsp']),
f'switch / TSP in cycle {i + 1}'])
cf.writerow([f'Sleep fragmentation (c{i + 1})', '',
'N', one_cyc['slp_frag'], '', '',
'number of shifts to a lighter stage in cycle '
f'{i + 1}'])
cf.writerow([f'Sleep fragmentation index (c{i + 1})',
f'SFI (c{i + 1})',
'% epochs', (one_cyc['slp_frag'] * 100 /
one_cyc['tsp']),
'% minutes', (one_cyc['slp_frag'] * 100 *
n_ep_per_min / one_cyc['tsp']),
f'sleep fragmentation / TSP in cycle {i + 1}'])
return slp_onset_lat, waso, total_slp_time | Create CSV with sleep statistics.
Parameters
----------
filename: str
Filename for csv export
lights_off: float
Initial time when sleeper turns off the light (or their phone) to
go to sleep, in seconds from recording start
lights_on: float
Final time when sleeper rises from bed after sleep, in seconds from
recording start
Returns
-------
tuple of float, or None
If there are no epochs scored as sleep, returns None. Otherwise,
returns the sleep onset latency, WASO and total sleep time, for
testing purposes.
Note
----
Total dark time and sleep efficiency do NOT subtract epochs marked as
Undefined or Unknown. |
def __clear_break(self, pid, address):
"""
Used by L{dont_break_at} and L{dont_stalk_at}.
@type pid: int
@param pid: Process global ID.
@type address: int or str
@param address:
Memory address of code instruction to break at. It can be an
integer value for the actual address or a string with a label
to be resolved.
"""
if type(address) not in (int, long):
unknown = True
label = address
try:
deferred = self.__deferredBP[pid]
del deferred[label]
unknown = False
except KeyError:
## traceback.print_last() # XXX DEBUG
pass
aProcess = self.system.get_process(pid)
try:
address = aProcess.resolve_label(label)
if not address:
raise Exception()
except Exception:
## traceback.print_last() # XXX DEBUG
if unknown:
msg = ("Can't clear unknown code breakpoint"
" at %s in process ID %d")
msg = msg % (label, pid)
warnings.warn(msg, BreakpointWarning)
return
if self.has_code_breakpoint(pid, address):
self.erase_code_breakpoint(pid, address) | Used by L{dont_break_at} and L{dont_stalk_at}.
@type pid: int
@param pid: Process global ID.
@type address: int or str
@param address:
Memory address of code instruction to break at. It can be an
integer value for the actual address or a string with a label
to be resolved. |
async def play(self, author, text_channel, query, index=None, stop_current=False, shuffle=False):
"""
The play command
Args:
author (discord.Member): The member that called the command
text_channel (discord.Channel): The channel where the command was called
query (str): The argument that was passed with the command
index (str): Whether to play next or at the end of the queue
stop_current (bool): Whether to stop the currently playing song
shuffle (bool): Whether to shuffle the queue after starting
"""
if self.state == 'off':
self.state = 'starting'
self.prev_queue = []
await self.set_topic("")
# Init the music player
await self.msetup(text_channel)
# Queue the song
await self.enqueue(query, index, stop_current, shuffle)
# Connect to voice
await self.vsetup(author)
# Mark as 'ready' if everything is ok
self.state = 'ready' if self.mready and self.vready else 'off'
else:
# Queue the song
await self.enqueue(query, index, stop_current, shuffle)
if self.state == 'ready':
if self.streamer is None:
await self.vplay() | The play command
Args:
author (discord.Member): The member that called the command
text_channel (discord.Channel): The channel where the command was called
query (str): The argument that was passed with the command
index (str): Whether to play next or at the end of the queue
stop_current (bool): Whether to stop the currently playing song
shuffle (bool): Whether to shuffle the queue after starting |
def vote_cast(vote: Vote, choice_index: int, inputs: dict,
change_address: str) -> bytes:
'''vote cast transaction'''
network_params = net_query(vote.deck.network)
vote_cast_addr = vote.vote_choice_address[choice_index]
tx_fee = network_params.min_tx_fee # settle for min tx fee for now
for utxo in inputs['utxos']:
utxo['txid'] = unhexlify(utxo['txid'])
utxo['scriptSig'] = unhexlify(utxo['scriptSig'])
outputs = [
{"redeem": 0.01, "outputScript": transactions.monosig_script(vote_cast_addr)},
{"redeem": float(inputs['total']) - float(tx_fee) - float(0.01),
"outputScript": transactions.monosig_script(change_address)
}]
return transactions.make_raw_transaction(inputs['utxos'], outputs) | vote cast transaction |
def _generate_style(self):
"""
Create new Style instance.
(We don't want to do this on every key press, because each time the
renderer receives a new style class, he will redraw everything.)
"""
return generate_style(self.code_styles[self._current_code_style_name],
self.ui_styles[self._current_ui_style_name]) | Create new Style instance.
(We don't want to do this on every key press, because each time the
renderer receives a new style class, he will redraw everything.) |
def StreamingCommand(cls, usb, service, command='', timeout_ms=None):
"""One complete set of USB packets for a single command.
Sends service:command in a new connection, reading the data for the
response. All the data is held in memory, large responses will be slow and
can fill up memory.
Args:
usb: USB device handle with BulkRead and BulkWrite methods.
service: The service on the device to talk to.
command: The command to send to the service.
timeout_ms: Timeout for USB packets, in milliseconds.
Raises:
InterleavedDataError: Multiple streams running over usb.
InvalidCommandError: Got an unexpected response command.
Yields:
The responses from the service.
"""
if not isinstance(command, bytes):
command = command.encode('utf8')
connection = cls.Open(
usb, destination=b'%s:%s' % (service, command),
timeout_ms=timeout_ms)
for data in connection.ReadUntilClose():
yield data.decode('utf8') | One complete set of USB packets for a single command.
Sends service:command in a new connection, reading the data for the
response. All the data is held in memory, large responses will be slow and
can fill up memory.
Args:
usb: USB device handle with BulkRead and BulkWrite methods.
service: The service on the device to talk to.
command: The command to send to the service.
timeout_ms: Timeout for USB packets, in milliseconds.
Raises:
InterleavedDataError: Multiple streams running over usb.
InvalidCommandError: Got an unexpected response command.
Yields:
The responses from the service. |
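A hedged usage sketch in the style of python-adb; the class owning this method, the USB handle and the shell command are illustrative stand-ins.
for chunk in AdbMessage.StreamingCommand(usb_handle, b'shell', 'echo hello'):
    print(chunk, end='')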
def get_published_events(self, process=True) -> List[Event]:
"""Get a list of published (pending) events.
Return a list of Event objects which have been published
and are therefore pending to be processed. If the process argument
is set to true, any events returned from this method will also be
marked as processed by moving them to the processed events queue.
This method is intended to be used either to print the list of
pending published events, or also to recover from events
missed by the get() method. The latter of these use cases may be needed
for recovering when a subscriber drops out.
Args:
process (bool): If true, also move the events to the Processed
event queue.
Return:
list[Events], list of Event objects
"""
LOG.debug('Getting published events (%s)', self._pub_key)
if process:
LOG.debug('Marking returned published events as processed.')
DB.watch(self._pub_key, pipeline=True)
event_ids = DB.get_list(self._pub_key, pipeline=True)
if event_ids:
DB.delete(self._pub_key, pipeline=True)
DB.append_to_list(self._processed_key, *event_ids,
pipeline=True)
DB.execute()
else:
event_ids = DB.get_list(self._pub_key)
events = []
for event_id in event_ids[::-1]:
event_str = DB.get_hash_value(self._data_key, event_id)
event_dict = ast.literal_eval(event_str)
event_dict['id'] = event_id
event = Event.from_config(event_dict)
LOG.debug('Loaded event: %s (%s)', event.id, event.type)
events.append(event)
return events | Get a list of published (pending) events.
Return a list of Event objects which have been published
and are therefore pending to be processed. If the process argument
is set to true, any events returned from this method will also be
marked as processed by moving them to the processed events queue.
This method is intended to be used either to print the list of
pending published events, or also to recover from events
missed by the get() method. The latter of these use cases may be needed
for recovering when a subscriber drops out.
Args:
process (bool): If true, also move the events to the Processed
event queue.
Return:
list[Events], list of Event objects |
def move_dir(
src_fs, # type: Union[Text, FS]
src_path, # type: Text
dst_fs, # type: Union[Text, FS]
dst_path, # type: Text
workers=0, # type: int
):
# type: (...) -> None
"""Move a directory from one filesystem to another.
Arguments:
src_fs (FS or str): Source filesystem (instance or URL).
src_path (str): Path to a directory on ``src_fs``
dst_fs (FS or str): Destination filesystem (instance or URL).
dst_path (str): Path to a directory on ``dst_fs``.
workers (int): Use `worker` threads to copy data, or ``0`` (default) for
a single-threaded copy.
"""
def src():
return manage_fs(src_fs, writeable=False)
def dst():
return manage_fs(dst_fs, create=True)
with src() as _src_fs, dst() as _dst_fs:
with _src_fs.lock(), _dst_fs.lock():
_dst_fs.makedir(dst_path, recreate=True)
copy_dir(_src_fs, src_path, _dst_fs, dst_path, workers=workers)
_src_fs.removetree(src_path) | Move a directory from one filesystem to another.
Arguments:
src_fs (FS or str): Source filesystem (instance or URL).
src_path (str): Path to a directory on ``src_fs``
dst_fs (FS or str): Destination filesystem (instance or URL).
dst_path (str): Path to a directory on ``dst_fs``.
workers (int): Use `worker` threads to copy data, or ``0`` (default) for
a single-threaded copy. |
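A minimal sketch using in-memory filesystems, assuming the PyFilesystem2 API this helper is written against.
from fs.memoryfs import MemoryFS
src = MemoryFS()
src.makedirs('reports/2019')
src.writetext('reports/2019/summary.txt', 'hello')
dst = MemoryFS()
move_dir(src, 'reports', dst, 'reports')
print(dst.listdir('reports'))   # ['2019']
print(src.exists('reports'))    # False: the source tree was removed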
def get(self,url,
headers=None,
token=None,
data=None,
return_json=True,
default_headers=True,
quiet=False):
'''get will use requests to get a particular url
'''
bot.debug("GET %s" %url)
return self._call(url,
headers=headers,
func=requests.get,
data=data,
return_json=return_json,
default_headers=default_headers,
quiet=quiet) | get will use requests to get a particular url |
def m_c(mcmc, scale, f, alphasMZ=0.1185, loop=3):
r"""Get running c quark mass in the MSbar scheme at the scale `scale`
in the theory with `f` dynamical quark flavours starting from $m_c(m_c)$"""
if scale == mcmc:
return mcmc # nothing to do
_sane(scale, f)
crd = rundec.CRunDec()
alphas_mc = alpha_s(mcmc, 4, alphasMZ=alphasMZ, loop=loop)
if f == 4:
alphas_scale = alpha_s(scale, f, alphasMZ=alphasMZ, loop=loop)
return crd.mMS2mMS(mcmc, alphas_mc, alphas_scale, f, loop)
elif f == 3:
crd.nfMmu.Mth = 1.3
crd.nfMmu.muth = 1.3
crd.nfMmu.nf = 4
return crd.mH2mL(mcmc, alphas_mc, mcmc, crd.nfMmu, scale, loop)
elif f == 5:
crd.nfMmu.Mth = 4.8
crd.nfMmu.muth = 4.8
crd.nfMmu.nf = 5
return crd.mL2mH(mcmc, alphas_mc, mcmc, crd.nfMmu, scale, loop)
else:
raise ValueError("Invalid input: f={}, scale={}".format(f, scale)) | r"""Get running c quark mass in the MSbar scheme at the scale `scale`
in the theory with `f` dynamical quark flavours starting from $m_c(m_c)$ |
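A minimal usage sketch, assuming the CRunDec bindings imported above are installed; the numerical inputs are illustrative.
mc_at_2gev = m_c(mcmc=1.27, scale=2.0, f=4)   # MSbar charm mass run up to mu = 2 GeV
print(mc_at_2gev)                             # roughly 1.1 GeV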
def Save(self, token=None):
"""Generate a histogram object and store in the specified attribute."""
graph_series_by_label = {}
for active_time in self.active_days:
for label in self.categories[active_time]:
graphs_for_label = graph_series_by_label.setdefault(
label, rdf_stats.ClientGraphSeries(report_type=self._report_type))
graph = rdf_stats.Graph(title="%s day actives for %s label" %
(active_time, label))
for k, v in sorted(iteritems(self.categories[active_time][label])):
graph.Append(label=k, y_value=v)
graphs_for_label.graphs.Append(graph)
for label, graph_series in iteritems(graph_series_by_label):
client_report_utils.WriteGraphSeries(graph_series, label, token=token) | Generate a histogram object and store in the specified attribute. |
def add_cli_to_bel(main: click.Group) -> click.Group: # noqa: D202
"""Add several command to main :mod:`click` function related to export to BEL."""
@main.command()
@click.option('-o', '--output', type=click.File('w'), default=sys.stdout)
@click.option('-f', '--fmt', default='bel', show_default=True, help='BEL export format')
@click.pass_obj
def write(manager: BELManagerMixin, output: TextIO, fmt: str):
"""Write as BEL Script."""
graph = manager.to_bel()
graph.serialize(file=output, fmt=fmt)
click.echo(graph.summary_str())
return main | Add several command to main :mod:`click` function related to export to BEL. |
async def update(self):
'''
reload all cached information
|coro|
Notes
-----
This is a slow process, and will remove the cache before updating.
Thus it is recommended to use the `*_force` properties, which will
only update the cache after data is retrieved.
'''
keys = self.extras.keys()
self.extras = {}
for key in keys:
try:
func = getattr(self, key, None)
if callable(func):
func()
except:
pass | reload all cached information
|coro|
Notes
-----
This is a slow process, and will remove the cache before updating.
Thus it is recommended to use the `*_force` properties, which will
only update the cache after data is retrieved. |
def find_central_module(self): # type: () -> Optional[str]
"""
Get the module that is the sole module, or the module
that matches the package name/version
:return:
"""
# find modules.
mf = ModuleFinder(self.file_opener)
candidates = mf.find_by_any_method()
sub_modules = []
root_modules = []
for candidate in candidates:
if "." in candidate:
sub_modules.append(candidate)
else:
root_modules.append(candidate)
candidates = root_modules
# remove junk. Junk only has meaning in the sense of finding the central module.
candidates = self.remove_likely_non_central(candidates)
if len(candidates) == 1:
return candidates[0]
# see if there is 1 out of the many with same name pkg_foo, module_foo
if self.package_name:
if self.package_name in candidates:
return self.package_name
# I don't understand the _ to - transformations.
if self.package_name:
if self.package_name.replace("-", "_") in candidates:
return self.package_name.replace("-", "_")
if self.package_name:
if self.package_name.replace("-", "") in candidates:
return self.package_name.replace("-", "")
if self.package_name:
if self.package_name.replace("_", "") in candidates:
return self.package_name.replace("_", "")
# see if there is 1 out of the many with version in sync- pkg_foo v1.2.3, module_bar v1.2.3
# TODO:
return None | Get the module that is the sole module, or the module
that matches the package name/version
:return: |
def _clones(self):
"""Yield all machines under this pool"""
vbox = VirtualBox()
machines = []
for machine in vbox.machines:
if machine.name == self.machine_name:
continue
if machine.name.startswith(self.machine_name):
machines.append(machine)
return machines | Return the list of machines under this pool
def _get_prepare_env(self, script, job_descriptor, inputs, outputs, mounts):
"""Return a dict with variables for the 'prepare' action."""
# Add the _SCRIPT_REPR with the repr(script) contents
# Add the _META_YAML_REPR with the repr(meta) contents
# Add variables for directories that need to be created, for example:
# DIR_COUNT: 2
# DIR_0: /mnt/data/input/gs/bucket/path1/
# DIR_1: /mnt/data/output/gs/bucket/path2
# List the directories in sorted order so that they are created in that
# order. This is primarily to ensure that permissions are set as we create
# each directory.
# For example:
# mkdir -m 777 -p /root/first/second
# mkdir -m 777 -p /root/first
# *may* not actually set 777 on /root/first
docker_paths = sorted([
var.docker_path if var.recursive else os.path.dirname(var.docker_path)
for var in inputs | outputs | mounts
if var.value
])
env = {
_SCRIPT_VARNAME: repr(script.value),
_META_YAML_VARNAME: repr(job_descriptor.to_yaml()),
'DIR_COUNT': str(len(docker_paths))
}
for idx, path in enumerate(docker_paths):
env['DIR_{}'.format(idx)] = os.path.join(providers_util.DATA_MOUNT_POINT,
path)
return env | Return a dict with variables for the 'prepare' action. |
def update(self, friendly_name=values.unset, target_workers=values.unset,
reservation_activity_sid=values.unset,
assignment_activity_sid=values.unset,
max_reserved_workers=values.unset, task_order=values.unset):
"""
Update the TaskQueueInstance
:param unicode friendly_name: Human readable description of this TaskQueue
:param unicode target_workers: A string describing the Worker selection criteria for any Tasks that enter this TaskQueue.
:param unicode reservation_activity_sid: ActivitySID that will be assigned to Workers when they are reserved for a task from this TaskQueue.
:param unicode assignment_activity_sid: ActivitySID that will be assigned to Workers when they are assigned a task from this TaskQueue.
:param unicode max_reserved_workers: The maximum amount of workers to create reservations for the assignment of a task while in this queue.
:param TaskQueueInstance.TaskOrder task_order: TaskOrder will determine which order the Tasks will be assigned to Workers.
:returns: Updated TaskQueueInstance
:rtype: twilio.rest.taskrouter.v1.workspace.task_queue.TaskQueueInstance
"""
return self._proxy.update(
friendly_name=friendly_name,
target_workers=target_workers,
reservation_activity_sid=reservation_activity_sid,
assignment_activity_sid=assignment_activity_sid,
max_reserved_workers=max_reserved_workers,
task_order=task_order,
) | Update the TaskQueueInstance
:param unicode friendly_name: Human readable description of this TaskQueue
:param unicode target_workers: A string describing the Worker selection criteria for any Tasks that enter this TaskQueue.
:param unicode reservation_activity_sid: ActivitySID that will be assigned to Workers when they are reserved for a task from this TaskQueue.
:param unicode assignment_activity_sid: ActivitySID that will be assigned to Workers when they are assigned a task from this TaskQueue.
:param unicode max_reserved_workers: The maximum amount of workers to create reservations for the assignment of a task while in this queue.
:param TaskQueueInstance.TaskOrder task_order: TaskOrder will determine which order the Tasks will be assigned to Workers.
:returns: Updated TaskQueueInstance
:rtype: twilio.rest.taskrouter.v1.workspace.task_queue.TaskQueueInstance |
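A hedged usage sketch via the Twilio Python SDK; the credentials and SIDs are placeholders.
from twilio.rest import Client
client = Client('ACCOUNT_SID', 'AUTH_TOKEN')
queue = client.taskrouter.workspaces('WSXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX') \
              .task_queues('WQXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX') \
              .fetch()
updated = queue.update(friendly_name='Support Queue', max_reserved_workers=5)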
def get_player(self, name=None, platform=None, uid=None):
"""|coro|
Calls get_players and returns the first element,
exactly one of uid and name must be given, platform must be given
Parameters
----------
name : str
the name of the player you're searching for
platform : str
the name of the platform you're searching on (See :class:`Platforms`)
uid : str
the uid of the player you're searching for
Returns
-------
:class:`Player`
player found"""
results = yield from self.get_players(name=name, platform=platform, uid=uid)
return results[0] | |coro|
Calls get_players and returns the first element,
exactly one of uid and name must be given, platform must be given
Parameters
----------
name : str
the name of the player you're searching for
platform : str
the name of the platform you're searching on (See :class:`Platforms`)
uid : str
the uid of the player you're searching for
Returns
-------
:class:`Player`
player found |
def get(self, **options):
"""Run this query and get the first result.
Parameters:
\**options(QueryOptions, optional)
Returns:
Model: An entity or None if there were no results.
"""
sub_query = self.with_limit(1)
options = QueryOptions(sub_query).replace(batch_size=1)
for result in sub_query.run(**options):
return result
return None | Run this query and get the first result.
Parameters:
\**options(QueryOptions, optional)
Returns:
Model: An entity or None if there were no results. |
def get_notifications(self, all=github.GithubObject.NotSet, participating=github.GithubObject.NotSet, since=github.GithubObject.NotSet, before=github.GithubObject.NotSet):
"""
:calls: `GET /notifications <http://developer.github.com/v3/activity/notifications>`_
:param all: bool
:param participating: bool
:param since: datetime.datetime
:param before: datetime.datetime
:rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.Notification.Notification`
"""
assert all is github.GithubObject.NotSet or isinstance(all, bool), all
assert participating is github.GithubObject.NotSet or isinstance(participating, bool), participating
assert since is github.GithubObject.NotSet or isinstance(since, datetime.datetime), since
assert before is github.GithubObject.NotSet or isinstance(before, datetime.datetime), before
params = dict()
if all is not github.GithubObject.NotSet:
params["all"] = all
if participating is not github.GithubObject.NotSet:
params["participating"] = participating
if since is not github.GithubObject.NotSet:
params["since"] = since.strftime("%Y-%m-%dT%H:%M:%SZ")
if before is not github.GithubObject.NotSet:
params["before"] = before.strftime("%Y-%m-%dT%H:%M:%SZ")
return github.PaginatedList.PaginatedList(
github.Notification.Notification,
self._requester,
"/notifications",
params
) | :calls: `GET /notifications <http://developer.github.com/v3/activity/notifications>`_
:param all: bool
:param participating: bool
:param since: datetime.datetime
:param before: datetime.datetime
:rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.Notification.Notification` |
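A hedged usage sketch with PyGithub; the access token is a placeholder.
import datetime
from github import Github
gh = Github('ACCESS_TOKEN')
since = datetime.datetime(2019, 1, 1)
for notification in gh.get_user().get_notifications(participating=True, since=since):
    print(notification.subject.title)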
def graft(func=None, *, namespace=None):
"""Decorator for marking a function as a graft.
Parameters:
namespace (str): namespace of data, same format as targeting.
Returns:
Graft
For example, these grafts::
@graft
def foo_data():
return {'foo': True}
@graft(namespace='bar')
def bar_data():
return False
will be rendered as::
{
'foo': True,
'bar': False
}
"""
if not func:
return functools.partial(graft, namespace=namespace)
if isinstance(func, Graft):
return func
return Graft(func, namespace=namespace) | Decorator for marking a function as a graft.
Parameters:
namespace (str): namespace of data, same format as targeting.
Returns:
Graft
For example, these grafts::
@graft
def foo_data():
return {'foo': True}
@graft(namespace='bar')
def bar_data():
return False
will be rendered as::
{
'foo': True,
'bar': False
} |
def create_time_from_text(text):
"""
Parse a time in the form ``hh:mm`` or ``hhmm`` (or even ``hmm``) and return a :class:`datetime.time` object. If no
valid time can be extracted from the given string, :exc:`ValueError` will be raised.
"""
text = text.replace(':', '')
if not re.match(r'^\d{3,}$', text):
raise ValueError("Time must be numeric")
minutes = int(text[-2:])
hours = int(text[0:2] if len(text) > 3 else text[0])
return datetime.time(hours, minutes) | Parse a time in the form ``hh:mm`` or ``hhmm`` (or even ``hmm``) and return a :class:`datetime.time` object. If no
valid time can be extracted from the given string, :exc:`ValueError` will be raised. |
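For example (toy inputs exercising each accepted form):
create_time_from_text('7:45')     # -> datetime.time(7, 45)
create_time_from_text('0930')     # -> datetime.time(9, 30)
create_time_from_text('noonish')  # raises ValueError("Time must be numeric")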
def process(self, candidates):
"""
:arg list candidates: list of Candidates
:returns: list of Candidates where score is at least min_score,
if and only if one or more Candidates have at least min_score.
Otherwise, returns original list of Candidates.
"""
high_score_candidates = [c for c in candidates if c.score >= self.min_score]
if high_score_candidates != []:
return high_score_candidates
return candidates | :arg list candidates: list of Candidates
:returns: list of Candidates where score is at least min_score,
if and only if one or more Candidates have at least min_score.
Otherwise, returns original list of Candidates. |
def _line_iter(self, in_handle):
"""Read tab delimited file, handling ISA-Tab special case headers.
"""
reader = csv.reader(in_handle, dialect="excel-tab")
for line in reader:
if len(line) > 0 and line[0]:
# check for section headers; all uppercase and a single value
if line[0].upper() == line[0] and "".join(line[1:]) == "":
line = [line[0]]
yield line | Read tab delimited file, handling ISA-Tab special case headers. |
def to_graphviz(booster, fmap='', num_trees=0, rankdir='UT',
yes_color='#0000FF', no_color='#FF0000',
condition_node_params=None, leaf_node_params=None, **kwargs):
"""Convert specified tree to graphviz instance. IPython can automatically plot the
returned graphiz instance. Otherwise, you should call .render() method
of the returned graphiz instance.
Parameters
----------
booster : Booster, XGBModel
Booster or XGBModel instance
fmap: str (optional)
The name of feature map file
num_trees : int, default 0
Specify the ordinal number of target tree
rankdir : str, default "UT"
Passed to graphviz via graph_attr
yes_color : str, default '#0000FF'
Edge color when meets the node condition.
no_color : str, default '#FF0000'
Edge color when doesn't meet the node condition.
condition_node_params : dict (optional)
condition node configuration,
{'shape':'box',
'style':'filled,rounded',
'fillcolor':'#78bceb'
}
leaf_node_params : dict (optional)
leaf node configuration
{'shape':'box',
'style':'filled',
'fillcolor':'#e48038'
}
kwargs :
Other keywords passed to graphviz graph_attr
Returns
-------
graph : graphviz.Digraph
"""
if condition_node_params is None:
condition_node_params = {}
if leaf_node_params is None:
leaf_node_params = {}
try:
from graphviz import Digraph
except ImportError:
raise ImportError('You must install graphviz to plot tree')
if not isinstance(booster, (Booster, XGBModel)):
raise ValueError('booster must be Booster or XGBModel instance')
if isinstance(booster, XGBModel):
booster = booster.get_booster()
tree = booster.get_dump(fmap=fmap)[num_trees]
tree = tree.split()
kwargs = kwargs.copy()
kwargs.update({'rankdir': rankdir})
graph = Digraph(graph_attr=kwargs)
for i, text in enumerate(tree):
if text[0].isdigit():
node = _parse_node(
graph, text, condition_node_params=condition_node_params,
leaf_node_params=leaf_node_params)
else:
if i == 0:
# 1st string must be node
raise ValueError('Unable to parse given string as tree')
_parse_edge(graph, node, text, yes_color=yes_color,
no_color=no_color)
return graph | Convert specified tree to graphviz instance. IPython can automatically plot the
returned graphviz instance. Otherwise, you should call the .render() method
of the returned graphviz instance.
Parameters
----------
booster : Booster, XGBModel
Booster or XGBModel instance
fmap: str (optional)
The name of feature map file
num_trees : int, default 0
Specify the ordinal number of target tree
rankdir : str, default "UT"
Passed to graphviz via graph_attr
yes_color : str, default '#0000FF'
Edge color when meets the node condition.
no_color : str, default '#FF0000'
Edge color when doesn't meet the node condition.
condition_node_params : dict (optional)
condition node configuration,
{'shape':'box',
'style':'filled,rounded',
'fillcolor':'#78bceb'
}
leaf_node_params : dict (optional)
leaf node configuration
{'shape':'box',
'style':'filled',
'fillcolor':'#e48038'
}
kwargs :
Other keywords passed to graphviz graph_attr
Returns
-------
graph : graphviz.Digraph |
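A minimal end-to-end sketch, assuming xgboost and graphviz are installed; the training data is random toy data.
import numpy as np
import xgboost as xgb
X = np.random.rand(100, 5)
y = (X[:, 0] > 0.5).astype(int)
dtrain = xgb.DMatrix(X, label=y)
bst = xgb.train({'max_depth': 2, 'objective': 'binary:logistic'}, dtrain, num_boost_round=3)
graph = xgb.to_graphviz(bst, num_trees=0, rankdir='LR')
graph.render('tree0')   # writes tree0.pdf via graphviz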
def _list_records_in_zone(self, zone, rdtype=None, name=None, content=None):
"""
Iterates over all records of the zone and returns a list of records filtered
by record type, name and content. The list is empty if no records found.
"""
records = []
rrsets = zone.iterate_rdatasets() if zone else []
for rname, rdataset in rrsets:
rtype = dns.rdatatype.to_text(rdataset.rdtype)
if ((not rdtype or rdtype == rtype)
and (not name or name == rname.to_text())):
for rdata in rdataset:
rdata = rdata.to_text()
if not content or self._convert_content(rtype, content) == rdata:
raw_rdata = self._clean_TXT_record({'type': rtype,
'content': rdata})['content']
data = {
'type': rtype,
'name': rname.to_text(True),
'ttl': int(rdataset.ttl),
'content': raw_rdata,
'id': Provider._create_identifier(rtype, rname.to_text(), raw_rdata)
}
records.append(data)
return records | Iterates over all records of the zone and returns a list of records filtered
by record type, name and content. The list is empty if no records found. |
def main(argv=None):
'''
Main entry-point for calling layouts directly as a program.
'''
# Prep argparse
ap = argparse.ArgumentParser(
description='Basic query options for Python HID-IO Layouts repository',
)
ap.add_argument('--list', action='store_true', help='List available layout aliases.')
ap.add_argument('--get', metavar='NAME', help='Retrieve the given layout, and return the JSON data')
# Parse arguments
args = ap.parse_args(argv)
# Create layouts context manager
mgr = Layouts()
# Check if generating a list
if args.list:
for name in mgr.list_layouts():
print(name)
# Retrieve JSON layout
if args.get is not None:
layout = mgr.get_layout(args.get)
print(json.dumps(layout.json())) | Main entry-point for calling layouts directly as a program. |
def get(self):
"""
Return the referer aka the WHOIS server of the current domain extension.
"""
if not PyFunceble.CONFIGURATION["local"]:
# We are not running a test in a local network.
if self.domain_extension not in self.ignored_extension:
                # The extension of the domain we are testing is not in
                # the list of ignored extensions.
# We set the referer to None as we do not have any.
referer = None
if self.domain_extension in PyFunceble.INTERN["iana_db"]:
# The domain extension is in the iana database.
if not PyFunceble.CONFIGURATION["no_whois"]:
# We are authorized to use WHOIS for the test result.
# We get the referer from the database.
referer = PyFunceble.INTERN["iana_db"][self.domain_extension]
if not referer:
# The referer is not filled.
# We log the case of the current extension.
Logs().referer_not_found(self.domain_extension)
# And we handle and return None status.
return None
# The referer is into the database.
# We return the extracted referer.
return referer
# We are not authorized to use WHOIS for the test result.
# We return None.
return None
# The domain extension is not in the iana database.
# We return False, it is an invalid domain.
return False
            # The extension of the domain we are testing is in the list
            # of ignored extensions.
            # We return None, the domain does not have a WHOIS server.
return None
# We are running a test in a local network.
# We return None.
return None | Return the referer aka the WHOIS server of the current domain extension. |
def warn( callingClass, astr_key, astr_extraMsg="" ):
'''
Convenience dispatcher to the error_exit() method.
Will raise "warning" error, i.e. script processing continues.
'''
b_exitToOS = False
report( callingClass, astr_key, b_exitToOS, astr_extraMsg ) | Convenience dispatcher to the error_exit() method.
Will raise "warning" error, i.e. script processing continues. |
def _reproject(self, eopatch, src_raster):
"""
Reprojects the raster data from Geopedia's CRS (POP_WEB) to EOPatch's CRS.
"""
height, width = src_raster.shape
dst_raster = np.ones((height, width), dtype=self.raster_dtype)
src_bbox = transform_bbox(eopatch.bbox, CRS.POP_WEB)
src_transform = rasterio.transform.from_bounds(*src_bbox, width=width, height=height)
dst_bbox = eopatch.bbox
dst_transform = rasterio.transform.from_bounds(*dst_bbox, width=width, height=height)
rasterio.warp.reproject(src_raster, dst_raster,
src_transform=src_transform, src_crs={'init': CRS.ogc_string(CRS.POP_WEB)},
src_nodata=0,
dst_transform=dst_transform, dst_crs={'init': CRS.ogc_string(eopatch.bbox.crs)},
dst_nodata=self.no_data_val)
return dst_raster | Reprojects the raster data from Geopedia's CRS (POP_WEB) to EOPatch's CRS. |
def validate_permission(self, key, permission):
""" validates if group can get assigned with permission"""
if permission.perm_name not in self.__possible_permissions__:
raise AssertionError(
"perm_name is not one of {}".format(self.__possible_permissions__)
)
        return permission | Validates whether the group can be assigned the given permission. |
def list_templates(self, extensions=None, filter_func=None):
"""Returns a list of templates for this environment. This requires
that the loader supports the loader's
:meth:`~BaseLoader.list_templates` method.
If there are other files in the template folder besides the
actual templates, the returned list can be filtered. There are two
ways: either `extensions` is set to a list of file extensions for
templates, or a `filter_func` can be provided which is a callable that
is passed a template name and should return `True` if it should end up
in the result list.
If the loader does not support that, a :exc:`TypeError` is raised.
.. versionadded:: 2.4
"""
x = self.loader.list_templates()
if extensions is not None:
if filter_func is not None:
raise TypeError('either extensions or filter_func '
'can be passed, but not both')
filter_func = lambda x: '.' in x and \
x.rsplit('.', 1)[1] in extensions
if filter_func is not None:
x = ifilter(filter_func, x)
return x | Returns a list of templates for this environment. This requires
that the loader supports the loader's
:meth:`~BaseLoader.list_templates` method.
If there are other files in the template folder besides the
actual templates, the returned list can be filtered. There are two
ways: either `extensions` is set to a list of file extensions for
templates, or a `filter_func` can be provided which is a callable that
is passed a template name and should return `True` if it should end up
in the result list.
If the loader does not support that, a :exc:`TypeError` is raised.
.. versionadded:: 2.4 |
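A hedged usage sketch for the method above, assuming the familiar Jinja2 Environment/FileSystemLoader setup; the 'templates' directory, the extensions, and the 'partials/' prefix are illustrative only.

from jinja2 import Environment, FileSystemLoader

env = Environment(loader=FileSystemLoader('templates'))
html_like = env.list_templates(extensions=['html', 'j2'])                            # filter by extension
partials = env.list_templates(filter_func=lambda name: name.startswith('partials/'))  # or by callable
# Passing both arguments at once raises TypeError, as the code above enforces.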
def search_customer(self, limit=100, offset=0, email_pattern=None, last_name_pattern=None,
company_name_pattern=None, with_additional_data=False):
"""Search the list of customers."""
response = self.request(E.searchCustomerRequest(
E.limit(limit),
E.offset(offset),
E.emailPattern(email_pattern or ''),
E.lastNamePattern(last_name_pattern or ''),
E.companyNamePattern(company_name_pattern or ''),
E.withAdditionalData(int(with_additional_data)),
))
return response.as_models(Customer) | Search the list of customers. |
def red_workshift(request, message=None):
'''
Redirects to the base workshift page for users who are logged in
'''
if message:
messages.add_message(request, messages.ERROR, message)
return HttpResponseRedirect(reverse('workshift:view_semester')) | Redirects to the base workshift page for users who are logged in |
def provideObjectsToLearn(self, objectNames=None):
"""
Returns the objects in a canonical format to be sent to an experiment.
    The returned format is a dictionary where the keys are object names, and
values are lists of sensations, each sensation being a mapping from
cortical column index to a pair of SDR's (one location and one feature).
returnDict = {
"objectId1": [
{
0: (set([1, 5, 10]), set([6, 12, 52]), # location, feature for CC0
1: (set([6, 2, 15]), set([64, 1, 5]), # location, feature for CC1
},
{
0: (set([5, 46, 50]), set([8, 10, 11]), # location, feature for CC0
1: (set([1, 6, 45]), set([12, 17, 23]), # location, feature for CC1
},
],
"objectId2": [
:
]
:
}
Parameters:
----------------------------
@param objectNames (list)
List of object names to provide to the experiment
"""
if objectNames is None:
objectNames = self.objects.keys()
objects = {}
for name in objectNames:
objects[name] = [self._getSDRPairs([pair] * self.numColumns) \
for pair in self.objects[name]]
self._checkObjectsToLearn(objects)
return objects | Returns the objects in a canonical format to be sent to an experiment.
    The returned format is a dictionary where the keys are object names, and
values are lists of sensations, each sensation being a mapping from
cortical column index to a pair of SDR's (one location and one feature).
returnDict = {
"objectId1": [
{
0: (set([1, 5, 10]), set([6, 12, 52]), # location, feature for CC0
1: (set([6, 2, 15]), set([64, 1, 5]), # location, feature for CC1
},
{
0: (set([5, 46, 50]), set([8, 10, 11]), # location, feature for CC0
1: (set([1, 6, 45]), set([12, 17, 23]), # location, feature for CC1
},
],
"objectId2": [
:
]
:
}
Parameters:
----------------------------
@param objectNames (list)
List of object names to provide to the experiment |
def is_prelinked_bytecode(bytecode: bytes, link_refs: List[Dict[str, Any]]) -> bool:
"""
Returns False if all expected link_refs are unlinked, otherwise returns True.
todo support partially pre-linked bytecode (currently all or nothing)
"""
for link_ref in link_refs:
for offset in link_ref["offsets"]:
try:
validate_empty_bytes(offset, link_ref["length"], bytecode)
except ValidationError:
return True
return False | Returns False if all expected link_refs are unlinked, otherwise returns True.
todo support partially pre-linked bytecode (currently all or nothing) |
def _parse(self, timestr, dayfirst=None, yearfirst=None, fuzzy=False,
fuzzy_with_tokens=False):
"""
Private method which performs the heavy lifting of parsing, called from
``parse()``, which passes on its ``kwargs`` to this function.
:param timestr:
The string to parse.
:param dayfirst:
Whether to interpret the first value in an ambiguous 3-integer date
(e.g. 01/05/09) as the day (``True``) or month (``False``). If
``yearfirst`` is set to ``True``, this distinguishes between YDM
and YMD. If set to ``None``, this value is retrieved from the
current :class:`parserinfo` object (which itself defaults to
``False``).
:param yearfirst:
Whether to interpret the first value in an ambiguous 3-integer date
(e.g. 01/05/09) as the year. If ``True``, the first number is taken
to be the year, otherwise the last number is taken to be the year.
If this is set to ``None``, the value is retrieved from the current
:class:`parserinfo` object (which itself defaults to ``False``).
:param fuzzy:
Whether to allow fuzzy parsing, allowing for string like "Today is
January 1, 2047 at 8:21:00AM".
:param fuzzy_with_tokens:
If ``True``, ``fuzzy`` is automatically set to True, and the parser
will return a tuple where the first element is the parsed
:class:`datetime.datetime` datetimestamp and the second element is
a tuple containing the portions of the string which were ignored:
.. doctest::
>>> from dateutil.parser import parse
>>> parse("Today is January 1, 2047 at 8:21:00AM", fuzzy_with_tokens=True)
(datetime.datetime(2047, 1, 1, 8, 21), (u'Today is ', u' ', u'at '))
"""
if fuzzy_with_tokens:
fuzzy = True
info = self.info
if dayfirst is None:
dayfirst = info.dayfirst
if yearfirst is None:
yearfirst = info.yearfirst
res = self._result()
l = _timelex.split(timestr) # Splits the timestr into tokens
# keep up with the last token skipped so we can recombine
# consecutively skipped tokens (-2 for when i begins at 0).
last_skipped_token_i = -2
skipped_tokens = list()
try:
# year/month/day list
ymd = _ymd(timestr)
# Index of the month string in ymd
mstridx = -1
len_l = len(l)
i = 0
while i < len_l:
# Check if it's a number
try:
value_repr = l[i]
value = float(value_repr)
except ValueError:
value = None
if value is not None:
# Token is a number
len_li = len(l[i])
i += 1
if (len(ymd) == 3 and len_li in (2, 4)
and res.hour is None and (i >= len_l or (l[i] != ':' and
info.hms(l[i]) is None))):
# 19990101T23[59]
s = l[i - 1]
res.hour = int(s[:2])
if len_li == 4:
res.minute = int(s[2:])
elif len_li == 6 or (len_li > 6 and l[i - 1].find('.') == 6):
# YYMMDD or HHMMSS[.ss]
s = l[i - 1]
if not ymd and l[i - 1].find('.') == -1:
# ymd.append(info.convertyear(int(s[:2])))
ymd.append(s[:2])
ymd.append(s[2:4])
ymd.append(s[4:])
else:
# 19990101T235959[.59]
res.hour = int(s[:2])
res.minute = int(s[2:4])
res.second, res.microsecond = _parsems(s[4:])
elif len_li in (8, 12, 14):
# YYYYMMDD
s = l[i - 1]
ymd.append(s[:4])
ymd.append(s[4:6])
ymd.append(s[6:8])
if len_li > 8:
res.hour = int(s[8:10])
res.minute = int(s[10:12])
if len_li > 12:
res.second = int(s[12:])
elif ((i < len_l and info.hms(l[i]) is not None) or
(i + 1 < len_l and l[i] == ' ' and
info.hms(l[i + 1]) is not None)):
# HH[ ]h or MM[ ]m or SS[.ss][ ]s
if l[i] == ' ':
i += 1
idx = info.hms(l[i])
while True:
if idx == 0:
res.hour = int(value)
if value % 1:
res.minute = int(60 * (value % 1))
elif idx == 1:
res.minute = int(value)
if value % 1:
res.second = int(60 * (value % 1))
elif idx == 2:
res.second, res.microsecond = \
_parsems(value_repr)
i += 1
if i >= len_l or idx == 2:
break
# 12h00
try:
value_repr = l[i]
value = float(value_repr)
except ValueError:
break
else:
i += 1
idx += 1
if i < len_l:
newidx = info.hms(l[i])
if newidx is not None:
idx = newidx
elif (i == len_l and l[i - 2] == ' ' and
info.hms(l[i - 3]) is not None):
# X h MM or X m SS
idx = info.hms(l[i - 3])
if idx == 0: # h
res.minute = int(value)
sec_remainder = value % 1
if sec_remainder:
res.second = int(60 * sec_remainder)
elif idx == 1: # m
res.second, res.microsecond = \
_parsems(value_repr)
# We don't need to advance the tokens here because the
# i == len_l call indicates that we're looking at all
# the tokens already.
elif i + 1 < len_l and l[i] == ':':
# HH:MM[:SS[.ss]]
res.hour = int(value)
i += 1
value = float(l[i])
res.minute = int(value)
if value % 1:
res.second = int(60 * (value % 1))
i += 1
if i < len_l and l[i] == ':':
res.second, res.microsecond = _parsems(l[i + 1])
i += 2
elif i < len_l and l[i] in ('-', '/', '.'):
sep = l[i]
ymd.append(value_repr)
i += 1
if i < len_l and not info.jump(l[i]):
try:
# 01-01[-01]
ymd.append(l[i])
except ValueError:
# 01-Jan[-01]
value = info.month(l[i])
if value is not None:
ymd.append(value)
assert mstridx == -1
mstridx = len(ymd) - 1
else:
return None, None
i += 1
if i < len_l and l[i] == sep:
# We have three members
i += 1
value = info.month(l[i])
if value is not None:
ymd.append(value)
mstridx = len(ymd) - 1
assert mstridx == -1
else:
ymd.append(l[i])
i += 1
elif i >= len_l or info.jump(l[i]):
if i + 1 < len_l and info.ampm(l[i + 1]) is not None:
# 12 am
res.hour = int(value)
if res.hour < 12 and info.ampm(l[i + 1]) == 1:
res.hour += 12
elif res.hour == 12 and info.ampm(l[i + 1]) == 0:
res.hour = 0
i += 1
else:
# Year, month or day
ymd.append(value)
i += 1
elif info.ampm(l[i]) is not None:
# 12am
res.hour = int(value)
if res.hour < 12 and info.ampm(l[i]) == 1:
res.hour += 12
elif res.hour == 12 and info.ampm(l[i]) == 0:
res.hour = 0
i += 1
elif not fuzzy:
return None, None
else:
i += 1
continue
# Check weekday
value = info.weekday(l[i])
if value is not None:
res.weekday = value
i += 1
continue
# Check month name
value = info.month(l[i])
if value is not None:
ymd.append(value)
assert mstridx == -1
mstridx = len(ymd) - 1
i += 1
if i < len_l:
if l[i] in ('-', '/'):
# Jan-01[-99]
sep = l[i]
i += 1
ymd.append(l[i])
i += 1
if i < len_l and l[i] == sep:
# Jan-01-99
i += 1
ymd.append(l[i])
i += 1
elif (i + 3 < len_l and l[i] == l[i + 2] == ' '
and info.pertain(l[i + 1])):
# Jan of 01
# In this case, 01 is clearly year
try:
value = int(l[i + 3])
except ValueError:
# Wrong guess
pass
else:
# Convert it here to become unambiguous
ymd.append(str(info.convertyear(value)))
i += 4
continue
# Check am/pm
value = info.ampm(l[i])
if value is not None:
# For fuzzy parsing, 'a' or 'am' (both valid English words)
# may erroneously trigger the AM/PM flag. Deal with that
# here.
val_is_ampm = True
# If there's already an AM/PM flag, this one isn't one.
if fuzzy and res.ampm is not None:
val_is_ampm = False
# If AM/PM is found and hour is not, raise a ValueError
if res.hour is None:
if fuzzy:
val_is_ampm = False
else:
raise ValueError('No hour specified with ' +
'AM or PM flag.')
elif not 0 <= res.hour <= 12:
# If AM/PM is found, it's a 12 hour clock, so raise
# an error for invalid range
if fuzzy:
val_is_ampm = False
else:
raise ValueError('Invalid hour specified for ' +
'12-hour clock.')
if val_is_ampm:
if value == 1 and res.hour < 12:
res.hour += 12
elif value == 0 and res.hour == 12:
res.hour = 0
res.ampm = value
elif fuzzy:
last_skipped_token_i = self._skip_token(skipped_tokens,
last_skipped_token_i, i, l)
i += 1
continue
# Check for a timezone name
if (res.hour is not None and len(l[i]) <= 5 and
res.tzname is None and res.tzoffset is None and
not [x for x in l[i] if x not in
string.ascii_uppercase]):
res.tzname = l[i]
res.tzoffset = info.tzoffset(res.tzname)
i += 1
# Check for something like GMT+3, or BRST+3. Notice
# that it doesn't mean "I am 3 hours after GMT", but
# "my time +3 is GMT". If found, we reverse the
# logic so that timezone parsing code will get it
# right.
if i < len_l and l[i] in ('+', '-'):
l[i] = ('+', '-')[l[i] == '+']
res.tzoffset = None
if info.utczone(res.tzname):
# With something like GMT+3, the timezone
# is *not* GMT.
res.tzname = None
continue
# Check for a numbered timezone
if res.hour is not None and l[i] in ('+', '-'):
signal = (-1, 1)[l[i] == '+']
i += 1
len_li = len(l[i])
if len_li == 4:
# -0300
res.tzoffset = int(l[i][:2]) * \
3600 + int(l[i][2:]) * 60
elif i + 1 < len_l and l[i + 1] == ':':
# -03:00
res.tzoffset = int(l[i]) * 3600 + int(l[i + 2]) * 60
i += 2
elif len_li <= 2:
# -[0]3
res.tzoffset = int(l[i][:2]) * 3600
else:
return None, None
i += 1
res.tzoffset *= signal
# Look for a timezone name between parenthesis
if (i + 3 < len_l and
info.jump(l[i]) and l[i + 1] == '(' and l[i + 3] == ')' and
3 <= len(l[i + 2]) <= 5 and
not [x for x in l[i + 2]
if x not in string.ascii_uppercase]):
# -0300 (BRST)
res.tzname = l[i + 2]
i += 4
continue
# Check jumps
if not (info.jump(l[i]) or fuzzy):
return None, None
last_skipped_token_i = self._skip_token(skipped_tokens,
last_skipped_token_i, i, l)
i += 1
# Process year/month/day
year, month, day = ymd.resolve_ymd(mstridx, yearfirst, dayfirst)
if year is not None:
res.year = year
res.century_specified = ymd.century_specified
if month is not None:
res.month = month
if day is not None:
res.day = day
except (IndexError, ValueError, AssertionError):
return None, None
if not info.validate(res):
return None, None
if fuzzy_with_tokens:
return res, tuple(skipped_tokens)
else:
return res, None | Private method which performs the heavy lifting of parsing, called from
``parse()``, which passes on its ``kwargs`` to this function.
:param timestr:
The string to parse.
:param dayfirst:
Whether to interpret the first value in an ambiguous 3-integer date
(e.g. 01/05/09) as the day (``True``) or month (``False``). If
``yearfirst`` is set to ``True``, this distinguishes between YDM
and YMD. If set to ``None``, this value is retrieved from the
current :class:`parserinfo` object (which itself defaults to
``False``).
:param yearfirst:
Whether to interpret the first value in an ambiguous 3-integer date
(e.g. 01/05/09) as the year. If ``True``, the first number is taken
to be the year, otherwise the last number is taken to be the year.
If this is set to ``None``, the value is retrieved from the current
:class:`parserinfo` object (which itself defaults to ``False``).
:param fuzzy:
Whether to allow fuzzy parsing, allowing for string like "Today is
January 1, 2047 at 8:21:00AM".
:param fuzzy_with_tokens:
If ``True``, ``fuzzy`` is automatically set to True, and the parser
will return a tuple where the first element is the parsed
:class:`datetime.datetime` datetimestamp and the second element is
a tuple containing the portions of the string which were ignored:
.. doctest::
>>> from dateutil.parser import parse
>>> parse("Today is January 1, 2047 at 8:21:00AM", fuzzy_with_tokens=True)
(datetime.datetime(2047, 1, 1, 8, 21), (u'Today is ', u' ', u'at ')) |
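Since _parse is private, here is a hedged sketch of the same day/year ambiguity options through the public dateutil.parser.parse front end; the date strings are chosen purely for illustration.

from dateutil.parser import parse

parse("01/05/09")                  # datetime(2009, 1, 5)  - month first by default
parse("01/05/09", dayfirst=True)   # datetime(2009, 5, 1)  - first number read as the day
parse("01/05/09", yearfirst=True)  # datetime(2001, 5, 9)  - first number read as the year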
def runner(parallel, config):
"""Run functions, provided by string name, on multiple cores on the current machine.
"""
def run_parallel(fn_name, items):
items = [x for x in items if x is not None]
if len(items) == 0:
return []
items = diagnostics.track_parallel(items, fn_name)
fn, fn_name = (fn_name, fn_name.__name__) if callable(fn_name) else (get_fn(fn_name, parallel), fn_name)
logger.info("multiprocessing: %s" % fn_name)
if "wrapper" in parallel:
wrap_parallel = {k: v for k, v in parallel.items() if k in set(["fresources", "checkpointed"])}
items = [[fn_name] + parallel.get("wrapper_args", []) + [wrap_parallel] + list(x) for x in items]
return run_multicore(fn, items, config, parallel=parallel)
return run_parallel | Run functions, provided by string name, on multiple cores on the current machine. |
def updated_topology_description(topology_description, server_description):
"""Return an updated copy of a TopologyDescription.
:Parameters:
- `topology_description`: the current TopologyDescription
- `server_description`: a new ServerDescription that resulted from
an ismaster call
Called after attempting (successfully or not) to call ismaster on the
server at server_description.address. Does not modify topology_description.
"""
address = server_description.address
# These values will be updated, if necessary, to form the new
# TopologyDescription.
topology_type = topology_description.topology_type
set_name = topology_description.replica_set_name
max_set_version = topology_description.max_set_version
max_election_id = topology_description.max_election_id
server_type = server_description.server_type
# Don't mutate the original dict of server descriptions; copy it.
sds = topology_description.server_descriptions()
# Replace this server's description with the new one.
sds[address] = server_description
if topology_type == TOPOLOGY_TYPE.Single:
# Single type never changes.
return TopologyDescription(
TOPOLOGY_TYPE.Single,
sds,
set_name,
max_set_version,
max_election_id,
topology_description._topology_settings)
if topology_type == TOPOLOGY_TYPE.Unknown:
if server_type == SERVER_TYPE.Standalone:
sds.pop(address)
elif server_type not in (SERVER_TYPE.Unknown, SERVER_TYPE.RSGhost):
topology_type = _SERVER_TYPE_TO_TOPOLOGY_TYPE[server_type]
if topology_type == TOPOLOGY_TYPE.Sharded:
if server_type not in (SERVER_TYPE.Mongos, SERVER_TYPE.Unknown):
sds.pop(address)
elif topology_type == TOPOLOGY_TYPE.ReplicaSetNoPrimary:
if server_type in (SERVER_TYPE.Standalone, SERVER_TYPE.Mongos):
sds.pop(address)
elif server_type == SERVER_TYPE.RSPrimary:
(topology_type,
set_name,
max_set_version,
max_election_id) = _update_rs_from_primary(sds,
set_name,
server_description,
max_set_version,
max_election_id)
elif server_type in (
SERVER_TYPE.RSSecondary,
SERVER_TYPE.RSArbiter,
SERVER_TYPE.RSOther):
topology_type, set_name = _update_rs_no_primary_from_member(
sds, set_name, server_description)
elif topology_type == TOPOLOGY_TYPE.ReplicaSetWithPrimary:
if server_type in (SERVER_TYPE.Standalone, SERVER_TYPE.Mongos):
sds.pop(address)
topology_type = _check_has_primary(sds)
elif server_type == SERVER_TYPE.RSPrimary:
(topology_type,
set_name,
max_set_version,
max_election_id) = _update_rs_from_primary(sds,
set_name,
server_description,
max_set_version,
max_election_id)
elif server_type in (
SERVER_TYPE.RSSecondary,
SERVER_TYPE.RSArbiter,
SERVER_TYPE.RSOther):
topology_type = _update_rs_with_primary_from_member(
sds, set_name, server_description)
else:
# Server type is Unknown or RSGhost: did we just lose the primary?
topology_type = _check_has_primary(sds)
# Return updated copy.
return TopologyDescription(topology_type,
sds,
set_name,
max_set_version,
max_election_id,
topology_description._topology_settings) | Return an updated copy of a TopologyDescription.
:Parameters:
- `topology_description`: the current TopologyDescription
- `server_description`: a new ServerDescription that resulted from
an ismaster call
Called after attempting (successfully or not) to call ismaster on the
server at server_description.address. Does not modify topology_description. |
def process(self, ast): # type: (Dict[str, Any]) -> None
""" Build a scope tree and links between scopes and identifiers by the
specified ast. You can access the built scope tree and the built links
by .scope_tree and .link_registry.
"""
id_classifier = IdentifierClassifier()
attached_ast = id_classifier.attach_identifier_attributes(ast)
# We are already in script local scope.
self._scope_tree_builder.enter_new_scope(ScopeVisibility.SCRIPT_LOCAL)
traverse(attached_ast,
on_enter=self._enter_handler,
on_leave=self._leave_handler)
self.scope_tree = self._scope_tree_builder.get_global_scope()
self.link_registry = self._scope_tree_builder.link_registry | Build a scope tree and links between scopes and identifiers by the
specified ast. You can access the built scope tree and the built links
by .scope_tree and .link_registry. |
def dataset_create_new_cli(self,
folder=None,
public=False,
quiet=False,
convert_to_csv=True,
dir_mode='skip'):
""" client wrapper for creating a new dataset
Parameters
==========
folder: the folder to initialize the metadata file in
public: should the dataset be public?
quiet: suppress verbose output (default is False)
convert_to_csv: if True, convert data to comma separated value
dir_mode: What to do with directories: "skip" - ignore; "zip" - compress and upload
"""
folder = folder or os.getcwd()
result = self.dataset_create_new(folder, public, quiet, convert_to_csv,
dir_mode)
if result.invalidTags:
print('The following are not valid tags and could not be added to '
'the dataset: ' + str(result.invalidTags))
if result.status.lower() == 'ok':
if public:
print('Your public Dataset is being created. Please check '
'progress at ' + result.url)
else:
print('Your private Dataset is being created. Please check '
'progress at ' + result.url)
else:
print('Dataset creation error: ' + result.error) | client wrapper for creating a new dataset
Parameters
==========
folder: the folder to initialize the metadata file in
public: should the dataset be public?
quiet: suppress verbose output (default is False)
convert_to_csv: if True, convert data to comma separated value
dir_mode: What to do with directories: "skip" - ignore; "zip" - compress and upload |
def value(self):
"""Returns the positive value to subtract from the total."""
originalPrice = self.lineItem.totalPrice
if self.flatRate == 0:
return originalPrice * self.percent
return self.flatRate | Returns the positive value to subtract from the total. |
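A worked example of the rule above with invented numbers (the line item and discount objects themselves are not constructed here):

original_price = 100.0
percent, flat_rate = 0.15, 0.0
value = original_price * percent if flat_rate == 0 else flat_rate
print(value)   # 15.0 is subtracted from the total; with flat_rate = 20.0 the result would be 20.0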
def _make_compile_argv(self, compile_request):
"""Return a list of arguments to use to compile sources. Subclasses can override and append."""
sources_minus_headers = list(self._iter_sources_minus_headers(compile_request))
if len(sources_minus_headers) == 0:
raise self._HeaderOnlyLibrary()
compiler = compile_request.compiler
compiler_options = compile_request.compiler_options
# We are going to execute in the target output, so get absolute paths for everything.
buildroot = get_buildroot()
# TODO: add -v to every compiler and linker invocation!
argv = (
[compiler.exe_filename] +
compiler.extra_args +
# TODO: If we need to produce static libs, don't add -fPIC! (could use Variants -- see #5788).
['-c', '-fPIC'] +
compiler_options +
[
'-I{}'.format(os.path.join(buildroot, inc_dir))
for inc_dir in compile_request.include_dirs
] +
[os.path.join(buildroot, src) for src in sources_minus_headers])
self.context.log.info("selected compiler exe name: '{}'".format(compiler.exe_filename))
self.context.log.debug("compile argv: {}".format(argv))
return argv | Return a list of arguments to use to compile sources. Subclasses can override and append. |
def parse_input_samples(job, inputs):
"""
Parses config file to pull sample information.
Stores samples as tuples of (uuid, URL)
:param JobFunctionWrappingJob job: passed by Toil automatically
:param Namespace inputs: Stores input arguments (see main)
"""
job.fileStore.logToMaster('Parsing input samples and batching jobs')
samples = []
if inputs.config:
with open(inputs.config, 'r') as f:
for line in f.readlines():
if not line.isspace():
sample = line.strip().split(',')
assert len(sample) == 2, 'Error: Config file is inappropriately formatted.'
samples.append(sample)
job.addChildJobFn(map_job, download_sample, samples, inputs) | Parses config file to pull sample information.
Stores samples as tuples of (uuid, URL)
:param JobFunctionWrappingJob job: passed by Toil automatically
:param Namespace inputs: Stores input arguments (see main) |
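The function above expects a two-column CSV config; the snippet below writes a hypothetical file that would satisfy the length-2 assertion (the sample names and URLs are invented).

config_text = (
    "sample_1,https://example.org/data/sample_1.fastq.gz\n"
    "sample_2,https://example.org/data/sample_2.fastq.gz\n"
)
with open("samples.config", "w") as f:
    f.write(config_text)
# Each non-blank line is split on ',' into a (uuid, URL) pair before download_sample jobs are added.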
def overlay_gateway_access_lists_ipv6_in_cg_ipv6_acl_in_name(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
overlay_gateway = ET.SubElement(config, "overlay-gateway", xmlns="urn:brocade.com:mgmt:brocade-tunnels")
name_key = ET.SubElement(overlay_gateway, "name")
name_key.text = kwargs.pop('name')
access_lists = ET.SubElement(overlay_gateway, "access-lists")
ipv6 = ET.SubElement(access_lists, "ipv6")
in_cg = ET.SubElement(ipv6, "in")
ipv6_acl_in_name = ET.SubElement(in_cg, "ipv6-acl-in-name")
ipv6_acl_in_name.text = kwargs.pop('ipv6_acl_in_name')
callback = kwargs.pop('callback', self._callback)
return callback(config) | Auto Generated Code |
def _is_text_data(self, data_type):
"""Private method for testing text data types."""
dt = DATA_TYPES[data_type]
if type(self.data) is dt['type'] and len(self.data) < dt['max'] and all(type(char) == str for char in self.data):
self.type = data_type.upper()
self.len = len(self.data)
return True | Private method for testing text data types. |
def _parse_xml(self, xml):
"""Extracts the attributes from the XMLElement instance."""
from re import split
vms("Parsing <cron> XML child tag.", 2)
self.frequency = get_attrib(xml, "frequency", default=5, cast=int)
self.emails = split(",\s*", get_attrib(xml, "emails", default=""))
self.notify = split(",\s*", get_attrib(xml, "notify", default="")) | Extracts the attributes from the XMLElement instance. |
def swipe(self):
'''
        Perform swipe action. If the device platform is greater than API 18, percent can be used, with a value between 0 and 1.
Usages:
d().swipe.right()
d().swipe.left(steps=10)
d().swipe.up(steps=10)
d().swipe.down()
d().swipe("right", steps=20)
d().swipe("right", steps=20, percent=0.5)
'''
@param_to_property(direction=["up", "down", "right", "left"])
def _swipe(direction="left", steps=10, percent=1):
if percent == 1:
return self.jsonrpc.swipe(self.selector, direction, steps)
else:
return self.jsonrpc.swipe(self.selector, direction, percent, steps)
        return _swipe | Perform swipe action. If the device platform is greater than API 18, percent can be used, with a value between 0 and 1.
Usages:
d().swipe.right()
d().swipe.left(steps=10)
d().swipe.up(steps=10)
d().swipe.down()
d().swipe("right", steps=20)
d().swipe("right", steps=20, percent=0.5) |
def tag_ner(lang, input_text, output_type=list):
"""Run NER for chosen language.
Choosing output_type=list, returns a list of tuples:
>>> tag_ner('latin', input_text='ut Venus, ut Sirius, ut Spica', output_type=list)
[('ut',), ('Venus',), (',',), ('ut',), ('Sirius', 'Entity'), (',',), ('ut',), ('Spica', 'Entity')]
"""
_check_latest_data(lang)
assert lang in NER_DICT.keys(), \
'Invalid language. Choose from: {}'.format(', '.join(NER_DICT.keys()))
types = [str, list]
    assert type(input_text) in types, 'Input must be: {}.'.format(', '.join(t.__name__ for t in types))
    assert output_type in types, 'Output must be a {}.'.format(', '.join(t.__name__ for t in types))
if type(input_text) == str:
punkt = PunktLanguageVars()
tokens = punkt.word_tokenize(input_text)
new_tokens = []
for word in tokens:
if word.endswith('.'):
new_tokens.append(word[:-1])
new_tokens.append('.')
else:
new_tokens.append(word)
input_text = new_tokens
ner_file_path = os.path.expanduser(NER_DICT[lang])
with open(ner_file_path) as file_open:
ner_str = file_open.read()
ner_list = ner_str.split('\n')
ner_tuple_list = []
for count, word_token in enumerate(input_text):
match = False
for ner_word in ner_list:
# the replacer slows things down, but is necessary
if word_token == ner_word:
ner_tuple = (word_token, 'Entity')
ner_tuple_list.append(ner_tuple)
match = True
break
if not match:
ner_tuple_list.append((word_token,))
if output_type is str:
string = ''
for tup in ner_tuple_list:
start_space = ' '
final_space = ''
# this is some mediocre string reconstitution
# maybe not worth the effort
if tup[0] in [',', '.', ';', ':', '?', '!']:
start_space = ''
if len(tup) == 2:
string += start_space + tup[0] + '/' + tup[1] + final_space
else:
string += start_space + tup[0] + final_space
return string
return ner_tuple_list | Run NER for chosen language.
Choosing output_type=list, returns a list of tuples:
>>> tag_ner('latin', input_text='ut Venus, ut Sirius, ut Spica', output_type=list)
[('ut',), ('Venus',), (',',), ('ut',), ('Sirius', 'Entity'), (',',), ('ut',), ('Spica', 'Entity')] |
def _get_phi_al_regional(self, C, mag, vs30measured, rrup):
"""
        Returns the intra-event (phi) standard deviation (equation 26, page 1046)
"""
phi_al = np.ones((len(vs30measured)))
idx = rrup < 30
phi_al[idx] *= C['s5']
idx = ((rrup <= 80) & (rrup >= 30.))
phi_al[idx] *= C['s5'] + (C['s6'] - C['s5']) / 50. * (rrup[idx] - 30.)
idx = rrup > 80
phi_al[idx] *= C['s6']
        return phi_al | Returns the intra-event (phi) standard deviation (equation 26, page 1046) |
def generate_tokens(readline):
"""
    The generate_tokens() generator requires one argument, readline, which
must be a callable object which provides the same interface as the
readline() method of built-in file objects. Each call to the function
should return one line of input as a string. Alternately, readline
can be a callable function terminating with StopIteration:
readline = open(myfile).next # Example of alternate readline
The generator produces 5-tuples with these members: the token type; the
token string; a 2-tuple (srow, scol) of ints specifying the row and
column where the token begins in the source; a 2-tuple (erow, ecol) of
ints specifying the row and column where the token ends in the source;
and the line on which the token was found. The line passed is the
logical line; continuation lines are included.
"""
lnum = parenlev = continued = 0
namechars, numchars = string.ascii_letters + '_', '0123456789'
contstr, needcont = '', 0
contline = None
indents = [0]
while 1: # loop over lines in stream
try:
line = readline()
except StopIteration:
line = ''
lnum = lnum + 1
pos, max = 0, len(line)
if contstr: # continued string
if not line:
raise TokenError, ("EOF in multi-line string", strstart)
endmatch = endprog.match(line)
if endmatch:
pos = end = endmatch.end(0)
yield (STRING, contstr + line[:end],
strstart, (lnum, end), contline + line)
contstr, needcont = '', 0
contline = None
elif needcont and line[-2:] != '\\\n' and line[-3:] != '\\\r\n':
yield (ERRORTOKEN, contstr + line,
strstart, (lnum, len(line)), contline)
contstr = ''
contline = None
continue
else:
contstr = contstr + line
contline = contline + line
continue
elif parenlev == 0 and not continued: # new statement
if not line: break
column = 0
while pos < max: # measure leading whitespace
if line[pos] == ' ': column = column + 1
elif line[pos] == '\t': column = (column//tabsize + 1)*tabsize
elif line[pos] == '\f': column = 0
else: break
pos = pos + 1
if pos == max: break
if line[pos] in '#\r\n': # skip comments or blank lines
if line[pos] == '#':
comment_token = line[pos:].rstrip('\r\n')
nl_pos = pos + len(comment_token)
yield (COMMENT, comment_token,
(lnum, pos), (lnum, pos + len(comment_token)), line)
yield (NL, line[nl_pos:],
(lnum, nl_pos), (lnum, len(line)), line)
else:
yield ((NL, COMMENT)[line[pos] == '#'], line[pos:],
(lnum, pos), (lnum, len(line)), line)
continue
if column > indents[-1]: # count indents or dedents
indents.append(column)
yield (INDENT, line[:pos], (lnum, 0), (lnum, pos), line)
while column < indents[-1]:
if column not in indents:
raise IndentationError(
"unindent does not match any outer indentation level",
("<tokenize>", lnum, pos, line))
indents = indents[:-1]
yield (DEDENT, '', (lnum, pos), (lnum, pos), line)
else: # continued statement
if not line:
raise TokenError, ("EOF in multi-line statement", (lnum, 0))
continued = 0
while pos < max:
pseudomatch = pseudoprog.match(line, pos)
if pseudomatch: # scan for tokens
start, end = pseudomatch.span(1)
spos, epos, pos = (lnum, start), (lnum, end), end
token, initial = line[start:end], line[start]
if initial in numchars or \
(initial == '.' and token != '.'): # ordinary number
yield (NUMBER, token, spos, epos, line)
elif initial in '\r\n':
newline = NEWLINE
if parenlev > 0:
newline = NL
yield (newline, token, spos, epos, line)
elif initial == '#':
assert not token.endswith("\n")
yield (COMMENT, token, spos, epos, line)
elif token in triple_quoted:
endprog = endprogs[token]
endmatch = endprog.match(line, pos)
if endmatch: # all on one line
pos = endmatch.end(0)
token = line[start:pos]
yield (STRING, token, spos, (lnum, pos), line)
else:
strstart = (lnum, start) # multiple lines
contstr = line[start:]
contline = line
break
elif initial in single_quoted or \
token[:2] in single_quoted or \
token[:3] in single_quoted:
if token[-1] == '\n': # continued string
strstart = (lnum, start)
endprog = (endprogs[initial] or endprogs[token[1]] or
endprogs[token[2]])
contstr, needcont = line[start:], 1
contline = line
break
else: # ordinary string
yield (STRING, token, spos, epos, line)
elif initial in namechars: # ordinary name
yield (NAME, token, spos, epos, line)
elif initial == '\\': # continued stmt
# This yield is new; needed for better idempotency:
yield (NL, token, spos, (lnum, pos), line)
continued = 1
else:
if initial in '([{': parenlev = parenlev + 1
elif initial in ')]}': parenlev = parenlev - 1
yield (OP, token, spos, epos, line)
else:
yield (ERRORTOKEN, line[pos],
(lnum, pos), (lnum, pos+1), line)
pos = pos + 1
for indent in indents[1:]: # pop remaining indent levels
yield (DEDENT, '', (lnum, 0), (lnum, 0), '')
    yield (ENDMARKER, '', (lnum, 0), (lnum, 0), '') | The generate_tokens() generator requires one argument, readline, which
must be a callable object which provides the same interface as the
readline() method of built-in file objects. Each call to the function
should return one line of input as a string. Alternately, readline
can be a callable function terminating with StopIteration:
readline = open(myfile).next # Example of alternate readline
The generator produces 5-tuples with these members: the token type; the
token string; a 2-tuple (srow, scol) of ints specifying the row and
column where the token begins in the source; a 2-tuple (erow, ecol) of
ints specifying the row and column where the token ends in the source;
and the line on which the token was found. The line passed is the
logical line; continuation lines are included. |
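The generator above follows the same readline-based calling convention as the standard library's tokenize module, so a hedged usage sketch through that stdlib equivalent looks like this (the source string is illustrative):

import io
import tokenize

src = "x = 1  # a comment\n"
for tok_type, tok_string, start, end, line in tokenize.generate_tokens(io.StringIO(src).readline):
    print(tokenize.tok_name[tok_type], repr(tok_string), start, end)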
def create(cls, path_name=None, name=None, project_id=None,
log_modified_at=None, crawlable=True):
"""Initialize an instance and save it to db."""
result = cls(path_name, name, project_id, log_modified_at, crawlable)
db.session.add(result)
db.session.commit()
crawl_result(result, True)
return result | Initialize an instance and save it to db. |
def list_gemeenten_by_provincie(self, provincie):
'''
List all `gemeenten` in a `provincie`.
:param provincie: The :class:`Provincie` for which the \
`gemeenten` are wanted.
:rtype: A :class:`list` of :class:`Gemeente`.
'''
try:
gewest = provincie.gewest
prov = provincie
except AttributeError:
prov = self.get_provincie_by_id(provincie)
gewest = prov.gewest
gewest.clear_gateway()
def creator():
gewest_gemeenten = self.list_gemeenten(gewest.id)
            return [
Gemeente(
r.id,
r.naam,
r.niscode,
gewest
                ) for r in gewest_gemeenten if str(r.niscode)[0] == str(prov.niscode)[0]
]
if self.caches['permanent'].is_configured:
key = 'ListGemeentenByProvincieId#%s' % prov.id
gemeente = self.caches['long'].get_or_create(key, creator)
else:
gemeente = creator()
for g in gemeente:
g.set_gateway(self)
return gemeente | List all `gemeenten` in a `provincie`.
:param provincie: The :class:`Provincie` for which the \
`gemeenten` are wanted.
:rtype: A :class:`list` of :class:`Gemeente`. |
def create_paired_dir(output_dir, meta_id, static=False, needwebdir=True):
"""Creates the meta or static dirs.
Adds an "even" or "odd" subdirectory to the static path
based on the meta-id.
"""
# get the absolute root path
root_path = os.path.abspath(output_dir)
# if it's a static directory, add even and odd
if static:
# determine whether meta-id is odd or even
if meta_id[-1].isdigit():
last_character = int(meta_id[-1])
else:
last_character = ord(meta_id[-1])
if last_character % 2 == 0:
num_dir = 'even'
else:
num_dir = 'odd'
# add odd or even to the path, based on the meta-id
output_path = os.path.join(root_path, num_dir)
# if it's a meta directory, output as normal
else:
output_path = root_path
# if it doesn't already exist, create the output path (includes even/odd)
if not os.path.exists(output_path):
os.mkdir(output_path)
# add the pairtree to the output path
path_name = add_to_pairtree(output_path, meta_id)
# add the meta-id directory to the end of the pairpath
meta_dir = os.path.join(path_name, meta_id)
os.mkdir(meta_dir)
# if we are creating static output
if static and needwebdir:
# add the web path to the output directory
os.mkdir(os.path.join(meta_dir, 'web'))
static_dir = os.path.join(meta_dir, 'web')
return static_dir
# else we are creating meta output or don't need web directory
else:
return meta_dir | Creates the meta or static dirs.
Adds an "even" or "odd" subdirectory to the static path
based on the meta-id. |
def writeFITSTable(filename, table):
"""
Convert a table into a FITSTable and then write to disk.
Parameters
----------
filename : str
Filename to write.
table : Table
Table to write.
Returns
-------
None
Notes
-----
Due to a bug in numpy, `int32` and `float32` are converted to `int64` and `float64` before writing.
"""
def FITSTableType(val):
"""
Return the FITSTable type corresponding to each named parameter in obj
"""
if isinstance(val, bool):
types = "L"
elif isinstance(val, (int, np.int64, np.int32)):
types = "J"
elif isinstance(val, (float, np.float64, np.float32)):
types = "E"
elif isinstance(val, six.string_types):
types = "{0}A".format(len(val))
else:
log.warning("Column {0} is of unknown type {1}".format(val, type(val)))
log.warning("Using 5A")
types = "5A"
return types
cols = []
for name in table.colnames:
cols.append(fits.Column(name=name, format=FITSTableType(table[name][0]), array=table[name]))
cols = fits.ColDefs(cols)
tbhdu = fits.BinTableHDU.from_columns(cols)
for k in table.meta:
tbhdu.header['HISTORY'] = ':'.join((k, table.meta[k]))
tbhdu.writeto(filename, overwrite=True) | Convert a table into a FITSTable and then write to disk.
Parameters
----------
filename : str
Filename to write.
table : Table
Table to write.
Returns
-------
None
Notes
-----
Due to a bug in numpy, `int32` and `float32` are converted to `int64` and `float64` before writing. |
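A hedged usage sketch assuming an astropy Table as input; the column names, values, and metadata are invented for illustration.

import numpy as np
from astropy.table import Table

tbl = Table({'ra': np.array([10.1, 10.2]),
             'dec': np.array([-45.0, -45.1]),
             'name': np.array(['src_a', 'src_b'])})
tbl.meta['catalogue'] = 'demo'          # becomes a HISTORY card in the output header
writeFITSTable('catalogue.fits', tbl)   # writes a binary-table HDU, overwriting any existing file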
def send_error(self, code, message=None):
"""
Send and log plain text error reply.
:param code:
:param message:
"""
        message = message.strip() if message else ''
self.log_error("code %d, message %s", code, message)
self.send_response(code)
self.send_header("Content-Type", "text/plain")
self.send_header('Connection', 'close')
self.end_headers()
if message:
self.wfile.write(message) | Send and log plain text error reply.
:param code:
:param message: |
def _create_fw_fab_dev(self, tenant_id, drvr_name, fw_dict):
"""This routine calls the Tenant Edge routine if FW Type is TE. """
if fw_dict.get('fw_type') == fw_constants.FW_TENANT_EDGE:
self._create_fw_fab_dev_te(tenant_id, drvr_name, fw_dict) | This routine calls the Tenant Edge routine if FW Type is TE. |
def add_callbacks(self, future, callback, errback):
"""
callback or errback may be None, but at least one must be
non-None.
"""
def done(f):
try:
res = f.result()
if callback:
callback(res)
except Exception:
if errback:
errback(create_failure())
return future.add_done_callback(done) | callback or errback may be None, but at least one must be
non-None. |
def _parse_feature(self, info):
"""Parse a feature command."""
parts = info.split(b'=', 1)
name = parts[0]
if len(parts) > 1:
value = self._path(parts[1])
else:
value = None
self.features[name] = value
return commands.FeatureCommand(name, value, lineno=self.lineno) | Parse a feature command. |
def connect_functions(self):
"""
Connects all events to the functions which should be called
:return:
"""
# Lambda is sometimes used to prevent passing the event parameter.
self.cfg_load_pushbutton.clicked.connect(lambda: self.load_overall_config())
self.cfg_save_pushbutton.clicked.connect(lambda: self.save_overall_config())
self.blue_listwidget.itemSelectionChanged.connect(self.load_selected_bot)
self.orange_listwidget.itemSelectionChanged.connect(self.load_selected_bot)
self.blue_listwidget.dropEvent = lambda event: self.bot_item_drop_event(self.blue_listwidget, event)
self.orange_listwidget.dropEvent = lambda event: self.bot_item_drop_event(self.orange_listwidget, event)
self.blue_name_lineedit.editingFinished.connect(self.team_settings_edit_event)
self.orange_name_lineedit.editingFinished.connect(self.team_settings_edit_event)
self.blue_color_spinbox.valueChanged.connect(self.team_settings_edit_event)
self.orange_color_spinbox.valueChanged.connect(self.team_settings_edit_event)
self.blue_minus_toolbutton.clicked.connect(lambda e: self.remove_agent(self.current_bot))
self.orange_minus_toolbutton.clicked.connect(lambda e: self.remove_agent(self.current_bot))
self.blue_plus_toolbutton.clicked.connect(lambda e: self.add_agent_button(team_index=0))
self.orange_plus_toolbutton.clicked.connect(lambda e: self.add_agent_button(team_index=1))
for child in self.bot_config_groupbox.findChildren(QWidget):
if isinstance(child, QLineEdit):
child.editingFinished.connect(self.bot_config_edit_event)
elif isinstance(child, QSlider):
child.valueChanged.connect(self.bot_config_edit_event)
elif isinstance(child, QRadioButton):
child.toggled.connect(self.bot_config_edit_event)
elif isinstance(child, QComboBox):
child.currentTextChanged.connect(self.bot_config_edit_event)
self.loadout_preset_toolbutton.clicked.connect(self.car_customisation.popup)
self.agent_preset_toolbutton.clicked.connect(self.agent_customisation.popup)
self.preset_load_toplevel_pushbutton.clicked.connect(self.load_preset_toplevel)
for child in self.match_settings_groupbox.findChildren(QWidget):
if isinstance(child, QComboBox):
child.currentTextChanged.connect(self.match_settings_edit_event)
elif isinstance(child, QCheckBox):
child.toggled.connect(self.match_settings_edit_event)
self.edit_mutators_pushbutton.clicked.connect(self.mutator_customisation.popup)
self.kill_bots_pushbutton.clicked.connect(self.kill_bots)
self.run_button.clicked.connect(self.run_button_pressed) | Connects all events to the functions which should be called
:return: |
def complete_previous(self, count=1, disable_wrap_around=False):
"""
Browse to the previous completions.
        (Does nothing if there are no completions.)
"""
if self.complete_state:
if self.complete_state.complete_index == 0:
index = None
if disable_wrap_around:
return
elif self.complete_state.complete_index is None:
index = len(self.complete_state.current_completions) - 1
else:
index = max(0, self.complete_state.complete_index - count)
self.go_to_completion(index) | Browse to the previous completions.
        (Does nothing if there are no completions.) |
def ucast_ip_mask(ip_addr_and_mask, return_tuple=True):
"""
    Function to check if an address is unicast and that the CIDR mask is good
Args:
ip_addr_and_mask: Unicast IP address and mask in the following format 192.168.1.1/24
        return_tuple: Set to True, it returns the IP and mask as a tuple; set to False, it returns True or False
Returns: see return_tuple for return options
"""
regex_ucast_ip_and_mask = __re.compile("^((22[0-3])|(2[0-1][0-9])|(1[0-9][0-9])|([1-9]?[0-9]))\.((25[0-5])|(2[0-4][0-9])|(1[0-9][0-9])|([1-9]?[0-9]))\.((25[0-5])|(2[0-4][0-9])|(1[0-9][0-9])|([1-9]?[0-9]))\.((25[0-5])|(2[0-4][0-9])|(1[0-9][0-9])|([1-9]?[0-9]))/((3[0-2])|([1-2]?[0-9]))$")
if return_tuple:
while not regex_ucast_ip_and_mask.match(ip_addr_and_mask):
print("Not a good unicast IP and CIDR mask combo.")
print("Please try again.")
            ip_addr_and_mask = input("Please enter a unicast IP address and mask in the following format x.x.x.x/x: ")
ip_cidr_split = ip_addr_and_mask.split("/")
ip_addr = ip_cidr_split[0]
cidr = ip_cidr_split[1]
return ip_addr, cidr
elif not return_tuple:
if not regex_ucast_ip_and_mask.match(ip_addr_and_mask):
return False
else:
            return True | Function to check if an address is unicast and that the CIDR mask is good
Args:
ip_addr_and_mask: Unicast IP address and mask in the following format 192.168.1.1/24
        return_tuple: Set to True, it returns the IP and mask as a tuple; set to False, it returns True or False
Returns: see return_tuple for return options |
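A hedged sketch of the validation-only mode (return_tuple=False), which avoids the interactive re-prompt loop; the addresses below are illustrative.

print(ucast_ip_mask('192.168.1.1/24', return_tuple=False))   # True  - valid unicast address and mask
print(ucast_ip_mask('224.0.0.5/24', return_tuple=False))     # False - first octet 224 is multicast, rejected by the regex
print(ucast_ip_mask('10.0.0.1/33', return_tuple=False))      # False - CIDR mask outside 0-32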
def append_data(self, len_tag, val_tag, data, header=False):
"""Append raw data, possibly including a embedded SOH.
:param len_tag: Tag number for length field.
:param val_tag: Tag number for value field.
:param data: Raw data byte string.
:param header: Append to header if True; default to body.
Appends two pairs: a length pair, followed by a data pair,
containing the raw data supplied. Example fields that should
use this method include: 95/96, 212/213, 354/355, etc."""
self.append_pair(len_tag, len(data), header=header)
self.append_pair(val_tag, data, header=header)
        return | Append raw data, possibly including an embedded SOH.
:param len_tag: Tag number for length field.
:param val_tag: Tag number for value field.
:param data: Raw data byte string.
:param header: Append to header if True; default to body.
Appends two pairs: a length pair, followed by a data pair,
containing the raw data supplied. Example fields that should
use this method include: 95/96, 212/213, 354/355, etc. |
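A hedged usage sketch; the method above appears to belong to a simplefix-style FixMessage class, and that import, the header tags, and the raw payload are assumptions rather than details from the source.

from simplefix import FixMessage   # assumed package for the class this method belongs to

msg = FixMessage()
msg.append_pair(8, "FIX.4.2", header=True)
msg.append_pair(35, "D", header=True)
raw = b"secret\x01payload"          # raw value that may legally contain SOH (0x01) bytes
msg.append_data(95, 96, raw)        # emits 95=<length> followed by 96=<raw bytes>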
def build_query_uri(self, uri=None, start=0, count=-1, filter='', query='', sort='', view='', fields='', scope_uris=''):
"""Builds the URI from given parameters.
        More than one request can be sent to get the items, regardless of the query parameter 'count', because the actual
number of items in the response might differ from the requested count. Some types of resource have a limited
number of items returned on each call. For those resources, additional calls are made to the API to retrieve
any other items matching the given filter. The actual number of items can also differ from the requested call
if the requested number of items would take too long.
The use of optional parameters for OneView 2.0 is described at:
http://h17007.www1.hpe.com/docs/enterprise/servers/oneview2.0/cic-api/en/api-docs/current/index.html
Note:
Single quote - "'" - inside a query parameter is not supported by OneView API.
Args:
start: The first item to return, using 0-based indexing.
If not specified, the default is 0 - start with the first available item.
count: The number of resources to return. A count of -1 requests all items (default).
filter (list or str): A general filter/query string to narrow the list of items returned. The default is no
filter; all resources are returned.
query: A single query parameter can do what would take multiple parameters or multiple GET requests using
filter. Use query for more complex queries. NOTE: This parameter is experimental for OneView 2.0.
sort: The sort order of the returned data set. By default, the sort order is based on create time with the
oldest entry first.
view: Returns a specific subset of the attributes of the resource or collection by specifying the name of a
predefined view. The default view is expand (show all attributes of the resource and all elements of
the collections or resources).
fields: Name of the fields.
uri: A specific URI (optional)
scope_uris: An expression to restrict the resources returned according to the scopes to
which they are assigned.
Returns:
uri: The complete uri
"""
if filter:
filter = self.make_query_filter(filter)
if query:
query = "&query=" + quote(query)
if sort:
sort = "&sort=" + quote(sort)
if view:
view = "&view=" + quote(view)
if fields:
fields = "&fields=" + quote(fields)
if scope_uris:
scope_uris = "&scopeUris=" + quote(scope_uris)
path = uri if uri else self._base_uri
self.validate_resource_uri(path)
symbol = '?' if '?' not in path else '&'
uri = "{0}{1}start={2}&count={3}{4}{5}{6}{7}{8}{9}".format(path, symbol, start, count, filter, query, sort,
view, fields, scope_uris)
return uri | Builds the URI from given parameters.
        More than one request can be sent to get the items, regardless of the query parameter 'count', because the actual
number of items in the response might differ from the requested count. Some types of resource have a limited
number of items returned on each call. For those resources, additional calls are made to the API to retrieve
any other items matching the given filter. The actual number of items can also differ from the requested call
if the requested number of items would take too long.
The use of optional parameters for OneView 2.0 is described at:
http://h17007.www1.hpe.com/docs/enterprise/servers/oneview2.0/cic-api/en/api-docs/current/index.html
Note:
Single quote - "'" - inside a query parameter is not supported by OneView API.
Args:
start: The first item to return, using 0-based indexing.
If not specified, the default is 0 - start with the first available item.
count: The number of resources to return. A count of -1 requests all items (default).
filter (list or str): A general filter/query string to narrow the list of items returned. The default is no
filter; all resources are returned.
query: A single query parameter can do what would take multiple parameters or multiple GET requests using
filter. Use query for more complex queries. NOTE: This parameter is experimental for OneView 2.0.
sort: The sort order of the returned data set. By default, the sort order is based on create time with the
oldest entry first.
view: Returns a specific subset of the attributes of the resource or collection by specifying the name of a
predefined view. The default view is expand (show all attributes of the resource and all elements of
the collections or resources).
fields: Name of the fields.
uri: A specific URI (optional)
scope_uris: An expression to restrict the resources returned according to the scopes to
which they are assigned.
Returns:
uri: The complete uri |
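A hedged illustration of the URI assembly above; the client instance, its base URI, and the sort value are invented, and 'filter' is omitted because make_query_filter's exact output is not shown here.

uri = client.build_query_uri(start=0, count=50, sort='name:ascending')
# -> '/rest/fc-networks?start=0&count=50&sort=name%3Aascending'   (assuming that base URI)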
def option(name, help=""):
"""Decorator that add an option to the wrapped command or function."""
def decorator(func):
options = getattr(func, "options", [])
_option = Param(name, help)
# Insert at the beginning so the apparent order is preserved
options.insert(0, _option)
func.options = options
return func
    return decorator | Decorator that adds an option to the wrapped command or function. |
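A hedged sketch of stacking the decorator above; because each application inserts at index 0, func.options ends up in the order the options are written. Param and the command runner that consumes func.options are assumed to come from the same module.

@option("--output", help="Destination file")
@option("--verbose", help="Enable verbose output")
def build(args):
    """Hypothetical command body."""
    pass

# Decorators apply bottom-up, but insert(0, ...) preserves the written order:
# build.options == [Param("--output", ...), Param("--verbose", ...)]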
def political_views(self) -> str:
"""Get a random political views.
:return: Political views.
:Example:
Liberal.
"""
views = self._data['political_views']
        return self.random.choice(views) | Get random political views.
:return: Political views.
:Example:
Liberal. |
def do_proxy_failover(self, proxy_url, for_url):
"""
:param str proxy_url: Proxy to ban.
:param str for_url: The URL being requested.
:returns: The next proxy config to try, or 'DIRECT'.
:raises ProxyConfigExhaustedError: If the PAC file provided no usable proxy configuration.
"""
self._proxy_resolver.ban_proxy(proxy_url)
return self._proxy_resolver.get_proxy_for_requests(for_url) | :param str proxy_url: Proxy to ban.
:param str for_url: The URL being requested.
:returns: The next proxy config to try, or 'DIRECT'.
:raises ProxyConfigExhaustedError: If the PAC file provided no usable proxy configuration. |
def showOperandLines(rh):
"""
Produce help output related to operands.
Input:
Request Handle
"""
if rh.function == 'HELP':
rh.printLn("N", " For the GetHost function:")
else:
rh.printLn("N", "Sub-Functions(s):")
rh.printLn("N", " diskpoolnames - " +
"Returns the names of the directory manager disk pools.")
rh.printLn("N", " diskpoolspace - " +
"Returns disk pool size information.")
rh.printLn("N", " fcpdevices - " +
"Lists the FCP device channels that are active, free, or")
rh.printLn("N", " offline.")
rh.printLn("N", " general - " +
"Returns the general information related to the z/VM")
rh.printLn("N", " hypervisor environment.")
rh.printLn("N", " help - Returns this help information.")
rh.printLn("N", " version - Show the version of this function")
if rh.subfunction != '':
rh.printLn("N", "Operand(s):")
rh.printLn("N", " <poolName> - Name of the disk pool.")
return | Produce help output related to operands.
Input:
Request Handle |
def get_valid_examples(self):
"""Return a list of valid examples for the given schema."""
path = os.path.join(self._get_schema_folder(), "examples", "valid")
return list(_get_json_content_from_folder(path)) | Return a list of valid examples for the given schema. |
def tradepileDelete(self, trade_id): # item_id instead of trade_id?
"""Remove card from tradepile.
:params trade_id: Trade id.
"""
method = 'DELETE'
url = 'trade/%s' % trade_id
self.__request__(method, url) # returns nothing
# TODO: validate status code
return True | Remove card from tradepile.
:params trade_id: Trade id. |
def read(self, num_bytes=None):
"""Read and return the specified bytes from the buffer."""
res = self.get_next(num_bytes)
self.skip(len(res))
return res | Read and return the specified bytes from the buffer. |
def set_file_path(self, filePath):
"""
Set the file path that needs to be locked.
:Parameters:
#. filePath (None, path): The file that needs to be locked. When given and a lock
is acquired, the file will be automatically opened for writing or reading
depending on the given mode. If None is given, the locker can always be used
for its general purpose as shown in the examples.
"""
if filePath is not None:
assert isinstance(filePath, basestring), "filePath must be None or string"
filePath = str(filePath)
self.__filePath = filePath | Set the file path that needs to be locked.
:Parameters:
#. filePath (None, path): The file that needs to be locked. When given and a lock
is acquired, the file will be automatically opened for writing or reading
depending on the given mode. If None is given, the locker can always be used
for its general purpose as shown in the examples. |
def merge_selected_cells(self, selection):
"""Merges or unmerges cells that are in the selection bounding box
Parameters
----------
selection: Selection object
\tSelection whose bounding box defines the cells to merge or unmerge
"""
tab = self.grid.current_table
# Get the selection bounding box
bbox = selection.get_bbox()
if bbox is None:
row, col, tab = self.grid.actions.cursor
(bb_top, bb_left), (bb_bottom, bb_right) = (row, col), (row, col)
else:
(bb_top, bb_left), (bb_bottom, bb_right) = bbox
merge_area = bb_top, bb_left, bb_bottom, bb_right
# Check if top-left cell is already merged
cell_attributes = self.grid.code_array.cell_attributes
tl_merge_area = cell_attributes[(bb_top, bb_left, tab)]["merge_area"]
if tl_merge_area is not None and tl_merge_area[:2] == merge_area[:2]:
self.unmerge(tl_merge_area, tab)
else:
self.merge(merge_area, tab) | Merges or unmerges cells that are in the selection bounding box
Parameters
----------
selection: Selection object
\tSelection whose bounding box defines the cells to merge or unmerge |
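The toggle logic above merges unless the top-left cell already anchors a merge with the same top-left corner, in which case it unmerges. A minimal sketch, assuming a plain dict of cell attributes keyed by (row, col, tab); the names are illustrative, not the application's data model.

# Merge/unmerge toggle over a bounding box.
def toggle_merge(cell_attributes, bbox, tab):
    (top, left), (bottom, right) = bbox
    merge_area = (top, left, bottom, right)
    existing = cell_attributes.get((top, left, tab), {}).get("merge_area")
    if existing is not None and existing[:2] == merge_area[:2]:
        cell_attributes[(top, left, tab)]["merge_area"] = None                      # unmerge
    else:
        cell_attributes.setdefault((top, left, tab), {})["merge_area"] = merge_area  # merge

attrs = {}
toggle_merge(attrs, ((0, 0), (2, 3)), tab=0)
assert attrs[(0, 0, 0)]["merge_area"] == (0, 0, 2, 3)
toggle_merge(attrs, ((0, 0), (2, 3)), tab=0)
assert attrs[(0, 0, 0)]["merge_area"] is None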
def load_steps_impl(self, registry, path, module_names=None):
"""
Load the step implementations at the given path, with the given module names. If
module_names is None then the module 'steps' is searched by default.
"""
if not module_names:
module_names = ['steps']
path = os.path.abspath(path)
for module_name in module_names:
mod = self.modules.get((path, module_name))
if mod is None:
#log.debug("Looking for step def module '%s' in %s" % (module_name, path))
cwd = os.getcwd()
if cwd not in sys.path:
sys.path.append(cwd)
try:
actual_module_name = os.path.basename(module_name)
complete_path = os.path.join(path, os.path.dirname(module_name))
info = imp.find_module(actual_module_name, [complete_path])
except ImportError:
#log.debug("Did not find step defs module '%s' in %s" % (module_name, path))
return
try:
# Modules have to be loaded with unique names or else problems arise
mod = imp.load_module("stepdefs_" + str(self.module_counter), *info)
except:
exc = sys.exc_info()
raise StepImplLoadException(exc)
self.module_counter += 1
self.modules[(path, module_name)] = mod
for item_name in dir(mod):
item = getattr(mod, item_name)
if isinstance(item, StepImpl):
registry.add_step(item.step_type, item)
elif isinstance(item, HookImpl):
registry.add_hook(item.cb_type, item)
elif isinstance(item, NamedTransformImpl):
registry.add_named_transform(item)
elif isinstance(item, TransformImpl):
registry.add_transform(item) | Load the step implementations at the given path, with the given module names. If
module_names is None then the module 'steps' is searched by default. |
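The imp module used above is deprecated in modern Python. A roughly equivalent lookup with importlib is sketched below under the same assumptions (a unique module name per load so repeated loads do not collide); it is a simplification, not a drop-in replacement for the loader above.

# importlib-based sketch of "find the module file in a directory and load it under a unique name".
import os
import importlib.util

def load_module_from(path, module_name, counter):
    file_path = os.path.join(path, module_name + ".py")
    if not os.path.isfile(file_path):
        return None                                        # behaves like the ImportError branch above
    spec = importlib.util.spec_from_file_location("stepdefs_%d" % counter, file_path)
    module = importlib.util.module_from_spec(spec)
    spec.loader.exec_module(module)                        # may raise; the caller can wrap the error
    return module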
def execute(func: types.FunctionType):
"""
>>> from Redy.Magic.Classic import execute
>>> x = 1
>>> @execute
... def f(x = x) -> int:
...     return x + 1
>>> assert f is 2
"""
spec = getfullargspec(func)
default = spec.defaults
arg_cursor = 0
def get_item(name):
nonlocal arg_cursor
ctx = func.__globals__
value = ctx.get(name, _undef)
if value is _undef:
try:
value = default[arg_cursor]
arg_cursor += 1
except (TypeError, IndexError):
raise ValueError(f"Current context has no variable `{name}`")
return value
return func(*(get_item(arg_name) for arg_name in spec.args)) | >>> from Redy.Magic.Classic import execute
>>> x = 1
>>> @execute
... def f(x = x) -> int:
...     return x + 1
>>> assert f is 2 |
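The core idea is a decorator that calls the function at definition time and rebinds the name to the result. A stripped-down sketch of that pattern (not the Redy implementation, which additionally pulls argument values from the enclosing globals and defaults):

# Decorator that runs the function immediately and binds its name to the result.
def run_now(func):
    return func()

x = 1

@run_now
def doubled():
    return x * 2

assert doubled == 2   # the name now holds the result, not a function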
def aggregate_count_over_time(self, metric_store, groupby_name, aggregate_timestamp):
"""
Organize and store the count of data from the log line into the metric store by column, group name, timestamp
:param dict metric_store: The metric store used to store all the parsed the log data
:param string groupby_name: the group name that the log line belongs to
:param string aggregate_timestamp: timestamp used for storing the raw data. This accounts for aggregation time period
:return: None
"""
all_qps = metric_store['qps']
qps = all_qps[groupby_name]
if aggregate_timestamp in qps:
qps[aggregate_timestamp] += 1
else:
qps[aggregate_timestamp] = 1
return None | Organize and store the count of data from the log line into the metric store by column, group name, timestamp
:param dict metric_store: The metric store used to store all the parsed the log data
:param string groupby_name: the group name that the log line belongs to
:param string aggregate_timestamp: timestamp used for storing the raw data. This accounts for aggregation time period
:return: None |
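The per-timestamp bump above is the classic counter pattern; with collections it collapses to one increment per processed line. The structure below is illustrative, not the original metric store.

# Nested defaultdicts: group name -> aggregate timestamp -> count.
from collections import defaultdict

metric_store = {'qps': defaultdict(lambda: defaultdict(int))}

def count_line(metric_store, groupby_name, aggregate_timestamp):
    metric_store['qps'][groupby_name][aggregate_timestamp] += 1

count_line(metric_store, 'GET /search', '2024-01-01 00:00')
count_line(metric_store, 'GET /search', '2024-01-01 00:00')
assert metric_store['qps']['GET /search']['2024-01-01 00:00'] == 2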
def get_request_feature(self, name):
"""Parses the request for a particular feature.
Arguments:
name: A feature name.
Returns:
A feature parsed from the URL if the feature is supported, or None.
"""
if '[]' in name:
# array-type
return self.request.query_params.getlist(
name) if name in self.features else None
elif '{}' in name:
# object-type (keys are not consistent)
return self._extract_object_params(
name) if name in self.features else {}
else:
# single-type
return self.request.query_params.get(
name) if name in self.features else None | Parses the request for a particular feature.
Arguments:
name: A feature name.
Returns:
A feature parsed from the URL if the feature is supported, or None. |
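The method distinguishes three query-parameter styles: array-type names ending in [], object-type names containing {}, and plain scalars. A rough illustration with a plain query string instead of a DRF request; the feature names below are made up.

# How the three parameter styles look once the query string is parsed.
from urllib.parse import parse_qs

params = parse_qs('include[]=author&include[]=comments&filter{status}=open&per_page=10')

assert params['include[]'] == ['author', 'comments']   # array-type: all values kept
assert params['filter{status}'] == ['open']            # object-type: key embedded in the name
assert params['per_page'] == ['10']                     # single-type: one scalar value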
def get_themes(templates_path):
"""Returns available themes list."""
themes = os.listdir(templates_path)
if '__common__' in themes:
themes.remove('__common__')
return themes | Returns available themes list. |
def log_request(handler):
"""
Logging request is opposite to response, sometimes it's necessary,
feel free to enable it.
"""
block = 'Request Information:\n' + _format_headers_log(handler.request.headers)
if handler.request.arguments:
block += '+----Arguments----+\n'
for k, v in handler.request.arguments.items():
block += '| {0:<15} | {1:<15} \n'.format(repr(k), repr(v))
app_log.info(block) | Logging request is opposite to response, sometimes it's necessary,
feel free to enable it. |
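To make the format concrete, this is roughly what the arguments block evaluates to for a request with two parameters; a plain dict stands in for the Tornado request object.

# Build the same table-style block from a stand-in arguments dict.
arguments = {'q': ['python'], 'page': ['2']}
block = 'Request Information:\n'
block += '+----Arguments----+\n'
for k, v in arguments.items():
    block += '| {0:<15} | {1:<15} \n'.format(repr(k), repr(v))
print(block)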
def fault_sets(self):
"""
You can only create and configure Fault Sets before adding SDSs to the system, and configuring them incorrectly
may prevent the creation of volumes. An SDS can only be added to a Fault Set during the creation of the SDS.
:rtype: list of Faultset objects
"""
self.connection._check_login()
response = self.connection._do_get("{}/{}".format(self.connection._api_url, "types/FaultSet/instances")).json()
all_faultsets = []
for fs in response:
all_faultsets.append(
SIO_Fault_Set.from_dict(fs)
)
return all_faultsets | You can only create and configure Fault Sets before adding SDSs to the system, and configuring them incorrectly
may prevent the creation of volumes. An SDS can only be added to a Fault Set during the creation of the SDS.
:rtype: list of Faultset objects |
def _parse_depot_section(f):
"""Parse TSPLIB DEPOT_SECTION data part from file descriptor f
Args
----
f : str
File descriptor
Returns
-------
int
the single depot node id
"""
depots = []
for line in f:
line = strip(line)
if line == '-1' or line == 'EOF': # End of section
break
else:
depots.append(line)
if len(depots) != 1:
raise ParseException('One and only one depot is supported')
return int(depots[0]) | Parse TSPLIB DEPOT_SECTION data part from file descriptor f
Args
----
f : str
File descriptor
Returns
-------
int
the single depot node id |
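A DEPOT_SECTION in a TSPLIB/CVRP file lists depot node ids terminated by -1 (or EOF), and the parser above accepts exactly one. A self-contained check of that behaviour, where str.strip and ValueError stand in for the module-level strip and ParseException used above:

# Parse a one-depot DEPOT_SECTION from an in-memory file.
import io

def parse_depot_section(f):
    depots = []
    for line in f:
        line = line.strip()
        if line == '-1' or line == 'EOF':   # end of section
            break
        depots.append(line)
    if len(depots) != 1:
        raise ValueError('One and only one depot is supported')
    return int(depots[0])

assert parse_depot_section(io.StringIO("1\n-1\nEOF\n")) == 1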