code
stringlengths 75
104k
| code_sememe
stringlengths 47
309k
| token_type
stringlengths 215
214k
| code_dependency
stringlengths 75
155k
|
---|---|---|---|
def on_proposal(self, proposal, proto):
"called to inform about synced peers"
assert isinstance(proto, HDCProtocol)
assert isinstance(proposal, Proposal)
if proposal.height >= self.cm.height:
assert proposal.lockset.is_valid
self.last_active_protocol = proto | def function[on_proposal, parameter[self, proposal, proto]]:
constant[called to inform about synced peers]
assert[call[name[isinstance], parameter[name[proto], name[HDCProtocol]]]]
assert[call[name[isinstance], parameter[name[proposal], name[Proposal]]]]
if compare[name[proposal].height greater_or_equal[>=] name[self].cm.height] begin[:]
assert[name[proposal].lockset.is_valid]
name[self].last_active_protocol assign[=] name[proto] | keyword[def] identifier[on_proposal] ( identifier[self] , identifier[proposal] , identifier[proto] ):
literal[string]
keyword[assert] identifier[isinstance] ( identifier[proto] , identifier[HDCProtocol] )
keyword[assert] identifier[isinstance] ( identifier[proposal] , identifier[Proposal] )
keyword[if] identifier[proposal] . identifier[height] >= identifier[self] . identifier[cm] . identifier[height] :
keyword[assert] identifier[proposal] . identifier[lockset] . identifier[is_valid]
identifier[self] . identifier[last_active_protocol] = identifier[proto] | def on_proposal(self, proposal, proto):
"""called to inform about synced peers"""
assert isinstance(proto, HDCProtocol)
assert isinstance(proposal, Proposal)
if proposal.height >= self.cm.height:
assert proposal.lockset.is_valid
self.last_active_protocol = proto # depends on [control=['if'], data=[]] |
def get_memory_tar(image_path):
'''get an in memory tar of an image. Use carefully, not as reliable
as get_image_tar
'''
byte_array = Client.image.export(image_path)
file_object = io.BytesIO(byte_array)
tar = tarfile.open(mode="r|*", fileobj=file_object)
return (file_object,tar) | def function[get_memory_tar, parameter[image_path]]:
constant[get an in memory tar of an image. Use carefully, not as reliable
as get_image_tar
]
variable[byte_array] assign[=] call[name[Client].image.export, parameter[name[image_path]]]
variable[file_object] assign[=] call[name[io].BytesIO, parameter[name[byte_array]]]
variable[tar] assign[=] call[name[tarfile].open, parameter[]]
return[tuple[[<ast.Name object at 0x7da20e74beb0>, <ast.Name object at 0x7da20e748640>]]] | keyword[def] identifier[get_memory_tar] ( identifier[image_path] ):
literal[string]
identifier[byte_array] = identifier[Client] . identifier[image] . identifier[export] ( identifier[image_path] )
identifier[file_object] = identifier[io] . identifier[BytesIO] ( identifier[byte_array] )
identifier[tar] = identifier[tarfile] . identifier[open] ( identifier[mode] = literal[string] , identifier[fileobj] = identifier[file_object] )
keyword[return] ( identifier[file_object] , identifier[tar] ) | def get_memory_tar(image_path):
"""get an in memory tar of an image. Use carefully, not as reliable
as get_image_tar
"""
byte_array = Client.image.export(image_path)
file_object = io.BytesIO(byte_array)
tar = tarfile.open(mode='r|*', fileobj=file_object)
return (file_object, tar) |
def set_sample_type_default_stickers(portal):
"""
Fills the admitted stickers and their default stickers to every sample
type.
"""
# Getting all sticker templates
stickers = getStickerTemplates()
sticker_ids = []
for sticker in stickers:
sticker_ids.append(sticker.get('id'))
def_small_template = portal.bika_setup.getSmallStickerTemplate()
def_large_template = portal.bika_setup.getLargeStickerTemplate()
# Getting all Sample Type objects
catalog = api.get_tool('bika_setup_catalog')
brains = catalog(portal_type='SampleType')
for brain in brains:
obj = api.get_object(brain)
if obj.getAdmittedStickers() is not None:
continue
obj.setAdmittedStickers(sticker_ids)
obj.setDefaultLargeSticker(def_large_template)
obj.setDefaultSmallSticker(def_small_template) | def function[set_sample_type_default_stickers, parameter[portal]]:
constant[
Fills the admitted stickers and their default stickers to every sample
type.
]
variable[stickers] assign[=] call[name[getStickerTemplates], parameter[]]
variable[sticker_ids] assign[=] list[[]]
for taget[name[sticker]] in starred[name[stickers]] begin[:]
call[name[sticker_ids].append, parameter[call[name[sticker].get, parameter[constant[id]]]]]
variable[def_small_template] assign[=] call[name[portal].bika_setup.getSmallStickerTemplate, parameter[]]
variable[def_large_template] assign[=] call[name[portal].bika_setup.getLargeStickerTemplate, parameter[]]
variable[catalog] assign[=] call[name[api].get_tool, parameter[constant[bika_setup_catalog]]]
variable[brains] assign[=] call[name[catalog], parameter[]]
for taget[name[brain]] in starred[name[brains]] begin[:]
variable[obj] assign[=] call[name[api].get_object, parameter[name[brain]]]
if compare[call[name[obj].getAdmittedStickers, parameter[]] is_not constant[None]] begin[:]
continue
call[name[obj].setAdmittedStickers, parameter[name[sticker_ids]]]
call[name[obj].setDefaultLargeSticker, parameter[name[def_large_template]]]
call[name[obj].setDefaultSmallSticker, parameter[name[def_small_template]]] | keyword[def] identifier[set_sample_type_default_stickers] ( identifier[portal] ):
literal[string]
identifier[stickers] = identifier[getStickerTemplates] ()
identifier[sticker_ids] =[]
keyword[for] identifier[sticker] keyword[in] identifier[stickers] :
identifier[sticker_ids] . identifier[append] ( identifier[sticker] . identifier[get] ( literal[string] ))
identifier[def_small_template] = identifier[portal] . identifier[bika_setup] . identifier[getSmallStickerTemplate] ()
identifier[def_large_template] = identifier[portal] . identifier[bika_setup] . identifier[getLargeStickerTemplate] ()
identifier[catalog] = identifier[api] . identifier[get_tool] ( literal[string] )
identifier[brains] = identifier[catalog] ( identifier[portal_type] = literal[string] )
keyword[for] identifier[brain] keyword[in] identifier[brains] :
identifier[obj] = identifier[api] . identifier[get_object] ( identifier[brain] )
keyword[if] identifier[obj] . identifier[getAdmittedStickers] () keyword[is] keyword[not] keyword[None] :
keyword[continue]
identifier[obj] . identifier[setAdmittedStickers] ( identifier[sticker_ids] )
identifier[obj] . identifier[setDefaultLargeSticker] ( identifier[def_large_template] )
identifier[obj] . identifier[setDefaultSmallSticker] ( identifier[def_small_template] ) | def set_sample_type_default_stickers(portal):
"""
Fills the admitted stickers and their default stickers to every sample
type.
"""
# Getting all sticker templates
stickers = getStickerTemplates()
sticker_ids = []
for sticker in stickers:
sticker_ids.append(sticker.get('id')) # depends on [control=['for'], data=['sticker']]
def_small_template = portal.bika_setup.getSmallStickerTemplate()
def_large_template = portal.bika_setup.getLargeStickerTemplate()
# Getting all Sample Type objects
catalog = api.get_tool('bika_setup_catalog')
brains = catalog(portal_type='SampleType')
for brain in brains:
obj = api.get_object(brain)
if obj.getAdmittedStickers() is not None:
continue # depends on [control=['if'], data=[]]
obj.setAdmittedStickers(sticker_ids)
obj.setDefaultLargeSticker(def_large_template)
obj.setDefaultSmallSticker(def_small_template) # depends on [control=['for'], data=['brain']] |
def _iter_groups(self, data):
"""
Iterate over the groups in `data` after grouping by
`segmentation_col`. Skips any groups for which there
is no model stored.
Yields tuples of (name, df) where name is the group key
and df is the group DataFrame.
Parameters
----------
data : pandas.DataFrame
Must have a column with the same name as `segmentation_col`.
"""
groups = data.groupby(self.segmentation_col)
for name, group in groups:
if name not in self.models:
continue
logger.debug(
'returning group {} in LCM group {}'.format(name, self.name))
yield name, group | def function[_iter_groups, parameter[self, data]]:
constant[
Iterate over the groups in `data` after grouping by
`segmentation_col`. Skips any groups for which there
is no model stored.
Yields tuples of (name, df) where name is the group key
and df is the group DataFrame.
Parameters
----------
data : pandas.DataFrame
Must have a column with the same name as `segmentation_col`.
]
variable[groups] assign[=] call[name[data].groupby, parameter[name[self].segmentation_col]]
for taget[tuple[[<ast.Name object at 0x7da18dc9a0e0>, <ast.Name object at 0x7da18dc98d90>]]] in starred[name[groups]] begin[:]
if compare[name[name] <ast.NotIn object at 0x7da2590d7190> name[self].models] begin[:]
continue
call[name[logger].debug, parameter[call[constant[returning group {} in LCM group {}].format, parameter[name[name], name[self].name]]]]
<ast.Yield object at 0x7da18dc99e70> | keyword[def] identifier[_iter_groups] ( identifier[self] , identifier[data] ):
literal[string]
identifier[groups] = identifier[data] . identifier[groupby] ( identifier[self] . identifier[segmentation_col] )
keyword[for] identifier[name] , identifier[group] keyword[in] identifier[groups] :
keyword[if] identifier[name] keyword[not] keyword[in] identifier[self] . identifier[models] :
keyword[continue]
identifier[logger] . identifier[debug] (
literal[string] . identifier[format] ( identifier[name] , identifier[self] . identifier[name] ))
keyword[yield] identifier[name] , identifier[group] | def _iter_groups(self, data):
"""
Iterate over the groups in `data` after grouping by
`segmentation_col`. Skips any groups for which there
is no model stored.
Yields tuples of (name, df) where name is the group key
and df is the group DataFrame.
Parameters
----------
data : pandas.DataFrame
Must have a column with the same name as `segmentation_col`.
"""
groups = data.groupby(self.segmentation_col)
for (name, group) in groups:
if name not in self.models:
continue # depends on [control=['if'], data=[]]
logger.debug('returning group {} in LCM group {}'.format(name, self.name))
yield (name, group) # depends on [control=['for'], data=[]] |
def from_plugin_classname(plugin_classname, exclude_lines_regex=None, **kwargs):
"""Initializes a plugin class, given a classname and kwargs.
:type plugin_classname: str
:param plugin_classname: subclass of BasePlugin.
:type exclude_lines_regex: str|None
:param exclude_lines_regex: optional regex for ignored lines.
"""
klass = globals()[plugin_classname]
# Make sure the instance is a BasePlugin type, before creating it.
if not issubclass(klass, BasePlugin):
raise TypeError
try:
instance = klass(exclude_lines_regex=exclude_lines_regex, **kwargs)
except TypeError:
log.warning(
'Unable to initialize plugin!',
)
raise
return instance | def function[from_plugin_classname, parameter[plugin_classname, exclude_lines_regex]]:
constant[Initializes a plugin class, given a classname and kwargs.
:type plugin_classname: str
:param plugin_classname: subclass of BasePlugin.
:type exclude_lines_regex: str|None
:param exclude_lines_regex: optional regex for ignored lines.
]
variable[klass] assign[=] call[call[name[globals], parameter[]]][name[plugin_classname]]
if <ast.UnaryOp object at 0x7da20c795630> begin[:]
<ast.Raise object at 0x7da20c795d50>
<ast.Try object at 0x7da20c795e70>
return[name[instance]] | keyword[def] identifier[from_plugin_classname] ( identifier[plugin_classname] , identifier[exclude_lines_regex] = keyword[None] ,** identifier[kwargs] ):
literal[string]
identifier[klass] = identifier[globals] ()[ identifier[plugin_classname] ]
keyword[if] keyword[not] identifier[issubclass] ( identifier[klass] , identifier[BasePlugin] ):
keyword[raise] identifier[TypeError]
keyword[try] :
identifier[instance] = identifier[klass] ( identifier[exclude_lines_regex] = identifier[exclude_lines_regex] ,** identifier[kwargs] )
keyword[except] identifier[TypeError] :
identifier[log] . identifier[warning] (
literal[string] ,
)
keyword[raise]
keyword[return] identifier[instance] | def from_plugin_classname(plugin_classname, exclude_lines_regex=None, **kwargs):
"""Initializes a plugin class, given a classname and kwargs.
:type plugin_classname: str
:param plugin_classname: subclass of BasePlugin.
:type exclude_lines_regex: str|None
:param exclude_lines_regex: optional regex for ignored lines.
"""
klass = globals()[plugin_classname]
# Make sure the instance is a BasePlugin type, before creating it.
if not issubclass(klass, BasePlugin):
raise TypeError # depends on [control=['if'], data=[]]
try:
instance = klass(exclude_lines_regex=exclude_lines_regex, **kwargs) # depends on [control=['try'], data=[]]
except TypeError:
log.warning('Unable to initialize plugin!')
raise # depends on [control=['except'], data=[]]
return instance |
def fromtab(args):
"""
%prog fromtab tabfile fastafile
Convert 2-column sequence file to FASTA format. One usage for this is to
generatea `adapters.fasta` for TRIMMOMATIC.
"""
p = OptionParser(fromtab.__doc__)
p.set_sep(sep=None)
p.add_option("--noheader", default=False, action="store_true",
help="Ignore first line")
p.add_option("--replace",
help="Replace spaces in name to char [default: %default]")
opts, args = p.parse_args(args)
if len(args) != 2:
sys.exit(not p.print_help())
tabfile, fastafile = args
sep = opts.sep
replace = opts.replace
fp = must_open(tabfile)
fw = must_open(fastafile, "w")
nseq = 0
if opts.noheader:
next(fp)
for row in fp:
row = row.strip()
if not row or row[0] == '#':
continue
name, seq = row.rsplit(sep, 1)
if replace:
name = name.replace(" ", replace)
print(">{0}\n{1}".format(name, seq), file=fw)
nseq += 1
fw.close()
logging.debug("A total of {0} sequences written to `{1}`.".\
format(nseq, fastafile)) | def function[fromtab, parameter[args]]:
constant[
%prog fromtab tabfile fastafile
Convert 2-column sequence file to FASTA format. One usage for this is to
generatea `adapters.fasta` for TRIMMOMATIC.
]
variable[p] assign[=] call[name[OptionParser], parameter[name[fromtab].__doc__]]
call[name[p].set_sep, parameter[]]
call[name[p].add_option, parameter[constant[--noheader]]]
call[name[p].add_option, parameter[constant[--replace]]]
<ast.Tuple object at 0x7da1b08d33d0> assign[=] call[name[p].parse_args, parameter[name[args]]]
if compare[call[name[len], parameter[name[args]]] not_equal[!=] constant[2]] begin[:]
call[name[sys].exit, parameter[<ast.UnaryOp object at 0x7da1b08d10f0>]]
<ast.Tuple object at 0x7da1b08d1240> assign[=] name[args]
variable[sep] assign[=] name[opts].sep
variable[replace] assign[=] name[opts].replace
variable[fp] assign[=] call[name[must_open], parameter[name[tabfile]]]
variable[fw] assign[=] call[name[must_open], parameter[name[fastafile], constant[w]]]
variable[nseq] assign[=] constant[0]
if name[opts].noheader begin[:]
call[name[next], parameter[name[fp]]]
for taget[name[row]] in starred[name[fp]] begin[:]
variable[row] assign[=] call[name[row].strip, parameter[]]
if <ast.BoolOp object at 0x7da1b08d1ae0> begin[:]
continue
<ast.Tuple object at 0x7da1b08d2ad0> assign[=] call[name[row].rsplit, parameter[name[sep], constant[1]]]
if name[replace] begin[:]
variable[name] assign[=] call[name[name].replace, parameter[constant[ ], name[replace]]]
call[name[print], parameter[call[constant[>{0}
{1}].format, parameter[name[name], name[seq]]]]]
<ast.AugAssign object at 0x7da1b08d1ff0>
call[name[fw].close, parameter[]]
call[name[logging].debug, parameter[call[constant[A total of {0} sequences written to `{1}`.].format, parameter[name[nseq], name[fastafile]]]]] | keyword[def] identifier[fromtab] ( identifier[args] ):
literal[string]
identifier[p] = identifier[OptionParser] ( identifier[fromtab] . identifier[__doc__] )
identifier[p] . identifier[set_sep] ( identifier[sep] = keyword[None] )
identifier[p] . identifier[add_option] ( literal[string] , identifier[default] = keyword[False] , identifier[action] = literal[string] ,
identifier[help] = literal[string] )
identifier[p] . identifier[add_option] ( literal[string] ,
identifier[help] = literal[string] )
identifier[opts] , identifier[args] = identifier[p] . identifier[parse_args] ( identifier[args] )
keyword[if] identifier[len] ( identifier[args] )!= literal[int] :
identifier[sys] . identifier[exit] ( keyword[not] identifier[p] . identifier[print_help] ())
identifier[tabfile] , identifier[fastafile] = identifier[args]
identifier[sep] = identifier[opts] . identifier[sep]
identifier[replace] = identifier[opts] . identifier[replace]
identifier[fp] = identifier[must_open] ( identifier[tabfile] )
identifier[fw] = identifier[must_open] ( identifier[fastafile] , literal[string] )
identifier[nseq] = literal[int]
keyword[if] identifier[opts] . identifier[noheader] :
identifier[next] ( identifier[fp] )
keyword[for] identifier[row] keyword[in] identifier[fp] :
identifier[row] = identifier[row] . identifier[strip] ()
keyword[if] keyword[not] identifier[row] keyword[or] identifier[row] [ literal[int] ]== literal[string] :
keyword[continue]
identifier[name] , identifier[seq] = identifier[row] . identifier[rsplit] ( identifier[sep] , literal[int] )
keyword[if] identifier[replace] :
identifier[name] = identifier[name] . identifier[replace] ( literal[string] , identifier[replace] )
identifier[print] ( literal[string] . identifier[format] ( identifier[name] , identifier[seq] ), identifier[file] = identifier[fw] )
identifier[nseq] += literal[int]
identifier[fw] . identifier[close] ()
identifier[logging] . identifier[debug] ( literal[string] . identifier[format] ( identifier[nseq] , identifier[fastafile] )) | def fromtab(args):
"""
%prog fromtab tabfile fastafile
Convert 2-column sequence file to FASTA format. One usage for this is to
generatea `adapters.fasta` for TRIMMOMATIC.
"""
p = OptionParser(fromtab.__doc__)
p.set_sep(sep=None)
p.add_option('--noheader', default=False, action='store_true', help='Ignore first line')
p.add_option('--replace', help='Replace spaces in name to char [default: %default]')
(opts, args) = p.parse_args(args)
if len(args) != 2:
sys.exit(not p.print_help()) # depends on [control=['if'], data=[]]
(tabfile, fastafile) = args
sep = opts.sep
replace = opts.replace
fp = must_open(tabfile)
fw = must_open(fastafile, 'w')
nseq = 0
if opts.noheader:
next(fp) # depends on [control=['if'], data=[]]
for row in fp:
row = row.strip()
if not row or row[0] == '#':
continue # depends on [control=['if'], data=[]]
(name, seq) = row.rsplit(sep, 1)
if replace:
name = name.replace(' ', replace) # depends on [control=['if'], data=[]]
print('>{0}\n{1}'.format(name, seq), file=fw)
nseq += 1 # depends on [control=['for'], data=['row']]
fw.close()
logging.debug('A total of {0} sequences written to `{1}`.'.format(nseq, fastafile)) |
def to_ufo_glyph_background(self, glyph, layer):
"""Set glyph background."""
if not layer.hasBackground:
return
background = layer.background
ufo_layer = self.to_ufo_background_layer(glyph)
new_glyph = ufo_layer.newGlyph(glyph.name)
width = background.userData[BACKGROUND_WIDTH_KEY]
if width is not None:
new_glyph.width = width
self.to_ufo_background_image(new_glyph, background)
self.to_ufo_paths(new_glyph, background)
self.to_ufo_components(new_glyph, background)
self.to_ufo_glyph_anchors(new_glyph, background.anchors)
self.to_ufo_guidelines(new_glyph, background) | def function[to_ufo_glyph_background, parameter[self, glyph, layer]]:
constant[Set glyph background.]
if <ast.UnaryOp object at 0x7da18ede7b80> begin[:]
return[None]
variable[background] assign[=] name[layer].background
variable[ufo_layer] assign[=] call[name[self].to_ufo_background_layer, parameter[name[glyph]]]
variable[new_glyph] assign[=] call[name[ufo_layer].newGlyph, parameter[name[glyph].name]]
variable[width] assign[=] call[name[background].userData][name[BACKGROUND_WIDTH_KEY]]
if compare[name[width] is_not constant[None]] begin[:]
name[new_glyph].width assign[=] name[width]
call[name[self].to_ufo_background_image, parameter[name[new_glyph], name[background]]]
call[name[self].to_ufo_paths, parameter[name[new_glyph], name[background]]]
call[name[self].to_ufo_components, parameter[name[new_glyph], name[background]]]
call[name[self].to_ufo_glyph_anchors, parameter[name[new_glyph], name[background].anchors]]
call[name[self].to_ufo_guidelines, parameter[name[new_glyph], name[background]]] | keyword[def] identifier[to_ufo_glyph_background] ( identifier[self] , identifier[glyph] , identifier[layer] ):
literal[string]
keyword[if] keyword[not] identifier[layer] . identifier[hasBackground] :
keyword[return]
identifier[background] = identifier[layer] . identifier[background]
identifier[ufo_layer] = identifier[self] . identifier[to_ufo_background_layer] ( identifier[glyph] )
identifier[new_glyph] = identifier[ufo_layer] . identifier[newGlyph] ( identifier[glyph] . identifier[name] )
identifier[width] = identifier[background] . identifier[userData] [ identifier[BACKGROUND_WIDTH_KEY] ]
keyword[if] identifier[width] keyword[is] keyword[not] keyword[None] :
identifier[new_glyph] . identifier[width] = identifier[width]
identifier[self] . identifier[to_ufo_background_image] ( identifier[new_glyph] , identifier[background] )
identifier[self] . identifier[to_ufo_paths] ( identifier[new_glyph] , identifier[background] )
identifier[self] . identifier[to_ufo_components] ( identifier[new_glyph] , identifier[background] )
identifier[self] . identifier[to_ufo_glyph_anchors] ( identifier[new_glyph] , identifier[background] . identifier[anchors] )
identifier[self] . identifier[to_ufo_guidelines] ( identifier[new_glyph] , identifier[background] ) | def to_ufo_glyph_background(self, glyph, layer):
"""Set glyph background."""
if not layer.hasBackground:
return # depends on [control=['if'], data=[]]
background = layer.background
ufo_layer = self.to_ufo_background_layer(glyph)
new_glyph = ufo_layer.newGlyph(glyph.name)
width = background.userData[BACKGROUND_WIDTH_KEY]
if width is not None:
new_glyph.width = width # depends on [control=['if'], data=['width']]
self.to_ufo_background_image(new_glyph, background)
self.to_ufo_paths(new_glyph, background)
self.to_ufo_components(new_glyph, background)
self.to_ufo_glyph_anchors(new_glyph, background.anchors)
self.to_ufo_guidelines(new_glyph, background) |
def increment(self, subname=None, delta=1):
'''Increment the gauge with `delta`
:keyword subname: The subname to report the data to (appended to the
client name)
:type subname: str
:keyword delta: The delta to add to the gauge
:type delta: int
>>> gauge = Gauge('application_name')
>>> gauge.increment('gauge_name', 10)
True
>>> gauge.increment(delta=10)
True
>>> gauge.increment('gauge_name')
True
'''
delta = int(delta)
sign = "+" if delta >= 0 else ""
return self._send(subname, "%s%d" % (sign, delta)) | def function[increment, parameter[self, subname, delta]]:
constant[Increment the gauge with `delta`
:keyword subname: The subname to report the data to (appended to the
client name)
:type subname: str
:keyword delta: The delta to add to the gauge
:type delta: int
>>> gauge = Gauge('application_name')
>>> gauge.increment('gauge_name', 10)
True
>>> gauge.increment(delta=10)
True
>>> gauge.increment('gauge_name')
True
]
variable[delta] assign[=] call[name[int], parameter[name[delta]]]
variable[sign] assign[=] <ast.IfExp object at 0x7da1b0246200>
return[call[name[self]._send, parameter[name[subname], binary_operation[constant[%s%d] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da1b02459c0>, <ast.Name object at 0x7da1b0245300>]]]]]] | keyword[def] identifier[increment] ( identifier[self] , identifier[subname] = keyword[None] , identifier[delta] = literal[int] ):
literal[string]
identifier[delta] = identifier[int] ( identifier[delta] )
identifier[sign] = literal[string] keyword[if] identifier[delta] >= literal[int] keyword[else] literal[string]
keyword[return] identifier[self] . identifier[_send] ( identifier[subname] , literal[string] %( identifier[sign] , identifier[delta] )) | def increment(self, subname=None, delta=1):
"""Increment the gauge with `delta`
:keyword subname: The subname to report the data to (appended to the
client name)
:type subname: str
:keyword delta: The delta to add to the gauge
:type delta: int
>>> gauge = Gauge('application_name')
>>> gauge.increment('gauge_name', 10)
True
>>> gauge.increment(delta=10)
True
>>> gauge.increment('gauge_name')
True
"""
delta = int(delta)
sign = '+' if delta >= 0 else ''
return self._send(subname, '%s%d' % (sign, delta)) |
def suffix(args):
"""
%prog suffix fastqfile CAG
Filter reads based on suffix.
"""
p = OptionParser(suffix.__doc__)
p.set_outfile()
opts, args = p.parse_args(args)
if len(args) != 2:
sys.exit(not p.print_help())
fastqfile, sf = args
fw = must_open(opts.outfile, "w")
nreads = nselected = 0
for rec in iter_fastq(fastqfile):
nreads += 1
if rec is None:
break
if rec.seq.endswith(sf):
print(rec, file=fw)
nselected += 1
logging.debug("Selected reads with suffix {0}: {1}".\
format(sf, percentage(nselected, nreads))) | def function[suffix, parameter[args]]:
constant[
%prog suffix fastqfile CAG
Filter reads based on suffix.
]
variable[p] assign[=] call[name[OptionParser], parameter[name[suffix].__doc__]]
call[name[p].set_outfile, parameter[]]
<ast.Tuple object at 0x7da18fe91510> assign[=] call[name[p].parse_args, parameter[name[args]]]
if compare[call[name[len], parameter[name[args]]] not_equal[!=] constant[2]] begin[:]
call[name[sys].exit, parameter[<ast.UnaryOp object at 0x7da18fe912a0>]]
<ast.Tuple object at 0x7da18fe91ae0> assign[=] name[args]
variable[fw] assign[=] call[name[must_open], parameter[name[opts].outfile, constant[w]]]
variable[nreads] assign[=] constant[0]
for taget[name[rec]] in starred[call[name[iter_fastq], parameter[name[fastqfile]]]] begin[:]
<ast.AugAssign object at 0x7da20e961270>
if compare[name[rec] is constant[None]] begin[:]
break
if call[name[rec].seq.endswith, parameter[name[sf]]] begin[:]
call[name[print], parameter[name[rec]]]
<ast.AugAssign object at 0x7da20e960610>
call[name[logging].debug, parameter[call[constant[Selected reads with suffix {0}: {1}].format, parameter[name[sf], call[name[percentage], parameter[name[nselected], name[nreads]]]]]]] | keyword[def] identifier[suffix] ( identifier[args] ):
literal[string]
identifier[p] = identifier[OptionParser] ( identifier[suffix] . identifier[__doc__] )
identifier[p] . identifier[set_outfile] ()
identifier[opts] , identifier[args] = identifier[p] . identifier[parse_args] ( identifier[args] )
keyword[if] identifier[len] ( identifier[args] )!= literal[int] :
identifier[sys] . identifier[exit] ( keyword[not] identifier[p] . identifier[print_help] ())
identifier[fastqfile] , identifier[sf] = identifier[args]
identifier[fw] = identifier[must_open] ( identifier[opts] . identifier[outfile] , literal[string] )
identifier[nreads] = identifier[nselected] = literal[int]
keyword[for] identifier[rec] keyword[in] identifier[iter_fastq] ( identifier[fastqfile] ):
identifier[nreads] += literal[int]
keyword[if] identifier[rec] keyword[is] keyword[None] :
keyword[break]
keyword[if] identifier[rec] . identifier[seq] . identifier[endswith] ( identifier[sf] ):
identifier[print] ( identifier[rec] , identifier[file] = identifier[fw] )
identifier[nselected] += literal[int]
identifier[logging] . identifier[debug] ( literal[string] . identifier[format] ( identifier[sf] , identifier[percentage] ( identifier[nselected] , identifier[nreads] ))) | def suffix(args):
"""
%prog suffix fastqfile CAG
Filter reads based on suffix.
"""
p = OptionParser(suffix.__doc__)
p.set_outfile()
(opts, args) = p.parse_args(args)
if len(args) != 2:
sys.exit(not p.print_help()) # depends on [control=['if'], data=[]]
(fastqfile, sf) = args
fw = must_open(opts.outfile, 'w')
nreads = nselected = 0
for rec in iter_fastq(fastqfile):
nreads += 1
if rec is None:
break # depends on [control=['if'], data=[]]
if rec.seq.endswith(sf):
print(rec, file=fw)
nselected += 1 # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['rec']]
logging.debug('Selected reads with suffix {0}: {1}'.format(sf, percentage(nselected, nreads))) |
def DeleteNodeTags(r, node, tags, dry_run=False):
"""
Delete tags from a node.
@type node: str
@param node: node to remove tags from
@type tags: list of str
@param tags: tags to remove from the node
@type dry_run: bool
@param dry_run: whether to perform a dry run
@rtype: int
@return: job id
"""
query = {
"tag": tags,
"dry-run": dry_run,
}
return r.request("delete", "/2/nodes/%s/tags" % node, query=query) | def function[DeleteNodeTags, parameter[r, node, tags, dry_run]]:
constant[
Delete tags from a node.
@type node: str
@param node: node to remove tags from
@type tags: list of str
@param tags: tags to remove from the node
@type dry_run: bool
@param dry_run: whether to perform a dry run
@rtype: int
@return: job id
]
variable[query] assign[=] dictionary[[<ast.Constant object at 0x7da18f00f520>, <ast.Constant object at 0x7da18f00d0c0>], [<ast.Name object at 0x7da18f00dc60>, <ast.Name object at 0x7da18f00ed10>]]
return[call[name[r].request, parameter[constant[delete], binary_operation[constant[/2/nodes/%s/tags] <ast.Mod object at 0x7da2590d6920> name[node]]]]] | keyword[def] identifier[DeleteNodeTags] ( identifier[r] , identifier[node] , identifier[tags] , identifier[dry_run] = keyword[False] ):
literal[string]
identifier[query] ={
literal[string] : identifier[tags] ,
literal[string] : identifier[dry_run] ,
}
keyword[return] identifier[r] . identifier[request] ( literal[string] , literal[string] % identifier[node] , identifier[query] = identifier[query] ) | def DeleteNodeTags(r, node, tags, dry_run=False):
"""
Delete tags from a node.
@type node: str
@param node: node to remove tags from
@type tags: list of str
@param tags: tags to remove from the node
@type dry_run: bool
@param dry_run: whether to perform a dry run
@rtype: int
@return: job id
"""
query = {'tag': tags, 'dry-run': dry_run}
return r.request('delete', '/2/nodes/%s/tags' % node, query=query) |
def Audits_getEncodedResponse(self, requestId, encoding, **kwargs):
"""
Function path: Audits.getEncodedResponse
Domain: Audits
Method name: getEncodedResponse
Parameters:
Required arguments:
'requestId' (type: Network.RequestId) -> Identifier of the network request to get content for.
'encoding' (type: string) -> The encoding to use.
Optional arguments:
'quality' (type: number) -> The quality of the encoding (0-1). (defaults to 1)
'sizeOnly' (type: boolean) -> Whether to only return the size information (defaults to false).
Returns:
'body' (type: string) -> The encoded body as a base64 string. Omitted if sizeOnly is true.
'originalSize' (type: integer) -> Size before re-encoding.
'encodedSize' (type: integer) -> Size after re-encoding.
Description: Returns the response body and size if it were re-encoded with the specified settings. Only applies to images.
"""
assert isinstance(encoding, (str,)
), "Argument 'encoding' must be of type '['str']'. Received type: '%s'" % type(
encoding)
if 'quality' in kwargs:
assert isinstance(kwargs['quality'], (float, int)
), "Optional argument 'quality' must be of type '['float', 'int']'. Received type: '%s'" % type(
kwargs['quality'])
if 'sizeOnly' in kwargs:
assert isinstance(kwargs['sizeOnly'], (bool,)
), "Optional argument 'sizeOnly' must be of type '['bool']'. Received type: '%s'" % type(
kwargs['sizeOnly'])
expected = ['quality', 'sizeOnly']
passed_keys = list(kwargs.keys())
assert all([(key in expected) for key in passed_keys]
), "Allowed kwargs are ['quality', 'sizeOnly']. Passed kwargs: %s" % passed_keys
subdom_funcs = self.synchronous_command('Audits.getEncodedResponse',
requestId=requestId, encoding=encoding, **kwargs)
return subdom_funcs | def function[Audits_getEncodedResponse, parameter[self, requestId, encoding]]:
constant[
Function path: Audits.getEncodedResponse
Domain: Audits
Method name: getEncodedResponse
Parameters:
Required arguments:
'requestId' (type: Network.RequestId) -> Identifier of the network request to get content for.
'encoding' (type: string) -> The encoding to use.
Optional arguments:
'quality' (type: number) -> The quality of the encoding (0-1). (defaults to 1)
'sizeOnly' (type: boolean) -> Whether to only return the size information (defaults to false).
Returns:
'body' (type: string) -> The encoded body as a base64 string. Omitted if sizeOnly is true.
'originalSize' (type: integer) -> Size before re-encoding.
'encodedSize' (type: integer) -> Size after re-encoding.
Description: Returns the response body and size if it were re-encoded with the specified settings. Only applies to images.
]
assert[call[name[isinstance], parameter[name[encoding], tuple[[<ast.Name object at 0x7da1b118c4f0>]]]]]
if compare[constant[quality] in name[kwargs]] begin[:]
assert[call[name[isinstance], parameter[call[name[kwargs]][constant[quality]], tuple[[<ast.Name object at 0x7da1b118c1f0>, <ast.Name object at 0x7da1b118d300>]]]]]
if compare[constant[sizeOnly] in name[kwargs]] begin[:]
assert[call[name[isinstance], parameter[call[name[kwargs]][constant[sizeOnly]], tuple[[<ast.Name object at 0x7da1b118c250>]]]]]
variable[expected] assign[=] list[[<ast.Constant object at 0x7da1b102a410>, <ast.Constant object at 0x7da1b102a4d0>]]
variable[passed_keys] assign[=] call[name[list], parameter[call[name[kwargs].keys, parameter[]]]]
assert[call[name[all], parameter[<ast.ListComp object at 0x7da1b102a200>]]]
variable[subdom_funcs] assign[=] call[name[self].synchronous_command, parameter[constant[Audits.getEncodedResponse]]]
return[name[subdom_funcs]] | keyword[def] identifier[Audits_getEncodedResponse] ( identifier[self] , identifier[requestId] , identifier[encoding] ,** identifier[kwargs] ):
literal[string]
keyword[assert] identifier[isinstance] ( identifier[encoding] ,( identifier[str] ,)
), literal[string] % identifier[type] (
identifier[encoding] )
keyword[if] literal[string] keyword[in] identifier[kwargs] :
keyword[assert] identifier[isinstance] ( identifier[kwargs] [ literal[string] ],( identifier[float] , identifier[int] )
), literal[string] % identifier[type] (
identifier[kwargs] [ literal[string] ])
keyword[if] literal[string] keyword[in] identifier[kwargs] :
keyword[assert] identifier[isinstance] ( identifier[kwargs] [ literal[string] ],( identifier[bool] ,)
), literal[string] % identifier[type] (
identifier[kwargs] [ literal[string] ])
identifier[expected] =[ literal[string] , literal[string] ]
identifier[passed_keys] = identifier[list] ( identifier[kwargs] . identifier[keys] ())
keyword[assert] identifier[all] ([( identifier[key] keyword[in] identifier[expected] ) keyword[for] identifier[key] keyword[in] identifier[passed_keys] ]
), literal[string] % identifier[passed_keys]
identifier[subdom_funcs] = identifier[self] . identifier[synchronous_command] ( literal[string] ,
identifier[requestId] = identifier[requestId] , identifier[encoding] = identifier[encoding] ,** identifier[kwargs] )
keyword[return] identifier[subdom_funcs] | def Audits_getEncodedResponse(self, requestId, encoding, **kwargs):
"""
Function path: Audits.getEncodedResponse
Domain: Audits
Method name: getEncodedResponse
Parameters:
Required arguments:
'requestId' (type: Network.RequestId) -> Identifier of the network request to get content for.
'encoding' (type: string) -> The encoding to use.
Optional arguments:
'quality' (type: number) -> The quality of the encoding (0-1). (defaults to 1)
'sizeOnly' (type: boolean) -> Whether to only return the size information (defaults to false).
Returns:
'body' (type: string) -> The encoded body as a base64 string. Omitted if sizeOnly is true.
'originalSize' (type: integer) -> Size before re-encoding.
'encodedSize' (type: integer) -> Size after re-encoding.
Description: Returns the response body and size if it were re-encoded with the specified settings. Only applies to images.
"""
assert isinstance(encoding, (str,)), "Argument 'encoding' must be of type '['str']'. Received type: '%s'" % type(encoding)
if 'quality' in kwargs:
assert isinstance(kwargs['quality'], (float, int)), "Optional argument 'quality' must be of type '['float', 'int']'. Received type: '%s'" % type(kwargs['quality']) # depends on [control=['if'], data=['kwargs']]
if 'sizeOnly' in kwargs:
assert isinstance(kwargs['sizeOnly'], (bool,)), "Optional argument 'sizeOnly' must be of type '['bool']'. Received type: '%s'" % type(kwargs['sizeOnly']) # depends on [control=['if'], data=['kwargs']]
expected = ['quality', 'sizeOnly']
passed_keys = list(kwargs.keys())
assert all([key in expected for key in passed_keys]), "Allowed kwargs are ['quality', 'sizeOnly']. Passed kwargs: %s" % passed_keys
subdom_funcs = self.synchronous_command('Audits.getEncodedResponse', requestId=requestId, encoding=encoding, **kwargs)
return subdom_funcs |
def ancestors(obj, refattrs=(ALIGNMENT, SEGMENTATION)):
"""
>>> for anc in query.ancestors(igt.get_item('g1'), refattrs=(ALIGNMENT, SEGMENTATION)):
... print(anc)
(<Tier object (id: g type: glosses) at ...>, 'alignment', <Tier object (id: m type: morphemes) at ...>, [<Item object (id: m1) at ...>])
(<Tier object (id: m type: morphemes) at ...>, 'segmentation', <Tier object (id: w type: words) at ...>, [<Item object (id: w1) at ...>])
(<Tier object (id: w type: words) at ...>, 'segmentation', <Tier object (id: p type: phrases) at ...>, [<Item object (id: p1) at ...>])
"""
if hasattr(obj, 'tier'):
tier = obj.tier
items = [obj]
else:
tier = obj
items = tier.items
# a tier may be visited twice (e.g. A > B > A), but then it stops;
# this is to avoid cycles
visited = set([tier.id])
while True:
# get the first specified attribute
refattr = next((ra for ra in refattrs if ra in tier.attributes), None)
if not refattr:
break
reftier = ref.dereference(tier, refattr)
ids = set(chain.from_iterable(
ref.ids(item.attributes.get(refattr, '')) for item in items
))
refitems = [item for item in reftier.items if item.id in ids]
yield (tier, refattr, reftier, refitems)
# cycle detection; break if we've now encountered something twice
if reftier.id in visited:
break
visited.update(reftier.id)
tier = reftier
items = refitems | def function[ancestors, parameter[obj, refattrs]]:
constant[
>>> for anc in query.ancestors(igt.get_item('g1'), refattrs=(ALIGNMENT, SEGMENTATION)):
... print(anc)
(<Tier object (id: g type: glosses) at ...>, 'alignment', <Tier object (id: m type: morphemes) at ...>, [<Item object (id: m1) at ...>])
(<Tier object (id: m type: morphemes) at ...>, 'segmentation', <Tier object (id: w type: words) at ...>, [<Item object (id: w1) at ...>])
(<Tier object (id: w type: words) at ...>, 'segmentation', <Tier object (id: p type: phrases) at ...>, [<Item object (id: p1) at ...>])
]
if call[name[hasattr], parameter[name[obj], constant[tier]]] begin[:]
variable[tier] assign[=] name[obj].tier
variable[items] assign[=] list[[<ast.Name object at 0x7da2046219c0>]]
variable[visited] assign[=] call[name[set], parameter[list[[<ast.Attribute object at 0x7da204623d00>]]]]
while constant[True] begin[:]
variable[refattr] assign[=] call[name[next], parameter[<ast.GeneratorExp object at 0x7da2046229e0>, constant[None]]]
if <ast.UnaryOp object at 0x7da204621ab0> begin[:]
break
variable[reftier] assign[=] call[name[ref].dereference, parameter[name[tier], name[refattr]]]
variable[ids] assign[=] call[name[set], parameter[call[name[chain].from_iterable, parameter[<ast.GeneratorExp object at 0x7da2046202b0>]]]]
variable[refitems] assign[=] <ast.ListComp object at 0x7da2041d81f0>
<ast.Yield object at 0x7da18f09de40>
if compare[name[reftier].id in name[visited]] begin[:]
break
call[name[visited].update, parameter[name[reftier].id]]
variable[tier] assign[=] name[reftier]
variable[items] assign[=] name[refitems] | keyword[def] identifier[ancestors] ( identifier[obj] , identifier[refattrs] =( identifier[ALIGNMENT] , identifier[SEGMENTATION] )):
literal[string]
keyword[if] identifier[hasattr] ( identifier[obj] , literal[string] ):
identifier[tier] = identifier[obj] . identifier[tier]
identifier[items] =[ identifier[obj] ]
keyword[else] :
identifier[tier] = identifier[obj]
identifier[items] = identifier[tier] . identifier[items]
identifier[visited] = identifier[set] ([ identifier[tier] . identifier[id] ])
keyword[while] keyword[True] :
identifier[refattr] = identifier[next] (( identifier[ra] keyword[for] identifier[ra] keyword[in] identifier[refattrs] keyword[if] identifier[ra] keyword[in] identifier[tier] . identifier[attributes] ), keyword[None] )
keyword[if] keyword[not] identifier[refattr] :
keyword[break]
identifier[reftier] = identifier[ref] . identifier[dereference] ( identifier[tier] , identifier[refattr] )
identifier[ids] = identifier[set] ( identifier[chain] . identifier[from_iterable] (
identifier[ref] . identifier[ids] ( identifier[item] . identifier[attributes] . identifier[get] ( identifier[refattr] , literal[string] )) keyword[for] identifier[item] keyword[in] identifier[items]
))
identifier[refitems] =[ identifier[item] keyword[for] identifier[item] keyword[in] identifier[reftier] . identifier[items] keyword[if] identifier[item] . identifier[id] keyword[in] identifier[ids] ]
keyword[yield] ( identifier[tier] , identifier[refattr] , identifier[reftier] , identifier[refitems] )
keyword[if] identifier[reftier] . identifier[id] keyword[in] identifier[visited] :
keyword[break]
identifier[visited] . identifier[update] ( identifier[reftier] . identifier[id] )
identifier[tier] = identifier[reftier]
identifier[items] = identifier[refitems] | def ancestors(obj, refattrs=(ALIGNMENT, SEGMENTATION)):
"""
>>> for anc in query.ancestors(igt.get_item('g1'), refattrs=(ALIGNMENT, SEGMENTATION)):
... print(anc)
(<Tier object (id: g type: glosses) at ...>, 'alignment', <Tier object (id: m type: morphemes) at ...>, [<Item object (id: m1) at ...>])
(<Tier object (id: m type: morphemes) at ...>, 'segmentation', <Tier object (id: w type: words) at ...>, [<Item object (id: w1) at ...>])
(<Tier object (id: w type: words) at ...>, 'segmentation', <Tier object (id: p type: phrases) at ...>, [<Item object (id: p1) at ...>])
"""
if hasattr(obj, 'tier'):
tier = obj.tier
items = [obj] # depends on [control=['if'], data=[]]
else:
tier = obj
items = tier.items
# a tier may be visited twice (e.g. A > B > A), but then it stops;
# this is to avoid cycles
visited = set([tier.id])
while True:
# get the first specified attribute
refattr = next((ra for ra in refattrs if ra in tier.attributes), None)
if not refattr:
break # depends on [control=['if'], data=[]]
reftier = ref.dereference(tier, refattr)
ids = set(chain.from_iterable((ref.ids(item.attributes.get(refattr, '')) for item in items)))
refitems = [item for item in reftier.items if item.id in ids]
yield (tier, refattr, reftier, refitems)
# cycle detection; break if we've now encountered something twice
if reftier.id in visited:
break # depends on [control=['if'], data=[]]
visited.update(reftier.id)
tier = reftier
items = refitems # depends on [control=['while'], data=[]] |
def cublasZsymv(handle, uplo, n, alpha, A, lda, x, incx, beta, y, incy):
"""
Matrix-vector product for complex symmetric matrix.
"""
status = _libcublas.cublasZsymv_v2(handle,
_CUBLAS_FILL_MODE[uplo], n,
ctypes.byref(cuda.cuDoubleComplex(alpha.real,
alpha.imag)),
int(A), lda, int(x), incx,
ctypes.byref(cuda.cuDoubleComplex(beta.real,
beta.imag)),
int(y), incy)
cublasCheckStatus(status) | def function[cublasZsymv, parameter[handle, uplo, n, alpha, A, lda, x, incx, beta, y, incy]]:
constant[
Matrix-vector product for complex symmetric matrix.
]
variable[status] assign[=] call[name[_libcublas].cublasZsymv_v2, parameter[name[handle], call[name[_CUBLAS_FILL_MODE]][name[uplo]], name[n], call[name[ctypes].byref, parameter[call[name[cuda].cuDoubleComplex, parameter[name[alpha].real, name[alpha].imag]]]], call[name[int], parameter[name[A]]], name[lda], call[name[int], parameter[name[x]]], name[incx], call[name[ctypes].byref, parameter[call[name[cuda].cuDoubleComplex, parameter[name[beta].real, name[beta].imag]]]], call[name[int], parameter[name[y]]], name[incy]]]
call[name[cublasCheckStatus], parameter[name[status]]] | keyword[def] identifier[cublasZsymv] ( identifier[handle] , identifier[uplo] , identifier[n] , identifier[alpha] , identifier[A] , identifier[lda] , identifier[x] , identifier[incx] , identifier[beta] , identifier[y] , identifier[incy] ):
literal[string]
identifier[status] = identifier[_libcublas] . identifier[cublasZsymv_v2] ( identifier[handle] ,
identifier[_CUBLAS_FILL_MODE] [ identifier[uplo] ], identifier[n] ,
identifier[ctypes] . identifier[byref] ( identifier[cuda] . identifier[cuDoubleComplex] ( identifier[alpha] . identifier[real] ,
identifier[alpha] . identifier[imag] )),
identifier[int] ( identifier[A] ), identifier[lda] , identifier[int] ( identifier[x] ), identifier[incx] ,
identifier[ctypes] . identifier[byref] ( identifier[cuda] . identifier[cuDoubleComplex] ( identifier[beta] . identifier[real] ,
identifier[beta] . identifier[imag] )),
identifier[int] ( identifier[y] ), identifier[incy] )
identifier[cublasCheckStatus] ( identifier[status] ) | def cublasZsymv(handle, uplo, n, alpha, A, lda, x, incx, beta, y, incy):
"""
Matrix-vector product for complex symmetric matrix.
"""
status = _libcublas.cublasZsymv_v2(handle, _CUBLAS_FILL_MODE[uplo], n, ctypes.byref(cuda.cuDoubleComplex(alpha.real, alpha.imag)), int(A), lda, int(x), incx, ctypes.byref(cuda.cuDoubleComplex(beta.real, beta.imag)), int(y), incy)
cublasCheckStatus(status) |
def search(self, query=None, args=None):
'''query a GitLab artifacts folder for a list of images.
If query is None, collections are listed.
'''
if query is None:
bot.exit('You must include a collection query, <collection>/<repo>')
# or default to listing (searching) all things.
return self._search_all(query) | def function[search, parameter[self, query, args]]:
constant[query a GitLab artifacts folder for a list of images.
If query is None, collections are listed.
]
if compare[name[query] is constant[None]] begin[:]
call[name[bot].exit, parameter[constant[You must include a collection query, <collection>/<repo>]]]
return[call[name[self]._search_all, parameter[name[query]]]] | keyword[def] identifier[search] ( identifier[self] , identifier[query] = keyword[None] , identifier[args] = keyword[None] ):
literal[string]
keyword[if] identifier[query] keyword[is] keyword[None] :
identifier[bot] . identifier[exit] ( literal[string] )
keyword[return] identifier[self] . identifier[_search_all] ( identifier[query] ) | def search(self, query=None, args=None):
"""query a GitLab artifacts folder for a list of images.
If query is None, collections are listed.
"""
if query is None:
bot.exit('You must include a collection query, <collection>/<repo>') # depends on [control=['if'], data=[]]
# or default to listing (searching) all things.
return self._search_all(query) |
def rule(rules, strict_slashes=False, api_func=None, *args, **kwargs):
"""
Add a API route to the 'api' blueprint.
:param rules: rule string or string list
:param strict_slashes: same to Blueprint.route, but default value is False
:param api_func: a function that returns a JSON serializable object
or a Flask Response, or raises ApiException
:param args: other args that should be passed to Blueprint.route
:param kwargs: other kwargs that should be passed to Blueprint.route
:return:
"""
return url_rule(api_blueprint, rules, strict_slashes=strict_slashes,
view_func=json_api(api_func) if api_func else None,
*args, **kwargs) | def function[rule, parameter[rules, strict_slashes, api_func]]:
constant[
Add a API route to the 'api' blueprint.
:param rules: rule string or string list
:param strict_slashes: same to Blueprint.route, but default value is False
:param api_func: a function that returns a JSON serializable object
or a Flask Response, or raises ApiException
:param args: other args that should be passed to Blueprint.route
:param kwargs: other kwargs that should be passed to Blueprint.route
:return:
]
return[call[name[url_rule], parameter[name[api_blueprint], name[rules], <ast.Starred object at 0x7da207f99a50>]]] | keyword[def] identifier[rule] ( identifier[rules] , identifier[strict_slashes] = keyword[False] , identifier[api_func] = keyword[None] ,* identifier[args] ,** identifier[kwargs] ):
literal[string]
keyword[return] identifier[url_rule] ( identifier[api_blueprint] , identifier[rules] , identifier[strict_slashes] = identifier[strict_slashes] ,
identifier[view_func] = identifier[json_api] ( identifier[api_func] ) keyword[if] identifier[api_func] keyword[else] keyword[None] ,
* identifier[args] ,** identifier[kwargs] ) | def rule(rules, strict_slashes=False, api_func=None, *args, **kwargs):
"""
Add a API route to the 'api' blueprint.
:param rules: rule string or string list
:param strict_slashes: same to Blueprint.route, but default value is False
:param api_func: a function that returns a JSON serializable object
or a Flask Response, or raises ApiException
:param args: other args that should be passed to Blueprint.route
:param kwargs: other kwargs that should be passed to Blueprint.route
:return:
"""
return url_rule(api_blueprint, rules, *args, strict_slashes=strict_slashes, view_func=json_api(api_func) if api_func else None, **kwargs) |
def get_counter(self, transport, bucket, key, r=None, pr=None,
basic_quorum=None, notfound_ok=None):
"""get_counter(bucket, key, r=None, pr=None, basic_quorum=None,\
notfound_ok=None)
Gets the value of a counter.
.. deprecated:: 2.1.0 (Riak 2.0) Riak 1.4-style counters are
deprecated in favor of the :class:`~riak.datatypes.Counter`
datatype.
.. note:: This request is automatically retried :attr:`retries`
times if it fails due to network error.
:param bucket: the bucket of the counter
:type bucket: RiakBucket
:param key: the key of the counter
:type key: string
:param r: the read quorum
:type r: integer, string, None
:param pr: the primary read quorum
:type pr: integer, string, None
:param basic_quorum: whether to use the "basic quorum" policy
for not-founds
:type basic_quorum: bool
:param notfound_ok: whether to treat not-found responses as successful
:type notfound_ok: bool
:rtype: integer
"""
return transport.get_counter(bucket, key, r=r, pr=pr) | def function[get_counter, parameter[self, transport, bucket, key, r, pr, basic_quorum, notfound_ok]]:
constant[get_counter(bucket, key, r=None, pr=None, basic_quorum=None, notfound_ok=None)
Gets the value of a counter.
.. deprecated:: 2.1.0 (Riak 2.0) Riak 1.4-style counters are
deprecated in favor of the :class:`~riak.datatypes.Counter`
datatype.
.. note:: This request is automatically retried :attr:`retries`
times if it fails due to network error.
:param bucket: the bucket of the counter
:type bucket: RiakBucket
:param key: the key of the counter
:type key: string
:param r: the read quorum
:type r: integer, string, None
:param pr: the primary read quorum
:type pr: integer, string, None
:param basic_quorum: whether to use the "basic quorum" policy
for not-founds
:type basic_quorum: bool
:param notfound_ok: whether to treat not-found responses as successful
:type notfound_ok: bool
:rtype: integer
]
return[call[name[transport].get_counter, parameter[name[bucket], name[key]]]] | keyword[def] identifier[get_counter] ( identifier[self] , identifier[transport] , identifier[bucket] , identifier[key] , identifier[r] = keyword[None] , identifier[pr] = keyword[None] ,
identifier[basic_quorum] = keyword[None] , identifier[notfound_ok] = keyword[None] ):
literal[string]
keyword[return] identifier[transport] . identifier[get_counter] ( identifier[bucket] , identifier[key] , identifier[r] = identifier[r] , identifier[pr] = identifier[pr] ) | def get_counter(self, transport, bucket, key, r=None, pr=None, basic_quorum=None, notfound_ok=None):
"""get_counter(bucket, key, r=None, pr=None, basic_quorum=None, notfound_ok=None)
Gets the value of a counter.
.. deprecated:: 2.1.0 (Riak 2.0) Riak 1.4-style counters are
deprecated in favor of the :class:`~riak.datatypes.Counter`
datatype.
.. note:: This request is automatically retried :attr:`retries`
times if it fails due to network error.
:param bucket: the bucket of the counter
:type bucket: RiakBucket
:param key: the key of the counter
:type key: string
:param r: the read quorum
:type r: integer, string, None
:param pr: the primary read quorum
:type pr: integer, string, None
:param basic_quorum: whether to use the "basic quorum" policy
for not-founds
:type basic_quorum: bool
:param notfound_ok: whether to treat not-found responses as successful
:type notfound_ok: bool
:rtype: integer
"""
return transport.get_counter(bucket, key, r=r, pr=pr) |
def doubleClick(x=None, y=None, interval=0.0, button='left', duration=0.0, tween=linear, pause=None, _pause=True):
"""Performs a double click.
This is a wrapper function for click('left', x, y, 2, interval).
The x and y parameters detail where the mouse event happens. If None, the
current mouse position is used. If a float value, it is rounded down. If
outside the boundaries of the screen, the event happens at edge of the
screen.
Args:
x (int, float, None, tuple, optional): The x position on the screen where the
click happens. None by default. If tuple, this is used for x and y.
If x is a str, it's considered a filename of an image to find on
the screen with locateOnScreen() and click the center of.
y (int, float, None, optional): The y position on the screen where the
click happens. None by default.
interval (float, optional): The number of seconds in between each click,
if the number of clicks is greater than 1. 0.0 by default, for no
pause in between clicks.
button (str, int, optional): The mouse button clicked. Must be one of
'left', 'middle', 'right' (or 1, 2, or 3) respectively. 'left' by
default.
Returns:
None
Raises:
ValueError: If button is not one of 'left', 'middle', 'right', 1, 2, 3, 4,
5, 6, or 7
"""
_failSafeCheck()
# Multiple clicks work different in OSX
if sys.platform == 'darwin':
x, y = _unpackXY(x, y)
_mouseMoveDrag('move', x, y, 0, 0, duration=0, tween=None)
x, y = platformModule._position()
platformModule._multiClick(x, y, button, 2)
else:
click(x, y, 2, interval, button, _pause=False)
_autoPause(pause, _pause) | def function[doubleClick, parameter[x, y, interval, button, duration, tween, pause, _pause]]:
constant[Performs a double click.
This is a wrapper function for click('left', x, y, 2, interval).
The x and y parameters detail where the mouse event happens. If None, the
current mouse position is used. If a float value, it is rounded down. If
outside the boundaries of the screen, the event happens at edge of the
screen.
Args:
x (int, float, None, tuple, optional): The x position on the screen where the
click happens. None by default. If tuple, this is used for x and y.
If x is a str, it's considered a filename of an image to find on
the screen with locateOnScreen() and click the center of.
y (int, float, None, optional): The y position on the screen where the
click happens. None by default.
interval (float, optional): The number of seconds in between each click,
if the number of clicks is greater than 1. 0.0 by default, for no
pause in between clicks.
button (str, int, optional): The mouse button clicked. Must be one of
'left', 'middle', 'right' (or 1, 2, or 3) respectively. 'left' by
default.
Returns:
None
Raises:
ValueError: If button is not one of 'left', 'middle', 'right', 1, 2, 3, 4,
5, 6, or 7
]
call[name[_failSafeCheck], parameter[]]
if compare[name[sys].platform equal[==] constant[darwin]] begin[:]
<ast.Tuple object at 0x7da20c6e6d40> assign[=] call[name[_unpackXY], parameter[name[x], name[y]]]
call[name[_mouseMoveDrag], parameter[constant[move], name[x], name[y], constant[0], constant[0]]]
<ast.Tuple object at 0x7da20c6e6620> assign[=] call[name[platformModule]._position, parameter[]]
call[name[platformModule]._multiClick, parameter[name[x], name[y], name[button], constant[2]]]
call[name[_autoPause], parameter[name[pause], name[_pause]]] | keyword[def] identifier[doubleClick] ( identifier[x] = keyword[None] , identifier[y] = keyword[None] , identifier[interval] = literal[int] , identifier[button] = literal[string] , identifier[duration] = literal[int] , identifier[tween] = identifier[linear] , identifier[pause] = keyword[None] , identifier[_pause] = keyword[True] ):
literal[string]
identifier[_failSafeCheck] ()
keyword[if] identifier[sys] . identifier[platform] == literal[string] :
identifier[x] , identifier[y] = identifier[_unpackXY] ( identifier[x] , identifier[y] )
identifier[_mouseMoveDrag] ( literal[string] , identifier[x] , identifier[y] , literal[int] , literal[int] , identifier[duration] = literal[int] , identifier[tween] = keyword[None] )
identifier[x] , identifier[y] = identifier[platformModule] . identifier[_position] ()
identifier[platformModule] . identifier[_multiClick] ( identifier[x] , identifier[y] , identifier[button] , literal[int] )
keyword[else] :
identifier[click] ( identifier[x] , identifier[y] , literal[int] , identifier[interval] , identifier[button] , identifier[_pause] = keyword[False] )
identifier[_autoPause] ( identifier[pause] , identifier[_pause] ) | def doubleClick(x=None, y=None, interval=0.0, button='left', duration=0.0, tween=linear, pause=None, _pause=True):
"""Performs a double click.
This is a wrapper function for click('left', x, y, 2, interval).
The x and y parameters detail where the mouse event happens. If None, the
current mouse position is used. If a float value, it is rounded down. If
outside the boundaries of the screen, the event happens at edge of the
screen.
Args:
x (int, float, None, tuple, optional): The x position on the screen where the
click happens. None by default. If tuple, this is used for x and y.
If x is a str, it's considered a filename of an image to find on
the screen with locateOnScreen() and click the center of.
y (int, float, None, optional): The y position on the screen where the
click happens. None by default.
interval (float, optional): The number of seconds in between each click,
if the number of clicks is greater than 1. 0.0 by default, for no
pause in between clicks.
button (str, int, optional): The mouse button clicked. Must be one of
'left', 'middle', 'right' (or 1, 2, or 3) respectively. 'left' by
default.
Returns:
None
Raises:
ValueError: If button is not one of 'left', 'middle', 'right', 1, 2, 3, 4,
5, 6, or 7
"""
_failSafeCheck()
# Multiple clicks work different in OSX
if sys.platform == 'darwin':
(x, y) = _unpackXY(x, y)
_mouseMoveDrag('move', x, y, 0, 0, duration=0, tween=None)
(x, y) = platformModule._position()
platformModule._multiClick(x, y, button, 2) # depends on [control=['if'], data=[]]
else:
click(x, y, 2, interval, button, _pause=False)
_autoPause(pause, _pause) |
def shutdown(self, signum, frame): # pylint: disable=unused-argument
"""Shut it down"""
if not self.exit:
self.exit = True
self.log.debug(f"SIGTRAP!{signum};{frame}")
self.api.shutdown()
self.strat.shutdown() | def function[shutdown, parameter[self, signum, frame]]:
constant[Shut it down]
if <ast.UnaryOp object at 0x7da2054a47c0> begin[:]
name[self].exit assign[=] constant[True]
call[name[self].log.debug, parameter[<ast.JoinedStr object at 0x7da2054a44c0>]]
call[name[self].api.shutdown, parameter[]]
call[name[self].strat.shutdown, parameter[]] | keyword[def] identifier[shutdown] ( identifier[self] , identifier[signum] , identifier[frame] ):
literal[string]
keyword[if] keyword[not] identifier[self] . identifier[exit] :
identifier[self] . identifier[exit] = keyword[True]
identifier[self] . identifier[log] . identifier[debug] ( literal[string] )
identifier[self] . identifier[api] . identifier[shutdown] ()
identifier[self] . identifier[strat] . identifier[shutdown] () | def shutdown(self, signum, frame): # pylint: disable=unused-argument
'Shut it down'
if not self.exit:
self.exit = True
self.log.debug(f'SIGTRAP!{signum};{frame}')
self.api.shutdown()
self.strat.shutdown() # depends on [control=['if'], data=[]] |
def add_triple(
self,
subj: Union[URIRef, str],
pred: Union[URIRef, str],
obj: Union[URIRef, Literal, str]
) -> None:
""" Adds triple to rdflib Graph
Triple can be of any subject, predicate, and object of the entity without a need for order.
Args:
subj: Entity subject
pred: Entity predicate
obj: Entity object
Example:
In [1]: add_triple(
...: 'http://uri.interlex.org/base/ilx_0101431',
...: RDF.type,
...: 'http://www.w3.org/2002/07/owl#Class')
...: )
"""
if obj in [None, "", " "]: return # Empty objects are bad practice
_subj = self.process_subj_or_pred(subj)
_pred = self.process_subj_or_pred(pred)
_obj = self.process_obj(obj)
self.g.add( (_subj, _pred, _obj) ) | def function[add_triple, parameter[self, subj, pred, obj]]:
constant[ Adds triple to rdflib Graph
Triple can be of any subject, predicate, and object of the entity without a need for order.
Args:
subj: Entity subject
pred: Entity predicate
obj: Entity object
Example:
In [1]: add_triple(
...: 'http://uri.interlex.org/base/ilx_0101431',
...: RDF.type,
...: 'http://www.w3.org/2002/07/owl#Class')
...: )
]
if compare[name[obj] in list[[<ast.Constant object at 0x7da1b1a7b0a0>, <ast.Constant object at 0x7da1b1a7a320>, <ast.Constant object at 0x7da1b1a78910>]]] begin[:]
return[None]
variable[_subj] assign[=] call[name[self].process_subj_or_pred, parameter[name[subj]]]
variable[_pred] assign[=] call[name[self].process_subj_or_pred, parameter[name[pred]]]
variable[_obj] assign[=] call[name[self].process_obj, parameter[name[obj]]]
call[name[self].g.add, parameter[tuple[[<ast.Name object at 0x7da1b1a7bb80>, <ast.Name object at 0x7da1b1a79ff0>, <ast.Name object at 0x7da1b1a7bc10>]]]] | keyword[def] identifier[add_triple] (
identifier[self] ,
identifier[subj] : identifier[Union] [ identifier[URIRef] , identifier[str] ],
identifier[pred] : identifier[Union] [ identifier[URIRef] , identifier[str] ],
identifier[obj] : identifier[Union] [ identifier[URIRef] , identifier[Literal] , identifier[str] ]
)-> keyword[None] :
literal[string]
keyword[if] identifier[obj] keyword[in] [ keyword[None] , literal[string] , literal[string] ]: keyword[return]
identifier[_subj] = identifier[self] . identifier[process_subj_or_pred] ( identifier[subj] )
identifier[_pred] = identifier[self] . identifier[process_subj_or_pred] ( identifier[pred] )
identifier[_obj] = identifier[self] . identifier[process_obj] ( identifier[obj] )
identifier[self] . identifier[g] . identifier[add] (( identifier[_subj] , identifier[_pred] , identifier[_obj] )) | def add_triple(self, subj: Union[URIRef, str], pred: Union[URIRef, str], obj: Union[URIRef, Literal, str]) -> None:
""" Adds triple to rdflib Graph
Triple can be of any subject, predicate, and object of the entity without a need for order.
Args:
subj: Entity subject
pred: Entity predicate
obj: Entity object
Example:
In [1]: add_triple(
...: 'http://uri.interlex.org/base/ilx_0101431',
...: RDF.type,
...: 'http://www.w3.org/2002/07/owl#Class')
...: )
"""
if obj in [None, '', ' ']:
return # Empty objects are bad practice # depends on [control=['if'], data=[]]
_subj = self.process_subj_or_pred(subj)
_pred = self.process_subj_or_pred(pred)
_obj = self.process_obj(obj)
self.g.add((_subj, _pred, _obj)) |
def emit_measured(self):
"""
The beam emittance :math:`\\langle x x' \\rangle`.
"""
return _np.sqrt(self.spotsq*self.divsq-self.xxp**2) | def function[emit_measured, parameter[self]]:
constant[
The beam emittance :math:`\langle x x' \rangle`.
]
return[call[name[_np].sqrt, parameter[binary_operation[binary_operation[name[self].spotsq * name[self].divsq] - binary_operation[name[self].xxp ** constant[2]]]]]] | keyword[def] identifier[emit_measured] ( identifier[self] ):
literal[string]
keyword[return] identifier[_np] . identifier[sqrt] ( identifier[self] . identifier[spotsq] * identifier[self] . identifier[divsq] - identifier[self] . identifier[xxp] ** literal[int] ) | def emit_measured(self):
"""
The beam emittance :math:`\\langle x x' \\rangle`.
"""
return _np.sqrt(self.spotsq * self.divsq - self.xxp ** 2) |
def _get_document_data(f, image_handler=None):
'''
``f`` is a ``ZipFile`` that is open
Extract out the document data, numbering data and the relationship data.
'''
if image_handler is None:
def image_handler(image_id, relationship_dict):
return relationship_dict.get(image_id)
document_xml = None
numbering_xml = None
relationship_xml = None
styles_xml = None
parser = etree.XMLParser(strip_cdata=False)
path, _ = os.path.split(f.filename)
media = {}
image_sizes = {}
# Loop through the files in the zip file.
for item in f.infolist():
# This file holds all the content of the document.
if item.filename == 'word/document.xml':
xml = f.read(item.filename)
document_xml = etree.fromstring(xml, parser)
# This file tells document.xml how lists should look.
elif item.filename == 'word/numbering.xml':
xml = f.read(item.filename)
numbering_xml = etree.fromstring(xml, parser)
elif item.filename == 'word/styles.xml':
xml = f.read(item.filename)
styles_xml = etree.fromstring(xml, parser)
# This file holds the targets for hyperlinks and images.
elif item.filename == 'word/_rels/document.xml.rels':
xml = f.read(item.filename)
try:
relationship_xml = etree.fromstring(xml, parser)
except XMLSyntaxError:
relationship_xml = etree.fromstring('<xml></xml>', parser)
if item.filename.startswith('word/media/'):
# Strip off the leading word/
media[item.filename[len('word/'):]] = f.extract(
item.filename,
path,
)
# Close the file pointer.
f.close()
# Get dictionaries for the numbering and the relationships.
numbering_dict = get_numbering_info(numbering_xml)
image_sizes = get_image_sizes(document_xml)
relationship_dict = get_relationship_info(
relationship_xml,
media,
image_sizes
)
styles_dict = get_style_dict(styles_xml)
font_sizes_dict = defaultdict(int)
if DETECT_FONT_SIZE:
font_sizes_dict = get_font_sizes_dict(document_xml, styles_dict)
meta_data = MetaData(
numbering_dict=numbering_dict,
relationship_dict=relationship_dict,
styles_dict=styles_dict,
font_sizes_dict=font_sizes_dict,
image_handler=image_handler,
image_sizes=image_sizes,
)
return document_xml, meta_data | def function[_get_document_data, parameter[f, image_handler]]:
constant[
``f`` is a ``ZipFile`` that is open
Extract out the document data, numbering data and the relationship data.
]
if compare[name[image_handler] is constant[None]] begin[:]
def function[image_handler, parameter[image_id, relationship_dict]]:
return[call[name[relationship_dict].get, parameter[name[image_id]]]]
variable[document_xml] assign[=] constant[None]
variable[numbering_xml] assign[=] constant[None]
variable[relationship_xml] assign[=] constant[None]
variable[styles_xml] assign[=] constant[None]
variable[parser] assign[=] call[name[etree].XMLParser, parameter[]]
<ast.Tuple object at 0x7da1b025efe0> assign[=] call[name[os].path.split, parameter[name[f].filename]]
variable[media] assign[=] dictionary[[], []]
variable[image_sizes] assign[=] dictionary[[], []]
for taget[name[item]] in starred[call[name[f].infolist, parameter[]]] begin[:]
if compare[name[item].filename equal[==] constant[word/document.xml]] begin[:]
variable[xml] assign[=] call[name[f].read, parameter[name[item].filename]]
variable[document_xml] assign[=] call[name[etree].fromstring, parameter[name[xml], name[parser]]]
if call[name[item].filename.startswith, parameter[constant[word/media/]]] begin[:]
call[name[media]][call[name[item].filename][<ast.Slice object at 0x7da20c6a9c90>]] assign[=] call[name[f].extract, parameter[name[item].filename, name[path]]]
call[name[f].close, parameter[]]
variable[numbering_dict] assign[=] call[name[get_numbering_info], parameter[name[numbering_xml]]]
variable[image_sizes] assign[=] call[name[get_image_sizes], parameter[name[document_xml]]]
variable[relationship_dict] assign[=] call[name[get_relationship_info], parameter[name[relationship_xml], name[media], name[image_sizes]]]
variable[styles_dict] assign[=] call[name[get_style_dict], parameter[name[styles_xml]]]
variable[font_sizes_dict] assign[=] call[name[defaultdict], parameter[name[int]]]
if name[DETECT_FONT_SIZE] begin[:]
variable[font_sizes_dict] assign[=] call[name[get_font_sizes_dict], parameter[name[document_xml], name[styles_dict]]]
variable[meta_data] assign[=] call[name[MetaData], parameter[]]
return[tuple[[<ast.Name object at 0x7da1b02851e0>, <ast.Name object at 0x7da1b0286740>]]] | keyword[def] identifier[_get_document_data] ( identifier[f] , identifier[image_handler] = keyword[None] ):
literal[string]
keyword[if] identifier[image_handler] keyword[is] keyword[None] :
keyword[def] identifier[image_handler] ( identifier[image_id] , identifier[relationship_dict] ):
keyword[return] identifier[relationship_dict] . identifier[get] ( identifier[image_id] )
identifier[document_xml] = keyword[None]
identifier[numbering_xml] = keyword[None]
identifier[relationship_xml] = keyword[None]
identifier[styles_xml] = keyword[None]
identifier[parser] = identifier[etree] . identifier[XMLParser] ( identifier[strip_cdata] = keyword[False] )
identifier[path] , identifier[_] = identifier[os] . identifier[path] . identifier[split] ( identifier[f] . identifier[filename] )
identifier[media] ={}
identifier[image_sizes] ={}
keyword[for] identifier[item] keyword[in] identifier[f] . identifier[infolist] ():
keyword[if] identifier[item] . identifier[filename] == literal[string] :
identifier[xml] = identifier[f] . identifier[read] ( identifier[item] . identifier[filename] )
identifier[document_xml] = identifier[etree] . identifier[fromstring] ( identifier[xml] , identifier[parser] )
keyword[elif] identifier[item] . identifier[filename] == literal[string] :
identifier[xml] = identifier[f] . identifier[read] ( identifier[item] . identifier[filename] )
identifier[numbering_xml] = identifier[etree] . identifier[fromstring] ( identifier[xml] , identifier[parser] )
keyword[elif] identifier[item] . identifier[filename] == literal[string] :
identifier[xml] = identifier[f] . identifier[read] ( identifier[item] . identifier[filename] )
identifier[styles_xml] = identifier[etree] . identifier[fromstring] ( identifier[xml] , identifier[parser] )
keyword[elif] identifier[item] . identifier[filename] == literal[string] :
identifier[xml] = identifier[f] . identifier[read] ( identifier[item] . identifier[filename] )
keyword[try] :
identifier[relationship_xml] = identifier[etree] . identifier[fromstring] ( identifier[xml] , identifier[parser] )
keyword[except] identifier[XMLSyntaxError] :
identifier[relationship_xml] = identifier[etree] . identifier[fromstring] ( literal[string] , identifier[parser] )
keyword[if] identifier[item] . identifier[filename] . identifier[startswith] ( literal[string] ):
identifier[media] [ identifier[item] . identifier[filename] [ identifier[len] ( literal[string] ):]]= identifier[f] . identifier[extract] (
identifier[item] . identifier[filename] ,
identifier[path] ,
)
identifier[f] . identifier[close] ()
identifier[numbering_dict] = identifier[get_numbering_info] ( identifier[numbering_xml] )
identifier[image_sizes] = identifier[get_image_sizes] ( identifier[document_xml] )
identifier[relationship_dict] = identifier[get_relationship_info] (
identifier[relationship_xml] ,
identifier[media] ,
identifier[image_sizes]
)
identifier[styles_dict] = identifier[get_style_dict] ( identifier[styles_xml] )
identifier[font_sizes_dict] = identifier[defaultdict] ( identifier[int] )
keyword[if] identifier[DETECT_FONT_SIZE] :
identifier[font_sizes_dict] = identifier[get_font_sizes_dict] ( identifier[document_xml] , identifier[styles_dict] )
identifier[meta_data] = identifier[MetaData] (
identifier[numbering_dict] = identifier[numbering_dict] ,
identifier[relationship_dict] = identifier[relationship_dict] ,
identifier[styles_dict] = identifier[styles_dict] ,
identifier[font_sizes_dict] = identifier[font_sizes_dict] ,
identifier[image_handler] = identifier[image_handler] ,
identifier[image_sizes] = identifier[image_sizes] ,
)
keyword[return] identifier[document_xml] , identifier[meta_data] | def _get_document_data(f, image_handler=None):
"""
``f`` is a ``ZipFile`` that is open
Extract out the document data, numbering data and the relationship data.
"""
if image_handler is None:
def image_handler(image_id, relationship_dict):
return relationship_dict.get(image_id) # depends on [control=['if'], data=[]]
document_xml = None
numbering_xml = None
relationship_xml = None
styles_xml = None
parser = etree.XMLParser(strip_cdata=False)
(path, _) = os.path.split(f.filename)
media = {}
image_sizes = {}
# Loop through the files in the zip file.
for item in f.infolist():
# This file holds all the content of the document.
if item.filename == 'word/document.xml':
xml = f.read(item.filename)
document_xml = etree.fromstring(xml, parser) # depends on [control=['if'], data=[]]
# This file tells document.xml how lists should look.
elif item.filename == 'word/numbering.xml':
xml = f.read(item.filename)
numbering_xml = etree.fromstring(xml, parser) # depends on [control=['if'], data=[]]
elif item.filename == 'word/styles.xml':
xml = f.read(item.filename)
styles_xml = etree.fromstring(xml, parser) # depends on [control=['if'], data=[]]
# This file holds the targets for hyperlinks and images.
elif item.filename == 'word/_rels/document.xml.rels':
xml = f.read(item.filename)
try:
relationship_xml = etree.fromstring(xml, parser) # depends on [control=['try'], data=[]]
except XMLSyntaxError:
relationship_xml = etree.fromstring('<xml></xml>', parser) # depends on [control=['except'], data=[]] # depends on [control=['if'], data=[]]
if item.filename.startswith('word/media/'):
# Strip off the leading word/
media[item.filename[len('word/'):]] = f.extract(item.filename, path) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['item']]
# Close the file pointer.
f.close()
# Get dictionaries for the numbering and the relationships.
numbering_dict = get_numbering_info(numbering_xml)
image_sizes = get_image_sizes(document_xml)
relationship_dict = get_relationship_info(relationship_xml, media, image_sizes)
styles_dict = get_style_dict(styles_xml)
font_sizes_dict = defaultdict(int)
if DETECT_FONT_SIZE:
font_sizes_dict = get_font_sizes_dict(document_xml, styles_dict) # depends on [control=['if'], data=[]]
meta_data = MetaData(numbering_dict=numbering_dict, relationship_dict=relationship_dict, styles_dict=styles_dict, font_sizes_dict=font_sizes_dict, image_handler=image_handler, image_sizes=image_sizes)
return (document_xml, meta_data) |
def transform(self, words, aggregate_method):
"""
Transform words (or sequences of words) to vectors using a word2vec model.
:param str words: An H2OFrame made of a single column containing source words.
:param str aggregate_method: Specifies how to aggregate sequences of words. If method is `NONE`
then no aggregation is performed and each input word is mapped to a single word-vector.
If method is 'AVERAGE' then input is treated as sequences of words delimited by NA.
Each word of a sequences is internally mapped to a vector and vectors belonging to
the same sentence are averaged and returned in the result.
:returns: the approximate reconstruction of the training data.
"""
j = h2o.api("GET /3/Word2VecTransform", data={'model': self.model_id, 'words_frame': words.frame_id, 'aggregate_method': aggregate_method})
return h2o.get_frame(j["vectors_frame"]["name"]) | def function[transform, parameter[self, words, aggregate_method]]:
constant[
Transform words (or sequences of words) to vectors using a word2vec model.
:param str words: An H2OFrame made of a single column containing source words.
:param str aggregate_method: Specifies how to aggregate sequences of words. If method is `NONE`
then no aggregation is performed and each input word is mapped to a single word-vector.
If method is 'AVERAGE' then input is treated as sequences of words delimited by NA.
Each word of a sequences is internally mapped to a vector and vectors belonging to
the same sentence are averaged and returned in the result.
:returns: the approximate reconstruction of the training data.
]
variable[j] assign[=] call[name[h2o].api, parameter[constant[GET /3/Word2VecTransform]]]
return[call[name[h2o].get_frame, parameter[call[call[name[j]][constant[vectors_frame]]][constant[name]]]]] | keyword[def] identifier[transform] ( identifier[self] , identifier[words] , identifier[aggregate_method] ):
literal[string]
identifier[j] = identifier[h2o] . identifier[api] ( literal[string] , identifier[data] ={ literal[string] : identifier[self] . identifier[model_id] , literal[string] : identifier[words] . identifier[frame_id] , literal[string] : identifier[aggregate_method] })
keyword[return] identifier[h2o] . identifier[get_frame] ( identifier[j] [ literal[string] ][ literal[string] ]) | def transform(self, words, aggregate_method):
"""
Transform words (or sequences of words) to vectors using a word2vec model.
:param str words: An H2OFrame made of a single column containing source words.
:param str aggregate_method: Specifies how to aggregate sequences of words. If method is `NONE`
then no aggregation is performed and each input word is mapped to a single word-vector.
If method is 'AVERAGE' then input is treated as sequences of words delimited by NA.
Each word of a sequences is internally mapped to a vector and vectors belonging to
the same sentence are averaged and returned in the result.
:returns: the approximate reconstruction of the training data.
"""
j = h2o.api('GET /3/Word2VecTransform', data={'model': self.model_id, 'words_frame': words.frame_id, 'aggregate_method': aggregate_method})
return h2o.get_frame(j['vectors_frame']['name']) |
def resolve_elements(self):
"""Get element of this node recursively
Compute rules with OR or AND rule then NOT rules.
:return: set of element
:rtype: set
"""
# If it's a leaf, we just need to dump a set with the content of the node
if self.leaf:
if not self.content:
return set()
return set(self.content)
# first got the not ones in a list, and the other in the other list
not_nodes = [s for s in self.sons if s.not_value]
positiv_nodes = [s for s in self.sons if not s.not_value] # ok a not not is hard to read..
# By default we are using a OR rule
if not self.operand:
self.operand = '|'
res = set()
# The operand will change the positiv loop only
i = 0
for node in positiv_nodes:
node_members = node.resolve_elements()
if self.operand == '|':
res = res.union(node_members)
elif self.operand == '&':
# The first elements of an AND rule should be used
if i == 0:
res = node_members
else:
res = res.intersection(node_members)
i += 1
# And we finally remove all NOT elements from the result
for node in not_nodes:
node_members = node.resolve_elements()
res = res.difference(node_members)
return res | def function[resolve_elements, parameter[self]]:
constant[Get element of this node recursively
Compute rules with OR or AND rule then NOT rules.
:return: set of element
:rtype: set
]
if name[self].leaf begin[:]
if <ast.UnaryOp object at 0x7da20c7cb3a0> begin[:]
return[call[name[set], parameter[]]]
return[call[name[set], parameter[name[self].content]]]
variable[not_nodes] assign[=] <ast.ListComp object at 0x7da20c7c95a0>
variable[positiv_nodes] assign[=] <ast.ListComp object at 0x7da20c7ca830>
if <ast.UnaryOp object at 0x7da20c7c85b0> begin[:]
name[self].operand assign[=] constant[|]
variable[res] assign[=] call[name[set], parameter[]]
variable[i] assign[=] constant[0]
for taget[name[node]] in starred[name[positiv_nodes]] begin[:]
variable[node_members] assign[=] call[name[node].resolve_elements, parameter[]]
if compare[name[self].operand equal[==] constant[|]] begin[:]
variable[res] assign[=] call[name[res].union, parameter[name[node_members]]]
<ast.AugAssign object at 0x7da20c7cbc70>
for taget[name[node]] in starred[name[not_nodes]] begin[:]
variable[node_members] assign[=] call[name[node].resolve_elements, parameter[]]
variable[res] assign[=] call[name[res].difference, parameter[name[node_members]]]
return[name[res]] | keyword[def] identifier[resolve_elements] ( identifier[self] ):
literal[string]
keyword[if] identifier[self] . identifier[leaf] :
keyword[if] keyword[not] identifier[self] . identifier[content] :
keyword[return] identifier[set] ()
keyword[return] identifier[set] ( identifier[self] . identifier[content] )
identifier[not_nodes] =[ identifier[s] keyword[for] identifier[s] keyword[in] identifier[self] . identifier[sons] keyword[if] identifier[s] . identifier[not_value] ]
identifier[positiv_nodes] =[ identifier[s] keyword[for] identifier[s] keyword[in] identifier[self] . identifier[sons] keyword[if] keyword[not] identifier[s] . identifier[not_value] ]
keyword[if] keyword[not] identifier[self] . identifier[operand] :
identifier[self] . identifier[operand] = literal[string]
identifier[res] = identifier[set] ()
identifier[i] = literal[int]
keyword[for] identifier[node] keyword[in] identifier[positiv_nodes] :
identifier[node_members] = identifier[node] . identifier[resolve_elements] ()
keyword[if] identifier[self] . identifier[operand] == literal[string] :
identifier[res] = identifier[res] . identifier[union] ( identifier[node_members] )
keyword[elif] identifier[self] . identifier[operand] == literal[string] :
keyword[if] identifier[i] == literal[int] :
identifier[res] = identifier[node_members]
keyword[else] :
identifier[res] = identifier[res] . identifier[intersection] ( identifier[node_members] )
identifier[i] += literal[int]
keyword[for] identifier[node] keyword[in] identifier[not_nodes] :
identifier[node_members] = identifier[node] . identifier[resolve_elements] ()
identifier[res] = identifier[res] . identifier[difference] ( identifier[node_members] )
keyword[return] identifier[res] | def resolve_elements(self):
"""Get element of this node recursively
Compute rules with OR or AND rule then NOT rules.
:return: set of element
:rtype: set
"""
# If it's a leaf, we just need to dump a set with the content of the node
if self.leaf:
if not self.content:
return set() # depends on [control=['if'], data=[]]
return set(self.content) # depends on [control=['if'], data=[]]
# first got the not ones in a list, and the other in the other list
not_nodes = [s for s in self.sons if s.not_value]
positiv_nodes = [s for s in self.sons if not s.not_value] # ok a not not is hard to read..
# By default we are using a OR rule
if not self.operand:
self.operand = '|' # depends on [control=['if'], data=[]]
res = set()
# The operand will change the positiv loop only
i = 0
for node in positiv_nodes:
node_members = node.resolve_elements()
if self.operand == '|':
res = res.union(node_members) # depends on [control=['if'], data=[]]
elif self.operand == '&':
# The first elements of an AND rule should be used
if i == 0:
res = node_members # depends on [control=['if'], data=[]]
else:
res = res.intersection(node_members) # depends on [control=['if'], data=[]]
i += 1 # depends on [control=['for'], data=['node']]
# And we finally remove all NOT elements from the result
for node in not_nodes:
node_members = node.resolve_elements()
res = res.difference(node_members) # depends on [control=['for'], data=['node']]
return res |
def get_urls(self):
"""
Add a calendar URL.
"""
from django.conf.urls import patterns, url
urls = super(EventAdmin, self).get_urls()
my_urls = patterns(
'',
url(
r'^calendar/$',
self.admin_site.admin_view(self.calendar),
name='icekit_events_eventbase_calendar'
),
url(
r'^calendar_data/$',
self.admin_site.admin_view(self.calendar_data),
name='icekit_events_eventbase_calendar_data'
),
)
return my_urls + urls | def function[get_urls, parameter[self]]:
constant[
Add a calendar URL.
]
from relative_module[django.conf.urls] import module[patterns], module[url]
variable[urls] assign[=] call[call[name[super], parameter[name[EventAdmin], name[self]]].get_urls, parameter[]]
variable[my_urls] assign[=] call[name[patterns], parameter[constant[], call[name[url], parameter[constant[^calendar/$], call[name[self].admin_site.admin_view, parameter[name[self].calendar]]]], call[name[url], parameter[constant[^calendar_data/$], call[name[self].admin_site.admin_view, parameter[name[self].calendar_data]]]]]]
return[binary_operation[name[my_urls] + name[urls]]] | keyword[def] identifier[get_urls] ( identifier[self] ):
literal[string]
keyword[from] identifier[django] . identifier[conf] . identifier[urls] keyword[import] identifier[patterns] , identifier[url]
identifier[urls] = identifier[super] ( identifier[EventAdmin] , identifier[self] ). identifier[get_urls] ()
identifier[my_urls] = identifier[patterns] (
literal[string] ,
identifier[url] (
literal[string] ,
identifier[self] . identifier[admin_site] . identifier[admin_view] ( identifier[self] . identifier[calendar] ),
identifier[name] = literal[string]
),
identifier[url] (
literal[string] ,
identifier[self] . identifier[admin_site] . identifier[admin_view] ( identifier[self] . identifier[calendar_data] ),
identifier[name] = literal[string]
),
)
keyword[return] identifier[my_urls] + identifier[urls] | def get_urls(self):
"""
Add a calendar URL.
"""
from django.conf.urls import patterns, url
urls = super(EventAdmin, self).get_urls()
my_urls = patterns('', url('^calendar/$', self.admin_site.admin_view(self.calendar), name='icekit_events_eventbase_calendar'), url('^calendar_data/$', self.admin_site.admin_view(self.calendar_data), name='icekit_events_eventbase_calendar_data'))
return my_urls + urls |
def time(self, intervals=1, *args, _show_progress=True, _print=True,
_collect_garbage=True, _quiet=True, **kwargs):
""" Measures the execution time of :prop:_callable for @intervals
@intervals: #int number of intervals to measure the execution time
of the function for
@*args: arguments to pass to the callable being timed
@**kwargs: arguments to pass to the callable being timed
@_show_progress: #bool whether or not to print a progress bar
@_print: #bool whether or not to print the results of the timing
@_collect_garbage: #bool whether or not to garbage collect
while timing
@_quiet: #bool whether or not to disable the print() function's
ability to output to terminal during the timing
-> :class:collections.OrderedDict of stats about the timing
"""
self.reset()
args = list(args) + list(self._callableargs[0])
_kwargs = self._callableargs[1]
_kwargs.update(kwargs)
kwargs = _kwargs
if not _collect_garbage:
gc.disable() # Garbage collection setting
gc.collect()
self.allocated_memory = 0
for x in self.progress(intervals):
if _quiet: # Quiets print()s in the tested function
sys.stdout = NullIO()
try:
self.start() # Starts the timer
self._callable(*args, **kwargs)
self.stop() # Stops the timer
except Exception as e:
if _quiet: # Unquiets prints()
sys.stdout = sys.__stdout__
raise e
if _quiet: # Unquiets prints()
sys.stdout = sys.__stdout__
if not _collect_garbage:
gc.enable() # Garbage collection setting
if _print:
self.info() | def function[time, parameter[self, intervals]]:
constant[ Measures the execution time of :prop:_callable for @intervals
@intervals: #int number of intervals to measure the execution time
of the function for
@*args: arguments to pass to the callable being timed
@**kwargs: arguments to pass to the callable being timed
@_show_progress: #bool whether or not to print a progress bar
@_print: #bool whether or not to print the results of the timing
@_collect_garbage: #bool whether or not to garbage collect
while timing
@_quiet: #bool whether or not to disable the print() function's
ability to output to terminal during the timing
-> :class:collections.OrderedDict of stats about the timing
]
call[name[self].reset, parameter[]]
variable[args] assign[=] binary_operation[call[name[list], parameter[name[args]]] + call[name[list], parameter[call[name[self]._callableargs][constant[0]]]]]
variable[_kwargs] assign[=] call[name[self]._callableargs][constant[1]]
call[name[_kwargs].update, parameter[name[kwargs]]]
variable[kwargs] assign[=] name[_kwargs]
if <ast.UnaryOp object at 0x7da20c76f9d0> begin[:]
call[name[gc].disable, parameter[]]
call[name[gc].collect, parameter[]]
name[self].allocated_memory assign[=] constant[0]
for taget[name[x]] in starred[call[name[self].progress, parameter[name[intervals]]]] begin[:]
if name[_quiet] begin[:]
name[sys].stdout assign[=] call[name[NullIO], parameter[]]
<ast.Try object at 0x7da20c76e080>
if name[_quiet] begin[:]
name[sys].stdout assign[=] name[sys].__stdout__
if <ast.UnaryOp object at 0x7da1b10539a0> begin[:]
call[name[gc].enable, parameter[]]
if name[_print] begin[:]
call[name[self].info, parameter[]] | keyword[def] identifier[time] ( identifier[self] , identifier[intervals] = literal[int] ,* identifier[args] , identifier[_show_progress] = keyword[True] , identifier[_print] = keyword[True] ,
identifier[_collect_garbage] = keyword[True] , identifier[_quiet] = keyword[True] ,** identifier[kwargs] ):
literal[string]
identifier[self] . identifier[reset] ()
identifier[args] = identifier[list] ( identifier[args] )+ identifier[list] ( identifier[self] . identifier[_callableargs] [ literal[int] ])
identifier[_kwargs] = identifier[self] . identifier[_callableargs] [ literal[int] ]
identifier[_kwargs] . identifier[update] ( identifier[kwargs] )
identifier[kwargs] = identifier[_kwargs]
keyword[if] keyword[not] identifier[_collect_garbage] :
identifier[gc] . identifier[disable] ()
identifier[gc] . identifier[collect] ()
identifier[self] . identifier[allocated_memory] = literal[int]
keyword[for] identifier[x] keyword[in] identifier[self] . identifier[progress] ( identifier[intervals] ):
keyword[if] identifier[_quiet] :
identifier[sys] . identifier[stdout] = identifier[NullIO] ()
keyword[try] :
identifier[self] . identifier[start] ()
identifier[self] . identifier[_callable] (* identifier[args] ,** identifier[kwargs] )
identifier[self] . identifier[stop] ()
keyword[except] identifier[Exception] keyword[as] identifier[e] :
keyword[if] identifier[_quiet] :
identifier[sys] . identifier[stdout] = identifier[sys] . identifier[__stdout__]
keyword[raise] identifier[e]
keyword[if] identifier[_quiet] :
identifier[sys] . identifier[stdout] = identifier[sys] . identifier[__stdout__]
keyword[if] keyword[not] identifier[_collect_garbage] :
identifier[gc] . identifier[enable] ()
keyword[if] identifier[_print] :
identifier[self] . identifier[info] () | def time(self, intervals=1, *args, _show_progress=True, _print=True, _collect_garbage=True, _quiet=True, **kwargs):
""" Measures the execution time of :prop:_callable for @intervals
@intervals: #int number of intervals to measure the execution time
of the function for
@*args: arguments to pass to the callable being timed
@**kwargs: arguments to pass to the callable being timed
@_show_progress: #bool whether or not to print a progress bar
@_print: #bool whether or not to print the results of the timing
@_collect_garbage: #bool whether or not to garbage collect
while timing
@_quiet: #bool whether or not to disable the print() function's
ability to output to terminal during the timing
-> :class:collections.OrderedDict of stats about the timing
"""
self.reset()
args = list(args) + list(self._callableargs[0])
_kwargs = self._callableargs[1]
_kwargs.update(kwargs)
kwargs = _kwargs
if not _collect_garbage:
gc.disable() # Garbage collection setting # depends on [control=['if'], data=[]]
gc.collect()
self.allocated_memory = 0
for x in self.progress(intervals):
if _quiet: # Quiets print()s in the tested function
sys.stdout = NullIO() # depends on [control=['if'], data=[]]
try:
self.start() # Starts the timer
self._callable(*args, **kwargs)
self.stop() # Stops the timer # depends on [control=['try'], data=[]]
except Exception as e:
if _quiet: # Unquiets prints()
sys.stdout = sys.__stdout__ # depends on [control=['if'], data=[]]
raise e # depends on [control=['except'], data=['e']]
if _quiet: # Unquiets prints()
sys.stdout = sys.__stdout__ # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]]
if not _collect_garbage:
gc.enable() # Garbage collection setting # depends on [control=['if'], data=[]]
if _print:
self.info() # depends on [control=['if'], data=[]] |
def render_registered(url_id, remote_info):
"""
Render template file for the registered user, which has some of the values
prefilled.
Args:
url_id (str): Seeder URL id.
remote_info (dict): Informations read from Seeder.
Returns:
str: Template filled with data.
"""
return template(
read_index_template(),
registered=True,
url=remote_info["url"],
seeder_data=json.dumps(remote_info),
url_id=url_id,
) | def function[render_registered, parameter[url_id, remote_info]]:
constant[
Render template file for the registered user, which has some of the values
prefilled.
Args:
url_id (str): Seeder URL id.
remote_info (dict): Informations read from Seeder.
Returns:
str: Template filled with data.
]
return[call[name[template], parameter[call[name[read_index_template], parameter[]]]]] | keyword[def] identifier[render_registered] ( identifier[url_id] , identifier[remote_info] ):
literal[string]
keyword[return] identifier[template] (
identifier[read_index_template] (),
identifier[registered] = keyword[True] ,
identifier[url] = identifier[remote_info] [ literal[string] ],
identifier[seeder_data] = identifier[json] . identifier[dumps] ( identifier[remote_info] ),
identifier[url_id] = identifier[url_id] ,
) | def render_registered(url_id, remote_info):
"""
Render template file for the registered user, which has some of the values
prefilled.
Args:
url_id (str): Seeder URL id.
remote_info (dict): Informations read from Seeder.
Returns:
str: Template filled with data.
"""
return template(read_index_template(), registered=True, url=remote_info['url'], seeder_data=json.dumps(remote_info), url_id=url_id) |
def _record(self, ext_loc, parent_dir_num):
# type: (int, int) -> bytes
'''
An internal method to generate a string representing this Path Table Record.
Parameters:
ext_loc - The extent location to place in this Path Table Record.
parent_dir_num - The parent directory number to place in this Path Table
Record.
Returns:
A string representing this Path Table Record.
'''
return struct.pack(self.FMT, self.len_di, self.xattr_length,
ext_loc, parent_dir_num) + self.directory_identifier + b'\x00' * (self.len_di % 2) | def function[_record, parameter[self, ext_loc, parent_dir_num]]:
constant[
An internal method to generate a string representing this Path Table Record.
Parameters:
ext_loc - The extent location to place in this Path Table Record.
parent_dir_num - The parent directory number to place in this Path Table
Record.
Returns:
A string representing this Path Table Record.
]
return[binary_operation[binary_operation[call[name[struct].pack, parameter[name[self].FMT, name[self].len_di, name[self].xattr_length, name[ext_loc], name[parent_dir_num]]] + name[self].directory_identifier] + binary_operation[constant[b'\x00'] * binary_operation[name[self].len_di <ast.Mod object at 0x7da2590d6920> constant[2]]]]] | keyword[def] identifier[_record] ( identifier[self] , identifier[ext_loc] , identifier[parent_dir_num] ):
literal[string]
keyword[return] identifier[struct] . identifier[pack] ( identifier[self] . identifier[FMT] , identifier[self] . identifier[len_di] , identifier[self] . identifier[xattr_length] ,
identifier[ext_loc] , identifier[parent_dir_num] )+ identifier[self] . identifier[directory_identifier] + literal[string] *( identifier[self] . identifier[len_di] % literal[int] ) | def _record(self, ext_loc, parent_dir_num):
# type: (int, int) -> bytes
'\n An internal method to generate a string representing this Path Table Record.\n\n Parameters:\n ext_loc - The extent location to place in this Path Table Record.\n parent_dir_num - The parent directory number to place in this Path Table\n Record.\n Returns:\n A string representing this Path Table Record.\n '
return struct.pack(self.FMT, self.len_di, self.xattr_length, ext_loc, parent_dir_num) + self.directory_identifier + b'\x00' * (self.len_di % 2) |
def choose_init(module):
"""
Select a init system
Returns the name of a init system (upstart, sysvinit ...).
"""
# Upstart checks first because when installing ceph, the
# `/lib/systemd/system/ceph.target` file may be created, fooling this
# detection mechanism.
if is_upstart(module.conn):
return 'upstart'
if is_systemd(module.conn) or module.conn.remote_module.path_exists(
"/lib/systemd/system/ceph.target"):
return 'systemd'
return 'sysvinit' | def function[choose_init, parameter[module]]:
constant[
Select a init system
Returns the name of a init system (upstart, sysvinit ...).
]
if call[name[is_upstart], parameter[name[module].conn]] begin[:]
return[constant[upstart]]
if <ast.BoolOp object at 0x7da1b16a3d90> begin[:]
return[constant[systemd]]
return[constant[sysvinit]] | keyword[def] identifier[choose_init] ( identifier[module] ):
literal[string]
keyword[if] identifier[is_upstart] ( identifier[module] . identifier[conn] ):
keyword[return] literal[string]
keyword[if] identifier[is_systemd] ( identifier[module] . identifier[conn] ) keyword[or] identifier[module] . identifier[conn] . identifier[remote_module] . identifier[path_exists] (
literal[string] ):
keyword[return] literal[string]
keyword[return] literal[string] | def choose_init(module):
"""
Select a init system
Returns the name of a init system (upstart, sysvinit ...).
"""
# Upstart checks first because when installing ceph, the
# `/lib/systemd/system/ceph.target` file may be created, fooling this
# detection mechanism.
if is_upstart(module.conn):
return 'upstart' # depends on [control=['if'], data=[]]
if is_systemd(module.conn) or module.conn.remote_module.path_exists('/lib/systemd/system/ceph.target'):
return 'systemd' # depends on [control=['if'], data=[]]
return 'sysvinit' |
def fit(self, range, function=None):
"""Fits a function to the active display's data trace within a
specified range of the time window.
E.g.::
# Fit's a gaussian to the first 30% of the time window.
lockin.fit(range=(0, 30), function='gauss')
:param start: The left limit of the time window in percent.
:param stop: The right limit of the time window in percent.
:param function: The function used to fit the data, either 'line',
'exp', 'gauss' or None, the default. The configured fit function is
left unchanged if function is None.
.. note::
Fitting takes some time. Check the status byte to see when the
operation is done. A running scan will be paused until the
fitting is complete.
.. warning::
The SR850 will generate an error if the active display trace is not
stored when the fit command is executed.
"""
if function is not None:
self.fit_function = function
cmd = 'FITT', Integer(min=0, max=100), Integer(min=0, max=100)
self._write(cmd, start, stop) | def function[fit, parameter[self, range, function]]:
constant[Fits a function to the active display's data trace within a
specified range of the time window.
E.g.::
# Fit's a gaussian to the first 30% of the time window.
lockin.fit(range=(0, 30), function='gauss')
:param start: The left limit of the time window in percent.
:param stop: The right limit of the time window in percent.
:param function: The function used to fit the data, either 'line',
'exp', 'gauss' or None, the default. The configured fit function is
left unchanged if function is None.
.. note::
Fitting takes some time. Check the status byte to see when the
operation is done. A running scan will be paused until the
fitting is complete.
.. warning::
The SR850 will generate an error if the active display trace is not
stored when the fit command is executed.
]
if compare[name[function] is_not constant[None]] begin[:]
name[self].fit_function assign[=] name[function]
variable[cmd] assign[=] tuple[[<ast.Constant object at 0x7da1b0a6cc70>, <ast.Call object at 0x7da1b0a6c880>, <ast.Call object at 0x7da1b0a6c250>]]
call[name[self]._write, parameter[name[cmd], name[start], name[stop]]] | keyword[def] identifier[fit] ( identifier[self] , identifier[range] , identifier[function] = keyword[None] ):
literal[string]
keyword[if] identifier[function] keyword[is] keyword[not] keyword[None] :
identifier[self] . identifier[fit_function] = identifier[function]
identifier[cmd] = literal[string] , identifier[Integer] ( identifier[min] = literal[int] , identifier[max] = literal[int] ), identifier[Integer] ( identifier[min] = literal[int] , identifier[max] = literal[int] )
identifier[self] . identifier[_write] ( identifier[cmd] , identifier[start] , identifier[stop] ) | def fit(self, range, function=None):
"""Fits a function to the active display's data trace within a
specified range of the time window.
E.g.::
# Fit's a gaussian to the first 30% of the time window.
lockin.fit(range=(0, 30), function='gauss')
:param start: The left limit of the time window in percent.
:param stop: The right limit of the time window in percent.
:param function: The function used to fit the data, either 'line',
'exp', 'gauss' or None, the default. The configured fit function is
left unchanged if function is None.
.. note::
Fitting takes some time. Check the status byte to see when the
operation is done. A running scan will be paused until the
fitting is complete.
.. warning::
The SR850 will generate an error if the active display trace is not
stored when the fit command is executed.
"""
if function is not None:
self.fit_function = function # depends on [control=['if'], data=['function']]
cmd = ('FITT', Integer(min=0, max=100), Integer(min=0, max=100))
self._write(cmd, start, stop) |
def references(self, criteria, publications='publications', column_name='publication_shortname', fetch=False):
"""
Do a reverse lookup on the **publications** table. Will return every entry that matches that reference.
Parameters
----------
criteria: int or str
The id from the PUBLICATIONS table whose data across all tables is to be printed.
publications: str
Name of the publications table
column_name: str
Name of the reference column in other tables
fetch: bool
Return the results.
Returns
-------
data_tables: dict
Returns a dictionary of astropy tables with the table name as the keys.
"""
data_tables = dict()
# If an ID is provided but the column name is publication shortname, grab the shortname
if isinstance(criteria, type(1)) and column_name == 'publication_shortname':
t = self.query("SELECT * FROM {} WHERE id={}".format(publications, criteria), fmt='table')
if len(t) > 0:
criteria = t['shortname'][0]
else:
print('No match found for {}'.format(criteria))
return
t = self.query("SELECT * FROM sqlite_master WHERE type='table'", fmt='table')
all_tables = t['name'].tolist()
for table in ['sources'] + [t for t in all_tables if
t not in ['publications', 'sqlite_sequence', 'sources']]:
# Get the columns, pull out redundant ones, and query the table for this source's data
t = self.query("PRAGMA table_info({})".format(table), fmt='table')
columns = np.array(t['name'])
types = np.array(t['type'])
# Only get simple data types and exclude redundant ones for nicer printing
columns = columns[
((types == 'REAL') | (types == 'INTEGER') | (types == 'TEXT')) & (columns != column_name)]
# Query the table
try:
data = self.query("SELECT {} FROM {} WHERE {}='{}'".format(','.join(columns), table,
column_name, criteria), fmt='table')
except:
data = None
# If there's data for this table, save it
if data:
if fetch:
data_tables[table] = self.query(
"SELECT {} FROM {} WHERE {}='{}'".format(
','.join(columns), table, column_name, criteria), fmt='table', fetch=True)
else:
data = data[[c.lower() for c in columns]] # force lowercase since astropy.Tables have all lowercase
pprint(data, title=table.upper())
if fetch: return data_tables | def function[references, parameter[self, criteria, publications, column_name, fetch]]:
constant[
Do a reverse lookup on the **publications** table. Will return every entry that matches that reference.
Parameters
----------
criteria: int or str
The id from the PUBLICATIONS table whose data across all tables is to be printed.
publications: str
Name of the publications table
column_name: str
Name of the reference column in other tables
fetch: bool
Return the results.
Returns
-------
data_tables: dict
Returns a dictionary of astropy tables with the table name as the keys.
]
variable[data_tables] assign[=] call[name[dict], parameter[]]
if <ast.BoolOp object at 0x7da1b0aae830> begin[:]
variable[t] assign[=] call[name[self].query, parameter[call[constant[SELECT * FROM {} WHERE id={}].format, parameter[name[publications], name[criteria]]]]]
if compare[call[name[len], parameter[name[t]]] greater[>] constant[0]] begin[:]
variable[criteria] assign[=] call[call[name[t]][constant[shortname]]][constant[0]]
variable[t] assign[=] call[name[self].query, parameter[constant[SELECT * FROM sqlite_master WHERE type='table']]]
variable[all_tables] assign[=] call[call[name[t]][constant[name]].tolist, parameter[]]
for taget[name[table]] in starred[binary_operation[list[[<ast.Constant object at 0x7da1b0ac11e0>]] + <ast.ListComp object at 0x7da1b0ac11b0>]] begin[:]
variable[t] assign[=] call[name[self].query, parameter[call[constant[PRAGMA table_info({})].format, parameter[name[table]]]]]
variable[columns] assign[=] call[name[np].array, parameter[call[name[t]][constant[name]]]]
variable[types] assign[=] call[name[np].array, parameter[call[name[t]][constant[type]]]]
variable[columns] assign[=] call[name[columns]][binary_operation[binary_operation[binary_operation[compare[name[types] equal[==] constant[REAL]] <ast.BitOr object at 0x7da2590d6aa0> compare[name[types] equal[==] constant[INTEGER]]] <ast.BitOr object at 0x7da2590d6aa0> compare[name[types] equal[==] constant[TEXT]]] <ast.BitAnd object at 0x7da2590d6b60> compare[name[columns] not_equal[!=] name[column_name]]]]
<ast.Try object at 0x7da1b0aaf790>
if name[data] begin[:]
if name[fetch] begin[:]
call[name[data_tables]][name[table]] assign[=] call[name[self].query, parameter[call[constant[SELECT {} FROM {} WHERE {}='{}'].format, parameter[call[constant[,].join, parameter[name[columns]]], name[table], name[column_name], name[criteria]]]]]
if name[fetch] begin[:]
return[name[data_tables]] | keyword[def] identifier[references] ( identifier[self] , identifier[criteria] , identifier[publications] = literal[string] , identifier[column_name] = literal[string] , identifier[fetch] = keyword[False] ):
literal[string]
identifier[data_tables] = identifier[dict] ()
keyword[if] identifier[isinstance] ( identifier[criteria] , identifier[type] ( literal[int] )) keyword[and] identifier[column_name] == literal[string] :
identifier[t] = identifier[self] . identifier[query] ( literal[string] . identifier[format] ( identifier[publications] , identifier[criteria] ), identifier[fmt] = literal[string] )
keyword[if] identifier[len] ( identifier[t] )> literal[int] :
identifier[criteria] = identifier[t] [ literal[string] ][ literal[int] ]
keyword[else] :
identifier[print] ( literal[string] . identifier[format] ( identifier[criteria] ))
keyword[return]
identifier[t] = identifier[self] . identifier[query] ( literal[string] , identifier[fmt] = literal[string] )
identifier[all_tables] = identifier[t] [ literal[string] ]. identifier[tolist] ()
keyword[for] identifier[table] keyword[in] [ literal[string] ]+[ identifier[t] keyword[for] identifier[t] keyword[in] identifier[all_tables] keyword[if]
identifier[t] keyword[not] keyword[in] [ literal[string] , literal[string] , literal[string] ]]:
identifier[t] = identifier[self] . identifier[query] ( literal[string] . identifier[format] ( identifier[table] ), identifier[fmt] = literal[string] )
identifier[columns] = identifier[np] . identifier[array] ( identifier[t] [ literal[string] ])
identifier[types] = identifier[np] . identifier[array] ( identifier[t] [ literal[string] ])
identifier[columns] = identifier[columns] [
(( identifier[types] == literal[string] )|( identifier[types] == literal[string] )|( identifier[types] == literal[string] ))&( identifier[columns] != identifier[column_name] )]
keyword[try] :
identifier[data] = identifier[self] . identifier[query] ( literal[string] . identifier[format] ( literal[string] . identifier[join] ( identifier[columns] ), identifier[table] ,
identifier[column_name] , identifier[criteria] ), identifier[fmt] = literal[string] )
keyword[except] :
identifier[data] = keyword[None]
keyword[if] identifier[data] :
keyword[if] identifier[fetch] :
identifier[data_tables] [ identifier[table] ]= identifier[self] . identifier[query] (
literal[string] . identifier[format] (
literal[string] . identifier[join] ( identifier[columns] ), identifier[table] , identifier[column_name] , identifier[criteria] ), identifier[fmt] = literal[string] , identifier[fetch] = keyword[True] )
keyword[else] :
identifier[data] = identifier[data] [[ identifier[c] . identifier[lower] () keyword[for] identifier[c] keyword[in] identifier[columns] ]]
identifier[pprint] ( identifier[data] , identifier[title] = identifier[table] . identifier[upper] ())
keyword[if] identifier[fetch] : keyword[return] identifier[data_tables] | def references(self, criteria, publications='publications', column_name='publication_shortname', fetch=False):
"""
Do a reverse lookup on the **publications** table. Will return every entry that matches that reference.
Parameters
----------
criteria: int or str
The id from the PUBLICATIONS table whose data across all tables is to be printed.
publications: str
Name of the publications table
column_name: str
Name of the reference column in other tables
fetch: bool
Return the results.
Returns
-------
data_tables: dict
Returns a dictionary of astropy tables with the table name as the keys.
"""
data_tables = dict()
# If an ID is provided but the column name is publication shortname, grab the shortname
if isinstance(criteria, type(1)) and column_name == 'publication_shortname':
t = self.query('SELECT * FROM {} WHERE id={}'.format(publications, criteria), fmt='table')
if len(t) > 0:
criteria = t['shortname'][0] # depends on [control=['if'], data=[]]
else:
print('No match found for {}'.format(criteria))
return # depends on [control=['if'], data=[]]
t = self.query("SELECT * FROM sqlite_master WHERE type='table'", fmt='table')
all_tables = t['name'].tolist()
for table in ['sources'] + [t for t in all_tables if t not in ['publications', 'sqlite_sequence', 'sources']]:
# Get the columns, pull out redundant ones, and query the table for this source's data
t = self.query('PRAGMA table_info({})'.format(table), fmt='table')
columns = np.array(t['name'])
types = np.array(t['type'])
# Only get simple data types and exclude redundant ones for nicer printing
columns = columns[((types == 'REAL') | (types == 'INTEGER') | (types == 'TEXT')) & (columns != column_name)]
# Query the table
try:
data = self.query("SELECT {} FROM {} WHERE {}='{}'".format(','.join(columns), table, column_name, criteria), fmt='table') # depends on [control=['try'], data=[]]
except:
data = None # depends on [control=['except'], data=[]]
# If there's data for this table, save it
if data:
if fetch:
data_tables[table] = self.query("SELECT {} FROM {} WHERE {}='{}'".format(','.join(columns), table, column_name, criteria), fmt='table', fetch=True) # depends on [control=['if'], data=[]]
else:
data = data[[c.lower() for c in columns]] # force lowercase since astropy.Tables have all lowercase
pprint(data, title=table.upper()) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['table']]
if fetch:
return data_tables # depends on [control=['if'], data=[]] |
def paint(self, painter, rect, palette):
"""Draws a generic visual representation for this component
Re-implement this to get a custom graphic in builder editor
:param painter: Use this class to do the drawing
:type painter: :qtdoc:`QPainter`
:param rect: boundary of the delegate for this component, painting should be done inside this boundary
:type rect: :qtdoc:`QRect`
:param palette: contains color groups to use, if wanted
:type palette: :qtdoc:`QPalette`
"""
painter.save()
image = img.default()
painter.drawImage(rect, image)
# set text color
painter.setPen(QtGui.QPen(QtCore.Qt.red))
painter.drawText(rect, QtCore.Qt.AlignLeft, self.__class__.__name__)
painter.restore() | def function[paint, parameter[self, painter, rect, palette]]:
constant[Draws a generic visual representation for this component
Re-implement this to get a custom graphic in builder editor
:param painter: Use this class to do the drawing
:type painter: :qtdoc:`QPainter`
:param rect: boundary of the delegate for this component, painting should be done inside this boundary
:type rect: :qtdoc:`QRect`
:param palette: contains color groups to use, if wanted
:type palette: :qtdoc:`QPalette`
]
call[name[painter].save, parameter[]]
variable[image] assign[=] call[name[img].default, parameter[]]
call[name[painter].drawImage, parameter[name[rect], name[image]]]
call[name[painter].setPen, parameter[call[name[QtGui].QPen, parameter[name[QtCore].Qt.red]]]]
call[name[painter].drawText, parameter[name[rect], name[QtCore].Qt.AlignLeft, name[self].__class__.__name__]]
call[name[painter].restore, parameter[]] | keyword[def] identifier[paint] ( identifier[self] , identifier[painter] , identifier[rect] , identifier[palette] ):
literal[string]
identifier[painter] . identifier[save] ()
identifier[image] = identifier[img] . identifier[default] ()
identifier[painter] . identifier[drawImage] ( identifier[rect] , identifier[image] )
identifier[painter] . identifier[setPen] ( identifier[QtGui] . identifier[QPen] ( identifier[QtCore] . identifier[Qt] . identifier[red] ))
identifier[painter] . identifier[drawText] ( identifier[rect] , identifier[QtCore] . identifier[Qt] . identifier[AlignLeft] , identifier[self] . identifier[__class__] . identifier[__name__] )
identifier[painter] . identifier[restore] () | def paint(self, painter, rect, palette):
"""Draws a generic visual representation for this component
Re-implement this to get a custom graphic in builder editor
:param painter: Use this class to do the drawing
:type painter: :qtdoc:`QPainter`
:param rect: boundary of the delegate for this component, painting should be done inside this boundary
:type rect: :qtdoc:`QRect`
:param palette: contains color groups to use, if wanted
:type palette: :qtdoc:`QPalette`
"""
painter.save()
image = img.default()
painter.drawImage(rect, image)
# set text color
painter.setPen(QtGui.QPen(QtCore.Qt.red))
painter.drawText(rect, QtCore.Qt.AlignLeft, self.__class__.__name__)
painter.restore() |
def find_modules_with_decorators(path,decorator_module,decorator_name):
'''
Finds all the modules decorated with the specified decorator in the path, file or module specified.
Args :
path : All modules in the directory and its sub-directories will be scanned.
decorator_module : Then full name of the module defining the decorator.
decorator_name : The name of the decorator.
'''
modules_paths = []
#If a path to a module file
if path[-3:] == '.py':
modules_paths.append(path)
#If a directory, Get all the .py files
else :
modules_paths += find_file_regex(path,'.*\.py$')
#Return only modules using the decorator
return [module for module in modules_paths if is_module_has_decorated(module,decorator_module,decorator_name)] | def function[find_modules_with_decorators, parameter[path, decorator_module, decorator_name]]:
constant[
Finds all the modules decorated with the specified decorator in the path, file or module specified.
Args :
path : All modules in the directory and its sub-directories will be scanned.
decorator_module : Then full name of the module defining the decorator.
decorator_name : The name of the decorator.
]
variable[modules_paths] assign[=] list[[]]
if compare[call[name[path]][<ast.Slice object at 0x7da1b144cdc0>] equal[==] constant[.py]] begin[:]
call[name[modules_paths].append, parameter[name[path]]]
return[<ast.ListComp object at 0x7da1b144c490>] | keyword[def] identifier[find_modules_with_decorators] ( identifier[path] , identifier[decorator_module] , identifier[decorator_name] ):
literal[string]
identifier[modules_paths] =[]
keyword[if] identifier[path] [- literal[int] :]== literal[string] :
identifier[modules_paths] . identifier[append] ( identifier[path] )
keyword[else] :
identifier[modules_paths] += identifier[find_file_regex] ( identifier[path] , literal[string] )
keyword[return] [ identifier[module] keyword[for] identifier[module] keyword[in] identifier[modules_paths] keyword[if] identifier[is_module_has_decorated] ( identifier[module] , identifier[decorator_module] , identifier[decorator_name] )] | def find_modules_with_decorators(path, decorator_module, decorator_name):
"""
Finds all the modules decorated with the specified decorator in the path, file or module specified.
Args :
path : All modules in the directory and its sub-directories will be scanned.
decorator_module : Then full name of the module defining the decorator.
decorator_name : The name of the decorator.
"""
modules_paths = []
#If a path to a module file
if path[-3:] == '.py':
modules_paths.append(path) # depends on [control=['if'], data=[]]
else:
#If a directory, Get all the .py files
modules_paths += find_file_regex(path, '.*\\.py$')
#Return only modules using the decorator
return [module for module in modules_paths if is_module_has_decorated(module, decorator_module, decorator_name)] |
def remove_cached_item(self, path):
"""
Remove cached resource item
:param path: str
:return: PIL.Image
"""
item_path = '%s/%s' % (
self.cache_folder,
path.strip('/')
)
self.blob_service.delete_blob(self.container_name, item_path)
while self.blob_service.exists(self.container_name, item_path):
time.sleep(0.5)
return True | def function[remove_cached_item, parameter[self, path]]:
constant[
Remove cached resource item
:param path: str
:return: PIL.Image
]
variable[item_path] assign[=] binary_operation[constant[%s/%s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Attribute object at 0x7da1b15f30a0>, <ast.Call object at 0x7da1b15f0a00>]]]
call[name[self].blob_service.delete_blob, parameter[name[self].container_name, name[item_path]]]
while call[name[self].blob_service.exists, parameter[name[self].container_name, name[item_path]]] begin[:]
call[name[time].sleep, parameter[constant[0.5]]]
return[constant[True]] | keyword[def] identifier[remove_cached_item] ( identifier[self] , identifier[path] ):
literal[string]
identifier[item_path] = literal[string] %(
identifier[self] . identifier[cache_folder] ,
identifier[path] . identifier[strip] ( literal[string] )
)
identifier[self] . identifier[blob_service] . identifier[delete_blob] ( identifier[self] . identifier[container_name] , identifier[item_path] )
keyword[while] identifier[self] . identifier[blob_service] . identifier[exists] ( identifier[self] . identifier[container_name] , identifier[item_path] ):
identifier[time] . identifier[sleep] ( literal[int] )
keyword[return] keyword[True] | def remove_cached_item(self, path):
"""
Remove cached resource item
:param path: str
:return: PIL.Image
"""
item_path = '%s/%s' % (self.cache_folder, path.strip('/'))
self.blob_service.delete_blob(self.container_name, item_path)
while self.blob_service.exists(self.container_name, item_path):
time.sleep(0.5) # depends on [control=['while'], data=[]]
return True |
def __register(self, client_id, client_secret, email, scope, first_name,
last_name, original_ip, original_device, **kwargs):
"""Call documentation: `/user/register
<https://www.wepay.com/developer/reference/user#register>`_, plus
extra keyword parameter:
:keyword bool batch_mode: turn on/off the batch_mode, see
:class:`wepay.api.WePay`
:keyword str batch_reference_id: `reference_id` param for batch call,
see :class:`wepay.api.WePay`
:keyword str api_version: WePay API version, see
:class:`wepay.api.WePay`
.. note ::
This call is NOT supported by API versions older then '2014-01-08'.
"""
params = {
'client_id': client_id,
'client_secret': client_secret,
'email': email,
'scope': scope,
'first_name': first_name,
'last_name': last_name,
'original_ip': original_ip,
'original_device': original_device
}
return self.make_call(self.__register, params, kwargs) | def function[__register, parameter[self, client_id, client_secret, email, scope, first_name, last_name, original_ip, original_device]]:
constant[Call documentation: `/user/register
<https://www.wepay.com/developer/reference/user#register>`_, plus
extra keyword parameter:
:keyword bool batch_mode: turn on/off the batch_mode, see
:class:`wepay.api.WePay`
:keyword str batch_reference_id: `reference_id` param for batch call,
see :class:`wepay.api.WePay`
:keyword str api_version: WePay API version, see
:class:`wepay.api.WePay`
.. note ::
This call is NOT supported by API versions older then '2014-01-08'.
]
variable[params] assign[=] dictionary[[<ast.Constant object at 0x7da1b13b9000>, <ast.Constant object at 0x7da1b13b9030>, <ast.Constant object at 0x7da1b13b9420>, <ast.Constant object at 0x7da1b13b9660>, <ast.Constant object at 0x7da1b13ba0b0>, <ast.Constant object at 0x7da1b13bb3a0>, <ast.Constant object at 0x7da1b13bbdc0>, <ast.Constant object at 0x7da1b13bb880>], [<ast.Name object at 0x7da1b13b8520>, <ast.Name object at 0x7da1b13bb6a0>, <ast.Name object at 0x7da1b13bab00>, <ast.Name object at 0x7da1b13baa40>, <ast.Name object at 0x7da1b13b8b80>, <ast.Name object at 0x7da1b13bbb80>, <ast.Name object at 0x7da1b13bb3d0>, <ast.Name object at 0x7da1b13b8670>]]
return[call[name[self].make_call, parameter[name[self].__register, name[params], name[kwargs]]]] | keyword[def] identifier[__register] ( identifier[self] , identifier[client_id] , identifier[client_secret] , identifier[email] , identifier[scope] , identifier[first_name] ,
identifier[last_name] , identifier[original_ip] , identifier[original_device] ,** identifier[kwargs] ):
literal[string]
identifier[params] ={
literal[string] : identifier[client_id] ,
literal[string] : identifier[client_secret] ,
literal[string] : identifier[email] ,
literal[string] : identifier[scope] ,
literal[string] : identifier[first_name] ,
literal[string] : identifier[last_name] ,
literal[string] : identifier[original_ip] ,
literal[string] : identifier[original_device]
}
keyword[return] identifier[self] . identifier[make_call] ( identifier[self] . identifier[__register] , identifier[params] , identifier[kwargs] ) | def __register(self, client_id, client_secret, email, scope, first_name, last_name, original_ip, original_device, **kwargs):
"""Call documentation: `/user/register
<https://www.wepay.com/developer/reference/user#register>`_, plus
extra keyword parameter:
:keyword bool batch_mode: turn on/off the batch_mode, see
:class:`wepay.api.WePay`
:keyword str batch_reference_id: `reference_id` param for batch call,
see :class:`wepay.api.WePay`
:keyword str api_version: WePay API version, see
:class:`wepay.api.WePay`
.. note ::
This call is NOT supported by API versions older then '2014-01-08'.
"""
params = {'client_id': client_id, 'client_secret': client_secret, 'email': email, 'scope': scope, 'first_name': first_name, 'last_name': last_name, 'original_ip': original_ip, 'original_device': original_device}
return self.make_call(self.__register, params, kwargs) |
def make_a_call(self, number: int or str = 18268237856) -> None:
'''Make a call.'''
self.app_start_action(Actions.CALL, '-d', 'tel:{}'.format(str(number))) | def function[make_a_call, parameter[self, number]]:
constant[Make a call.]
call[name[self].app_start_action, parameter[name[Actions].CALL, constant[-d], call[constant[tel:{}].format, parameter[call[name[str], parameter[name[number]]]]]]] | keyword[def] identifier[make_a_call] ( identifier[self] , identifier[number] : identifier[int] keyword[or] identifier[str] = literal[int] )-> keyword[None] :
literal[string]
identifier[self] . identifier[app_start_action] ( identifier[Actions] . identifier[CALL] , literal[string] , literal[string] . identifier[format] ( identifier[str] ( identifier[number] ))) | def make_a_call(self, number: int or str=18268237856) -> None:
"""Make a call."""
self.app_start_action(Actions.CALL, '-d', 'tel:{}'.format(str(number))) |
def safe_getattr(brain_or_object, attr, default=_marker):
"""Return the attribute value
:param brain_or_object: A single catalog brain or content object
:type brain_or_object: ATContentType/DexterityContentType/CatalogBrain
:param attr: Attribute name
:type attr: str
:returns: Attribute value
:rtype: obj
"""
try:
value = getattr(brain_or_object, attr, _marker)
if value is _marker:
if default is not _marker:
return default
fail("Attribute '{}' not found.".format(attr))
if callable(value):
return value()
return value
except Unauthorized:
if default is not _marker:
return default
fail("You are not authorized to access '{}' of '{}'.".format(
attr, repr(brain_or_object))) | def function[safe_getattr, parameter[brain_or_object, attr, default]]:
constant[Return the attribute value
:param brain_or_object: A single catalog brain or content object
:type brain_or_object: ATContentType/DexterityContentType/CatalogBrain
:param attr: Attribute name
:type attr: str
:returns: Attribute value
:rtype: obj
]
<ast.Try object at 0x7da18eb57220> | keyword[def] identifier[safe_getattr] ( identifier[brain_or_object] , identifier[attr] , identifier[default] = identifier[_marker] ):
literal[string]
keyword[try] :
identifier[value] = identifier[getattr] ( identifier[brain_or_object] , identifier[attr] , identifier[_marker] )
keyword[if] identifier[value] keyword[is] identifier[_marker] :
keyword[if] identifier[default] keyword[is] keyword[not] identifier[_marker] :
keyword[return] identifier[default]
identifier[fail] ( literal[string] . identifier[format] ( identifier[attr] ))
keyword[if] identifier[callable] ( identifier[value] ):
keyword[return] identifier[value] ()
keyword[return] identifier[value]
keyword[except] identifier[Unauthorized] :
keyword[if] identifier[default] keyword[is] keyword[not] identifier[_marker] :
keyword[return] identifier[default]
identifier[fail] ( literal[string] . identifier[format] (
identifier[attr] , identifier[repr] ( identifier[brain_or_object] ))) | def safe_getattr(brain_or_object, attr, default=_marker):
"""Return the attribute value
:param brain_or_object: A single catalog brain or content object
:type brain_or_object: ATContentType/DexterityContentType/CatalogBrain
:param attr: Attribute name
:type attr: str
:returns: Attribute value
:rtype: obj
"""
try:
value = getattr(brain_or_object, attr, _marker)
if value is _marker:
if default is not _marker:
return default # depends on [control=['if'], data=['default']]
fail("Attribute '{}' not found.".format(attr)) # depends on [control=['if'], data=['_marker']]
if callable(value):
return value() # depends on [control=['if'], data=[]]
return value # depends on [control=['try'], data=[]]
except Unauthorized:
if default is not _marker:
return default # depends on [control=['if'], data=['default']]
fail("You are not authorized to access '{}' of '{}'.".format(attr, repr(brain_or_object))) # depends on [control=['except'], data=[]] |
def install_middleware(middleware_name, lookup_names=None):
"""
Install specified middleware
"""
if lookup_names is None:
lookup_names = (middleware_name,)
# default settings.MIDDLEWARE is None
middleware_attr = 'MIDDLEWARE' if getattr(settings,
'MIDDLEWARE',
None) is not None \
else 'MIDDLEWARE_CLASSES'
# make sure to get an empty tuple when attr is None
middleware = getattr(settings, middleware_attr, ()) or ()
if set(lookup_names).isdisjoint(set(middleware)):
setattr(settings,
middleware_attr,
type(middleware)((middleware_name,)) + middleware) | def function[install_middleware, parameter[middleware_name, lookup_names]]:
constant[
Install specified middleware
]
if compare[name[lookup_names] is constant[None]] begin[:]
variable[lookup_names] assign[=] tuple[[<ast.Name object at 0x7da1b1727880>]]
variable[middleware_attr] assign[=] <ast.IfExp object at 0x7da1b1724130>
variable[middleware] assign[=] <ast.BoolOp object at 0x7da204565780>
if call[call[name[set], parameter[name[lookup_names]]].isdisjoint, parameter[call[name[set], parameter[name[middleware]]]]] begin[:]
call[name[setattr], parameter[name[settings], name[middleware_attr], binary_operation[call[call[name[type], parameter[name[middleware]]], parameter[tuple[[<ast.Name object at 0x7da1b17cfd30>]]]] + name[middleware]]]] | keyword[def] identifier[install_middleware] ( identifier[middleware_name] , identifier[lookup_names] = keyword[None] ):
literal[string]
keyword[if] identifier[lookup_names] keyword[is] keyword[None] :
identifier[lookup_names] =( identifier[middleware_name] ,)
identifier[middleware_attr] = literal[string] keyword[if] identifier[getattr] ( identifier[settings] ,
literal[string] ,
keyword[None] ) keyword[is] keyword[not] keyword[None] keyword[else] literal[string]
identifier[middleware] = identifier[getattr] ( identifier[settings] , identifier[middleware_attr] ,()) keyword[or] ()
keyword[if] identifier[set] ( identifier[lookup_names] ). identifier[isdisjoint] ( identifier[set] ( identifier[middleware] )):
identifier[setattr] ( identifier[settings] ,
identifier[middleware_attr] ,
identifier[type] ( identifier[middleware] )(( identifier[middleware_name] ,))+ identifier[middleware] ) | def install_middleware(middleware_name, lookup_names=None):
"""
Install specified middleware
"""
if lookup_names is None:
lookup_names = (middleware_name,) # depends on [control=['if'], data=['lookup_names']]
# default settings.MIDDLEWARE is None
middleware_attr = 'MIDDLEWARE' if getattr(settings, 'MIDDLEWARE', None) is not None else 'MIDDLEWARE_CLASSES'
# make sure to get an empty tuple when attr is None
middleware = getattr(settings, middleware_attr, ()) or ()
if set(lookup_names).isdisjoint(set(middleware)):
setattr(settings, middleware_attr, type(middleware)((middleware_name,)) + middleware) # depends on [control=['if'], data=[]] |
def nice_output(self):
"""Return a string for printing"""
dates = [
str_format('Opening Day {0}: {1}.',
[self.year, date_format(self.first_date_seas)]),
str_format('Last day of the 1st half: {0}.',
[date_format(self.last_date_1sth)]),
str_format('{0} All Star Game: {1}.',
[self.year, date_format(self.all_star_date)]),
str_format('First day of the 2nd half: {}.',
[date_format(self.first_date_2ndh)]),
str_format('Last day of the {0} season: {1}.',
[self.year, date_format(self.last_date_seas)]),
str_format('{0} Playoffs start: {1}.',
[self.year, date_format(self.playoffs_start_date)]),
str_format('{0} Playoffs end: {1}.',
[self.year, date_format(self.playoffs_end_date)])
]
return '\n'.join(dates) | def function[nice_output, parameter[self]]:
constant[Return a string for printing]
variable[dates] assign[=] list[[<ast.Call object at 0x7da20c7c8eb0>, <ast.Call object at 0x7da20c7c8cd0>, <ast.Call object at 0x7da20c7c83d0>, <ast.Call object at 0x7da20c7cb6a0>, <ast.Call object at 0x7da20c7c8940>, <ast.Call object at 0x7da20c7c8dc0>, <ast.Call object at 0x7da20c7cb8b0>]]
return[call[constant[
].join, parameter[name[dates]]]] | keyword[def] identifier[nice_output] ( identifier[self] ):
literal[string]
identifier[dates] =[
identifier[str_format] ( literal[string] ,
[ identifier[self] . identifier[year] , identifier[date_format] ( identifier[self] . identifier[first_date_seas] )]),
identifier[str_format] ( literal[string] ,
[ identifier[date_format] ( identifier[self] . identifier[last_date_1sth] )]),
identifier[str_format] ( literal[string] ,
[ identifier[self] . identifier[year] , identifier[date_format] ( identifier[self] . identifier[all_star_date] )]),
identifier[str_format] ( literal[string] ,
[ identifier[date_format] ( identifier[self] . identifier[first_date_2ndh] )]),
identifier[str_format] ( literal[string] ,
[ identifier[self] . identifier[year] , identifier[date_format] ( identifier[self] . identifier[last_date_seas] )]),
identifier[str_format] ( literal[string] ,
[ identifier[self] . identifier[year] , identifier[date_format] ( identifier[self] . identifier[playoffs_start_date] )]),
identifier[str_format] ( literal[string] ,
[ identifier[self] . identifier[year] , identifier[date_format] ( identifier[self] . identifier[playoffs_end_date] )])
]
keyword[return] literal[string] . identifier[join] ( identifier[dates] ) | def nice_output(self):
"""Return a string for printing"""
dates = [str_format('Opening Day {0}: {1}.', [self.year, date_format(self.first_date_seas)]), str_format('Last day of the 1st half: {0}.', [date_format(self.last_date_1sth)]), str_format('{0} All Star Game: {1}.', [self.year, date_format(self.all_star_date)]), str_format('First day of the 2nd half: {}.', [date_format(self.first_date_2ndh)]), str_format('Last day of the {0} season: {1}.', [self.year, date_format(self.last_date_seas)]), str_format('{0} Playoffs start: {1}.', [self.year, date_format(self.playoffs_start_date)]), str_format('{0} Playoffs end: {1}.', [self.year, date_format(self.playoffs_end_date)])]
return '\n'.join(dates) |
def avg(self):
"""return the mean value"""
# XXX rename this method
if len(self.values) > 0:
return sum(self.values) / float(len(self.values))
else:
return None | def function[avg, parameter[self]]:
constant[return the mean value]
if compare[call[name[len], parameter[name[self].values]] greater[>] constant[0]] begin[:]
return[binary_operation[call[name[sum], parameter[name[self].values]] / call[name[float], parameter[call[name[len], parameter[name[self].values]]]]]] | keyword[def] identifier[avg] ( identifier[self] ):
literal[string]
keyword[if] identifier[len] ( identifier[self] . identifier[values] )> literal[int] :
keyword[return] identifier[sum] ( identifier[self] . identifier[values] )/ identifier[float] ( identifier[len] ( identifier[self] . identifier[values] ))
keyword[else] :
keyword[return] keyword[None] | def avg(self):
"""return the mean value"""
# XXX rename this method
if len(self.values) > 0:
return sum(self.values) / float(len(self.values)) # depends on [control=['if'], data=[]]
else:
return None |
def lookup_users(self, user_ids=None, screen_names=None, include_entities=None, tweet_mode=None):
""" Perform bulk look up of users from user ID or screen_name """
post_data = {}
if include_entities is not None:
include_entities = 'true' if include_entities else 'false'
post_data['include_entities'] = include_entities
if user_ids:
post_data['user_id'] = list_to_csv(user_ids)
if screen_names:
post_data['screen_name'] = list_to_csv(screen_names)
if tweet_mode:
post_data['tweet_mode'] = tweet_mode
return self._lookup_users(post_data=post_data) | def function[lookup_users, parameter[self, user_ids, screen_names, include_entities, tweet_mode]]:
constant[ Perform bulk look up of users from user ID or screen_name ]
variable[post_data] assign[=] dictionary[[], []]
if compare[name[include_entities] is_not constant[None]] begin[:]
variable[include_entities] assign[=] <ast.IfExp object at 0x7da2054a7d30>
call[name[post_data]][constant[include_entities]] assign[=] name[include_entities]
if name[user_ids] begin[:]
call[name[post_data]][constant[user_id]] assign[=] call[name[list_to_csv], parameter[name[user_ids]]]
if name[screen_names] begin[:]
call[name[post_data]][constant[screen_name]] assign[=] call[name[list_to_csv], parameter[name[screen_names]]]
if name[tweet_mode] begin[:]
call[name[post_data]][constant[tweet_mode]] assign[=] name[tweet_mode]
return[call[name[self]._lookup_users, parameter[]]] | keyword[def] identifier[lookup_users] ( identifier[self] , identifier[user_ids] = keyword[None] , identifier[screen_names] = keyword[None] , identifier[include_entities] = keyword[None] , identifier[tweet_mode] = keyword[None] ):
literal[string]
identifier[post_data] ={}
keyword[if] identifier[include_entities] keyword[is] keyword[not] keyword[None] :
identifier[include_entities] = literal[string] keyword[if] identifier[include_entities] keyword[else] literal[string]
identifier[post_data] [ literal[string] ]= identifier[include_entities]
keyword[if] identifier[user_ids] :
identifier[post_data] [ literal[string] ]= identifier[list_to_csv] ( identifier[user_ids] )
keyword[if] identifier[screen_names] :
identifier[post_data] [ literal[string] ]= identifier[list_to_csv] ( identifier[screen_names] )
keyword[if] identifier[tweet_mode] :
identifier[post_data] [ literal[string] ]= identifier[tweet_mode]
keyword[return] identifier[self] . identifier[_lookup_users] ( identifier[post_data] = identifier[post_data] ) | def lookup_users(self, user_ids=None, screen_names=None, include_entities=None, tweet_mode=None):
""" Perform bulk look up of users from user ID or screen_name """
post_data = {}
if include_entities is not None:
include_entities = 'true' if include_entities else 'false'
post_data['include_entities'] = include_entities # depends on [control=['if'], data=['include_entities']]
if user_ids:
post_data['user_id'] = list_to_csv(user_ids) # depends on [control=['if'], data=[]]
if screen_names:
post_data['screen_name'] = list_to_csv(screen_names) # depends on [control=['if'], data=[]]
if tweet_mode:
post_data['tweet_mode'] = tweet_mode # depends on [control=['if'], data=[]]
return self._lookup_users(post_data=post_data) |
def _modify_relationship(relationship, unlink=False, is_sub=False):
"""Return a function for relationship modification.
Used to support friending (user-to-user), as well as moderating,
contributor creating, and banning (user-to-subreddit).
"""
# The API uses friend and unfriend to manage all of these relationships.
url_key = 'unfriend' if unlink else 'friend'
if relationship == 'friend':
access = {'scope': None, 'login': True}
elif relationship == 'moderator':
access = {'scope': 'modothers'}
elif relationship in ['banned', 'contributor', 'muted']:
access = {'scope': 'modcontributors'}
elif relationship in ['wikibanned', 'wikicontributor']:
access = {'scope': ['modcontributors', 'modwiki']}
else:
access = {'scope': None, 'mod': True}
@restrict_access(**access)
def do_relationship(thing, user, **kwargs):
data = {'name': six.text_type(user),
'type': relationship}
data.update(kwargs)
if is_sub:
data['r'] = six.text_type(thing)
else:
data['container'] = thing.fullname
session = thing.reddit_session
if relationship == 'moderator':
session.evict(session.config['moderators'].format(
subreddit=six.text_type(thing)))
url = session.config[url_key]
return session.request_json(url, data=data)
return do_relationship | def function[_modify_relationship, parameter[relationship, unlink, is_sub]]:
constant[Return a function for relationship modification.
Used to support friending (user-to-user), as well as moderating,
contributor creating, and banning (user-to-subreddit).
]
variable[url_key] assign[=] <ast.IfExp object at 0x7da2054a4af0>
if compare[name[relationship] equal[==] constant[friend]] begin[:]
variable[access] assign[=] dictionary[[<ast.Constant object at 0x7da2054a5090>, <ast.Constant object at 0x7da2054a5ab0>], [<ast.Constant object at 0x7da2054a5cf0>, <ast.Constant object at 0x7da2054a4190>]]
def function[do_relationship, parameter[thing, user]]:
variable[data] assign[=] dictionary[[<ast.Constant object at 0x7da2054a63b0>, <ast.Constant object at 0x7da2054a4700>], [<ast.Call object at 0x7da2054a7c40>, <ast.Name object at 0x7da2054a7490>]]
call[name[data].update, parameter[name[kwargs]]]
if name[is_sub] begin[:]
call[name[data]][constant[r]] assign[=] call[name[six].text_type, parameter[name[thing]]]
variable[session] assign[=] name[thing].reddit_session
if compare[name[relationship] equal[==] constant[moderator]] begin[:]
call[name[session].evict, parameter[call[call[name[session].config][constant[moderators]].format, parameter[]]]]
variable[url] assign[=] call[name[session].config][name[url_key]]
return[call[name[session].request_json, parameter[name[url]]]]
return[name[do_relationship]] | keyword[def] identifier[_modify_relationship] ( identifier[relationship] , identifier[unlink] = keyword[False] , identifier[is_sub] = keyword[False] ):
literal[string]
identifier[url_key] = literal[string] keyword[if] identifier[unlink] keyword[else] literal[string]
keyword[if] identifier[relationship] == literal[string] :
identifier[access] ={ literal[string] : keyword[None] , literal[string] : keyword[True] }
keyword[elif] identifier[relationship] == literal[string] :
identifier[access] ={ literal[string] : literal[string] }
keyword[elif] identifier[relationship] keyword[in] [ literal[string] , literal[string] , literal[string] ]:
identifier[access] ={ literal[string] : literal[string] }
keyword[elif] identifier[relationship] keyword[in] [ literal[string] , literal[string] ]:
identifier[access] ={ literal[string] :[ literal[string] , literal[string] ]}
keyword[else] :
identifier[access] ={ literal[string] : keyword[None] , literal[string] : keyword[True] }
@ identifier[restrict_access] (** identifier[access] )
keyword[def] identifier[do_relationship] ( identifier[thing] , identifier[user] ,** identifier[kwargs] ):
identifier[data] ={ literal[string] : identifier[six] . identifier[text_type] ( identifier[user] ),
literal[string] : identifier[relationship] }
identifier[data] . identifier[update] ( identifier[kwargs] )
keyword[if] identifier[is_sub] :
identifier[data] [ literal[string] ]= identifier[six] . identifier[text_type] ( identifier[thing] )
keyword[else] :
identifier[data] [ literal[string] ]= identifier[thing] . identifier[fullname]
identifier[session] = identifier[thing] . identifier[reddit_session]
keyword[if] identifier[relationship] == literal[string] :
identifier[session] . identifier[evict] ( identifier[session] . identifier[config] [ literal[string] ]. identifier[format] (
identifier[subreddit] = identifier[six] . identifier[text_type] ( identifier[thing] )))
identifier[url] = identifier[session] . identifier[config] [ identifier[url_key] ]
keyword[return] identifier[session] . identifier[request_json] ( identifier[url] , identifier[data] = identifier[data] )
keyword[return] identifier[do_relationship] | def _modify_relationship(relationship, unlink=False, is_sub=False):
"""Return a function for relationship modification.
Used to support friending (user-to-user), as well as moderating,
contributor creating, and banning (user-to-subreddit).
"""
# The API uses friend and unfriend to manage all of these relationships.
url_key = 'unfriend' if unlink else 'friend'
if relationship == 'friend':
access = {'scope': None, 'login': True} # depends on [control=['if'], data=[]]
elif relationship == 'moderator':
access = {'scope': 'modothers'} # depends on [control=['if'], data=[]]
elif relationship in ['banned', 'contributor', 'muted']:
access = {'scope': 'modcontributors'} # depends on [control=['if'], data=[]]
elif relationship in ['wikibanned', 'wikicontributor']:
access = {'scope': ['modcontributors', 'modwiki']} # depends on [control=['if'], data=[]]
else:
access = {'scope': None, 'mod': True}
@restrict_access(**access)
def do_relationship(thing, user, **kwargs):
data = {'name': six.text_type(user), 'type': relationship}
data.update(kwargs)
if is_sub:
data['r'] = six.text_type(thing) # depends on [control=['if'], data=[]]
else:
data['container'] = thing.fullname
session = thing.reddit_session
if relationship == 'moderator':
session.evict(session.config['moderators'].format(subreddit=six.text_type(thing))) # depends on [control=['if'], data=[]]
url = session.config[url_key]
return session.request_json(url, data=data)
return do_relationship |
def handle_prep(self, req):
"""Handles the POST v2/.prep call for preparing the backing store Swift
cluster for use with the auth subsystem. Can only be called by
.super_admin.
:param req: The swob.Request to process.
:returns: swob.Response, 204 on success
"""
if not self.is_super_admin(req):
return self.denied_response(req)
path = quote('/v1/%s' % self.auth_account)
resp = self.make_pre_authed_request(
req.environ, 'PUT', path).get_response(self.app)
if resp.status_int // 100 != 2:
raise Exception('Could not create the main auth account: %s %s' %
(path, resp.status))
path = quote('/v1/%s/.account_id' % self.auth_account)
resp = self.make_pre_authed_request(
req.environ, 'PUT', path).get_response(self.app)
if resp.status_int // 100 != 2:
raise Exception('Could not create container: %s %s' %
(path, resp.status))
for container in xrange(16):
path = quote('/v1/%s/.token_%x' % (self.auth_account, container))
resp = self.make_pre_authed_request(
req.environ, 'PUT', path).get_response(self.app)
if resp.status_int // 100 != 2:
raise Exception('Could not create container: %s %s' %
(path, resp.status))
return HTTPNoContent(request=req) | def function[handle_prep, parameter[self, req]]:
constant[Handles the POST v2/.prep call for preparing the backing store Swift
cluster for use with the auth subsystem. Can only be called by
.super_admin.
:param req: The swob.Request to process.
:returns: swob.Response, 204 on success
]
if <ast.UnaryOp object at 0x7da1b0499840> begin[:]
return[call[name[self].denied_response, parameter[name[req]]]]
variable[path] assign[=] call[name[quote], parameter[binary_operation[constant[/v1/%s] <ast.Mod object at 0x7da2590d6920> name[self].auth_account]]]
variable[resp] assign[=] call[call[name[self].make_pre_authed_request, parameter[name[req].environ, constant[PUT], name[path]]].get_response, parameter[name[self].app]]
if compare[binary_operation[name[resp].status_int <ast.FloorDiv object at 0x7da2590d6bc0> constant[100]] not_equal[!=] constant[2]] begin[:]
<ast.Raise object at 0x7da1b0499ff0>
variable[path] assign[=] call[name[quote], parameter[binary_operation[constant[/v1/%s/.account_id] <ast.Mod object at 0x7da2590d6920> name[self].auth_account]]]
variable[resp] assign[=] call[call[name[self].make_pre_authed_request, parameter[name[req].environ, constant[PUT], name[path]]].get_response, parameter[name[self].app]]
if compare[binary_operation[name[resp].status_int <ast.FloorDiv object at 0x7da2590d6bc0> constant[100]] not_equal[!=] constant[2]] begin[:]
<ast.Raise object at 0x7da1b049a8f0>
for taget[name[container]] in starred[call[name[xrange], parameter[constant[16]]]] begin[:]
variable[path] assign[=] call[name[quote], parameter[binary_operation[constant[/v1/%s/.token_%x] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Attribute object at 0x7da1b049a800>, <ast.Name object at 0x7da1b049b4f0>]]]]]
variable[resp] assign[=] call[call[name[self].make_pre_authed_request, parameter[name[req].environ, constant[PUT], name[path]]].get_response, parameter[name[self].app]]
if compare[binary_operation[name[resp].status_int <ast.FloorDiv object at 0x7da2590d6bc0> constant[100]] not_equal[!=] constant[2]] begin[:]
<ast.Raise object at 0x7da1b04bf0d0>
return[call[name[HTTPNoContent], parameter[]]] | keyword[def] identifier[handle_prep] ( identifier[self] , identifier[req] ):
literal[string]
keyword[if] keyword[not] identifier[self] . identifier[is_super_admin] ( identifier[req] ):
keyword[return] identifier[self] . identifier[denied_response] ( identifier[req] )
identifier[path] = identifier[quote] ( literal[string] % identifier[self] . identifier[auth_account] )
identifier[resp] = identifier[self] . identifier[make_pre_authed_request] (
identifier[req] . identifier[environ] , literal[string] , identifier[path] ). identifier[get_response] ( identifier[self] . identifier[app] )
keyword[if] identifier[resp] . identifier[status_int] // literal[int] != literal[int] :
keyword[raise] identifier[Exception] ( literal[string] %
( identifier[path] , identifier[resp] . identifier[status] ))
identifier[path] = identifier[quote] ( literal[string] % identifier[self] . identifier[auth_account] )
identifier[resp] = identifier[self] . identifier[make_pre_authed_request] (
identifier[req] . identifier[environ] , literal[string] , identifier[path] ). identifier[get_response] ( identifier[self] . identifier[app] )
keyword[if] identifier[resp] . identifier[status_int] // literal[int] != literal[int] :
keyword[raise] identifier[Exception] ( literal[string] %
( identifier[path] , identifier[resp] . identifier[status] ))
keyword[for] identifier[container] keyword[in] identifier[xrange] ( literal[int] ):
identifier[path] = identifier[quote] ( literal[string] %( identifier[self] . identifier[auth_account] , identifier[container] ))
identifier[resp] = identifier[self] . identifier[make_pre_authed_request] (
identifier[req] . identifier[environ] , literal[string] , identifier[path] ). identifier[get_response] ( identifier[self] . identifier[app] )
keyword[if] identifier[resp] . identifier[status_int] // literal[int] != literal[int] :
keyword[raise] identifier[Exception] ( literal[string] %
( identifier[path] , identifier[resp] . identifier[status] ))
keyword[return] identifier[HTTPNoContent] ( identifier[request] = identifier[req] ) | def handle_prep(self, req):
"""Handles the POST v2/.prep call for preparing the backing store Swift
cluster for use with the auth subsystem. Can only be called by
.super_admin.
:param req: The swob.Request to process.
:returns: swob.Response, 204 on success
"""
if not self.is_super_admin(req):
return self.denied_response(req) # depends on [control=['if'], data=[]]
path = quote('/v1/%s' % self.auth_account)
resp = self.make_pre_authed_request(req.environ, 'PUT', path).get_response(self.app)
if resp.status_int // 100 != 2:
raise Exception('Could not create the main auth account: %s %s' % (path, resp.status)) # depends on [control=['if'], data=[]]
path = quote('/v1/%s/.account_id' % self.auth_account)
resp = self.make_pre_authed_request(req.environ, 'PUT', path).get_response(self.app)
if resp.status_int // 100 != 2:
raise Exception('Could not create container: %s %s' % (path, resp.status)) # depends on [control=['if'], data=[]]
for container in xrange(16):
path = quote('/v1/%s/.token_%x' % (self.auth_account, container))
resp = self.make_pre_authed_request(req.environ, 'PUT', path).get_response(self.app)
if resp.status_int // 100 != 2:
raise Exception('Could not create container: %s %s' % (path, resp.status)) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['container']]
return HTTPNoContent(request=req) |
def _unregister_lookup(cls, lookup, lookup_name=None):
"""
Remove given lookup from cls lookups. For use in tests only as it's
not thread-safe.
"""
if lookup_name is None:
lookup_name = lookup.lookup_name
del cls.class_lookups[lookup_name] | def function[_unregister_lookup, parameter[cls, lookup, lookup_name]]:
constant[
Remove given lookup from cls lookups. For use in tests only as it's
not thread-safe.
]
if compare[name[lookup_name] is constant[None]] begin[:]
variable[lookup_name] assign[=] name[lookup].lookup_name
<ast.Delete object at 0x7da1b1b0cd90> | keyword[def] identifier[_unregister_lookup] ( identifier[cls] , identifier[lookup] , identifier[lookup_name] = keyword[None] ):
literal[string]
keyword[if] identifier[lookup_name] keyword[is] keyword[None] :
identifier[lookup_name] = identifier[lookup] . identifier[lookup_name]
keyword[del] identifier[cls] . identifier[class_lookups] [ identifier[lookup_name] ] | def _unregister_lookup(cls, lookup, lookup_name=None):
"""
Remove given lookup from cls lookups. For use in tests only as it's
not thread-safe.
"""
if lookup_name is None:
lookup_name = lookup.lookup_name # depends on [control=['if'], data=['lookup_name']]
del cls.class_lookups[lookup_name] |
def send_location(self, loc, to, reply=None):
"""
Use this method to send point on the map. On success, the sent Message is returned.
"""
lat, lon = loc
payload = dict(chat_id=to, reply_to_message_id=reply,
latitude=lat, longitude=lon)
return Message.from_api(api, **self._get('sendLocation', payload)) | def function[send_location, parameter[self, loc, to, reply]]:
constant[
Use this method to send point on the map. On success, the sent Message is returned.
]
<ast.Tuple object at 0x7da18f00c7c0> assign[=] name[loc]
variable[payload] assign[=] call[name[dict], parameter[]]
return[call[name[Message].from_api, parameter[name[api]]]] | keyword[def] identifier[send_location] ( identifier[self] , identifier[loc] , identifier[to] , identifier[reply] = keyword[None] ):
literal[string]
identifier[lat] , identifier[lon] = identifier[loc]
identifier[payload] = identifier[dict] ( identifier[chat_id] = identifier[to] , identifier[reply_to_message_id] = identifier[reply] ,
identifier[latitude] = identifier[lat] , identifier[longitude] = identifier[lon] )
keyword[return] identifier[Message] . identifier[from_api] ( identifier[api] ,** identifier[self] . identifier[_get] ( literal[string] , identifier[payload] )) | def send_location(self, loc, to, reply=None):
"""
Use this method to send point on the map. On success, the sent Message is returned.
"""
(lat, lon) = loc
payload = dict(chat_id=to, reply_to_message_id=reply, latitude=lat, longitude=lon)
return Message.from_api(api, **self._get('sendLocation', payload)) |
def __load(self):
"""
Loads dynamically the class that acts like a namespace for constants.
"""
parts = self.__class_name.split('.')
module_name = ".".join(parts[:-1])
module = __import__(module_name)
modules = []
for comp in parts[1:]:
module = getattr(module, comp)
modules.append(module)
self.__module = modules[-2] | def function[__load, parameter[self]]:
constant[
Loads dynamically the class that acts like a namespace for constants.
]
variable[parts] assign[=] call[name[self].__class_name.split, parameter[constant[.]]]
variable[module_name] assign[=] call[constant[.].join, parameter[call[name[parts]][<ast.Slice object at 0x7da1b2345d50>]]]
variable[module] assign[=] call[name[__import__], parameter[name[module_name]]]
variable[modules] assign[=] list[[]]
for taget[name[comp]] in starred[call[name[parts]][<ast.Slice object at 0x7da18f810b50>]] begin[:]
variable[module] assign[=] call[name[getattr], parameter[name[module], name[comp]]]
call[name[modules].append, parameter[name[module]]]
name[self].__module assign[=] call[name[modules]][<ast.UnaryOp object at 0x7da20c990070>] | keyword[def] identifier[__load] ( identifier[self] ):
literal[string]
identifier[parts] = identifier[self] . identifier[__class_name] . identifier[split] ( literal[string] )
identifier[module_name] = literal[string] . identifier[join] ( identifier[parts] [:- literal[int] ])
identifier[module] = identifier[__import__] ( identifier[module_name] )
identifier[modules] =[]
keyword[for] identifier[comp] keyword[in] identifier[parts] [ literal[int] :]:
identifier[module] = identifier[getattr] ( identifier[module] , identifier[comp] )
identifier[modules] . identifier[append] ( identifier[module] )
identifier[self] . identifier[__module] = identifier[modules] [- literal[int] ] | def __load(self):
"""
Loads dynamically the class that acts like a namespace for constants.
"""
parts = self.__class_name.split('.')
module_name = '.'.join(parts[:-1])
module = __import__(module_name)
modules = []
for comp in parts[1:]:
module = getattr(module, comp)
modules.append(module) # depends on [control=['for'], data=['comp']]
self.__module = modules[-2] |
def child(self, **kwargs):
'''set childSelector.'''
return AutomatorDeviceObject(
self.device,
self.selector.clone().child(**kwargs)
) | def function[child, parameter[self]]:
constant[set childSelector.]
return[call[name[AutomatorDeviceObject], parameter[name[self].device, call[call[name[self].selector.clone, parameter[]].child, parameter[]]]]] | keyword[def] identifier[child] ( identifier[self] ,** identifier[kwargs] ):
literal[string]
keyword[return] identifier[AutomatorDeviceObject] (
identifier[self] . identifier[device] ,
identifier[self] . identifier[selector] . identifier[clone] (). identifier[child] (** identifier[kwargs] )
) | def child(self, **kwargs):
"""set childSelector."""
return AutomatorDeviceObject(self.device, self.selector.clone().child(**kwargs)) |
def print_trip_table(document):
""" Print trip table """
headers = [
'Alt.',
'Name',
'Time',
'Track',
'Direction',
'Dest.',
'Track',
'Arrival']
table = []
altnr = 0
for alternative in document:
altnr += 1
first_trip_in_alt = True
if not isinstance(alternative['Leg'], list):
alternative['Leg'] = [alternative['Leg']]
for part in alternative['Leg']:
orig = part['Origin']
dest = part['Destination']
row = [
altnr if first_trip_in_alt else None,
part['name'],
orig['rtTime'] if 'rtTime' in orig else orig['time'],
orig['track'],
part['direction'] if 'direction' in part else None,
dest['name'],
dest['track'],
dest['rtTime'] if 'rtTime' in dest else dest['time'],
]
table.append(row)
first_trip_in_alt = False
print(tabulate.tabulate(table, headers)) | def function[print_trip_table, parameter[document]]:
constant[ Print trip table ]
variable[headers] assign[=] list[[<ast.Constant object at 0x7da18c4cfaf0>, <ast.Constant object at 0x7da18c4cf8e0>, <ast.Constant object at 0x7da18c4cfe80>, <ast.Constant object at 0x7da18c4ccf70>, <ast.Constant object at 0x7da18c4cfb50>, <ast.Constant object at 0x7da18c4cdd80>, <ast.Constant object at 0x7da18c4cc220>, <ast.Constant object at 0x7da18c4ce590>]]
variable[table] assign[=] list[[]]
variable[altnr] assign[=] constant[0]
for taget[name[alternative]] in starred[name[document]] begin[:]
<ast.AugAssign object at 0x7da18c4cc2b0>
variable[first_trip_in_alt] assign[=] constant[True]
if <ast.UnaryOp object at 0x7da18c4cc940> begin[:]
call[name[alternative]][constant[Leg]] assign[=] list[[<ast.Subscript object at 0x7da18c4cf850>]]
for taget[name[part]] in starred[call[name[alternative]][constant[Leg]]] begin[:]
variable[orig] assign[=] call[name[part]][constant[Origin]]
variable[dest] assign[=] call[name[part]][constant[Destination]]
variable[row] assign[=] list[[<ast.IfExp object at 0x7da18c4cc5b0>, <ast.Subscript object at 0x7da18c4ce050>, <ast.IfExp object at 0x7da18c4ce650>, <ast.Subscript object at 0x7da18c4cd360>, <ast.IfExp object at 0x7da18c4ce170>, <ast.Subscript object at 0x7da18c4cd6c0>, <ast.Subscript object at 0x7da18c4ced40>, <ast.IfExp object at 0x7da18c4ce1d0>]]
call[name[table].append, parameter[name[row]]]
variable[first_trip_in_alt] assign[=] constant[False]
call[name[print], parameter[call[name[tabulate].tabulate, parameter[name[table], name[headers]]]]] | keyword[def] identifier[print_trip_table] ( identifier[document] ):
literal[string]
identifier[headers] =[
literal[string] ,
literal[string] ,
literal[string] ,
literal[string] ,
literal[string] ,
literal[string] ,
literal[string] ,
literal[string] ]
identifier[table] =[]
identifier[altnr] = literal[int]
keyword[for] identifier[alternative] keyword[in] identifier[document] :
identifier[altnr] += literal[int]
identifier[first_trip_in_alt] = keyword[True]
keyword[if] keyword[not] identifier[isinstance] ( identifier[alternative] [ literal[string] ], identifier[list] ):
identifier[alternative] [ literal[string] ]=[ identifier[alternative] [ literal[string] ]]
keyword[for] identifier[part] keyword[in] identifier[alternative] [ literal[string] ]:
identifier[orig] = identifier[part] [ literal[string] ]
identifier[dest] = identifier[part] [ literal[string] ]
identifier[row] =[
identifier[altnr] keyword[if] identifier[first_trip_in_alt] keyword[else] keyword[None] ,
identifier[part] [ literal[string] ],
identifier[orig] [ literal[string] ] keyword[if] literal[string] keyword[in] identifier[orig] keyword[else] identifier[orig] [ literal[string] ],
identifier[orig] [ literal[string] ],
identifier[part] [ literal[string] ] keyword[if] literal[string] keyword[in] identifier[part] keyword[else] keyword[None] ,
identifier[dest] [ literal[string] ],
identifier[dest] [ literal[string] ],
identifier[dest] [ literal[string] ] keyword[if] literal[string] keyword[in] identifier[dest] keyword[else] identifier[dest] [ literal[string] ],
]
identifier[table] . identifier[append] ( identifier[row] )
identifier[first_trip_in_alt] = keyword[False]
identifier[print] ( identifier[tabulate] . identifier[tabulate] ( identifier[table] , identifier[headers] )) | def print_trip_table(document):
""" Print trip table """
headers = ['Alt.', 'Name', 'Time', 'Track', 'Direction', 'Dest.', 'Track', 'Arrival']
table = []
altnr = 0
for alternative in document:
altnr += 1
first_trip_in_alt = True
if not isinstance(alternative['Leg'], list):
alternative['Leg'] = [alternative['Leg']] # depends on [control=['if'], data=[]]
for part in alternative['Leg']:
orig = part['Origin']
dest = part['Destination']
row = [altnr if first_trip_in_alt else None, part['name'], orig['rtTime'] if 'rtTime' in orig else orig['time'], orig['track'], part['direction'] if 'direction' in part else None, dest['name'], dest['track'], dest['rtTime'] if 'rtTime' in dest else dest['time']]
table.append(row)
first_trip_in_alt = False # depends on [control=['for'], data=['part']] # depends on [control=['for'], data=['alternative']]
print(tabulate.tabulate(table, headers)) |
def pst_prior(pst,logger=None, filename=None, **kwargs):
""" helper to plot prior parameter histograms implied by
parameter bounds. Saves a multipage pdf named <case>.prior.pdf
Parameters
----------
pst : pyemu.Pst
logger : pyemu.Logger
filename : str
PDF filename to save plots to. If None, return figs without saving. Default is None.
kwargs : dict
accepts 'grouper' as dict to group parameters on to a single axis (use
parameter groups if not passed),
'unqiue_only' to only show unique mean-stdev combinations within a
given group
Returns
-------
None
TODO
----
external parcov, unique mean-std pairs
"""
if logger is None:
logger=Logger('Default_Loggger.log',echo=False)
logger.log("plot pst_prior")
par = pst.parameter_data
if "parcov_filename" in pst.pestpp_options:
logger.warn("ignoring parcov_filename, using parameter bounds for prior cov")
logger.log("loading cov from parameter data")
cov = pyemu.Cov.from_parameter_data(pst)
logger.log("loading cov from parameter data")
logger.log("building mean parameter values")
li = par.partrans.loc[cov.names] == "log"
mean = par.parval1.loc[cov.names]
info = par.loc[cov.names,:].copy()
info.loc[:,"mean"] = mean
info.loc[li,"mean"] = mean[li].apply(np.log10)
logger.log("building mean parameter values")
logger.log("building stdev parameter values")
if cov.isdiagonal:
std = cov.x.flatten()
else:
std = np.diag(cov.x)
std = np.sqrt(std)
info.loc[:,"prior_std"] = std
logger.log("building stdev parameter values")
if std.shape != mean.shape:
logger.lraise("mean.shape {0} != std.shape {1}".
format(mean.shape,std.shape))
if "grouper" in kwargs:
raise NotImplementedError()
#check for consistency here
else:
par_adj = par.loc[par.partrans.apply(lambda x: x in ["log","none"]),:]
grouper = par_adj.groupby(par_adj.pargp).groups
#grouper = par.groupby(par.pargp).groups
if len(grouper) == 0:
raise Exception("no adustable parameters to plot")
fig = plt.figure(figsize=figsize)
if "fig_title" in kwargs:
plt.figtext(0.5,0.5,kwargs["fig_title"])
else:
plt.figtext(0.5,0.5,"pyemu.Pst.plot(kind='prior')\nfrom pest control file '{0}'\n at {1}"
.format(pst.filename,str(datetime.now())),ha="center")
figs = []
ax_count = 0
grps_names = list(grouper.keys())
grps_names.sort()
for g in grps_names:
names = grouper[g]
logger.log("plotting priors for {0}".
format(','.join(list(names))))
if ax_count % (nr * nc) == 0:
plt.tight_layout()
#pdf.savefig()
#plt.close(fig)
figs.append(fig)
fig = plt.figure(figsize=figsize)
axes = get_page_axes()
ax_count = 0
islog = False
vc = info.partrans.value_counts()
if vc.shape[0] > 1:
logger.warn("mixed partrans for group {0}".format(g))
elif "log" in vc.index:
islog = True
ax = axes[ax_count]
if "unique_only" in kwargs and kwargs["unique_only"]:
ms = info.loc[names,:].apply(lambda x: (x["mean"],x["prior_std"]),axis=1).unique()
for (m,s) in ms:
x, y = gaussian_distribution(m, s)
ax.fill_between(x, 0, y, facecolor='0.5', alpha=0.5,
edgecolor="none")
else:
for m,s in zip(info.loc[names,'mean'],info.loc[names,'prior_std']):
x,y = gaussian_distribution(m,s)
ax.fill_between(x,0,y,facecolor='0.5',alpha=0.5,
edgecolor="none")
ax.set_title("{0}) group:{1}, {2} parameters".
format(abet[ax_count],g,names.shape[0]),loc="left")
ax.set_yticks([])
if islog:
ax.set_xlabel("$log_{10}$ parameter value",labelpad=0.1)
else:
ax.set_xlabel("parameter value", labelpad=0.1)
logger.log("plotting priors for {0}".
format(','.join(list(names))))
ax_count += 1
for a in range(ax_count,nr*nc):
axes[a].set_axis_off()
axes[a].set_yticks([])
axes[a].set_xticks([])
plt.tight_layout()
#pdf.savefig()
#plt.close(fig)
figs.append(fig)
if filename is not None:
with PdfPages(filename) as pdf:
plt.tight_layout()
pdf.savefig(fig)
plt.close(fig)
logger.log("plot pst_prior")
else:
logger.log("plot pst_prior")
return figs | def function[pst_prior, parameter[pst, logger, filename]]:
constant[ helper to plot prior parameter histograms implied by
parameter bounds. Saves a multipage pdf named <case>.prior.pdf
Parameters
----------
pst : pyemu.Pst
logger : pyemu.Logger
filename : str
PDF filename to save plots to. If None, return figs without saving. Default is None.
kwargs : dict
accepts 'grouper' as dict to group parameters on to a single axis (use
parameter groups if not passed),
'unqiue_only' to only show unique mean-stdev combinations within a
given group
Returns
-------
None
TODO
----
external parcov, unique mean-std pairs
]
if compare[name[logger] is constant[None]] begin[:]
variable[logger] assign[=] call[name[Logger], parameter[constant[Default_Loggger.log]]]
call[name[logger].log, parameter[constant[plot pst_prior]]]
variable[par] assign[=] name[pst].parameter_data
if compare[constant[parcov_filename] in name[pst].pestpp_options] begin[:]
call[name[logger].warn, parameter[constant[ignoring parcov_filename, using parameter bounds for prior cov]]]
call[name[logger].log, parameter[constant[loading cov from parameter data]]]
variable[cov] assign[=] call[name[pyemu].Cov.from_parameter_data, parameter[name[pst]]]
call[name[logger].log, parameter[constant[loading cov from parameter data]]]
call[name[logger].log, parameter[constant[building mean parameter values]]]
variable[li] assign[=] compare[call[name[par].partrans.loc][name[cov].names] equal[==] constant[log]]
variable[mean] assign[=] call[name[par].parval1.loc][name[cov].names]
variable[info] assign[=] call[call[name[par].loc][tuple[[<ast.Attribute object at 0x7da1b1dad930>, <ast.Slice object at 0x7da1b1daf4c0>]]].copy, parameter[]]
call[name[info].loc][tuple[[<ast.Slice object at 0x7da1b1dadbd0>, <ast.Constant object at 0x7da1b1dadf90>]]] assign[=] name[mean]
call[name[info].loc][tuple[[<ast.Name object at 0x7da1b1dadfc0>, <ast.Constant object at 0x7da1b1daf0d0>]]] assign[=] call[call[name[mean]][name[li]].apply, parameter[name[np].log10]]
call[name[logger].log, parameter[constant[building mean parameter values]]]
call[name[logger].log, parameter[constant[building stdev parameter values]]]
if name[cov].isdiagonal begin[:]
variable[std] assign[=] call[name[cov].x.flatten, parameter[]]
variable[std] assign[=] call[name[np].sqrt, parameter[name[std]]]
call[name[info].loc][tuple[[<ast.Slice object at 0x7da1b1d28880>, <ast.Constant object at 0x7da1b1d28dc0>]]] assign[=] name[std]
call[name[logger].log, parameter[constant[building stdev parameter values]]]
if compare[name[std].shape not_equal[!=] name[mean].shape] begin[:]
call[name[logger].lraise, parameter[call[constant[mean.shape {0} != std.shape {1}].format, parameter[name[mean].shape, name[std].shape]]]]
if compare[constant[grouper] in name[kwargs]] begin[:]
<ast.Raise object at 0x7da1b1d28a90>
if compare[call[name[len], parameter[name[grouper]]] equal[==] constant[0]] begin[:]
<ast.Raise object at 0x7da1b1d28550>
variable[fig] assign[=] call[name[plt].figure, parameter[]]
if compare[constant[fig_title] in name[kwargs]] begin[:]
call[name[plt].figtext, parameter[constant[0.5], constant[0.5], call[name[kwargs]][constant[fig_title]]]]
variable[figs] assign[=] list[[]]
variable[ax_count] assign[=] constant[0]
variable[grps_names] assign[=] call[name[list], parameter[call[name[grouper].keys, parameter[]]]]
call[name[grps_names].sort, parameter[]]
for taget[name[g]] in starred[name[grps_names]] begin[:]
variable[names] assign[=] call[name[grouper]][name[g]]
call[name[logger].log, parameter[call[constant[plotting priors for {0}].format, parameter[call[constant[,].join, parameter[call[name[list], parameter[name[names]]]]]]]]]
if compare[binary_operation[name[ax_count] <ast.Mod object at 0x7da2590d6920> binary_operation[name[nr] * name[nc]]] equal[==] constant[0]] begin[:]
call[name[plt].tight_layout, parameter[]]
call[name[figs].append, parameter[name[fig]]]
variable[fig] assign[=] call[name[plt].figure, parameter[]]
variable[axes] assign[=] call[name[get_page_axes], parameter[]]
variable[ax_count] assign[=] constant[0]
variable[islog] assign[=] constant[False]
variable[vc] assign[=] call[name[info].partrans.value_counts, parameter[]]
if compare[call[name[vc].shape][constant[0]] greater[>] constant[1]] begin[:]
call[name[logger].warn, parameter[call[constant[mixed partrans for group {0}].format, parameter[name[g]]]]]
variable[ax] assign[=] call[name[axes]][name[ax_count]]
if <ast.BoolOp object at 0x7da1b1d6fcd0> begin[:]
variable[ms] assign[=] call[call[call[name[info].loc][tuple[[<ast.Name object at 0x7da1b1d6cd00>, <ast.Slice object at 0x7da1b1d6e680>]]].apply, parameter[<ast.Lambda object at 0x7da1b1d6ece0>]].unique, parameter[]]
for taget[tuple[[<ast.Name object at 0x7da1b1d6d900>, <ast.Name object at 0x7da1b1d6f010>]]] in starred[name[ms]] begin[:]
<ast.Tuple object at 0x7da1b1d6e860> assign[=] call[name[gaussian_distribution], parameter[name[m], name[s]]]
call[name[ax].fill_between, parameter[name[x], constant[0], name[y]]]
call[name[ax].set_title, parameter[call[constant[{0}) group:{1}, {2} parameters].format, parameter[call[name[abet]][name[ax_count]], name[g], call[name[names].shape][constant[0]]]]]]
call[name[ax].set_yticks, parameter[list[[]]]]
if name[islog] begin[:]
call[name[ax].set_xlabel, parameter[constant[$log_{10}$ parameter value]]]
call[name[logger].log, parameter[call[constant[plotting priors for {0}].format, parameter[call[constant[,].join, parameter[call[name[list], parameter[name[names]]]]]]]]]
<ast.AugAssign object at 0x7da1b1d46e90>
for taget[name[a]] in starred[call[name[range], parameter[name[ax_count], binary_operation[name[nr] * name[nc]]]]] begin[:]
call[call[name[axes]][name[a]].set_axis_off, parameter[]]
call[call[name[axes]][name[a]].set_yticks, parameter[list[[]]]]
call[call[name[axes]][name[a]].set_xticks, parameter[list[[]]]]
call[name[plt].tight_layout, parameter[]]
call[name[figs].append, parameter[name[fig]]]
if compare[name[filename] is_not constant[None]] begin[:]
with call[name[PdfPages], parameter[name[filename]]] begin[:]
call[name[plt].tight_layout, parameter[]]
call[name[pdf].savefig, parameter[name[fig]]]
call[name[plt].close, parameter[name[fig]]]
call[name[logger].log, parameter[constant[plot pst_prior]]] | keyword[def] identifier[pst_prior] ( identifier[pst] , identifier[logger] = keyword[None] , identifier[filename] = keyword[None] ,** identifier[kwargs] ):
literal[string]
keyword[if] identifier[logger] keyword[is] keyword[None] :
identifier[logger] = identifier[Logger] ( literal[string] , identifier[echo] = keyword[False] )
identifier[logger] . identifier[log] ( literal[string] )
identifier[par] = identifier[pst] . identifier[parameter_data]
keyword[if] literal[string] keyword[in] identifier[pst] . identifier[pestpp_options] :
identifier[logger] . identifier[warn] ( literal[string] )
identifier[logger] . identifier[log] ( literal[string] )
identifier[cov] = identifier[pyemu] . identifier[Cov] . identifier[from_parameter_data] ( identifier[pst] )
identifier[logger] . identifier[log] ( literal[string] )
identifier[logger] . identifier[log] ( literal[string] )
identifier[li] = identifier[par] . identifier[partrans] . identifier[loc] [ identifier[cov] . identifier[names] ]== literal[string]
identifier[mean] = identifier[par] . identifier[parval1] . identifier[loc] [ identifier[cov] . identifier[names] ]
identifier[info] = identifier[par] . identifier[loc] [ identifier[cov] . identifier[names] ,:]. identifier[copy] ()
identifier[info] . identifier[loc] [:, literal[string] ]= identifier[mean]
identifier[info] . identifier[loc] [ identifier[li] , literal[string] ]= identifier[mean] [ identifier[li] ]. identifier[apply] ( identifier[np] . identifier[log10] )
identifier[logger] . identifier[log] ( literal[string] )
identifier[logger] . identifier[log] ( literal[string] )
keyword[if] identifier[cov] . identifier[isdiagonal] :
identifier[std] = identifier[cov] . identifier[x] . identifier[flatten] ()
keyword[else] :
identifier[std] = identifier[np] . identifier[diag] ( identifier[cov] . identifier[x] )
identifier[std] = identifier[np] . identifier[sqrt] ( identifier[std] )
identifier[info] . identifier[loc] [:, literal[string] ]= identifier[std]
identifier[logger] . identifier[log] ( literal[string] )
keyword[if] identifier[std] . identifier[shape] != identifier[mean] . identifier[shape] :
identifier[logger] . identifier[lraise] ( literal[string] .
identifier[format] ( identifier[mean] . identifier[shape] , identifier[std] . identifier[shape] ))
keyword[if] literal[string] keyword[in] identifier[kwargs] :
keyword[raise] identifier[NotImplementedError] ()
keyword[else] :
identifier[par_adj] = identifier[par] . identifier[loc] [ identifier[par] . identifier[partrans] . identifier[apply] ( keyword[lambda] identifier[x] : identifier[x] keyword[in] [ literal[string] , literal[string] ]),:]
identifier[grouper] = identifier[par_adj] . identifier[groupby] ( identifier[par_adj] . identifier[pargp] ). identifier[groups]
keyword[if] identifier[len] ( identifier[grouper] )== literal[int] :
keyword[raise] identifier[Exception] ( literal[string] )
identifier[fig] = identifier[plt] . identifier[figure] ( identifier[figsize] = identifier[figsize] )
keyword[if] literal[string] keyword[in] identifier[kwargs] :
identifier[plt] . identifier[figtext] ( literal[int] , literal[int] , identifier[kwargs] [ literal[string] ])
keyword[else] :
identifier[plt] . identifier[figtext] ( literal[int] , literal[int] , literal[string]
. identifier[format] ( identifier[pst] . identifier[filename] , identifier[str] ( identifier[datetime] . identifier[now] ())), identifier[ha] = literal[string] )
identifier[figs] =[]
identifier[ax_count] = literal[int]
identifier[grps_names] = identifier[list] ( identifier[grouper] . identifier[keys] ())
identifier[grps_names] . identifier[sort] ()
keyword[for] identifier[g] keyword[in] identifier[grps_names] :
identifier[names] = identifier[grouper] [ identifier[g] ]
identifier[logger] . identifier[log] ( literal[string] .
identifier[format] ( literal[string] . identifier[join] ( identifier[list] ( identifier[names] ))))
keyword[if] identifier[ax_count] %( identifier[nr] * identifier[nc] )== literal[int] :
identifier[plt] . identifier[tight_layout] ()
identifier[figs] . identifier[append] ( identifier[fig] )
identifier[fig] = identifier[plt] . identifier[figure] ( identifier[figsize] = identifier[figsize] )
identifier[axes] = identifier[get_page_axes] ()
identifier[ax_count] = literal[int]
identifier[islog] = keyword[False]
identifier[vc] = identifier[info] . identifier[partrans] . identifier[value_counts] ()
keyword[if] identifier[vc] . identifier[shape] [ literal[int] ]> literal[int] :
identifier[logger] . identifier[warn] ( literal[string] . identifier[format] ( identifier[g] ))
keyword[elif] literal[string] keyword[in] identifier[vc] . identifier[index] :
identifier[islog] = keyword[True]
identifier[ax] = identifier[axes] [ identifier[ax_count] ]
keyword[if] literal[string] keyword[in] identifier[kwargs] keyword[and] identifier[kwargs] [ literal[string] ]:
identifier[ms] = identifier[info] . identifier[loc] [ identifier[names] ,:]. identifier[apply] ( keyword[lambda] identifier[x] :( identifier[x] [ literal[string] ], identifier[x] [ literal[string] ]), identifier[axis] = literal[int] ). identifier[unique] ()
keyword[for] ( identifier[m] , identifier[s] ) keyword[in] identifier[ms] :
identifier[x] , identifier[y] = identifier[gaussian_distribution] ( identifier[m] , identifier[s] )
identifier[ax] . identifier[fill_between] ( identifier[x] , literal[int] , identifier[y] , identifier[facecolor] = literal[string] , identifier[alpha] = literal[int] ,
identifier[edgecolor] = literal[string] )
keyword[else] :
keyword[for] identifier[m] , identifier[s] keyword[in] identifier[zip] ( identifier[info] . identifier[loc] [ identifier[names] , literal[string] ], identifier[info] . identifier[loc] [ identifier[names] , literal[string] ]):
identifier[x] , identifier[y] = identifier[gaussian_distribution] ( identifier[m] , identifier[s] )
identifier[ax] . identifier[fill_between] ( identifier[x] , literal[int] , identifier[y] , identifier[facecolor] = literal[string] , identifier[alpha] = literal[int] ,
identifier[edgecolor] = literal[string] )
identifier[ax] . identifier[set_title] ( literal[string] .
identifier[format] ( identifier[abet] [ identifier[ax_count] ], identifier[g] , identifier[names] . identifier[shape] [ literal[int] ]), identifier[loc] = literal[string] )
identifier[ax] . identifier[set_yticks] ([])
keyword[if] identifier[islog] :
identifier[ax] . identifier[set_xlabel] ( literal[string] , identifier[labelpad] = literal[int] )
keyword[else] :
identifier[ax] . identifier[set_xlabel] ( literal[string] , identifier[labelpad] = literal[int] )
identifier[logger] . identifier[log] ( literal[string] .
identifier[format] ( literal[string] . identifier[join] ( identifier[list] ( identifier[names] ))))
identifier[ax_count] += literal[int]
keyword[for] identifier[a] keyword[in] identifier[range] ( identifier[ax_count] , identifier[nr] * identifier[nc] ):
identifier[axes] [ identifier[a] ]. identifier[set_axis_off] ()
identifier[axes] [ identifier[a] ]. identifier[set_yticks] ([])
identifier[axes] [ identifier[a] ]. identifier[set_xticks] ([])
identifier[plt] . identifier[tight_layout] ()
identifier[figs] . identifier[append] ( identifier[fig] )
keyword[if] identifier[filename] keyword[is] keyword[not] keyword[None] :
keyword[with] identifier[PdfPages] ( identifier[filename] ) keyword[as] identifier[pdf] :
identifier[plt] . identifier[tight_layout] ()
identifier[pdf] . identifier[savefig] ( identifier[fig] )
identifier[plt] . identifier[close] ( identifier[fig] )
identifier[logger] . identifier[log] ( literal[string] )
keyword[else] :
identifier[logger] . identifier[log] ( literal[string] )
keyword[return] identifier[figs] | def pst_prior(pst, logger=None, filename=None, **kwargs):
""" helper to plot prior parameter histograms implied by
parameter bounds. Saves a multipage pdf named <case>.prior.pdf
Parameters
----------
pst : pyemu.Pst
logger : pyemu.Logger
filename : str
PDF filename to save plots to. If None, return figs without saving. Default is None.
kwargs : dict
accepts 'grouper' as dict to group parameters on to a single axis (use
parameter groups if not passed),
'unqiue_only' to only show unique mean-stdev combinations within a
given group
Returns
-------
None
TODO
----
external parcov, unique mean-std pairs
"""
if logger is None:
logger = Logger('Default_Loggger.log', echo=False) # depends on [control=['if'], data=['logger']]
logger.log('plot pst_prior')
par = pst.parameter_data
if 'parcov_filename' in pst.pestpp_options:
logger.warn('ignoring parcov_filename, using parameter bounds for prior cov') # depends on [control=['if'], data=[]]
logger.log('loading cov from parameter data')
cov = pyemu.Cov.from_parameter_data(pst)
logger.log('loading cov from parameter data')
logger.log('building mean parameter values')
li = par.partrans.loc[cov.names] == 'log'
mean = par.parval1.loc[cov.names]
info = par.loc[cov.names, :].copy()
info.loc[:, 'mean'] = mean
info.loc[li, 'mean'] = mean[li].apply(np.log10)
logger.log('building mean parameter values')
logger.log('building stdev parameter values')
if cov.isdiagonal:
std = cov.x.flatten() # depends on [control=['if'], data=[]]
else:
std = np.diag(cov.x)
std = np.sqrt(std)
info.loc[:, 'prior_std'] = std
logger.log('building stdev parameter values')
if std.shape != mean.shape:
logger.lraise('mean.shape {0} != std.shape {1}'.format(mean.shape, std.shape)) # depends on [control=['if'], data=[]]
if 'grouper' in kwargs:
raise NotImplementedError() # depends on [control=['if'], data=[]]
else:
#check for consistency here
par_adj = par.loc[par.partrans.apply(lambda x: x in ['log', 'none']), :]
grouper = par_adj.groupby(par_adj.pargp).groups
#grouper = par.groupby(par.pargp).groups
if len(grouper) == 0:
raise Exception('no adustable parameters to plot') # depends on [control=['if'], data=[]]
fig = plt.figure(figsize=figsize)
if 'fig_title' in kwargs:
plt.figtext(0.5, 0.5, kwargs['fig_title']) # depends on [control=['if'], data=['kwargs']]
else:
plt.figtext(0.5, 0.5, "pyemu.Pst.plot(kind='prior')\nfrom pest control file '{0}'\n at {1}".format(pst.filename, str(datetime.now())), ha='center')
figs = []
ax_count = 0
grps_names = list(grouper.keys())
grps_names.sort()
for g in grps_names:
names = grouper[g]
logger.log('plotting priors for {0}'.format(','.join(list(names))))
if ax_count % (nr * nc) == 0:
plt.tight_layout()
#pdf.savefig()
#plt.close(fig)
figs.append(fig)
fig = plt.figure(figsize=figsize)
axes = get_page_axes()
ax_count = 0 # depends on [control=['if'], data=[]]
islog = False
vc = info.partrans.value_counts()
if vc.shape[0] > 1:
logger.warn('mixed partrans for group {0}'.format(g)) # depends on [control=['if'], data=[]]
elif 'log' in vc.index:
islog = True # depends on [control=['if'], data=[]]
ax = axes[ax_count]
if 'unique_only' in kwargs and kwargs['unique_only']:
ms = info.loc[names, :].apply(lambda x: (x['mean'], x['prior_std']), axis=1).unique()
for (m, s) in ms:
(x, y) = gaussian_distribution(m, s)
ax.fill_between(x, 0, y, facecolor='0.5', alpha=0.5, edgecolor='none') # depends on [control=['for'], data=[]] # depends on [control=['if'], data=[]]
else:
for (m, s) in zip(info.loc[names, 'mean'], info.loc[names, 'prior_std']):
(x, y) = gaussian_distribution(m, s)
ax.fill_between(x, 0, y, facecolor='0.5', alpha=0.5, edgecolor='none') # depends on [control=['for'], data=[]]
ax.set_title('{0}) group:{1}, {2} parameters'.format(abet[ax_count], g, names.shape[0]), loc='left')
ax.set_yticks([])
if islog:
ax.set_xlabel('$log_{10}$ parameter value', labelpad=0.1) # depends on [control=['if'], data=[]]
else:
ax.set_xlabel('parameter value', labelpad=0.1)
logger.log('plotting priors for {0}'.format(','.join(list(names))))
ax_count += 1 # depends on [control=['for'], data=['g']]
for a in range(ax_count, nr * nc):
axes[a].set_axis_off()
axes[a].set_yticks([])
axes[a].set_xticks([]) # depends on [control=['for'], data=['a']]
plt.tight_layout()
#pdf.savefig()
#plt.close(fig)
figs.append(fig)
if filename is not None:
with PdfPages(filename) as pdf:
plt.tight_layout()
pdf.savefig(fig)
plt.close(fig) # depends on [control=['with'], data=['pdf']]
logger.log('plot pst_prior') # depends on [control=['if'], data=['filename']]
else:
logger.log('plot pst_prior')
return figs |
def generate_menu(self, ass, text, path=None, level=0):
"""
Function generates menu from based on ass parameter
"""
menu = self.create_menu()
for index, sub in enumerate(sorted(ass[1], key=lambda y: y[0].fullname.lower())):
if index != 0:
text += "|"
text += "- " + sub[0].fullname
new_path = list(path)
if level == 0:
new_path.append(ass[0].name)
new_path.append(sub[0].name)
menu_item = self.menu_item(sub, new_path)
if sub[1]:
# If assistant has subassistants
(sub_menu, txt) = self.generate_menu(sub, text, new_path, level=level + 1)
menu_item.set_submenu(sub_menu)
menu.append(menu_item)
return menu, text | def function[generate_menu, parameter[self, ass, text, path, level]]:
constant[
Function generates menu from based on ass parameter
]
variable[menu] assign[=] call[name[self].create_menu, parameter[]]
for taget[tuple[[<ast.Name object at 0x7da1b0f0d7b0>, <ast.Name object at 0x7da1b0f0f1f0>]]] in starred[call[name[enumerate], parameter[call[name[sorted], parameter[call[name[ass]][constant[1]]]]]]] begin[:]
if compare[name[index] not_equal[!=] constant[0]] begin[:]
<ast.AugAssign object at 0x7da1b0f0ecb0>
<ast.AugAssign object at 0x7da1b0f0dd80>
variable[new_path] assign[=] call[name[list], parameter[name[path]]]
if compare[name[level] equal[==] constant[0]] begin[:]
call[name[new_path].append, parameter[call[name[ass]][constant[0]].name]]
call[name[new_path].append, parameter[call[name[sub]][constant[0]].name]]
variable[menu_item] assign[=] call[name[self].menu_item, parameter[name[sub], name[new_path]]]
if call[name[sub]][constant[1]] begin[:]
<ast.Tuple object at 0x7da1b0f0c310> assign[=] call[name[self].generate_menu, parameter[name[sub], name[text], name[new_path]]]
call[name[menu_item].set_submenu, parameter[name[sub_menu]]]
call[name[menu].append, parameter[name[menu_item]]]
return[tuple[[<ast.Name object at 0x7da1b0f0d450>, <ast.Name object at 0x7da1b0f0cc40>]]] | keyword[def] identifier[generate_menu] ( identifier[self] , identifier[ass] , identifier[text] , identifier[path] = keyword[None] , identifier[level] = literal[int] ):
literal[string]
identifier[menu] = identifier[self] . identifier[create_menu] ()
keyword[for] identifier[index] , identifier[sub] keyword[in] identifier[enumerate] ( identifier[sorted] ( identifier[ass] [ literal[int] ], identifier[key] = keyword[lambda] identifier[y] : identifier[y] [ literal[int] ]. identifier[fullname] . identifier[lower] ())):
keyword[if] identifier[index] != literal[int] :
identifier[text] += literal[string]
identifier[text] += literal[string] + identifier[sub] [ literal[int] ]. identifier[fullname]
identifier[new_path] = identifier[list] ( identifier[path] )
keyword[if] identifier[level] == literal[int] :
identifier[new_path] . identifier[append] ( identifier[ass] [ literal[int] ]. identifier[name] )
identifier[new_path] . identifier[append] ( identifier[sub] [ literal[int] ]. identifier[name] )
identifier[menu_item] = identifier[self] . identifier[menu_item] ( identifier[sub] , identifier[new_path] )
keyword[if] identifier[sub] [ literal[int] ]:
( identifier[sub_menu] , identifier[txt] )= identifier[self] . identifier[generate_menu] ( identifier[sub] , identifier[text] , identifier[new_path] , identifier[level] = identifier[level] + literal[int] )
identifier[menu_item] . identifier[set_submenu] ( identifier[sub_menu] )
identifier[menu] . identifier[append] ( identifier[menu_item] )
keyword[return] identifier[menu] , identifier[text] | def generate_menu(self, ass, text, path=None, level=0):
"""
Function generates menu from based on ass parameter
"""
menu = self.create_menu()
for (index, sub) in enumerate(sorted(ass[1], key=lambda y: y[0].fullname.lower())):
if index != 0:
text += '|' # depends on [control=['if'], data=[]]
text += '- ' + sub[0].fullname
new_path = list(path)
if level == 0:
new_path.append(ass[0].name) # depends on [control=['if'], data=[]]
new_path.append(sub[0].name)
menu_item = self.menu_item(sub, new_path)
if sub[1]:
# If assistant has subassistants
(sub_menu, txt) = self.generate_menu(sub, text, new_path, level=level + 1)
menu_item.set_submenu(sub_menu) # depends on [control=['if'], data=[]]
menu.append(menu_item) # depends on [control=['for'], data=[]]
return (menu, text) |
def GetServiceVersions(namespace):
"""
Get all the versions for the service with specified namespace (partially) ordered
by compatibility (i.e. any version in the list that is compatible with some version
v in the list will preceed v)
"""
def compare(a, b):
if a == b:
return 0
if b in parentMap[a]:
return -1
if a in parentMap[b]:
return 1
return (a > b) - (a < b)
if PY3:
return sorted([v for (v, n) in iteritems(serviceNsMap) if n == namespace],
key=cmp_to_key(compare))
else:
return sorted([v for (v, n) in iteritems(serviceNsMap) if n == namespace],
compare) | def function[GetServiceVersions, parameter[namespace]]:
constant[
Get all the versions for the service with specified namespace (partially) ordered
by compatibility (i.e. any version in the list that is compatible with some version
v in the list will preceed v)
]
def function[compare, parameter[a, b]]:
if compare[name[a] equal[==] name[b]] begin[:]
return[constant[0]]
if compare[name[b] in call[name[parentMap]][name[a]]] begin[:]
return[<ast.UnaryOp object at 0x7da2041d9e40>]
if compare[name[a] in call[name[parentMap]][name[b]]] begin[:]
return[constant[1]]
return[binary_operation[compare[name[a] greater[>] name[b]] - compare[name[a] less[<] name[b]]]]
if name[PY3] begin[:]
return[call[name[sorted], parameter[<ast.ListComp object at 0x7da2041d8a90>]]] | keyword[def] identifier[GetServiceVersions] ( identifier[namespace] ):
literal[string]
keyword[def] identifier[compare] ( identifier[a] , identifier[b] ):
keyword[if] identifier[a] == identifier[b] :
keyword[return] literal[int]
keyword[if] identifier[b] keyword[in] identifier[parentMap] [ identifier[a] ]:
keyword[return] - literal[int]
keyword[if] identifier[a] keyword[in] identifier[parentMap] [ identifier[b] ]:
keyword[return] literal[int]
keyword[return] ( identifier[a] > identifier[b] )-( identifier[a] < identifier[b] )
keyword[if] identifier[PY3] :
keyword[return] identifier[sorted] ([ identifier[v] keyword[for] ( identifier[v] , identifier[n] ) keyword[in] identifier[iteritems] ( identifier[serviceNsMap] ) keyword[if] identifier[n] == identifier[namespace] ],
identifier[key] = identifier[cmp_to_key] ( identifier[compare] ))
keyword[else] :
keyword[return] identifier[sorted] ([ identifier[v] keyword[for] ( identifier[v] , identifier[n] ) keyword[in] identifier[iteritems] ( identifier[serviceNsMap] ) keyword[if] identifier[n] == identifier[namespace] ],
identifier[compare] ) | def GetServiceVersions(namespace):
"""
Get all the versions for the service with specified namespace (partially) ordered
by compatibility (i.e. any version in the list that is compatible with some version
v in the list will preceed v)
"""
def compare(a, b):
if a == b:
return 0 # depends on [control=['if'], data=[]]
if b in parentMap[a]:
return -1 # depends on [control=['if'], data=[]]
if a in parentMap[b]:
return 1 # depends on [control=['if'], data=[]]
return (a > b) - (a < b)
if PY3:
return sorted([v for (v, n) in iteritems(serviceNsMap) if n == namespace], key=cmp_to_key(compare)) # depends on [control=['if'], data=[]]
else:
return sorted([v for (v, n) in iteritems(serviceNsMap) if n == namespace], compare) |
def post_register_hook(self, verbosity=1):
"""Pull Docker images needed by processes after registering."""
if not getattr(settings, 'FLOW_DOCKER_DONT_PULL', False):
call_command('list_docker_images', pull=True, verbosity=verbosity) | def function[post_register_hook, parameter[self, verbosity]]:
constant[Pull Docker images needed by processes after registering.]
if <ast.UnaryOp object at 0x7da18bccb8b0> begin[:]
call[name[call_command], parameter[constant[list_docker_images]]] | keyword[def] identifier[post_register_hook] ( identifier[self] , identifier[verbosity] = literal[int] ):
literal[string]
keyword[if] keyword[not] identifier[getattr] ( identifier[settings] , literal[string] , keyword[False] ):
identifier[call_command] ( literal[string] , identifier[pull] = keyword[True] , identifier[verbosity] = identifier[verbosity] ) | def post_register_hook(self, verbosity=1):
"""Pull Docker images needed by processes after registering."""
if not getattr(settings, 'FLOW_DOCKER_DONT_PULL', False):
call_command('list_docker_images', pull=True, verbosity=verbosity) # depends on [control=['if'], data=[]] |
def build(self) -> type:
"""
Build a subclass of `MappingJSONEncoder`.
:return: the built subclass
"""
def _get_property_mappings(encoder: MappingJSONEncoder) -> List[JsonPropertyMapping]:
return _get_all_property_mappings(encoder, self.mappings, self.superclasses)
def get_serializable_cls(encoder: MappingJSONEncoder) -> type:
return self.target_cls
def default(encoder: MappingJSONEncoder, serializable):
if serializable is None:
# Fix for #18
return None
elif isinstance(serializable, List):
# Fix for #8
return [encoder.default(item) for item in serializable]
else:
# Sort subclasses so subclass' default method is called last
superclasses_as_list = list(self.superclasses)
superclasses_as_list.sort(key=lambda superclass: 1 if superclass == MappingJSONEncoder else -1)
encoded_combined = {}
for superclass in superclasses_as_list:
encoded = superclass.default(encoder, serializable)
encoded_combined.update(encoded)
return encoded_combined
return type(
"%sDynamicMappingJSONEncoder" % self.target_cls.__name__,
self.superclasses,
{
"_get_property_mappings": _get_property_mappings,
"_get_serializable_cls": get_serializable_cls,
"default": default
}
) | def function[build, parameter[self]]:
constant[
Build a subclass of `MappingJSONEncoder`.
:return: the built subclass
]
def function[_get_property_mappings, parameter[encoder]]:
return[call[name[_get_all_property_mappings], parameter[name[encoder], name[self].mappings, name[self].superclasses]]]
def function[get_serializable_cls, parameter[encoder]]:
return[name[self].target_cls]
def function[default, parameter[encoder, serializable]]:
if compare[name[serializable] is constant[None]] begin[:]
return[constant[None]]
return[call[name[type], parameter[binary_operation[constant[%sDynamicMappingJSONEncoder] <ast.Mod object at 0x7da2590d6920> name[self].target_cls.__name__], name[self].superclasses, dictionary[[<ast.Constant object at 0x7da1b255e350>, <ast.Constant object at 0x7da1b255eb60>, <ast.Constant object at 0x7da1b255e050>], [<ast.Name object at 0x7da1b255c370>, <ast.Name object at 0x7da1b255e800>, <ast.Name object at 0x7da1b255c3a0>]]]]] | keyword[def] identifier[build] ( identifier[self] )-> identifier[type] :
literal[string]
keyword[def] identifier[_get_property_mappings] ( identifier[encoder] : identifier[MappingJSONEncoder] )-> identifier[List] [ identifier[JsonPropertyMapping] ]:
keyword[return] identifier[_get_all_property_mappings] ( identifier[encoder] , identifier[self] . identifier[mappings] , identifier[self] . identifier[superclasses] )
keyword[def] identifier[get_serializable_cls] ( identifier[encoder] : identifier[MappingJSONEncoder] )-> identifier[type] :
keyword[return] identifier[self] . identifier[target_cls]
keyword[def] identifier[default] ( identifier[encoder] : identifier[MappingJSONEncoder] , identifier[serializable] ):
keyword[if] identifier[serializable] keyword[is] keyword[None] :
keyword[return] keyword[None]
keyword[elif] identifier[isinstance] ( identifier[serializable] , identifier[List] ):
keyword[return] [ identifier[encoder] . identifier[default] ( identifier[item] ) keyword[for] identifier[item] keyword[in] identifier[serializable] ]
keyword[else] :
identifier[superclasses_as_list] = identifier[list] ( identifier[self] . identifier[superclasses] )
identifier[superclasses_as_list] . identifier[sort] ( identifier[key] = keyword[lambda] identifier[superclass] : literal[int] keyword[if] identifier[superclass] == identifier[MappingJSONEncoder] keyword[else] - literal[int] )
identifier[encoded_combined] ={}
keyword[for] identifier[superclass] keyword[in] identifier[superclasses_as_list] :
identifier[encoded] = identifier[superclass] . identifier[default] ( identifier[encoder] , identifier[serializable] )
identifier[encoded_combined] . identifier[update] ( identifier[encoded] )
keyword[return] identifier[encoded_combined]
keyword[return] identifier[type] (
literal[string] % identifier[self] . identifier[target_cls] . identifier[__name__] ,
identifier[self] . identifier[superclasses] ,
{
literal[string] : identifier[_get_property_mappings] ,
literal[string] : identifier[get_serializable_cls] ,
literal[string] : identifier[default]
}
) | def build(self) -> type:
"""
Build a subclass of `MappingJSONEncoder`.
:return: the built subclass
"""
def _get_property_mappings(encoder: MappingJSONEncoder) -> List[JsonPropertyMapping]:
return _get_all_property_mappings(encoder, self.mappings, self.superclasses)
def get_serializable_cls(encoder: MappingJSONEncoder) -> type:
return self.target_cls
def default(encoder: MappingJSONEncoder, serializable):
if serializable is None:
# Fix for #18
return None # depends on [control=['if'], data=[]]
elif isinstance(serializable, List):
# Fix for #8
return [encoder.default(item) for item in serializable] # depends on [control=['if'], data=[]]
else:
# Sort subclasses so subclass' default method is called last
superclasses_as_list = list(self.superclasses)
superclasses_as_list.sort(key=lambda superclass: 1 if superclass == MappingJSONEncoder else -1)
encoded_combined = {}
for superclass in superclasses_as_list:
encoded = superclass.default(encoder, serializable)
encoded_combined.update(encoded) # depends on [control=['for'], data=['superclass']]
return encoded_combined
return type('%sDynamicMappingJSONEncoder' % self.target_cls.__name__, self.superclasses, {'_get_property_mappings': _get_property_mappings, '_get_serializable_cls': get_serializable_cls, 'default': default}) |
def convertIndexToState(index):
"""Convert transition probability matrix index to state parameters.
Parameters
----------
index : int
The index into the transition probability matrix that corresponds to
the state parameters.
Returns
-------
population, fire : tuple of int
``population``, the population abundance class of the threatened
species. ``fire``, the time in years since last fire.
"""
assert index < STATES
population = index // FIRE_CLASSES
fire = index % FIRE_CLASSES
return(population, fire) | def function[convertIndexToState, parameter[index]]:
constant[Convert transition probability matrix index to state parameters.
Parameters
----------
index : int
The index into the transition probability matrix that corresponds to
the state parameters.
Returns
-------
population, fire : tuple of int
``population``, the population abundance class of the threatened
species. ``fire``, the time in years since last fire.
]
assert[compare[name[index] less[<] name[STATES]]]
variable[population] assign[=] binary_operation[name[index] <ast.FloorDiv object at 0x7da2590d6bc0> name[FIRE_CLASSES]]
variable[fire] assign[=] binary_operation[name[index] <ast.Mod object at 0x7da2590d6920> name[FIRE_CLASSES]]
return[tuple[[<ast.Name object at 0x7da18f09fe50>, <ast.Name object at 0x7da18f09f9d0>]]] | keyword[def] identifier[convertIndexToState] ( identifier[index] ):
literal[string]
keyword[assert] identifier[index] < identifier[STATES]
identifier[population] = identifier[index] // identifier[FIRE_CLASSES]
identifier[fire] = identifier[index] % identifier[FIRE_CLASSES]
keyword[return] ( identifier[population] , identifier[fire] ) | def convertIndexToState(index):
"""Convert transition probability matrix index to state parameters.
Parameters
----------
index : int
The index into the transition probability matrix that corresponds to
the state parameters.
Returns
-------
population, fire : tuple of int
``population``, the population abundance class of the threatened
species. ``fire``, the time in years since last fire.
"""
assert index < STATES
population = index // FIRE_CLASSES
fire = index % FIRE_CLASSES
return (population, fire) |
def __flip(self, sliceimg):
"""
Flip if asked in self.flipV or self.flipH
:param sliceimg: one image slice
:return: flipp
"""
if self.flipH:
sliceimg = sliceimg[:, -1:0:-1]
if self.flipV:
sliceimg = sliceimg [-1:0:-1,:]
return sliceimg | def function[__flip, parameter[self, sliceimg]]:
constant[
Flip if asked in self.flipV or self.flipH
:param sliceimg: one image slice
:return: flipp
]
if name[self].flipH begin[:]
variable[sliceimg] assign[=] call[name[sliceimg]][tuple[[<ast.Slice object at 0x7da1b2715ba0>, <ast.Slice object at 0x7da1b27146d0>]]]
if name[self].flipV begin[:]
variable[sliceimg] assign[=] call[name[sliceimg]][tuple[[<ast.Slice object at 0x7da1b2714a30>, <ast.Slice object at 0x7da1b2715030>]]]
return[name[sliceimg]] | keyword[def] identifier[__flip] ( identifier[self] , identifier[sliceimg] ):
literal[string]
keyword[if] identifier[self] . identifier[flipH] :
identifier[sliceimg] = identifier[sliceimg] [:,- literal[int] : literal[int] :- literal[int] ]
keyword[if] identifier[self] . identifier[flipV] :
identifier[sliceimg] = identifier[sliceimg] [- literal[int] : literal[int] :- literal[int] ,:]
keyword[return] identifier[sliceimg] | def __flip(self, sliceimg):
"""
Flip if asked in self.flipV or self.flipH
:param sliceimg: one image slice
:return: flipp
"""
if self.flipH:
sliceimg = sliceimg[:, -1:0:-1] # depends on [control=['if'], data=[]]
if self.flipV:
sliceimg = sliceimg[-1:0:-1, :] # depends on [control=['if'], data=[]]
return sliceimg |
def get_vertices_to_edges_matrix(self, want_xyz=True):
"""Returns a matrix M, which if multiplied by vertices,
gives back edges (so "e = M.dot(v)"). Note that this generates
one edge per edge, *not* two edges per triangle.
Args:
want_xyz: if true, takes and returns xyz coordinates, otherwise
takes and returns x *or* y *or* z coordinates
"""
import numpy as np
import scipy.sparse as sp
vpe = np.asarray(self.vertices_per_edge, dtype=np.int32)
IS = np.repeat(np.arange(len(vpe)), 2)
JS = vpe.flatten()
data = np.ones_like(vpe)
data[:, 1] = -1
data = data.flatten()
if want_xyz:
IS = np.concatenate((IS*3, IS*3+1, IS*3+2))
JS = np.concatenate((JS*3, JS*3+1, JS*3+2))
data = np.concatenate((data, data, data))
ij = np.vstack((IS.flatten(), JS.flatten()))
return sp.csc_matrix((data, ij)) | def function[get_vertices_to_edges_matrix, parameter[self, want_xyz]]:
constant[Returns a matrix M, which if multiplied by vertices,
gives back edges (so "e = M.dot(v)"). Note that this generates
one edge per edge, *not* two edges per triangle.
Args:
want_xyz: if true, takes and returns xyz coordinates, otherwise
takes and returns x *or* y *or* z coordinates
]
import module[numpy] as alias[np]
import module[scipy.sparse] as alias[sp]
variable[vpe] assign[=] call[name[np].asarray, parameter[name[self].vertices_per_edge]]
variable[IS] assign[=] call[name[np].repeat, parameter[call[name[np].arange, parameter[call[name[len], parameter[name[vpe]]]]], constant[2]]]
variable[JS] assign[=] call[name[vpe].flatten, parameter[]]
variable[data] assign[=] call[name[np].ones_like, parameter[name[vpe]]]
call[name[data]][tuple[[<ast.Slice object at 0x7da1b26adf90>, <ast.Constant object at 0x7da1b26af880>]]] assign[=] <ast.UnaryOp object at 0x7da1b26ad4b0>
variable[data] assign[=] call[name[data].flatten, parameter[]]
if name[want_xyz] begin[:]
variable[IS] assign[=] call[name[np].concatenate, parameter[tuple[[<ast.BinOp object at 0x7da1b26aff40>, <ast.BinOp object at 0x7da1b26ac0d0>, <ast.BinOp object at 0x7da1b26ae620>]]]]
variable[JS] assign[=] call[name[np].concatenate, parameter[tuple[[<ast.BinOp object at 0x7da1b26adb40>, <ast.BinOp object at 0x7da1b26af5b0>, <ast.BinOp object at 0x7da1b26add80>]]]]
variable[data] assign[=] call[name[np].concatenate, parameter[tuple[[<ast.Name object at 0x7da20c6c40a0>, <ast.Name object at 0x7da20c6c7c10>, <ast.Name object at 0x7da20c6c6b90>]]]]
variable[ij] assign[=] call[name[np].vstack, parameter[tuple[[<ast.Call object at 0x7da20c6c4700>, <ast.Call object at 0x7da20c6c5870>]]]]
return[call[name[sp].csc_matrix, parameter[tuple[[<ast.Name object at 0x7da1b26aee90>, <ast.Name object at 0x7da1b26af970>]]]]] | keyword[def] identifier[get_vertices_to_edges_matrix] ( identifier[self] , identifier[want_xyz] = keyword[True] ):
literal[string]
keyword[import] identifier[numpy] keyword[as] identifier[np]
keyword[import] identifier[scipy] . identifier[sparse] keyword[as] identifier[sp]
identifier[vpe] = identifier[np] . identifier[asarray] ( identifier[self] . identifier[vertices_per_edge] , identifier[dtype] = identifier[np] . identifier[int32] )
identifier[IS] = identifier[np] . identifier[repeat] ( identifier[np] . identifier[arange] ( identifier[len] ( identifier[vpe] )), literal[int] )
identifier[JS] = identifier[vpe] . identifier[flatten] ()
identifier[data] = identifier[np] . identifier[ones_like] ( identifier[vpe] )
identifier[data] [:, literal[int] ]=- literal[int]
identifier[data] = identifier[data] . identifier[flatten] ()
keyword[if] identifier[want_xyz] :
identifier[IS] = identifier[np] . identifier[concatenate] (( identifier[IS] * literal[int] , identifier[IS] * literal[int] + literal[int] , identifier[IS] * literal[int] + literal[int] ))
identifier[JS] = identifier[np] . identifier[concatenate] (( identifier[JS] * literal[int] , identifier[JS] * literal[int] + literal[int] , identifier[JS] * literal[int] + literal[int] ))
identifier[data] = identifier[np] . identifier[concatenate] (( identifier[data] , identifier[data] , identifier[data] ))
identifier[ij] = identifier[np] . identifier[vstack] (( identifier[IS] . identifier[flatten] (), identifier[JS] . identifier[flatten] ()))
keyword[return] identifier[sp] . identifier[csc_matrix] (( identifier[data] , identifier[ij] )) | def get_vertices_to_edges_matrix(self, want_xyz=True):
"""Returns a matrix M, which if multiplied by vertices,
gives back edges (so "e = M.dot(v)"). Note that this generates
one edge per edge, *not* two edges per triangle.
Args:
want_xyz: if true, takes and returns xyz coordinates, otherwise
takes and returns x *or* y *or* z coordinates
"""
import numpy as np
import scipy.sparse as sp
vpe = np.asarray(self.vertices_per_edge, dtype=np.int32)
IS = np.repeat(np.arange(len(vpe)), 2)
JS = vpe.flatten()
data = np.ones_like(vpe)
data[:, 1] = -1
data = data.flatten()
if want_xyz:
IS = np.concatenate((IS * 3, IS * 3 + 1, IS * 3 + 2))
JS = np.concatenate((JS * 3, JS * 3 + 1, JS * 3 + 2))
data = np.concatenate((data, data, data)) # depends on [control=['if'], data=[]]
ij = np.vstack((IS.flatten(), JS.flatten()))
return sp.csc_matrix((data, ij)) |
def generate_func_code_block(self, definition, variable, variable_name, clear_variables=False):
"""
Creates validation rules for current definition.
"""
backup = self._definition, self._variable, self._variable_name
self._definition, self._variable, self._variable_name = definition, variable, variable_name
if clear_variables:
backup_variables = self._variables
self._variables = set()
self._generate_func_code_block(definition)
self._definition, self._variable, self._variable_name = backup
if clear_variables:
self._variables = backup_variables | def function[generate_func_code_block, parameter[self, definition, variable, variable_name, clear_variables]]:
constant[
Creates validation rules for current definition.
]
variable[backup] assign[=] tuple[[<ast.Attribute object at 0x7da207f9b490>, <ast.Attribute object at 0x7da207f999c0>, <ast.Attribute object at 0x7da207f99f30>]]
<ast.Tuple object at 0x7da207f98850> assign[=] tuple[[<ast.Name object at 0x7da207f9bac0>, <ast.Name object at 0x7da207f9b0a0>, <ast.Name object at 0x7da207f9bb20>]]
if name[clear_variables] begin[:]
variable[backup_variables] assign[=] name[self]._variables
name[self]._variables assign[=] call[name[set], parameter[]]
call[name[self]._generate_func_code_block, parameter[name[definition]]]
<ast.Tuple object at 0x7da207f9a710> assign[=] name[backup]
if name[clear_variables] begin[:]
name[self]._variables assign[=] name[backup_variables] | keyword[def] identifier[generate_func_code_block] ( identifier[self] , identifier[definition] , identifier[variable] , identifier[variable_name] , identifier[clear_variables] = keyword[False] ):
literal[string]
identifier[backup] = identifier[self] . identifier[_definition] , identifier[self] . identifier[_variable] , identifier[self] . identifier[_variable_name]
identifier[self] . identifier[_definition] , identifier[self] . identifier[_variable] , identifier[self] . identifier[_variable_name] = identifier[definition] , identifier[variable] , identifier[variable_name]
keyword[if] identifier[clear_variables] :
identifier[backup_variables] = identifier[self] . identifier[_variables]
identifier[self] . identifier[_variables] = identifier[set] ()
identifier[self] . identifier[_generate_func_code_block] ( identifier[definition] )
identifier[self] . identifier[_definition] , identifier[self] . identifier[_variable] , identifier[self] . identifier[_variable_name] = identifier[backup]
keyword[if] identifier[clear_variables] :
identifier[self] . identifier[_variables] = identifier[backup_variables] | def generate_func_code_block(self, definition, variable, variable_name, clear_variables=False):
"""
Creates validation rules for current definition.
"""
backup = (self._definition, self._variable, self._variable_name)
(self._definition, self._variable, self._variable_name) = (definition, variable, variable_name)
if clear_variables:
backup_variables = self._variables
self._variables = set() # depends on [control=['if'], data=[]]
self._generate_func_code_block(definition)
(self._definition, self._variable, self._variable_name) = backup
if clear_variables:
self._variables = backup_variables # depends on [control=['if'], data=[]] |
def get_injuries_by_team(self, season, week, team_id):
"""
Injuries by week and team
"""
result = self._method_call("Injuries/{season}/{week}/{team_id}", "stats", season=season, week=week, team_id=team_id)
return result | def function[get_injuries_by_team, parameter[self, season, week, team_id]]:
constant[
Injuries by week and team
]
variable[result] assign[=] call[name[self]._method_call, parameter[constant[Injuries/{season}/{week}/{team_id}], constant[stats]]]
return[name[result]] | keyword[def] identifier[get_injuries_by_team] ( identifier[self] , identifier[season] , identifier[week] , identifier[team_id] ):
literal[string]
identifier[result] = identifier[self] . identifier[_method_call] ( literal[string] , literal[string] , identifier[season] = identifier[season] , identifier[week] = identifier[week] , identifier[team_id] = identifier[team_id] )
keyword[return] identifier[result] | def get_injuries_by_team(self, season, week, team_id):
"""
Injuries by week and team
"""
result = self._method_call('Injuries/{season}/{week}/{team_id}', 'stats', season=season, week=week, team_id=team_id)
return result |
def get_argument_topology(self):
"""
Helper function to get topology argument.
Raises exception if argument is missing.
Returns the topology argument.
"""
try:
topology = self.get_argument(constants.PARAM_TOPOLOGY)
return topology
except tornado.web.MissingArgumentError as e:
raise Exception(e.log_message) | def function[get_argument_topology, parameter[self]]:
constant[
Helper function to get topology argument.
Raises exception if argument is missing.
Returns the topology argument.
]
<ast.Try object at 0x7da20c76dbd0> | keyword[def] identifier[get_argument_topology] ( identifier[self] ):
literal[string]
keyword[try] :
identifier[topology] = identifier[self] . identifier[get_argument] ( identifier[constants] . identifier[PARAM_TOPOLOGY] )
keyword[return] identifier[topology]
keyword[except] identifier[tornado] . identifier[web] . identifier[MissingArgumentError] keyword[as] identifier[e] :
keyword[raise] identifier[Exception] ( identifier[e] . identifier[log_message] ) | def get_argument_topology(self):
"""
Helper function to get topology argument.
Raises exception if argument is missing.
Returns the topology argument.
"""
try:
topology = self.get_argument(constants.PARAM_TOPOLOGY)
return topology # depends on [control=['try'], data=[]]
except tornado.web.MissingArgumentError as e:
raise Exception(e.log_message) # depends on [control=['except'], data=['e']] |
def build_policy(self, name, statements, roles, is_managed_policy=False):
"""
Generate policy for IAM cloudformation template
:param name: Name of the policy
:param statements: The "rules" the policy should have
:param roles: The roles associated with this policy
:param is_managed_policy: True if managed policy
:return: Ref to new policy
"""
if is_managed_policy:
policy = ManagedPolicy(
self.name_strip(name, True),
PolicyDocument={
"Version": self.VERSION_IAM,
"Statement": statements,
},
Roles=roles,
Path=self.__role_path,
)
else:
policy = PolicyType(
self.name_strip(name, True),
PolicyName=self.name_strip(name, True),
PolicyDocument={
"Version": self.VERSION_IAM,
"Statement": statements,
},
Roles=roles,
)
self.__template.add_resource(policy)
return policy | def function[build_policy, parameter[self, name, statements, roles, is_managed_policy]]:
constant[
Generate policy for IAM cloudformation template
:param name: Name of the policy
:param statements: The "rules" the policy should have
:param roles: The roles associated with this policy
:param is_managed_policy: True if managed policy
:return: Ref to new policy
]
if name[is_managed_policy] begin[:]
variable[policy] assign[=] call[name[ManagedPolicy], parameter[call[name[self].name_strip, parameter[name[name], constant[True]]]]]
call[name[self].__template.add_resource, parameter[name[policy]]]
return[name[policy]] | keyword[def] identifier[build_policy] ( identifier[self] , identifier[name] , identifier[statements] , identifier[roles] , identifier[is_managed_policy] = keyword[False] ):
literal[string]
keyword[if] identifier[is_managed_policy] :
identifier[policy] = identifier[ManagedPolicy] (
identifier[self] . identifier[name_strip] ( identifier[name] , keyword[True] ),
identifier[PolicyDocument] ={
literal[string] : identifier[self] . identifier[VERSION_IAM] ,
literal[string] : identifier[statements] ,
},
identifier[Roles] = identifier[roles] ,
identifier[Path] = identifier[self] . identifier[__role_path] ,
)
keyword[else] :
identifier[policy] = identifier[PolicyType] (
identifier[self] . identifier[name_strip] ( identifier[name] , keyword[True] ),
identifier[PolicyName] = identifier[self] . identifier[name_strip] ( identifier[name] , keyword[True] ),
identifier[PolicyDocument] ={
literal[string] : identifier[self] . identifier[VERSION_IAM] ,
literal[string] : identifier[statements] ,
},
identifier[Roles] = identifier[roles] ,
)
identifier[self] . identifier[__template] . identifier[add_resource] ( identifier[policy] )
keyword[return] identifier[policy] | def build_policy(self, name, statements, roles, is_managed_policy=False):
"""
Generate policy for IAM cloudformation template
:param name: Name of the policy
:param statements: The "rules" the policy should have
:param roles: The roles associated with this policy
:param is_managed_policy: True if managed policy
:return: Ref to new policy
"""
if is_managed_policy:
policy = ManagedPolicy(self.name_strip(name, True), PolicyDocument={'Version': self.VERSION_IAM, 'Statement': statements}, Roles=roles, Path=self.__role_path) # depends on [control=['if'], data=[]]
else:
policy = PolicyType(self.name_strip(name, True), PolicyName=self.name_strip(name, True), PolicyDocument={'Version': self.VERSION_IAM, 'Statement': statements}, Roles=roles)
self.__template.add_resource(policy)
return policy |
def get_summary(url, spk=True):
''' simple function to retrieve the header of a BSP file and return SPK object'''
# connect to file at URL
bspurl = urllib2.urlopen(url)
# retrieve the "tip" of a file at URL
bsptip = bspurl.read(10**5) # first 100kB
# save data in fake file object (in-memory)
bspstr = StringIO(bsptip)
# load into DAF object
daf = DAF(bspstr)
# return either SPK or DAF object
if spk:
# make a SPK object
spk = SPK(daf)
# return representation
return spk
else:
# return representation
return daf | def function[get_summary, parameter[url, spk]]:
constant[ simple function to retrieve the header of a BSP file and return SPK object]
variable[bspurl] assign[=] call[name[urllib2].urlopen, parameter[name[url]]]
variable[bsptip] assign[=] call[name[bspurl].read, parameter[binary_operation[constant[10] ** constant[5]]]]
variable[bspstr] assign[=] call[name[StringIO], parameter[name[bsptip]]]
variable[daf] assign[=] call[name[DAF], parameter[name[bspstr]]]
if name[spk] begin[:]
variable[spk] assign[=] call[name[SPK], parameter[name[daf]]]
return[name[spk]] | keyword[def] identifier[get_summary] ( identifier[url] , identifier[spk] = keyword[True] ):
literal[string]
identifier[bspurl] = identifier[urllib2] . identifier[urlopen] ( identifier[url] )
identifier[bsptip] = identifier[bspurl] . identifier[read] ( literal[int] ** literal[int] )
identifier[bspstr] = identifier[StringIO] ( identifier[bsptip] )
identifier[daf] = identifier[DAF] ( identifier[bspstr] )
keyword[if] identifier[spk] :
identifier[spk] = identifier[SPK] ( identifier[daf] )
keyword[return] identifier[spk]
keyword[else] :
keyword[return] identifier[daf] | def get_summary(url, spk=True):
""" simple function to retrieve the header of a BSP file and return SPK object"""
# connect to file at URL
bspurl = urllib2.urlopen(url)
# retrieve the "tip" of a file at URL
bsptip = bspurl.read(10 ** 5) # first 100kB
# save data in fake file object (in-memory)
bspstr = StringIO(bsptip)
# load into DAF object
daf = DAF(bspstr)
# return either SPK or DAF object
if spk:
# make a SPK object
spk = SPK(daf) # return representation
return spk # depends on [control=['if'], data=[]]
else: # return representation
return daf |
def validate_mandatory_str_fields(self, messages):
"""Fields marked as Mandatory and of type string in class
docstring must be of a type that provides __str__ method.
"""
FIELDS = ['name', 'download_location', 'verif_code', 'cr_text']
messages = self.validate_str_fields(FIELDS, False, messages)
return messages | def function[validate_mandatory_str_fields, parameter[self, messages]]:
constant[Fields marked as Mandatory and of type string in class
docstring must be of a type that provides __str__ method.
]
variable[FIELDS] assign[=] list[[<ast.Constant object at 0x7da1b0137460>, <ast.Constant object at 0x7da1b0136b90>, <ast.Constant object at 0x7da1b0137190>, <ast.Constant object at 0x7da18ede5570>]]
variable[messages] assign[=] call[name[self].validate_str_fields, parameter[name[FIELDS], constant[False], name[messages]]]
return[name[messages]] | keyword[def] identifier[validate_mandatory_str_fields] ( identifier[self] , identifier[messages] ):
literal[string]
identifier[FIELDS] =[ literal[string] , literal[string] , literal[string] , literal[string] ]
identifier[messages] = identifier[self] . identifier[validate_str_fields] ( identifier[FIELDS] , keyword[False] , identifier[messages] )
keyword[return] identifier[messages] | def validate_mandatory_str_fields(self, messages):
"""Fields marked as Mandatory and of type string in class
docstring must be of a type that provides __str__ method.
"""
FIELDS = ['name', 'download_location', 'verif_code', 'cr_text']
messages = self.validate_str_fields(FIELDS, False, messages)
return messages |
def provision(self, conf):
"""Provision this metaconfig's config with what we gathered.
Since Config has native support for ini files, we just need to
let this metaconfig's config know about the ini file we found.
In future scenarios, this is where we would implement logic
specific to a metaconfig source if that source is not natively
supported by Config.
"""
if self.ini and self.ini not in conf._ini_paths:
conf._ini_paths.insert(0, self.ini) | def function[provision, parameter[self, conf]]:
constant[Provision this metaconfig's config with what we gathered.
Since Config has native support for ini files, we just need to
let this metaconfig's config know about the ini file we found.
In future scenarios, this is where we would implement logic
specific to a metaconfig source if that source is not natively
supported by Config.
]
if <ast.BoolOp object at 0x7da1b09d0190> begin[:]
call[name[conf]._ini_paths.insert, parameter[constant[0], name[self].ini]] | keyword[def] identifier[provision] ( identifier[self] , identifier[conf] ):
literal[string]
keyword[if] identifier[self] . identifier[ini] keyword[and] identifier[self] . identifier[ini] keyword[not] keyword[in] identifier[conf] . identifier[_ini_paths] :
identifier[conf] . identifier[_ini_paths] . identifier[insert] ( literal[int] , identifier[self] . identifier[ini] ) | def provision(self, conf):
"""Provision this metaconfig's config with what we gathered.
Since Config has native support for ini files, we just need to
let this metaconfig's config know about the ini file we found.
In future scenarios, this is where we would implement logic
specific to a metaconfig source if that source is not natively
supported by Config.
"""
if self.ini and self.ini not in conf._ini_paths:
conf._ini_paths.insert(0, self.ini) # depends on [control=['if'], data=[]] |
def average(a, b, distance_function):
"""
Given two collections ``a`` and ``b``, this will return the mean of all
distances. ``distance_function`` is used to determine the distance between
two elements.
Example::
>>> single([1, 2], [3, 100], lambda x, y: abs(x-y))
26
"""
distances = [distance_function(x, y)
for x in a for y in b]
return sum(distances) / len(distances) | def function[average, parameter[a, b, distance_function]]:
constant[
Given two collections ``a`` and ``b``, this will return the mean of all
distances. ``distance_function`` is used to determine the distance between
two elements.
Example::
>>> single([1, 2], [3, 100], lambda x, y: abs(x-y))
26
]
variable[distances] assign[=] <ast.ListComp object at 0x7da18ede6830>
return[binary_operation[call[name[sum], parameter[name[distances]]] / call[name[len], parameter[name[distances]]]]] | keyword[def] identifier[average] ( identifier[a] , identifier[b] , identifier[distance_function] ):
literal[string]
identifier[distances] =[ identifier[distance_function] ( identifier[x] , identifier[y] )
keyword[for] identifier[x] keyword[in] identifier[a] keyword[for] identifier[y] keyword[in] identifier[b] ]
keyword[return] identifier[sum] ( identifier[distances] )/ identifier[len] ( identifier[distances] ) | def average(a, b, distance_function):
"""
Given two collections ``a`` and ``b``, this will return the mean of all
distances. ``distance_function`` is used to determine the distance between
two elements.
Example::
>>> single([1, 2], [3, 100], lambda x, y: abs(x-y))
26
"""
distances = [distance_function(x, y) for x in a for y in b]
return sum(distances) / len(distances) |
def entropy(self, logits):
""" Categorical distribution entropy calculation - sum probs * log(probs) """
probs = torch.exp(logits)
entropy = - torch.sum(probs * logits, dim=-1)
return entropy | def function[entropy, parameter[self, logits]]:
constant[ Categorical distribution entropy calculation - sum probs * log(probs) ]
variable[probs] assign[=] call[name[torch].exp, parameter[name[logits]]]
variable[entropy] assign[=] <ast.UnaryOp object at 0x7da2043456c0>
return[name[entropy]] | keyword[def] identifier[entropy] ( identifier[self] , identifier[logits] ):
literal[string]
identifier[probs] = identifier[torch] . identifier[exp] ( identifier[logits] )
identifier[entropy] =- identifier[torch] . identifier[sum] ( identifier[probs] * identifier[logits] , identifier[dim] =- literal[int] )
keyword[return] identifier[entropy] | def entropy(self, logits):
""" Categorical distribution entropy calculation - sum probs * log(probs) """
probs = torch.exp(logits)
entropy = -torch.sum(probs * logits, dim=-1)
return entropy |
def traverse(self, fn=None, specs=None, full_breadth=True):
"""Traverses object returning matching items
Traverses the set of children of the object, collecting the
all objects matching the defined specs. Each object can be
processed with the supplied function.
Args:
fn (function, optional): Function applied to matched objects
specs: List of specs to match
Specs must be types, functions or type[.group][.label]
specs to select objects to return, by default applies
to all objects.
full_breadth: Whether to traverse all objects
Whether to traverse the full set of objects on each
container or only the first.
Returns:
list: List of objects that matched
"""
if fn is None:
fn = lambda x: x
if specs is not None and not isinstance(specs, (list, set, tuple)):
specs = [specs]
accumulator = []
matches = specs is None
if not matches:
for spec in specs:
matches = self.matches(spec)
if matches: break
if matches:
accumulator.append(fn(self))
# Assumes composite objects are iterables
if self._deep_indexable:
for el in self:
if el is None:
continue
accumulator += el.traverse(fn, specs, full_breadth)
if not full_breadth: break
return accumulator | def function[traverse, parameter[self, fn, specs, full_breadth]]:
constant[Traverses object returning matching items
Traverses the set of children of the object, collecting the
all objects matching the defined specs. Each object can be
processed with the supplied function.
Args:
fn (function, optional): Function applied to matched objects
specs: List of specs to match
Specs must be types, functions or type[.group][.label]
specs to select objects to return, by default applies
to all objects.
full_breadth: Whether to traverse all objects
Whether to traverse the full set of objects on each
container or only the first.
Returns:
list: List of objects that matched
]
if compare[name[fn] is constant[None]] begin[:]
variable[fn] assign[=] <ast.Lambda object at 0x7da2054a5600>
if <ast.BoolOp object at 0x7da2054a7c70> begin[:]
variable[specs] assign[=] list[[<ast.Name object at 0x7da2054a49d0>]]
variable[accumulator] assign[=] list[[]]
variable[matches] assign[=] compare[name[specs] is constant[None]]
if <ast.UnaryOp object at 0x7da2054a6ec0> begin[:]
for taget[name[spec]] in starred[name[specs]] begin[:]
variable[matches] assign[=] call[name[self].matches, parameter[name[spec]]]
if name[matches] begin[:]
break
if name[matches] begin[:]
call[name[accumulator].append, parameter[call[name[fn], parameter[name[self]]]]]
if name[self]._deep_indexable begin[:]
for taget[name[el]] in starred[name[self]] begin[:]
if compare[name[el] is constant[None]] begin[:]
continue
<ast.AugAssign object at 0x7da2054a7970>
if <ast.UnaryOp object at 0x7da2054a65f0> begin[:]
break
return[name[accumulator]] | keyword[def] identifier[traverse] ( identifier[self] , identifier[fn] = keyword[None] , identifier[specs] = keyword[None] , identifier[full_breadth] = keyword[True] ):
literal[string]
keyword[if] identifier[fn] keyword[is] keyword[None] :
identifier[fn] = keyword[lambda] identifier[x] : identifier[x]
keyword[if] identifier[specs] keyword[is] keyword[not] keyword[None] keyword[and] keyword[not] identifier[isinstance] ( identifier[specs] ,( identifier[list] , identifier[set] , identifier[tuple] )):
identifier[specs] =[ identifier[specs] ]
identifier[accumulator] =[]
identifier[matches] = identifier[specs] keyword[is] keyword[None]
keyword[if] keyword[not] identifier[matches] :
keyword[for] identifier[spec] keyword[in] identifier[specs] :
identifier[matches] = identifier[self] . identifier[matches] ( identifier[spec] )
keyword[if] identifier[matches] : keyword[break]
keyword[if] identifier[matches] :
identifier[accumulator] . identifier[append] ( identifier[fn] ( identifier[self] ))
keyword[if] identifier[self] . identifier[_deep_indexable] :
keyword[for] identifier[el] keyword[in] identifier[self] :
keyword[if] identifier[el] keyword[is] keyword[None] :
keyword[continue]
identifier[accumulator] += identifier[el] . identifier[traverse] ( identifier[fn] , identifier[specs] , identifier[full_breadth] )
keyword[if] keyword[not] identifier[full_breadth] : keyword[break]
keyword[return] identifier[accumulator] | def traverse(self, fn=None, specs=None, full_breadth=True):
"""Traverses object returning matching items
Traverses the set of children of the object, collecting the
all objects matching the defined specs. Each object can be
processed with the supplied function.
Args:
fn (function, optional): Function applied to matched objects
specs: List of specs to match
Specs must be types, functions or type[.group][.label]
specs to select objects to return, by default applies
to all objects.
full_breadth: Whether to traverse all objects
Whether to traverse the full set of objects on each
container or only the first.
Returns:
list: List of objects that matched
"""
if fn is None:
fn = lambda x: x # depends on [control=['if'], data=['fn']]
if specs is not None and (not isinstance(specs, (list, set, tuple))):
specs = [specs] # depends on [control=['if'], data=[]]
accumulator = []
matches = specs is None
if not matches:
for spec in specs:
matches = self.matches(spec)
if matches:
break # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['spec']] # depends on [control=['if'], data=[]]
if matches:
accumulator.append(fn(self)) # depends on [control=['if'], data=[]]
# Assumes composite objects are iterables
if self._deep_indexable:
for el in self:
if el is None:
continue # depends on [control=['if'], data=[]]
accumulator += el.traverse(fn, specs, full_breadth)
if not full_breadth:
break # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['el']] # depends on [control=['if'], data=[]]
return accumulator |
def lx(mt, x):
""" lx : Returns the number of survivors at begining of age x """
if x < len(mt.lx):
return mt.lx[x]
else:
return 0 | def function[lx, parameter[mt, x]]:
constant[ lx : Returns the number of survivors at begining of age x ]
if compare[name[x] less[<] call[name[len], parameter[name[mt].lx]]] begin[:]
return[call[name[mt].lx][name[x]]] | keyword[def] identifier[lx] ( identifier[mt] , identifier[x] ):
literal[string]
keyword[if] identifier[x] < identifier[len] ( identifier[mt] . identifier[lx] ):
keyword[return] identifier[mt] . identifier[lx] [ identifier[x] ]
keyword[else] :
keyword[return] literal[int] | def lx(mt, x):
""" lx : Returns the number of survivors at begining of age x """
if x < len(mt.lx):
return mt.lx[x] # depends on [control=['if'], data=['x']]
else:
return 0 |
def _print_count(log):
r"""Print run-count information."""
log['cnt2'] += 1 # Current number
cp = log['cnt2']/log['totnr']*100 # Percentage
if log['cnt2'] == 0: # Not sure about this; brute seems to call the
pass # function with the first arguments twice...
elif log['cnt2'] > log['totnr']: # fmin-status
print(" fmin fct calls : %d" % (log['cnt2']-log['totnr']), end='\r')
elif int(cp) > log['cnt1'] or cp < 1 or log['cnt2'] == log['totnr']:
# Get seconds since start
sec = int(default_timer() - log['time'])
# Get estimate of remaining time, as string
tleft = str(timedelta(seconds=int(100*sec/cp - sec)))
# Print progress
pstr = (" brute fct calls : %d/%d"
% (log['cnt2'], log['totnr']))
if log['totnr'] > 100:
pstr += (" (%d %%); est: %s " % (cp, tleft))
print(pstr, end='\r')
if log['cnt2'] == log['totnr']:
# Empty previous line
print(" "*len(pstr), end='\r')
# Print final brute-message
print(" brute fct calls : %d" % log['totnr'])
# Update percentage cnt1
log['cnt1'] = cp
return log | def function[_print_count, parameter[log]]:
constant[Print run-count information.]
<ast.AugAssign object at 0x7da18f811630>
variable[cp] assign[=] binary_operation[binary_operation[call[name[log]][constant[cnt2]] / call[name[log]][constant[totnr]]] * constant[100]]
if compare[call[name[log]][constant[cnt2]] equal[==] constant[0]] begin[:]
pass
return[name[log]] | keyword[def] identifier[_print_count] ( identifier[log] ):
literal[string]
identifier[log] [ literal[string] ]+= literal[int]
identifier[cp] = identifier[log] [ literal[string] ]/ identifier[log] [ literal[string] ]* literal[int]
keyword[if] identifier[log] [ literal[string] ]== literal[int] :
keyword[pass]
keyword[elif] identifier[log] [ literal[string] ]> identifier[log] [ literal[string] ]:
identifier[print] ( literal[string] %( identifier[log] [ literal[string] ]- identifier[log] [ literal[string] ]), identifier[end] = literal[string] )
keyword[elif] identifier[int] ( identifier[cp] )> identifier[log] [ literal[string] ] keyword[or] identifier[cp] < literal[int] keyword[or] identifier[log] [ literal[string] ]== identifier[log] [ literal[string] ]:
identifier[sec] = identifier[int] ( identifier[default_timer] ()- identifier[log] [ literal[string] ])
identifier[tleft] = identifier[str] ( identifier[timedelta] ( identifier[seconds] = identifier[int] ( literal[int] * identifier[sec] / identifier[cp] - identifier[sec] )))
identifier[pstr] =( literal[string]
%( identifier[log] [ literal[string] ], identifier[log] [ literal[string] ]))
keyword[if] identifier[log] [ literal[string] ]> literal[int] :
identifier[pstr] +=( literal[string] %( identifier[cp] , identifier[tleft] ))
identifier[print] ( identifier[pstr] , identifier[end] = literal[string] )
keyword[if] identifier[log] [ literal[string] ]== identifier[log] [ literal[string] ]:
identifier[print] ( literal[string] * identifier[len] ( identifier[pstr] ), identifier[end] = literal[string] )
identifier[print] ( literal[string] % identifier[log] [ literal[string] ])
identifier[log] [ literal[string] ]= identifier[cp]
keyword[return] identifier[log] | def _print_count(log):
"""Print run-count information."""
log['cnt2'] += 1 # Current number
cp = log['cnt2'] / log['totnr'] * 100 # Percentage
if log['cnt2'] == 0: # Not sure about this; brute seems to call the
pass # function with the first arguments twice... # depends on [control=['if'], data=[]]
elif log['cnt2'] > log['totnr']: # fmin-status
print(' fmin fct calls : %d' % (log['cnt2'] - log['totnr']), end='\r') # depends on [control=['if'], data=[]]
elif int(cp) > log['cnt1'] or cp < 1 or log['cnt2'] == log['totnr']:
# Get seconds since start
sec = int(default_timer() - log['time'])
# Get estimate of remaining time, as string
tleft = str(timedelta(seconds=int(100 * sec / cp - sec)))
# Print progress
pstr = ' brute fct calls : %d/%d' % (log['cnt2'], log['totnr'])
if log['totnr'] > 100:
pstr += ' (%d %%); est: %s ' % (cp, tleft) # depends on [control=['if'], data=[]]
print(pstr, end='\r')
if log['cnt2'] == log['totnr']:
# Empty previous line
print(' ' * len(pstr), end='\r')
# Print final brute-message
print(' brute fct calls : %d' % log['totnr']) # depends on [control=['if'], data=[]]
# Update percentage cnt1
log['cnt1'] = cp # depends on [control=['if'], data=[]]
return log |
def notify(title, message, icon=icon.ico, retcode=None):
"""
Optional parameters:
* ``icon`` - path to an ICO file to display instead of the ntfy icon
"""
import win32api
import win32con
import win32gui
class WindowsBalloonTip:
def __init__(self, title, msg):
message_map = {
win32con.WM_DESTROY: self.OnDestroy,
}
# Register the Window class.
wc = win32gui.WNDCLASS()
hinst = wc.hInstance = win32api.GetModuleHandle(None)
wc.lpszClassName = "PythonTaskbar"
wc.lpfnWndProc = message_map # could also specify a wndproc.
classAtom = win32gui.RegisterClass(wc)
# Create the Window.
style = win32con.WS_OVERLAPPED | win32con.WS_SYSMENU
self.hwnd = win32gui.CreateWindow(
classAtom, "Taskbar", style, 0, 0, win32con.CW_USEDEFAULT,
win32con.CW_USEDEFAULT, 0, 0, hinst, None)
win32gui.UpdateWindow(self.hwnd)
iconPathName = os.path.abspath(icon)
icon_flags = win32con.LR_LOADFROMFILE | win32con.LR_DEFAULTSIZE
try:
hicon = win32gui.LoadImage(
hinst, iconPathName, win32con.IMAGE_ICON, 0, 0, icon_flags)
except:
hicon = win32gui.LoadIcon(0, win32con.IDI_APPLICATION)
flags = win32gui.NIF_ICON | win32gui.NIF_MESSAGE | win32gui.NIF_TIP
nid = (self.hwnd, 0, flags, win32con.WM_USER + 20, hicon,
"tooltip")
win32gui.Shell_NotifyIcon(win32gui.NIM_ADD, nid)
win32gui.Shell_NotifyIcon(
win32gui.NIM_MODIFY,
(self.hwnd, 0, win32gui.NIF_INFO, win32con.WM_USER + 20, hicon,
"Balloon tooltip", title, 200, msg),
)
win32gui.DestroyWindow(self.hwnd)
win32gui.UnregisterClass(wc.lpszClassName, None)
def OnDestroy(self, hwnd, msg, wparam, lparam):
win32api.PostQuitMessage(0) # Terminate the app.
WindowsBalloonTip(message, title) | def function[notify, parameter[title, message, icon, retcode]]:
constant[
Optional parameters:
* ``icon`` - path to an ICO file to display instead of the ntfy icon
]
import module[win32api]
import module[win32con]
import module[win32gui]
class class[WindowsBalloonTip, parameter[]] begin[:]
def function[__init__, parameter[self, title, msg]]:
variable[message_map] assign[=] dictionary[[<ast.Attribute object at 0x7da1b1d9bfa0>], [<ast.Attribute object at 0x7da1b1d9bc10>]]
variable[wc] assign[=] call[name[win32gui].WNDCLASS, parameter[]]
variable[hinst] assign[=] call[name[win32api].GetModuleHandle, parameter[constant[None]]]
name[wc].lpszClassName assign[=] constant[PythonTaskbar]
name[wc].lpfnWndProc assign[=] name[message_map]
variable[classAtom] assign[=] call[name[win32gui].RegisterClass, parameter[name[wc]]]
variable[style] assign[=] binary_operation[name[win32con].WS_OVERLAPPED <ast.BitOr object at 0x7da2590d6aa0> name[win32con].WS_SYSMENU]
name[self].hwnd assign[=] call[name[win32gui].CreateWindow, parameter[name[classAtom], constant[Taskbar], name[style], constant[0], constant[0], name[win32con].CW_USEDEFAULT, name[win32con].CW_USEDEFAULT, constant[0], constant[0], name[hinst], constant[None]]]
call[name[win32gui].UpdateWindow, parameter[name[self].hwnd]]
variable[iconPathName] assign[=] call[name[os].path.abspath, parameter[name[icon]]]
variable[icon_flags] assign[=] binary_operation[name[win32con].LR_LOADFROMFILE <ast.BitOr object at 0x7da2590d6aa0> name[win32con].LR_DEFAULTSIZE]
<ast.Try object at 0x7da1b1d99180>
variable[flags] assign[=] binary_operation[binary_operation[name[win32gui].NIF_ICON <ast.BitOr object at 0x7da2590d6aa0> name[win32gui].NIF_MESSAGE] <ast.BitOr object at 0x7da2590d6aa0> name[win32gui].NIF_TIP]
variable[nid] assign[=] tuple[[<ast.Attribute object at 0x7da1b1d98460>, <ast.Constant object at 0x7da1b1d984c0>, <ast.Name object at 0x7da1b1d99e40>, <ast.BinOp object at 0x7da1b1d98a00>, <ast.Name object at 0x7da1b1d9a8c0>, <ast.Constant object at 0x7da1b1d98040>]]
call[name[win32gui].Shell_NotifyIcon, parameter[name[win32gui].NIM_ADD, name[nid]]]
call[name[win32gui].Shell_NotifyIcon, parameter[name[win32gui].NIM_MODIFY, tuple[[<ast.Attribute object at 0x7da1b1d98ac0>, <ast.Constant object at 0x7da1b1d9b010>, <ast.Attribute object at 0x7da1b1d9ab30>, <ast.BinOp object at 0x7da1b1d995a0>, <ast.Name object at 0x7da1b1d9b4c0>, <ast.Constant object at 0x7da1b1d9a4d0>, <ast.Name object at 0x7da1b1d98f70>, <ast.Constant object at 0x7da1b1d9b2e0>, <ast.Name object at 0x7da1b1d989d0>]]]]
call[name[win32gui].DestroyWindow, parameter[name[self].hwnd]]
call[name[win32gui].UnregisterClass, parameter[name[wc].lpszClassName, constant[None]]]
def function[OnDestroy, parameter[self, hwnd, msg, wparam, lparam]]:
call[name[win32api].PostQuitMessage, parameter[constant[0]]]
call[name[WindowsBalloonTip], parameter[name[message], name[title]]] | keyword[def] identifier[notify] ( identifier[title] , identifier[message] , identifier[icon] = identifier[icon] . identifier[ico] , identifier[retcode] = keyword[None] ):
literal[string]
keyword[import] identifier[win32api]
keyword[import] identifier[win32con]
keyword[import] identifier[win32gui]
keyword[class] identifier[WindowsBalloonTip] :
keyword[def] identifier[__init__] ( identifier[self] , identifier[title] , identifier[msg] ):
identifier[message_map] ={
identifier[win32con] . identifier[WM_DESTROY] : identifier[self] . identifier[OnDestroy] ,
}
identifier[wc] = identifier[win32gui] . identifier[WNDCLASS] ()
identifier[hinst] = identifier[wc] . identifier[hInstance] = identifier[win32api] . identifier[GetModuleHandle] ( keyword[None] )
identifier[wc] . identifier[lpszClassName] = literal[string]
identifier[wc] . identifier[lpfnWndProc] = identifier[message_map]
identifier[classAtom] = identifier[win32gui] . identifier[RegisterClass] ( identifier[wc] )
identifier[style] = identifier[win32con] . identifier[WS_OVERLAPPED] | identifier[win32con] . identifier[WS_SYSMENU]
identifier[self] . identifier[hwnd] = identifier[win32gui] . identifier[CreateWindow] (
identifier[classAtom] , literal[string] , identifier[style] , literal[int] , literal[int] , identifier[win32con] . identifier[CW_USEDEFAULT] ,
identifier[win32con] . identifier[CW_USEDEFAULT] , literal[int] , literal[int] , identifier[hinst] , keyword[None] )
identifier[win32gui] . identifier[UpdateWindow] ( identifier[self] . identifier[hwnd] )
identifier[iconPathName] = identifier[os] . identifier[path] . identifier[abspath] ( identifier[icon] )
identifier[icon_flags] = identifier[win32con] . identifier[LR_LOADFROMFILE] | identifier[win32con] . identifier[LR_DEFAULTSIZE]
keyword[try] :
identifier[hicon] = identifier[win32gui] . identifier[LoadImage] (
identifier[hinst] , identifier[iconPathName] , identifier[win32con] . identifier[IMAGE_ICON] , literal[int] , literal[int] , identifier[icon_flags] )
keyword[except] :
identifier[hicon] = identifier[win32gui] . identifier[LoadIcon] ( literal[int] , identifier[win32con] . identifier[IDI_APPLICATION] )
identifier[flags] = identifier[win32gui] . identifier[NIF_ICON] | identifier[win32gui] . identifier[NIF_MESSAGE] | identifier[win32gui] . identifier[NIF_TIP]
identifier[nid] =( identifier[self] . identifier[hwnd] , literal[int] , identifier[flags] , identifier[win32con] . identifier[WM_USER] + literal[int] , identifier[hicon] ,
literal[string] )
identifier[win32gui] . identifier[Shell_NotifyIcon] ( identifier[win32gui] . identifier[NIM_ADD] , identifier[nid] )
identifier[win32gui] . identifier[Shell_NotifyIcon] (
identifier[win32gui] . identifier[NIM_MODIFY] ,
( identifier[self] . identifier[hwnd] , literal[int] , identifier[win32gui] . identifier[NIF_INFO] , identifier[win32con] . identifier[WM_USER] + literal[int] , identifier[hicon] ,
literal[string] , identifier[title] , literal[int] , identifier[msg] ),
)
identifier[win32gui] . identifier[DestroyWindow] ( identifier[self] . identifier[hwnd] )
identifier[win32gui] . identifier[UnregisterClass] ( identifier[wc] . identifier[lpszClassName] , keyword[None] )
keyword[def] identifier[OnDestroy] ( identifier[self] , identifier[hwnd] , identifier[msg] , identifier[wparam] , identifier[lparam] ):
identifier[win32api] . identifier[PostQuitMessage] ( literal[int] )
identifier[WindowsBalloonTip] ( identifier[message] , identifier[title] ) | def notify(title, message, icon=icon.ico, retcode=None):
"""
Optional parameters:
* ``icon`` - path to an ICO file to display instead of the ntfy icon
"""
import win32api
import win32con
import win32gui
class WindowsBalloonTip:
def __init__(self, title, msg):
message_map = {win32con.WM_DESTROY: self.OnDestroy}
# Register the Window class.
wc = win32gui.WNDCLASS()
hinst = wc.hInstance = win32api.GetModuleHandle(None)
wc.lpszClassName = 'PythonTaskbar'
wc.lpfnWndProc = message_map # could also specify a wndproc.
classAtom = win32gui.RegisterClass(wc)
# Create the Window.
style = win32con.WS_OVERLAPPED | win32con.WS_SYSMENU
self.hwnd = win32gui.CreateWindow(classAtom, 'Taskbar', style, 0, 0, win32con.CW_USEDEFAULT, win32con.CW_USEDEFAULT, 0, 0, hinst, None)
win32gui.UpdateWindow(self.hwnd)
iconPathName = os.path.abspath(icon)
icon_flags = win32con.LR_LOADFROMFILE | win32con.LR_DEFAULTSIZE
try:
hicon = win32gui.LoadImage(hinst, iconPathName, win32con.IMAGE_ICON, 0, 0, icon_flags) # depends on [control=['try'], data=[]]
except:
hicon = win32gui.LoadIcon(0, win32con.IDI_APPLICATION) # depends on [control=['except'], data=[]]
flags = win32gui.NIF_ICON | win32gui.NIF_MESSAGE | win32gui.NIF_TIP
nid = (self.hwnd, 0, flags, win32con.WM_USER + 20, hicon, 'tooltip')
win32gui.Shell_NotifyIcon(win32gui.NIM_ADD, nid)
win32gui.Shell_NotifyIcon(win32gui.NIM_MODIFY, (self.hwnd, 0, win32gui.NIF_INFO, win32con.WM_USER + 20, hicon, 'Balloon tooltip', title, 200, msg))
win32gui.DestroyWindow(self.hwnd)
win32gui.UnregisterClass(wc.lpszClassName, None)
def OnDestroy(self, hwnd, msg, wparam, lparam):
win32api.PostQuitMessage(0) # Terminate the app.
WindowsBalloonTip(message, title) |
def register_signals(self):
"""Register signals."""
from .models import Collection
from .receivers import CollectionUpdater
if self.app.config['COLLECTIONS_USE_PERCOLATOR']:
from .percolator import collection_inserted_percolator, \
collection_removed_percolator, \
collection_updated_percolator
# Register collection signals to update percolators
listen(Collection, 'after_insert',
collection_inserted_percolator)
listen(Collection, 'after_update',
collection_updated_percolator)
listen(Collection, 'after_delete',
collection_removed_percolator)
# Register Record signals to update record['_collections']
self.update_function = CollectionUpdater(app=self.app)
signals.before_record_insert.connect(self.update_function,
weak=False)
signals.before_record_update.connect(self.update_function,
weak=False) | def function[register_signals, parameter[self]]:
constant[Register signals.]
from relative_module[models] import module[Collection]
from relative_module[receivers] import module[CollectionUpdater]
if call[name[self].app.config][constant[COLLECTIONS_USE_PERCOLATOR]] begin[:]
from relative_module[percolator] import module[collection_inserted_percolator], module[collection_removed_percolator], module[collection_updated_percolator]
call[name[listen], parameter[name[Collection], constant[after_insert], name[collection_inserted_percolator]]]
call[name[listen], parameter[name[Collection], constant[after_update], name[collection_updated_percolator]]]
call[name[listen], parameter[name[Collection], constant[after_delete], name[collection_removed_percolator]]]
name[self].update_function assign[=] call[name[CollectionUpdater], parameter[]]
call[name[signals].before_record_insert.connect, parameter[name[self].update_function]]
call[name[signals].before_record_update.connect, parameter[name[self].update_function]] | keyword[def] identifier[register_signals] ( identifier[self] ):
literal[string]
keyword[from] . identifier[models] keyword[import] identifier[Collection]
keyword[from] . identifier[receivers] keyword[import] identifier[CollectionUpdater]
keyword[if] identifier[self] . identifier[app] . identifier[config] [ literal[string] ]:
keyword[from] . identifier[percolator] keyword[import] identifier[collection_inserted_percolator] , identifier[collection_removed_percolator] , identifier[collection_updated_percolator]
identifier[listen] ( identifier[Collection] , literal[string] ,
identifier[collection_inserted_percolator] )
identifier[listen] ( identifier[Collection] , literal[string] ,
identifier[collection_updated_percolator] )
identifier[listen] ( identifier[Collection] , literal[string] ,
identifier[collection_removed_percolator] )
identifier[self] . identifier[update_function] = identifier[CollectionUpdater] ( identifier[app] = identifier[self] . identifier[app] )
identifier[signals] . identifier[before_record_insert] . identifier[connect] ( identifier[self] . identifier[update_function] ,
identifier[weak] = keyword[False] )
identifier[signals] . identifier[before_record_update] . identifier[connect] ( identifier[self] . identifier[update_function] ,
identifier[weak] = keyword[False] ) | def register_signals(self):
"""Register signals."""
from .models import Collection
from .receivers import CollectionUpdater
if self.app.config['COLLECTIONS_USE_PERCOLATOR']:
from .percolator import collection_inserted_percolator, collection_removed_percolator, collection_updated_percolator
# Register collection signals to update percolators
listen(Collection, 'after_insert', collection_inserted_percolator)
listen(Collection, 'after_update', collection_updated_percolator)
listen(Collection, 'after_delete', collection_removed_percolator) # depends on [control=['if'], data=[]]
# Register Record signals to update record['_collections']
self.update_function = CollectionUpdater(app=self.app)
signals.before_record_insert.connect(self.update_function, weak=False)
signals.before_record_update.connect(self.update_function, weak=False) |
def send_notification(self, code, subcode):
"""Utility to send notification message.
Closes the socket after sending the message.
:Parameters:
- `socket`: (socket) - socket over which to send notification
message.
- `code`: (int) - BGP Notification code
- `subcode`: (int) - BGP Notification sub-code
RFC ref: http://tools.ietf.org/html/rfc4486
http://www.iana.org/assignments/bgp-parameters/bgp-parameters.xhtml
"""
notification = BGPNotification(code, subcode)
reason = notification.reason
self._send_with_lock(notification)
self._signal_bus.bgp_error(self._peer, code, subcode, reason)
if len(self._localname):
LOG.error('Sent notification to %r >> %s', self._localname,
notification)
self._socket.close() | def function[send_notification, parameter[self, code, subcode]]:
constant[Utility to send notification message.
Closes the socket after sending the message.
:Parameters:
- `socket`: (socket) - socket over which to send notification
message.
- `code`: (int) - BGP Notification code
- `subcode`: (int) - BGP Notification sub-code
RFC ref: http://tools.ietf.org/html/rfc4486
http://www.iana.org/assignments/bgp-parameters/bgp-parameters.xhtml
]
variable[notification] assign[=] call[name[BGPNotification], parameter[name[code], name[subcode]]]
variable[reason] assign[=] name[notification].reason
call[name[self]._send_with_lock, parameter[name[notification]]]
call[name[self]._signal_bus.bgp_error, parameter[name[self]._peer, name[code], name[subcode], name[reason]]]
if call[name[len], parameter[name[self]._localname]] begin[:]
call[name[LOG].error, parameter[constant[Sent notification to %r >> %s], name[self]._localname, name[notification]]]
call[name[self]._socket.close, parameter[]] | keyword[def] identifier[send_notification] ( identifier[self] , identifier[code] , identifier[subcode] ):
literal[string]
identifier[notification] = identifier[BGPNotification] ( identifier[code] , identifier[subcode] )
identifier[reason] = identifier[notification] . identifier[reason]
identifier[self] . identifier[_send_with_lock] ( identifier[notification] )
identifier[self] . identifier[_signal_bus] . identifier[bgp_error] ( identifier[self] . identifier[_peer] , identifier[code] , identifier[subcode] , identifier[reason] )
keyword[if] identifier[len] ( identifier[self] . identifier[_localname] ):
identifier[LOG] . identifier[error] ( literal[string] , identifier[self] . identifier[_localname] ,
identifier[notification] )
identifier[self] . identifier[_socket] . identifier[close] () | def send_notification(self, code, subcode):
"""Utility to send notification message.
Closes the socket after sending the message.
:Parameters:
- `socket`: (socket) - socket over which to send notification
message.
- `code`: (int) - BGP Notification code
- `subcode`: (int) - BGP Notification sub-code
RFC ref: http://tools.ietf.org/html/rfc4486
http://www.iana.org/assignments/bgp-parameters/bgp-parameters.xhtml
"""
notification = BGPNotification(code, subcode)
reason = notification.reason
self._send_with_lock(notification)
self._signal_bus.bgp_error(self._peer, code, subcode, reason)
if len(self._localname):
LOG.error('Sent notification to %r >> %s', self._localname, notification) # depends on [control=['if'], data=[]]
self._socket.close() |
def full_domain_validator(hostname):
"""
Fully validates a domain name as compilant with the standard rules:
- Composed of series of labels concatenated with dots, as are all domain names.
- Each label must be between 1 and 63 characters long.
- The entire hostname (including the delimiting dots) has a maximum of 255 characters.
- Only characters 'a' through 'z' (in a case-insensitive manner), the digits '0' through '9'.
- Labels can't start or end with a hyphen.
"""
HOSTNAME_LABEL_PATTERN = re.compile("(?!-)[A-Z\d-]+(?<!-)$", re.IGNORECASE)
if not hostname:
return
if len(hostname) > 255:
raise ValidationError(_("The domain name cannot be composed of more than 255 characters."))
if hostname[-1:] == ".":
hostname = hostname[:-1] # strip exactly one dot from the right, if present
for label in hostname.split("."):
if len(label) > 63:
raise ValidationError(
_("The label '%(label)s' is too long (maximum is 63 characters).") % {'label': label})
if not HOSTNAME_LABEL_PATTERN.match(label):
raise ValidationError(_("Unallowed characters in label '%(label)s'.") % {'label': label}) | def function[full_domain_validator, parameter[hostname]]:
constant[
Fully validates a domain name as compilant with the standard rules:
- Composed of series of labels concatenated with dots, as are all domain names.
- Each label must be between 1 and 63 characters long.
- The entire hostname (including the delimiting dots) has a maximum of 255 characters.
- Only characters 'a' through 'z' (in a case-insensitive manner), the digits '0' through '9'.
- Labels can't start or end with a hyphen.
]
variable[HOSTNAME_LABEL_PATTERN] assign[=] call[name[re].compile, parameter[constant[(?!-)[A-Z\d-]+(?<!-)$], name[re].IGNORECASE]]
if <ast.UnaryOp object at 0x7da1b0d501f0> begin[:]
return[None]
if compare[call[name[len], parameter[name[hostname]]] greater[>] constant[255]] begin[:]
<ast.Raise object at 0x7da1b0d53940>
if compare[call[name[hostname]][<ast.Slice object at 0x7da1b0d50640>] equal[==] constant[.]] begin[:]
variable[hostname] assign[=] call[name[hostname]][<ast.Slice object at 0x7da1b0d50610>]
for taget[name[label]] in starred[call[name[hostname].split, parameter[constant[.]]]] begin[:]
if compare[call[name[len], parameter[name[label]]] greater[>] constant[63]] begin[:]
<ast.Raise object at 0x7da1b0d502e0>
if <ast.UnaryOp object at 0x7da1b0d50ee0> begin[:]
<ast.Raise object at 0x7da1b0d51ab0> | keyword[def] identifier[full_domain_validator] ( identifier[hostname] ):
literal[string]
identifier[HOSTNAME_LABEL_PATTERN] = identifier[re] . identifier[compile] ( literal[string] , identifier[re] . identifier[IGNORECASE] )
keyword[if] keyword[not] identifier[hostname] :
keyword[return]
keyword[if] identifier[len] ( identifier[hostname] )> literal[int] :
keyword[raise] identifier[ValidationError] ( identifier[_] ( literal[string] ))
keyword[if] identifier[hostname] [- literal[int] :]== literal[string] :
identifier[hostname] = identifier[hostname] [:- literal[int] ]
keyword[for] identifier[label] keyword[in] identifier[hostname] . identifier[split] ( literal[string] ):
keyword[if] identifier[len] ( identifier[label] )> literal[int] :
keyword[raise] identifier[ValidationError] (
identifier[_] ( literal[string] )%{ literal[string] : identifier[label] })
keyword[if] keyword[not] identifier[HOSTNAME_LABEL_PATTERN] . identifier[match] ( identifier[label] ):
keyword[raise] identifier[ValidationError] ( identifier[_] ( literal[string] )%{ literal[string] : identifier[label] }) | def full_domain_validator(hostname):
"""
Fully validates a domain name as compilant with the standard rules:
- Composed of series of labels concatenated with dots, as are all domain names.
- Each label must be between 1 and 63 characters long.
- The entire hostname (including the delimiting dots) has a maximum of 255 characters.
- Only characters 'a' through 'z' (in a case-insensitive manner), the digits '0' through '9'.
- Labels can't start or end with a hyphen.
"""
HOSTNAME_LABEL_PATTERN = re.compile('(?!-)[A-Z\\d-]+(?<!-)$', re.IGNORECASE)
if not hostname:
return # depends on [control=['if'], data=[]]
if len(hostname) > 255:
raise ValidationError(_('The domain name cannot be composed of more than 255 characters.')) # depends on [control=['if'], data=[]]
if hostname[-1:] == '.':
hostname = hostname[:-1] # strip exactly one dot from the right, if present # depends on [control=['if'], data=[]]
for label in hostname.split('.'):
if len(label) > 63:
raise ValidationError(_("The label '%(label)s' is too long (maximum is 63 characters).") % {'label': label}) # depends on [control=['if'], data=[]]
if not HOSTNAME_LABEL_PATTERN.match(label):
raise ValidationError(_("Unallowed characters in label '%(label)s'.") % {'label': label}) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['label']] |
def _prepend_schema_name(self, message):
"""
If a custom schema name has been defined, prepends it to the error
message that gets raised when a schema error occurs.
"""
if self._name:
message = "{0!r} {1!s}".format(self._name, message)
return message | def function[_prepend_schema_name, parameter[self, message]]:
constant[
If a custom schema name has been defined, prepends it to the error
message that gets raised when a schema error occurs.
]
if name[self]._name begin[:]
variable[message] assign[=] call[constant[{0!r} {1!s}].format, parameter[name[self]._name, name[message]]]
return[name[message]] | keyword[def] identifier[_prepend_schema_name] ( identifier[self] , identifier[message] ):
literal[string]
keyword[if] identifier[self] . identifier[_name] :
identifier[message] = literal[string] . identifier[format] ( identifier[self] . identifier[_name] , identifier[message] )
keyword[return] identifier[message] | def _prepend_schema_name(self, message):
"""
If a custom schema name has been defined, prepends it to the error
message that gets raised when a schema error occurs.
"""
if self._name:
message = '{0!r} {1!s}'.format(self._name, message) # depends on [control=['if'], data=[]]
return message |
def _get_redis_server(opts=None):
'''
Return the Redis server instance.
Caching the object instance.
'''
global REDIS_SERVER
if REDIS_SERVER:
return REDIS_SERVER
if not opts:
opts = _get_redis_cache_opts()
if opts['cluster_mode']:
REDIS_SERVER = StrictRedisCluster(startup_nodes=opts['startup_nodes'],
skip_full_coverage_check=opts['skip_full_coverage_check'])
else:
REDIS_SERVER = redis.StrictRedis(opts['host'],
opts['port'],
unix_socket_path=opts['unix_socket_path'],
db=opts['db'],
password=opts['password'])
return REDIS_SERVER | def function[_get_redis_server, parameter[opts]]:
constant[
Return the Redis server instance.
Caching the object instance.
]
<ast.Global object at 0x7da1b2184580>
if name[REDIS_SERVER] begin[:]
return[name[REDIS_SERVER]]
if <ast.UnaryOp object at 0x7da1b2185ea0> begin[:]
variable[opts] assign[=] call[name[_get_redis_cache_opts], parameter[]]
if call[name[opts]][constant[cluster_mode]] begin[:]
variable[REDIS_SERVER] assign[=] call[name[StrictRedisCluster], parameter[]]
return[name[REDIS_SERVER]] | keyword[def] identifier[_get_redis_server] ( identifier[opts] = keyword[None] ):
literal[string]
keyword[global] identifier[REDIS_SERVER]
keyword[if] identifier[REDIS_SERVER] :
keyword[return] identifier[REDIS_SERVER]
keyword[if] keyword[not] identifier[opts] :
identifier[opts] = identifier[_get_redis_cache_opts] ()
keyword[if] identifier[opts] [ literal[string] ]:
identifier[REDIS_SERVER] = identifier[StrictRedisCluster] ( identifier[startup_nodes] = identifier[opts] [ literal[string] ],
identifier[skip_full_coverage_check] = identifier[opts] [ literal[string] ])
keyword[else] :
identifier[REDIS_SERVER] = identifier[redis] . identifier[StrictRedis] ( identifier[opts] [ literal[string] ],
identifier[opts] [ literal[string] ],
identifier[unix_socket_path] = identifier[opts] [ literal[string] ],
identifier[db] = identifier[opts] [ literal[string] ],
identifier[password] = identifier[opts] [ literal[string] ])
keyword[return] identifier[REDIS_SERVER] | def _get_redis_server(opts=None):
"""
Return the Redis server instance.
Caching the object instance.
"""
global REDIS_SERVER
if REDIS_SERVER:
return REDIS_SERVER # depends on [control=['if'], data=[]]
if not opts:
opts = _get_redis_cache_opts() # depends on [control=['if'], data=[]]
if opts['cluster_mode']:
REDIS_SERVER = StrictRedisCluster(startup_nodes=opts['startup_nodes'], skip_full_coverage_check=opts['skip_full_coverage_check']) # depends on [control=['if'], data=[]]
else:
REDIS_SERVER = redis.StrictRedis(opts['host'], opts['port'], unix_socket_path=opts['unix_socket_path'], db=opts['db'], password=opts['password'])
return REDIS_SERVER |
def validate_key(self, activation_key):
"""
Verify that the activation key is valid and within the
permitted activation time window, returning the username if
valid or raising ``ActivationError`` if not.
"""
try:
username = signing.loads(
activation_key,
salt=REGISTRATION_SALT,
max_age=settings.ACCOUNT_ACTIVATION_DAYS * 86400
)
return username
except signing.SignatureExpired:
raise ActivationError(
self.EXPIRED_MESSAGE,
code='expired'
)
except signing.BadSignature:
raise ActivationError(
self.INVALID_KEY_MESSAGE,
code='invalid_key',
params={'activation_key': activation_key}
) | def function[validate_key, parameter[self, activation_key]]:
constant[
Verify that the activation key is valid and within the
permitted activation time window, returning the username if
valid or raising ``ActivationError`` if not.
]
<ast.Try object at 0x7da20cabce80> | keyword[def] identifier[validate_key] ( identifier[self] , identifier[activation_key] ):
literal[string]
keyword[try] :
identifier[username] = identifier[signing] . identifier[loads] (
identifier[activation_key] ,
identifier[salt] = identifier[REGISTRATION_SALT] ,
identifier[max_age] = identifier[settings] . identifier[ACCOUNT_ACTIVATION_DAYS] * literal[int]
)
keyword[return] identifier[username]
keyword[except] identifier[signing] . identifier[SignatureExpired] :
keyword[raise] identifier[ActivationError] (
identifier[self] . identifier[EXPIRED_MESSAGE] ,
identifier[code] = literal[string]
)
keyword[except] identifier[signing] . identifier[BadSignature] :
keyword[raise] identifier[ActivationError] (
identifier[self] . identifier[INVALID_KEY_MESSAGE] ,
identifier[code] = literal[string] ,
identifier[params] ={ literal[string] : identifier[activation_key] }
) | def validate_key(self, activation_key):
"""
Verify that the activation key is valid and within the
permitted activation time window, returning the username if
valid or raising ``ActivationError`` if not.
"""
try:
username = signing.loads(activation_key, salt=REGISTRATION_SALT, max_age=settings.ACCOUNT_ACTIVATION_DAYS * 86400)
return username # depends on [control=['try'], data=[]]
except signing.SignatureExpired:
raise ActivationError(self.EXPIRED_MESSAGE, code='expired') # depends on [control=['except'], data=[]]
except signing.BadSignature:
raise ActivationError(self.INVALID_KEY_MESSAGE, code='invalid_key', params={'activation_key': activation_key}) # depends on [control=['except'], data=[]] |
def get_current_tournaments():
"""Get the next 200 tournaments from pokerstars."""
schedule_page = requests.get(TOURNAMENTS_XML_URL)
root = etree.XML(schedule_page.content)
for tour in root.iter('{*}tournament'):
yield _Tournament(
start_date=tour.findtext('{*}start_date'),
name=tour.findtext('{*}name'),
game=tour.findtext('{*}game'),
buyin=tour.findtext('{*}buy_in_fee'),
players=tour.get('players')
) | def function[get_current_tournaments, parameter[]]:
constant[Get the next 200 tournaments from pokerstars.]
variable[schedule_page] assign[=] call[name[requests].get, parameter[name[TOURNAMENTS_XML_URL]]]
variable[root] assign[=] call[name[etree].XML, parameter[name[schedule_page].content]]
for taget[name[tour]] in starred[call[name[root].iter, parameter[constant[{*}tournament]]]] begin[:]
<ast.Yield object at 0x7da1b16bda50> | keyword[def] identifier[get_current_tournaments] ():
literal[string]
identifier[schedule_page] = identifier[requests] . identifier[get] ( identifier[TOURNAMENTS_XML_URL] )
identifier[root] = identifier[etree] . identifier[XML] ( identifier[schedule_page] . identifier[content] )
keyword[for] identifier[tour] keyword[in] identifier[root] . identifier[iter] ( literal[string] ):
keyword[yield] identifier[_Tournament] (
identifier[start_date] = identifier[tour] . identifier[findtext] ( literal[string] ),
identifier[name] = identifier[tour] . identifier[findtext] ( literal[string] ),
identifier[game] = identifier[tour] . identifier[findtext] ( literal[string] ),
identifier[buyin] = identifier[tour] . identifier[findtext] ( literal[string] ),
identifier[players] = identifier[tour] . identifier[get] ( literal[string] )
) | def get_current_tournaments():
"""Get the next 200 tournaments from pokerstars."""
schedule_page = requests.get(TOURNAMENTS_XML_URL)
root = etree.XML(schedule_page.content)
for tour in root.iter('{*}tournament'):
yield _Tournament(start_date=tour.findtext('{*}start_date'), name=tour.findtext('{*}name'), game=tour.findtext('{*}game'), buyin=tour.findtext('{*}buy_in_fee'), players=tour.get('players')) # depends on [control=['for'], data=['tour']] |
def do_intersect(bb1, bb2):
"""
Helper function that returns True if two bounding boxes overlap.
"""
if bb1[0] + bb1[2] < bb2[0] or bb2[0] + bb2[2] < bb1[0]:
return False
if bb1[1] + bb1[3] < bb2[1] or bb2[1] + bb2[3] < bb1[1]:
return False
return True | def function[do_intersect, parameter[bb1, bb2]]:
constant[
Helper function that returns True if two bounding boxes overlap.
]
if <ast.BoolOp object at 0x7da1b13b6080> begin[:]
return[constant[False]]
if <ast.BoolOp object at 0x7da1b13442b0> begin[:]
return[constant[False]]
return[constant[True]] | keyword[def] identifier[do_intersect] ( identifier[bb1] , identifier[bb2] ):
literal[string]
keyword[if] identifier[bb1] [ literal[int] ]+ identifier[bb1] [ literal[int] ]< identifier[bb2] [ literal[int] ] keyword[or] identifier[bb2] [ literal[int] ]+ identifier[bb2] [ literal[int] ]< identifier[bb1] [ literal[int] ]:
keyword[return] keyword[False]
keyword[if] identifier[bb1] [ literal[int] ]+ identifier[bb1] [ literal[int] ]< identifier[bb2] [ literal[int] ] keyword[or] identifier[bb2] [ literal[int] ]+ identifier[bb2] [ literal[int] ]< identifier[bb1] [ literal[int] ]:
keyword[return] keyword[False]
keyword[return] keyword[True] | def do_intersect(bb1, bb2):
"""
Helper function that returns True if two bounding boxes overlap.
"""
if bb1[0] + bb1[2] < bb2[0] or bb2[0] + bb2[2] < bb1[0]:
return False # depends on [control=['if'], data=[]]
if bb1[1] + bb1[3] < bb2[1] or bb2[1] + bb2[3] < bb1[1]:
return False # depends on [control=['if'], data=[]]
return True |
def mime_type(instance):
"""Ensure the 'mime_type' property of file objects comes from the Template
column in the IANA media type registry.
"""
mime_pattern = re.compile(r'^(application|audio|font|image|message|model'
'|multipart|text|video)/[a-zA-Z0-9.+_-]+')
for key, obj in instance['objects'].items():
if ('type' in obj and obj['type'] == 'file' and 'mime_type' in obj):
if enums.media_types():
if obj['mime_type'] not in enums.media_types():
yield JSONError("The 'mime_type' property of object '%s' "
"('%s') should be an IANA registered MIME "
"Type of the form 'type/subtype'."
% (key, obj['mime_type']), instance['id'],
'mime-type')
else:
info("Can't reach IANA website; using regex for mime types.")
if not mime_pattern.match(obj['mime_type']):
yield JSONError("The 'mime_type' property of object '%s' "
"('%s') should be an IANA MIME Type of the"
" form 'type/subtype'."
% (key, obj['mime_type']), instance['id'],
'mime-type') | def function[mime_type, parameter[instance]]:
constant[Ensure the 'mime_type' property of file objects comes from the Template
column in the IANA media type registry.
]
variable[mime_pattern] assign[=] call[name[re].compile, parameter[constant[^(application|audio|font|image|message|model|multipart|text|video)/[a-zA-Z0-9.+_-]+]]]
for taget[tuple[[<ast.Name object at 0x7da1b10a4a00>, <ast.Name object at 0x7da1b10a5120>]]] in starred[call[call[name[instance]][constant[objects]].items, parameter[]]] begin[:]
if <ast.BoolOp object at 0x7da1b10a6c50> begin[:]
if call[name[enums].media_types, parameter[]] begin[:]
if compare[call[name[obj]][constant[mime_type]] <ast.NotIn object at 0x7da2590d7190> call[name[enums].media_types, parameter[]]] begin[:]
<ast.Yield object at 0x7da1b1041240> | keyword[def] identifier[mime_type] ( identifier[instance] ):
literal[string]
identifier[mime_pattern] = identifier[re] . identifier[compile] ( literal[string]
literal[string] )
keyword[for] identifier[key] , identifier[obj] keyword[in] identifier[instance] [ literal[string] ]. identifier[items] ():
keyword[if] ( literal[string] keyword[in] identifier[obj] keyword[and] identifier[obj] [ literal[string] ]== literal[string] keyword[and] literal[string] keyword[in] identifier[obj] ):
keyword[if] identifier[enums] . identifier[media_types] ():
keyword[if] identifier[obj] [ literal[string] ] keyword[not] keyword[in] identifier[enums] . identifier[media_types] ():
keyword[yield] identifier[JSONError] ( literal[string]
literal[string]
literal[string]
%( identifier[key] , identifier[obj] [ literal[string] ]), identifier[instance] [ literal[string] ],
literal[string] )
keyword[else] :
identifier[info] ( literal[string] )
keyword[if] keyword[not] identifier[mime_pattern] . identifier[match] ( identifier[obj] [ literal[string] ]):
keyword[yield] identifier[JSONError] ( literal[string]
literal[string]
literal[string]
%( identifier[key] , identifier[obj] [ literal[string] ]), identifier[instance] [ literal[string] ],
literal[string] ) | def mime_type(instance):
"""Ensure the 'mime_type' property of file objects comes from the Template
column in the IANA media type registry.
"""
mime_pattern = re.compile('^(application|audio|font|image|message|model|multipart|text|video)/[a-zA-Z0-9.+_-]+')
for (key, obj) in instance['objects'].items():
if 'type' in obj and obj['type'] == 'file' and ('mime_type' in obj):
if enums.media_types():
if obj['mime_type'] not in enums.media_types():
yield JSONError("The 'mime_type' property of object '%s' ('%s') should be an IANA registered MIME Type of the form 'type/subtype'." % (key, obj['mime_type']), instance['id'], 'mime-type') # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
else:
info("Can't reach IANA website; using regex for mime types.")
if not mime_pattern.match(obj['mime_type']):
yield JSONError("The 'mime_type' property of object '%s' ('%s') should be an IANA MIME Type of the form 'type/subtype'." % (key, obj['mime_type']), instance['id'], 'mime-type') # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]] |
def _get_updated_rows(self, auth, function):
""" Get rows updated by last update query
* `function` [function]
Function to use for searching (one of the search_* functions).
Helper function used to fetch all rows which was updated by the
latest UPDATE ... RETURNING id query.
"""
# Get dicts for all rows which were edited by building a query for
# search_*. Each row returned from UPDATE ... RETURNING id gives us one
# query part (qp) which then are combined to one big query for the
# search_* API call.
qps = []
for row in self._curs_pg:
qps.append(
{
'operator': 'equals',
'val1': 'id',
'val2': row['id']
}
)
# if we didn't update anything return empty list
if len(qps) == 0:
return []
# fetch list of objects based on IDs
q = qps[0]
for qp in qps[1:]:
q = {
'operator': 'or',
'val1': q,
'val2': qp
}
updated = function(auth, q, { 'max_result': 10000 })['result']
return updated | def function[_get_updated_rows, parameter[self, auth, function]]:
constant[ Get rows updated by last update query
* `function` [function]
Function to use for searching (one of the search_* functions).
Helper function used to fetch all rows which was updated by the
latest UPDATE ... RETURNING id query.
]
variable[qps] assign[=] list[[]]
for taget[name[row]] in starred[name[self]._curs_pg] begin[:]
call[name[qps].append, parameter[dictionary[[<ast.Constant object at 0x7da20e9b2ec0>, <ast.Constant object at 0x7da20e9b0400>, <ast.Constant object at 0x7da20e9b2380>], [<ast.Constant object at 0x7da20e9b0160>, <ast.Constant object at 0x7da20e9b1ae0>, <ast.Subscript object at 0x7da20e9b0370>]]]]
if compare[call[name[len], parameter[name[qps]]] equal[==] constant[0]] begin[:]
return[list[[]]]
variable[q] assign[=] call[name[qps]][constant[0]]
for taget[name[qp]] in starred[call[name[qps]][<ast.Slice object at 0x7da20e9b0820>]] begin[:]
variable[q] assign[=] dictionary[[<ast.Constant object at 0x7da20e9b3eb0>, <ast.Constant object at 0x7da20e9b05b0>, <ast.Constant object at 0x7da20e9b0100>], [<ast.Constant object at 0x7da20e9b1ff0>, <ast.Name object at 0x7da20e9b0a00>, <ast.Name object at 0x7da20e9b1750>]]
variable[updated] assign[=] call[call[name[function], parameter[name[auth], name[q], dictionary[[<ast.Constant object at 0x7da20e9b2fb0>], [<ast.Constant object at 0x7da20e9b0550>]]]]][constant[result]]
return[name[updated]] | keyword[def] identifier[_get_updated_rows] ( identifier[self] , identifier[auth] , identifier[function] ):
literal[string]
identifier[qps] =[]
keyword[for] identifier[row] keyword[in] identifier[self] . identifier[_curs_pg] :
identifier[qps] . identifier[append] (
{
literal[string] : literal[string] ,
literal[string] : literal[string] ,
literal[string] : identifier[row] [ literal[string] ]
}
)
keyword[if] identifier[len] ( identifier[qps] )== literal[int] :
keyword[return] []
identifier[q] = identifier[qps] [ literal[int] ]
keyword[for] identifier[qp] keyword[in] identifier[qps] [ literal[int] :]:
identifier[q] ={
literal[string] : literal[string] ,
literal[string] : identifier[q] ,
literal[string] : identifier[qp]
}
identifier[updated] = identifier[function] ( identifier[auth] , identifier[q] ,{ literal[string] : literal[int] })[ literal[string] ]
keyword[return] identifier[updated] | def _get_updated_rows(self, auth, function):
""" Get rows updated by last update query
* `function` [function]
Function to use for searching (one of the search_* functions).
Helper function used to fetch all rows which was updated by the
latest UPDATE ... RETURNING id query.
"""
# Get dicts for all rows which were edited by building a query for
# search_*. Each row returned from UPDATE ... RETURNING id gives us one
# query part (qp) which then are combined to one big query for the
# search_* API call.
qps = []
for row in self._curs_pg:
qps.append({'operator': 'equals', 'val1': 'id', 'val2': row['id']}) # depends on [control=['for'], data=['row']]
# if we didn't update anything return empty list
if len(qps) == 0:
return [] # depends on [control=['if'], data=[]]
# fetch list of objects based on IDs
q = qps[0]
for qp in qps[1:]:
q = {'operator': 'or', 'val1': q, 'val2': qp} # depends on [control=['for'], data=['qp']]
updated = function(auth, q, {'max_result': 10000})['result']
return updated |
def t_binaryValue(t):
r'[+-]?[0-9]+[bB]'
# We must match [0-9], and then check the validity of the binary number.
# If we match [0-1], the invalid binary number "2b" would match
# 'decimalValue' 2 and 'IDENTIFIER 'b'.
if re.search(r'[2-9]', t.value) is not None:
msg = _format("Invalid binary number {0!A}", t.value)
t.lexer.last_msg = msg
t.type = 'error'
# Setting error causes the value to be automatically skipped
else:
t.value = int(t.value[0:-1], 2)
return t | def function[t_binaryValue, parameter[t]]:
constant[[+-]?[0-9]+[bB]]
if compare[call[name[re].search, parameter[constant[[2-9]], name[t].value]] is_not constant[None]] begin[:]
variable[msg] assign[=] call[name[_format], parameter[constant[Invalid binary number {0!A}], name[t].value]]
name[t].lexer.last_msg assign[=] name[msg]
name[t].type assign[=] constant[error]
return[name[t]] | keyword[def] identifier[t_binaryValue] ( identifier[t] ):
literal[string]
keyword[if] identifier[re] . identifier[search] ( literal[string] , identifier[t] . identifier[value] ) keyword[is] keyword[not] keyword[None] :
identifier[msg] = identifier[_format] ( literal[string] , identifier[t] . identifier[value] )
identifier[t] . identifier[lexer] . identifier[last_msg] = identifier[msg]
identifier[t] . identifier[type] = literal[string]
keyword[else] :
identifier[t] . identifier[value] = identifier[int] ( identifier[t] . identifier[value] [ literal[int] :- literal[int] ], literal[int] )
keyword[return] identifier[t] | def t_binaryValue(t):
"""[+-]?[0-9]+[bB]"""
# We must match [0-9], and then check the validity of the binary number.
# If we match [0-1], the invalid binary number "2b" would match
# 'decimalValue' 2 and 'IDENTIFIER 'b'.
if re.search('[2-9]', t.value) is not None:
msg = _format('Invalid binary number {0!A}', t.value)
t.lexer.last_msg = msg
t.type = 'error' # depends on [control=['if'], data=[]]
else:
# Setting error causes the value to be automatically skipped
t.value = int(t.value[0:-1], 2)
return t |
def setAsApplication(myappid):
"""
Tells Windows this is an independent application with an unique icon on task bar.
id is an unique string to identify this application, like: 'mycompany.myproduct.subproduct.version'
"""
if os.name == 'nt':
import ctypes
ctypes.windll.shell32.SetCurrentProcessExplicitAppUserModelID(myappid) | def function[setAsApplication, parameter[myappid]]:
constant[
Tells Windows this is an independent application with an unique icon on task bar.
id is an unique string to identify this application, like: 'mycompany.myproduct.subproduct.version'
]
if compare[name[os].name equal[==] constant[nt]] begin[:]
import module[ctypes]
call[name[ctypes].windll.shell32.SetCurrentProcessExplicitAppUserModelID, parameter[name[myappid]]] | keyword[def] identifier[setAsApplication] ( identifier[myappid] ):
literal[string]
keyword[if] identifier[os] . identifier[name] == literal[string] :
keyword[import] identifier[ctypes]
identifier[ctypes] . identifier[windll] . identifier[shell32] . identifier[SetCurrentProcessExplicitAppUserModelID] ( identifier[myappid] ) | def setAsApplication(myappid):
"""
Tells Windows this is an independent application with an unique icon on task bar.
id is an unique string to identify this application, like: 'mycompany.myproduct.subproduct.version'
"""
if os.name == 'nt':
import ctypes
ctypes.windll.shell32.SetCurrentProcessExplicitAppUserModelID(myappid) # depends on [control=['if'], data=[]] |
def _sel_to_text(self, cell_range):
"""Copy an array portion to a unicode string"""
if not cell_range:
return
row_min, row_max, col_min, col_max = get_idx_rect(cell_range)
if col_min == 0 and col_max == (self.model().cols_loaded-1):
# we've selected a whole column. It isn't possible to
# select only the first part of a column without loading more,
# so we can treat it as intentional and copy the whole thing
col_max = self.model().total_cols-1
if row_min == 0 and row_max == (self.model().rows_loaded-1):
row_max = self.model().total_rows-1
_data = self.model().get_data()
if PY3:
output = io.BytesIO()
else:
output = io.StringIO()
try:
np.savetxt(output, _data[row_min:row_max+1, col_min:col_max+1],
delimiter='\t', fmt=self.model().get_format())
except:
QMessageBox.warning(self, _("Warning"),
_("It was not possible to copy values for "
"this array"))
return
contents = output.getvalue().decode('utf-8')
output.close()
return contents | def function[_sel_to_text, parameter[self, cell_range]]:
constant[Copy an array portion to a unicode string]
if <ast.UnaryOp object at 0x7da2043479a0> begin[:]
return[None]
<ast.Tuple object at 0x7da2043444f0> assign[=] call[name[get_idx_rect], parameter[name[cell_range]]]
if <ast.BoolOp object at 0x7da204344ac0> begin[:]
variable[col_max] assign[=] binary_operation[call[name[self].model, parameter[]].total_cols - constant[1]]
if <ast.BoolOp object at 0x7da204344a90> begin[:]
variable[row_max] assign[=] binary_operation[call[name[self].model, parameter[]].total_rows - constant[1]]
variable[_data] assign[=] call[call[name[self].model, parameter[]].get_data, parameter[]]
if name[PY3] begin[:]
variable[output] assign[=] call[name[io].BytesIO, parameter[]]
<ast.Try object at 0x7da2043476a0>
variable[contents] assign[=] call[call[name[output].getvalue, parameter[]].decode, parameter[constant[utf-8]]]
call[name[output].close, parameter[]]
return[name[contents]] | keyword[def] identifier[_sel_to_text] ( identifier[self] , identifier[cell_range] ):
literal[string]
keyword[if] keyword[not] identifier[cell_range] :
keyword[return]
identifier[row_min] , identifier[row_max] , identifier[col_min] , identifier[col_max] = identifier[get_idx_rect] ( identifier[cell_range] )
keyword[if] identifier[col_min] == literal[int] keyword[and] identifier[col_max] ==( identifier[self] . identifier[model] (). identifier[cols_loaded] - literal[int] ):
identifier[col_max] = identifier[self] . identifier[model] (). identifier[total_cols] - literal[int]
keyword[if] identifier[row_min] == literal[int] keyword[and] identifier[row_max] ==( identifier[self] . identifier[model] (). identifier[rows_loaded] - literal[int] ):
identifier[row_max] = identifier[self] . identifier[model] (). identifier[total_rows] - literal[int]
identifier[_data] = identifier[self] . identifier[model] (). identifier[get_data] ()
keyword[if] identifier[PY3] :
identifier[output] = identifier[io] . identifier[BytesIO] ()
keyword[else] :
identifier[output] = identifier[io] . identifier[StringIO] ()
keyword[try] :
identifier[np] . identifier[savetxt] ( identifier[output] , identifier[_data] [ identifier[row_min] : identifier[row_max] + literal[int] , identifier[col_min] : identifier[col_max] + literal[int] ],
identifier[delimiter] = literal[string] , identifier[fmt] = identifier[self] . identifier[model] (). identifier[get_format] ())
keyword[except] :
identifier[QMessageBox] . identifier[warning] ( identifier[self] , identifier[_] ( literal[string] ),
identifier[_] ( literal[string]
literal[string] ))
keyword[return]
identifier[contents] = identifier[output] . identifier[getvalue] (). identifier[decode] ( literal[string] )
identifier[output] . identifier[close] ()
keyword[return] identifier[contents] | def _sel_to_text(self, cell_range):
"""Copy an array portion to a unicode string"""
if not cell_range:
return # depends on [control=['if'], data=[]]
(row_min, row_max, col_min, col_max) = get_idx_rect(cell_range)
if col_min == 0 and col_max == self.model().cols_loaded - 1: # we've selected a whole column. It isn't possible to
# select only the first part of a column without loading more,
# so we can treat it as intentional and copy the whole thing
col_max = self.model().total_cols - 1 # depends on [control=['if'], data=[]]
if row_min == 0 and row_max == self.model().rows_loaded - 1:
row_max = self.model().total_rows - 1 # depends on [control=['if'], data=[]]
_data = self.model().get_data()
if PY3:
output = io.BytesIO() # depends on [control=['if'], data=[]]
else:
output = io.StringIO()
try:
np.savetxt(output, _data[row_min:row_max + 1, col_min:col_max + 1], delimiter='\t', fmt=self.model().get_format()) # depends on [control=['try'], data=[]]
except:
QMessageBox.warning(self, _('Warning'), _('It was not possible to copy values for this array'))
return # depends on [control=['except'], data=[]]
contents = output.getvalue().decode('utf-8')
output.close()
return contents |
def run(self, **kwargs):
"""
Run an IDF file with a given EnergyPlus weather file. This is a
wrapper for the EnergyPlus command line interface.
Parameters
----------
**kwargs
See eppy.runner.functions.run()
"""
# write the IDF to the current directory
self.saveas('in.idf')
# if `idd` is not passed explicitly, use the IDF.iddname
idd = kwargs.pop('idd', self.iddname)
epw = kwargs.pop('weather', self.epw)
try:
run(self, weather=epw, idd=idd, **kwargs)
finally:
os.remove('in.idf') | def function[run, parameter[self]]:
constant[
Run an IDF file with a given EnergyPlus weather file. This is a
wrapper for the EnergyPlus command line interface.
Parameters
----------
**kwargs
See eppy.runner.functions.run()
]
call[name[self].saveas, parameter[constant[in.idf]]]
variable[idd] assign[=] call[name[kwargs].pop, parameter[constant[idd], name[self].iddname]]
variable[epw] assign[=] call[name[kwargs].pop, parameter[constant[weather], name[self].epw]]
<ast.Try object at 0x7da1b11ed210> | keyword[def] identifier[run] ( identifier[self] ,** identifier[kwargs] ):
literal[string]
identifier[self] . identifier[saveas] ( literal[string] )
identifier[idd] = identifier[kwargs] . identifier[pop] ( literal[string] , identifier[self] . identifier[iddname] )
identifier[epw] = identifier[kwargs] . identifier[pop] ( literal[string] , identifier[self] . identifier[epw] )
keyword[try] :
identifier[run] ( identifier[self] , identifier[weather] = identifier[epw] , identifier[idd] = identifier[idd] ,** identifier[kwargs] )
keyword[finally] :
identifier[os] . identifier[remove] ( literal[string] ) | def run(self, **kwargs):
"""
Run an IDF file with a given EnergyPlus weather file. This is a
wrapper for the EnergyPlus command line interface.
Parameters
----------
**kwargs
See eppy.runner.functions.run()
"""
# write the IDF to the current directory
self.saveas('in.idf')
# if `idd` is not passed explicitly, use the IDF.iddname
idd = kwargs.pop('idd', self.iddname)
epw = kwargs.pop('weather', self.epw)
try:
run(self, weather=epw, idd=idd, **kwargs) # depends on [control=['try'], data=[]]
finally:
os.remove('in.idf') |
def as_sql(self, compiler, connection): # pylint: disable=arguments-differ
"""Compile SQL for this function."""
sql, params = super().as_sql(compiler, connection)
params.append(self.path)
return sql, params | def function[as_sql, parameter[self, compiler, connection]]:
constant[Compile SQL for this function.]
<ast.Tuple object at 0x7da1b19b4c40> assign[=] call[call[name[super], parameter[]].as_sql, parameter[name[compiler], name[connection]]]
call[name[params].append, parameter[name[self].path]]
return[tuple[[<ast.Name object at 0x7da1b19b5330>, <ast.Name object at 0x7da1b19b74c0>]]] | keyword[def] identifier[as_sql] ( identifier[self] , identifier[compiler] , identifier[connection] ):
literal[string]
identifier[sql] , identifier[params] = identifier[super] (). identifier[as_sql] ( identifier[compiler] , identifier[connection] )
identifier[params] . identifier[append] ( identifier[self] . identifier[path] )
keyword[return] identifier[sql] , identifier[params] | def as_sql(self, compiler, connection): # pylint: disable=arguments-differ
'Compile SQL for this function.'
(sql, params) = super().as_sql(compiler, connection)
params.append(self.path)
return (sql, params) |
def tri_ttr(k, a):
"""
Custom TTR function.
Triangle distribution does not have an analytical TTR function, but because
of its non-smooth nature, a blind integration scheme will converge very
slowly. However, by splitting the integration into two divided at the
discontinuity in the derivative, TTR can be made operative.
"""
from ...quad import quad_clenshaw_curtis
q1, w1 = quad_clenshaw_curtis(int(10**3*a), 0, a)
q2, w2 = quad_clenshaw_curtis(int(10**3*(1-a)), a, 1)
q = numpy.concatenate([q1,q2], 1)
w = numpy.concatenate([w1,w2])
w = w*numpy.where(q<a, 2*q/a, 2*(1-q)/(1-a))
from chaospy.poly import variable
x = variable()
orth = [x*0, x**0]
inner = numpy.sum(q*w, -1)
norms = [1., 1.]
A,B = [],[]
for n in range(k):
A.append(inner/norms[-1])
B.append(norms[-1]/norms[-2])
orth.append((x-A[-1])*orth[-1]-orth[-2]*B[-1])
y = orth[-1](*q)**2*w
inner = numpy.sum(q*y, -1)
norms.append(numpy.sum(y, -1))
A, B = numpy.array(A).T[0], numpy.array(B).T
return A[-1], B[-1] | def function[tri_ttr, parameter[k, a]]:
constant[
Custom TTR function.
Triangle distribution does not have an analytical TTR function, but because
of its non-smooth nature, a blind integration scheme will converge very
slowly. However, by splitting the integration into two divided at the
discontinuity in the derivative, TTR can be made operative.
]
from relative_module[quad] import module[quad_clenshaw_curtis]
<ast.Tuple object at 0x7da204565c90> assign[=] call[name[quad_clenshaw_curtis], parameter[call[name[int], parameter[binary_operation[binary_operation[constant[10] ** constant[3]] * name[a]]]], constant[0], name[a]]]
<ast.Tuple object at 0x7da204566e90> assign[=] call[name[quad_clenshaw_curtis], parameter[call[name[int], parameter[binary_operation[binary_operation[constant[10] ** constant[3]] * binary_operation[constant[1] - name[a]]]]], name[a], constant[1]]]
variable[q] assign[=] call[name[numpy].concatenate, parameter[list[[<ast.Name object at 0x7da2054a64d0>, <ast.Name object at 0x7da2054a7bb0>]], constant[1]]]
variable[w] assign[=] call[name[numpy].concatenate, parameter[list[[<ast.Name object at 0x7da2054a59f0>, <ast.Name object at 0x7da2054a7370>]]]]
variable[w] assign[=] binary_operation[name[w] * call[name[numpy].where, parameter[compare[name[q] less[<] name[a]], binary_operation[binary_operation[constant[2] * name[q]] / name[a]], binary_operation[binary_operation[constant[2] * binary_operation[constant[1] - name[q]]] / binary_operation[constant[1] - name[a]]]]]]
from relative_module[chaospy.poly] import module[variable]
variable[x] assign[=] call[name[variable], parameter[]]
variable[orth] assign[=] list[[<ast.BinOp object at 0x7da2054a7160>, <ast.BinOp object at 0x7da2054a51e0>]]
variable[inner] assign[=] call[name[numpy].sum, parameter[binary_operation[name[q] * name[w]], <ast.UnaryOp object at 0x7da18f58d090>]]
variable[norms] assign[=] list[[<ast.Constant object at 0x7da18f58ec50>, <ast.Constant object at 0x7da18f58ca00>]]
<ast.Tuple object at 0x7da18f58e530> assign[=] tuple[[<ast.List object at 0x7da2044c2050>, <ast.List object at 0x7da2044c2f20>]]
for taget[name[n]] in starred[call[name[range], parameter[name[k]]]] begin[:]
call[name[A].append, parameter[binary_operation[name[inner] / call[name[norms]][<ast.UnaryOp object at 0x7da2044c3eb0>]]]]
call[name[B].append, parameter[binary_operation[call[name[norms]][<ast.UnaryOp object at 0x7da2044c1d80>] / call[name[norms]][<ast.UnaryOp object at 0x7da2044c1510>]]]]
call[name[orth].append, parameter[binary_operation[binary_operation[binary_operation[name[x] - call[name[A]][<ast.UnaryOp object at 0x7da2044c38b0>]] * call[name[orth]][<ast.UnaryOp object at 0x7da2044c2890>]] - binary_operation[call[name[orth]][<ast.UnaryOp object at 0x7da2044c0f10>] * call[name[B]][<ast.UnaryOp object at 0x7da2044c3670>]]]]]
variable[y] assign[=] binary_operation[binary_operation[call[call[name[orth]][<ast.UnaryOp object at 0x7da2044c14b0>], parameter[<ast.Starred object at 0x7da2044c05e0>]] ** constant[2]] * name[w]]
variable[inner] assign[=] call[name[numpy].sum, parameter[binary_operation[name[q] * name[y]], <ast.UnaryOp object at 0x7da2044c14e0>]]
call[name[norms].append, parameter[call[name[numpy].sum, parameter[name[y], <ast.UnaryOp object at 0x7da2044c35b0>]]]]
<ast.Tuple object at 0x7da2044c3df0> assign[=] tuple[[<ast.Subscript object at 0x7da2044c21d0>, <ast.Attribute object at 0x7da2044c1ae0>]]
return[tuple[[<ast.Subscript object at 0x7da2044c2ad0>, <ast.Subscript object at 0x7da2044c3700>]]] | keyword[def] identifier[tri_ttr] ( identifier[k] , identifier[a] ):
literal[string]
keyword[from] ... identifier[quad] keyword[import] identifier[quad_clenshaw_curtis]
identifier[q1] , identifier[w1] = identifier[quad_clenshaw_curtis] ( identifier[int] ( literal[int] ** literal[int] * identifier[a] ), literal[int] , identifier[a] )
identifier[q2] , identifier[w2] = identifier[quad_clenshaw_curtis] ( identifier[int] ( literal[int] ** literal[int] *( literal[int] - identifier[a] )), identifier[a] , literal[int] )
identifier[q] = identifier[numpy] . identifier[concatenate] ([ identifier[q1] , identifier[q2] ], literal[int] )
identifier[w] = identifier[numpy] . identifier[concatenate] ([ identifier[w1] , identifier[w2] ])
identifier[w] = identifier[w] * identifier[numpy] . identifier[where] ( identifier[q] < identifier[a] , literal[int] * identifier[q] / identifier[a] , literal[int] *( literal[int] - identifier[q] )/( literal[int] - identifier[a] ))
keyword[from] identifier[chaospy] . identifier[poly] keyword[import] identifier[variable]
identifier[x] = identifier[variable] ()
identifier[orth] =[ identifier[x] * literal[int] , identifier[x] ** literal[int] ]
identifier[inner] = identifier[numpy] . identifier[sum] ( identifier[q] * identifier[w] ,- literal[int] )
identifier[norms] =[ literal[int] , literal[int] ]
identifier[A] , identifier[B] =[],[]
keyword[for] identifier[n] keyword[in] identifier[range] ( identifier[k] ):
identifier[A] . identifier[append] ( identifier[inner] / identifier[norms] [- literal[int] ])
identifier[B] . identifier[append] ( identifier[norms] [- literal[int] ]/ identifier[norms] [- literal[int] ])
identifier[orth] . identifier[append] (( identifier[x] - identifier[A] [- literal[int] ])* identifier[orth] [- literal[int] ]- identifier[orth] [- literal[int] ]* identifier[B] [- literal[int] ])
identifier[y] = identifier[orth] [- literal[int] ](* identifier[q] )** literal[int] * identifier[w]
identifier[inner] = identifier[numpy] . identifier[sum] ( identifier[q] * identifier[y] ,- literal[int] )
identifier[norms] . identifier[append] ( identifier[numpy] . identifier[sum] ( identifier[y] ,- literal[int] ))
identifier[A] , identifier[B] = identifier[numpy] . identifier[array] ( identifier[A] ). identifier[T] [ literal[int] ], identifier[numpy] . identifier[array] ( identifier[B] ). identifier[T]
keyword[return] identifier[A] [- literal[int] ], identifier[B] [- literal[int] ] | def tri_ttr(k, a):
"""
Custom TTR function.
Triangle distribution does not have an analytical TTR function, but because
of its non-smooth nature, a blind integration scheme will converge very
slowly. However, by splitting the integration into two divided at the
discontinuity in the derivative, TTR can be made operative.
"""
from ...quad import quad_clenshaw_curtis
(q1, w1) = quad_clenshaw_curtis(int(10 ** 3 * a), 0, a)
(q2, w2) = quad_clenshaw_curtis(int(10 ** 3 * (1 - a)), a, 1)
q = numpy.concatenate([q1, q2], 1)
w = numpy.concatenate([w1, w2])
w = w * numpy.where(q < a, 2 * q / a, 2 * (1 - q) / (1 - a))
from chaospy.poly import variable
x = variable()
orth = [x * 0, x ** 0]
inner = numpy.sum(q * w, -1)
norms = [1.0, 1.0]
(A, B) = ([], [])
for n in range(k):
A.append(inner / norms[-1])
B.append(norms[-1] / norms[-2])
orth.append((x - A[-1]) * orth[-1] - orth[-2] * B[-1])
y = orth[-1](*q) ** 2 * w
inner = numpy.sum(q * y, -1)
norms.append(numpy.sum(y, -1)) # depends on [control=['for'], data=[]]
(A, B) = (numpy.array(A).T[0], numpy.array(B).T)
return (A[-1], B[-1]) |
def clear_caches():
"""Jinja2 keeps internal caches for environments and lexers. These are
used so that Jinja2 doesn't have to recreate environments and lexers all
the time. Normally you don't have to care about that but if you are
measuring memory consumption you may want to clean the caches.
"""
from jinja2.environment import _spontaneous_environments
from jinja2.lexer import _lexer_cache
_spontaneous_environments.clear()
_lexer_cache.clear() | def function[clear_caches, parameter[]]:
constant[Jinja2 keeps internal caches for environments and lexers. These are
used so that Jinja2 doesn't have to recreate environments and lexers all
the time. Normally you don't have to care about that but if you are
measuring memory consumption you may want to clean the caches.
]
from relative_module[jinja2.environment] import module[_spontaneous_environments]
from relative_module[jinja2.lexer] import module[_lexer_cache]
call[name[_spontaneous_environments].clear, parameter[]]
call[name[_lexer_cache].clear, parameter[]] | keyword[def] identifier[clear_caches] ():
literal[string]
keyword[from] identifier[jinja2] . identifier[environment] keyword[import] identifier[_spontaneous_environments]
keyword[from] identifier[jinja2] . identifier[lexer] keyword[import] identifier[_lexer_cache]
identifier[_spontaneous_environments] . identifier[clear] ()
identifier[_lexer_cache] . identifier[clear] () | def clear_caches():
"""Jinja2 keeps internal caches for environments and lexers. These are
used so that Jinja2 doesn't have to recreate environments and lexers all
the time. Normally you don't have to care about that but if you are
measuring memory consumption you may want to clean the caches.
"""
from jinja2.environment import _spontaneous_environments
from jinja2.lexer import _lexer_cache
_spontaneous_environments.clear()
_lexer_cache.clear() |
def add_namespace(self, namespace, interface=None, check_extras=True,
load_now=False):
"""
register given namespace in global database of plugins
in case it's already registered, return the registration
"""
tempo = self._namespaces.get(namespace)
if tempo is None:
tempo = _Plugins(namespace, interface, check_extras)
self._namespaces[namespace] = tempo
if load_now:
tempo.load()
return tempo | def function[add_namespace, parameter[self, namespace, interface, check_extras, load_now]]:
constant[
register given namespace in global database of plugins
in case it's already registered, return the registration
]
variable[tempo] assign[=] call[name[self]._namespaces.get, parameter[name[namespace]]]
if compare[name[tempo] is constant[None]] begin[:]
variable[tempo] assign[=] call[name[_Plugins], parameter[name[namespace], name[interface], name[check_extras]]]
call[name[self]._namespaces][name[namespace]] assign[=] name[tempo]
if name[load_now] begin[:]
call[name[tempo].load, parameter[]]
return[name[tempo]] | keyword[def] identifier[add_namespace] ( identifier[self] , identifier[namespace] , identifier[interface] = keyword[None] , identifier[check_extras] = keyword[True] ,
identifier[load_now] = keyword[False] ):
literal[string]
identifier[tempo] = identifier[self] . identifier[_namespaces] . identifier[get] ( identifier[namespace] )
keyword[if] identifier[tempo] keyword[is] keyword[None] :
identifier[tempo] = identifier[_Plugins] ( identifier[namespace] , identifier[interface] , identifier[check_extras] )
identifier[self] . identifier[_namespaces] [ identifier[namespace] ]= identifier[tempo]
keyword[if] identifier[load_now] :
identifier[tempo] . identifier[load] ()
keyword[return] identifier[tempo] | def add_namespace(self, namespace, interface=None, check_extras=True, load_now=False):
"""
register given namespace in global database of plugins
in case it's already registered, return the registration
"""
tempo = self._namespaces.get(namespace)
if tempo is None:
tempo = _Plugins(namespace, interface, check_extras)
self._namespaces[namespace] = tempo # depends on [control=['if'], data=['tempo']]
if load_now:
tempo.load() # depends on [control=['if'], data=[]]
return tempo |
def create_weights(nodes, dist):
"""Create weights for the Laja method."""
poly = chaospy.quad.generate_stieltjes(dist, len(nodes)-1, retall=True)[0]
poly = chaospy.poly.flatten(chaospy.poly.Poly(poly))
weights_inverse = poly(nodes)
weights = numpy.linalg.inv(weights_inverse)
return weights[:, 0] | def function[create_weights, parameter[nodes, dist]]:
constant[Create weights for the Laja method.]
variable[poly] assign[=] call[call[name[chaospy].quad.generate_stieltjes, parameter[name[dist], binary_operation[call[name[len], parameter[name[nodes]]] - constant[1]]]]][constant[0]]
variable[poly] assign[=] call[name[chaospy].poly.flatten, parameter[call[name[chaospy].poly.Poly, parameter[name[poly]]]]]
variable[weights_inverse] assign[=] call[name[poly], parameter[name[nodes]]]
variable[weights] assign[=] call[name[numpy].linalg.inv, parameter[name[weights_inverse]]]
return[call[name[weights]][tuple[[<ast.Slice object at 0x7da18ede61d0>, <ast.Constant object at 0x7da18ede4970>]]]] | keyword[def] identifier[create_weights] ( identifier[nodes] , identifier[dist] ):
literal[string]
identifier[poly] = identifier[chaospy] . identifier[quad] . identifier[generate_stieltjes] ( identifier[dist] , identifier[len] ( identifier[nodes] )- literal[int] , identifier[retall] = keyword[True] )[ literal[int] ]
identifier[poly] = identifier[chaospy] . identifier[poly] . identifier[flatten] ( identifier[chaospy] . identifier[poly] . identifier[Poly] ( identifier[poly] ))
identifier[weights_inverse] = identifier[poly] ( identifier[nodes] )
identifier[weights] = identifier[numpy] . identifier[linalg] . identifier[inv] ( identifier[weights_inverse] )
keyword[return] identifier[weights] [:, literal[int] ] | def create_weights(nodes, dist):
"""Create weights for the Laja method."""
poly = chaospy.quad.generate_stieltjes(dist, len(nodes) - 1, retall=True)[0]
poly = chaospy.poly.flatten(chaospy.poly.Poly(poly))
weights_inverse = poly(nodes)
weights = numpy.linalg.inv(weights_inverse)
return weights[:, 0] |
def update_index(model_items, model_name, action='index', bulk_size=100, num_docs=-1, start_date=None, end_date=None, refresh=True):
'''
Updates the index for the provided model_items.
:param model_items: a list of model_items (django Model instances, or proxy instances) which are to be indexed/updated or deleted.
If action is 'index', the model_items must be serializable objects. If action is 'delete', the model_items must be primary keys
corresponding to obects in the index.
:param model_name: doctype, which must also be the model name.
:param action: the action that you'd like to perform on this group of data. Must be in ('index', 'delete') and defaults to 'index.'
:param bulk_size: bulk size for indexing. Defaults to 100.
:param num_docs: maximum number of model_items from the provided list to be indexed.
:param start_date: start date for indexing. Must be as YYYY-MM-DD.
:param end_date: end date for indexing. Must be as YYYY-MM-DD.
:param refresh: a boolean that determines whether to refresh the index, making all operations performed since the last refresh
immediately available for search, instead of needing to wait for the scheduled Elasticsearch execution. Defaults to True.
:note: If model_items contain multiple models, then num_docs is applied to *each* model. For example, if bulk_size is set to 5,
and item contains models Article and Article2, then 5 model_items of Article *and* 5 model_items of Article2 will be indexed.
'''
src = Bungiesearch()
if action == 'delete' and not hasattr(model_items, '__iter__'):
raise ValueError("If action is 'delete', model_items must be an iterable of primary keys.")
logger.info('Getting index for model {}.'.format(model_name))
for index_name in src.get_index(model_name):
index_instance = src.get_model_index(model_name)
model = index_instance.get_model()
if num_docs == -1:
if isinstance(model_items, (list, tuple)):
num_docs = len(model_items)
else:
model_items = filter_model_items(index_instance, model_items, model_name, start_date, end_date)
num_docs = model_items.count()
if not model_items.ordered:
model_items = model_items.order_by('pk')
else:
logger.warning('Limiting the number of model_items to {} to {}.'.format(action, num_docs))
logger.info('{} {} documents on index {}'.format(action, num_docs, index_name))
prev_step = 0
max_docs = num_docs + bulk_size if num_docs > bulk_size else bulk_size + 1
for next_step in range(bulk_size, max_docs, bulk_size):
logger.info('{}: documents {} to {} of {} total on index {}.'.format(action.capitalize(), prev_step, next_step, num_docs, index_name))
data = create_indexed_document(index_instance, model_items[prev_step:next_step], action)
bulk_index(src.get_es_instance(), data, index=index_name, doc_type=model.__name__, raise_on_error=True)
prev_step = next_step
if refresh:
src.get_es_instance().indices.refresh(index=index_name) | def function[update_index, parameter[model_items, model_name, action, bulk_size, num_docs, start_date, end_date, refresh]]:
constant[
Updates the index for the provided model_items.
:param model_items: a list of model_items (django Model instances, or proxy instances) which are to be indexed/updated or deleted.
If action is 'index', the model_items must be serializable objects. If action is 'delete', the model_items must be primary keys
corresponding to obects in the index.
:param model_name: doctype, which must also be the model name.
:param action: the action that you'd like to perform on this group of data. Must be in ('index', 'delete') and defaults to 'index.'
:param bulk_size: bulk size for indexing. Defaults to 100.
:param num_docs: maximum number of model_items from the provided list to be indexed.
:param start_date: start date for indexing. Must be as YYYY-MM-DD.
:param end_date: end date for indexing. Must be as YYYY-MM-DD.
:param refresh: a boolean that determines whether to refresh the index, making all operations performed since the last refresh
immediately available for search, instead of needing to wait for the scheduled Elasticsearch execution. Defaults to True.
:note: If model_items contain multiple models, then num_docs is applied to *each* model. For example, if bulk_size is set to 5,
and item contains models Article and Article2, then 5 model_items of Article *and* 5 model_items of Article2 will be indexed.
]
variable[src] assign[=] call[name[Bungiesearch], parameter[]]
if <ast.BoolOp object at 0x7da1b19902e0> begin[:]
<ast.Raise object at 0x7da1b1990430>
call[name[logger].info, parameter[call[constant[Getting index for model {}.].format, parameter[name[model_name]]]]]
for taget[name[index_name]] in starred[call[name[src].get_index, parameter[name[model_name]]]] begin[:]
variable[index_instance] assign[=] call[name[src].get_model_index, parameter[name[model_name]]]
variable[model] assign[=] call[name[index_instance].get_model, parameter[]]
if compare[name[num_docs] equal[==] <ast.UnaryOp object at 0x7da1b1990400>] begin[:]
if call[name[isinstance], parameter[name[model_items], tuple[[<ast.Name object at 0x7da1b1992d10>, <ast.Name object at 0x7da1b1992d40>]]]] begin[:]
variable[num_docs] assign[=] call[name[len], parameter[name[model_items]]]
call[name[logger].info, parameter[call[constant[{} {} documents on index {}].format, parameter[name[action], name[num_docs], name[index_name]]]]]
variable[prev_step] assign[=] constant[0]
variable[max_docs] assign[=] <ast.IfExp object at 0x7da1b1a1ff10>
for taget[name[next_step]] in starred[call[name[range], parameter[name[bulk_size], name[max_docs], name[bulk_size]]]] begin[:]
call[name[logger].info, parameter[call[constant[{}: documents {} to {} of {} total on index {}.].format, parameter[call[name[action].capitalize, parameter[]], name[prev_step], name[next_step], name[num_docs], name[index_name]]]]]
variable[data] assign[=] call[name[create_indexed_document], parameter[name[index_instance], call[name[model_items]][<ast.Slice object at 0x7da1b1a1f460>], name[action]]]
call[name[bulk_index], parameter[call[name[src].get_es_instance, parameter[]], name[data]]]
variable[prev_step] assign[=] name[next_step]
if name[refresh] begin[:]
call[call[name[src].get_es_instance, parameter[]].indices.refresh, parameter[]] | keyword[def] identifier[update_index] ( identifier[model_items] , identifier[model_name] , identifier[action] = literal[string] , identifier[bulk_size] = literal[int] , identifier[num_docs] =- literal[int] , identifier[start_date] = keyword[None] , identifier[end_date] = keyword[None] , identifier[refresh] = keyword[True] ):
literal[string]
identifier[src] = identifier[Bungiesearch] ()
keyword[if] identifier[action] == literal[string] keyword[and] keyword[not] identifier[hasattr] ( identifier[model_items] , literal[string] ):
keyword[raise] identifier[ValueError] ( literal[string] )
identifier[logger] . identifier[info] ( literal[string] . identifier[format] ( identifier[model_name] ))
keyword[for] identifier[index_name] keyword[in] identifier[src] . identifier[get_index] ( identifier[model_name] ):
identifier[index_instance] = identifier[src] . identifier[get_model_index] ( identifier[model_name] )
identifier[model] = identifier[index_instance] . identifier[get_model] ()
keyword[if] identifier[num_docs] ==- literal[int] :
keyword[if] identifier[isinstance] ( identifier[model_items] ,( identifier[list] , identifier[tuple] )):
identifier[num_docs] = identifier[len] ( identifier[model_items] )
keyword[else] :
identifier[model_items] = identifier[filter_model_items] ( identifier[index_instance] , identifier[model_items] , identifier[model_name] , identifier[start_date] , identifier[end_date] )
identifier[num_docs] = identifier[model_items] . identifier[count] ()
keyword[if] keyword[not] identifier[model_items] . identifier[ordered] :
identifier[model_items] = identifier[model_items] . identifier[order_by] ( literal[string] )
keyword[else] :
identifier[logger] . identifier[warning] ( literal[string] . identifier[format] ( identifier[action] , identifier[num_docs] ))
identifier[logger] . identifier[info] ( literal[string] . identifier[format] ( identifier[action] , identifier[num_docs] , identifier[index_name] ))
identifier[prev_step] = literal[int]
identifier[max_docs] = identifier[num_docs] + identifier[bulk_size] keyword[if] identifier[num_docs] > identifier[bulk_size] keyword[else] identifier[bulk_size] + literal[int]
keyword[for] identifier[next_step] keyword[in] identifier[range] ( identifier[bulk_size] , identifier[max_docs] , identifier[bulk_size] ):
identifier[logger] . identifier[info] ( literal[string] . identifier[format] ( identifier[action] . identifier[capitalize] (), identifier[prev_step] , identifier[next_step] , identifier[num_docs] , identifier[index_name] ))
identifier[data] = identifier[create_indexed_document] ( identifier[index_instance] , identifier[model_items] [ identifier[prev_step] : identifier[next_step] ], identifier[action] )
identifier[bulk_index] ( identifier[src] . identifier[get_es_instance] (), identifier[data] , identifier[index] = identifier[index_name] , identifier[doc_type] = identifier[model] . identifier[__name__] , identifier[raise_on_error] = keyword[True] )
identifier[prev_step] = identifier[next_step]
keyword[if] identifier[refresh] :
identifier[src] . identifier[get_es_instance] (). identifier[indices] . identifier[refresh] ( identifier[index] = identifier[index_name] ) | def update_index(model_items, model_name, action='index', bulk_size=100, num_docs=-1, start_date=None, end_date=None, refresh=True):
"""
Updates the index for the provided model_items.
:param model_items: a list of model_items (django Model instances, or proxy instances) which are to be indexed/updated or deleted.
If action is 'index', the model_items must be serializable objects. If action is 'delete', the model_items must be primary keys
corresponding to obects in the index.
:param model_name: doctype, which must also be the model name.
:param action: the action that you'd like to perform on this group of data. Must be in ('index', 'delete') and defaults to 'index.'
:param bulk_size: bulk size for indexing. Defaults to 100.
:param num_docs: maximum number of model_items from the provided list to be indexed.
:param start_date: start date for indexing. Must be as YYYY-MM-DD.
:param end_date: end date for indexing. Must be as YYYY-MM-DD.
:param refresh: a boolean that determines whether to refresh the index, making all operations performed since the last refresh
immediately available for search, instead of needing to wait for the scheduled Elasticsearch execution. Defaults to True.
:note: If model_items contain multiple models, then num_docs is applied to *each* model. For example, if bulk_size is set to 5,
and item contains models Article and Article2, then 5 model_items of Article *and* 5 model_items of Article2 will be indexed.
"""
src = Bungiesearch()
if action == 'delete' and (not hasattr(model_items, '__iter__')):
raise ValueError("If action is 'delete', model_items must be an iterable of primary keys.") # depends on [control=['if'], data=[]]
logger.info('Getting index for model {}.'.format(model_name))
for index_name in src.get_index(model_name):
index_instance = src.get_model_index(model_name)
model = index_instance.get_model()
if num_docs == -1:
if isinstance(model_items, (list, tuple)):
num_docs = len(model_items) # depends on [control=['if'], data=[]]
else:
model_items = filter_model_items(index_instance, model_items, model_name, start_date, end_date)
num_docs = model_items.count()
if not model_items.ordered:
model_items = model_items.order_by('pk') # depends on [control=['if'], data=[]] # depends on [control=['if'], data=['num_docs']]
else:
logger.warning('Limiting the number of model_items to {} to {}.'.format(action, num_docs))
logger.info('{} {} documents on index {}'.format(action, num_docs, index_name))
prev_step = 0
max_docs = num_docs + bulk_size if num_docs > bulk_size else bulk_size + 1
for next_step in range(bulk_size, max_docs, bulk_size):
logger.info('{}: documents {} to {} of {} total on index {}.'.format(action.capitalize(), prev_step, next_step, num_docs, index_name))
data = create_indexed_document(index_instance, model_items[prev_step:next_step], action)
bulk_index(src.get_es_instance(), data, index=index_name, doc_type=model.__name__, raise_on_error=True)
prev_step = next_step # depends on [control=['for'], data=['next_step']]
if refresh:
src.get_es_instance().indices.refresh(index=index_name) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['index_name']] |
def domain_search(auth=None, **kwargs):
'''
Search domains
CLI Example:
.. code-block:: bash
salt '*' keystoneng.domain_search
salt '*' keystoneng.domain_search name=domain1
'''
cloud = get_operator_cloud(auth)
kwargs = _clean_kwargs(**kwargs)
return cloud.search_domains(**kwargs) | def function[domain_search, parameter[auth]]:
constant[
Search domains
CLI Example:
.. code-block:: bash
salt '*' keystoneng.domain_search
salt '*' keystoneng.domain_search name=domain1
]
variable[cloud] assign[=] call[name[get_operator_cloud], parameter[name[auth]]]
variable[kwargs] assign[=] call[name[_clean_kwargs], parameter[]]
return[call[name[cloud].search_domains, parameter[]]] | keyword[def] identifier[domain_search] ( identifier[auth] = keyword[None] ,** identifier[kwargs] ):
literal[string]
identifier[cloud] = identifier[get_operator_cloud] ( identifier[auth] )
identifier[kwargs] = identifier[_clean_kwargs] (** identifier[kwargs] )
keyword[return] identifier[cloud] . identifier[search_domains] (** identifier[kwargs] ) | def domain_search(auth=None, **kwargs):
"""
Search domains
CLI Example:
.. code-block:: bash
salt '*' keystoneng.domain_search
salt '*' keystoneng.domain_search name=domain1
"""
cloud = get_operator_cloud(auth)
kwargs = _clean_kwargs(**kwargs)
return cloud.search_domains(**kwargs) |
def rotate_and_traslate(cur, alpha, v0):
r"""Rotate and translate a curve."""
if len(cur) > 2 or (type(cur[0][0]) in [list, tuple]):
cur_list = cur[:]
for i in range(len(cur_list)):
curi = cur_list[i]
curi = rotate_and_traslate(curi, alpha, v0)
cur_list[i] = curi
return cur_list
else:
x0, y0 = cur
rot = np.matrix([[cos(alpha), -sin(alpha)], [sin(alpha), cos(alpha)]])
xn = []; yn = []
for i in range(len(x0)):
v = np.matrix([[x0[i]], [y0[i]]])
vi = np.dot(rot, v)
xn += [float(vi[0][0])+v0[0]]; yn += [float(vi[1][0])+v0[1]]
return xn, yn | def function[rotate_and_traslate, parameter[cur, alpha, v0]]:
constant[Rotate and translate a curve.]
if <ast.BoolOp object at 0x7da1b19698a0> begin[:]
variable[cur_list] assign[=] call[name[cur]][<ast.Slice object at 0x7da1b196bf40>]
for taget[name[i]] in starred[call[name[range], parameter[call[name[len], parameter[name[cur_list]]]]]] begin[:]
variable[curi] assign[=] call[name[cur_list]][name[i]]
variable[curi] assign[=] call[name[rotate_and_traslate], parameter[name[curi], name[alpha], name[v0]]]
call[name[cur_list]][name[i]] assign[=] name[curi]
return[name[cur_list]] | keyword[def] identifier[rotate_and_traslate] ( identifier[cur] , identifier[alpha] , identifier[v0] ):
literal[string]
keyword[if] identifier[len] ( identifier[cur] )> literal[int] keyword[or] ( identifier[type] ( identifier[cur] [ literal[int] ][ literal[int] ]) keyword[in] [ identifier[list] , identifier[tuple] ]):
identifier[cur_list] = identifier[cur] [:]
keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[len] ( identifier[cur_list] )):
identifier[curi] = identifier[cur_list] [ identifier[i] ]
identifier[curi] = identifier[rotate_and_traslate] ( identifier[curi] , identifier[alpha] , identifier[v0] )
identifier[cur_list] [ identifier[i] ]= identifier[curi]
keyword[return] identifier[cur_list]
keyword[else] :
identifier[x0] , identifier[y0] = identifier[cur]
identifier[rot] = identifier[np] . identifier[matrix] ([[ identifier[cos] ( identifier[alpha] ),- identifier[sin] ( identifier[alpha] )],[ identifier[sin] ( identifier[alpha] ), identifier[cos] ( identifier[alpha] )]])
identifier[xn] =[]; identifier[yn] =[]
keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[len] ( identifier[x0] )):
identifier[v] = identifier[np] . identifier[matrix] ([[ identifier[x0] [ identifier[i] ]],[ identifier[y0] [ identifier[i] ]]])
identifier[vi] = identifier[np] . identifier[dot] ( identifier[rot] , identifier[v] )
identifier[xn] +=[ identifier[float] ( identifier[vi] [ literal[int] ][ literal[int] ])+ identifier[v0] [ literal[int] ]]; identifier[yn] +=[ identifier[float] ( identifier[vi] [ literal[int] ][ literal[int] ])+ identifier[v0] [ literal[int] ]]
keyword[return] identifier[xn] , identifier[yn] | def rotate_and_traslate(cur, alpha, v0):
"""Rotate and translate a curve."""
if len(cur) > 2 or type(cur[0][0]) in [list, tuple]:
cur_list = cur[:]
for i in range(len(cur_list)):
curi = cur_list[i]
curi = rotate_and_traslate(curi, alpha, v0)
cur_list[i] = curi # depends on [control=['for'], data=['i']]
return cur_list # depends on [control=['if'], data=[]]
else:
(x0, y0) = cur
rot = np.matrix([[cos(alpha), -sin(alpha)], [sin(alpha), cos(alpha)]])
xn = []
yn = []
for i in range(len(x0)):
v = np.matrix([[x0[i]], [y0[i]]])
vi = np.dot(rot, v)
xn += [float(vi[0][0]) + v0[0]]
yn += [float(vi[1][0]) + v0[1]] # depends on [control=['for'], data=['i']]
return (xn, yn) |
def list_market_profit_and_loss(self, market_ids, include_settled_bets=None, include_bsp_bets=None,
net_of_commission=None, session=None, lightweight=None):
"""
Retrieve profit and loss for a given list of OPEN markets.
:param list market_ids: List of markets to calculate profit and loss
:param bool include_settled_bets: Option to include settled bets (partially settled markets only)
:param bool include_bsp_bets: Option to include BSP bets
:param bool net_of_commission: Option to return profit and loss net of users current commission
rate for this market including any special tariffs
:param requests.session session: Requests session object
:param bool lightweight: If True will return dict not a resource
:rtype: list[resources.MarketProfitLoss]
"""
params = clean_locals(locals())
method = '%s%s' % (self.URI, 'listMarketProfitAndLoss')
(response, elapsed_time) = self.request(method, params, session)
return self.process_response(response, resources.MarketProfitLoss, elapsed_time, lightweight) | def function[list_market_profit_and_loss, parameter[self, market_ids, include_settled_bets, include_bsp_bets, net_of_commission, session, lightweight]]:
constant[
Retrieve profit and loss for a given list of OPEN markets.
:param list market_ids: List of markets to calculate profit and loss
:param bool include_settled_bets: Option to include settled bets (partially settled markets only)
:param bool include_bsp_bets: Option to include BSP bets
:param bool net_of_commission: Option to return profit and loss net of users current commission
rate for this market including any special tariffs
:param requests.session session: Requests session object
:param bool lightweight: If True will return dict not a resource
:rtype: list[resources.MarketProfitLoss]
]
variable[params] assign[=] call[name[clean_locals], parameter[call[name[locals], parameter[]]]]
variable[method] assign[=] binary_operation[constant[%s%s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Attribute object at 0x7da1b1791de0>, <ast.Constant object at 0x7da1b17900a0>]]]
<ast.Tuple object at 0x7da1b1791750> assign[=] call[name[self].request, parameter[name[method], name[params], name[session]]]
return[call[name[self].process_response, parameter[name[response], name[resources].MarketProfitLoss, name[elapsed_time], name[lightweight]]]] | keyword[def] identifier[list_market_profit_and_loss] ( identifier[self] , identifier[market_ids] , identifier[include_settled_bets] = keyword[None] , identifier[include_bsp_bets] = keyword[None] ,
identifier[net_of_commission] = keyword[None] , identifier[session] = keyword[None] , identifier[lightweight] = keyword[None] ):
literal[string]
identifier[params] = identifier[clean_locals] ( identifier[locals] ())
identifier[method] = literal[string] %( identifier[self] . identifier[URI] , literal[string] )
( identifier[response] , identifier[elapsed_time] )= identifier[self] . identifier[request] ( identifier[method] , identifier[params] , identifier[session] )
keyword[return] identifier[self] . identifier[process_response] ( identifier[response] , identifier[resources] . identifier[MarketProfitLoss] , identifier[elapsed_time] , identifier[lightweight] ) | def list_market_profit_and_loss(self, market_ids, include_settled_bets=None, include_bsp_bets=None, net_of_commission=None, session=None, lightweight=None):
"""
Retrieve profit and loss for a given list of OPEN markets.
:param list market_ids: List of markets to calculate profit and loss
:param bool include_settled_bets: Option to include settled bets (partially settled markets only)
:param bool include_bsp_bets: Option to include BSP bets
:param bool net_of_commission: Option to return profit and loss net of users current commission
rate for this market including any special tariffs
:param requests.session session: Requests session object
:param bool lightweight: If True will return dict not a resource
:rtype: list[resources.MarketProfitLoss]
"""
params = clean_locals(locals())
method = '%s%s' % (self.URI, 'listMarketProfitAndLoss')
(response, elapsed_time) = self.request(method, params, session)
return self.process_response(response, resources.MarketProfitLoss, elapsed_time, lightweight) |
def is_fornyrdhislag(text: str):
"""
Basic check, only the number of lines matters: 8 for fornyrðislag.
>>> text1 = "Hljóðs bið ek allar\\nhelgar kindir,\\nmeiri ok minni\\nmögu Heimdallar;\\nviltu at ek, Valföðr,\\nvel fyr telja\\nforn spjöll fira,\\nþau er fremst of man."
>>> text2 = "Deyr fé,\\ndeyja frændr,\\ndeyr sjalfr it sama,\\nek veit einn,\\nat aldrei deyr:\\ndómr um dauðan hvern."
>>> MetreManager.is_fornyrdhislag(text1)
True
>>> MetreManager.is_fornyrdhislag(text2)
False
:param text:
:return:
"""
lines = [line for line in text.split("\n") if line]
return len(lines) == 8 | def function[is_fornyrdhislag, parameter[text]]:
constant[
Basic check, only the number of lines matters: 8 for fornyrðislag.
>>> text1 = "Hljóðs bið ek allar\nhelgar kindir,\nmeiri ok minni\nmögu Heimdallar;\nviltu at ek, Valföðr,\nvel fyr telja\nforn spjöll fira,\nþau er fremst of man."
>>> text2 = "Deyr fé,\ndeyja frændr,\ndeyr sjalfr it sama,\nek veit einn,\nat aldrei deyr:\ndómr um dauðan hvern."
>>> MetreManager.is_fornyrdhislag(text1)
True
>>> MetreManager.is_fornyrdhislag(text2)
False
:param text:
:return:
]
variable[lines] assign[=] <ast.ListComp object at 0x7da20c76e110>
return[compare[call[name[len], parameter[name[lines]]] equal[==] constant[8]]] | keyword[def] identifier[is_fornyrdhislag] ( identifier[text] : identifier[str] ):
literal[string]
identifier[lines] =[ identifier[line] keyword[for] identifier[line] keyword[in] identifier[text] . identifier[split] ( literal[string] ) keyword[if] identifier[line] ]
keyword[return] identifier[len] ( identifier[lines] )== literal[int] | def is_fornyrdhislag(text: str):
"""
Basic check, only the number of lines matters: 8 for fornyrðislag.
>>> text1 = "Hljóðs bið ek allar\\nhelgar kindir,\\nmeiri ok minni\\nmögu Heimdallar;\\nviltu at ek, Valföðr,\\nvel fyr telja\\nforn spjöll fira,\\nþau er fremst of man."
>>> text2 = "Deyr fé,\\ndeyja frændr,\\ndeyr sjalfr it sama,\\nek veit einn,\\nat aldrei deyr:\\ndómr um dauðan hvern."
>>> MetreManager.is_fornyrdhislag(text1)
True
>>> MetreManager.is_fornyrdhislag(text2)
False
:param text:
:return:
"""
lines = [line for line in text.split('\n') if line]
return len(lines) == 8 |
def uptime(self, event, nickname=None):
"""
Shows the amount of time since the given nickname has been
in the channel. If no nickname is given, I'll use my own.
"""
if nickname and nickname != self.nickname:
try:
uptime = self.timesince(self.joined[nickname])
except KeyError:
return "%s is not in the channel" % nickname
else:
if nickname == self.get_nickname(event):
prefix = "you have"
else:
prefix = "%s has" % nickname
return "%s been here for %s" % (prefix, uptime)
uptime = self.timesince(self.joined[self.nickname])
return "I've been here for %s" % uptime | def function[uptime, parameter[self, event, nickname]]:
constant[
Shows the amount of time since the given nickname has been
in the channel. If no nickname is given, I'll use my own.
]
if <ast.BoolOp object at 0x7da1b0ed9a80> begin[:]
<ast.Try object at 0x7da1b0ed9b40>
variable[uptime] assign[=] call[name[self].timesince, parameter[call[name[self].joined][name[self].nickname]]]
return[binary_operation[constant[I've been here for %s] <ast.Mod object at 0x7da2590d6920> name[uptime]]] | keyword[def] identifier[uptime] ( identifier[self] , identifier[event] , identifier[nickname] = keyword[None] ):
literal[string]
keyword[if] identifier[nickname] keyword[and] identifier[nickname] != identifier[self] . identifier[nickname] :
keyword[try] :
identifier[uptime] = identifier[self] . identifier[timesince] ( identifier[self] . identifier[joined] [ identifier[nickname] ])
keyword[except] identifier[KeyError] :
keyword[return] literal[string] % identifier[nickname]
keyword[else] :
keyword[if] identifier[nickname] == identifier[self] . identifier[get_nickname] ( identifier[event] ):
identifier[prefix] = literal[string]
keyword[else] :
identifier[prefix] = literal[string] % identifier[nickname]
keyword[return] literal[string] %( identifier[prefix] , identifier[uptime] )
identifier[uptime] = identifier[self] . identifier[timesince] ( identifier[self] . identifier[joined] [ identifier[self] . identifier[nickname] ])
keyword[return] literal[string] % identifier[uptime] | def uptime(self, event, nickname=None):
"""
Shows the amount of time since the given nickname has been
in the channel. If no nickname is given, I'll use my own.
"""
if nickname and nickname != self.nickname:
try:
uptime = self.timesince(self.joined[nickname]) # depends on [control=['try'], data=[]]
except KeyError:
return '%s is not in the channel' % nickname # depends on [control=['except'], data=[]]
else:
if nickname == self.get_nickname(event):
prefix = 'you have' # depends on [control=['if'], data=[]]
else:
prefix = '%s has' % nickname
return '%s been here for %s' % (prefix, uptime) # depends on [control=['if'], data=[]]
uptime = self.timesince(self.joined[self.nickname])
return "I've been here for %s" % uptime |
def render(self):
"""Outputs a <ul> for this set of radio fields."""
t = template.loader.get_template('support/forms/profile_picture.html')
return t.render(template.Context({'widget': self, 'MEDIA_URL': settings.MEDIA_URL })) | def function[render, parameter[self]]:
constant[Outputs a <ul> for this set of radio fields.]
variable[t] assign[=] call[name[template].loader.get_template, parameter[constant[support/forms/profile_picture.html]]]
return[call[name[t].render, parameter[call[name[template].Context, parameter[dictionary[[<ast.Constant object at 0x7da1b133d4e0>, <ast.Constant object at 0x7da1b133d450>], [<ast.Name object at 0x7da1b1357a00>, <ast.Attribute object at 0x7da1b1357190>]]]]]]] | keyword[def] identifier[render] ( identifier[self] ):
literal[string]
identifier[t] = identifier[template] . identifier[loader] . identifier[get_template] ( literal[string] )
keyword[return] identifier[t] . identifier[render] ( identifier[template] . identifier[Context] ({ literal[string] : identifier[self] , literal[string] : identifier[settings] . identifier[MEDIA_URL] })) | def render(self):
"""Outputs a <ul> for this set of radio fields."""
t = template.loader.get_template('support/forms/profile_picture.html')
return t.render(template.Context({'widget': self, 'MEDIA_URL': settings.MEDIA_URL})) |
def next(self):
""" Return the next window """
next_index = (self.index + 1) % len(self._browser.driver.window_handles)
next_handle = self._browser.driver.window_handles[next_index]
return Window(self._browser, next_handle) | def function[next, parameter[self]]:
constant[ Return the next window ]
variable[next_index] assign[=] binary_operation[binary_operation[name[self].index + constant[1]] <ast.Mod object at 0x7da2590d6920> call[name[len], parameter[name[self]._browser.driver.window_handles]]]
variable[next_handle] assign[=] call[name[self]._browser.driver.window_handles][name[next_index]]
return[call[name[Window], parameter[name[self]._browser, name[next_handle]]]] | keyword[def] identifier[next] ( identifier[self] ):
literal[string]
identifier[next_index] =( identifier[self] . identifier[index] + literal[int] )% identifier[len] ( identifier[self] . identifier[_browser] . identifier[driver] . identifier[window_handles] )
identifier[next_handle] = identifier[self] . identifier[_browser] . identifier[driver] . identifier[window_handles] [ identifier[next_index] ]
keyword[return] identifier[Window] ( identifier[self] . identifier[_browser] , identifier[next_handle] ) | def next(self):
""" Return the next window """
next_index = (self.index + 1) % len(self._browser.driver.window_handles)
next_handle = self._browser.driver.window_handles[next_index]
return Window(self._browser, next_handle) |
def clear(self):
"""
Remove all cache entries.
"""
db = sqlite3.connect(self.path)
c = db.cursor()
c.execute("DELETE FROM dirhashcache")
db.commit()
db.close() | def function[clear, parameter[self]]:
constant[
Remove all cache entries.
]
variable[db] assign[=] call[name[sqlite3].connect, parameter[name[self].path]]
variable[c] assign[=] call[name[db].cursor, parameter[]]
call[name[c].execute, parameter[constant[DELETE FROM dirhashcache]]]
call[name[db].commit, parameter[]]
call[name[db].close, parameter[]] | keyword[def] identifier[clear] ( identifier[self] ):
literal[string]
identifier[db] = identifier[sqlite3] . identifier[connect] ( identifier[self] . identifier[path] )
identifier[c] = identifier[db] . identifier[cursor] ()
identifier[c] . identifier[execute] ( literal[string] )
identifier[db] . identifier[commit] ()
identifier[db] . identifier[close] () | def clear(self):
"""
Remove all cache entries.
"""
db = sqlite3.connect(self.path)
c = db.cursor()
c.execute('DELETE FROM dirhashcache')
db.commit()
db.close() |
def _format_batch_statuses(statuses, batch_ids, tracker):
"""Takes a statuses dict and formats it for transmission with Protobuf and
ZMQ.
Args:
statuses (dict of int): Dict with batch ids as the key, status as value
batch_ids (list of str): The batch ids in their original order
tracker (BatchTracker): A batch tracker with access to invalid info
"""
proto_statuses = []
for batch_id in batch_ids:
if statuses[batch_id] == \
client_batch_submit_pb2.ClientBatchStatus.INVALID:
invalid_txns = tracker.get_invalid_txn_info(batch_id)
for txn_info in invalid_txns:
try:
txn_info['transaction_id'] = txn_info.pop('id')
except KeyError as e:
LOGGER.debug(e)
else:
invalid_txns = None
proto_statuses.append(
client_batch_submit_pb2.ClientBatchStatus(
batch_id=batch_id,
status=statuses[batch_id],
invalid_transactions=invalid_txns))
return proto_statuses | def function[_format_batch_statuses, parameter[statuses, batch_ids, tracker]]:
constant[Takes a statuses dict and formats it for transmission with Protobuf and
ZMQ.
Args:
statuses (dict of int): Dict with batch ids as the key, status as value
batch_ids (list of str): The batch ids in their original order
tracker (BatchTracker): A batch tracker with access to invalid info
]
variable[proto_statuses] assign[=] list[[]]
for taget[name[batch_id]] in starred[name[batch_ids]] begin[:]
if compare[call[name[statuses]][name[batch_id]] equal[==] name[client_batch_submit_pb2].ClientBatchStatus.INVALID] begin[:]
variable[invalid_txns] assign[=] call[name[tracker].get_invalid_txn_info, parameter[name[batch_id]]]
for taget[name[txn_info]] in starred[name[invalid_txns]] begin[:]
<ast.Try object at 0x7da20c7cb9a0>
call[name[proto_statuses].append, parameter[call[name[client_batch_submit_pb2].ClientBatchStatus, parameter[]]]]
return[name[proto_statuses]] | keyword[def] identifier[_format_batch_statuses] ( identifier[statuses] , identifier[batch_ids] , identifier[tracker] ):
literal[string]
identifier[proto_statuses] =[]
keyword[for] identifier[batch_id] keyword[in] identifier[batch_ids] :
keyword[if] identifier[statuses] [ identifier[batch_id] ]== identifier[client_batch_submit_pb2] . identifier[ClientBatchStatus] . identifier[INVALID] :
identifier[invalid_txns] = identifier[tracker] . identifier[get_invalid_txn_info] ( identifier[batch_id] )
keyword[for] identifier[txn_info] keyword[in] identifier[invalid_txns] :
keyword[try] :
identifier[txn_info] [ literal[string] ]= identifier[txn_info] . identifier[pop] ( literal[string] )
keyword[except] identifier[KeyError] keyword[as] identifier[e] :
identifier[LOGGER] . identifier[debug] ( identifier[e] )
keyword[else] :
identifier[invalid_txns] = keyword[None]
identifier[proto_statuses] . identifier[append] (
identifier[client_batch_submit_pb2] . identifier[ClientBatchStatus] (
identifier[batch_id] = identifier[batch_id] ,
identifier[status] = identifier[statuses] [ identifier[batch_id] ],
identifier[invalid_transactions] = identifier[invalid_txns] ))
keyword[return] identifier[proto_statuses] | def _format_batch_statuses(statuses, batch_ids, tracker):
"""Takes a statuses dict and formats it for transmission with Protobuf and
ZMQ.
Args:
statuses (dict of int): Dict with batch ids as the key, status as value
batch_ids (list of str): The batch ids in their original order
tracker (BatchTracker): A batch tracker with access to invalid info
"""
proto_statuses = []
for batch_id in batch_ids:
if statuses[batch_id] == client_batch_submit_pb2.ClientBatchStatus.INVALID:
invalid_txns = tracker.get_invalid_txn_info(batch_id)
for txn_info in invalid_txns:
try:
txn_info['transaction_id'] = txn_info.pop('id') # depends on [control=['try'], data=[]]
except KeyError as e:
LOGGER.debug(e) # depends on [control=['except'], data=['e']] # depends on [control=['for'], data=['txn_info']] # depends on [control=['if'], data=[]]
else:
invalid_txns = None
proto_statuses.append(client_batch_submit_pb2.ClientBatchStatus(batch_id=batch_id, status=statuses[batch_id], invalid_transactions=invalid_txns)) # depends on [control=['for'], data=['batch_id']]
return proto_statuses |
def to_unicode(text, charset=None):
"""Convert input to an `unicode` object.
For a `str` object, we'll first try to decode the bytes using the given
`charset` encoding (or UTF-8 if none is specified), then we fall back to
the latin1 encoding which might be correct or not, but at least preserves
the original byte sequence by mapping each byte to the corresponding
unicode code point in the range U+0000 to U+00FF.
For anything else, a simple `unicode()` conversion is attempted,
with special care taken with `Exception` objects.
"""
if isinstance(text, str):
try:
return unicode(text, charset or 'utf-8')
except UnicodeDecodeError:
return unicode(text, 'latin1')
elif isinstance(text, Exception):
if os.name == 'nt' and \
isinstance(text, (OSError, IOError)): # pragma: no cover
# the exception might have a localized error string encoded with
# ANSI codepage if OSError and IOError on Windows
try:
return unicode(str(text), 'mbcs')
except UnicodeError:
pass
# two possibilities for storing unicode strings in exception data:
try:
# custom __str__ method on the exception (e.g. PermissionError)
return unicode(text)
except UnicodeError:
# unicode arguments given to the exception (e.g. parse_date)
return ' '.join([to_unicode(arg) for arg in text.args])
return unicode(text) | def function[to_unicode, parameter[text, charset]]:
constant[Convert input to an `unicode` object.
For a `str` object, we'll first try to decode the bytes using the given
`charset` encoding (or UTF-8 if none is specified), then we fall back to
the latin1 encoding which might be correct or not, but at least preserves
the original byte sequence by mapping each byte to the corresponding
unicode code point in the range U+0000 to U+00FF.
For anything else, a simple `unicode()` conversion is attempted,
with special care taken with `Exception` objects.
]
if call[name[isinstance], parameter[name[text], name[str]]] begin[:]
<ast.Try object at 0x7da1b14081c0>
return[call[name[unicode], parameter[name[text]]]] | keyword[def] identifier[to_unicode] ( identifier[text] , identifier[charset] = keyword[None] ):
literal[string]
keyword[if] identifier[isinstance] ( identifier[text] , identifier[str] ):
keyword[try] :
keyword[return] identifier[unicode] ( identifier[text] , identifier[charset] keyword[or] literal[string] )
keyword[except] identifier[UnicodeDecodeError] :
keyword[return] identifier[unicode] ( identifier[text] , literal[string] )
keyword[elif] identifier[isinstance] ( identifier[text] , identifier[Exception] ):
keyword[if] identifier[os] . identifier[name] == literal[string] keyword[and] identifier[isinstance] ( identifier[text] ,( identifier[OSError] , identifier[IOError] )):
keyword[try] :
keyword[return] identifier[unicode] ( identifier[str] ( identifier[text] ), literal[string] )
keyword[except] identifier[UnicodeError] :
keyword[pass]
keyword[try] :
keyword[return] identifier[unicode] ( identifier[text] )
keyword[except] identifier[UnicodeError] :
keyword[return] literal[string] . identifier[join] ([ identifier[to_unicode] ( identifier[arg] ) keyword[for] identifier[arg] keyword[in] identifier[text] . identifier[args] ])
keyword[return] identifier[unicode] ( identifier[text] ) | def to_unicode(text, charset=None):
"""Convert input to an `unicode` object.
For a `str` object, we'll first try to decode the bytes using the given
`charset` encoding (or UTF-8 if none is specified), then we fall back to
the latin1 encoding which might be correct or not, but at least preserves
the original byte sequence by mapping each byte to the corresponding
unicode code point in the range U+0000 to U+00FF.
For anything else, a simple `unicode()` conversion is attempted,
with special care taken with `Exception` objects.
"""
if isinstance(text, str):
try:
return unicode(text, charset or 'utf-8') # depends on [control=['try'], data=[]]
except UnicodeDecodeError:
return unicode(text, 'latin1') # depends on [control=['except'], data=[]] # depends on [control=['if'], data=[]]
elif isinstance(text, Exception):
if os.name == 'nt' and isinstance(text, (OSError, IOError)): # pragma: no cover
# the exception might have a localized error string encoded with
# ANSI codepage if OSError and IOError on Windows
try:
return unicode(str(text), 'mbcs') # depends on [control=['try'], data=[]]
except UnicodeError:
pass # depends on [control=['except'], data=[]] # depends on [control=['if'], data=[]]
# two possibilities for storing unicode strings in exception data:
try:
# custom __str__ method on the exception (e.g. PermissionError)
return unicode(text) # depends on [control=['try'], data=[]]
except UnicodeError:
# unicode arguments given to the exception (e.g. parse_date)
return ' '.join([to_unicode(arg) for arg in text.args]) # depends on [control=['except'], data=[]] # depends on [control=['if'], data=[]]
return unicode(text) |
def _gatherDataFromLookups(gpos, scriptOrder):
"""
Gather kerning and classes from the applicable lookups
and return them in script order.
"""
lookupIndexes = _gatherLookupIndexes(gpos)
seenLookups = set()
kerningDictionaries = []
leftClassDictionaries = []
rightClassDictionaries = []
for script in scriptOrder:
kerning = []
leftClasses = []
rightClasses = []
for lookupIndex in lookupIndexes[script]:
if lookupIndex in seenLookups:
continue
seenLookups.add(lookupIndex)
result = _gatherKerningForLookup(gpos, lookupIndex)
if result is None:
continue
k, lG, rG = result
kerning.append(k)
leftClasses.append(lG)
rightClasses.append(rG)
if kerning:
kerningDictionaries.append(kerning)
leftClassDictionaries.append(leftClasses)
rightClassDictionaries.append(rightClasses)
return kerningDictionaries, leftClassDictionaries, rightClassDictionaries | def function[_gatherDataFromLookups, parameter[gpos, scriptOrder]]:
constant[
Gather kerning and classes from the applicable lookups
and return them in script order.
]
variable[lookupIndexes] assign[=] call[name[_gatherLookupIndexes], parameter[name[gpos]]]
variable[seenLookups] assign[=] call[name[set], parameter[]]
variable[kerningDictionaries] assign[=] list[[]]
variable[leftClassDictionaries] assign[=] list[[]]
variable[rightClassDictionaries] assign[=] list[[]]
for taget[name[script]] in starred[name[scriptOrder]] begin[:]
variable[kerning] assign[=] list[[]]
variable[leftClasses] assign[=] list[[]]
variable[rightClasses] assign[=] list[[]]
for taget[name[lookupIndex]] in starred[call[name[lookupIndexes]][name[script]]] begin[:]
if compare[name[lookupIndex] in name[seenLookups]] begin[:]
continue
call[name[seenLookups].add, parameter[name[lookupIndex]]]
variable[result] assign[=] call[name[_gatherKerningForLookup], parameter[name[gpos], name[lookupIndex]]]
if compare[name[result] is constant[None]] begin[:]
continue
<ast.Tuple object at 0x7da20c6c4790> assign[=] name[result]
call[name[kerning].append, parameter[name[k]]]
call[name[leftClasses].append, parameter[name[lG]]]
call[name[rightClasses].append, parameter[name[rG]]]
if name[kerning] begin[:]
call[name[kerningDictionaries].append, parameter[name[kerning]]]
call[name[leftClassDictionaries].append, parameter[name[leftClasses]]]
call[name[rightClassDictionaries].append, parameter[name[rightClasses]]]
return[tuple[[<ast.Name object at 0x7da20c6c5330>, <ast.Name object at 0x7da20c6c5090>, <ast.Name object at 0x7da20c6c4250>]]] | keyword[def] identifier[_gatherDataFromLookups] ( identifier[gpos] , identifier[scriptOrder] ):
literal[string]
identifier[lookupIndexes] = identifier[_gatherLookupIndexes] ( identifier[gpos] )
identifier[seenLookups] = identifier[set] ()
identifier[kerningDictionaries] =[]
identifier[leftClassDictionaries] =[]
identifier[rightClassDictionaries] =[]
keyword[for] identifier[script] keyword[in] identifier[scriptOrder] :
identifier[kerning] =[]
identifier[leftClasses] =[]
identifier[rightClasses] =[]
keyword[for] identifier[lookupIndex] keyword[in] identifier[lookupIndexes] [ identifier[script] ]:
keyword[if] identifier[lookupIndex] keyword[in] identifier[seenLookups] :
keyword[continue]
identifier[seenLookups] . identifier[add] ( identifier[lookupIndex] )
identifier[result] = identifier[_gatherKerningForLookup] ( identifier[gpos] , identifier[lookupIndex] )
keyword[if] identifier[result] keyword[is] keyword[None] :
keyword[continue]
identifier[k] , identifier[lG] , identifier[rG] = identifier[result]
identifier[kerning] . identifier[append] ( identifier[k] )
identifier[leftClasses] . identifier[append] ( identifier[lG] )
identifier[rightClasses] . identifier[append] ( identifier[rG] )
keyword[if] identifier[kerning] :
identifier[kerningDictionaries] . identifier[append] ( identifier[kerning] )
identifier[leftClassDictionaries] . identifier[append] ( identifier[leftClasses] )
identifier[rightClassDictionaries] . identifier[append] ( identifier[rightClasses] )
keyword[return] identifier[kerningDictionaries] , identifier[leftClassDictionaries] , identifier[rightClassDictionaries] | def _gatherDataFromLookups(gpos, scriptOrder):
"""
Gather kerning and classes from the applicable lookups
and return them in script order.
"""
lookupIndexes = _gatherLookupIndexes(gpos)
seenLookups = set()
kerningDictionaries = []
leftClassDictionaries = []
rightClassDictionaries = []
for script in scriptOrder:
kerning = []
leftClasses = []
rightClasses = []
for lookupIndex in lookupIndexes[script]:
if lookupIndex in seenLookups:
continue # depends on [control=['if'], data=[]]
seenLookups.add(lookupIndex)
result = _gatherKerningForLookup(gpos, lookupIndex)
if result is None:
continue # depends on [control=['if'], data=[]]
(k, lG, rG) = result
kerning.append(k)
leftClasses.append(lG)
rightClasses.append(rG) # depends on [control=['for'], data=['lookupIndex']]
if kerning:
kerningDictionaries.append(kerning)
leftClassDictionaries.append(leftClasses)
rightClassDictionaries.append(rightClasses) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['script']]
return (kerningDictionaries, leftClassDictionaries, rightClassDictionaries) |
def conclude_course(self, id, event):
"""
Conclude a course.
Delete or conclude an existing course
"""
path = {}
data = {}
params = {}
# REQUIRED - PATH - id
"""ID"""
path["id"] = id
# REQUIRED - event
"""The action to take on the course."""
self._validate_enum(event, ["delete", "conclude"])
params["event"] = event
self.logger.debug("DELETE /api/v1/courses/{id} with query params: {params} and form data: {data}".format(params=params, data=data, **path))
return self.generic_request("DELETE", "/api/v1/courses/{id}".format(**path), data=data, params=params, no_data=True) | def function[conclude_course, parameter[self, id, event]]:
constant[
Conclude a course.
Delete or conclude an existing course
]
variable[path] assign[=] dictionary[[], []]
variable[data] assign[=] dictionary[[], []]
variable[params] assign[=] dictionary[[], []]
constant[ID]
call[name[path]][constant[id]] assign[=] name[id]
constant[The action to take on the course.]
call[name[self]._validate_enum, parameter[name[event], list[[<ast.Constant object at 0x7da1b0b3b8e0>, <ast.Constant object at 0x7da1b0b3b970>]]]]
call[name[params]][constant[event]] assign[=] name[event]
call[name[self].logger.debug, parameter[call[constant[DELETE /api/v1/courses/{id} with query params: {params} and form data: {data}].format, parameter[]]]]
return[call[name[self].generic_request, parameter[constant[DELETE], call[constant[/api/v1/courses/{id}].format, parameter[]]]]] | keyword[def] identifier[conclude_course] ( identifier[self] , identifier[id] , identifier[event] ):
literal[string]
identifier[path] ={}
identifier[data] ={}
identifier[params] ={}
literal[string]
identifier[path] [ literal[string] ]= identifier[id]
literal[string]
identifier[self] . identifier[_validate_enum] ( identifier[event] ,[ literal[string] , literal[string] ])
identifier[params] [ literal[string] ]= identifier[event]
identifier[self] . identifier[logger] . identifier[debug] ( literal[string] . identifier[format] ( identifier[params] = identifier[params] , identifier[data] = identifier[data] ,** identifier[path] ))
keyword[return] identifier[self] . identifier[generic_request] ( literal[string] , literal[string] . identifier[format] (** identifier[path] ), identifier[data] = identifier[data] , identifier[params] = identifier[params] , identifier[no_data] = keyword[True] ) | def conclude_course(self, id, event):
"""
Conclude a course.
Delete or conclude an existing course
"""
path = {}
data = {}
params = {} # REQUIRED - PATH - id
'ID'
path['id'] = id # REQUIRED - event
'The action to take on the course.'
self._validate_enum(event, ['delete', 'conclude'])
params['event'] = event
self.logger.debug('DELETE /api/v1/courses/{id} with query params: {params} and form data: {data}'.format(params=params, data=data, **path))
return self.generic_request('DELETE', '/api/v1/courses/{id}'.format(**path), data=data, params=params, no_data=True) |
Subsets and Splits
No saved queries yet
Save your SQL queries to embed, download, and access them later. Queries will appear here once saved.