code (75-104k chars) | code_sememe (47-309k chars) | token_type (215-214k chars) | code_dependency (75-155k chars) |
---|---|---|---|
def solve(self):
"""Solve rpn expression, return None if not valid."""
popflag = True
self.tmpopslist = []
while True:
while self.opslist and popflag:
op = self.opslist.pop()
if self.is_variable(op):
op = self.variables.get(op)
if self.is_operator(op):
popflag = False
break
self.tmpopslist.append(op)
# operations
tmpr = self._get_temp_result(op)
if tmpr == 'ERROR':
return None
if tmpr is not None:
self.opslist.append('{r:.20f}'.format(r=tmpr))
if len(self.tmpopslist) > 0 or len(self.opslist) > 1:
popflag = True
else:
break
return float(self.opslist[0]) | def function[solve, parameter[self]]:
constant[Solve rpn expression, return None if not valid.]
variable[popflag] assign[=] constant[True]
name[self].tmpopslist assign[=] list[[]]
while constant[True] begin[:]
while <ast.BoolOp object at 0x7da207f9a860> begin[:]
variable[op] assign[=] call[name[self].opslist.pop, parameter[]]
if call[name[self].is_variable, parameter[name[op]]] begin[:]
variable[op] assign[=] call[name[self].variables.get, parameter[name[op]]]
if call[name[self].is_operator, parameter[name[op]]] begin[:]
variable[popflag] assign[=] constant[False]
break
call[name[self].tmpopslist.append, parameter[name[op]]]
variable[tmpr] assign[=] call[name[self]._get_temp_result, parameter[name[op]]]
if compare[name[tmpr] equal[==] constant[ERROR]] begin[:]
return[constant[None]]
if compare[name[tmpr] is_not constant[None]] begin[:]
call[name[self].opslist.append, parameter[call[constant[{r:.20f}].format, parameter[]]]]
if <ast.BoolOp object at 0x7da1b26ae710> begin[:]
variable[popflag] assign[=] constant[True]
return[call[name[float], parameter[call[name[self].opslist][constant[0]]]]] | keyword[def] identifier[solve] ( identifier[self] ):
literal[string]
identifier[popflag] = keyword[True]
identifier[self] . identifier[tmpopslist] =[]
keyword[while] keyword[True] :
keyword[while] identifier[self] . identifier[opslist] keyword[and] identifier[popflag] :
identifier[op] = identifier[self] . identifier[opslist] . identifier[pop] ()
keyword[if] identifier[self] . identifier[is_variable] ( identifier[op] ):
identifier[op] = identifier[self] . identifier[variables] . identifier[get] ( identifier[op] )
keyword[if] identifier[self] . identifier[is_operator] ( identifier[op] ):
identifier[popflag] = keyword[False]
keyword[break]
identifier[self] . identifier[tmpopslist] . identifier[append] ( identifier[op] )
identifier[tmpr] = identifier[self] . identifier[_get_temp_result] ( identifier[op] )
keyword[if] identifier[tmpr] == literal[string] :
keyword[return] keyword[None]
keyword[if] identifier[tmpr] keyword[is] keyword[not] keyword[None] :
identifier[self] . identifier[opslist] . identifier[append] ( literal[string] . identifier[format] ( identifier[r] = identifier[tmpr] ))
keyword[if] identifier[len] ( identifier[self] . identifier[tmpopslist] )> literal[int] keyword[or] identifier[len] ( identifier[self] . identifier[opslist] )> literal[int] :
identifier[popflag] = keyword[True]
keyword[else] :
keyword[break]
keyword[return] identifier[float] ( identifier[self] . identifier[opslist] [ literal[int] ]) | def solve(self):
"""Solve rpn expression, return None if not valid."""
popflag = True
self.tmpopslist = []
while True:
while self.opslist and popflag:
op = self.opslist.pop()
if self.is_variable(op):
op = self.variables.get(op) # depends on [control=['if'], data=[]]
if self.is_operator(op):
popflag = False
break # depends on [control=['if'], data=[]]
self.tmpopslist.append(op) # depends on [control=['while'], data=[]]
# operations
tmpr = self._get_temp_result(op)
if tmpr == 'ERROR':
return None # depends on [control=['if'], data=[]]
if tmpr is not None:
self.opslist.append('{r:.20f}'.format(r=tmpr)) # depends on [control=['if'], data=['tmpr']]
if len(self.tmpopslist) > 0 or len(self.opslist) > 1:
popflag = True # depends on [control=['if'], data=[]]
else:
break # depends on [control=['while'], data=[]]
return float(self.opslist[0]) |
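The `solve` method above treats `opslist` as a stack: it pops values (resolving variable names as it goes) until it hits an operator, folds the collected operands through `_get_temp_result`, and pushes the formatted result back until one value remains. A self-contained sketch of the same stack-based RPN evaluation idea, independent of the class internals (which are not shown here):

```python
# Minimal stack-based RPN evaluator; a sketch of the idea behind solve(),
# not the class's actual implementation.
def eval_rpn(tokens):
    """Evaluate an RPN token list, e.g. ['3', '4', '+', '2', '*'] -> 14.0."""
    ops = {
        '+': lambda a, b: a + b,
        '-': lambda a, b: a - b,
        '*': lambda a, b: a * b,
        '/': lambda a, b: a / b,
    }
    stack = []
    for tok in tokens:
        if tok in ops:
            if len(stack) < 2:
                return None  # malformed expression
            b, a = stack.pop(), stack.pop()
            stack.append(ops[tok](a, b))
        else:
            stack.append(float(tok))
    return stack[0] if len(stack) == 1 else None

print(eval_rpn(['3', '4', '+', '2', '*']))  # 14.0
```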
def parse_clubs(self, clubs_page):
"""Parses the DOM and returns character clubs attributes.
:type clubs_page: :class:`bs4.BeautifulSoup`
:param clubs_page: MAL character clubs page's DOM
:rtype: dict
:return: character clubs attributes.
"""
character_info = self.parse_sidebar(clubs_page)
second_col = clubs_page.find(u'div', {'id': 'content'}).find(u'table').find(u'tr').find_all(u'td', recursive=False)[1]
try:
clubs_header = second_col.find(u'div', text=u'Related Clubs')
character_info[u'clubs'] = []
if clubs_header:
curr_elt = clubs_header.nextSibling
while curr_elt is not None:
if curr_elt.name == u'div':
link = curr_elt.find(u'a')
club_id = int(re.match(r'/clubs\.php\?cid=(?P<id>[0-9]+)', link.get(u'href')).group(u'id'))
num_members = int(re.match(r'(?P<num>[0-9]+) members', curr_elt.find(u'small').text).group(u'num'))
character_info[u'clubs'].append(self.session.club(club_id).set({'name': link.text, 'num_members': num_members}))
curr_elt = curr_elt.nextSibling
except:
if not self.session.suppress_parse_exceptions:
raise
return character_info | def function[parse_clubs, parameter[self, clubs_page]]:
constant[Parses the DOM and returns character clubs attributes.
:type clubs_page: :class:`bs4.BeautifulSoup`
:param clubs_page: MAL character clubs page's DOM
:rtype: dict
:return: character clubs attributes.
]
variable[character_info] assign[=] call[name[self].parse_sidebar, parameter[name[clubs_page]]]
variable[second_col] assign[=] call[call[call[call[call[name[clubs_page].find, parameter[constant[div], dictionary[[<ast.Constant object at 0x7da1b265d600>], [<ast.Constant object at 0x7da1b265d630>]]]].find, parameter[constant[table]]].find, parameter[constant[tr]]].find_all, parameter[constant[td]]]][constant[1]]
<ast.Try object at 0x7da1b265d990>
return[name[character_info]] | keyword[def] identifier[parse_clubs] ( identifier[self] , identifier[clubs_page] ):
literal[string]
identifier[character_info] = identifier[self] . identifier[parse_sidebar] ( identifier[clubs_page] )
identifier[second_col] = identifier[clubs_page] . identifier[find] ( literal[string] ,{ literal[string] : literal[string] }). identifier[find] ( literal[string] ). identifier[find] ( literal[string] ). identifier[find_all] ( literal[string] , identifier[recursive] = keyword[False] )[ literal[int] ]
keyword[try] :
identifier[clubs_header] = identifier[second_col] . identifier[find] ( literal[string] , identifier[text] = literal[string] )
identifier[character_info] [ literal[string] ]=[]
keyword[if] identifier[clubs_header] :
identifier[curr_elt] = identifier[clubs_header] . identifier[nextSibling]
keyword[while] identifier[curr_elt] keyword[is] keyword[not] keyword[None] :
keyword[if] identifier[curr_elt] . identifier[name] == literal[string] :
identifier[link] = identifier[curr_elt] . identifier[find] ( literal[string] )
identifier[club_id] = identifier[int] ( identifier[re] . identifier[match] ( literal[string] , identifier[link] . identifier[get] ( literal[string] )). identifier[group] ( literal[string] ))
identifier[num_members] = identifier[int] ( identifier[re] . identifier[match] ( literal[string] , identifier[curr_elt] . identifier[find] ( literal[string] ). identifier[text] ). identifier[group] ( literal[string] ))
identifier[character_info] [ literal[string] ]. identifier[append] ( identifier[self] . identifier[session] . identifier[club] ( identifier[club_id] ). identifier[set] ({ literal[string] : identifier[link] . identifier[text] , literal[string] : identifier[num_members] }))
identifier[curr_elt] = identifier[curr_elt] . identifier[nextSibling]
keyword[except] :
keyword[if] keyword[not] identifier[self] . identifier[session] . identifier[suppress_parse_exceptions] :
keyword[raise]
keyword[return] identifier[character_info] | def parse_clubs(self, clubs_page):
"""Parses the DOM and returns character clubs attributes.
:type clubs_page: :class:`bs4.BeautifulSoup`
:param clubs_page: MAL character clubs page's DOM
:rtype: dict
:return: character clubs attributes.
"""
character_info = self.parse_sidebar(clubs_page)
second_col = clubs_page.find(u'div', {'id': 'content'}).find(u'table').find(u'tr').find_all(u'td', recursive=False)[1]
try:
clubs_header = second_col.find(u'div', text=u'Related Clubs')
character_info[u'clubs'] = []
if clubs_header:
curr_elt = clubs_header.nextSibling
while curr_elt is not None:
if curr_elt.name == u'div':
link = curr_elt.find(u'a')
club_id = int(re.match('/clubs\\.php\\?cid=(?P<id>[0-9]+)', link.get(u'href')).group(u'id'))
num_members = int(re.match('(?P<num>[0-9]+) members', curr_elt.find(u'small').text).group(u'num'))
character_info[u'clubs'].append(self.session.club(club_id).set({'name': link.text, 'num_members': num_members})) # depends on [control=['if'], data=[]]
curr_elt = curr_elt.nextSibling # depends on [control=['while'], data=['curr_elt']] # depends on [control=['if'], data=[]] # depends on [control=['try'], data=[]]
except:
if not self.session.suppress_parse_exceptions:
raise # depends on [control=['if'], data=[]] # depends on [control=['except'], data=[]]
return character_info |
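`parse_clubs` extracts each club's id and member count with two regular expressions, one over the link href and one over the `<small>` text. A standalone sketch of those extractions, applied to fabricated sample values:

```python
import re

href = '/clubs.php?cid=12345'  # hypothetical link target
members_text = '678 members'   # hypothetical <small> text
club_id = int(re.match(r'/clubs\.php\?cid=(?P<id>[0-9]+)', href).group('id'))
num_members = int(re.match(r'(?P<num>[0-9]+) members', members_text).group('num'))
print(club_id, num_members)  # 12345 678
```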
def delete_group(self, group_id):
"""
Removes a group.
:param group_id: The unique ID of the group.
:type group_id: ``str``
"""
response = self._perform_request(
url='/um/groups/%s' % group_id,
method='DELETE')
return response | def function[delete_group, parameter[self, group_id]]:
constant[
Removes a group.
:param group_id: The unique ID of the group.
:type group_id: ``str``
]
variable[response] assign[=] call[name[self]._perform_request, parameter[]]
return[name[response]] | keyword[def] identifier[delete_group] ( identifier[self] , identifier[group_id] ):
literal[string]
identifier[response] = identifier[self] . identifier[_perform_request] (
identifier[url] = literal[string] % identifier[group_id] ,
identifier[method] = literal[string] )
keyword[return] identifier[response] | def delete_group(self, group_id):
"""
Removes a group.
:param group_id: The unique ID of the group.
:type group_id: ``str``
"""
response = self._perform_request(url='/um/groups/%s' % group_id, method='DELETE')
return response |
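`delete_group` delegates to the client's internal `_perform_request`, which amounts to an HTTP DELETE against the `/um/groups/<id>` endpoint. A hedged sketch with the `requests` library; the base URL and auth arguments are assumptions for illustration, not part of the original client:

```python
import requests

def delete_group(base_url, auth, group_id):
    # Roughly what _perform_request(url='/um/groups/%s' % group_id,
    # method='DELETE') boils down to; error handling is illustrative only.
    response = requests.delete('%s/um/groups/%s' % (base_url, group_id), auth=auth)
    response.raise_for_status()
    return response
```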
def create_history_model(self, model, inherited):
"""
Creates a historical model to associate with the model provided.
"""
attrs = {
"__module__": self.module,
"_history_excluded_fields": self.excluded_fields,
}
app_module = "%s.models" % model._meta.app_label
if inherited:
# inherited use models module
attrs["__module__"] = model.__module__
elif model.__module__ != self.module:
# registered under different app
attrs["__module__"] = self.module
elif app_module != self.module:
# Abuse an internal API because the app registry is loading.
app = apps.app_configs[model._meta.app_label]
models_module = app.name
attrs["__module__"] = models_module
fields = self.copy_fields(model)
attrs.update(fields)
attrs.update(self.get_extra_fields(model, fields))
# type in python2 wants str as a first argument
attrs.update(Meta=type(str("Meta"), (), self.get_meta_options(model)))
if self.table_name is not None:
attrs["Meta"].db_table = self.table_name
# Set as the default then check for overrides
name = self.get_history_model_name(model)
registered_models[model._meta.db_table] = model
return python_2_unicode_compatible(type(str(name), self.bases, attrs)) | def function[create_history_model, parameter[self, model, inherited]]:
constant[
Creates a historical model to associate with the model provided.
]
variable[attrs] assign[=] dictionary[[<ast.Constant object at 0x7da20c6a82b0>, <ast.Constant object at 0x7da20c6a87f0>], [<ast.Attribute object at 0x7da20c6a8dc0>, <ast.Attribute object at 0x7da20c6a8a60>]]
variable[app_module] assign[=] binary_operation[constant[%s.models] <ast.Mod object at 0x7da2590d6920> name[model]._meta.app_label]
if name[inherited] begin[:]
call[name[attrs]][constant[__module__]] assign[=] name[model].__module__
variable[fields] assign[=] call[name[self].copy_fields, parameter[name[model]]]
call[name[attrs].update, parameter[name[fields]]]
call[name[attrs].update, parameter[call[name[self].get_extra_fields, parameter[name[model], name[fields]]]]]
call[name[attrs].update, parameter[]]
if compare[name[self].table_name is_not constant[None]] begin[:]
call[name[attrs]][constant[Meta]].db_table assign[=] name[self].table_name
variable[name] assign[=] call[name[self].get_history_model_name, parameter[name[model]]]
call[name[registered_models]][name[model]._meta.db_table] assign[=] name[model]
return[call[name[python_2_unicode_compatible], parameter[call[name[type], parameter[call[name[str], parameter[name[name]]], name[self].bases, name[attrs]]]]]] | keyword[def] identifier[create_history_model] ( identifier[self] , identifier[model] , identifier[inherited] ):
literal[string]
identifier[attrs] ={
literal[string] : identifier[self] . identifier[module] ,
literal[string] : identifier[self] . identifier[excluded_fields] ,
}
identifier[app_module] = literal[string] % identifier[model] . identifier[_meta] . identifier[app_label]
keyword[if] identifier[inherited] :
identifier[attrs] [ literal[string] ]= identifier[model] . identifier[__module__]
keyword[elif] identifier[model] . identifier[__module__] != identifier[self] . identifier[module] :
identifier[attrs] [ literal[string] ]= identifier[self] . identifier[module]
keyword[elif] identifier[app_module] != identifier[self] . identifier[module] :
identifier[app] = identifier[apps] . identifier[app_configs] [ identifier[model] . identifier[_meta] . identifier[app_label] ]
identifier[models_module] = identifier[app] . identifier[name]
identifier[attrs] [ literal[string] ]= identifier[models_module]
identifier[fields] = identifier[self] . identifier[copy_fields] ( identifier[model] )
identifier[attrs] . identifier[update] ( identifier[fields] )
identifier[attrs] . identifier[update] ( identifier[self] . identifier[get_extra_fields] ( identifier[model] , identifier[fields] ))
identifier[attrs] . identifier[update] ( identifier[Meta] = identifier[type] ( identifier[str] ( literal[string] ),(), identifier[self] . identifier[get_meta_options] ( identifier[model] )))
keyword[if] identifier[self] . identifier[table_name] keyword[is] keyword[not] keyword[None] :
identifier[attrs] [ literal[string] ]. identifier[db_table] = identifier[self] . identifier[table_name]
identifier[name] = identifier[self] . identifier[get_history_model_name] ( identifier[model] )
identifier[registered_models] [ identifier[model] . identifier[_meta] . identifier[db_table] ]= identifier[model]
keyword[return] identifier[python_2_unicode_compatible] ( identifier[type] ( identifier[str] ( identifier[name] ), identifier[self] . identifier[bases] , identifier[attrs] )) | def create_history_model(self, model, inherited):
"""
Creates a historical model to associate with the model provided.
"""
attrs = {'__module__': self.module, '_history_excluded_fields': self.excluded_fields}
app_module = '%s.models' % model._meta.app_label
if inherited:
# inherited use models module
attrs['__module__'] = model.__module__ # depends on [control=['if'], data=[]]
elif model.__module__ != self.module:
# registered under different app
attrs['__module__'] = self.module # depends on [control=['if'], data=[]]
elif app_module != self.module:
# Abuse an internal API because the app registry is loading.
app = apps.app_configs[model._meta.app_label]
models_module = app.name
attrs['__module__'] = models_module # depends on [control=['if'], data=[]]
fields = self.copy_fields(model)
attrs.update(fields)
attrs.update(self.get_extra_fields(model, fields))
# type in python2 wants str as a first argument
attrs.update(Meta=type(str('Meta'), (), self.get_meta_options(model)))
if self.table_name is not None:
attrs['Meta'].db_table = self.table_name # depends on [control=['if'], data=[]]
# Set as the default then check for overrides
name = self.get_history_model_name(model)
registered_models[model._meta.db_table] = model
return python_2_unicode_compatible(type(str(name), self.bases, attrs)) |
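The core trick in `create_history_model` is building a class at runtime with `type(name, bases, attrs)` after assembling the right `__module__`, copied fields, and `Meta` options. A minimal standalone illustration of that pattern, using a plain `object` base and made-up attributes:

```python
# Dynamic class creation with type(); the historical model above is built
# the same way, only with Django model bases and field dicts.
attrs = {
    '__module__': 'myapp.models',                  # hypothetical module path
    'describe': lambda self: type(self).__name__,
}
HistoricalThing = type('HistoricalThing', (object,), attrs)
print(HistoricalThing().describe())  # 'HistoricalThing'
print(HistoricalThing.__module__)    # 'myapp.models'
```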
def load(image):
r"""
Loads the ``image`` and returns a ndarray with the image's pixel content as well as
a header object.
The header can, with restrictions, be used to extract additional meta-information
about the image (e.g. using the methods in `~medpy.io.Header`). Additionally
it serves as a meta-data container that can be passed to `~medpy.io.save.save` when the
altered image is saved to the hard drive again. Note that the transfer of meta-data is
only possible, and even then not guaranteed, when the source and target image formats
are the same.
MedPy relies on SimpleITK, which enables the power of ITK for image loading and saving.
The supported image file formats should include at least the following.
Medical formats:
- ITK MetaImage (.mha/.raw, .mhd)
- Neuroimaging Informatics Technology Initiative (NIfTI) (.nia, .nii, .nii.gz, .hdr, .img, .img.gz)
- Analyze (plain, SPM99, SPM2) (.hdr/.img, .img.gz)
- Digital Imaging and Communications in Medicine (DICOM) (.dcm, .dicom)
- Digital Imaging and Communications in Medicine (DICOM) series (<directory>/)
- Nearly Raw Raster Data (Nrrd) (.nrrd, .nhdr)
- Medical Imaging NetCDF (MINC) (.mnc, .MNC)
- Guys Image Processing Lab (GIPL) (.gipl, .gipl.gz)
Microscopy formats:
- Medical Research Council (MRC) (.mrc, .rec)
- Bio-Rad (.pic, .PIC)
- LSM (Zeiss) microscopy images (.tif, .TIF, .tiff, .TIFF, .lsm, .LSM)
- Stimulate / Signal Data (SDT) (.sdt)
Visualization formats:
- VTK images (.vtk)
Other formats:
- Portable Network Graphics (PNG) (.png, .PNG)
- Joint Photographic Experts Group (JPEG) (.jpg, .JPG, .jpeg, .JPEG)
- Tagged Image File Format (TIFF) (.tif, .TIF, .tiff, .TIFF)
- Windows bitmap (.bmp, .BMP)
- Hierarchical Data Format (HDF5) (.h5 , .hdf5 , .he5)
- MSX-DOS Screen-x (.ge4, .ge5)
For information about which image formats, dimensionalities and pixel data types
your current configuration supports, run `python3 tests/support.py > myformats.log`.
For further information, see https://simpleitk.readthedocs.io .
Parameters
----------
image : string
Path to the image to load.
Returns
-------
image_data : ndarray
The image data as numpy array with order `x,y,z,c`.
image_header : Header
The image metadata as :mod:`medpy.io.Header`.
Raises
------
ImageLoadingError
If the image could not be loaded due to some reason.
"""
logger = Logger.getInstance()
logger.info('Loading image {}...'.format(image))
if not os.path.exists(image):
raise ImageLoadingError('The supplied image {} does not exist.'.format(image))
if os.path.isdir(image):
# !TODO: this does not load the meta-data, find a way to load it from a series, too
logger.info('Loading image as DICOM series. If more than one found in folder {} defaulting to first.'.format(image))
sitkimage = sitk.ReadImage(sitk.ImageSeriesReader_GetGDCMSeriesFileNames(image))
else:
sitkimage = sitk.ReadImage(image)
# Make image array data and header
header = Header(sitkimage=sitkimage)
image = sitk.GetArrayFromImage(sitkimage)
# Roll axes from z,y,x,c to x,y,z,c
if image.ndim == 4:
image = np.moveaxis(image, -1, 0)
image = image.T
return image, header | def function[load, parameter[image]]:
constant[
Loads the ``image`` and returns a ndarray with the image's pixel content as well as
a header object.
The header can, with restrictions, be used to extract additional meta-information
about the image (e.g. using the methods in `~medpy.io.Header`). Additionally
it serves as a meta-data container that can be passed to `~medpy.io.save.save` when the
altered image is saved to the hard drive again. Note that the transfer of meta-data is
only possible, and even then not guaranteed, when the source and target image formats
are the same.
MedPy relies on SimpleITK, which enables the power of ITK for image loading and saving.
The supported image file formats should include at least the following.
Medical formats:
- ITK MetaImage (.mha/.raw, .mhd)
- Neuroimaging Informatics Technology Initiative (NIfTI) (.nia, .nii, .nii.gz, .hdr, .img, .img.gz)
- Analyze (plain, SPM99, SPM2) (.hdr/.img, .img.gz)
- Digital Imaging and Communications in Medicine (DICOM) (.dcm, .dicom)
- Digital Imaging and Communications in Medicine (DICOM) series (<directory>/)
- Nearly Raw Raster Data (Nrrd) (.nrrd, .nhdr)
- Medical Imaging NetCDF (MINC) (.mnc, .MNC)
- Guys Image Processing Lab (GIPL) (.gipl, .gipl.gz)
Microscopy formats:
- Medical Research Council (MRC) (.mrc, .rec)
- Bio-Rad (.pic, .PIC)
- LSM (Zeiss) microscopy images (.tif, .TIF, .tiff, .TIFF, .lsm, .LSM)
- Stimulate / Signal Data (SDT) (.sdt)
Visualization formats:
- VTK images (.vtk)
Other formats:
- Portable Network Graphics (PNG) (.png, .PNG)
- Joint Photographic Experts Group (JPEG) (.jpg, .JPG, .jpeg, .JPEG)
- Tagged Image File Format (TIFF) (.tif, .TIF, .tiff, .TIFF)
- Windows bitmap (.bmp, .BMP)
- Hierarchical Data Format (HDF5) (.h5 , .hdf5 , .he5)
- MSX-DOS Screen-x (.ge4, .ge5)
For information about which image formats, dimensionalities and pixel data types
your current configuration supports, run `python3 tests/support.py > myformats.log`.
For further information, see https://simpleitk.readthedocs.io .
Parameters
----------
image : string
Path to the image to load.
Returns
-------
image_data : ndarray
The image data as numpy array with order `x,y,z,c`.
image_header : Header
The image metadata as :mod:`medpy.io.Header`.
Raises
------
ImageLoadingError
If the image could not be loaded due to some reason.
]
variable[logger] assign[=] call[name[Logger].getInstance, parameter[]]
call[name[logger].info, parameter[call[constant[Loading image {}...].format, parameter[name[image]]]]]
if <ast.UnaryOp object at 0x7da18ede5fc0> begin[:]
<ast.Raise object at 0x7da18ede6170>
if call[name[os].path.isdir, parameter[name[image]]] begin[:]
call[name[logger].info, parameter[call[constant[Loading image as DICOM series. If more than one found in folder {} defaulting to first.].format, parameter[name[image]]]]]
variable[sitkimage] assign[=] call[name[sitk].ReadImage, parameter[call[name[sitk].ImageSeriesReader_GetGDCMSeriesFileNames, parameter[name[image]]]]]
variable[header] assign[=] call[name[Header], parameter[]]
variable[image] assign[=] call[name[sitk].GetArrayFromImage, parameter[name[sitkimage]]]
if compare[name[image].ndim equal[==] constant[4]] begin[:]
variable[image] assign[=] call[name[np].moveaxis, parameter[name[image], <ast.UnaryOp object at 0x7da18f09fbb0>, constant[0]]]
variable[image] assign[=] name[image].T
return[tuple[[<ast.Name object at 0x7da18f09e050>, <ast.Name object at 0x7da18f09e4d0>]]] | keyword[def] identifier[load] ( identifier[image] ):
literal[string]
identifier[logger] = identifier[Logger] . identifier[getInstance] ()
identifier[logger] . identifier[info] ( literal[string] . identifier[format] ( identifier[image] ))
keyword[if] keyword[not] identifier[os] . identifier[path] . identifier[exists] ( identifier[image] ):
keyword[raise] identifier[ImageLoadingError] ( literal[string] . identifier[format] ( identifier[image] ))
keyword[if] identifier[os] . identifier[path] . identifier[isdir] ( identifier[image] ):
identifier[logger] . identifier[info] ( literal[string] . identifier[format] ( identifier[image] ))
identifier[sitkimage] = identifier[sitk] . identifier[ReadImage] ( identifier[sitk] . identifier[ImageSeriesReader_GetGDCMSeriesFileNames] ( identifier[image] ))
keyword[else] :
identifier[sitkimage] = identifier[sitk] . identifier[ReadImage] ( identifier[image] )
identifier[header] = identifier[Header] ( identifier[sitkimage] = identifier[sitkimage] )
identifier[image] = identifier[sitk] . identifier[GetArrayFromImage] ( identifier[sitkimage] )
keyword[if] identifier[image] . identifier[ndim] == literal[int] :
identifier[image] = identifier[np] . identifier[moveaxis] ( identifier[image] ,- literal[int] , literal[int] )
identifier[image] = identifier[image] . identifier[T]
keyword[return] identifier[image] , identifier[header] | def load(image):
"""
Loads the ``image`` and returns a ndarray with the image's pixel content as well as
a header object.
The header can, with restrictions, be used to extract additional meta-information
about the image (e.g. using the methods in `~medpy.io.Header`). Additionally
it serves as a meta-data container that can be passed to `~medpy.io.save.save` when the
altered image is saved to the hard drive again. Note that the transfer of meta-data is
only possible, and even then not guaranteed, when the source and target image formats
are the same.
MedPy relies on SimpleITK, which enables the power of ITK for image loading and saving.
The supported image file formats should include at least the following.
Medical formats:
- ITK MetaImage (.mha/.raw, .mhd)
- Neuroimaging Informatics Technology Initiative (NIfTI) (.nia, .nii, .nii.gz, .hdr, .img, .img.gz)
- Analyze (plain, SPM99, SPM2) (.hdr/.img, .img.gz)
- Digital Imaging and Communications in Medicine (DICOM) (.dcm, .dicom)
- Digital Imaging and Communications in Medicine (DICOM) series (<directory>/)
- Nearly Raw Raster Data (Nrrd) (.nrrd, .nhdr)
- Medical Imaging NetCDF (MINC) (.mnc, .MNC)
- Guys Image Processing Lab (GIPL) (.gipl, .gipl.gz)
Microscopy formats:
- Medical Research Council (MRC) (.mrc, .rec)
- Bio-Rad (.pic, .PIC)
- LSM (Zeiss) microscopy images (.tif, .TIF, .tiff, .TIFF, .lsm, .LSM)
- Stimulate / Signal Data (SDT) (.sdt)
Visualization formats:
- VTK images (.vtk)
Other formats:
- Portable Network Graphics (PNG) (.png, .PNG)
- Joint Photographic Experts Group (JPEG) (.jpg, .JPG, .jpeg, .JPEG)
- Tagged Image File Format (TIFF) (.tif, .TIF, .tiff, .TIFF)
- Windows bitmap (.bmp, .BMP)
- Hierarchical Data Format (HDF5) (.h5 , .hdf5 , .he5)
- MSX-DOS Screen-x (.ge4, .ge5)
For information about which image formats, dimensionalities and pixel data types
your current configuration supports, run `python3 tests/support.py > myformats.log`.
For further information, see https://simpleitk.readthedocs.io .
Parameters
----------
image : string
Path to the image to load.
Returns
-------
image_data : ndarray
The image data as numpy array with order `x,y,z,c`.
image_header : Header
The image metadata as :mod:`medpy.io.Header`.
Raises
------
ImageLoadingError
If the image could not be loaded due to some reason.
"""
logger = Logger.getInstance()
logger.info('Loading image {}...'.format(image))
if not os.path.exists(image):
raise ImageLoadingError('The supplied image {} does not exist.'.format(image)) # depends on [control=['if'], data=[]]
if os.path.isdir(image):
# !TODO: this does not load the meta-data, find a way to load it from a series, too
logger.info('Loading image as DICOM series. If more than one found in folder {} defaulting to first.'.format(image))
sitkimage = sitk.ReadImage(sitk.ImageSeriesReader_GetGDCMSeriesFileNames(image)) # depends on [control=['if'], data=[]]
else:
sitkimage = sitk.ReadImage(image)
# Make image array data and header
header = Header(sitkimage=sitkimage)
image = sitk.GetArrayFromImage(sitkimage)
# Roll axes from z,y,x,c to x,y,z,c
if image.ndim == 4:
image = np.moveaxis(image, -1, 0) # depends on [control=['if'], data=[]]
image = image.T
return (image, header) |
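Typical usage of `load`, assuming a NIfTI file on disk (the path below is a placeholder):

```python
from medpy.io import load

image_data, image_header = load('scan.nii.gz')  # placeholder path
print(image_data.shape, image_data.dtype)
```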
def get_netconf_client_capabilities_input_session_id(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
get_netconf_client_capabilities = ET.Element("get_netconf_client_capabilities")
config = get_netconf_client_capabilities
input = ET.SubElement(get_netconf_client_capabilities, "input")
session_id = ET.SubElement(input, "session-id")
session_id.text = kwargs.pop('session_id')
callback = kwargs.pop('callback', self._callback)
return callback(config) | def function[get_netconf_client_capabilities_input_session_id, parameter[self]]:
constant[Auto Generated Code
]
variable[config] assign[=] call[name[ET].Element, parameter[constant[config]]]
variable[get_netconf_client_capabilities] assign[=] call[name[ET].Element, parameter[constant[get_netconf_client_capabilities]]]
variable[config] assign[=] name[get_netconf_client_capabilities]
variable[input] assign[=] call[name[ET].SubElement, parameter[name[get_netconf_client_capabilities], constant[input]]]
variable[session_id] assign[=] call[name[ET].SubElement, parameter[name[input], constant[session-id]]]
name[session_id].text assign[=] call[name[kwargs].pop, parameter[constant[session_id]]]
variable[callback] assign[=] call[name[kwargs].pop, parameter[constant[callback], name[self]._callback]]
return[call[name[callback], parameter[name[config]]]] | keyword[def] identifier[get_netconf_client_capabilities_input_session_id] ( identifier[self] ,** identifier[kwargs] ):
literal[string]
identifier[config] = identifier[ET] . identifier[Element] ( literal[string] )
identifier[get_netconf_client_capabilities] = identifier[ET] . identifier[Element] ( literal[string] )
identifier[config] = identifier[get_netconf_client_capabilities]
identifier[input] = identifier[ET] . identifier[SubElement] ( identifier[get_netconf_client_capabilities] , literal[string] )
identifier[session_id] = identifier[ET] . identifier[SubElement] ( identifier[input] , literal[string] )
identifier[session_id] . identifier[text] = identifier[kwargs] . identifier[pop] ( literal[string] )
identifier[callback] = identifier[kwargs] . identifier[pop] ( literal[string] , identifier[self] . identifier[_callback] )
keyword[return] identifier[callback] ( identifier[config] ) | def get_netconf_client_capabilities_input_session_id(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element('config')
get_netconf_client_capabilities = ET.Element('get_netconf_client_capabilities')
config = get_netconf_client_capabilities
input = ET.SubElement(get_netconf_client_capabilities, 'input')
session_id = ET.SubElement(input, 'session-id')
session_id.text = kwargs.pop('session_id')
callback = kwargs.pop('callback', self._callback)
return callback(config) |
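The netconf helper above only assembles an XML payload before handing it to the callback. A standalone reconstruction of that payload with a made-up session id:

```python
import xml.etree.ElementTree as ET

config = ET.Element('get_netconf_client_capabilities')
input_node = ET.SubElement(config, 'input')
session_id = ET.SubElement(input_node, 'session-id')
session_id.text = '42'  # hypothetical session id
print(ET.tostring(config).decode())
# <get_netconf_client_capabilities><input><session-id>42</session-id></input></get_netconf_client_capabilities>
```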
def load_cifar10_dataset(shape=(-1, 32, 32, 3), path='data', plotable=False):
"""Load CIFAR-10 dataset.
It consists of 60000 32x32 colour images in 10 classes, with
6000 images per class. There are 50000 training images and 10000 test images.
The dataset is divided into five training batches and one test batch, each with
10000 images. The test batch contains exactly 1000 randomly-selected images from
each class. The training batches contain the remaining images in random order,
but some training batches may contain more images from one class than another.
Between them, the training batches contain exactly 5000 images from each class.
Parameters
----------
shape : tuple
The shape of the images, e.g. (-1, 3, 32, 32) and (-1, 32, 32, 3).
path : str
The path that the data is downloaded to; the default is ``data/cifar10/``.
plotable : boolean
Whether to plot some image examples; False by default.
Examples
--------
>>> X_train, y_train, X_test, y_test = tl.files.load_cifar10_dataset(shape=(-1, 32, 32, 3))
References
----------
- `CIFAR website <https://www.cs.toronto.edu/~kriz/cifar.html>`__
- `Data download link <https://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz>`__
- `<https://teratail.com/questions/28932>`__
"""
path = os.path.join(path, 'cifar10')
logging.info("Load or Download cifar10 > {}".format(path))
# Helper function to unpickle the data
def unpickle(file):
fp = open(file, 'rb')
if sys.version_info.major == 2:
data = pickle.load(fp)
elif sys.version_info.major == 3:
data = pickle.load(fp, encoding='latin-1')
fp.close()
return data
filename = 'cifar-10-python.tar.gz'
url = 'https://www.cs.toronto.edu/~kriz/'
# Download and uncompress file
maybe_download_and_extract(filename, path, url, extract=True)
# Unpickle file and fill in data
X_train = None
y_train = []
for i in range(1, 6):
data_dic = unpickle(os.path.join(path, 'cifar-10-batches-py/', "data_batch_{}".format(i)))
if i == 1:
X_train = data_dic['data']
else:
X_train = np.vstack((X_train, data_dic['data']))
y_train += data_dic['labels']
test_data_dic = unpickle(os.path.join(path, 'cifar-10-batches-py/', "test_batch"))
X_test = test_data_dic['data']
y_test = np.array(test_data_dic['labels'])
if shape == (-1, 3, 32, 32):
X_test = X_test.reshape(shape)
X_train = X_train.reshape(shape)
elif shape == (-1, 32, 32, 3):
X_test = X_test.reshape(shape, order='F')
X_train = X_train.reshape(shape, order='F')
X_test = np.transpose(X_test, (0, 2, 1, 3))
X_train = np.transpose(X_train, (0, 2, 1, 3))
else:
X_test = X_test.reshape(shape)
X_train = X_train.reshape(shape)
y_train = np.array(y_train)
if plotable:
logging.info('\nCIFAR-10')
fig = plt.figure(1)
logging.info('Shape of a training image: X_train[0] %s' % X_train[0].shape)
plt.ion() # interactive mode
count = 1
for _ in range(10): # each row
for _ in range(10): # each column
_ = fig.add_subplot(10, 10, count)
if shape == (-1, 3, 32, 32):
# plt.imshow(X_train[count-1], interpolation='nearest')
plt.imshow(np.transpose(X_train[count - 1], (1, 2, 0)), interpolation='nearest')
# plt.imshow(np.transpose(X_train[count-1], (2, 1, 0)), interpolation='nearest')
elif shape == (-1, 32, 32, 3):
plt.imshow(X_train[count - 1], interpolation='nearest')
# plt.imshow(np.transpose(X_train[count-1], (1, 0, 2)), interpolation='nearest')
else:
raise Exception("Do not support the given 'shape' to plot the image examples")
plt.gca().xaxis.set_major_locator(plt.NullLocator()) # hide the tick marks
plt.gca().yaxis.set_major_locator(plt.NullLocator())
count = count + 1
plt.draw() # interactive mode
plt.pause(3) # interactive mode
logging.info("X_train: %s" % X_train.shape)
logging.info("y_train: %s" % y_train.shape)
logging.info("X_test: %s" % X_test.shape)
logging.info("y_test: %s" % y_test.shape)
X_train = np.asarray(X_train, dtype=np.float32)
X_test = np.asarray(X_test, dtype=np.float32)
y_train = np.asarray(y_train, dtype=np.int32)
y_test = np.asarray(y_test, dtype=np.int32)
return X_train, y_train, X_test, y_test | def function[load_cifar10_dataset, parameter[shape, path, plotable]]:
constant[Load CIFAR-10 dataset.
It consists of 60000 32x32 colour images in 10 classes, with
6000 images per class. There are 50000 training images and 10000 test images.
The dataset is divided into five training batches and one test batch, each with
10000 images. The test batch contains exactly 1000 randomly-selected images from
each class. The training batches contain the remaining images in random order,
but some training batches may contain more images from one class than another.
Between them, the training batches contain exactly 5000 images from each class.
Parameters
----------
shape : tuple
The shape of the images, e.g. (-1, 3, 32, 32) and (-1, 32, 32, 3).
path : str
The path that the data is downloaded to; the default is ``data/cifar10/``.
plotable : boolean
Whether to plot some image examples; False by default.
Examples
--------
>>> X_train, y_train, X_test, y_test = tl.files.load_cifar10_dataset(shape=(-1, 32, 32, 3))
References
----------
- `CIFAR website <https://www.cs.toronto.edu/~kriz/cifar.html>`__
- `Data download link <https://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz>`__
- `<https://teratail.com/questions/28932>`__
]
variable[path] assign[=] call[name[os].path.join, parameter[name[path], constant[cifar10]]]
call[name[logging].info, parameter[call[constant[Load or Download cifar10 > {}].format, parameter[name[path]]]]]
def function[unpickle, parameter[file]]:
variable[fp] assign[=] call[name[open], parameter[name[file], constant[rb]]]
if compare[name[sys].version_info.major equal[==] constant[2]] begin[:]
variable[data] assign[=] call[name[pickle].load, parameter[name[fp]]]
call[name[fp].close, parameter[]]
return[name[data]]
variable[filename] assign[=] constant[cifar-10-python.tar.gz]
variable[url] assign[=] constant[https://www.cs.toronto.edu/~kriz/]
call[name[maybe_download_and_extract], parameter[name[filename], name[path], name[url]]]
variable[X_train] assign[=] constant[None]
variable[y_train] assign[=] list[[]]
for taget[name[i]] in starred[call[name[range], parameter[constant[1], constant[6]]]] begin[:]
variable[data_dic] assign[=] call[name[unpickle], parameter[call[name[os].path.join, parameter[name[path], constant[cifar-10-batches-py/], call[constant[data_batch_{}].format, parameter[name[i]]]]]]]
if compare[name[i] equal[==] constant[1]] begin[:]
variable[X_train] assign[=] call[name[data_dic]][constant[data]]
<ast.AugAssign object at 0x7da1b0145f60>
variable[test_data_dic] assign[=] call[name[unpickle], parameter[call[name[os].path.join, parameter[name[path], constant[cifar-10-batches-py/], constant[test_batch]]]]]
variable[X_test] assign[=] call[name[test_data_dic]][constant[data]]
variable[y_test] assign[=] call[name[np].array, parameter[call[name[test_data_dic]][constant[labels]]]]
if compare[name[shape] equal[==] tuple[[<ast.UnaryOp object at 0x7da1b0145960>, <ast.Constant object at 0x7da1b0146230>, <ast.Constant object at 0x7da1b0144f70>, <ast.Constant object at 0x7da1b0145810>]]] begin[:]
variable[X_test] assign[=] call[name[X_test].reshape, parameter[name[shape]]]
variable[X_train] assign[=] call[name[X_train].reshape, parameter[name[shape]]]
variable[y_train] assign[=] call[name[np].array, parameter[name[y_train]]]
if name[plotable] begin[:]
call[name[logging].info, parameter[constant[
CIFAR-10]]]
variable[fig] assign[=] call[name[plt].figure, parameter[constant[1]]]
call[name[logging].info, parameter[binary_operation[constant[Shape of a training image: X_train[0] %s] <ast.Mod object at 0x7da2590d6920> call[name[X_train]][constant[0]].shape]]]
call[name[plt].ion, parameter[]]
variable[count] assign[=] constant[1]
for taget[name[_]] in starred[call[name[range], parameter[constant[10]]]] begin[:]
for taget[name[_]] in starred[call[name[range], parameter[constant[10]]]] begin[:]
variable[_] assign[=] call[name[fig].add_subplot, parameter[constant[10], constant[10], name[count]]]
if compare[name[shape] equal[==] tuple[[<ast.UnaryOp object at 0x7da1b002aec0>, <ast.Constant object at 0x7da1b002b1f0>, <ast.Constant object at 0x7da1b0029570>, <ast.Constant object at 0x7da1b0029210>]]] begin[:]
call[name[plt].imshow, parameter[call[name[np].transpose, parameter[call[name[X_train]][binary_operation[name[count] - constant[1]]], tuple[[<ast.Constant object at 0x7da1b0028220>, <ast.Constant object at 0x7da1b002a110>, <ast.Constant object at 0x7da1b0028be0>]]]]]]
call[call[name[plt].gca, parameter[]].xaxis.set_major_locator, parameter[call[name[plt].NullLocator, parameter[]]]]
call[call[name[plt].gca, parameter[]].yaxis.set_major_locator, parameter[call[name[plt].NullLocator, parameter[]]]]
variable[count] assign[=] binary_operation[name[count] + constant[1]]
call[name[plt].draw, parameter[]]
call[name[plt].pause, parameter[constant[3]]]
call[name[logging].info, parameter[binary_operation[constant[X_train: %s] <ast.Mod object at 0x7da2590d6920> name[X_train].shape]]]
call[name[logging].info, parameter[binary_operation[constant[y_train: %s] <ast.Mod object at 0x7da2590d6920> name[y_train].shape]]]
call[name[logging].info, parameter[binary_operation[constant[X_test: %s] <ast.Mod object at 0x7da2590d6920> name[X_test].shape]]]
call[name[logging].info, parameter[binary_operation[constant[y_test: %s] <ast.Mod object at 0x7da2590d6920> name[y_test].shape]]]
variable[X_train] assign[=] call[name[np].asarray, parameter[name[X_train]]]
variable[X_test] assign[=] call[name[np].asarray, parameter[name[X_test]]]
variable[y_train] assign[=] call[name[np].asarray, parameter[name[y_train]]]
variable[y_test] assign[=] call[name[np].asarray, parameter[name[y_test]]]
return[tuple[[<ast.Name object at 0x7da1b01477c0>, <ast.Name object at 0x7da1b0147940>, <ast.Name object at 0x7da1b0147970>, <ast.Name object at 0x7da1b01479d0>]]] | keyword[def] identifier[load_cifar10_dataset] ( identifier[shape] =(- literal[int] , literal[int] , literal[int] , literal[int] ), identifier[path] = literal[string] , identifier[plotable] = keyword[False] ):
literal[string]
identifier[path] = identifier[os] . identifier[path] . identifier[join] ( identifier[path] , literal[string] )
identifier[logging] . identifier[info] ( literal[string] . identifier[format] ( identifier[path] ))
keyword[def] identifier[unpickle] ( identifier[file] ):
identifier[fp] = identifier[open] ( identifier[file] , literal[string] )
keyword[if] identifier[sys] . identifier[version_info] . identifier[major] == literal[int] :
identifier[data] = identifier[pickle] . identifier[load] ( identifier[fp] )
keyword[elif] identifier[sys] . identifier[version_info] . identifier[major] == literal[int] :
identifier[data] = identifier[pickle] . identifier[load] ( identifier[fp] , identifier[encoding] = literal[string] )
identifier[fp] . identifier[close] ()
keyword[return] identifier[data]
identifier[filename] = literal[string]
identifier[url] = literal[string]
identifier[maybe_download_and_extract] ( identifier[filename] , identifier[path] , identifier[url] , identifier[extract] = keyword[True] )
identifier[X_train] = keyword[None]
identifier[y_train] =[]
keyword[for] identifier[i] keyword[in] identifier[range] ( literal[int] , literal[int] ):
identifier[data_dic] = identifier[unpickle] ( identifier[os] . identifier[path] . identifier[join] ( identifier[path] , literal[string] , literal[string] . identifier[format] ( identifier[i] )))
keyword[if] identifier[i] == literal[int] :
identifier[X_train] = identifier[data_dic] [ literal[string] ]
keyword[else] :
identifier[X_train] = identifier[np] . identifier[vstack] (( identifier[X_train] , identifier[data_dic] [ literal[string] ]))
identifier[y_train] += identifier[data_dic] [ literal[string] ]
identifier[test_data_dic] = identifier[unpickle] ( identifier[os] . identifier[path] . identifier[join] ( identifier[path] , literal[string] , literal[string] ))
identifier[X_test] = identifier[test_data_dic] [ literal[string] ]
identifier[y_test] = identifier[np] . identifier[array] ( identifier[test_data_dic] [ literal[string] ])
keyword[if] identifier[shape] ==(- literal[int] , literal[int] , literal[int] , literal[int] ):
identifier[X_test] = identifier[X_test] . identifier[reshape] ( identifier[shape] )
identifier[X_train] = identifier[X_train] . identifier[reshape] ( identifier[shape] )
keyword[elif] identifier[shape] ==(- literal[int] , literal[int] , literal[int] , literal[int] ):
identifier[X_test] = identifier[X_test] . identifier[reshape] ( identifier[shape] , identifier[order] = literal[string] )
identifier[X_train] = identifier[X_train] . identifier[reshape] ( identifier[shape] , identifier[order] = literal[string] )
identifier[X_test] = identifier[np] . identifier[transpose] ( identifier[X_test] ,( literal[int] , literal[int] , literal[int] , literal[int] ))
identifier[X_train] = identifier[np] . identifier[transpose] ( identifier[X_train] ,( literal[int] , literal[int] , literal[int] , literal[int] ))
keyword[else] :
identifier[X_test] = identifier[X_test] . identifier[reshape] ( identifier[shape] )
identifier[X_train] = identifier[X_train] . identifier[reshape] ( identifier[shape] )
identifier[y_train] = identifier[np] . identifier[array] ( identifier[y_train] )
keyword[if] identifier[plotable] :
identifier[logging] . identifier[info] ( literal[string] )
identifier[fig] = identifier[plt] . identifier[figure] ( literal[int] )
identifier[logging] . identifier[info] ( literal[string] % identifier[X_train] [ literal[int] ]. identifier[shape] )
identifier[plt] . identifier[ion] ()
identifier[count] = literal[int]
keyword[for] identifier[_] keyword[in] identifier[range] ( literal[int] ):
keyword[for] identifier[_] keyword[in] identifier[range] ( literal[int] ):
identifier[_] = identifier[fig] . identifier[add_subplot] ( literal[int] , literal[int] , identifier[count] )
keyword[if] identifier[shape] ==(- literal[int] , literal[int] , literal[int] , literal[int] ):
identifier[plt] . identifier[imshow] ( identifier[np] . identifier[transpose] ( identifier[X_train] [ identifier[count] - literal[int] ],( literal[int] , literal[int] , literal[int] )), identifier[interpolation] = literal[string] )
keyword[elif] identifier[shape] ==(- literal[int] , literal[int] , literal[int] , literal[int] ):
identifier[plt] . identifier[imshow] ( identifier[X_train] [ identifier[count] - literal[int] ], identifier[interpolation] = literal[string] )
keyword[else] :
keyword[raise] identifier[Exception] ( literal[string] )
identifier[plt] . identifier[gca] (). identifier[xaxis] . identifier[set_major_locator] ( identifier[plt] . identifier[NullLocator] ())
identifier[plt] . identifier[gca] (). identifier[yaxis] . identifier[set_major_locator] ( identifier[plt] . identifier[NullLocator] ())
identifier[count] = identifier[count] + literal[int]
identifier[plt] . identifier[draw] ()
identifier[plt] . identifier[pause] ( literal[int] )
identifier[logging] . identifier[info] ( literal[string] % identifier[X_train] . identifier[shape] )
identifier[logging] . identifier[info] ( literal[string] % identifier[y_train] . identifier[shape] )
identifier[logging] . identifier[info] ( literal[string] % identifier[X_test] . identifier[shape] )
identifier[logging] . identifier[info] ( literal[string] % identifier[y_test] . identifier[shape] )
identifier[X_train] = identifier[np] . identifier[asarray] ( identifier[X_train] , identifier[dtype] = identifier[np] . identifier[float32] )
identifier[X_test] = identifier[np] . identifier[asarray] ( identifier[X_test] , identifier[dtype] = identifier[np] . identifier[float32] )
identifier[y_train] = identifier[np] . identifier[asarray] ( identifier[y_train] , identifier[dtype] = identifier[np] . identifier[int32] )
identifier[y_test] = identifier[np] . identifier[asarray] ( identifier[y_test] , identifier[dtype] = identifier[np] . identifier[int32] )
keyword[return] identifier[X_train] , identifier[y_train] , identifier[X_test] , identifier[y_test] | def load_cifar10_dataset(shape=(-1, 32, 32, 3), path='data', plotable=False):
"""Load CIFAR-10 dataset.
It consists of 60000 32x32 colour images in 10 classes, with
6000 images per class. There are 50000 training images and 10000 test images.
The dataset is divided into five training batches and one test batch, each with
10000 images. The test batch contains exactly 1000 randomly-selected images from
each class. The training batches contain the remaining images in random order,
but some training batches may contain more images from one class than another.
Between them, the training batches contain exactly 5000 images from each class.
Parameters
----------
shape : tuple
The shape of the images, e.g. (-1, 3, 32, 32) and (-1, 32, 32, 3).
path : str
The path that the data is downloaded to; the default is ``data/cifar10/``.
plotable : boolean
Whether to plot some image examples; False by default.
Examples
--------
>>> X_train, y_train, X_test, y_test = tl.files.load_cifar10_dataset(shape=(-1, 32, 32, 3))
References
----------
- `CIFAR website <https://www.cs.toronto.edu/~kriz/cifar.html>`__
- `Data download link <https://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz>`__
- `<https://teratail.com/questions/28932>`__
"""
path = os.path.join(path, 'cifar10')
logging.info('Load or Download cifar10 > {}'.format(path))
# Helper function to unpickle the data
def unpickle(file):
fp = open(file, 'rb')
if sys.version_info.major == 2:
data = pickle.load(fp) # depends on [control=['if'], data=[]]
elif sys.version_info.major == 3:
data = pickle.load(fp, encoding='latin-1') # depends on [control=['if'], data=[]]
fp.close()
return data
filename = 'cifar-10-python.tar.gz'
url = 'https://www.cs.toronto.edu/~kriz/'
# Download and uncompress file
maybe_download_and_extract(filename, path, url, extract=True)
# Unpickle file and fill in data
X_train = None
y_train = []
for i in range(1, 6):
data_dic = unpickle(os.path.join(path, 'cifar-10-batches-py/', 'data_batch_{}'.format(i)))
if i == 1:
X_train = data_dic['data'] # depends on [control=['if'], data=[]]
else:
X_train = np.vstack((X_train, data_dic['data']))
y_train += data_dic['labels'] # depends on [control=['for'], data=['i']]
test_data_dic = unpickle(os.path.join(path, 'cifar-10-batches-py/', 'test_batch'))
X_test = test_data_dic['data']
y_test = np.array(test_data_dic['labels'])
if shape == (-1, 3, 32, 32):
X_test = X_test.reshape(shape)
X_train = X_train.reshape(shape) # depends on [control=['if'], data=['shape']]
elif shape == (-1, 32, 32, 3):
X_test = X_test.reshape(shape, order='F')
X_train = X_train.reshape(shape, order='F')
X_test = np.transpose(X_test, (0, 2, 1, 3))
X_train = np.transpose(X_train, (0, 2, 1, 3)) # depends on [control=['if'], data=['shape']]
else:
X_test = X_test.reshape(shape)
X_train = X_train.reshape(shape)
y_train = np.array(y_train)
if plotable:
logging.info('\nCIFAR-10')
fig = plt.figure(1)
logging.info('Shape of a training image: X_train[0] %s' % X_train[0].shape)
plt.ion() # interactive mode
count = 1
for _ in range(10): # each row
for _ in range(10): # each column
_ = fig.add_subplot(10, 10, count)
if shape == (-1, 3, 32, 32):
# plt.imshow(X_train[count-1], interpolation='nearest')
plt.imshow(np.transpose(X_train[count - 1], (1, 2, 0)), interpolation='nearest') # depends on [control=['if'], data=[]]
# plt.imshow(np.transpose(X_train[count-1], (2, 1, 0)), interpolation='nearest')
elif shape == (-1, 32, 32, 3):
plt.imshow(X_train[count - 1], interpolation='nearest') # depends on [control=['if'], data=[]]
else:
# plt.imshow(np.transpose(X_train[count-1], (1, 0, 2)), interpolation='nearest')
raise Exception("Do not support the given 'shape' to plot the image examples")
plt.gca().xaxis.set_major_locator(plt.NullLocator()) # hide the tick marks
plt.gca().yaxis.set_major_locator(plt.NullLocator())
count = count + 1 # depends on [control=['for'], data=['_']] # depends on [control=['for'], data=['_']]
plt.draw() # interactive mode
plt.pause(3) # interactive mode
logging.info('X_train: %s' % X_train.shape)
logging.info('y_train: %s' % y_train.shape)
logging.info('X_test: %s' % X_test.shape)
logging.info('y_test: %s' % y_test.shape) # depends on [control=['if'], data=[]]
X_train = np.asarray(X_train, dtype=np.float32)
X_test = np.asarray(X_test, dtype=np.float32)
y_train = np.asarray(y_train, dtype=np.int32)
y_test = np.asarray(y_test, dtype=np.int32)
return (X_train, y_train, X_test, y_test) |
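Loading CIFAR-10 in channels-last layout, as in the docstring's own example; the first call downloads and extracts the archive under `data/cifar10/`:

```python
import tensorlayer as tl

X_train, y_train, X_test, y_test = tl.files.load_cifar10_dataset(shape=(-1, 32, 32, 3))
print(X_train.shape, X_train.dtype)  # (50000, 32, 32, 3) float32
print(y_train.shape, y_train.dtype)  # (50000,) int32
```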
def _compute_partial_derivative_site_amp(self, C, pga1100, vs30):
"""
Partial derivative of site amplification term with respect to
PGA on rock (equation 26), as described in the errata and not
in the original paper.
"""
delta_amp = np.zeros_like(vs30)
vlin = C['VLIN']
c = self.CONSTS['c']
b = C['b']
n = self.CONSTS['n']
idx = vs30 < vlin
delta_amp[idx] = (- b * pga1100[idx] / (pga1100[idx] + c) +
b * pga1100[idx] / (pga1100[idx] + c *
((vs30[idx] / vlin) ** n)))
return delta_amp | def function[_compute_partial_derivative_site_amp, parameter[self, C, pga1100, vs30]]:
constant[
Partial derivative of site amplification term with respect to
PGA on rock (equation 26), as described in the errata and not
in the original paper.
]
variable[delta_amp] assign[=] call[name[np].zeros_like, parameter[name[vs30]]]
variable[vlin] assign[=] call[name[C]][constant[VLIN]]
variable[c] assign[=] call[name[self].CONSTS][constant[c]]
variable[b] assign[=] call[name[C]][constant[b]]
variable[n] assign[=] call[name[self].CONSTS][constant[n]]
variable[idx] assign[=] compare[name[vs30] less[<] name[vlin]]
call[name[delta_amp]][name[idx]] assign[=] binary_operation[binary_operation[binary_operation[<ast.UnaryOp object at 0x7da18ede6b60> * call[name[pga1100]][name[idx]]] / binary_operation[call[name[pga1100]][name[idx]] + name[c]]] + binary_operation[binary_operation[name[b] * call[name[pga1100]][name[idx]]] / binary_operation[call[name[pga1100]][name[idx]] + binary_operation[name[c] * binary_operation[binary_operation[call[name[vs30]][name[idx]] / name[vlin]] ** name[n]]]]]]
return[name[delta_amp]] | keyword[def] identifier[_compute_partial_derivative_site_amp] ( identifier[self] , identifier[C] , identifier[pga1100] , identifier[vs30] ):
literal[string]
identifier[delta_amp] = identifier[np] . identifier[zeros_like] ( identifier[vs30] )
identifier[vlin] = identifier[C] [ literal[string] ]
identifier[c] = identifier[self] . identifier[CONSTS] [ literal[string] ]
identifier[b] = identifier[C] [ literal[string] ]
identifier[n] = identifier[self] . identifier[CONSTS] [ literal[string] ]
identifier[idx] = identifier[vs30] < identifier[vlin]
identifier[delta_amp] [ identifier[idx] ]=(- identifier[b] * identifier[pga1100] [ identifier[idx] ]/( identifier[pga1100] [ identifier[idx] ]+ identifier[c] )+
identifier[b] * identifier[pga1100] [ identifier[idx] ]/( identifier[pga1100] [ identifier[idx] ]+ identifier[c] *
(( identifier[vs30] [ identifier[idx] ]/ identifier[vlin] )** identifier[n] )))
keyword[return] identifier[delta_amp] | def _compute_partial_derivative_site_amp(self, C, pga1100, vs30):
"""
Partial derivative of site amplification term with respect to
PGA on rock (equation 26), as described in the errata and not
in the original paper.
"""
delta_amp = np.zeros_like(vs30)
vlin = C['VLIN']
c = self.CONSTS['c']
b = C['b']
n = self.CONSTS['n']
idx = vs30 < vlin
delta_amp[idx] = -b * pga1100[idx] / (pga1100[idx] + c) + b * pga1100[idx] / (pga1100[idx] + c * (vs30[idx] / vlin) ** n)
return delta_amp |
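Written out, the quantity computed above for sites with `vs30 < vlin` is `delta = -b*pga1100/(pga1100 + c) + b*pga1100/(pga1100 + c*(vs30/vlin)**n)`. A quick numerical evaluation with illustrative coefficient values (placeholders, not the GMPE's actual period-dependent coefficients):

```python
import numpy as np

# Placeholder coefficients; real values come from the GMPE's coefficient
# table (b, VLIN) and its CONSTS dict (c, n).
b, c, n, vlin = -1.186, 1.88, 1.18, 865.1
pga1100 = np.array([0.2])  # PGA on rock (g), made-up value
vs30 = np.array([400.0])   # site vs30 (m/s), made-up value
delta = (-b * pga1100 / (pga1100 + c)
         + b * pga1100 / (pga1100 + c * (vs30 / vlin) ** n))
print(delta)
```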
def load(self):
"""Load configuration from the defined locations."""
if not self.loaded:
self.values = configobj.ConfigObj({}, **self.DEFAULT_CONFIG_OPTS)
for path in self.locations():
try:
part = configobj.ConfigObj(infile=path, **self.DEFAULT_CONFIG_OPTS)
except configobj.ConfigObjError as cause:
raise LoggedFailure("Error in file '{path}': {cause}".format(path=pretty_path(path), cause=cause))
self.values.merge(part)
self.loaded = True
return self.values | def function[load, parameter[self]]:
constant[Load configuration from the defined locations.]
if <ast.UnaryOp object at 0x7da204347610> begin[:]
name[self].values assign[=] call[name[configobj].ConfigObj, parameter[dictionary[[], []]]]
for taget[name[path]] in starred[call[name[self].locations, parameter[]]] begin[:]
<ast.Try object at 0x7da204346da0>
call[name[self].values.merge, parameter[name[part]]]
name[self].loaded assign[=] constant[True]
return[name[self].values] | keyword[def] identifier[load] ( identifier[self] ):
literal[string]
keyword[if] keyword[not] identifier[self] . identifier[loaded] :
identifier[self] . identifier[values] = identifier[configobj] . identifier[ConfigObj] ({},** identifier[self] . identifier[DEFAULT_CONFIG_OPTS] )
keyword[for] identifier[path] keyword[in] identifier[self] . identifier[locations] ():
keyword[try] :
identifier[part] = identifier[configobj] . identifier[ConfigObj] ( identifier[infile] = identifier[path] ,** identifier[self] . identifier[DEFAULT_CONFIG_OPTS] )
keyword[except] identifier[configobj] . identifier[ConfigObjError] keyword[as] identifier[cause] :
keyword[raise] identifier[LoggedFailure] ( literal[string] . identifier[format] ( identifier[path] = identifier[pretty_path] ( identifier[path] ), identifier[cause] = identifier[cause] ))
identifier[self] . identifier[values] . identifier[merge] ( identifier[part] )
identifier[self] . identifier[loaded] = keyword[True]
keyword[return] identifier[self] . identifier[values] | def load(self):
"""Load configuration from the defined locations."""
if not self.loaded:
self.values = configobj.ConfigObj({}, **self.DEFAULT_CONFIG_OPTS)
for path in self.locations():
try:
part = configobj.ConfigObj(infile=path, **self.DEFAULT_CONFIG_OPTS) # depends on [control=['try'], data=[]]
except configobj.ConfigObjError as cause:
raise LoggedFailure("Error in file '{path}': {cause}".format(path=pretty_path(path), cause=cause)) # depends on [control=['except'], data=['cause']]
self.values.merge(part) # depends on [control=['for'], data=['path']]
self.loaded = True # depends on [control=['if'], data=[]]
return self.values |
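The behaviour `load` relies on is `ConfigObj.merge`: each location read later overrides keys from the ones before it. A small sketch of that merge semantics:

```python
import configobj

base = configobj.ConfigObj({'color': 'red', 'size': 'small'})
override = configobj.ConfigObj({'size': 'large'})
base.merge(override)
print(dict(base))  # {'color': 'red', 'size': 'large'}
```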
def remove_entity_layer(self):
"""
Removes the entity layer (if exists) of the object (in memory)
"""
if self.entity_layer is not None:
this_node = self.entity_layer.get_node()
self.root.remove(this_node)
self.entity_layer = None
if self.header is not None:
self.header.remove_lp('entities') | def function[remove_entity_layer, parameter[self]]:
constant[
Removes the entity layer (if exists) of the object (in memory)
]
if compare[name[self].entity_layer is_not constant[None]] begin[:]
variable[this_node] assign[=] call[name[self].entity_layer.get_node, parameter[]]
call[name[self].root.remove, parameter[name[this_node]]]
name[self].entity_layer assign[=] constant[None]
if compare[name[self].header is_not constant[None]] begin[:]
call[name[self].header.remove_lp, parameter[constant[entities]]] | keyword[def] identifier[remove_entity_layer] ( identifier[self] ):
literal[string]
keyword[if] identifier[self] . identifier[entity_layer] keyword[is] keyword[not] keyword[None] :
identifier[this_node] = identifier[self] . identifier[entity_layer] . identifier[get_node] ()
identifier[self] . identifier[root] . identifier[remove] ( identifier[this_node] )
identifier[self] . identifier[entity_layer] = keyword[None]
keyword[if] identifier[self] . identifier[header] keyword[is] keyword[not] keyword[None] :
identifier[self] . identifier[header] . identifier[remove_lp] ( literal[string] ) | def remove_entity_layer(self):
"""
Removes the entity layer (if exists) of the object (in memory)
"""
if self.entity_layer is not None:
this_node = self.entity_layer.get_node()
self.root.remove(this_node)
self.entity_layer = None # depends on [control=['if'], data=[]]
if self.header is not None:
self.header.remove_lp('entities') # depends on [control=['if'], data=[]] |
def extract_listing(pid):
"""Extract listing; return list of tuples (artist(s), title, label)."""
print("Extracting tracklisting...")
listing_etree = open_listing_page(pid + '/segments.inc')
track_divs = listing_etree.xpath('//div[@class="segment__track"]')
listing = []
for track_div in track_divs:
try:
artist_names = track_div.xpath('.//span[@property="byArtist"]'
'//span[@class="artist"]/text()')
except ValueError:
artist_names = ['']
if not artist_names:
artist_names = ['']
if len(artist_names) > 1:
artists = ', '.join(artist_names[:-1]) + ' & ' + artist_names[-1]
else:
artists = artist_names[0]
try:
title, = track_div.xpath('.//p/span[@property="name"]/text()')
except ValueError:
title = ''
try:
label, = track_div.xpath('.//abbr[@title="Record Label"]'
'/span[@property="name"]/text()')
except ValueError:
label = ''
listing.append((artists, title, label))
return listing | def function[extract_listing, parameter[pid]]:
constant[Extract listing; return list of tuples (artist(s), title, label).]
call[name[print], parameter[constant[Extracting tracklisting...]]]
variable[listing_etree] assign[=] call[name[open_listing_page], parameter[binary_operation[name[pid] + constant[/segments.inc]]]]
variable[track_divs] assign[=] call[name[listing_etree].xpath, parameter[constant[//div[@class="segment__track"]]]]
variable[listing] assign[=] list[[]]
for taget[name[track_div]] in starred[name[track_divs]] begin[:]
<ast.Try object at 0x7da20e9612a0>
if <ast.UnaryOp object at 0x7da20e9605b0> begin[:]
variable[artist_names] assign[=] list[[<ast.Constant object at 0x7da20e962050>]]
if compare[call[name[len], parameter[name[artist_names]]] greater[>] constant[1]] begin[:]
variable[artists] assign[=] binary_operation[binary_operation[call[constant[, ].join, parameter[call[name[artist_names]][<ast.Slice object at 0x7da20e961f30>]]] + constant[ & ]] + call[name[artist_names]][<ast.UnaryOp object at 0x7da20e963df0>]]
<ast.Try object at 0x7da20e960bb0>
<ast.Try object at 0x7da18f00d480>
call[name[listing].append, parameter[tuple[[<ast.Name object at 0x7da18f00ceb0>, <ast.Name object at 0x7da18f00d2a0>, <ast.Name object at 0x7da18f00c4f0>]]]]
return[name[listing]] | keyword[def] identifier[extract_listing] ( identifier[pid] ):
literal[string]
identifier[print] ( literal[string] )
identifier[listing_etree] = identifier[open_listing_page] ( identifier[pid] + literal[string] )
identifier[track_divs] = identifier[listing_etree] . identifier[xpath] ( literal[string] )
identifier[listing] =[]
keyword[for] identifier[track_div] keyword[in] identifier[track_divs] :
keyword[try] :
identifier[artist_names] = identifier[track_div] . identifier[xpath] ( literal[string]
literal[string] )
keyword[except] identifier[ValueError] :
identifier[artist_names] =[ literal[string] ]
keyword[if] keyword[not] identifier[artist_names] :
identifier[artist_names] =[ literal[string] ]
keyword[if] identifier[len] ( identifier[artist_names] )> literal[int] :
identifier[artists] = literal[string] . identifier[join] ( identifier[artist_names] [:- literal[int] ])+ literal[string] + identifier[artist_names] [- literal[int] ]
keyword[else] :
identifier[artists] = identifier[artist_names] [ literal[int] ]
keyword[try] :
identifier[title] ,= identifier[track_div] . identifier[xpath] ( literal[string] )
keyword[except] identifier[ValueError] :
identifier[title] = literal[string]
keyword[try] :
identifier[label] ,= identifier[track_div] . identifier[xpath] ( literal[string]
literal[string] )
keyword[except] identifier[ValueError] :
identifier[label] = literal[string]
identifier[listing] . identifier[append] (( identifier[artists] , identifier[title] , identifier[label] ))
keyword[return] identifier[listing] | def extract_listing(pid):
"""Extract listing; return list of tuples (artist(s), title, label)."""
print('Extracting tracklisting...')
listing_etree = open_listing_page(pid + '/segments.inc')
track_divs = listing_etree.xpath('//div[@class="segment__track"]')
listing = []
for track_div in track_divs:
try:
artist_names = track_div.xpath('.//span[@property="byArtist"]//span[@class="artist"]/text()') # depends on [control=['try'], data=[]]
except ValueError:
artist_names = [''] # depends on [control=['except'], data=[]]
if not artist_names:
artist_names = [''] # depends on [control=['if'], data=[]]
if len(artist_names) > 1:
artists = ', '.join(artist_names[:-1]) + ' & ' + artist_names[-1] # depends on [control=['if'], data=[]]
else:
artists = artist_names[0]
try:
(title,) = track_div.xpath('.//p/span[@property="name"]/text()') # depends on [control=['try'], data=[]]
except ValueError:
title = '' # depends on [control=['except'], data=[]]
try:
(label,) = track_div.xpath('.//abbr[@title="Record Label"]/span[@property="name"]/text()') # depends on [control=['try'], data=[]]
except ValueError:
label = '' # depends on [control=['except'], data=[]]
listing.append((artists, title, label)) # depends on [control=['for'], data=['track_div']]
return listing |
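
# A minimal, self-contained sketch of the artist-joining rule used in
# extract_listing above; inputs here are hypothetical and no lxml or
# network access is involved, only the join branch itself.
def join_artists(artist_names):
    """Join ['A', 'B', 'C'] as 'A, B & C', mirroring the branch above."""
    if not artist_names:
        return ''
    if len(artist_names) > 1:
        return ', '.join(artist_names[:-1]) + ' & ' + artist_names[-1]
    return artist_names[0]

assert join_artists(['Nina Simone']) == 'Nina Simone'
assert join_artists(['Daft Punk', 'Pharrell', 'Nile Rodgers']) == 'Daft Punk, Pharrell & Nile Rodgers'
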
def update_roles_gce(use_cache=True, cache_expiration=86400, cache_path="~/.gcetools/instances", group_name=None, region=None, zone=None):
"""
    Dynamically update fabric's roles by assigning the tags associated with
each machine in Google Compute Engine.
use_cache - will store a local cache in ~/.gcetools/
cache_expiration - cache expiration in seconds (default: 1 day)
cache_path - the path to store instances data (default: ~/.gcetools/instances)
group_name - optional managed instance group to use instead of the global instance pool
region - gce region name (such as `us-central1`) for a regional managed instance group
zone - gce zone name (such as `us-central1-a`) for a zone managed instance group
How to use:
- Call 'update_roles_gce' at the end of your fabfile.py (it will run each
time you run fabric).
- On each function use the regular @roles decorator and set the role to the name
of one of the tags associated with the instances you wish to work with
"""
data = _get_data(use_cache, cache_expiration, group_name=group_name, region=region, zone=zone)
roles = _get_roles(data)
env.roledefs.update(roles)
_data_loaded = True
return INSTANCES_CACHE | def function[update_roles_gce, parameter[use_cache, cache_expiration, cache_path, group_name, region, zone]]:
constant[
    Dynamically update fabric's roles by assigning the tags associated with
each machine in Google Compute Engine.
use_cache - will store a local cache in ~/.gcetools/
cache_expiration - cache expiration in seconds (default: 1 day)
cache_path - the path to store instances data (default: ~/.gcetools/instances)
group_name - optional managed instance group to use instead of the global instance pool
region - gce region name (such as `us-central1`) for a regional managed instance group
zone - gce zone name (such as `us-central1-a`) for a zone managed instance group
How to use:
- Call 'update_roles_gce' at the end of your fabfile.py (it will run each
time you run fabric).
- On each function use the regular @roles decorator and set the role to the name
of one of the tags associated with the instances you wish to work with
]
variable[data] assign[=] call[name[_get_data], parameter[name[use_cache], name[cache_expiration]]]
variable[roles] assign[=] call[name[_get_roles], parameter[name[data]]]
call[name[env].roledefs.update, parameter[name[roles]]]
variable[_data_loaded] assign[=] constant[True]
return[name[INSTANCES_CACHE]] | keyword[def] identifier[update_roles_gce] ( identifier[use_cache] = keyword[True] , identifier[cache_expiration] = literal[int] , identifier[cache_path] = literal[string] , identifier[group_name] = keyword[None] , identifier[region] = keyword[None] , identifier[zone] = keyword[None] ):
literal[string]
identifier[data] = identifier[_get_data] ( identifier[use_cache] , identifier[cache_expiration] , identifier[group_name] = identifier[group_name] , identifier[region] = identifier[region] , identifier[zone] = identifier[zone] )
identifier[roles] = identifier[_get_roles] ( identifier[data] )
identifier[env] . identifier[roledefs] . identifier[update] ( identifier[roles] )
identifier[_data_loaded] = keyword[True]
keyword[return] identifier[INSTANCES_CACHE] | def update_roles_gce(use_cache=True, cache_expiration=86400, cache_path='~/.gcetools/instances', group_name=None, region=None, zone=None):
"""
    Dynamically update fabric's roles by assigning the tags associated with
each machine in Google Compute Engine.
use_cache - will store a local cache in ~/.gcetools/
cache_expiration - cache expiration in seconds (default: 1 day)
cache_path - the path to store instances data (default: ~/.gcetools/instances)
group_name - optional managed instance group to use instead of the global instance pool
region - gce region name (such as `us-central1`) for a regional managed instance group
zone - gce zone name (such as `us-central1-a`) for a zone managed instance group
How to use:
- Call 'update_roles_gce' at the end of your fabfile.py (it will run each
time you run fabric).
- On each function use the regular @roles decorator and set the role to the name
of one of the tags associated with the instances you wish to work with
"""
data = _get_data(use_cache, cache_expiration, group_name=group_name, region=region, zone=zone)
roles = _get_roles(data)
env.roledefs.update(roles)
_data_loaded = True
return INSTANCES_CACHE |
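
# A minimal fabfile sketch of the usage described in the docstring above.
# The task body and the 'webserver' tag are illustrative assumptions; the
# @task/@roles decorators and run() are fabric 1.x API, and
# update_roles_gce is the function from the record above.
from fabric.api import roles, run, task

@task
@roles('webserver')  # must match a tag on the GCE instances
def restart_nginx():
    run('sudo service nginx restart')

# Called at module import time, as the docstring recommends, so env.roledefs
# is populated before fabric dispatches any task.
update_roles_gce()
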
def dispatch(self, *args, **kwargs):
"""This decorator sets this view to have restricted permissions."""
return super(AnimalList, self).dispatch(*args, **kwargs) | def function[dispatch, parameter[self]]:
constant[This decorator sets this view to have restricted permissions.]
return[call[call[name[super], parameter[name[AnimalList], name[self]]].dispatch, parameter[<ast.Starred object at 0x7da20e74b2e0>]]] | keyword[def] identifier[dispatch] ( identifier[self] ,* identifier[args] ,** identifier[kwargs] ):
literal[string]
keyword[return] identifier[super] ( identifier[AnimalList] , identifier[self] ). identifier[dispatch] (* identifier[args] ,** identifier[kwargs] ) | def dispatch(self, *args, **kwargs):
"""This decorator sets this view to have restricted permissions."""
return super(AnimalList, self).dispatch(*args, **kwargs) |
def filter(self, table, group_types, filter_string):
"""Naive case-insensitive search."""
query = filter_string.lower()
return [group_type for group_type in group_types
if query in group_type.name.lower()] | def function[filter, parameter[self, table, group_types, filter_string]]:
constant[Naive case-insensitive search.]
variable[query] assign[=] call[name[filter_string].lower, parameter[]]
return[<ast.ListComp object at 0x7da1b18dd660>] | keyword[def] identifier[filter] ( identifier[self] , identifier[table] , identifier[group_types] , identifier[filter_string] ):
literal[string]
identifier[query] = identifier[filter_string] . identifier[lower] ()
keyword[return] [ identifier[group_type] keyword[for] identifier[group_type] keyword[in] identifier[group_types]
keyword[if] identifier[query] keyword[in] identifier[group_type] . identifier[name] . identifier[lower] ()] | def filter(self, table, group_types, filter_string):
"""Naive case-insensitive search."""
query = filter_string.lower()
return [group_type for group_type in group_types if query in group_type.name.lower()] |
def standalone_from_launchable(cls, launch):
"""
Given a launchable resource, create a definition of a standalone
instance, which doesn't depend on or contain references to other
elements.
"""
attrs = copy.copy(launch.el_attrs)
# Remove attributes we overwrite / don't need
del attrs["Type"]
if attrs.has_key("DependsOn"):
del attrs["DependsOn"]
if attrs["Properties"].has_key("SpotPrice"):
del attrs["Properties"]["SpotPrice"]
if attrs["Properties"].has_key("InstanceMonitoring"):
del attrs["Properties"]["InstanceMonitoring"]
if attrs["Properties"].has_key("SecurityGroups"):
del attrs["Properties"]["SecurityGroups"]
if attrs["Properties"].has_key("InstanceId"):
raise RuntimeError("Can't make instance from launchable containing InstanceId property")
inst = EC2Instance(**attrs)
# TODO: shallow copy?
inst.iscm = launch.iscm
return inst | def function[standalone_from_launchable, parameter[cls, launch]]:
constant[
Given a launchable resource, create a definition of a standalone
instance, which doesn't depend on or contain references to other
elements.
]
variable[attrs] assign[=] call[name[copy].copy, parameter[name[launch].el_attrs]]
<ast.Delete object at 0x7da2041dbac0>
if call[name[attrs].has_key, parameter[constant[DependsOn]]] begin[:]
<ast.Delete object at 0x7da2041da3b0>
if call[call[name[attrs]][constant[Properties]].has_key, parameter[constant[SpotPrice]]] begin[:]
<ast.Delete object at 0x7da2041d89a0>
if call[call[name[attrs]][constant[Properties]].has_key, parameter[constant[InstanceMonitoring]]] begin[:]
<ast.Delete object at 0x7da2041d96c0>
if call[call[name[attrs]][constant[Properties]].has_key, parameter[constant[SecurityGroups]]] begin[:]
<ast.Delete object at 0x7da2041d9960>
if call[call[name[attrs]][constant[Properties]].has_key, parameter[constant[InstanceId]]] begin[:]
<ast.Raise object at 0x7da2041d8070>
variable[inst] assign[=] call[name[EC2Instance], parameter[]]
name[inst].iscm assign[=] name[launch].iscm
return[name[inst]] | keyword[def] identifier[standalone_from_launchable] ( identifier[cls] , identifier[launch] ):
literal[string]
identifier[attrs] = identifier[copy] . identifier[copy] ( identifier[launch] . identifier[el_attrs] )
keyword[del] identifier[attrs] [ literal[string] ]
keyword[if] identifier[attrs] . identifier[has_key] ( literal[string] ):
keyword[del] identifier[attrs] [ literal[string] ]
keyword[if] identifier[attrs] [ literal[string] ]. identifier[has_key] ( literal[string] ):
keyword[del] identifier[attrs] [ literal[string] ][ literal[string] ]
keyword[if] identifier[attrs] [ literal[string] ]. identifier[has_key] ( literal[string] ):
keyword[del] identifier[attrs] [ literal[string] ][ literal[string] ]
keyword[if] identifier[attrs] [ literal[string] ]. identifier[has_key] ( literal[string] ):
keyword[del] identifier[attrs] [ literal[string] ][ literal[string] ]
keyword[if] identifier[attrs] [ literal[string] ]. identifier[has_key] ( literal[string] ):
keyword[raise] identifier[RuntimeError] ( literal[string] )
identifier[inst] = identifier[EC2Instance] (** identifier[attrs] )
identifier[inst] . identifier[iscm] = identifier[launch] . identifier[iscm]
keyword[return] identifier[inst] | def standalone_from_launchable(cls, launch):
"""
Given a launchable resource, create a definition of a standalone
instance, which doesn't depend on or contain references to other
elements.
"""
attrs = copy.copy(launch.el_attrs)
# Remove attributes we overwrite / don't need
del attrs['Type']
if attrs.has_key('DependsOn'):
del attrs['DependsOn'] # depends on [control=['if'], data=[]]
if attrs['Properties'].has_key('SpotPrice'):
del attrs['Properties']['SpotPrice'] # depends on [control=['if'], data=[]]
if attrs['Properties'].has_key('InstanceMonitoring'):
del attrs['Properties']['InstanceMonitoring'] # depends on [control=['if'], data=[]]
if attrs['Properties'].has_key('SecurityGroups'):
del attrs['Properties']['SecurityGroups'] # depends on [control=['if'], data=[]]
if attrs['Properties'].has_key('InstanceId'):
raise RuntimeError("Can't make instance from launchable containing InstanceId property") # depends on [control=['if'], data=[]]
inst = EC2Instance(**attrs)
# TODO: shallow copy?
inst.iscm = launch.iscm
return inst |
def append(self, func, *args, **kwargs):
"""
add a task to the chain
takes the same parameters as async_task()
"""
self.chain.append((func, args, kwargs))
# remove existing results
if self.started:
delete_group(self.group)
self.started = False
return self.length() | def function[append, parameter[self, func]]:
constant[
add a task to the chain
takes the same parameters as async_task()
]
call[name[self].chain.append, parameter[tuple[[<ast.Name object at 0x7da1b170ef20>, <ast.Name object at 0x7da1b170fd30>, <ast.Name object at 0x7da1b170d840>]]]]
if name[self].started begin[:]
call[name[delete_group], parameter[name[self].group]]
name[self].started assign[=] constant[False]
return[call[name[self].length, parameter[]]] | keyword[def] identifier[append] ( identifier[self] , identifier[func] ,* identifier[args] ,** identifier[kwargs] ):
literal[string]
identifier[self] . identifier[chain] . identifier[append] (( identifier[func] , identifier[args] , identifier[kwargs] ))
keyword[if] identifier[self] . identifier[started] :
identifier[delete_group] ( identifier[self] . identifier[group] )
identifier[self] . identifier[started] = keyword[False]
keyword[return] identifier[self] . identifier[length] () | def append(self, func, *args, **kwargs):
"""
add a task to the chain
takes the same parameters as async_task()
"""
self.chain.append((func, args, kwargs))
# remove existing results
if self.started:
delete_group(self.group)
self.started = False # depends on [control=['if'], data=[]]
return self.length() |
def waitGetPoses(self, pRenderPoseArray, unRenderPoseArrayCount, pGamePoseArray, unGamePoseArrayCount):
"""
Scene applications should call this function to get poses to render with (and optionally poses predicted an additional frame out to use for gameplay).
This function will block until "running start" milliseconds before the start of the frame, and should be called at the last moment before needing to
start rendering.
* Return codes:
        - IsNotSceneApplication (make sure to call VR_Init with VRApplication_Scene)
- DoNotHaveFocus (some other app has taken focus - this will throttle the call to 10hz to reduce the impact on that app)
"""
fn = self.function_table.waitGetPoses
# TODO: Automate this manual translation
# Convert non-pointer python arguments to pointers
if pRenderPoseArray is not None:
pRenderPoseArray = byref(pRenderPoseArray[0])
if pGamePoseArray is not None:
pGamePoseArray = byref(pGamePoseArray[0])
result = fn(pRenderPoseArray, unRenderPoseArrayCount, pGamePoseArray, unGamePoseArrayCount)
return result | def function[waitGetPoses, parameter[self, pRenderPoseArray, unRenderPoseArrayCount, pGamePoseArray, unGamePoseArrayCount]]:
constant[
Scene applications should call this function to get poses to render with (and optionally poses predicted an additional frame out to use for gameplay).
This function will block until "running start" milliseconds before the start of the frame, and should be called at the last moment before needing to
start rendering.
* Return codes:
        - IsNotSceneApplication (make sure to call VR_Init with VRApplication_Scene)
- DoNotHaveFocus (some other app has taken focus - this will throttle the call to 10hz to reduce the impact on that app)
]
variable[fn] assign[=] name[self].function_table.waitGetPoses
if compare[name[pRenderPoseArray] is_not constant[None]] begin[:]
variable[pRenderPoseArray] assign[=] call[name[byref], parameter[call[name[pRenderPoseArray]][constant[0]]]]
if compare[name[pGamePoseArray] is_not constant[None]] begin[:]
variable[pGamePoseArray] assign[=] call[name[byref], parameter[call[name[pGamePoseArray]][constant[0]]]]
variable[result] assign[=] call[name[fn], parameter[name[pRenderPoseArray], name[unRenderPoseArrayCount], name[pGamePoseArray], name[unGamePoseArrayCount]]]
return[name[result]] | keyword[def] identifier[waitGetPoses] ( identifier[self] , identifier[pRenderPoseArray] , identifier[unRenderPoseArrayCount] , identifier[pGamePoseArray] , identifier[unGamePoseArrayCount] ):
literal[string]
identifier[fn] = identifier[self] . identifier[function_table] . identifier[waitGetPoses]
keyword[if] identifier[pRenderPoseArray] keyword[is] keyword[not] keyword[None] :
identifier[pRenderPoseArray] = identifier[byref] ( identifier[pRenderPoseArray] [ literal[int] ])
keyword[if] identifier[pGamePoseArray] keyword[is] keyword[not] keyword[None] :
identifier[pGamePoseArray] = identifier[byref] ( identifier[pGamePoseArray] [ literal[int] ])
identifier[result] = identifier[fn] ( identifier[pRenderPoseArray] , identifier[unRenderPoseArrayCount] , identifier[pGamePoseArray] , identifier[unGamePoseArrayCount] )
keyword[return] identifier[result] | def waitGetPoses(self, pRenderPoseArray, unRenderPoseArrayCount, pGamePoseArray, unGamePoseArrayCount):
"""
Scene applications should call this function to get poses to render with (and optionally poses predicted an additional frame out to use for gameplay).
This function will block until "running start" milliseconds before the start of the frame, and should be called at the last moment before needing to
start rendering.
* Return codes:
        - IsNotSceneApplication (make sure to call VR_Init with VRApplication_Scene)
- DoNotHaveFocus (some other app has taken focus - this will throttle the call to 10hz to reduce the impact on that app)
"""
fn = self.function_table.waitGetPoses
# TODO: Automate this manual translation
# Convert non-pointer python arguments to pointers
if pRenderPoseArray is not None:
pRenderPoseArray = byref(pRenderPoseArray[0]) # depends on [control=['if'], data=['pRenderPoseArray']]
if pGamePoseArray is not None:
pGamePoseArray = byref(pGamePoseArray[0]) # depends on [control=['if'], data=['pGamePoseArray']]
result = fn(pRenderPoseArray, unRenderPoseArrayCount, pGamePoseArray, unGamePoseArrayCount)
return result |
def force_invalidate(self, vts):
"""Force invalidation of a VersionedTargetSet."""
for vt in vts.versioned_targets:
self._invalidator.force_invalidate(vt.cache_key)
vt.valid = False
self._invalidator.force_invalidate(vts.cache_key)
vts.valid = False | def function[force_invalidate, parameter[self, vts]]:
constant[Force invalidation of a VersionedTargetSet.]
for taget[name[vt]] in starred[name[vts].versioned_targets] begin[:]
call[name[self]._invalidator.force_invalidate, parameter[name[vt].cache_key]]
name[vt].valid assign[=] constant[False]
call[name[self]._invalidator.force_invalidate, parameter[name[vts].cache_key]]
name[vts].valid assign[=] constant[False] | keyword[def] identifier[force_invalidate] ( identifier[self] , identifier[vts] ):
literal[string]
keyword[for] identifier[vt] keyword[in] identifier[vts] . identifier[versioned_targets] :
identifier[self] . identifier[_invalidator] . identifier[force_invalidate] ( identifier[vt] . identifier[cache_key] )
identifier[vt] . identifier[valid] = keyword[False]
identifier[self] . identifier[_invalidator] . identifier[force_invalidate] ( identifier[vts] . identifier[cache_key] )
identifier[vts] . identifier[valid] = keyword[False] | def force_invalidate(self, vts):
"""Force invalidation of a VersionedTargetSet."""
for vt in vts.versioned_targets:
self._invalidator.force_invalidate(vt.cache_key)
vt.valid = False # depends on [control=['for'], data=['vt']]
self._invalidator.force_invalidate(vts.cache_key)
vts.valid = False |
def get_slice_location(dcmdata, teil=None):
""" get location of the slice
:param dcmdata: dicom data structure
    :param teil: filename. Used when slice location doesn't exist
:return:
"""
slice_location = None
if hasattr(dcmdata, 'SliceLocation'):
# print(dcmdata.SliceLocation)
# print(type(dcmdata.SliceLocation))
try:
slice_location = float(dcmdata.SliceLocation)
except Exception as exc:
logger.info("It is not possible to use SliceLocation")
logger.debug(traceback.format_exc())
if slice_location is None and hasattr(dcmdata, "SliceThickness") and teil is not None:
logger.debug(
"Estimating SliceLocation wiht image number and SliceThickness"
)
# from builtins import map
i = list(map(int, re.findall('\d+', teil)))
i = i[-1]
try:
slice_location = float(i * float(dcmdata.SliceThickness))
except ValueError as e:
print(type(dcmdata.SliceThickness))
print(dcmdata.SliceThickness)
logger.debug(traceback.format_exc())
logger.debug("SliceThickness problem")
if slice_location is None and hasattr(dcmdata, "ImagePositionPatient") and hasattr(dcmdata,
"ImageOrientationPatient"):
if dcmdata.ImageOrientationPatient == [1, 0, 0, 0, 1, 0]:
slice_location = dcmdata.ImagePositionPatient[2]
else:
logger.warning("Unknown ImageOrientationPatient")
if slice_location is None:
logger.warning("Problem with slice location")
return slice_location | def function[get_slice_location, parameter[dcmdata, teil]]:
constant[ get location of the slice
:param dcmdata: dicom data structure
    :param teil: filename. Used when slice location doesn't exist
:return:
]
variable[slice_location] assign[=] constant[None]
if call[name[hasattr], parameter[name[dcmdata], constant[SliceLocation]]] begin[:]
<ast.Try object at 0x7da18dc98250>
if <ast.BoolOp object at 0x7da18dc988e0> begin[:]
        call[name[logger].debug, parameter[constant[Estimating SliceLocation with image number and SliceThickness]]]
variable[i] assign[=] call[name[list], parameter[call[name[map], parameter[name[int], call[name[re].findall, parameter[constant[\d+], name[teil]]]]]]]
variable[i] assign[=] call[name[i]][<ast.UnaryOp object at 0x7da18dc9b0d0>]
<ast.Try object at 0x7da18dc99c60>
if <ast.BoolOp object at 0x7da18dc9ab00> begin[:]
if compare[name[dcmdata].ImageOrientationPatient equal[==] list[[<ast.Constant object at 0x7da18dc99510>, <ast.Constant object at 0x7da18dc9beb0>, <ast.Constant object at 0x7da18dc9b370>, <ast.Constant object at 0x7da18dc9a890>, <ast.Constant object at 0x7da18dc9aa70>, <ast.Constant object at 0x7da18dc99930>]]] begin[:]
variable[slice_location] assign[=] call[name[dcmdata].ImagePositionPatient][constant[2]]
if compare[name[slice_location] is constant[None]] begin[:]
call[name[logger].warning, parameter[constant[Problem with slice location]]]
return[name[slice_location]] | keyword[def] identifier[get_slice_location] ( identifier[dcmdata] , identifier[teil] = keyword[None] ):
literal[string]
identifier[slice_location] = keyword[None]
keyword[if] identifier[hasattr] ( identifier[dcmdata] , literal[string] ):
keyword[try] :
identifier[slice_location] = identifier[float] ( identifier[dcmdata] . identifier[SliceLocation] )
keyword[except] identifier[Exception] keyword[as] identifier[exc] :
identifier[logger] . identifier[info] ( literal[string] )
identifier[logger] . identifier[debug] ( identifier[traceback] . identifier[format_exc] ())
keyword[if] identifier[slice_location] keyword[is] keyword[None] keyword[and] identifier[hasattr] ( identifier[dcmdata] , literal[string] ) keyword[and] identifier[teil] keyword[is] keyword[not] keyword[None] :
identifier[logger] . identifier[debug] (
literal[string]
)
identifier[i] = identifier[list] ( identifier[map] ( identifier[int] , identifier[re] . identifier[findall] ( literal[string] , identifier[teil] )))
identifier[i] = identifier[i] [- literal[int] ]
keyword[try] :
identifier[slice_location] = identifier[float] ( identifier[i] * identifier[float] ( identifier[dcmdata] . identifier[SliceThickness] ))
keyword[except] identifier[ValueError] keyword[as] identifier[e] :
identifier[print] ( identifier[type] ( identifier[dcmdata] . identifier[SliceThickness] ))
identifier[print] ( identifier[dcmdata] . identifier[SliceThickness] )
identifier[logger] . identifier[debug] ( identifier[traceback] . identifier[format_exc] ())
identifier[logger] . identifier[debug] ( literal[string] )
keyword[if] identifier[slice_location] keyword[is] keyword[None] keyword[and] identifier[hasattr] ( identifier[dcmdata] , literal[string] ) keyword[and] identifier[hasattr] ( identifier[dcmdata] ,
literal[string] ):
keyword[if] identifier[dcmdata] . identifier[ImageOrientationPatient] ==[ literal[int] , literal[int] , literal[int] , literal[int] , literal[int] , literal[int] ]:
identifier[slice_location] = identifier[dcmdata] . identifier[ImagePositionPatient] [ literal[int] ]
keyword[else] :
identifier[logger] . identifier[warning] ( literal[string] )
keyword[if] identifier[slice_location] keyword[is] keyword[None] :
identifier[logger] . identifier[warning] ( literal[string] )
keyword[return] identifier[slice_location] | def get_slice_location(dcmdata, teil=None):
""" get location of the slice
:param dcmdata: dicom data structure
    :param teil: filename. Used when slice location doesn't exist
:return:
"""
slice_location = None
if hasattr(dcmdata, 'SliceLocation'):
# print(dcmdata.SliceLocation)
# print(type(dcmdata.SliceLocation))
try:
slice_location = float(dcmdata.SliceLocation) # depends on [control=['try'], data=[]]
except Exception as exc:
logger.info('It is not possible to use SliceLocation')
logger.debug(traceback.format_exc()) # depends on [control=['except'], data=[]] # depends on [control=['if'], data=[]]
if slice_location is None and hasattr(dcmdata, 'SliceThickness') and (teil is not None):
        logger.debug('Estimating SliceLocation with image number and SliceThickness')
# from builtins import map
i = list(map(int, re.findall('\\d+', teil)))
i = i[-1]
try:
slice_location = float(i * float(dcmdata.SliceThickness)) # depends on [control=['try'], data=[]]
except ValueError as e:
print(type(dcmdata.SliceThickness))
print(dcmdata.SliceThickness)
logger.debug(traceback.format_exc())
logger.debug('SliceThickness problem') # depends on [control=['except'], data=[]] # depends on [control=['if'], data=[]]
if slice_location is None and hasattr(dcmdata, 'ImagePositionPatient') and hasattr(dcmdata, 'ImageOrientationPatient'):
if dcmdata.ImageOrientationPatient == [1, 0, 0, 0, 1, 0]:
slice_location = dcmdata.ImagePositionPatient[2] # depends on [control=['if'], data=[]]
else:
logger.warning('Unknown ImageOrientationPatient') # depends on [control=['if'], data=[]]
if slice_location is None:
logger.warning('Problem with slice location') # depends on [control=['if'], data=[]]
return slice_location |
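
# Standalone sketch of the SliceThickness fallback above: the last run of
# digits in the filename stands in for the slice index. Filename and
# thickness values are hypothetical.
import re

teil = 'IM-0001-0042.dcm'
slice_thickness = 2.5
i = list(map(int, re.findall(r'\d+', teil)))[-1]  # -> 42
slice_location = float(i * slice_thickness)       # -> 105.0
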
def mcycle(return_X_y=True):
"""motorcyle acceleration dataset
Parameters
----------
return_X_y : bool,
if True, returns a model-ready tuple of data (X, y)
otherwise, returns a Pandas DataFrame
Returns
-------
model-ready tuple of data (X, y)
OR
Pandas DataFrame
Notes
-----
X contains the times after the impact.
y contains the acceleration.
Source:
https://vincentarelbundock.github.io/Rdatasets/doc/MASS/mcycle.html
"""
# y is real
# recommend LinearGAM
motor = pd.read_csv(PATH + '/mcycle.csv', index_col=0)
if return_X_y:
X = motor.times.values
y = motor.accel
return _clean_X_y(X, y)
return motor | def function[mcycle, parameter[return_X_y]]:
    constant[motorcycle acceleration dataset
Parameters
----------
return_X_y : bool,
if True, returns a model-ready tuple of data (X, y)
otherwise, returns a Pandas DataFrame
Returns
-------
model-ready tuple of data (X, y)
OR
Pandas DataFrame
Notes
-----
X contains the times after the impact.
y contains the acceleration.
Source:
https://vincentarelbundock.github.io/Rdatasets/doc/MASS/mcycle.html
]
variable[motor] assign[=] call[name[pd].read_csv, parameter[binary_operation[name[PATH] + constant[/mcycle.csv]]]]
if name[return_X_y] begin[:]
variable[X] assign[=] name[motor].times.values
variable[y] assign[=] name[motor].accel
return[call[name[_clean_X_y], parameter[name[X], name[y]]]]
return[name[motor]] | keyword[def] identifier[mcycle] ( identifier[return_X_y] = keyword[True] ):
literal[string]
identifier[motor] = identifier[pd] . identifier[read_csv] ( identifier[PATH] + literal[string] , identifier[index_col] = literal[int] )
keyword[if] identifier[return_X_y] :
identifier[X] = identifier[motor] . identifier[times] . identifier[values]
identifier[y] = identifier[motor] . identifier[accel]
keyword[return] identifier[_clean_X_y] ( identifier[X] , identifier[y] )
keyword[return] identifier[motor] | def mcycle(return_X_y=True):
"""motorcyle acceleration dataset
Parameters
----------
return_X_y : bool,
if True, returns a model-ready tuple of data (X, y)
otherwise, returns a Pandas DataFrame
Returns
-------
model-ready tuple of data (X, y)
OR
Pandas DataFrame
Notes
-----
X contains the times after the impact.
y contains the acceleration.
Source:
https://vincentarelbundock.github.io/Rdatasets/doc/MASS/mcycle.html
"""
# y is real
# recommend LinearGAM
motor = pd.read_csv(PATH + '/mcycle.csv', index_col=0)
if return_X_y:
X = motor.times.values
y = motor.accel
return _clean_X_y(X, y) # depends on [control=['if'], data=[]]
return motor |
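
# Usage sketch for the loader above; the inline comment recommends
# LinearGAM, so this assumes pygam is importable (the fit itself is
# illustrative, not part of the record).
from pygam import LinearGAM

X, y = mcycle(return_X_y=True)
gam = LinearGAM().fit(X, y)
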
def tar(self):
"""tar in bytes format"""
if not self.generated:
for data in self.generate():
pass
return self._tar_buffer.getvalue() | def function[tar, parameter[self]]:
constant[tar in bytes format]
if <ast.UnaryOp object at 0x7da1b15947c0> begin[:]
for taget[name[data]] in starred[call[name[self].generate, parameter[]]] begin[:]
pass
return[call[name[self]._tar_buffer.getvalue, parameter[]]] | keyword[def] identifier[tar] ( identifier[self] ):
literal[string]
keyword[if] keyword[not] identifier[self] . identifier[generated] :
keyword[for] identifier[data] keyword[in] identifier[self] . identifier[generate] ():
keyword[pass]
keyword[return] identifier[self] . identifier[_tar_buffer] . identifier[getvalue] () | def tar(self):
"""tar in bytes format"""
if not self.generated:
for data in self.generate():
pass # depends on [control=['for'], data=[]] # depends on [control=['if'], data=[]]
return self._tar_buffer.getvalue() |
def SetConsoleTextAttribute(stream_id, attrs):
"""Set a console text attribute."""
handle = handles[stream_id]
return windll.kernel32.SetConsoleTextAttribute(handle, attrs) | def function[SetConsoleTextAttribute, parameter[stream_id, attrs]]:
constant[Set a console text attribute.]
variable[handle] assign[=] call[name[handles]][name[stream_id]]
return[call[name[windll].kernel32.SetConsoleTextAttribute, parameter[name[handle], name[attrs]]]] | keyword[def] identifier[SetConsoleTextAttribute] ( identifier[stream_id] , identifier[attrs] ):
literal[string]
identifier[handle] = identifier[handles] [ identifier[stream_id] ]
keyword[return] identifier[windll] . identifier[kernel32] . identifier[SetConsoleTextAttribute] ( identifier[handle] , identifier[attrs] ) | def SetConsoleTextAttribute(stream_id, attrs):
"""Set a console text attribute."""
handle = handles[stream_id]
return windll.kernel32.SetConsoleTextAttribute(handle, attrs) |
def _storeSample(self, inputVector, trueCatIndex, partition=0):
"""
Store a training sample and associated category label
"""
# If this is the first sample, then allocate a numpy array
# of the appropriate size in which to store all samples.
if self._samples is None:
self._samples = numpy.zeros((0, len(inputVector)), dtype=RealNumpyDType)
assert self._labels is None
self._labels = []
    # Add the sample vector and category label
self._samples = numpy.concatenate((self._samples, numpy.atleast_2d(inputVector)), axis=0)
self._labels += [trueCatIndex]
# Add the partition ID
if self._partitions is None:
self._partitions = []
if partition is None:
partition = 0
self._partitions += [partition] | def function[_storeSample, parameter[self, inputVector, trueCatIndex, partition]]:
constant[
Store a training sample and associated category label
]
if compare[name[self]._samples is constant[None]] begin[:]
name[self]._samples assign[=] call[name[numpy].zeros, parameter[tuple[[<ast.Constant object at 0x7da20c7c9db0>, <ast.Call object at 0x7da20c7c8730>]]]]
assert[compare[name[self]._labels is constant[None]]]
name[self]._labels assign[=] list[[]]
name[self]._samples assign[=] call[name[numpy].concatenate, parameter[tuple[[<ast.Attribute object at 0x7da20c7c9ff0>, <ast.Call object at 0x7da20c7c9540>]]]]
<ast.AugAssign object at 0x7da20c7cb280>
if compare[name[self]._partitions is constant[None]] begin[:]
name[self]._partitions assign[=] list[[]]
if compare[name[partition] is constant[None]] begin[:]
variable[partition] assign[=] constant[0]
<ast.AugAssign object at 0x7da20c7cbd30> | keyword[def] identifier[_storeSample] ( identifier[self] , identifier[inputVector] , identifier[trueCatIndex] , identifier[partition] = literal[int] ):
literal[string]
keyword[if] identifier[self] . identifier[_samples] keyword[is] keyword[None] :
identifier[self] . identifier[_samples] = identifier[numpy] . identifier[zeros] (( literal[int] , identifier[len] ( identifier[inputVector] )), identifier[dtype] = identifier[RealNumpyDType] )
keyword[assert] identifier[self] . identifier[_labels] keyword[is] keyword[None]
identifier[self] . identifier[_labels] =[]
identifier[self] . identifier[_samples] = identifier[numpy] . identifier[concatenate] (( identifier[self] . identifier[_samples] , identifier[numpy] . identifier[atleast_2d] ( identifier[inputVector] )), identifier[axis] = literal[int] )
identifier[self] . identifier[_labels] +=[ identifier[trueCatIndex] ]
keyword[if] identifier[self] . identifier[_partitions] keyword[is] keyword[None] :
identifier[self] . identifier[_partitions] =[]
keyword[if] identifier[partition] keyword[is] keyword[None] :
identifier[partition] = literal[int]
identifier[self] . identifier[_partitions] +=[ identifier[partition] ] | def _storeSample(self, inputVector, trueCatIndex, partition=0):
"""
Store a training sample and associated category label
"""
# If this is the first sample, then allocate a numpy array
# of the appropriate size in which to store all samples.
if self._samples is None:
self._samples = numpy.zeros((0, len(inputVector)), dtype=RealNumpyDType)
assert self._labels is None
self._labels = [] # depends on [control=['if'], data=[]]
    # Add the sample vector and category label
self._samples = numpy.concatenate((self._samples, numpy.atleast_2d(inputVector)), axis=0)
self._labels += [trueCatIndex]
# Add the partition ID
if self._partitions is None:
self._partitions = [] # depends on [control=['if'], data=[]]
if partition is None:
partition = 0 # depends on [control=['if'], data=['partition']]
self._partitions += [partition] |
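
# The sample buffer in _storeSample grows by row-wise concatenation; a
# standalone illustration of that pattern (RealNumpyDType is assumed to be
# a float dtype in the original, so float64 stands in here).
import numpy

samples = numpy.zeros((0, 3), dtype=numpy.float64)
for vec in ([1, 2, 3], [4, 5, 6]):
    samples = numpy.concatenate((samples, numpy.atleast_2d(vec)), axis=0)
print(samples.shape)  # (2, 3) -- one row per stored training sample
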
def Read(self, file_object):
"""Reads a plist from a file-like object.
Args:
file_object (dfvfs.FileIO): a file-like object containing plist data.
Raises:
IOError: if the plist file-like object cannot be read.
OSError: if the plist file-like object cannot be read.
"""
try:
self.root_key = biplist.readPlist(file_object)
except (
biplist.NotBinaryPlistException,
biplist.InvalidPlistException) as exception:
raise IOError(exception) | def function[Read, parameter[self, file_object]]:
constant[Reads a plist from a file-like object.
Args:
file_object (dfvfs.FileIO): a file-like object containing plist data.
Raises:
IOError: if the plist file-like object cannot be read.
OSError: if the plist file-like object cannot be read.
]
<ast.Try object at 0x7da20c7cb4c0> | keyword[def] identifier[Read] ( identifier[self] , identifier[file_object] ):
literal[string]
keyword[try] :
identifier[self] . identifier[root_key] = identifier[biplist] . identifier[readPlist] ( identifier[file_object] )
keyword[except] (
identifier[biplist] . identifier[NotBinaryPlistException] ,
identifier[biplist] . identifier[InvalidPlistException] ) keyword[as] identifier[exception] :
keyword[raise] identifier[IOError] ( identifier[exception] ) | def Read(self, file_object):
"""Reads a plist from a file-like object.
Args:
file_object (dfvfs.FileIO): a file-like object containing plist data.
Raises:
IOError: if the plist file-like object cannot be read.
OSError: if the plist file-like object cannot be read.
"""
try:
self.root_key = biplist.readPlist(file_object) # depends on [control=['try'], data=[]]
except (biplist.NotBinaryPlistException, biplist.InvalidPlistException) as exception:
raise IOError(exception) # depends on [control=['except'], data=['exception']] |
def _speak_none_inherit(self, element):
"""
        Do not speak any content of the element and its descendants.
:param element: The element.
:type element: hatemile.util.html.htmldomelement.HTMLDOMElement
"""
self._isolate_text_node(element)
self._visit(element, self._speak_none) | def function[_speak_none_inherit, parameter[self, element]]:
constant[
        Do not speak any content of the element and its descendants.
:param element: The element.
:type element: hatemile.util.html.htmldomelement.HTMLDOMElement
]
call[name[self]._isolate_text_node, parameter[name[element]]]
call[name[self]._visit, parameter[name[element], name[self]._speak_none]] | keyword[def] identifier[_speak_none_inherit] ( identifier[self] , identifier[element] ):
literal[string]
identifier[self] . identifier[_isolate_text_node] ( identifier[element] )
identifier[self] . identifier[_visit] ( identifier[element] , identifier[self] . identifier[_speak_none] ) | def _speak_none_inherit(self, element):
"""
        Do not speak any content of the element and its descendants.
:param element: The element.
:type element: hatemile.util.html.htmldomelement.HTMLDOMElement
"""
self._isolate_text_node(element)
self._visit(element, self._speak_none) |
def save_as(self):
"""Dialog for getting name, location of dataset export."""
filename = splitext(self.filename)[0]
filename, _ = QFileDialog.getSaveFileName(self, 'Export events',
filename)
if filename == '':
return
self.filename = filename
short_filename = short_strings(basename(self.filename))
self.idx_filename.setText(short_filename) | def function[save_as, parameter[self]]:
constant[Dialog for getting name, location of dataset export.]
variable[filename] assign[=] call[call[name[splitext], parameter[name[self].filename]]][constant[0]]
<ast.Tuple object at 0x7da1b0e8f7f0> assign[=] call[name[QFileDialog].getSaveFileName, parameter[name[self], constant[Export events], name[filename]]]
if compare[name[filename] equal[==] constant[]] begin[:]
return[None]
name[self].filename assign[=] name[filename]
variable[short_filename] assign[=] call[name[short_strings], parameter[call[name[basename], parameter[name[self].filename]]]]
call[name[self].idx_filename.setText, parameter[name[short_filename]]] | keyword[def] identifier[save_as] ( identifier[self] ):
literal[string]
identifier[filename] = identifier[splitext] ( identifier[self] . identifier[filename] )[ literal[int] ]
identifier[filename] , identifier[_] = identifier[QFileDialog] . identifier[getSaveFileName] ( identifier[self] , literal[string] ,
identifier[filename] )
keyword[if] identifier[filename] == literal[string] :
keyword[return]
identifier[self] . identifier[filename] = identifier[filename]
identifier[short_filename] = identifier[short_strings] ( identifier[basename] ( identifier[self] . identifier[filename] ))
identifier[self] . identifier[idx_filename] . identifier[setText] ( identifier[short_filename] ) | def save_as(self):
"""Dialog for getting name, location of dataset export."""
filename = splitext(self.filename)[0]
(filename, _) = QFileDialog.getSaveFileName(self, 'Export events', filename)
if filename == '':
return # depends on [control=['if'], data=[]]
self.filename = filename
short_filename = short_strings(basename(self.filename))
self.idx_filename.setText(short_filename) |
def add_record_set(self, record_set):
"""Append a record set to the 'additions' for the change set.
:type record_set:
:class:`google.cloud.dns.resource_record_set.ResourceRecordSet`
:param record_set: the record set to append.
:raises: ``ValueError`` if ``record_set`` is not of the required type.
"""
if not isinstance(record_set, ResourceRecordSet):
raise ValueError("Pass a ResourceRecordSet")
self._additions += (record_set,) | def function[add_record_set, parameter[self, record_set]]:
constant[Append a record set to the 'additions' for the change set.
:type record_set:
:class:`google.cloud.dns.resource_record_set.ResourceRecordSet`
:param record_set: the record set to append.
:raises: ``ValueError`` if ``record_set`` is not of the required type.
]
if <ast.UnaryOp object at 0x7da18f00d690> begin[:]
<ast.Raise object at 0x7da2045643a0>
<ast.AugAssign object at 0x7da204566050> | keyword[def] identifier[add_record_set] ( identifier[self] , identifier[record_set] ):
literal[string]
keyword[if] keyword[not] identifier[isinstance] ( identifier[record_set] , identifier[ResourceRecordSet] ):
keyword[raise] identifier[ValueError] ( literal[string] )
identifier[self] . identifier[_additions] +=( identifier[record_set] ,) | def add_record_set(self, record_set):
"""Append a record set to the 'additions' for the change set.
:type record_set:
:class:`google.cloud.dns.resource_record_set.ResourceRecordSet`
:param record_set: the record set to append.
:raises: ``ValueError`` if ``record_set`` is not of the required type.
"""
if not isinstance(record_set, ResourceRecordSet):
raise ValueError('Pass a ResourceRecordSet') # depends on [control=['if'], data=[]]
self._additions += (record_set,) |
def normalize_api_url(self):
"""
Checks that the API URL used to initialize this object actually returns
JSON. If it doesn't, make some educated guesses and try to find the
correct URL.
:returns: a valid API URL or ``None``
"""
def tester(self, api_url):
"""
Attempts to fetch general information about the MediaWiki instance
in order to test whether *api_url* will return JSON.
"""
data = self._fetch_http(api_url, {'action': 'query',
'meta': 'siteinfo'})
try:
data_json = json.loads(data)
return (data, data_json)
except ValueError:
return (data, None)
data, data_json = tester(self, self._api_url)
if data_json:
return self._api_url
else:
# if there's an index.php in the URL, we might find the API
if 'index.php' in self._api_url:
test_api_url = self._api_url.split('index.php')[0] + 'api.php'
test_data, test_data_json = tester(self, test_api_url)
if test_data_json:
self._api_url = test_api_url
return self._api_url
return None | def function[normalize_api_url, parameter[self]]:
constant[
Checks that the API URL used to initialize this object actually returns
JSON. If it doesn't, make some educated guesses and try to find the
correct URL.
:returns: a valid API URL or ``None``
]
def function[tester, parameter[self, api_url]]:
constant[
Attempts to fetch general information about the MediaWiki instance
in order to test whether *api_url* will return JSON.
]
variable[data] assign[=] call[name[self]._fetch_http, parameter[name[api_url], dictionary[[<ast.Constant object at 0x7da1b23466b0>, <ast.Constant object at 0x7da1b2347700>], [<ast.Constant object at 0x7da1b2345180>, <ast.Constant object at 0x7da1b23473d0>]]]]
<ast.Try object at 0x7da1b2345060>
<ast.Tuple object at 0x7da1b2347be0> assign[=] call[name[tester], parameter[name[self], name[self]._api_url]]
if name[data_json] begin[:]
return[name[self]._api_url] | keyword[def] identifier[normalize_api_url] ( identifier[self] ):
literal[string]
keyword[def] identifier[tester] ( identifier[self] , identifier[api_url] ):
literal[string]
identifier[data] = identifier[self] . identifier[_fetch_http] ( identifier[api_url] ,{ literal[string] : literal[string] ,
literal[string] : literal[string] })
keyword[try] :
identifier[data_json] = identifier[json] . identifier[loads] ( identifier[data] )
keyword[return] ( identifier[data] , identifier[data_json] )
keyword[except] identifier[ValueError] :
keyword[return] ( identifier[data] , keyword[None] )
identifier[data] , identifier[data_json] = identifier[tester] ( identifier[self] , identifier[self] . identifier[_api_url] )
keyword[if] identifier[data_json] :
keyword[return] identifier[self] . identifier[_api_url]
keyword[else] :
keyword[if] literal[string] keyword[in] identifier[self] . identifier[_api_url] :
identifier[test_api_url] = identifier[self] . identifier[_api_url] . identifier[split] ( literal[string] )[ literal[int] ]+ literal[string]
identifier[test_data] , identifier[test_data_json] = identifier[tester] ( identifier[self] , identifier[test_api_url] )
keyword[if] identifier[test_data_json] :
identifier[self] . identifier[_api_url] = identifier[test_api_url]
keyword[return] identifier[self] . identifier[_api_url]
keyword[return] keyword[None] | def normalize_api_url(self):
"""
Checks that the API URL used to initialize this object actually returns
JSON. If it doesn't, make some educated guesses and try to find the
correct URL.
:returns: a valid API URL or ``None``
"""
def tester(self, api_url):
"""
Attempts to fetch general information about the MediaWiki instance
in order to test whether *api_url* will return JSON.
"""
data = self._fetch_http(api_url, {'action': 'query', 'meta': 'siteinfo'})
try:
data_json = json.loads(data)
return (data, data_json) # depends on [control=['try'], data=[]]
except ValueError:
return (data, None) # depends on [control=['except'], data=[]]
(data, data_json) = tester(self, self._api_url)
if data_json:
return self._api_url # depends on [control=['if'], data=[]]
else:
# if there's an index.php in the URL, we might find the API
if 'index.php' in self._api_url:
test_api_url = self._api_url.split('index.php')[0] + 'api.php'
(test_data, test_data_json) = tester(self, test_api_url)
if test_data_json:
self._api_url = test_api_url
return self._api_url # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
return None |
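
# The index.php -> api.php guess in normalize_api_url is plain string
# surgery; shown standalone with a hypothetical wiki URL.
api_url = 'https://wiki.example.org/w/index.php'
test_api_url = api_url.split('index.php')[0] + 'api.php'
assert test_api_url == 'https://wiki.example.org/w/api.php'
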
def write_dicts_to_json(self, data):
"""Saves .json file with data
:param data: Data
"""
with open(self.path, "w") as out:
json.dump(
data, # data
out, # file handler
indent=4, sort_keys=True # pretty print
) | def function[write_dicts_to_json, parameter[self, data]]:
constant[Saves .json file with data
:param data: Data
]
with call[name[open], parameter[name[self].path, constant[w]]] begin[:]
call[name[json].dump, parameter[name[data], name[out]]] | keyword[def] identifier[write_dicts_to_json] ( identifier[self] , identifier[data] ):
literal[string]
keyword[with] identifier[open] ( identifier[self] . identifier[path] , literal[string] ) keyword[as] identifier[out] :
identifier[json] . identifier[dump] (
identifier[data] ,
identifier[out] ,
identifier[indent] = literal[int] , identifier[sort_keys] = keyword[True]
) | def write_dicts_to_json(self, data):
"""Saves .json file with data
:param data: Data
"""
with open(self.path, 'w') as out: # data
# file handler
# pretty print
json.dump(data, out, indent=4, sort_keys=True) # depends on [control=['with'], data=['out']] |
def smart_email_send(self, smart_email_id, to, consent_to_track, cc=None, bcc=None, attachments=None, data=None, add_recipients_to_list=None):
"""Sends the smart email."""
validate_consent_to_track(consent_to_track)
body = {
"To": to,
"CC": cc,
"BCC": bcc,
"Attachments": attachments,
"Data": data,
"AddRecipientsToList": add_recipients_to_list,
"ConsentToTrack": consent_to_track,
}
response = self._post("/transactional/smartEmail/%s/send" %
smart_email_id, json.dumps(body))
return json_to_py(response) | def function[smart_email_send, parameter[self, smart_email_id, to, consent_to_track, cc, bcc, attachments, data, add_recipients_to_list]]:
constant[Sends the smart email.]
call[name[validate_consent_to_track], parameter[name[consent_to_track]]]
variable[body] assign[=] dictionary[[<ast.Constant object at 0x7da18f721750>, <ast.Constant object at 0x7da18f722dd0>, <ast.Constant object at 0x7da20c7ca260>, <ast.Constant object at 0x7da20c7c8d90>, <ast.Constant object at 0x7da20c7cb0a0>, <ast.Constant object at 0x7da20c7cac80>, <ast.Constant object at 0x7da20c7c9030>], [<ast.Name object at 0x7da20c7c9780>, <ast.Name object at 0x7da20c7c9630>, <ast.Name object at 0x7da20c7c87f0>, <ast.Name object at 0x7da20c7c9db0>, <ast.Name object at 0x7da20c7c8100>, <ast.Name object at 0x7da20c7c9870>, <ast.Name object at 0x7da20c7caec0>]]
variable[response] assign[=] call[name[self]._post, parameter[binary_operation[constant[/transactional/smartEmail/%s/send] <ast.Mod object at 0x7da2590d6920> name[smart_email_id]], call[name[json].dumps, parameter[name[body]]]]]
return[call[name[json_to_py], parameter[name[response]]]] | keyword[def] identifier[smart_email_send] ( identifier[self] , identifier[smart_email_id] , identifier[to] , identifier[consent_to_track] , identifier[cc] = keyword[None] , identifier[bcc] = keyword[None] , identifier[attachments] = keyword[None] , identifier[data] = keyword[None] , identifier[add_recipients_to_list] = keyword[None] ):
literal[string]
identifier[validate_consent_to_track] ( identifier[consent_to_track] )
identifier[body] ={
literal[string] : identifier[to] ,
literal[string] : identifier[cc] ,
literal[string] : identifier[bcc] ,
literal[string] : identifier[attachments] ,
literal[string] : identifier[data] ,
literal[string] : identifier[add_recipients_to_list] ,
literal[string] : identifier[consent_to_track] ,
}
identifier[response] = identifier[self] . identifier[_post] ( literal[string] %
identifier[smart_email_id] , identifier[json] . identifier[dumps] ( identifier[body] ))
keyword[return] identifier[json_to_py] ( identifier[response] ) | def smart_email_send(self, smart_email_id, to, consent_to_track, cc=None, bcc=None, attachments=None, data=None, add_recipients_to_list=None):
"""Sends the smart email."""
validate_consent_to_track(consent_to_track)
body = {'To': to, 'CC': cc, 'BCC': bcc, 'Attachments': attachments, 'Data': data, 'AddRecipientsToList': add_recipients_to_list, 'ConsentToTrack': consent_to_track}
response = self._post('/transactional/smartEmail/%s/send' % smart_email_id, json.dumps(body))
return json_to_py(response) |
def find_font(face, bold, italic):
"""Find font"""
bold = FC_WEIGHT_BOLD if bold else FC_WEIGHT_REGULAR
italic = FC_SLANT_ITALIC if italic else FC_SLANT_ROMAN
face = face.encode('utf8')
fontconfig.FcInit()
pattern = fontconfig.FcPatternCreate()
fontconfig.FcPatternAddInteger(pattern, FC_WEIGHT, bold)
fontconfig.FcPatternAddInteger(pattern, FC_SLANT, italic)
fontconfig.FcPatternAddString(pattern, FC_FAMILY, face)
fontconfig.FcConfigSubstitute(0, pattern, FcMatchPattern)
fontconfig.FcDefaultSubstitute(pattern)
result = FcType()
match = fontconfig.FcFontMatch(0, pattern, byref(result))
fontconfig.FcPatternDestroy(pattern)
if not match:
raise RuntimeError('Could not match font "%s"' % face)
value = FcValue()
fontconfig.FcPatternGet(match, FC_FAMILY, 0, byref(value))
if(value.u.s != face):
warnings.warn('Could not find face match "%s", falling back to "%s"'
% (face, value.u.s))
result = fontconfig.FcPatternGet(match, FC_FILE, 0, byref(value))
if result != 0:
raise RuntimeError('No filename or FT face for "%s"' % face)
fname = value.u.s
return fname.decode('utf-8') | def function[find_font, parameter[face, bold, italic]]:
constant[Find font]
variable[bold] assign[=] <ast.IfExp object at 0x7da1b0ebd570>
variable[italic] assign[=] <ast.IfExp object at 0x7da1b0ebdba0>
variable[face] assign[=] call[name[face].encode, parameter[constant[utf8]]]
call[name[fontconfig].FcInit, parameter[]]
variable[pattern] assign[=] call[name[fontconfig].FcPatternCreate, parameter[]]
call[name[fontconfig].FcPatternAddInteger, parameter[name[pattern], name[FC_WEIGHT], name[bold]]]
call[name[fontconfig].FcPatternAddInteger, parameter[name[pattern], name[FC_SLANT], name[italic]]]
call[name[fontconfig].FcPatternAddString, parameter[name[pattern], name[FC_FAMILY], name[face]]]
call[name[fontconfig].FcConfigSubstitute, parameter[constant[0], name[pattern], name[FcMatchPattern]]]
call[name[fontconfig].FcDefaultSubstitute, parameter[name[pattern]]]
variable[result] assign[=] call[name[FcType], parameter[]]
variable[match] assign[=] call[name[fontconfig].FcFontMatch, parameter[constant[0], name[pattern], call[name[byref], parameter[name[result]]]]]
call[name[fontconfig].FcPatternDestroy, parameter[name[pattern]]]
if <ast.UnaryOp object at 0x7da1b0ebeb60> begin[:]
<ast.Raise object at 0x7da1b0ebd480>
variable[value] assign[=] call[name[FcValue], parameter[]]
call[name[fontconfig].FcPatternGet, parameter[name[match], name[FC_FAMILY], constant[0], call[name[byref], parameter[name[value]]]]]
if compare[name[value].u.s not_equal[!=] name[face]] begin[:]
call[name[warnings].warn, parameter[binary_operation[constant[Could not find face match "%s", falling back to "%s"] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da1b0ebfbe0>, <ast.Attribute object at 0x7da1b0ebcb80>]]]]]
variable[result] assign[=] call[name[fontconfig].FcPatternGet, parameter[name[match], name[FC_FILE], constant[0], call[name[byref], parameter[name[value]]]]]
if compare[name[result] not_equal[!=] constant[0]] begin[:]
<ast.Raise object at 0x7da1b0ebf1c0>
variable[fname] assign[=] name[value].u.s
return[call[name[fname].decode, parameter[constant[utf-8]]]] | keyword[def] identifier[find_font] ( identifier[face] , identifier[bold] , identifier[italic] ):
literal[string]
identifier[bold] = identifier[FC_WEIGHT_BOLD] keyword[if] identifier[bold] keyword[else] identifier[FC_WEIGHT_REGULAR]
identifier[italic] = identifier[FC_SLANT_ITALIC] keyword[if] identifier[italic] keyword[else] identifier[FC_SLANT_ROMAN]
identifier[face] = identifier[face] . identifier[encode] ( literal[string] )
identifier[fontconfig] . identifier[FcInit] ()
identifier[pattern] = identifier[fontconfig] . identifier[FcPatternCreate] ()
identifier[fontconfig] . identifier[FcPatternAddInteger] ( identifier[pattern] , identifier[FC_WEIGHT] , identifier[bold] )
identifier[fontconfig] . identifier[FcPatternAddInteger] ( identifier[pattern] , identifier[FC_SLANT] , identifier[italic] )
identifier[fontconfig] . identifier[FcPatternAddString] ( identifier[pattern] , identifier[FC_FAMILY] , identifier[face] )
identifier[fontconfig] . identifier[FcConfigSubstitute] ( literal[int] , identifier[pattern] , identifier[FcMatchPattern] )
identifier[fontconfig] . identifier[FcDefaultSubstitute] ( identifier[pattern] )
identifier[result] = identifier[FcType] ()
identifier[match] = identifier[fontconfig] . identifier[FcFontMatch] ( literal[int] , identifier[pattern] , identifier[byref] ( identifier[result] ))
identifier[fontconfig] . identifier[FcPatternDestroy] ( identifier[pattern] )
keyword[if] keyword[not] identifier[match] :
keyword[raise] identifier[RuntimeError] ( literal[string] % identifier[face] )
identifier[value] = identifier[FcValue] ()
identifier[fontconfig] . identifier[FcPatternGet] ( identifier[match] , identifier[FC_FAMILY] , literal[int] , identifier[byref] ( identifier[value] ))
keyword[if] ( identifier[value] . identifier[u] . identifier[s] != identifier[face] ):
identifier[warnings] . identifier[warn] ( literal[string]
%( identifier[face] , identifier[value] . identifier[u] . identifier[s] ))
identifier[result] = identifier[fontconfig] . identifier[FcPatternGet] ( identifier[match] , identifier[FC_FILE] , literal[int] , identifier[byref] ( identifier[value] ))
keyword[if] identifier[result] != literal[int] :
keyword[raise] identifier[RuntimeError] ( literal[string] % identifier[face] )
identifier[fname] = identifier[value] . identifier[u] . identifier[s]
keyword[return] identifier[fname] . identifier[decode] ( literal[string] ) | def find_font(face, bold, italic):
"""Find font"""
bold = FC_WEIGHT_BOLD if bold else FC_WEIGHT_REGULAR
italic = FC_SLANT_ITALIC if italic else FC_SLANT_ROMAN
face = face.encode('utf8')
fontconfig.FcInit()
pattern = fontconfig.FcPatternCreate()
fontconfig.FcPatternAddInteger(pattern, FC_WEIGHT, bold)
fontconfig.FcPatternAddInteger(pattern, FC_SLANT, italic)
fontconfig.FcPatternAddString(pattern, FC_FAMILY, face)
fontconfig.FcConfigSubstitute(0, pattern, FcMatchPattern)
fontconfig.FcDefaultSubstitute(pattern)
result = FcType()
match = fontconfig.FcFontMatch(0, pattern, byref(result))
fontconfig.FcPatternDestroy(pattern)
if not match:
raise RuntimeError('Could not match font "%s"' % face) # depends on [control=['if'], data=[]]
value = FcValue()
fontconfig.FcPatternGet(match, FC_FAMILY, 0, byref(value))
if value.u.s != face:
warnings.warn('Could not find face match "%s", falling back to "%s"' % (face, value.u.s)) # depends on [control=['if'], data=['face']]
result = fontconfig.FcPatternGet(match, FC_FILE, 0, byref(value))
if result != 0:
raise RuntimeError('No filename or FT face for "%s"' % face) # depends on [control=['if'], data=[]]
fname = value.u.s
return fname.decode('utf-8') |
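A minimal usage sketch for the row above, assuming its module-level fontconfig ctypes bindings (fontconfig, the FC_* constants, FcType, FcValue) are already set up exactly as the function expects:

path = find_font("DejaVu Sans", bold=True, italic=False)  # hypothetical face name
print(path)  # e.g. /usr/share/fonts/truetype/dejavu/DejaVuSans-Bold.ttf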
def clean_query(self):
"""
Removes any `None` value from an elasticsearch query.
"""
if self.query:
for key, value in self.query.items():
if isinstance(value, list) and None in value:
self.query[key] = [v for v in value if v is not None] | def function[clean_query, parameter[self]]:
constant[
Removes any `None` value from an elasticsearch query.
]
if name[self].query begin[:]
for taget[tuple[[<ast.Name object at 0x7da1b0a70070>, <ast.Name object at 0x7da1b0a70880>]]] in starred[call[name[self].query.items, parameter[]]] begin[:]
if <ast.BoolOp object at 0x7da1b0a71cf0> begin[:]
call[name[self].query][name[key]] assign[=] <ast.ListComp object at 0x7da1b0a1ec50> | keyword[def] identifier[clean_query] ( identifier[self] ):
literal[string]
keyword[if] identifier[self] . identifier[query] :
keyword[for] identifier[key] , identifier[value] keyword[in] identifier[self] . identifier[query] . identifier[items] ():
keyword[if] identifier[isinstance] ( identifier[value] , identifier[list] ) keyword[and] keyword[None] keyword[in] identifier[value] :
identifier[self] . identifier[query] [ identifier[key] ]=[ identifier[v] keyword[for] identifier[v] keyword[in] identifier[value] keyword[if] identifier[v] keyword[is] keyword[not] keyword[None] ] | def clean_query(self):
"""
Removes any `None` value from an elasticsearch query.
"""
if self.query:
for (key, value) in self.query.items():
if isinstance(value, list) and None in value:
self.query[key] = [v for v in value if v is not None] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]] # depends on [control=['if'], data=[]] |
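A self-contained sketch of the None-stripping behavior above; SimpleNamespace stands in for the real elasticsearch query holder, since the function only touches a .query attribute:

from types import SimpleNamespace

q = SimpleNamespace(query={"tags": ["a", None, "b"], "size": 10})
clean_query(q)
print(q.query)  # {'tags': ['a', 'b'], 'size': 10}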
def setResponseFromWSAddress(self, address, localURL):
'''Server-side has to set these fields in response.
address -- Address instance, representing a WS-Address
'''
self.From = localURL
self.header_pyobjs = None
pyobjs = []
namespaceURI = self.wsAddressURI
    for nsuri, name, value in (
            (namespaceURI, "Action", self._action),
            (namespaceURI, "MessageID", "uuid:%s" % time.time()),
            (namespaceURI, "RelatesTo", address.getMessageID()),
            (namespaceURI, "To", self.anonymousURI)):
typecode = GED(nsuri, name)
pyobjs.append(typecode.pyclass(value))
typecode = GED(nsuri, "From")
pyobj = typecode.pyclass()
pyobj._Address = self.From
pyobjs.append(pyobj)
self.header_pyobjs = tuple(pyobjs) | def function[setResponseFromWSAddress, parameter[self, address, localURL]]:
constant[Server-side has to set these fields in response.
address -- Address instance, representing a WS-Address
]
name[self].From assign[=] name[localURL]
name[self].header_pyobjs assign[=] constant[None]
variable[pyobjs] assign[=] list[[]]
variable[namespaceURI] assign[=] name[self].wsAddressURI
for taget[tuple[[<ast.Name object at 0x7da2054a7910>, <ast.Name object at 0x7da2054a4fd0>, <ast.Name object at 0x7da2054a5b40>]]] in starred[tuple[[<ast.Tuple object at 0x7da2054a7bb0>, <ast.Tuple object at 0x7da2054a7610>, <ast.Tuple object at 0x7da2054a4100>, <ast.Tuple object at 0x7da2054a5630>]]] begin[:]
variable[typecode] assign[=] call[name[GED], parameter[name[nsuri], name[name]]]
call[name[pyobjs].append, parameter[call[name[typecode].pyclass, parameter[name[value]]]]]
variable[typecode] assign[=] call[name[GED], parameter[name[nsuri], constant[From]]]
variable[pyobj] assign[=] call[name[typecode].pyclass, parameter[]]
name[pyobj]._Address assign[=] name[self].From
call[name[pyobjs].append, parameter[name[pyobj]]]
name[self].header_pyobjs assign[=] call[name[tuple], parameter[name[pyobjs]]] | keyword[def] identifier[setResponseFromWSAddress] ( identifier[self] , identifier[address] , identifier[localURL] ):
literal[string]
identifier[self] . identifier[From] = identifier[localURL]
identifier[self] . identifier[header_pyobjs] = keyword[None]
identifier[pyobjs] =[]
identifier[namespaceURI] = identifier[self] . identifier[wsAddressURI]
keyword[for] identifier[nsuri] , identifier[name] , identifier[value] keyword[in] (( identifier[namespaceURI] , literal[string] , identifier[self] . identifier[_action] ),
( identifier[namespaceURI] , literal[string] , literal[string] % identifier[time] . identifier[time] ()),
( identifier[namespaceURI] , literal[string] , identifier[address] . identifier[getMessageID] ()),
( identifier[namespaceURI] , literal[string] , identifier[self] . identifier[anonymousURI] ),):
identifier[typecode] = identifier[GED] ( identifier[nsuri] , identifier[name] )
identifier[pyobjs] . identifier[append] ( identifier[typecode] . identifier[pyclass] ( identifier[value] ))
identifier[typecode] = identifier[GED] ( identifier[nsuri] , literal[string] )
identifier[pyobj] = identifier[typecode] . identifier[pyclass] ()
identifier[pyobj] . identifier[_Address] = identifier[self] . identifier[From]
identifier[pyobjs] . identifier[append] ( identifier[pyobj] )
identifier[self] . identifier[header_pyobjs] = identifier[tuple] ( identifier[pyobjs] ) | def setResponseFromWSAddress(self, address, localURL):
"""Server-side has to set these fields in response.
address -- Address instance, representing a WS-Address
"""
self.From = localURL
self.header_pyobjs = None
pyobjs = []
namespaceURI = self.wsAddressURI
for (nsuri, name, value) in ((namespaceURI, 'Action', self._action), (namespaceURI, 'MessageID', 'uuid:%s' % time.time()), (namespaceURI, 'RelatesTo', address.getMessageID()), (namespaceURI, 'To', self.anonymousURI)):
typecode = GED(nsuri, name)
pyobjs.append(typecode.pyclass(value)) # depends on [control=['for'], data=[]]
typecode = GED(nsuri, 'From')
pyobj = typecode.pyclass()
pyobj._Address = self.From
pyobjs.append(pyobj)
self.header_pyobjs = tuple(pyobjs) |
async def update_contents(self, **params):
"""Updates users content row
Accepts:
- txid
- cid
- description
- write_price
- read_price
- confirmed
- coinid
"""
if params.get("message"):
params = json.loads(params.get("message", "{}"))
if not params:
return {"error":400, "reason":"Missed required fields"}
txid = params.get("txid")
coinid = params.get("coinid").upper()
    try:
        # normalize testnet ids (e.g. "BTCTEST" -> "BTC"); tolerate odd values
        coinid = coinid.replace("TEST", "")
    except:
        pass
database = client[coinid]
content_collection = database[settings.CONTENT]
content = await content_collection.find_one({"txid":txid})
if not content:
return {"error":404,
"reason":"Update content. Content with txid %s not found" % txid}
if content.get("hash"):
self.account.blockchain.setendpoint(settings.bridges[coinid])
cid = await self.account.blockchain.getcid(hash=content["hash"])
await content_collection.find_one_and_update({"txid":txid}, {"$set":{"cid":int(cid)}})
await content_collection.find_one_and_update({"txid":txid}, {"$set":{"hash":None}})
updated = await content_collection.find_one({"txid":txid})
return {i:updated[i] for i in updated if i != "_id"} | <ast.AsyncFunctionDef object at 0x7da237eee740> | keyword[async] keyword[def] identifier[update_contents] ( identifier[self] ,** identifier[params] ):
literal[string]
keyword[if] identifier[params] . identifier[get] ( literal[string] ):
identifier[params] = identifier[json] . identifier[loads] ( identifier[params] . identifier[get] ( literal[string] , literal[string] ))
keyword[if] keyword[not] identifier[params] :
keyword[return] { literal[string] : literal[int] , literal[string] : literal[string] }
identifier[txid] = identifier[params] . identifier[get] ( literal[string] )
identifier[coinid] = identifier[params] . identifier[get] ( literal[string] ). identifier[upper] ()
keyword[try] :
identifier[coinid] = identifier[coinid] . identifier[replace] ( literal[string] , literal[string] )
keyword[except] :
keyword[pass]
identifier[database] = identifier[client] [ identifier[coinid] ]
identifier[content_collection] = identifier[database] [ identifier[settings] . identifier[CONTENT] ]
identifier[content] = keyword[await] identifier[content_collection] . identifier[find_one] ({ literal[string] : identifier[txid] })
keyword[if] keyword[not] identifier[content] :
keyword[return] { literal[string] : literal[int] ,
literal[string] : literal[string] % identifier[txid] }
keyword[if] identifier[content] . identifier[get] ( literal[string] ):
identifier[self] . identifier[account] . identifier[blockchain] . identifier[setendpoint] ( identifier[settings] . identifier[bridges] [ identifier[coinid] ])
identifier[cid] = keyword[await] identifier[self] . identifier[account] . identifier[blockchain] . identifier[getcid] ( identifier[hash] = identifier[content] [ literal[string] ])
keyword[await] identifier[content_collection] . identifier[find_one_and_update] ({ literal[string] : identifier[txid] },{ literal[string] :{ literal[string] : identifier[int] ( identifier[cid] )}})
keyword[await] identifier[content_collection] . identifier[find_one_and_update] ({ literal[string] : identifier[txid] },{ literal[string] :{ literal[string] : keyword[None] }})
identifier[updated] = keyword[await] identifier[content_collection] . identifier[find_one] ({ literal[string] : identifier[txid] })
keyword[return] { identifier[i] : identifier[updated] [ identifier[i] ] keyword[for] identifier[i] keyword[in] identifier[updated] keyword[if] identifier[i] != literal[string] } | async def update_contents(self, **params):
"""Updates users content row
Accepts:
- txid
- cid
- description
- write_price
- read_price
- confirmed
- coinid
"""
if params.get('message'):
params = json.loads(params.get('message', '{}')) # depends on [control=['if'], data=[]]
if not params:
return {'error': 400, 'reason': 'Missed required fields'} # depends on [control=['if'], data=[]]
txid = params.get('txid')
coinid = params.get('coinid').upper()
try:
coinid = coinid.replace('TEST', '') # depends on [control=['try'], data=[]]
except:
pass # depends on [control=['except'], data=[]]
database = client[coinid]
content_collection = database[settings.CONTENT]
content = await content_collection.find_one({'txid': txid})
if not content:
return {'error': 404, 'reason': 'Update content. Content with txid %s not found' % txid} # depends on [control=['if'], data=[]]
if content.get('hash'):
self.account.blockchain.setendpoint(settings.bridges[coinid])
cid = await self.account.blockchain.getcid(hash=content['hash'])
await content_collection.find_one_and_update({'txid': txid}, {'$set': {'cid': int(cid)}})
await content_collection.find_one_and_update({'txid': txid}, {'$set': {'hash': None}}) # depends on [control=['if'], data=[]]
updated = await content_collection.find_one({'txid': txid})
return {i: updated[i] for i in updated if i != '_id'} |
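A hedged invocation sketch for the coroutine above; client, settings, and the blockchain bridge are module globals in the original service, so handler and the txid here are purely illustrative:

import asyncio

async def demo(handler):
    # handler is assumed to expose update_contents as defined above
    updated = await handler.update_contents(txid="<txid>", coinid="BTCTEST")
    print(updated.get("cid"), updated.get("error"))

# asyncio.get_event_loop().run_until_complete(demo(handler))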
def save(self, path):
''' Save source video to file.
Args:
path (str): Filename to save to.
Notes: Saves entire source video to file, not just currently selected
frames.
'''
# IMPORTANT WARNING: saves entire source video
self.clip.write_videofile(path, audio_fps=self.clip.audio.fps) | def function[save, parameter[self, path]]:
constant[ Save source video to file.
Args:
path (str): Filename to save to.
Notes: Saves entire source video to file, not just currently selected
frames.
]
call[name[self].clip.write_videofile, parameter[name[path]]] | keyword[def] identifier[save] ( identifier[self] , identifier[path] ):
literal[string]
identifier[self] . identifier[clip] . identifier[write_videofile] ( identifier[path] , identifier[audio_fps] = identifier[self] . identifier[clip] . identifier[audio] . identifier[fps] ) | def save(self, path):
""" Save source video to file.
Args:
path (str): Filename to save to.
Notes: Saves entire source video to file, not just currently selected
frames.
"""
# IMPORTANT WARNING: saves entire source video
self.clip.write_videofile(path, audio_fps=self.clip.audio.fps) |
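A hedged moviepy sketch for the method above; it assumes the clip has an audio track (clip.audio would be None for silent video) and reuses the row's function directly on a stand-in object:

from types import SimpleNamespace
from moviepy.editor import VideoFileClip

stim = SimpleNamespace(clip=VideoFileClip("input.mp4"))  # hypothetical file
save(stim, "copy.mp4")  # re-encodes the entire source, as the docstring warns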
def _get_dbid2goids(associations):
"""Return gene2go data for user-specified taxids."""
id2gos = cx.defaultdict(set)
for ntd in associations:
id2gos[ntd.DB_ID].add(ntd.GO_ID)
return dict(id2gos) | def function[_get_dbid2goids, parameter[associations]]:
constant[Return gene2go data for user-specified taxids.]
variable[id2gos] assign[=] call[name[cx].defaultdict, parameter[name[set]]]
for taget[name[ntd]] in starred[name[associations]] begin[:]
call[call[name[id2gos]][name[ntd].DB_ID].add, parameter[name[ntd].GO_ID]]
return[call[name[dict], parameter[name[id2gos]]]] | keyword[def] identifier[_get_dbid2goids] ( identifier[associations] ):
literal[string]
identifier[id2gos] = identifier[cx] . identifier[defaultdict] ( identifier[set] )
keyword[for] identifier[ntd] keyword[in] identifier[associations] :
identifier[id2gos] [ identifier[ntd] . identifier[DB_ID] ]. identifier[add] ( identifier[ntd] . identifier[GO_ID] )
keyword[return] identifier[dict] ( identifier[id2gos] ) | def _get_dbid2goids(associations):
"""Return gene2go data for user-specified taxids."""
id2gos = cx.defaultdict(set)
for ntd in associations:
id2gos[ntd.DB_ID].add(ntd.GO_ID) # depends on [control=['for'], data=['ntd']]
return dict(id2gos) |
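A runnable sketch of the helper above, assuming the row's cx alias resolves to the standard collections module; namedtuples supply the DB_ID / GO_ID fields it reads:

from collections import namedtuple

Assoc = namedtuple("Assoc", "DB_ID GO_ID")
assocs = [Assoc("P12345", "GO:0005737"), Assoc("P12345", "GO:0008150"),
          Assoc("Q67890", "GO:0005737")]
print(_get_dbid2goids(assocs))
# {'P12345': {'GO:0005737', 'GO:0008150'}, 'Q67890': {'GO:0005737'}}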
def get(name, download=False, install=False):
'''
.. versionadded:: 2017.7.0
Returns details for the named update
Args:
name (str):
The name of the update you're searching for. This can be the GUID, a
KB number, or any part of the name of the update. GUIDs and KBs are
preferred. Run ``list`` to get the GUID for the update you're
looking for.
download (bool):
Download the update returned by this function. Run this function
first to see if the update exists, then set ``download=True`` to
download the update.
install (bool):
Install the update returned by this function. Run this function
first to see if the update exists, then set ``install=True`` to
install the update.
Returns:
dict: Returns a dict containing a list of updates that match the name if
download and install are both set to False. Should usually be a single
update, but can return multiple if a partial name is given.
If download or install is set to true it will return the results of the
operation.
.. code-block:: cfg
List of Updates:
{'<GUID>': {'Title': <title>,
'KB': <KB>,
'GUID': <the globally unique identifier for the update>
'Description': <description>,
'Downloaded': <has the update been downloaded>,
'Installed': <has the update been installed>,
'Mandatory': <is the update mandatory>,
'UserInput': <is user input required>,
'EULAAccepted': <has the EULA been accepted>,
'Severity': <update severity>,
'NeedsReboot': <is the update installed and awaiting reboot>,
'RebootBehavior': <will the update require a reboot>,
'Categories': [ '<category 1>',
'<category 2>',
...]
}
}
CLI Examples:
.. code-block:: bash
# Recommended Usage using GUID without braces
# Use this to find the status of a specific update
salt '*' win_wua.get 12345678-abcd-1234-abcd-1234567890ab
# Use the following if you don't know the GUID:
# Using a KB number
# Not all updates have an associated KB
salt '*' win_wua.get KB3030298
# Using part or all of the name of the update
# Could possibly return multiple results
# Not all updates have an associated KB
salt '*' win_wua.get 'Microsoft Camera Codec Pack'
'''
# Create a Windows Update Agent instance
wua = salt.utils.win_update.WindowsUpdateAgent()
# Search for Update
updates = wua.search(name)
ret = {}
# Download
if download or install:
ret['Download'] = wua.download(updates)
# Install
if install:
ret['Install'] = wua.install(updates)
return ret if ret else updates.list() | def function[get, parameter[name, download, install]]:
constant[
.. versionadded:: 2017.7.0
Returns details for the named update
Args:
name (str):
The name of the update you're searching for. This can be the GUID, a
KB number, or any part of the name of the update. GUIDs and KBs are
preferred. Run ``list`` to get the GUID for the update you're
looking for.
download (bool):
Download the update returned by this function. Run this function
first to see if the update exists, then set ``download=True`` to
download the update.
install (bool):
Install the update returned by this function. Run this function
first to see if the update exists, then set ``install=True`` to
install the update.
Returns:
dict: Returns a dict containing a list of updates that match the name if
download and install are both set to False. Should usually be a single
update, but can return multiple if a partial name is given.
If download or install is set to true it will return the results of the
operation.
.. code-block:: cfg
List of Updates:
{'<GUID>': {'Title': <title>,
'KB': <KB>,
'GUID': <the globally unique identifier for the update>
'Description': <description>,
'Downloaded': <has the update been downloaded>,
'Installed': <has the update been installed>,
'Mandatory': <is the update mandatory>,
'UserInput': <is user input required>,
'EULAAccepted': <has the EULA been accepted>,
'Severity': <update severity>,
'NeedsReboot': <is the update installed and awaiting reboot>,
'RebootBehavior': <will the update require a reboot>,
'Categories': [ '<category 1>',
'<category 2>',
...]
}
}
CLI Examples:
.. code-block:: bash
# Recommended Usage using GUID without braces
# Use this to find the status of a specific update
salt '*' win_wua.get 12345678-abcd-1234-abcd-1234567890ab
# Use the following if you don't know the GUID:
# Using a KB number
# Not all updates have an associated KB
salt '*' win_wua.get KB3030298
# Using part or all of the name of the update
# Could possibly return multiple results
# Not all updates have an associated KB
salt '*' win_wua.get 'Microsoft Camera Codec Pack'
]
variable[wua] assign[=] call[name[salt].utils.win_update.WindowsUpdateAgent, parameter[]]
variable[updates] assign[=] call[name[wua].search, parameter[name[name]]]
variable[ret] assign[=] dictionary[[], []]
if <ast.BoolOp object at 0x7da207f021d0> begin[:]
call[name[ret]][constant[Download]] assign[=] call[name[wua].download, parameter[name[updates]]]
if name[install] begin[:]
call[name[ret]][constant[Install]] assign[=] call[name[wua].install, parameter[name[updates]]]
return[<ast.IfExp object at 0x7da18bcc92d0>] | keyword[def] identifier[get] ( identifier[name] , identifier[download] = keyword[False] , identifier[install] = keyword[False] ):
literal[string]
identifier[wua] = identifier[salt] . identifier[utils] . identifier[win_update] . identifier[WindowsUpdateAgent] ()
identifier[updates] = identifier[wua] . identifier[search] ( identifier[name] )
identifier[ret] ={}
keyword[if] identifier[download] keyword[or] identifier[install] :
identifier[ret] [ literal[string] ]= identifier[wua] . identifier[download] ( identifier[updates] )
keyword[if] identifier[install] :
identifier[ret] [ literal[string] ]= identifier[wua] . identifier[install] ( identifier[updates] )
keyword[return] identifier[ret] keyword[if] identifier[ret] keyword[else] identifier[updates] . identifier[list] () | def get(name, download=False, install=False):
"""
.. versionadded:: 2017.7.0
Returns details for the named update
Args:
name (str):
The name of the update you're searching for. This can be the GUID, a
KB number, or any part of the name of the update. GUIDs and KBs are
preferred. Run ``list`` to get the GUID for the update you're
looking for.
download (bool):
Download the update returned by this function. Run this function
first to see if the update exists, then set ``download=True`` to
download the update.
install (bool):
Install the update returned by this function. Run this function
first to see if the update exists, then set ``install=True`` to
install the update.
Returns:
dict: Returns a dict containing a list of updates that match the name if
download and install are both set to False. Should usually be a single
update, but can return multiple if a partial name is given.
If download or install is set to true it will return the results of the
operation.
.. code-block:: cfg
List of Updates:
{'<GUID>': {'Title': <title>,
'KB': <KB>,
'GUID': <the globally unique identifier for the update>
'Description': <description>,
'Downloaded': <has the update been downloaded>,
'Installed': <has the update been installed>,
'Mandatory': <is the update mandatory>,
'UserInput': <is user input required>,
'EULAAccepted': <has the EULA been accepted>,
'Severity': <update severity>,
'NeedsReboot': <is the update installed and awaiting reboot>,
'RebootBehavior': <will the update require a reboot>,
'Categories': [ '<category 1>',
'<category 2>',
...]
}
}
CLI Examples:
.. code-block:: bash
# Recommended Usage using GUID without braces
# Use this to find the status of a specific update
salt '*' win_wua.get 12345678-abcd-1234-abcd-1234567890ab
# Use the following if you don't know the GUID:
# Using a KB number
# Not all updates have an associated KB
salt '*' win_wua.get KB3030298
# Using part or all of the name of the update
# Could possibly return multiple results
# Not all updates have an associated KB
salt '*' win_wua.get 'Microsoft Camera Codec Pack'
"""
# Create a Windows Update Agent instance
wua = salt.utils.win_update.WindowsUpdateAgent()
# Search for Update
updates = wua.search(name)
ret = {}
# Download
if download or install:
ret['Download'] = wua.download(updates) # depends on [control=['if'], data=[]]
# Install
if install:
ret['Install'] = wua.install(updates) # depends on [control=['if'], data=[]]
return ret if ret else updates.list() |
def detranslify(text):
"""Detranslify russian text"""
try:
res = translit.detranslify(text)
except Exception as err:
# because filter must die silently
res = default_value % {'error': err, 'value': text}
return res | def function[detranslify, parameter[text]]:
    constant[Detranslify Russian text]
<ast.Try object at 0x7da1b0ebdc60>
return[name[res]] | keyword[def] identifier[detranslify] ( identifier[text] ):
literal[string]
keyword[try] :
identifier[res] = identifier[translit] . identifier[detranslify] ( identifier[text] )
keyword[except] identifier[Exception] keyword[as] identifier[err] :
identifier[res] = identifier[default_value] %{ literal[string] : identifier[err] , literal[string] : identifier[text] }
keyword[return] identifier[res] | def detranslify(text):
"""Detranslify russian text"""
try:
res = translit.detranslify(text) # depends on [control=['try'], data=[]]
except Exception as err:
# because filter must die silently
res = default_value % {'error': err, 'value': text} # depends on [control=['except'], data=['err']]
return res |
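A usage sketch assuming pytils supplies the translit module and default_value is the module's error-formatting template, as the row implies:

print(detranslify("Moskva"))  # Cyrillic text recovered from its transliteration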
def add(self, tipo_opcao, nome_opcao):
"""Inserts a new Option Pool and returns its identifier.
        :param tipo_opcao: Option type. String with a maximum of 50 characters, matching [a-zA-Z_-]
        :param nome_opcao: Option name. String with a maximum of 50 characters, matching [a-zA-Z_-]
:return: Following dictionary:
::
{'id': < id > , 'type':<type>, 'name':<name>}
:raise InvalidParameterError: The value of tipo_opcao or nome_opcao_txt is invalid.
:raise DataBaseError: Networkapi failed to access the database.
:raise XMLError: Networkapi failed to generate the XML response.
"""
#optionpool_map = dict()
#optionpool_map['type'] = tipo_opcao
#optionpool_map['name'] = nome_opcao
url='api/pools/options/save/'
return self.post(url, {'type': tipo_opcao, "name":nome_opcao }) | def function[add, parameter[self, tipo_opcao, nome_opcao]]:
constant[Inserts a new Option Pool and returns its identifier.
    :param tipo_opcao: Option type. String with a maximum of 50 characters, matching [a-zA-Z_-]
    :param nome_opcao: Option name. String with a maximum of 50 characters, matching [a-zA-Z_-]
:return: Following dictionary:
::
{'id': < id > , 'type':<type>, 'name':<name>}
:raise InvalidParameterError: The value of tipo_opcao or nome_opcao_txt is invalid.
:raise DataBaseError: Networkapi failed to access the database.
:raise XMLError: Networkapi failed to generate the XML response.
]
variable[url] assign[=] constant[api/pools/options/save/]
return[call[name[self].post, parameter[name[url], dictionary[[<ast.Constant object at 0x7da20c7c88e0>, <ast.Constant object at 0x7da20c7c8ca0>], [<ast.Name object at 0x7da20c7cb460>, <ast.Name object at 0x7da20c7c8ac0>]]]]] | keyword[def] identifier[add] ( identifier[self] , identifier[tipo_opcao] , identifier[nome_opcao] ):
literal[string]
identifier[url] = literal[string]
keyword[return] identifier[self] . identifier[post] ( identifier[url] ,{ literal[string] : identifier[tipo_opcao] , literal[string] : identifier[nome_opcao] }) | def add(self, tipo_opcao, nome_opcao):
"""Inserts a new Option Pool and returns its identifier.
    :param tipo_opcao: Option type. String with a maximum of 50 characters, matching [a-zA-Z_-]
    :param nome_opcao: Option name. String with a maximum of 50 characters, matching [a-zA-Z_-]
:return: Following dictionary:
::
{'id': < id > , 'type':<type>, 'name':<name>}
:raise InvalidParameterError: The value of tipo_opcao or nome_opcao_txt is invalid.
:raise DataBaseError: Networkapi failed to access the database.
:raise XMLError: Networkapi failed to generate the XML response.
"""
#optionpool_map = dict()
#optionpool_map['type'] = tipo_opcao
#optionpool_map['name'] = nome_opcao
url = 'api/pools/options/save/'
return self.post(url, {'type': tipo_opcao, 'name': nome_opcao}) |
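A hypothetical client call for the method above; client and the option values are illustrative, not taken from a live NetworkAPI deployment:

option = client.add(tipo_opcao="ServiceDownAction", nome_opcao="none")
print(option)  # e.g. {'id': 1, 'type': 'ServiceDownAction', 'name': 'none'}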
def task_submission_options(f):
"""
Options shared by both transfer and delete task submission
"""
def notify_opt_callback(ctx, param, value):
"""
Parse --notify
- "" is the same as "off"
- parse by lowercase, comma-split, strip spaces
- "off,x" is invalid for any x
- "on,x" is valid for any valid x (other than "off")
- "failed", "succeeded", "inactive" are normal vals
In code, produces True, False, or a set
"""
# if no value was set, don't set any explicit options
# the API default is "everything on"
if value is None:
return {}
value = value.lower()
value = [x.strip() for x in value.split(",")]
# [""] is what you'll get if value is "" to start with
# special-case it into "off", which helps avoid surprising scripts
# which take a notification settings as inputs and build --notify
if value == [""]:
value = ["off"]
off = "off" in value
on = "on" in value
# set-ize it -- duplicates are fine
vals = set([x for x in value if x not in ("off", "on")])
if (vals or on) and off:
raise click.UsageError('--notify cannot accept "off" and another value')
allowed_vals = set(("on", "succeeded", "failed", "inactive"))
if not vals <= allowed_vals:
raise click.UsageError(
"--notify received at least one invalid value among {}".format(
list(vals)
)
)
# return the notification options to send!
# on means don't set anything (default)
if on:
return {}
# off means turn off everything
if off:
return {
"notify_on_succeeded": False,
"notify_on_failed": False,
"notify_on_inactive": False,
}
# otherwise, return the exact set of values seen
else:
return {
"notify_on_succeeded": "succeeded" in vals,
"notify_on_failed": "failed" in vals,
"notify_on_inactive": "inactive" in vals,
}
f = click.option(
"--dry-run",
is_flag=True,
help=("Don't actually submit the task, print submission " "data instead"),
)(f)
f = click.option(
"--notify",
callback=notify_opt_callback,
help=(
"Comma separated list of task events which notify by email. "
"'on' and 'off' may be used to enable or disable notifications "
"for all event types. Otherwise, use 'succeeded', 'failed', or "
"'inactive'"
),
)(f)
f = click.option(
"--submission-id",
help=(
"Task submission ID, as generated by `globus task "
"generate-submission-id`. Used for safe resubmission in the "
"presence of network failures."
),
)(f)
f = click.option("--label", default=None, help="Set a label for this task.")(f)
f = click.option(
"--deadline",
default=None,
type=ISOTimeType(),
help="Set a deadline for this to be canceled if not completed by.",
)(f)
f = click.option(
"--skip-activation-check",
is_flag=True,
help=("Submit the task even if the endpoint(s) " "aren't currently activated."),
)(f)
return f | def function[task_submission_options, parameter[f]]:
constant[
Options shared by both transfer and delete task submission
]
def function[notify_opt_callback, parameter[ctx, param, value]]:
constant[
Parse --notify
- "" is the same as "off"
- parse by lowercase, comma-split, strip spaces
- "off,x" is invalid for any x
- "on,x" is valid for any valid x (other than "off")
- "failed", "succeeded", "inactive" are normal vals
In code, produces True, False, or a set
]
if compare[name[value] is constant[None]] begin[:]
return[dictionary[[], []]]
variable[value] assign[=] call[name[value].lower, parameter[]]
variable[value] assign[=] <ast.ListComp object at 0x7da1b03e35e0>
if compare[name[value] equal[==] list[[<ast.Constant object at 0x7da1b03e3310>]]] begin[:]
variable[value] assign[=] list[[<ast.Constant object at 0x7da1b03e3250>]]
variable[off] assign[=] compare[constant[off] in name[value]]
variable[on] assign[=] compare[constant[on] in name[value]]
variable[vals] assign[=] call[name[set], parameter[<ast.ListComp object at 0x7da1b03e2f80>]]
if <ast.BoolOp object at 0x7da1b03e2d40> begin[:]
<ast.Raise object at 0x7da1b03e2c50>
variable[allowed_vals] assign[=] call[name[set], parameter[tuple[[<ast.Constant object at 0x7da1b03e2a70>, <ast.Constant object at 0x7da1b03e2a40>, <ast.Constant object at 0x7da1b03e2a10>, <ast.Constant object at 0x7da1b03e29e0>]]]]
if <ast.UnaryOp object at 0x7da1b03e2980> begin[:]
<ast.Raise object at 0x7da1b03e28c0>
if name[on] begin[:]
return[dictionary[[], []]]
if name[off] begin[:]
return[dictionary[[<ast.Constant object at 0x7da1b03e2530>, <ast.Constant object at 0x7da1b03e2500>, <ast.Constant object at 0x7da1b03e24d0>], [<ast.Constant object at 0x7da1b03e24a0>, <ast.Constant object at 0x7da1b03e2470>, <ast.Constant object at 0x7da1b03e2440>]]]
variable[f] assign[=] call[call[name[click].option, parameter[constant[--dry-run]]], parameter[name[f]]]
variable[f] assign[=] call[call[name[click].option, parameter[constant[--notify]]], parameter[name[f]]]
variable[f] assign[=] call[call[name[click].option, parameter[constant[--submission-id]]], parameter[name[f]]]
variable[f] assign[=] call[call[name[click].option, parameter[constant[--label]]], parameter[name[f]]]
variable[f] assign[=] call[call[name[click].option, parameter[constant[--deadline]]], parameter[name[f]]]
variable[f] assign[=] call[call[name[click].option, parameter[constant[--skip-activation-check]]], parameter[name[f]]]
return[name[f]] | keyword[def] identifier[task_submission_options] ( identifier[f] ):
literal[string]
keyword[def] identifier[notify_opt_callback] ( identifier[ctx] , identifier[param] , identifier[value] ):
literal[string]
keyword[if] identifier[value] keyword[is] keyword[None] :
keyword[return] {}
identifier[value] = identifier[value] . identifier[lower] ()
identifier[value] =[ identifier[x] . identifier[strip] () keyword[for] identifier[x] keyword[in] identifier[value] . identifier[split] ( literal[string] )]
keyword[if] identifier[value] ==[ literal[string] ]:
identifier[value] =[ literal[string] ]
identifier[off] = literal[string] keyword[in] identifier[value]
identifier[on] = literal[string] keyword[in] identifier[value]
identifier[vals] = identifier[set] ([ identifier[x] keyword[for] identifier[x] keyword[in] identifier[value] keyword[if] identifier[x] keyword[not] keyword[in] ( literal[string] , literal[string] )])
keyword[if] ( identifier[vals] keyword[or] identifier[on] ) keyword[and] identifier[off] :
keyword[raise] identifier[click] . identifier[UsageError] ( literal[string] )
identifier[allowed_vals] = identifier[set] (( literal[string] , literal[string] , literal[string] , literal[string] ))
keyword[if] keyword[not] identifier[vals] <= identifier[allowed_vals] :
keyword[raise] identifier[click] . identifier[UsageError] (
literal[string] . identifier[format] (
identifier[list] ( identifier[vals] )
)
)
keyword[if] identifier[on] :
keyword[return] {}
keyword[if] identifier[off] :
keyword[return] {
literal[string] : keyword[False] ,
literal[string] : keyword[False] ,
literal[string] : keyword[False] ,
}
keyword[else] :
keyword[return] {
literal[string] : literal[string] keyword[in] identifier[vals] ,
literal[string] : literal[string] keyword[in] identifier[vals] ,
literal[string] : literal[string] keyword[in] identifier[vals] ,
}
identifier[f] = identifier[click] . identifier[option] (
literal[string] ,
identifier[is_flag] = keyword[True] ,
identifier[help] =( literal[string] literal[string] ),
)( identifier[f] )
identifier[f] = identifier[click] . identifier[option] (
literal[string] ,
identifier[callback] = identifier[notify_opt_callback] ,
identifier[help] =(
literal[string]
literal[string]
literal[string]
literal[string]
),
)( identifier[f] )
identifier[f] = identifier[click] . identifier[option] (
literal[string] ,
identifier[help] =(
literal[string]
literal[string]
literal[string]
),
)( identifier[f] )
identifier[f] = identifier[click] . identifier[option] ( literal[string] , identifier[default] = keyword[None] , identifier[help] = literal[string] )( identifier[f] )
identifier[f] = identifier[click] . identifier[option] (
literal[string] ,
identifier[default] = keyword[None] ,
identifier[type] = identifier[ISOTimeType] (),
identifier[help] = literal[string] ,
)( identifier[f] )
identifier[f] = identifier[click] . identifier[option] (
literal[string] ,
identifier[is_flag] = keyword[True] ,
identifier[help] =( literal[string] literal[string] ),
)( identifier[f] )
keyword[return] identifier[f] | def task_submission_options(f):
"""
Options shared by both transfer and delete task submission
"""
def notify_opt_callback(ctx, param, value):
"""
Parse --notify
- "" is the same as "off"
- parse by lowercase, comma-split, strip spaces
- "off,x" is invalid for any x
- "on,x" is valid for any valid x (other than "off")
- "failed", "succeeded", "inactive" are normal vals
In code, produces True, False, or a set
"""
# if no value was set, don't set any explicit options
# the API default is "everything on"
if value is None:
return {} # depends on [control=['if'], data=[]]
value = value.lower()
value = [x.strip() for x in value.split(',')]
# [""] is what you'll get if value is "" to start with
# special-case it into "off", which helps avoid surprising scripts
# which take a notification settings as inputs and build --notify
if value == ['']:
value = ['off'] # depends on [control=['if'], data=['value']]
off = 'off' in value
on = 'on' in value
# set-ize it -- duplicates are fine
vals = set([x for x in value if x not in ('off', 'on')])
if (vals or on) and off:
raise click.UsageError('--notify cannot accept "off" and another value') # depends on [control=['if'], data=[]]
allowed_vals = set(('on', 'succeeded', 'failed', 'inactive'))
if not vals <= allowed_vals:
raise click.UsageError('--notify received at least one invalid value among {}'.format(list(vals))) # depends on [control=['if'], data=[]]
# return the notification options to send!
# on means don't set anything (default)
if on:
return {} # depends on [control=['if'], data=[]]
# off means turn off everything
if off:
return {'notify_on_succeeded': False, 'notify_on_failed': False, 'notify_on_inactive': False} # depends on [control=['if'], data=[]]
else:
# otherwise, return the exact set of values seen
return {'notify_on_succeeded': 'succeeded' in vals, 'notify_on_failed': 'failed' in vals, 'notify_on_inactive': 'inactive' in vals}
f = click.option('--dry-run', is_flag=True, help="Don't actually submit the task, print submission data instead")(f)
f = click.option('--notify', callback=notify_opt_callback, help="Comma separated list of task events which notify by email. 'on' and 'off' may be used to enable or disable notifications for all event types. Otherwise, use 'succeeded', 'failed', or 'inactive'")(f)
f = click.option('--submission-id', help='Task submission ID, as generated by `globus task generate-submission-id`. Used for safe resubmission in the presence of network failures.')(f)
f = click.option('--label', default=None, help='Set a label for this task.')(f)
f = click.option('--deadline', default=None, type=ISOTimeType(), help='Set a deadline for this to be canceled if not completed by.')(f)
f = click.option('--skip-activation-check', is_flag=True, help="Submit the task even if the endpoint(s) aren't currently activated.")(f)
return f |
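A sketch of the decorator in use, assuming ISOTimeType from the same module is importable; the parameter names are the ones click derives from the option flags:

import click

@click.command()
@task_submission_options
def submit(dry_run, notify, submission_id, label, deadline,
           skip_activation_check):
    # --notify arrives pre-parsed: {} for on/default, else notify_on_* booleans
    click.echo(notify)

# `submit --notify failed,inactive` would echo
# {'notify_on_succeeded': False, 'notify_on_failed': True, 'notify_on_inactive': True}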
def _list_store_resources(self, request, head_id, filter_ids,
resource_fetcher, block_xform):
"""Builds a list of blocks or resources derived from blocks,
handling multiple possible filter requests:
- filtered by a set of ids
- filtered by head block
- filtered by both id and head block
- not filtered (all current resources)
Note:
This method will fail if `_block_store` has not been set
Args:
request (object): The parsed protobuf request object
head_id (str): Either request.head_id, or the current chain head
filter_ids (list of str): the resource ids (if any) to filter by
resource_fetcher (function): Fetches a resource by its id
Expected args:
resource_id: The id of the resource to be fetched
Expected return:
object: The resource to be appended to the results
block_xform (function): Transforms a block into a list of resources
Expected args:
block: A block object from the block store
Expected return:
list: To be concatenated to the end of the results
Returns:
list: List of blocks or data from blocks. If filtered by ids,
they will be listed in the same order as the id filters,
otherwise they will be ordered from newest to oldest
"""
resources = []
# Simply fetch by id if filtered by id but not by head block
if filter_ids and not request.head_id:
for resource_id in filter_ids:
try:
resources.append(resource_fetcher(resource_id))
except (KeyError, ValueError, TypeError):
# Invalid ids should be omitted, not raise an exception
pass
# Traverse block chain to build results for most scenarios
else:
current_id = head_id
while current_id in self._block_store:
block = self._block_store[current_id].block
resources += block_xform(block)
header = BlockHeader()
header.ParseFromString(block.header)
current_id = header.previous_block_id
# If filtering by head AND ids, the traverse results must be winnowed
if request.head_id and filter_ids:
matches = {
r.header_signature: r
for r in resources if r.header_signature in filter_ids
}
resources = [matches[i] for i in filter_ids if i in matches]
return resources | def function[_list_store_resources, parameter[self, request, head_id, filter_ids, resource_fetcher, block_xform]]:
constant[Builds a list of blocks or resources derived from blocks,
handling multiple possible filter requests:
- filtered by a set of ids
- filtered by head block
- filtered by both id and head block
- not filtered (all current resources)
Note:
This method will fail if `_block_store` has not been set
Args:
request (object): The parsed protobuf request object
head_id (str): Either request.head_id, or the current chain head
filter_ids (list of str): the resource ids (if any) to filter by
resource_fetcher (function): Fetches a resource by its id
Expected args:
resource_id: The id of the resource to be fetched
Expected return:
object: The resource to be appended to the results
block_xform (function): Transforms a block into a list of resources
Expected args:
block: A block object from the block store
Expected return:
list: To be concatenated to the end of the results
Returns:
list: List of blocks or data from blocks. If filtered by ids,
they will be listed in the same order as the id filters,
otherwise they will be ordered from newest to oldest
]
variable[resources] assign[=] list[[]]
if <ast.BoolOp object at 0x7da18bc70820> begin[:]
for taget[name[resource_id]] in starred[name[filter_ids]] begin[:]
<ast.Try object at 0x7da18bc70dc0>
if <ast.BoolOp object at 0x7da20c7c8b20> begin[:]
variable[matches] assign[=] <ast.DictComp object at 0x7da20c7c8fd0>
variable[resources] assign[=] <ast.ListComp object at 0x7da20c7c98a0>
return[name[resources]] | keyword[def] identifier[_list_store_resources] ( identifier[self] , identifier[request] , identifier[head_id] , identifier[filter_ids] ,
identifier[resource_fetcher] , identifier[block_xform] ):
literal[string]
identifier[resources] =[]
keyword[if] identifier[filter_ids] keyword[and] keyword[not] identifier[request] . identifier[head_id] :
keyword[for] identifier[resource_id] keyword[in] identifier[filter_ids] :
keyword[try] :
identifier[resources] . identifier[append] ( identifier[resource_fetcher] ( identifier[resource_id] ))
keyword[except] ( identifier[KeyError] , identifier[ValueError] , identifier[TypeError] ):
keyword[pass]
keyword[else] :
identifier[current_id] = identifier[head_id]
keyword[while] identifier[current_id] keyword[in] identifier[self] . identifier[_block_store] :
identifier[block] = identifier[self] . identifier[_block_store] [ identifier[current_id] ]. identifier[block]
identifier[resources] += identifier[block_xform] ( identifier[block] )
identifier[header] = identifier[BlockHeader] ()
identifier[header] . identifier[ParseFromString] ( identifier[block] . identifier[header] )
identifier[current_id] = identifier[header] . identifier[previous_block_id]
keyword[if] identifier[request] . identifier[head_id] keyword[and] identifier[filter_ids] :
identifier[matches] ={
identifier[r] . identifier[header_signature] : identifier[r]
keyword[for] identifier[r] keyword[in] identifier[resources] keyword[if] identifier[r] . identifier[header_signature] keyword[in] identifier[filter_ids]
}
identifier[resources] =[ identifier[matches] [ identifier[i] ] keyword[for] identifier[i] keyword[in] identifier[filter_ids] keyword[if] identifier[i] keyword[in] identifier[matches] ]
keyword[return] identifier[resources] | def _list_store_resources(self, request, head_id, filter_ids, resource_fetcher, block_xform):
"""Builds a list of blocks or resources derived from blocks,
handling multiple possible filter requests:
- filtered by a set of ids
- filtered by head block
- filtered by both id and head block
- not filtered (all current resources)
Note:
This method will fail if `_block_store` has not been set
Args:
request (object): The parsed protobuf request object
head_id (str): Either request.head_id, or the current chain head
filter_ids (list of str): the resource ids (if any) to filter by
resource_fetcher (function): Fetches a resource by its id
Expected args:
resource_id: The id of the resource to be fetched
Expected return:
object: The resource to be appended to the results
block_xform (function): Transforms a block into a list of resources
Expected args:
block: A block object from the block store
Expected return:
list: To be concatenated to the end of the results
Returns:
list: List of blocks or data from blocks. If filtered by ids,
they will be listed in the same order as the id filters,
otherwise they will be ordered from newest to oldest
"""
resources = []
# Simply fetch by id if filtered by id but not by head block
if filter_ids and (not request.head_id):
for resource_id in filter_ids:
try:
resources.append(resource_fetcher(resource_id)) # depends on [control=['try'], data=[]]
except (KeyError, ValueError, TypeError):
# Invalid ids should be omitted, not raise an exception
pass # depends on [control=['except'], data=[]] # depends on [control=['for'], data=['resource_id']] # depends on [control=['if'], data=[]]
else:
# Traverse block chain to build results for most scenarios
current_id = head_id
while current_id in self._block_store:
block = self._block_store[current_id].block
resources += block_xform(block)
header = BlockHeader()
header.ParseFromString(block.header)
current_id = header.previous_block_id # depends on [control=['while'], data=['current_id']]
# If filtering by head AND ids, the traverse results must be winnowed
if request.head_id and filter_ids:
matches = {r.header_signature: r for r in resources if r.header_signature in filter_ids}
resources = [matches[i] for i in filter_ids if i in matches] # depends on [control=['if'], data=[]]
return resources |
def _normalize_sv_coverage_cnvkit(group_id, inputs, backgrounds, work_dir, back_files, out_files):
"""Normalize CNV coverage depths by GC, repeats and background using CNVkit
- reference: calculates reference backgrounds from normals and pools
including GC and repeat information
- fix: Uses background to normalize coverage estimations
http://cnvkit.readthedocs.io/en/stable/pipeline.html#fix
"""
from bcbio.structural import cnvkit
cnns = reduce(operator.add, [[tz.get_in(["depth", "bins", "target"], x),
tz.get_in(["depth", "bins", "antitarget"], x)] for x in backgrounds], [])
for d in inputs:
if tz.get_in(["depth", "bins", "target"], d):
target_bed = tz.get_in(["depth", "bins", "target"], d)
antitarget_bed = tz.get_in(["depth", "bins", "antitarget"], d)
input_backs = set(filter(lambda x: x is not None,
[dd.get_background_cnv_reference(d, "cnvkit") for d in inputs]))
if input_backs:
assert len(input_backs) == 1, "Multiple backgrounds in group: %s" % list(input_backs)
back_file = list(input_backs)[0]
else:
        back_file = cnvkit.cnvkit_background(
            cnns, os.path.join(work_dir, "background-%s-cnvkit.cnn" % group_id),
            backgrounds or inputs, target_bed, antitarget_bed)
fix_cmd_inputs = []
for data in inputs:
work_dir = utils.safe_makedir(os.path.join(dd.get_work_dir(data), "structural",
dd.get_sample_name(data), "bins"))
if tz.get_in(["depth", "bins", "target"], data):
fix_file = os.path.join(work_dir, "%s-normalized.cnr" % (dd.get_sample_name(data)))
fix_cmd_inputs.append((tz.get_in(["depth", "bins", "target"], data),
tz.get_in(["depth", "bins", "antitarget"], data),
back_file, fix_file, data))
out_files[dd.get_sample_name(data)] = fix_file
back_files[dd.get_sample_name(data)] = back_file
parallel = {"type": "local", "cores": dd.get_cores(inputs[0]), "progs": ["cnvkit"]}
run_multicore(cnvkit.run_fix_parallel, fix_cmd_inputs, inputs[0]["config"], parallel)
return back_files, out_files | def function[_normalize_sv_coverage_cnvkit, parameter[group_id, inputs, backgrounds, work_dir, back_files, out_files]]:
constant[Normalize CNV coverage depths by GC, repeats and background using CNVkit
- reference: calculates reference backgrounds from normals and pools
including GC and repeat information
- fix: Uses background to normalize coverage estimations
http://cnvkit.readthedocs.io/en/stable/pipeline.html#fix
]
from relative_module[bcbio.structural] import module[cnvkit]
variable[cnns] assign[=] call[name[reduce], parameter[name[operator].add, <ast.ListComp object at 0x7da1b18abac0>, list[[]]]]
for taget[name[d]] in starred[name[inputs]] begin[:]
if call[name[tz].get_in, parameter[list[[<ast.Constant object at 0x7da1b18aab00>, <ast.Constant object at 0x7da1b18aabc0>, <ast.Constant object at 0x7da1b18aaad0>]], name[d]]] begin[:]
variable[target_bed] assign[=] call[name[tz].get_in, parameter[list[[<ast.Constant object at 0x7da1b18a8070>, <ast.Constant object at 0x7da1b18a9a50>, <ast.Constant object at 0x7da1b18a8220>]], name[d]]]
variable[antitarget_bed] assign[=] call[name[tz].get_in, parameter[list[[<ast.Constant object at 0x7da1b18a8100>, <ast.Constant object at 0x7da1b18a80a0>, <ast.Constant object at 0x7da1b18a8130>]], name[d]]]
variable[input_backs] assign[=] call[name[set], parameter[call[name[filter], parameter[<ast.Lambda object at 0x7da1b18a8e20>, <ast.ListComp object at 0x7da1b18a8f40>]]]]
if name[input_backs] begin[:]
assert[compare[call[name[len], parameter[name[input_backs]]] equal[==] constant[1]]]
variable[back_file] assign[=] call[call[name[list], parameter[name[input_backs]]]][constant[0]]
variable[fix_cmd_inputs] assign[=] list[[]]
for taget[name[data]] in starred[name[inputs]] begin[:]
variable[work_dir] assign[=] call[name[utils].safe_makedir, parameter[call[name[os].path.join, parameter[call[name[dd].get_work_dir, parameter[name[data]]], constant[structural], call[name[dd].get_sample_name, parameter[name[data]]], constant[bins]]]]]
if call[name[tz].get_in, parameter[list[[<ast.Constant object at 0x7da1b18a8a90>, <ast.Constant object at 0x7da1b18a8a60>, <ast.Constant object at 0x7da1b18a8a00>]], name[data]]] begin[:]
variable[fix_file] assign[=] call[name[os].path.join, parameter[name[work_dir], binary_operation[constant[%s-normalized.cnr] <ast.Mod object at 0x7da2590d6920> call[name[dd].get_sample_name, parameter[name[data]]]]]]
call[name[fix_cmd_inputs].append, parameter[tuple[[<ast.Call object at 0x7da1b18a8820>, <ast.Call object at 0x7da1b18a8520>, <ast.Name object at 0x7da1b18a83a0>, <ast.Name object at 0x7da1b18a8340>, <ast.Name object at 0x7da1b18a8370>]]]]
call[name[out_files]][call[name[dd].get_sample_name, parameter[name[data]]]] assign[=] name[fix_file]
call[name[back_files]][call[name[dd].get_sample_name, parameter[name[data]]]] assign[=] name[back_file]
variable[parallel] assign[=] dictionary[[<ast.Constant object at 0x7da1b1985510>, <ast.Constant object at 0x7da1b1985210>, <ast.Constant object at 0x7da1b1985f90>], [<ast.Constant object at 0x7da1b1986200>, <ast.Call object at 0x7da1b1987250>, <ast.List object at 0x7da1b19842b0>]]
call[name[run_multicore], parameter[name[cnvkit].run_fix_parallel, name[fix_cmd_inputs], call[call[name[inputs]][constant[0]]][constant[config]], name[parallel]]]
return[tuple[[<ast.Name object at 0x7da1b1986f50>, <ast.Name object at 0x7da1b19857b0>]]] | keyword[def] identifier[_normalize_sv_coverage_cnvkit] ( identifier[group_id] , identifier[inputs] , identifier[backgrounds] , identifier[work_dir] , identifier[back_files] , identifier[out_files] ):
literal[string]
keyword[from] identifier[bcbio] . identifier[structural] keyword[import] identifier[cnvkit]
identifier[cnns] = identifier[reduce] ( identifier[operator] . identifier[add] ,[[ identifier[tz] . identifier[get_in] ([ literal[string] , literal[string] , literal[string] ], identifier[x] ),
identifier[tz] . identifier[get_in] ([ literal[string] , literal[string] , literal[string] ], identifier[x] )] keyword[for] identifier[x] keyword[in] identifier[backgrounds] ],[])
keyword[for] identifier[d] keyword[in] identifier[inputs] :
keyword[if] identifier[tz] . identifier[get_in] ([ literal[string] , literal[string] , literal[string] ], identifier[d] ):
identifier[target_bed] = identifier[tz] . identifier[get_in] ([ literal[string] , literal[string] , literal[string] ], identifier[d] )
identifier[antitarget_bed] = identifier[tz] . identifier[get_in] ([ literal[string] , literal[string] , literal[string] ], identifier[d] )
identifier[input_backs] = identifier[set] ( identifier[filter] ( keyword[lambda] identifier[x] : identifier[x] keyword[is] keyword[not] keyword[None] ,
[ identifier[dd] . identifier[get_background_cnv_reference] ( identifier[d] , literal[string] ) keyword[for] identifier[d] keyword[in] identifier[inputs] ]))
keyword[if] identifier[input_backs] :
keyword[assert] identifier[len] ( identifier[input_backs] )== literal[int] , literal[string] % identifier[list] ( identifier[input_backs] )
identifier[back_file] = identifier[list] ( identifier[input_backs] )[ literal[int] ]
keyword[else] :
identifier[back_file] = identifier[cnvkit] . identifier[cnvkit_background] ( identifier[cnns] ,
identifier[os] . identifier[path] . identifier[join] ( identifier[work_dir] , literal[string] %( identifier[group_id] )),
identifier[backgrounds] keyword[or] identifier[inputs] , identifier[target_bed] , identifier[antitarget_bed] )
identifier[fix_cmd_inputs] =[]
keyword[for] identifier[data] keyword[in] identifier[inputs] :
identifier[work_dir] = identifier[utils] . identifier[safe_makedir] ( identifier[os] . identifier[path] . identifier[join] ( identifier[dd] . identifier[get_work_dir] ( identifier[data] ), literal[string] ,
identifier[dd] . identifier[get_sample_name] ( identifier[data] ), literal[string] ))
keyword[if] identifier[tz] . identifier[get_in] ([ literal[string] , literal[string] , literal[string] ], identifier[data] ):
identifier[fix_file] = identifier[os] . identifier[path] . identifier[join] ( identifier[work_dir] , literal[string] %( identifier[dd] . identifier[get_sample_name] ( identifier[data] )))
identifier[fix_cmd_inputs] . identifier[append] (( identifier[tz] . identifier[get_in] ([ literal[string] , literal[string] , literal[string] ], identifier[data] ),
identifier[tz] . identifier[get_in] ([ literal[string] , literal[string] , literal[string] ], identifier[data] ),
identifier[back_file] , identifier[fix_file] , identifier[data] ))
identifier[out_files] [ identifier[dd] . identifier[get_sample_name] ( identifier[data] )]= identifier[fix_file]
identifier[back_files] [ identifier[dd] . identifier[get_sample_name] ( identifier[data] )]= identifier[back_file]
identifier[parallel] ={ literal[string] : literal[string] , literal[string] : identifier[dd] . identifier[get_cores] ( identifier[inputs] [ literal[int] ]), literal[string] :[ literal[string] ]}
identifier[run_multicore] ( identifier[cnvkit] . identifier[run_fix_parallel] , identifier[fix_cmd_inputs] , identifier[inputs] [ literal[int] ][ literal[string] ], identifier[parallel] )
keyword[return] identifier[back_files] , identifier[out_files] | def _normalize_sv_coverage_cnvkit(group_id, inputs, backgrounds, work_dir, back_files, out_files):
"""Normalize CNV coverage depths by GC, repeats and background using CNVkit
- reference: calculates reference backgrounds from normals and pools
including GC and repeat information
- fix: Uses background to normalize coverage estimations
http://cnvkit.readthedocs.io/en/stable/pipeline.html#fix
"""
from bcbio.structural import cnvkit
cnns = reduce(operator.add, [[tz.get_in(['depth', 'bins', 'target'], x), tz.get_in(['depth', 'bins', 'antitarget'], x)] for x in backgrounds], [])
for d in inputs:
if tz.get_in(['depth', 'bins', 'target'], d):
target_bed = tz.get_in(['depth', 'bins', 'target'], d)
antitarget_bed = tz.get_in(['depth', 'bins', 'antitarget'], d) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['d']]
input_backs = set(filter(lambda x: x is not None, [dd.get_background_cnv_reference(d, 'cnvkit') for d in inputs]))
if input_backs:
assert len(input_backs) == 1, 'Multiple backgrounds in group: %s' % list(input_backs)
back_file = list(input_backs)[0] # depends on [control=['if'], data=[]]
else:
back_file = cnvkit.cnvkit_background(cnns, os.path.join(work_dir, 'background-%s-cnvkit.cnn' % group_id), backgrounds or inputs, target_bed, antitarget_bed)
fix_cmd_inputs = []
for data in inputs:
work_dir = utils.safe_makedir(os.path.join(dd.get_work_dir(data), 'structural', dd.get_sample_name(data), 'bins'))
if tz.get_in(['depth', 'bins', 'target'], data):
fix_file = os.path.join(work_dir, '%s-normalized.cnr' % dd.get_sample_name(data))
fix_cmd_inputs.append((tz.get_in(['depth', 'bins', 'target'], data), tz.get_in(['depth', 'bins', 'antitarget'], data), back_file, fix_file, data))
out_files[dd.get_sample_name(data)] = fix_file
back_files[dd.get_sample_name(data)] = back_file # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['data']]
parallel = {'type': 'local', 'cores': dd.get_cores(inputs[0]), 'progs': ['cnvkit']}
run_multicore(cnvkit.run_fix_parallel, fix_cmd_inputs, inputs[0]['config'], parallel)
return (back_files, out_files) |
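A minimal sketch of the shared-background selection above, detached from the bcbio objects; pick_background, candidate_backs and build_background are invented stand-ins, not names from the source.

def pick_background(candidate_backs, build_background):
    # Keep only explicitly configured backgrounds; a group must agree on one.
    backs = set(b for b in candidate_backs if b is not None)
    if backs:
        assert len(backs) == 1, 'Multiple backgrounds in group: %s' % list(backs)
        return backs.pop()
    # Otherwise fall back to computing a pooled background from the inputs.
    return build_background()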
def iter_sers(self):
"""
Generate each ``<c:ser>`` child element in this xChart in
c:order/@val sequence (not document or c:idx order).
"""
def ser_order(ser):
return ser.order.val
return (ser for ser in sorted(self.xpath('./c:ser'), key=ser_order)) | def function[iter_sers, parameter[self]]:
constant[
Generate each ``<c:ser>`` child element in this xChart in
c:order/@val sequence (not document or c:idx order).
]
def function[ser_order, parameter[ser]]:
return[name[ser].order.val]
return[<ast.GeneratorExp object at 0x7da20c6a8a00>] | keyword[def] identifier[iter_sers] ( identifier[self] ):
literal[string]
keyword[def] identifier[ser_order] ( identifier[ser] ):
keyword[return] identifier[ser] . identifier[order] . identifier[val]
keyword[return] ( identifier[ser] keyword[for] identifier[ser] keyword[in] identifier[sorted] ( identifier[self] . identifier[xpath] ( literal[string] ), identifier[key] = identifier[ser_order] )) | def iter_sers(self):
"""
Generate each ``<c:ser>`` child element in this xChart in
c:order/@val sequence (not document or c:idx order).
"""
def ser_order(ser):
return ser.order.val
return (ser for ser in sorted(self.xpath('./c:ser'), key=ser_order)) |
def _configure_from_module(self, item):
"""Configure from a module by import path.
Effectively, you give this an absolute or relative import path, it will
import it, and then pass the resulting object to
``_configure_from_object``.
Args:
item (str):
A string pointing to a valid import path.
Returns:
fleaker.App:
Returns itself.
"""
package = None
if item[0] == '.':
package = self.import_name
obj = importlib.import_module(item, package=package)
self.config.from_object(obj)
return self | def function[_configure_from_module, parameter[self, item]]:
constant[Configure from a module by import path.
Effectively, you give this an absolute or relative import path, it will
import it, and then pass the resulting object to
``_configure_from_object``.
Args:
item (str):
A string pointing to a valid import path.
Returns:
fleaker.App:
Returns itself.
]
variable[package] assign[=] constant[None]
if compare[call[name[item]][constant[0]] equal[==] constant[.]] begin[:]
variable[package] assign[=] name[self].import_name
variable[obj] assign[=] call[name[importlib].import_module, parameter[name[item]]]
call[name[self].config.from_object, parameter[name[obj]]]
return[name[self]] | keyword[def] identifier[_configure_from_module] ( identifier[self] , identifier[item] ):
literal[string]
identifier[package] = keyword[None]
keyword[if] identifier[item] [ literal[int] ]== literal[string] :
identifier[package] = identifier[self] . identifier[import_name]
identifier[obj] = identifier[importlib] . identifier[import_module] ( identifier[item] , identifier[package] = identifier[package] )
identifier[self] . identifier[config] . identifier[from_object] ( identifier[obj] )
keyword[return] identifier[self] | def _configure_from_module(self, item):
"""Configure from a module by import path.
Effectively, you give this an absolute or relative import path, it will
import it, and then pass the resulting object to
``_configure_from_object``.
Args:
item (str):
A string pointing to a valid import path.
Returns:
fleaker.App:
Returns itself.
"""
package = None
if item[0] == '.':
package = self.import_name # depends on [control=['if'], data=[]]
obj = importlib.import_module(item, package=package)
self.config.from_object(obj)
return self |
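A small sketch of the relative-import rule this method relies on; the module names in the comments are invented for illustration.

import importlib

def load_config_module(import_name, item):
    # A leading '.' makes `item` relative to the application's own package,
    # mirroring the item[0] == '.' check above.
    package = import_name if item.startswith('.') else None
    return importlib.import_module(item, package=package)

# load_config_module('myapp', '.settings') imports myapp.settings, while
# load_config_module('myapp', 'configs.prod') imports the absolute path.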
def get_reference_line_numeration_marker_patterns(prefix=u''):
"""Return a list of compiled regex patterns used to search for the marker.
Marker of a reference line in a full-text document.
:param prefix: (string) the possible prefix to a reference line
:return: (list) of compiled regex patterns.
"""
title = u""
if type(prefix) in (str, unicode):
title = prefix
g_name = u'(?P<mark>'
g_close = u')'
space = r'\s*'
patterns = [
# [1]
space + title + g_name + r'\[\s*(?P<marknum>\d+)\s*\]' + g_close,
# [<letters and numbers]
space + title + g_name + r'\[\s*[a-zA-Z:-]+\+?\s?(\d{1,4}[A-Za-z:-]?)?\s*\]' + g_close, # noqa
# {1}
space + title + g_name + r'\{\s*(?P<marknum>\d+)\s*\}' + g_close,
# (1)
space + title + g_name + r'\<\s*(?P<marknum>\d+)\s*\>' + g_close,
space + title + g_name + r'\(\s*(?P<marknum>\d+)\s*\)' + g_close,
space + title + g_name + r'(?P<marknum>\d+)\s*\.(?!\d)' + g_close,
space + title + g_name + r'(?P<marknum>\d+)\s+' + g_close,
space + title + g_name + r'(?P<marknum>\d+)\s*\]' + g_close,
# 1]
space + title + g_name + r'(?P<marknum>\d+)\s*\}' + g_close,
# 1}
space + title + g_name + r'(?P<marknum>\d+)\s*\)' + g_close,
# 1)
space + title + g_name + r'(?P<marknum>\d+)\s*\>' + g_close,
# [1.1]
space + title + g_name + r'\[\s*\d+\.\d+\s*\]' + g_close,
# [ ]
space + title + g_name + r'\[\s*\]' + g_close,
# *
space + title + g_name + r'\*' + g_close,
]
return [re.compile(p, re.I | re.UNICODE) for p in patterns] | def function[get_reference_line_numeration_marker_patterns, parameter[prefix]]:
    constant[Return a list of compiled regex patterns used to search for the marker.
    The marker identifies a reference line in a full-text document.
:param prefix: (string) the possible prefix to a reference line
:return: (list) of compiled regex patterns.
]
variable[title] assign[=] constant[]
if compare[call[name[type], parameter[name[prefix]]] in tuple[[<ast.Name object at 0x7da18fe92020>, <ast.Name object at 0x7da18fe92830>]]] begin[:]
variable[title] assign[=] name[prefix]
variable[g_name] assign[=] constant[(?P<mark>]
variable[g_close] assign[=] constant[)]
variable[space] assign[=] constant[\s*]
variable[patterns] assign[=] list[[<ast.BinOp object at 0x7da18fe92e90>, <ast.BinOp object at 0x7da18fe93bb0>, <ast.BinOp object at 0x7da18fe93c70>, <ast.BinOp object at 0x7da18fe90850>, <ast.BinOp object at 0x7da18fe930a0>, <ast.BinOp object at 0x7da18fe907c0>, <ast.BinOp object at 0x7da18fe902b0>, <ast.BinOp object at 0x7da207f9a9e0>, <ast.BinOp object at 0x7da207f981c0>, <ast.BinOp object at 0x7da18fe90c70>, <ast.BinOp object at 0x7da18fe925c0>, <ast.BinOp object at 0x7da18fe90e50>, <ast.BinOp object at 0x7da18fe92950>, <ast.BinOp object at 0x7da18fe90df0>]]
return[<ast.ListComp object at 0x7da18fe91960>] | keyword[def] identifier[get_reference_line_numeration_marker_patterns] ( identifier[prefix] = literal[string] ):
literal[string]
identifier[title] = literal[string]
keyword[if] identifier[type] ( identifier[prefix] ) keyword[in] ( identifier[str] , identifier[unicode] ):
identifier[title] = identifier[prefix]
identifier[g_name] = literal[string]
identifier[g_close] = literal[string]
identifier[space] = literal[string]
identifier[patterns] =[
identifier[space] + identifier[title] + identifier[g_name] + literal[string] + identifier[g_close] ,
identifier[space] + identifier[title] + identifier[g_name] + literal[string] + identifier[g_close] ,
identifier[space] + identifier[title] + identifier[g_name] + literal[string] + identifier[g_close] ,
identifier[space] + identifier[title] + identifier[g_name] + literal[string] + identifier[g_close] ,
identifier[space] + identifier[title] + identifier[g_name] + literal[string] + identifier[g_close] ,
identifier[space] + identifier[title] + identifier[g_name] + literal[string] + identifier[g_close] ,
identifier[space] + identifier[title] + identifier[g_name] + literal[string] + identifier[g_close] ,
identifier[space] + identifier[title] + identifier[g_name] + literal[string] + identifier[g_close] ,
identifier[space] + identifier[title] + identifier[g_name] + literal[string] + identifier[g_close] ,
identifier[space] + identifier[title] + identifier[g_name] + literal[string] + identifier[g_close] ,
identifier[space] + identifier[title] + identifier[g_name] + literal[string] + identifier[g_close] ,
identifier[space] + identifier[title] + identifier[g_name] + literal[string] + identifier[g_close] ,
identifier[space] + identifier[title] + identifier[g_name] + literal[string] + identifier[g_close] ,
identifier[space] + identifier[title] + identifier[g_name] + literal[string] + identifier[g_close] ,
]
keyword[return] [ identifier[re] . identifier[compile] ( identifier[p] , identifier[re] . identifier[I] | identifier[re] . identifier[UNICODE] ) keyword[for] identifier[p] keyword[in] identifier[patterns] ] | def get_reference_line_numeration_marker_patterns(prefix=u''):
"""Return a list of compiled regex patterns used to search for the marker.
Marker of a reference line in a full-text document.
:param prefix: (string) the possible prefix to a reference line
:return: (list) of compiled regex patterns.
"""
title = u''
if type(prefix) in (str, unicode):
title = prefix # depends on [control=['if'], data=[]]
g_name = u'(?P<mark>'
g_close = u')'
space = '\\s*'
# [1]
# [<letters and numbers]
# noqa
# {1}
# (1)
# 1]
# 1}
# 1)
# [1.1]
# [ ]
# *
patterns = [space + title + g_name + '\\[\\s*(?P<marknum>\\d+)\\s*\\]' + g_close, space + title + g_name + '\\[\\s*[a-zA-Z:-]+\\+?\\s?(\\d{1,4}[A-Za-z:-]?)?\\s*\\]' + g_close, space + title + g_name + '\\{\\s*(?P<marknum>\\d+)\\s*\\}' + g_close, space + title + g_name + '\\<\\s*(?P<marknum>\\d+)\\s*\\>' + g_close, space + title + g_name + '\\(\\s*(?P<marknum>\\d+)\\s*\\)' + g_close, space + title + g_name + '(?P<marknum>\\d+)\\s*\\.(?!\\d)' + g_close, space + title + g_name + '(?P<marknum>\\d+)\\s+' + g_close, space + title + g_name + '(?P<marknum>\\d+)\\s*\\]' + g_close, space + title + g_name + '(?P<marknum>\\d+)\\s*\\}' + g_close, space + title + g_name + '(?P<marknum>\\d+)\\s*\\)' + g_close, space + title + g_name + '(?P<marknum>\\d+)\\s*\\>' + g_close, space + title + g_name + '\\[\\s*\\d+\\.\\d+\\s*\\]' + g_close, space + title + g_name + '\\[\\s*\\]' + g_close, space + title + g_name + '\\*' + g_close]
return [re.compile(p, re.I | re.UNICODE) for p in patterns] |
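A hedged usage sketch for the patterns above; find_marker and the sample reference line are invented, while the 'mark' group name comes from the function.

def find_marker(line, patterns):
    # Each pattern starts with \s*, so .match() anchored at the start suffices.
    for pattern in patterns:
        match = pattern.match(line)
        if match:
            return match.group('mark')
    return None

# find_marker('[12] A. Author, Some Journal (2001).',
#             get_reference_line_numeration_marker_patterns())
# should return '[12]' under these assumptions.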
def list_of_matching(self, tup_tree, matched):
"""
Parse only the children of particular types defined in the list/tuple
matched under tup_tree.
Other children are ignored rather than giving an error.
"""
result = []
for child in kids(tup_tree):
if name(child) not in matched:
continue
result.append(self.parse_any(child))
return result | def function[list_of_matching, parameter[self, tup_tree, matched]]:
constant[
Parse only the children of particular types defined in the list/tuple
matched under tup_tree.
Other children are ignored rather than giving an error.
]
variable[result] assign[=] list[[]]
for taget[name[child]] in starred[call[name[kids], parameter[name[tup_tree]]]] begin[:]
if compare[call[name[name], parameter[name[child]]] <ast.NotIn object at 0x7da2590d7190> name[matched]] begin[:]
continue
call[name[result].append, parameter[call[name[self].parse_any, parameter[name[child]]]]]
return[name[result]] | keyword[def] identifier[list_of_matching] ( identifier[self] , identifier[tup_tree] , identifier[matched] ):
literal[string]
identifier[result] =[]
keyword[for] identifier[child] keyword[in] identifier[kids] ( identifier[tup_tree] ):
keyword[if] identifier[name] ( identifier[child] ) keyword[not] keyword[in] identifier[matched] :
keyword[continue]
identifier[result] . identifier[append] ( identifier[self] . identifier[parse_any] ( identifier[child] ))
keyword[return] identifier[result] | def list_of_matching(self, tup_tree, matched):
"""
Parse only the children of particular types defined in the list/tuple
matched under tup_tree.
Other children are ignored rather than giving an error.
"""
result = []
for child in kids(tup_tree):
if name(child) not in matched:
continue # depends on [control=['if'], data=[]]
result.append(self.parse_any(child)) # depends on [control=['for'], data=['child']]
return result |
def walk(self):
""" Generate paths in "self.datapath".
"""
# FIFO?
if self._fifo:
if self._fifo > 1:
raise RuntimeError("INTERNAL ERROR: FIFO read twice!")
self._fifo += 1
# Read paths relative to directory containing the FIFO
with open(self.datapath, "r") as fifo:
while True:
relpath = fifo.readline().rstrip('\n')
if not relpath: # EOF?
break
self.LOG.debug("Read relative path %r from FIFO..." % (relpath,))
yield os.path.join(os.path.dirname(self.datapath), relpath)
self.LOG.debug("FIFO %r closed!" % (self.datapath,))
# Directory?
elif os.path.isdir(self.datapath):
# Walk the directory tree
for dirpath, dirnames, filenames in os.walk(self.datapath): #, followlinks=True):
# Don't scan blacklisted directories
for bad in dirnames[:]:
if any(fnmatch.fnmatch(bad, pattern) for pattern in self.ignore):
dirnames.remove(bad)
# Yield all filenames that aren't blacklisted
for filename in filenames:
if not any(fnmatch.fnmatch(filename, pattern) for pattern in self.ignore):
#yield os.path.join(dirpath[len(self.datapath)+1:], filename)
yield os.path.join(dirpath, filename)
# Single file
else:
# Yield the filename
yield self.datapath | def function[walk, parameter[self]]:
constant[ Generate paths in "self.datapath".
]
if name[self]._fifo begin[:]
if compare[name[self]._fifo greater[>] constant[1]] begin[:]
<ast.Raise object at 0x7da1b26adb70>
<ast.AugAssign object at 0x7da1b26aea10>
with call[name[open], parameter[name[self].datapath, constant[r]]] begin[:]
while constant[True] begin[:]
variable[relpath] assign[=] call[call[name[fifo].readline, parameter[]].rstrip, parameter[constant[
]]]
if <ast.UnaryOp object at 0x7da1b26acd60> begin[:]
break
call[name[self].LOG.debug, parameter[binary_operation[constant[Read relative path %r from FIFO...] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da1b26ae590>]]]]]
<ast.Yield object at 0x7da1b26ae470>
call[name[self].LOG.debug, parameter[binary_operation[constant[FIFO %r closed!] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Attribute object at 0x7da1b26ad090>]]]]] | keyword[def] identifier[walk] ( identifier[self] ):
literal[string]
keyword[if] identifier[self] . identifier[_fifo] :
keyword[if] identifier[self] . identifier[_fifo] > literal[int] :
keyword[raise] identifier[RuntimeError] ( literal[string] )
identifier[self] . identifier[_fifo] += literal[int]
keyword[with] identifier[open] ( identifier[self] . identifier[datapath] , literal[string] ) keyword[as] identifier[fifo] :
keyword[while] keyword[True] :
identifier[relpath] = identifier[fifo] . identifier[readline] (). identifier[rstrip] ( literal[string] )
keyword[if] keyword[not] identifier[relpath] :
keyword[break]
identifier[self] . identifier[LOG] . identifier[debug] ( literal[string] %( identifier[relpath] ,))
keyword[yield] identifier[os] . identifier[path] . identifier[join] ( identifier[os] . identifier[path] . identifier[dirname] ( identifier[self] . identifier[datapath] ), identifier[relpath] )
identifier[self] . identifier[LOG] . identifier[debug] ( literal[string] %( identifier[self] . identifier[datapath] ,))
keyword[elif] identifier[os] . identifier[path] . identifier[isdir] ( identifier[self] . identifier[datapath] ):
keyword[for] identifier[dirpath] , identifier[dirnames] , identifier[filenames] keyword[in] identifier[os] . identifier[walk] ( identifier[self] . identifier[datapath] ):
keyword[for] identifier[bad] keyword[in] identifier[dirnames] [:]:
keyword[if] identifier[any] ( identifier[fnmatch] . identifier[fnmatch] ( identifier[bad] , identifier[pattern] ) keyword[for] identifier[pattern] keyword[in] identifier[self] . identifier[ignore] ):
identifier[dirnames] . identifier[remove] ( identifier[bad] )
keyword[for] identifier[filename] keyword[in] identifier[filenames] :
keyword[if] keyword[not] identifier[any] ( identifier[fnmatch] . identifier[fnmatch] ( identifier[filename] , identifier[pattern] ) keyword[for] identifier[pattern] keyword[in] identifier[self] . identifier[ignore] ):
keyword[yield] identifier[os] . identifier[path] . identifier[join] ( identifier[dirpath] , identifier[filename] )
keyword[else] :
keyword[yield] identifier[self] . identifier[datapath] | def walk(self):
""" Generate paths in "self.datapath".
"""
# FIFO?
if self._fifo:
if self._fifo > 1:
raise RuntimeError('INTERNAL ERROR: FIFO read twice!') # depends on [control=['if'], data=[]]
self._fifo += 1
# Read paths relative to directory containing the FIFO
with open(self.datapath, 'r') as fifo:
while True:
relpath = fifo.readline().rstrip('\n')
if not relpath: # EOF?
break # depends on [control=['if'], data=[]]
self.LOG.debug('Read relative path %r from FIFO...' % (relpath,))
yield os.path.join(os.path.dirname(self.datapath), relpath) # depends on [control=['while'], data=[]] # depends on [control=['with'], data=['fifo']]
self.LOG.debug('FIFO %r closed!' % (self.datapath,)) # depends on [control=['if'], data=[]]
# Directory?
elif os.path.isdir(self.datapath):
# Walk the directory tree
for (dirpath, dirnames, filenames) in os.walk(self.datapath): #, followlinks=True):
# Don't scan blacklisted directories
for bad in dirnames[:]:
if any((fnmatch.fnmatch(bad, pattern) for pattern in self.ignore)):
dirnames.remove(bad) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['bad']]
# Yield all filenames that aren't blacklisted
for filename in filenames:
if not any((fnmatch.fnmatch(filename, pattern) for pattern in self.ignore)):
#yield os.path.join(dirpath[len(self.datapath)+1:], filename)
yield os.path.join(dirpath, filename) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['filename']] # depends on [control=['for'], data=[]] # depends on [control=['if'], data=[]]
else:
# Single file
# Yield the filename
yield self.datapath |
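A condensed sketch of the directory branch above; the ignore globs are examples, not values from the source.

import fnmatch
import os

def walk_filtered(root, ignore=('.git', '*.tmp')):
    for dirpath, dirnames, filenames in os.walk(root):
        # Prune blacklisted directories in place so os.walk never descends.
        dirnames[:] = [d for d in dirnames
                       if not any(fnmatch.fnmatch(d, pat) for pat in ignore)]
        for filename in filenames:
            if not any(fnmatch.fnmatch(filename, pat) for pat in ignore):
                yield os.path.join(dirpath, filename)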
def get_scenario_data(scenario_id,**kwargs):
"""
    Get all the datasets used in the scenario with the specified ID
@returns a list of dictionaries
"""
user_id = kwargs.get('user_id')
scenario_data = db.DBSession.query(Dataset).filter(Dataset.id==ResourceScenario.dataset_id, ResourceScenario.scenario_id==scenario_id).options(joinedload_all('metadata')).distinct().all()
for sd in scenario_data:
if sd.hidden == 'Y':
try:
sd.check_read_permission(user_id)
except:
sd.value = None
sd.metadata = []
db.DBSession.expunge_all()
log.info("Retrieved %s datasets", len(scenario_data))
return scenario_data | def function[get_scenario_data, parameter[scenario_id]]:
constant[
    Get all the datasets used in the scenario with the specified ID
@returns a list of dictionaries
]
variable[user_id] assign[=] call[name[kwargs].get, parameter[constant[user_id]]]
variable[scenario_data] assign[=] call[call[call[call[call[name[db].DBSession.query, parameter[name[Dataset]]].filter, parameter[compare[name[Dataset].id equal[==] name[ResourceScenario].dataset_id], compare[name[ResourceScenario].scenario_id equal[==] name[scenario_id]]]].options, parameter[call[name[joinedload_all], parameter[constant[metadata]]]]].distinct, parameter[]].all, parameter[]]
for taget[name[sd]] in starred[name[scenario_data]] begin[:]
if compare[name[sd].hidden equal[==] constant[Y]] begin[:]
<ast.Try object at 0x7da18bcc8700>
call[name[db].DBSession.expunge_all, parameter[]]
call[name[log].info, parameter[constant[Retrieved %s datasets], call[name[len], parameter[name[scenario_data]]]]]
return[name[scenario_data]] | keyword[def] identifier[get_scenario_data] ( identifier[scenario_id] ,** identifier[kwargs] ):
literal[string]
identifier[user_id] = identifier[kwargs] . identifier[get] ( literal[string] )
identifier[scenario_data] = identifier[db] . identifier[DBSession] . identifier[query] ( identifier[Dataset] ). identifier[filter] ( identifier[Dataset] . identifier[id] == identifier[ResourceScenario] . identifier[dataset_id] , identifier[ResourceScenario] . identifier[scenario_id] == identifier[scenario_id] ). identifier[options] ( identifier[joinedload_all] ( literal[string] )). identifier[distinct] (). identifier[all] ()
keyword[for] identifier[sd] keyword[in] identifier[scenario_data] :
keyword[if] identifier[sd] . identifier[hidden] == literal[string] :
keyword[try] :
identifier[sd] . identifier[check_read_permission] ( identifier[user_id] )
keyword[except] :
identifier[sd] . identifier[value] = keyword[None]
identifier[sd] . identifier[metadata] =[]
identifier[db] . identifier[DBSession] . identifier[expunge_all] ()
identifier[log] . identifier[info] ( literal[string] , identifier[len] ( identifier[scenario_data] ))
keyword[return] identifier[scenario_data] | def get_scenario_data(scenario_id, **kwargs):
"""
    Get all the datasets used in the scenario with the specified ID
@returns a list of dictionaries
"""
user_id = kwargs.get('user_id')
scenario_data = db.DBSession.query(Dataset).filter(Dataset.id == ResourceScenario.dataset_id, ResourceScenario.scenario_id == scenario_id).options(joinedload_all('metadata')).distinct().all()
for sd in scenario_data:
if sd.hidden == 'Y':
try:
sd.check_read_permission(user_id) # depends on [control=['try'], data=[]]
except:
sd.value = None
sd.metadata = [] # depends on [control=['except'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['sd']]
db.DBSession.expunge_all()
log.info('Retrieved %s datasets', len(scenario_data))
return scenario_data |
def add_plugin_arguments(self, parser):
"""Add plugin arguments to argument parser.
Parameters
----------
parser : argparse.ArgumentParser
The main haas ArgumentParser.
"""
for manager in self.hook_managers.values():
if len(list(manager)) == 0:
continue
manager.map(self._add_hook_extension_arguments, parser)
for namespace, manager in self.driver_managers.items():
choices = list(sorted(manager.names()))
if len(choices) == 0:
continue
option, dest = self._namespace_to_option(namespace)
parser.add_argument(
option, help=self._help[namespace], dest=dest,
choices=choices, default='default')
option_prefix = '{0}-'.format(option)
dest_prefix = '{0}_'.format(dest)
manager.map(self._add_driver_extension_arguments,
parser, option_prefix, dest_prefix) | def function[add_plugin_arguments, parameter[self, parser]]:
constant[Add plugin arguments to argument parser.
Parameters
----------
parser : argparse.ArgumentParser
The main haas ArgumentParser.
]
for taget[name[manager]] in starred[call[name[self].hook_managers.values, parameter[]]] begin[:]
if compare[call[name[len], parameter[call[name[list], parameter[name[manager]]]]] equal[==] constant[0]] begin[:]
continue
call[name[manager].map, parameter[name[self]._add_hook_extension_arguments, name[parser]]]
for taget[tuple[[<ast.Name object at 0x7da20e956e00>, <ast.Name object at 0x7da20e954430>]]] in starred[call[name[self].driver_managers.items, parameter[]]] begin[:]
variable[choices] assign[=] call[name[list], parameter[call[name[sorted], parameter[call[name[manager].names, parameter[]]]]]]
if compare[call[name[len], parameter[name[choices]]] equal[==] constant[0]] begin[:]
continue
<ast.Tuple object at 0x7da20e955690> assign[=] call[name[self]._namespace_to_option, parameter[name[namespace]]]
call[name[parser].add_argument, parameter[name[option]]]
variable[option_prefix] assign[=] call[constant[{0}-].format, parameter[name[option]]]
variable[dest_prefix] assign[=] call[constant[{0}_].format, parameter[name[dest]]]
call[name[manager].map, parameter[name[self]._add_driver_extension_arguments, name[parser], name[option_prefix], name[dest_prefix]]] | keyword[def] identifier[add_plugin_arguments] ( identifier[self] , identifier[parser] ):
literal[string]
keyword[for] identifier[manager] keyword[in] identifier[self] . identifier[hook_managers] . identifier[values] ():
keyword[if] identifier[len] ( identifier[list] ( identifier[manager] ))== literal[int] :
keyword[continue]
identifier[manager] . identifier[map] ( identifier[self] . identifier[_add_hook_extension_arguments] , identifier[parser] )
keyword[for] identifier[namespace] , identifier[manager] keyword[in] identifier[self] . identifier[driver_managers] . identifier[items] ():
identifier[choices] = identifier[list] ( identifier[sorted] ( identifier[manager] . identifier[names] ()))
keyword[if] identifier[len] ( identifier[choices] )== literal[int] :
keyword[continue]
identifier[option] , identifier[dest] = identifier[self] . identifier[_namespace_to_option] ( identifier[namespace] )
identifier[parser] . identifier[add_argument] (
identifier[option] , identifier[help] = identifier[self] . identifier[_help] [ identifier[namespace] ], identifier[dest] = identifier[dest] ,
identifier[choices] = identifier[choices] , identifier[default] = literal[string] )
identifier[option_prefix] = literal[string] . identifier[format] ( identifier[option] )
identifier[dest_prefix] = literal[string] . identifier[format] ( identifier[dest] )
identifier[manager] . identifier[map] ( identifier[self] . identifier[_add_driver_extension_arguments] ,
identifier[parser] , identifier[option_prefix] , identifier[dest_prefix] ) | def add_plugin_arguments(self, parser):
"""Add plugin arguments to argument parser.
Parameters
----------
parser : argparse.ArgumentParser
The main haas ArgumentParser.
"""
for manager in self.hook_managers.values():
if len(list(manager)) == 0:
continue # depends on [control=['if'], data=[]]
manager.map(self._add_hook_extension_arguments, parser) # depends on [control=['for'], data=['manager']]
for (namespace, manager) in self.driver_managers.items():
choices = list(sorted(manager.names()))
if len(choices) == 0:
continue # depends on [control=['if'], data=[]]
(option, dest) = self._namespace_to_option(namespace)
parser.add_argument(option, help=self._help[namespace], dest=dest, choices=choices, default='default')
option_prefix = '{0}-'.format(option)
dest_prefix = '{0}_'.format(dest)
manager.map(self._add_driver_extension_arguments, parser, option_prefix, dest_prefix) # depends on [control=['for'], data=[]] |
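The _namespace_to_option helper is not shown in the source; this is one plausible sketch of the mapping the surrounding code assumes.

def namespace_to_option(namespace):
    # e.g. 'test_runner' -> ('--test-runner', 'test_runner')
    option = '--{0}'.format(namespace.replace('_', '-'))
    dest = namespace.replace('-', '_')
    return option, dest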
def run0(self):
"""Run one item (a callback or an RPC wait_any).
Returns:
A time to sleep if something happened (may be 0);
None if all queues are empty.
"""
if self.current:
self.inactive = 0
callback, args, kwds = self.current.popleft()
_logging_debug('nowevent: %s', callback.__name__)
callback(*args, **kwds)
return 0
if self.run_idle():
return 0
delay = None
if self.queue:
delay = self.queue[0][0] - self.clock.now()
if delay <= 0:
self.inactive = 0
_, callback, args, kwds = self.queue.pop(0)
_logging_debug('event: %s', callback.__name__)
callback(*args, **kwds)
# TODO: What if it raises an exception?
return 0
if self.rpcs:
self.inactive = 0
rpc = datastore_rpc.MultiRpc.wait_any(self.rpcs)
if rpc is not None:
_logging_debug('rpc: %s.%s', rpc.service, rpc.method)
# Yes, wait_any() may return None even for a non-empty argument.
# But no, it won't ever return an RPC not in its argument.
if rpc not in self.rpcs:
raise RuntimeError('rpc %r was not given to wait_any as a choice %r' %
(rpc, self.rpcs))
callback, args, kwds = self.rpcs[rpc]
del self.rpcs[rpc]
if callback is not None:
callback(*args, **kwds)
# TODO: Again, what about exceptions?
return 0
return delay | def function[run0, parameter[self]]:
constant[Run one item (a callback or an RPC wait_any).
Returns:
A time to sleep if something happened (may be 0);
None if all queues are empty.
]
if name[self].current begin[:]
name[self].inactive assign[=] constant[0]
<ast.Tuple object at 0x7da18f09fd30> assign[=] call[name[self].current.popleft, parameter[]]
call[name[_logging_debug], parameter[constant[nowevent: %s], name[callback].__name__]]
call[name[callback], parameter[<ast.Starred object at 0x7da18f09cb50>]]
return[constant[0]]
if call[name[self].run_idle, parameter[]] begin[:]
return[constant[0]]
variable[delay] assign[=] constant[None]
if name[self].queue begin[:]
variable[delay] assign[=] binary_operation[call[call[name[self].queue][constant[0]]][constant[0]] - call[name[self].clock.now, parameter[]]]
if compare[name[delay] less_or_equal[<=] constant[0]] begin[:]
name[self].inactive assign[=] constant[0]
<ast.Tuple object at 0x7da1b2344be0> assign[=] call[name[self].queue.pop, parameter[constant[0]]]
call[name[_logging_debug], parameter[constant[event: %s], name[callback].__name__]]
call[name[callback], parameter[<ast.Starred object at 0x7da1b2346ad0>]]
return[constant[0]]
if name[self].rpcs begin[:]
name[self].inactive assign[=] constant[0]
variable[rpc] assign[=] call[name[datastore_rpc].MultiRpc.wait_any, parameter[name[self].rpcs]]
if compare[name[rpc] is_not constant[None]] begin[:]
call[name[_logging_debug], parameter[constant[rpc: %s.%s], name[rpc].service, name[rpc].method]]
if compare[name[rpc] <ast.NotIn object at 0x7da2590d7190> name[self].rpcs] begin[:]
<ast.Raise object at 0x7da1b2346dd0>
<ast.Tuple object at 0x7da1b2345de0> assign[=] call[name[self].rpcs][name[rpc]]
<ast.Delete object at 0x7da1b2346800>
if compare[name[callback] is_not constant[None]] begin[:]
call[name[callback], parameter[<ast.Starred object at 0x7da1b2347640>]]
return[constant[0]]
return[name[delay]] | keyword[def] identifier[run0] ( identifier[self] ):
literal[string]
keyword[if] identifier[self] . identifier[current] :
identifier[self] . identifier[inactive] = literal[int]
identifier[callback] , identifier[args] , identifier[kwds] = identifier[self] . identifier[current] . identifier[popleft] ()
identifier[_logging_debug] ( literal[string] , identifier[callback] . identifier[__name__] )
identifier[callback] (* identifier[args] ,** identifier[kwds] )
keyword[return] literal[int]
keyword[if] identifier[self] . identifier[run_idle] ():
keyword[return] literal[int]
identifier[delay] = keyword[None]
keyword[if] identifier[self] . identifier[queue] :
identifier[delay] = identifier[self] . identifier[queue] [ literal[int] ][ literal[int] ]- identifier[self] . identifier[clock] . identifier[now] ()
keyword[if] identifier[delay] <= literal[int] :
identifier[self] . identifier[inactive] = literal[int]
identifier[_] , identifier[callback] , identifier[args] , identifier[kwds] = identifier[self] . identifier[queue] . identifier[pop] ( literal[int] )
identifier[_logging_debug] ( literal[string] , identifier[callback] . identifier[__name__] )
identifier[callback] (* identifier[args] ,** identifier[kwds] )
keyword[return] literal[int]
keyword[if] identifier[self] . identifier[rpcs] :
identifier[self] . identifier[inactive] = literal[int]
identifier[rpc] = identifier[datastore_rpc] . identifier[MultiRpc] . identifier[wait_any] ( identifier[self] . identifier[rpcs] )
keyword[if] identifier[rpc] keyword[is] keyword[not] keyword[None] :
identifier[_logging_debug] ( literal[string] , identifier[rpc] . identifier[service] , identifier[rpc] . identifier[method] )
keyword[if] identifier[rpc] keyword[not] keyword[in] identifier[self] . identifier[rpcs] :
keyword[raise] identifier[RuntimeError] ( literal[string] %
( identifier[rpc] , identifier[self] . identifier[rpcs] ))
identifier[callback] , identifier[args] , identifier[kwds] = identifier[self] . identifier[rpcs] [ identifier[rpc] ]
keyword[del] identifier[self] . identifier[rpcs] [ identifier[rpc] ]
keyword[if] identifier[callback] keyword[is] keyword[not] keyword[None] :
identifier[callback] (* identifier[args] ,** identifier[kwds] )
keyword[return] literal[int]
keyword[return] identifier[delay] | def run0(self):
"""Run one item (a callback or an RPC wait_any).
Returns:
A time to sleep if something happened (may be 0);
None if all queues are empty.
"""
if self.current:
self.inactive = 0
(callback, args, kwds) = self.current.popleft()
_logging_debug('nowevent: %s', callback.__name__)
callback(*args, **kwds)
return 0 # depends on [control=['if'], data=[]]
if self.run_idle():
return 0 # depends on [control=['if'], data=[]]
delay = None
if self.queue:
delay = self.queue[0][0] - self.clock.now()
if delay <= 0:
self.inactive = 0
(_, callback, args, kwds) = self.queue.pop(0)
_logging_debug('event: %s', callback.__name__)
callback(*args, **kwds)
# TODO: What if it raises an exception?
return 0 # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
if self.rpcs:
self.inactive = 0
rpc = datastore_rpc.MultiRpc.wait_any(self.rpcs)
if rpc is not None:
_logging_debug('rpc: %s.%s', rpc.service, rpc.method)
# Yes, wait_any() may return None even for a non-empty argument.
# But no, it won't ever return an RPC not in its argument.
if rpc not in self.rpcs:
raise RuntimeError('rpc %r was not given to wait_any as a choice %r' % (rpc, self.rpcs)) # depends on [control=['if'], data=['rpc']]
(callback, args, kwds) = self.rpcs[rpc]
del self.rpcs[rpc]
if callback is not None:
callback(*args, **kwds) # depends on [control=['if'], data=['callback']] # depends on [control=['if'], data=['rpc']]
# TODO: Again, what about exceptions?
return 0 # depends on [control=['if'], data=[]]
return delay |
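A toy sketch of the three-stage poll order above (immediate callbacks, timed queue, then blocking waits); the scheduler internals are simplified stand-ins for the eventloop's queues and RPC handling.

import heapq
import time

def run0_toy(now_queue, timed_heap, clock=time.monotonic):
    if now_queue:
        callback, args = now_queue.pop(0)
        callback(*args)
        return 0
    if timed_heap:
        when, callback, args = timed_heap[0]
        delay = when - clock()
        if delay > 0:
            return delay              # caller may sleep this long
        heapq.heappop(timed_heap)
        callback(*args)
        return 0
    return None                       # nothing left to run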
def get_private_and_public(username, password_verifier, private, preset):
"""Print out server public and private."""
session = SRPServerSession(
SRPContext(username, prime=preset[0], generator=preset[1]),
hex_from_b64(password_verifier), private=private)
click.secho('Server private: %s' % session.private_b64)
click.secho('Server public: %s' % session.public_b64) | def function[get_private_and_public, parameter[username, password_verifier, private, preset]]:
constant[Print out server public and private.]
variable[session] assign[=] call[name[SRPServerSession], parameter[call[name[SRPContext], parameter[name[username]]], call[name[hex_from_b64], parameter[name[password_verifier]]]]]
call[name[click].secho, parameter[binary_operation[constant[Server private: %s] <ast.Mod object at 0x7da2590d6920> name[session].private_b64]]]
call[name[click].secho, parameter[binary_operation[constant[Server public: %s] <ast.Mod object at 0x7da2590d6920> name[session].public_b64]]] | keyword[def] identifier[get_private_and_public] ( identifier[username] , identifier[password_verifier] , identifier[private] , identifier[preset] ):
literal[string]
identifier[session] = identifier[SRPServerSession] (
identifier[SRPContext] ( identifier[username] , identifier[prime] = identifier[preset] [ literal[int] ], identifier[generator] = identifier[preset] [ literal[int] ]),
identifier[hex_from_b64] ( identifier[password_verifier] ), identifier[private] = identifier[private] )
identifier[click] . identifier[secho] ( literal[string] % identifier[session] . identifier[private_b64] )
identifier[click] . identifier[secho] ( literal[string] % identifier[session] . identifier[public_b64] ) | def get_private_and_public(username, password_verifier, private, preset):
"""Print out server public and private."""
session = SRPServerSession(SRPContext(username, prime=preset[0], generator=preset[1]), hex_from_b64(password_verifier), private=private)
click.secho('Server private: %s' % session.private_b64)
click.secho('Server public: %s' % session.public_b64) |
async def send_offnetwork_invitation(
self, send_offnetwork_invitation_request
):
"""Send an email to invite a non-Google contact to Hangouts."""
response = hangouts_pb2.SendOffnetworkInvitationResponse()
await self._pb_request('devices/sendoffnetworkinvitation',
send_offnetwork_invitation_request,
response)
return response | <ast.AsyncFunctionDef object at 0x7da207f98d90> | keyword[async] keyword[def] identifier[send_offnetwork_invitation] (
identifier[self] , identifier[send_offnetwork_invitation_request]
):
literal[string]
identifier[response] = identifier[hangouts_pb2] . identifier[SendOffnetworkInvitationResponse] ()
keyword[await] identifier[self] . identifier[_pb_request] ( literal[string] ,
identifier[send_offnetwork_invitation_request] ,
identifier[response] )
keyword[return] identifier[response] | async def send_offnetwork_invitation(self, send_offnetwork_invitation_request):
"""Send an email to invite a non-Google contact to Hangouts."""
response = hangouts_pb2.SendOffnetworkInvitationResponse()
await self._pb_request('devices/sendoffnetworkinvitation', send_offnetwork_invitation_request, response)
return response |
def read_named_csv(name, data_path=DATA_PATH, nrows=None, verbose=True):
""" Convert a dataset in a local file (usually a CSV) into a Pandas DataFrame
TODO: should be called read_named_dataset
Args:
        `name` is assumed not to have an extension (like ".csv"); alternative extensions are tried automatically.
"""
if os.path.isfile(name):
try:
return read_json(name)
except (IOError, UnicodeDecodeError, json.JSONDecodeError):
pass
try:
return read_csv(name, nrows=nrows)
except (IOError, pd.errors.ParserError):
pass
try:
return read_txt(name, nrows=nrows)
except (IOError, UnicodeDecodeError):
pass
data_path = expand_filepath(data_path)
if os.path.isfile(os.path.join(data_path, name)):
return read_csv(os.path.join(data_path, name), nrows=nrows)
if name in DATASET_NAME2FILENAME:
name = DATASET_NAME2FILENAME[name]
if name.lower().endswith('.txt') or name.lower().endswith('.txt.gz'):
return read_text(os.path.join(data_path, name), nrows=nrows)
else:
return read_csv(os.path.join(data_path, name), nrows=nrows)
try:
return read_csv(os.path.join(data_path, name + '.csv.gz'), nrows=nrows)
except IOError:
pass
try:
return read_csv(os.path.join(data_path, name + '.csv'), nrows=nrows)
except IOError:
pass
try:
return read_json(os.path.join(data_path, name + '.json'))
except IOError:
pass
try:
return read_txt(os.path.join(data_path, name + '.txt'), verbose=verbose)
except IOError:
pass
# FIXME: mapping from short name to uncompressed filename
# BIGDATA files are usually not loadable into dataframes
try:
return KeyedVectors.load_word2vec_format(os.path.join(BIGDATA_PATH, name + '.bin.gz'), binary=True)
except IOError:
pass
except ValueError:
pass
try:
return read_txt(os.path.join(BIGDATA_PATH, name + '.txt'), verbose=verbose)
except IOError:
pass | def function[read_named_csv, parameter[name, data_path, nrows, verbose]]:
constant[ Convert a dataset in a local file (usually a CSV) into a Pandas DataFrame
TODO: should be called read_named_dataset
Args:
    `name` is assumed not to have an extension (like ".csv"); alternative extensions are tried automatically.
]
if call[name[os].path.isfile, parameter[name[name]]] begin[:]
<ast.Try object at 0x7da207f9a0b0>
<ast.Try object at 0x7da207f9a590>
<ast.Try object at 0x7da207f98070>
variable[data_path] assign[=] call[name[expand_filepath], parameter[name[data_path]]]
if call[name[os].path.isfile, parameter[call[name[os].path.join, parameter[name[data_path], name[name]]]]] begin[:]
return[call[name[read_csv], parameter[call[name[os].path.join, parameter[name[data_path], name[name]]]]]]
if compare[name[name] in name[DATASET_NAME2FILENAME]] begin[:]
variable[name] assign[=] call[name[DATASET_NAME2FILENAME]][name[name]]
if <ast.BoolOp object at 0x7da207f99ba0> begin[:]
return[call[name[read_text], parameter[call[name[os].path.join, parameter[name[data_path], name[name]]]]]]
<ast.Try object at 0x7da207f9a650>
<ast.Try object at 0x7da207f9a500>
<ast.Try object at 0x7da207f981c0>
<ast.Try object at 0x7da2054a46a0>
<ast.Try object at 0x7da2054a57b0>
<ast.Try object at 0x7da2054a58a0> | keyword[def] identifier[read_named_csv] ( identifier[name] , identifier[data_path] = identifier[DATA_PATH] , identifier[nrows] = keyword[None] , identifier[verbose] = keyword[True] ):
literal[string]
keyword[if] identifier[os] . identifier[path] . identifier[isfile] ( identifier[name] ):
keyword[try] :
keyword[return] identifier[read_json] ( identifier[name] )
keyword[except] ( identifier[IOError] , identifier[UnicodeDecodeError] , identifier[json] . identifier[JSONDecodeError] ):
keyword[pass]
keyword[try] :
keyword[return] identifier[read_csv] ( identifier[name] , identifier[nrows] = identifier[nrows] )
keyword[except] ( identifier[IOError] , identifier[pd] . identifier[errors] . identifier[ParserError] ):
keyword[pass]
keyword[try] :
keyword[return] identifier[read_txt] ( identifier[name] , identifier[nrows] = identifier[nrows] )
keyword[except] ( identifier[IOError] , identifier[UnicodeDecodeError] ):
keyword[pass]
identifier[data_path] = identifier[expand_filepath] ( identifier[data_path] )
keyword[if] identifier[os] . identifier[path] . identifier[isfile] ( identifier[os] . identifier[path] . identifier[join] ( identifier[data_path] , identifier[name] )):
keyword[return] identifier[read_csv] ( identifier[os] . identifier[path] . identifier[join] ( identifier[data_path] , identifier[name] ), identifier[nrows] = identifier[nrows] )
keyword[if] identifier[name] keyword[in] identifier[DATASET_NAME2FILENAME] :
identifier[name] = identifier[DATASET_NAME2FILENAME] [ identifier[name] ]
keyword[if] identifier[name] . identifier[lower] (). identifier[endswith] ( literal[string] ) keyword[or] identifier[name] . identifier[lower] (). identifier[endswith] ( literal[string] ):
keyword[return] identifier[read_text] ( identifier[os] . identifier[path] . identifier[join] ( identifier[data_path] , identifier[name] ), identifier[nrows] = identifier[nrows] )
keyword[else] :
keyword[return] identifier[read_csv] ( identifier[os] . identifier[path] . identifier[join] ( identifier[data_path] , identifier[name] ), identifier[nrows] = identifier[nrows] )
keyword[try] :
keyword[return] identifier[read_csv] ( identifier[os] . identifier[path] . identifier[join] ( identifier[data_path] , identifier[name] + literal[string] ), identifier[nrows] = identifier[nrows] )
keyword[except] identifier[IOError] :
keyword[pass]
keyword[try] :
keyword[return] identifier[read_csv] ( identifier[os] . identifier[path] . identifier[join] ( identifier[data_path] , identifier[name] + literal[string] ), identifier[nrows] = identifier[nrows] )
keyword[except] identifier[IOError] :
keyword[pass]
keyword[try] :
keyword[return] identifier[read_json] ( identifier[os] . identifier[path] . identifier[join] ( identifier[data_path] , identifier[name] + literal[string] ))
keyword[except] identifier[IOError] :
keyword[pass]
keyword[try] :
keyword[return] identifier[read_txt] ( identifier[os] . identifier[path] . identifier[join] ( identifier[data_path] , identifier[name] + literal[string] ), identifier[verbose] = identifier[verbose] )
keyword[except] identifier[IOError] :
keyword[pass]
keyword[try] :
keyword[return] identifier[KeyedVectors] . identifier[load_word2vec_format] ( identifier[os] . identifier[path] . identifier[join] ( identifier[BIGDATA_PATH] , identifier[name] + literal[string] ), identifier[binary] = keyword[True] )
keyword[except] identifier[IOError] :
keyword[pass]
keyword[except] identifier[ValueError] :
keyword[pass]
keyword[try] :
keyword[return] identifier[read_txt] ( identifier[os] . identifier[path] . identifier[join] ( identifier[BIGDATA_PATH] , identifier[name] + literal[string] ), identifier[verbose] = identifier[verbose] )
keyword[except] identifier[IOError] :
keyword[pass] | def read_named_csv(name, data_path=DATA_PATH, nrows=None, verbose=True):
""" Convert a dataset in a local file (usually a CSV) into a Pandas DataFrame
TODO: should be called read_named_dataset
Args:
    `name` is assumed not to have an extension (like ".csv"); alternative extensions are tried automatically.
"""
if os.path.isfile(name):
try:
return read_json(name) # depends on [control=['try'], data=[]]
except (IOError, UnicodeDecodeError, json.JSONDecodeError):
pass # depends on [control=['except'], data=[]]
try:
return read_csv(name, nrows=nrows) # depends on [control=['try'], data=[]]
except (IOError, pd.errors.ParserError):
pass # depends on [control=['except'], data=[]]
try:
return read_txt(name, nrows=nrows) # depends on [control=['try'], data=[]]
except (IOError, UnicodeDecodeError):
pass # depends on [control=['except'], data=[]] # depends on [control=['if'], data=[]]
data_path = expand_filepath(data_path)
if os.path.isfile(os.path.join(data_path, name)):
return read_csv(os.path.join(data_path, name), nrows=nrows) # depends on [control=['if'], data=[]]
if name in DATASET_NAME2FILENAME:
name = DATASET_NAME2FILENAME[name]
if name.lower().endswith('.txt') or name.lower().endswith('.txt.gz'):
return read_text(os.path.join(data_path, name), nrows=nrows) # depends on [control=['if'], data=[]]
else:
return read_csv(os.path.join(data_path, name), nrows=nrows) # depends on [control=['if'], data=['name', 'DATASET_NAME2FILENAME']]
try:
return read_csv(os.path.join(data_path, name + '.csv.gz'), nrows=nrows) # depends on [control=['try'], data=[]]
except IOError:
pass # depends on [control=['except'], data=[]]
try:
return read_csv(os.path.join(data_path, name + '.csv'), nrows=nrows) # depends on [control=['try'], data=[]]
except IOError:
pass # depends on [control=['except'], data=[]]
try:
return read_json(os.path.join(data_path, name + '.json')) # depends on [control=['try'], data=[]]
except IOError:
pass # depends on [control=['except'], data=[]]
try:
return read_txt(os.path.join(data_path, name + '.txt'), verbose=verbose) # depends on [control=['try'], data=[]]
except IOError:
pass # depends on [control=['except'], data=[]]
# FIXME: mapping from short name to uncompressed filename
# BIGDATA files are usually not loadable into dataframes
try:
return KeyedVectors.load_word2vec_format(os.path.join(BIGDATA_PATH, name + '.bin.gz'), binary=True) # depends on [control=['try'], data=[]]
except IOError:
pass # depends on [control=['except'], data=[]]
except ValueError:
pass # depends on [control=['except'], data=[]]
try:
return read_txt(os.path.join(BIGDATA_PATH, name + '.txt'), verbose=verbose) # depends on [control=['try'], data=[]]
except IOError:
pass # depends on [control=['except'], data=[]] |
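A condensed sketch of the try-each-extension lookup above; the extension order mirrors the source, but resolve_dataset_path itself is invented.

import os

def resolve_dataset_path(name, data_path,
                         extensions=('', '.csv.gz', '.csv', '.json', '.txt')):
    # Return the first existing candidate path, or None when nothing matches.
    for ext in extensions:
        candidate = os.path.join(data_path, name + ext)
        if os.path.isfile(candidate):
            return candidate
    return None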
def entropy(self, t, structure=None):
"""
Vibrational entropy at temperature T obtained from the integration of the DOS.
Only positive frequencies will be used.
        Result in J/(K*mol-c). A mol-c is the abbreviation of a mole-cell, that is, Avogadro's
        number times the number of atoms in a unit cell. To compare with experimental data the result
        should be divided by the number of formula units in the cell. If the structure is provided
the division is performed internally and the result is in J/(K*mol)
Args:
t: a temperature in K
            structure: the structure of the system. If not None it will be used to determine the number of
formula units
Returns:
Vibrational entropy
"""
if t == 0:
return 0
freqs = self._positive_frequencies
dens = self._positive_densities
coth = lambda x: 1.0 / np.tanh(x)
wd2kt = freqs / (2 * BOLTZ_THZ_PER_K * t)
s = np.trapz((wd2kt * coth(wd2kt) - np.log(2 * np.sinh(wd2kt))) * dens, x=freqs)
s *= const.Boltzmann * const.Avogadro
if structure:
formula_units = structure.composition.num_atoms / structure.composition.reduced_composition.num_atoms
s /= formula_units
return s | def function[entropy, parameter[self, t, structure]]:
constant[
Vibrational entropy at temperature T obtained from the integration of the DOS.
Only positive frequencies will be used.
    Result in J/(K*mol-c). A mol-c is the abbreviation of a mole-cell, that is, Avogadro's
    number times the number of atoms in a unit cell. To compare with experimental data the result
    should be divided by the number of formula units in the cell. If the structure is provided
the division is performed internally and the result is in J/(K*mol)
Args:
t: a temperature in K
        structure: the structure of the system. If not None it will be used to determine the number of
formula units
Returns:
Vibrational entropy
]
if compare[name[t] equal[==] constant[0]] begin[:]
return[constant[0]]
variable[freqs] assign[=] name[self]._positive_frequencies
variable[dens] assign[=] name[self]._positive_densities
variable[coth] assign[=] <ast.Lambda object at 0x7da18f811a20>
variable[wd2kt] assign[=] binary_operation[name[freqs] / binary_operation[binary_operation[constant[2] * name[BOLTZ_THZ_PER_K]] * name[t]]]
variable[s] assign[=] call[name[np].trapz, parameter[binary_operation[binary_operation[binary_operation[name[wd2kt] * call[name[coth], parameter[name[wd2kt]]]] - call[name[np].log, parameter[binary_operation[constant[2] * call[name[np].sinh, parameter[name[wd2kt]]]]]]] * name[dens]]]]
<ast.AugAssign object at 0x7da18f8110f0>
if name[structure] begin[:]
variable[formula_units] assign[=] binary_operation[name[structure].composition.num_atoms / name[structure].composition.reduced_composition.num_atoms]
<ast.AugAssign object at 0x7da18f810c70>
return[name[s]] | keyword[def] identifier[entropy] ( identifier[self] , identifier[t] , identifier[structure] = keyword[None] ):
literal[string]
keyword[if] identifier[t] == literal[int] :
keyword[return] literal[int]
identifier[freqs] = identifier[self] . identifier[_positive_frequencies]
identifier[dens] = identifier[self] . identifier[_positive_densities]
identifier[coth] = keyword[lambda] identifier[x] : literal[int] / identifier[np] . identifier[tanh] ( identifier[x] )
identifier[wd2kt] = identifier[freqs] /( literal[int] * identifier[BOLTZ_THZ_PER_K] * identifier[t] )
identifier[s] = identifier[np] . identifier[trapz] (( identifier[wd2kt] * identifier[coth] ( identifier[wd2kt] )- identifier[np] . identifier[log] ( literal[int] * identifier[np] . identifier[sinh] ( identifier[wd2kt] )))* identifier[dens] , identifier[x] = identifier[freqs] )
identifier[s] *= identifier[const] . identifier[Boltzmann] * identifier[const] . identifier[Avogadro]
keyword[if] identifier[structure] :
identifier[formula_units] = identifier[structure] . identifier[composition] . identifier[num_atoms] / identifier[structure] . identifier[composition] . identifier[reduced_composition] . identifier[num_atoms]
identifier[s] /= identifier[formula_units]
keyword[return] identifier[s] | def entropy(self, t, structure=None):
"""
Vibrational entropy at temperature T obtained from the integration of the DOS.
Only positive frequencies will be used.
    Result in J/(K*mol-c). A mol-c is the abbreviation of a mole-cell, that is, Avogadro's
    number times the number of atoms in a unit cell. To compare with experimental data the result
    should be divided by the number of formula units in the cell. If the structure is provided
the division is performed internally and the result is in J/(K*mol)
Args:
t: a temperature in K
        structure: the structure of the system. If not None it will be used to determine the number of
formula units
Returns:
Vibrational entropy
"""
if t == 0:
return 0 # depends on [control=['if'], data=[]]
freqs = self._positive_frequencies
dens = self._positive_densities
coth = lambda x: 1.0 / np.tanh(x)
wd2kt = freqs / (2 * BOLTZ_THZ_PER_K * t)
s = np.trapz((wd2kt * coth(wd2kt) - np.log(2 * np.sinh(wd2kt))) * dens, x=freqs)
s *= const.Boltzmann * const.Avogadro
if structure:
formula_units = structure.composition.num_atoms / structure.composition.reduced_composition.num_atoms
s /= formula_units # depends on [control=['if'], data=[]]
return s |
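A worked numeric check of the integrand on a toy DOS; the constant is recomputed from scipy.constants rather than taken from the module, and the flat DOS is invented.

import numpy as np
from scipy import constants as const

BOLTZ_THZ_PER_K = const.k / const.h / 1e12       # k_B/h in THz/K, ~0.02084

t = 300.0
freqs = np.linspace(0.1, 10.0, 500)              # THz, positive branch only
dens = np.full_like(freqs, 3.0 / (freqs[-1] - freqs[0]))  # toy DOS with 3 modes
x = freqs / (2 * BOLTZ_THZ_PER_K * t)
integrand = (x / np.tanh(x) - np.log(2 * np.sinh(x))) * dens
s = np.trapz(integrand, x=freqs) * const.Boltzmann * const.Avogadro
print(s)                                         # J/(K*mol-c) for the toy DOS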
async def AddUser(self, users):
'''
users : typing.Sequence[~AddUser]
Returns -> typing.Sequence[~AddUserResult]
'''
# map input types to rpc msg
_params = dict()
msg = dict(type='UserManager',
request='AddUser',
version=2,
params=_params)
_params['users'] = users
reply = await self.rpc(msg)
return reply | <ast.AsyncFunctionDef object at 0x7da1b0dbe830> | keyword[async] keyword[def] identifier[AddUser] ( identifier[self] , identifier[users] ):
literal[string]
identifier[_params] = identifier[dict] ()
identifier[msg] = identifier[dict] ( identifier[type] = literal[string] ,
identifier[request] = literal[string] ,
identifier[version] = literal[int] ,
identifier[params] = identifier[_params] )
identifier[_params] [ literal[string] ]= identifier[users]
identifier[reply] = keyword[await] identifier[self] . identifier[rpc] ( identifier[msg] )
keyword[return] identifier[reply] | async def AddUser(self, users):
"""
users : typing.Sequence[~AddUser]
Returns -> typing.Sequence[~AddUserResult]
"""
# map input types to rpc msg
_params = dict()
msg = dict(type='UserManager', request='AddUser', version=2, params=_params)
_params['users'] = users
reply = await self.rpc(msg)
return reply |
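A minimal sketch of the facade-call shape this generated method follows; call_facade and the rpc transport are stubs, not the library's API.

async def call_facade(rpc, entity, request, version, **params):
    # Every generated method builds the same four-field message.
    msg = dict(type=entity, request=request, version=version, params=params)
    return await rpc(msg)

# e.g. reply = await call_facade(self.rpc, 'UserManager', 'AddUser', 2, users=users)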
def get_xsi_type(element):
"""
    returns the type of an element of the XML tree (incl. its namespace,
    i.e. nodes, edges, layers etc.), raises an exception if the element has no
'xsi:type' attribute.
"""
nsdict = NAMESPACES
#.xpath() always returns a list, so we need to select the first element
try:
return element.xpath('@xsi:type', namespaces=nsdict)[0]
except IndexError: # xpath result is empty
raise ValueError("The '{0}' element has no 'xsi:type' but has these "
"attribs:\n{1}").format(element.tag, element.attrib) | def function[get_xsi_type, parameter[element]]:
constant[
    returns the type of an element of the XML tree (incl. its namespace,
    i.e. nodes, edges, layers etc.), raises an exception if the element has no
'xsi:type' attribute.
]
variable[nsdict] assign[=] name[NAMESPACES]
<ast.Try object at 0x7da20cabc7f0> | keyword[def] identifier[get_xsi_type] ( identifier[element] ):
literal[string]
identifier[nsdict] = identifier[NAMESPACES]
keyword[try] :
keyword[return] identifier[element] . identifier[xpath] ( literal[string] , identifier[namespaces] = identifier[nsdict] )[ literal[int] ]
keyword[except] identifier[IndexError] :
 keyword[raise] identifier[ValueError] ( literal[string]
 literal[string] . identifier[format] ( identifier[element] . identifier[tag] , identifier[element] . identifier[attrib] ))
"""
    returns the type of an element of the XML tree (incl. its namespace,
    i.e. nodes, edges, layers etc.), raises an exception if the element has no
'xsi:type' attribute.
"""
nsdict = NAMESPACES
#.xpath() always returns a list, so we need to select the first element
try:
return element.xpath('@xsi:type', namespaces=nsdict)[0] # depends on [control=['try'], data=[]]
except IndexError: # xpath result is empty
raise ValueError("The '{0}' element has no 'xsi:type' but has these attribs:\n{1}").format(element.tag, element.attrib) # depends on [control=['except'], data=[]] |
def _populate_common_request(self, request):
'''Populate the Request with common fields.'''
url_record = self._item_session.url_record
# Note that referrer may have already been set by the --referer option
if url_record.parent_url and not request.fields.get('Referer'):
self._add_referrer(request, url_record)
if self._fetch_rule.http_login:
request.username, request.password = self._fetch_rule.http_login | def function[_populate_common_request, parameter[self, request]]:
constant[Populate the Request with common fields.]
variable[url_record] assign[=] name[self]._item_session.url_record
if <ast.BoolOp object at 0x7da2054a7910> begin[:]
call[name[self]._add_referrer, parameter[name[request], name[url_record]]]
if name[self]._fetch_rule.http_login begin[:]
<ast.Tuple object at 0x7da2054a7580> assign[=] name[self]._fetch_rule.http_login | keyword[def] identifier[_populate_common_request] ( identifier[self] , identifier[request] ):
literal[string]
identifier[url_record] = identifier[self] . identifier[_item_session] . identifier[url_record]
keyword[if] identifier[url_record] . identifier[parent_url] keyword[and] keyword[not] identifier[request] . identifier[fields] . identifier[get] ( literal[string] ):
identifier[self] . identifier[_add_referrer] ( identifier[request] , identifier[url_record] )
keyword[if] identifier[self] . identifier[_fetch_rule] . identifier[http_login] :
identifier[request] . identifier[username] , identifier[request] . identifier[password] = identifier[self] . identifier[_fetch_rule] . identifier[http_login] | def _populate_common_request(self, request):
"""Populate the Request with common fields."""
url_record = self._item_session.url_record
# Note that referrer may have already been set by the --referer option
if url_record.parent_url and (not request.fields.get('Referer')):
self._add_referrer(request, url_record) # depends on [control=['if'], data=[]]
if self._fetch_rule.http_login:
(request.username, request.password) = self._fetch_rule.http_login # depends on [control=['if'], data=[]] |
def _create_tunnels(self):
"""
Create SSH tunnels on top of a transport to the remote gateway
"""
if not self.is_active:
try:
self._connect_to_gateway()
except socket.gaierror: # raised by paramiko.Transport
msg = 'Could not resolve IP address for {0}, aborting!' \
.format(self.ssh_host)
self.logger.error(msg)
return
except (paramiko.SSHException, socket.error) as e:
template = 'Could not connect to gateway {0}:{1} : {2}'
msg = template.format(self.ssh_host, self.ssh_port, e.args[0])
self.logger.error(msg)
return
for (rem, loc) in zip(self._remote_binds, self._local_binds):
try:
self._make_ssh_forward_server(rem, loc)
except BaseSSHTunnelForwarderError as e:
msg = 'Problem setting SSH Forwarder up: {0}'.format(e.value)
self.logger.error(msg) | def function[_create_tunnels, parameter[self]]:
constant[
Create SSH tunnels on top of a transport to the remote gateway
]
if <ast.UnaryOp object at 0x7da1b1393880> begin[:]
<ast.Try object at 0x7da1b13905b0>
for taget[tuple[[<ast.Name object at 0x7da1b13b5db0>, <ast.Name object at 0x7da1b13b4c10>]]] in starred[call[name[zip], parameter[name[self]._remote_binds, name[self]._local_binds]]] begin[:]
<ast.Try object at 0x7da1b13b75b0> | keyword[def] identifier[_create_tunnels] ( identifier[self] ):
literal[string]
keyword[if] keyword[not] identifier[self] . identifier[is_active] :
keyword[try] :
identifier[self] . identifier[_connect_to_gateway] ()
keyword[except] identifier[socket] . identifier[gaierror] :
identifier[msg] = literal[string] . identifier[format] ( identifier[self] . identifier[ssh_host] )
identifier[self] . identifier[logger] . identifier[error] ( identifier[msg] )
keyword[return]
keyword[except] ( identifier[paramiko] . identifier[SSHException] , identifier[socket] . identifier[error] ) keyword[as] identifier[e] :
identifier[template] = literal[string]
identifier[msg] = identifier[template] . identifier[format] ( identifier[self] . identifier[ssh_host] , identifier[self] . identifier[ssh_port] , identifier[e] . identifier[args] [ literal[int] ])
identifier[self] . identifier[logger] . identifier[error] ( identifier[msg] )
keyword[return]
keyword[for] ( identifier[rem] , identifier[loc] ) keyword[in] identifier[zip] ( identifier[self] . identifier[_remote_binds] , identifier[self] . identifier[_local_binds] ):
keyword[try] :
identifier[self] . identifier[_make_ssh_forward_server] ( identifier[rem] , identifier[loc] )
keyword[except] identifier[BaseSSHTunnelForwarderError] keyword[as] identifier[e] :
identifier[msg] = literal[string] . identifier[format] ( identifier[e] . identifier[value] )
identifier[self] . identifier[logger] . identifier[error] ( identifier[msg] ) | def _create_tunnels(self):
"""
Create SSH tunnels on top of a transport to the remote gateway
"""
if not self.is_active:
try:
self._connect_to_gateway() # depends on [control=['try'], data=[]]
except socket.gaierror: # raised by paramiko.Transport
msg = 'Could not resolve IP address for {0}, aborting!'.format(self.ssh_host)
self.logger.error(msg)
return # depends on [control=['except'], data=[]]
except (paramiko.SSHException, socket.error) as e:
template = 'Could not connect to gateway {0}:{1} : {2}'
msg = template.format(self.ssh_host, self.ssh_port, e.args[0])
self.logger.error(msg)
return # depends on [control=['except'], data=['e']] # depends on [control=['if'], data=[]]
for (rem, loc) in zip(self._remote_binds, self._local_binds):
try:
self._make_ssh_forward_server(rem, loc) # depends on [control=['try'], data=[]]
except BaseSSHTunnelForwarderError as e:
msg = 'Problem setting SSH Forwarder up: {0}'.format(e.value)
self.logger.error(msg) # depends on [control=['except'], data=['e']] # depends on [control=['for'], data=[]] |
def write_default_config(self, filename):
"""Write the default config file.
"""
try:
with open(filename, 'wt') as file:
file.write(DEFAULT_CONFIG)
return True
except (IOError, OSError) as e:
print('Error writing %s: %s' % (filename, e.strerror or e), file=sys.stderr)
return False | def function[write_default_config, parameter[self, filename]]:
constant[Write the default config file.
]
<ast.Try object at 0x7da207f98910> | keyword[def] identifier[write_default_config] ( identifier[self] , identifier[filename] ):
literal[string]
keyword[try] :
keyword[with] identifier[open] ( identifier[filename] , literal[string] ) keyword[as] identifier[file] :
identifier[file] . identifier[write] ( identifier[DEFAULT_CONFIG] )
keyword[return] keyword[True]
keyword[except] ( identifier[IOError] , identifier[OSError] ) keyword[as] identifier[e] :
identifier[print] ( literal[string] %( identifier[filename] , identifier[e] . identifier[strerror] keyword[or] identifier[e] ), identifier[file] = identifier[sys] . identifier[stderr] )
keyword[return] keyword[False] | def write_default_config(self, filename):
"""Write the default config file.
"""
try:
with open(filename, 'wt') as file:
file.write(DEFAULT_CONFIG) # depends on [control=['with'], data=['file']]
return True # depends on [control=['try'], data=[]]
except (IOError, OSError) as e:
print('Error writing %s: %s' % (filename, e.strerror or e), file=sys.stderr)
return False # depends on [control=['except'], data=['e']] |
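
A hedged usage sketch of the same write-or-report pattern against a throwaway path; DEFAULT_CONFIG stands in for the module-level constant the method references:

import os, sys, tempfile

DEFAULT_CONFIG = "[defaults]\nverbose = false\n"  # assumed contents

def write_default_config(filename):
    try:
        with open(filename, 'wt') as file:
            file.write(DEFAULT_CONFIG)
        return True
    except (IOError, OSError) as e:
        print('Error writing %s: %s' % (filename, e.strerror or e), file=sys.stderr)
        return False

path = os.path.join(tempfile.mkdtemp(), 'app.cfg')
assert write_default_config(path) and os.path.exists(path)
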
async def toggle(self):
"""Toggles between pause and resume command"""
self.logger.debug("toggle command")
if not self.state == 'ready':
return
if self.streamer is None:
return
try:
if self.streamer.is_playing():
await self.pause()
else:
await self.resume()
except Exception as e:
logger.error(e)
pass | <ast.AsyncFunctionDef object at 0x7da1b198bb20> | keyword[async] keyword[def] identifier[toggle] ( identifier[self] ):
literal[string]
identifier[self] . identifier[logger] . identifier[debug] ( literal[string] )
keyword[if] keyword[not] identifier[self] . identifier[state] == literal[string] :
keyword[return]
keyword[if] identifier[self] . identifier[streamer] keyword[is] keyword[None] :
keyword[return]
keyword[try] :
keyword[if] identifier[self] . identifier[streamer] . identifier[is_playing] ():
keyword[await] identifier[self] . identifier[pause] ()
keyword[else] :
keyword[await] identifier[self] . identifier[resume] ()
keyword[except] identifier[Exception] keyword[as] identifier[e] :
identifier[logger] . identifier[error] ( identifier[e] )
keyword[pass] | async def toggle(self):
"""Toggles between pause and resume command"""
self.logger.debug('toggle command')
if not self.state == 'ready':
return # depends on [control=['if'], data=[]]
if self.streamer is None:
return # depends on [control=['if'], data=[]]
try:
if self.streamer.is_playing():
await self.pause() # depends on [control=['if'], data=[]]
else:
await self.resume() # depends on [control=['try'], data=[]]
except Exception as e:
logger.error(e)
pass # depends on [control=['except'], data=['e']] |
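
A self-contained sketch of the same pause/resume toggle with a dummy player, since the original depends on a live streamer object:

import asyncio

class DummyPlayer:
    def __init__(self):
        self._playing = True
    async def toggle(self):
        # mirror the branch above: pause when playing, resume otherwise
        if self._playing:
            await self.pause()
        else:
            await self.resume()
    async def pause(self):
        self._playing = False
        print('paused')
    async def resume(self):
        self._playing = True
        print('resumed')

asyncio.run(DummyPlayer().toggle())  # prints 'paused'
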
def getxattr(self, req, ino, name, size):
    """ Get an extended attribute
Valid replies:
reply_buf
reply_data
reply_xattr
reply_err
"""
self.reply_err(req, errno.ENOSYS) | def function[getxattr, parameter[self, req, ino, name, size]]:
constant[ Get an extended attribute
Valid replies:
reply_buf
reply_data
reply_xattr
reply_err
]
call[name[self].reply_err, parameter[name[req], name[errno].ENOSYS]] | keyword[def] identifier[getxattr] ( identifier[self] , identifier[req] , identifier[ino] , identifier[name] , identifier[size] ):
literal[string]
identifier[self] . identifier[reply_err] ( identifier[req] , identifier[errno] . identifier[ENOSYS] ) | def getxattr(self, req, ino, name, size):
""" Set an extended attribute
Valid replies:
reply_buf
reply_data
reply_xattr
reply_err
"""
self.reply_err(req, errno.ENOSYS) |
def emit_toi_stats(toi_set, peripherals):
"""
Calculates new TOI stats and emits them via statsd.
"""
count_by_zoom = defaultdict(int)
total = 0
for coord_int in toi_set:
coord = coord_unmarshall_int(coord_int)
count_by_zoom[coord.zoom] += 1
total += 1
peripherals.stats.gauge('tiles-of-interest.count', total)
for zoom, count in count_by_zoom.items():
peripherals.stats.gauge(
'tiles-of-interest.by-zoom.z{:02d}'.format(zoom),
count
) | def function[emit_toi_stats, parameter[toi_set, peripherals]]:
constant[
Calculates new TOI stats and emits them via statsd.
]
variable[count_by_zoom] assign[=] call[name[defaultdict], parameter[name[int]]]
variable[total] assign[=] constant[0]
for taget[name[coord_int]] in starred[name[toi_set]] begin[:]
variable[coord] assign[=] call[name[coord_unmarshall_int], parameter[name[coord_int]]]
<ast.AugAssign object at 0x7da1b04a4eb0>
<ast.AugAssign object at 0x7da20c76c040>
call[name[peripherals].stats.gauge, parameter[constant[tiles-of-interest.count], name[total]]]
for taget[tuple[[<ast.Name object at 0x7da20c76fb20>, <ast.Name object at 0x7da20c76d600>]]] in starred[call[name[count_by_zoom].items, parameter[]]] begin[:]
call[name[peripherals].stats.gauge, parameter[call[constant[tiles-of-interest.by-zoom.z{:02d}].format, parameter[name[zoom]]], name[count]]] | keyword[def] identifier[emit_toi_stats] ( identifier[toi_set] , identifier[peripherals] ):
literal[string]
identifier[count_by_zoom] = identifier[defaultdict] ( identifier[int] )
identifier[total] = literal[int]
keyword[for] identifier[coord_int] keyword[in] identifier[toi_set] :
identifier[coord] = identifier[coord_unmarshall_int] ( identifier[coord_int] )
identifier[count_by_zoom] [ identifier[coord] . identifier[zoom] ]+= literal[int]
identifier[total] += literal[int]
identifier[peripherals] . identifier[stats] . identifier[gauge] ( literal[string] , identifier[total] )
keyword[for] identifier[zoom] , identifier[count] keyword[in] identifier[count_by_zoom] . identifier[items] ():
identifier[peripherals] . identifier[stats] . identifier[gauge] (
literal[string] . identifier[format] ( identifier[zoom] ),
identifier[count]
) | def emit_toi_stats(toi_set, peripherals):
"""
Calculates new TOI stats and emits them via statsd.
"""
count_by_zoom = defaultdict(int)
total = 0
for coord_int in toi_set:
coord = coord_unmarshall_int(coord_int)
count_by_zoom[coord.zoom] += 1
total += 1 # depends on [control=['for'], data=['coord_int']]
peripherals.stats.gauge('tiles-of-interest.count', total)
for (zoom, count) in count_by_zoom.items():
peripherals.stats.gauge('tiles-of-interest.by-zoom.z{:02d}'.format(zoom), count) # depends on [control=['for'], data=[]] |
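
A hedged standalone analog: coord_unmarshall_int and the statsd client are replaced by stand-ins, but the per-zoom accumulation and gauge names match the function above:

from collections import defaultdict, namedtuple

Coord = namedtuple('Coord', 'zoom column row')  # stand-in coordinate type

def emit_toi_stats(coords, gauge):
    count_by_zoom = defaultdict(int)
    total = 0
    for coord in coords:
        count_by_zoom[coord.zoom] += 1
        total += 1
    gauge('tiles-of-interest.count', total)
    for zoom, count in count_by_zoom.items():
        gauge('tiles-of-interest.by-zoom.z{:02d}'.format(zoom), count)

emit_toi_stats([Coord(1, 0, 0), Coord(2, 1, 1)], lambda name, value: print(name, value))
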
def rename(self, newpath):
"Move folder to a new name, possibly a whole new path"
# POST https://www.jottacloud.com/jfs/**USERNAME**/Jotta/Sync/Ny%20mappe?mvDir=/**USERNAME**/Jotta/Sync/testFolder
#url = '%s?mvDir=/%s%s' % (self.path, self.jfs.username, newpath)
params = {'mvDir':'/%s%s' % (self.jfs.username, newpath)}
r = self.jfs.post(self.path,
extra_headers={'Content-Type':'application/octet-stream'},
params=params)
return r | def function[rename, parameter[self, newpath]]:
constant[Move folder to a new name, possibly a whole new path]
variable[params] assign[=] dictionary[[<ast.Constant object at 0x7da18f723ca0>], [<ast.BinOp object at 0x7da18f722f80>]]
variable[r] assign[=] call[name[self].jfs.post, parameter[name[self].path]]
return[name[r]] | keyword[def] identifier[rename] ( identifier[self] , identifier[newpath] ):
literal[string]
identifier[params] ={ literal[string] : literal[string] %( identifier[self] . identifier[jfs] . identifier[username] , identifier[newpath] )}
identifier[r] = identifier[self] . identifier[jfs] . identifier[post] ( identifier[self] . identifier[path] ,
identifier[extra_headers] ={ literal[string] : literal[string] },
identifier[params] = identifier[params] )
keyword[return] identifier[r] | def rename(self, newpath):
"""Move folder to a new name, possibly a whole new path"""
# POST https://www.jottacloud.com/jfs/**USERNAME**/Jotta/Sync/Ny%20mappe?mvDir=/**USERNAME**/Jotta/Sync/testFolder
#url = '%s?mvDir=/%s%s' % (self.path, self.jfs.username, newpath)
params = {'mvDir': '/%s%s' % (self.jfs.username, newpath)}
r = self.jfs.post(self.path, extra_headers={'Content-Type': 'application/octet-stream'}, params=params)
return r |
def window(self, windowDuration, slideDuration=None):
"""
    Return a new DStream in which each RDD contains all the elements seen in a
sliding window of time over this DStream.
@param windowDuration: width of the window; must be a multiple of this DStream's
batching interval
@param slideDuration: sliding interval of the window (i.e., the interval after which
the new DStream will generate RDDs); must be a multiple of this
DStream's batching interval
"""
self._validate_window_param(windowDuration, slideDuration)
d = self._ssc._jduration(windowDuration)
if slideDuration is None:
return DStream(self._jdstream.window(d), self._ssc, self._jrdd_deserializer)
s = self._ssc._jduration(slideDuration)
return DStream(self._jdstream.window(d, s), self._ssc, self._jrdd_deserializer) | def function[window, parameter[self, windowDuration, slideDuration]]:
constant[
    Return a new DStream in which each RDD contains all the elements seen in a
sliding window of time over this DStream.
@param windowDuration: width of the window; must be a multiple of this DStream's
batching interval
@param slideDuration: sliding interval of the window (i.e., the interval after which
the new DStream will generate RDDs); must be a multiple of this
DStream's batching interval
]
call[name[self]._validate_window_param, parameter[name[windowDuration], name[slideDuration]]]
variable[d] assign[=] call[name[self]._ssc._jduration, parameter[name[windowDuration]]]
if compare[name[slideDuration] is constant[None]] begin[:]
return[call[name[DStream], parameter[call[name[self]._jdstream.window, parameter[name[d]]], name[self]._ssc, name[self]._jrdd_deserializer]]]
variable[s] assign[=] call[name[self]._ssc._jduration, parameter[name[slideDuration]]]
return[call[name[DStream], parameter[call[name[self]._jdstream.window, parameter[name[d], name[s]]], name[self]._ssc, name[self]._jrdd_deserializer]]] | keyword[def] identifier[window] ( identifier[self] , identifier[windowDuration] , identifier[slideDuration] = keyword[None] ):
literal[string]
identifier[self] . identifier[_validate_window_param] ( identifier[windowDuration] , identifier[slideDuration] )
identifier[d] = identifier[self] . identifier[_ssc] . identifier[_jduration] ( identifier[windowDuration] )
keyword[if] identifier[slideDuration] keyword[is] keyword[None] :
keyword[return] identifier[DStream] ( identifier[self] . identifier[_jdstream] . identifier[window] ( identifier[d] ), identifier[self] . identifier[_ssc] , identifier[self] . identifier[_jrdd_deserializer] )
identifier[s] = identifier[self] . identifier[_ssc] . identifier[_jduration] ( identifier[slideDuration] )
keyword[return] identifier[DStream] ( identifier[self] . identifier[_jdstream] . identifier[window] ( identifier[d] , identifier[s] ), identifier[self] . identifier[_ssc] , identifier[self] . identifier[_jrdd_deserializer] ) | def window(self, windowDuration, slideDuration=None):
"""
    Return a new DStream in which each RDD contains all the elements seen in a
sliding window of time over this DStream.
@param windowDuration: width of the window; must be a multiple of this DStream's
batching interval
@param slideDuration: sliding interval of the window (i.e., the interval after which
the new DStream will generate RDDs); must be a multiple of this
DStream's batching interval
"""
self._validate_window_param(windowDuration, slideDuration)
d = self._ssc._jduration(windowDuration)
if slideDuration is None:
return DStream(self._jdstream.window(d), self._ssc, self._jrdd_deserializer) # depends on [control=['if'], data=[]]
s = self._ssc._jduration(slideDuration)
return DStream(self._jdstream.window(d, s), self._ssc, self._jrdd_deserializer) |
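
A hedged analog of the window semantics on plain batch lists: a window of width w batches slides forward by s batches, mirroring windowDuration and slideDuration as multiples of the batching interval:

def windows(batches, w, s):
    out = []
    for end in range(w, len(batches) + 1, s):
        # each window is the concatenation of the last w batches
        out.append([x for batch in batches[end - w:end] for x in batch])
    return out

print(windows([[1], [2], [3], [4]], w=2, s=1))  # [[1, 2], [2, 3], [3, 4]]
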
def random_new_from_seed(
seed: Hashable, algo: int = RNG_CMWC
) -> tcod.random.Random:
    """Return a new Random instance using the given ``seed`` and ``algo``.
Args:
seed (Hashable): The RNG seed. Should be a 32-bit integer, but any
hashable object is accepted.
algo (int): The random number algorithm to use.
Returns:
Random: A new Random instance using the given algorithm.
"""
return tcod.random.Random(algo, seed) | def function[random_new_from_seed, parameter[seed, algo]]:
constant[Return a new Random instance. Using the given ``seed`` and ``algo``.
Args:
seed (Hashable): The RNG seed. Should be a 32-bit integer, but any
hashable object is accepted.
algo (int): The random number algorithm to use.
Returns:
Random: A new Random instance using the given algorithm.
]
return[call[name[tcod].random.Random, parameter[name[algo], name[seed]]]] | keyword[def] identifier[random_new_from_seed] (
identifier[seed] : identifier[Hashable] , identifier[algo] : identifier[int] = identifier[RNG_CMWC]
)-> identifier[tcod] . identifier[random] . identifier[Random] :
literal[string]
keyword[return] identifier[tcod] . identifier[random] . identifier[Random] ( identifier[algo] , identifier[seed] ) | def random_new_from_seed(seed: Hashable, algo: int=RNG_CMWC) -> tcod.random.Random:
"""Return a new Random instance. Using the given ``seed`` and ``algo``.
Args:
seed (Hashable): The RNG seed. Should be a 32-bit integer, but any
hashable object is accepted.
algo (int): The random number algorithm to use.
Returns:
Random: A new Random instance using the given algorithm.
"""
return tcod.random.Random(algo, seed) |
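
A hedged usage sketch, assuming python-tcod is installed and that the constant and method names below match your installed version's documented API:

import tcod.random

rng_a = tcod.random.Random(tcod.random.MERSENNE_TWISTER, 42)
rng_b = tcod.random.Random(tcod.random.MERSENNE_TWISTER, 42)
# an identical seed and algorithm gives an identical stream
assert rng_a.randint(0, 100) == rng_b.randint(0, 100)
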
def map_remove(self, key, mapkey, **kwargs):
"""
Remove an item from a map.
:param str key: The document ID
:param str mapkey: The key in the map
:param kwargs: See :meth:`mutate_in` for options
:raise: :exc:`IndexError` if the mapkey does not exist
:raise: :cb_exc:`NotFoundError` if the document does not exist.
.. Remove a map key-value pair:
cb.map_remove('a_map', 'some_key')
.. seealso:: :meth:`map_add`
"""
op = SD.remove(mapkey)
sdres = self.mutate_in(key, op, **kwargs)
return self._wrap_dsop(sdres) | def function[map_remove, parameter[self, key, mapkey]]:
constant[
Remove an item from a map.
:param str key: The document ID
:param str mapkey: The key in the map
:param kwargs: See :meth:`mutate_in` for options
:raise: :exc:`IndexError` if the mapkey does not exist
:raise: :cb_exc:`NotFoundError` if the document does not exist.
.. Remove a map key-value pair:
cb.map_remove('a_map', 'some_key')
.. seealso:: :meth:`map_add`
]
variable[op] assign[=] call[name[SD].remove, parameter[name[mapkey]]]
variable[sdres] assign[=] call[name[self].mutate_in, parameter[name[key], name[op]]]
return[call[name[self]._wrap_dsop, parameter[name[sdres]]]] | keyword[def] identifier[map_remove] ( identifier[self] , identifier[key] , identifier[mapkey] ,** identifier[kwargs] ):
literal[string]
identifier[op] = identifier[SD] . identifier[remove] ( identifier[mapkey] )
identifier[sdres] = identifier[self] . identifier[mutate_in] ( identifier[key] , identifier[op] ,** identifier[kwargs] )
keyword[return] identifier[self] . identifier[_wrap_dsop] ( identifier[sdres] ) | def map_remove(self, key, mapkey, **kwargs):
"""
Remove an item from a map.
:param str key: The document ID
:param str mapkey: The key in the map
:param kwargs: See :meth:`mutate_in` for options
:raise: :exc:`IndexError` if the mapkey does not exist
:raise: :cb_exc:`NotFoundError` if the document does not exist.
.. Remove a map key-value pair:
cb.map_remove('a_map', 'some_key')
.. seealso:: :meth:`map_add`
"""
op = SD.remove(mapkey)
sdres = self.mutate_in(key, op, **kwargs)
return self._wrap_dsop(sdres) |
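
Running map_remove needs a live Couchbase bucket, so here is a hedged plain-dict analog of the documented behaviour (the sub-document API raises IndexError where the dict raises KeyError):

a_map = {'some_key': 1, 'other': 2}
del a_map['some_key']              # removing a present key succeeds
try:
    del a_map['missing']           # removing an absent key raises
except KeyError:
    print('mapkey does not exist')
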
def find(obj, prs, forced_type=None, cls=anyconfig.models.processor.Processor):
"""
:param obj:
a file path, file, file-like object, pathlib.Path object or an
'anyconfig.globals.IOInfo' (namedtuple) object
:param prs: A list of :class:`anyconfig.models.processor.Processor` classes
:param forced_type:
Forced processor type of the data to process or ID of the processor
class or :class:`anyconfig.models.processor.Processor` class object or
its instance itself
:param cls: A class object to compare with 'forced_type' later
:return: an instance of processor class to process 'obj' data
:raises: ValueError, UnknownProcessorTypeError, UnknownFileTypeError
"""
if forced_type is not None:
processor = maybe_processor(forced_type, cls=cls)
if processor is not None:
return processor
pclss = findall(obj, prs, forced_type=forced_type, cls=cls)
return pclss[0]() | def function[find, parameter[obj, prs, forced_type, cls]]:
constant[
:param obj:
a file path, file, file-like object, pathlib.Path object or an
'anyconfig.globals.IOInfo' (namedtuple) object
:param prs: A list of :class:`anyconfig.models.processor.Processor` classes
:param forced_type:
Forced processor type of the data to process or ID of the processor
class or :class:`anyconfig.models.processor.Processor` class object or
its instance itself
:param cls: A class object to compare with 'forced_type' later
:return: an instance of processor class to process 'obj' data
:raises: ValueError, UnknownProcessorTypeError, UnknownFileTypeError
]
if compare[name[forced_type] is_not constant[None]] begin[:]
variable[processor] assign[=] call[name[maybe_processor], parameter[name[forced_type]]]
if compare[name[processor] is_not constant[None]] begin[:]
return[name[processor]]
variable[pclss] assign[=] call[name[findall], parameter[name[obj], name[prs]]]
return[call[call[name[pclss]][constant[0]], parameter[]]] | keyword[def] identifier[find] ( identifier[obj] , identifier[prs] , identifier[forced_type] = keyword[None] , identifier[cls] = identifier[anyconfig] . identifier[models] . identifier[processor] . identifier[Processor] ):
literal[string]
keyword[if] identifier[forced_type] keyword[is] keyword[not] keyword[None] :
identifier[processor] = identifier[maybe_processor] ( identifier[forced_type] , identifier[cls] = identifier[cls] )
keyword[if] identifier[processor] keyword[is] keyword[not] keyword[None] :
keyword[return] identifier[processor]
identifier[pclss] = identifier[findall] ( identifier[obj] , identifier[prs] , identifier[forced_type] = identifier[forced_type] , identifier[cls] = identifier[cls] )
keyword[return] identifier[pclss] [ literal[int] ]() | def find(obj, prs, forced_type=None, cls=anyconfig.models.processor.Processor):
"""
:param obj:
a file path, file, file-like object, pathlib.Path object or an
'anyconfig.globals.IOInfo' (namedtuple) object
:param prs: A list of :class:`anyconfig.models.processor.Processor` classes
:param forced_type:
Forced processor type of the data to process or ID of the processor
class or :class:`anyconfig.models.processor.Processor` class object or
its instance itself
:param cls: A class object to compare with 'forced_type' later
:return: an instance of processor class to process 'obj' data
:raises: ValueError, UnknownProcessorTypeError, UnknownFileTypeError
"""
if forced_type is not None:
processor = maybe_processor(forced_type, cls=cls)
if processor is not None:
return processor # depends on [control=['if'], data=['processor']] # depends on [control=['if'], data=['forced_type']]
pclss = findall(obj, prs, forced_type=forced_type, cls=cls)
return pclss[0]() |
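
A hedged, simplified analog of the lookup order: a forced type short-circuits the search, otherwise the first processor matching the input wins. The registry shape here is hypothetical:

REGISTRY = {'json': 'JsonProcessor', 'yml': 'YamlProcessor'}  # hypothetical

def find(path, forced_type=None):
    if forced_type is not None:
        return REGISTRY[forced_type]
    ext = path.rsplit('.', 1)[-1]
    try:
        return REGISTRY[ext]
    except KeyError:
        raise ValueError('no processor found for %r' % path)

print(find('conf.yml'))          # YamlProcessor
print(find('conf.txt', 'json'))  # JsonProcessor
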
def engine(func):
"""Callback-oriented decorator for asynchronous generators.
This is an older interface; for new code that does not need to be
compatible with versions of Tornado older than 3.0 the
`coroutine` decorator is recommended instead.
This decorator is similar to `coroutine`, except it does not
return a `.Future` and the ``callback`` argument is not treated
specially.
In most cases, functions decorated with `engine` should take
a ``callback`` argument and invoke it with their result when
they are finished. One notable exception is the
`~tornado.web.RequestHandler` :ref:`HTTP verb methods <verbs>`,
which use ``self.finish()`` in place of a callback argument.
"""
func = _make_coroutine_wrapper(func, replace_callback=False)
@functools.wraps(func)
def wrapper(*args, **kwargs):
future = func(*args, **kwargs)
def final_callback(future):
if future.result() is not None:
raise ReturnValueIgnoredError(
"@gen.engine functions cannot return values: %r" %
(future.result(),))
# The engine interface doesn't give us any way to return
# errors but to raise them into the stack context.
# Save the stack context here to use when the Future has resolved.
future.add_done_callback(stack_context.wrap(final_callback))
return wrapper | def function[engine, parameter[func]]:
constant[Callback-oriented decorator for asynchronous generators.
This is an older interface; for new code that does not need to be
compatible with versions of Tornado older than 3.0 the
`coroutine` decorator is recommended instead.
This decorator is similar to `coroutine`, except it does not
return a `.Future` and the ``callback`` argument is not treated
specially.
In most cases, functions decorated with `engine` should take
a ``callback`` argument and invoke it with their result when
they are finished. One notable exception is the
`~tornado.web.RequestHandler` :ref:`HTTP verb methods <verbs>`,
which use ``self.finish()`` in place of a callback argument.
]
variable[func] assign[=] call[name[_make_coroutine_wrapper], parameter[name[func]]]
def function[wrapper, parameter[]]:
variable[future] assign[=] call[name[func], parameter[<ast.Starred object at 0x7da1b1b14c10>]]
def function[final_callback, parameter[future]]:
if compare[call[name[future].result, parameter[]] is_not constant[None]] begin[:]
<ast.Raise object at 0x7da1b1b17eb0>
call[name[future].add_done_callback, parameter[call[name[stack_context].wrap, parameter[name[final_callback]]]]]
return[name[wrapper]] | keyword[def] identifier[engine] ( identifier[func] ):
literal[string]
identifier[func] = identifier[_make_coroutine_wrapper] ( identifier[func] , identifier[replace_callback] = keyword[False] )
@ identifier[functools] . identifier[wraps] ( identifier[func] )
keyword[def] identifier[wrapper] (* identifier[args] ,** identifier[kwargs] ):
identifier[future] = identifier[func] (* identifier[args] ,** identifier[kwargs] )
keyword[def] identifier[final_callback] ( identifier[future] ):
keyword[if] identifier[future] . identifier[result] () keyword[is] keyword[not] keyword[None] :
keyword[raise] identifier[ReturnValueIgnoredError] (
literal[string] %
( identifier[future] . identifier[result] (),))
identifier[future] . identifier[add_done_callback] ( identifier[stack_context] . identifier[wrap] ( identifier[final_callback] ))
keyword[return] identifier[wrapper] | def engine(func):
"""Callback-oriented decorator for asynchronous generators.
This is an older interface; for new code that does not need to be
compatible with versions of Tornado older than 3.0 the
`coroutine` decorator is recommended instead.
This decorator is similar to `coroutine`, except it does not
return a `.Future` and the ``callback`` argument is not treated
specially.
In most cases, functions decorated with `engine` should take
a ``callback`` argument and invoke it with their result when
they are finished. One notable exception is the
`~tornado.web.RequestHandler` :ref:`HTTP verb methods <verbs>`,
which use ``self.finish()`` in place of a callback argument.
"""
func = _make_coroutine_wrapper(func, replace_callback=False)
@functools.wraps(func)
def wrapper(*args, **kwargs):
future = func(*args, **kwargs)
def final_callback(future):
if future.result() is not None:
raise ReturnValueIgnoredError('@gen.engine functions cannot return values: %r' % (future.result(),)) # depends on [control=['if'], data=[]]
# The engine interface doesn't give us any way to return
# errors but to raise them into the stack context.
# Save the stack context here to use when the Future has resolved.
future.add_done_callback(stack_context.wrap(final_callback))
return wrapper |
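
A hedged usage sketch of the callback style this decorator supports in legacy Tornado (before 6.0); db_get is a hypothetical callback-based API used to drive gen.Task:

from tornado import gen

def db_get(key, callback):
    callback({'answer': 42}[key])  # resolves synchronously for the demo

@gen.engine
def lookup(key, callback):
    value = yield gen.Task(db_get, key)
    callback(value)                # deliver the result, never return it

lookup('answer', callback=print)   # prints 42
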
def detect_django_settings():
"""
Automatically try to discover Django settings files,
return them as relative module paths.
"""
matches = []
for root, dirnames, filenames in os.walk(os.getcwd()):
for filename in fnmatch.filter(filenames, '*settings.py'):
full = os.path.join(root, filename)
if 'site-packages' in full:
continue
full = os.path.join(root, filename)
package_path = full.replace(os.getcwd(), '')
package_module = package_path.replace(os.sep, '.').split('.', 1)[1].replace('.py', '')
matches.append(package_module)
return matches | def function[detect_django_settings, parameter[]]:
constant[
Automatically try to discover Django settings files,
return them as relative module paths.
]
variable[matches] assign[=] list[[]]
for taget[tuple[[<ast.Name object at 0x7da1b21eeb60>, <ast.Name object at 0x7da1b21edf30>, <ast.Name object at 0x7da1b21ee5f0>]]] in starred[call[name[os].walk, parameter[call[name[os].getcwd, parameter[]]]]] begin[:]
for taget[name[filename]] in starred[call[name[fnmatch].filter, parameter[name[filenames], constant[*settings.py]]]] begin[:]
variable[full] assign[=] call[name[os].path.join, parameter[name[root], name[filename]]]
if compare[constant[site-packages] in name[full]] begin[:]
continue
variable[full] assign[=] call[name[os].path.join, parameter[name[root], name[filename]]]
variable[package_path] assign[=] call[name[full].replace, parameter[call[name[os].getcwd, parameter[]], constant[]]]
variable[package_module] assign[=] call[call[call[call[name[package_path].replace, parameter[name[os].sep, constant[.]]].split, parameter[constant[.], constant[1]]]][constant[1]].replace, parameter[constant[.py], constant[]]]
call[name[matches].append, parameter[name[package_module]]]
return[name[matches]] | keyword[def] identifier[detect_django_settings] ():
literal[string]
identifier[matches] =[]
keyword[for] identifier[root] , identifier[dirnames] , identifier[filenames] keyword[in] identifier[os] . identifier[walk] ( identifier[os] . identifier[getcwd] ()):
keyword[for] identifier[filename] keyword[in] identifier[fnmatch] . identifier[filter] ( identifier[filenames] , literal[string] ):
identifier[full] = identifier[os] . identifier[path] . identifier[join] ( identifier[root] , identifier[filename] )
keyword[if] literal[string] keyword[in] identifier[full] :
keyword[continue]
identifier[full] = identifier[os] . identifier[path] . identifier[join] ( identifier[root] , identifier[filename] )
identifier[package_path] = identifier[full] . identifier[replace] ( identifier[os] . identifier[getcwd] (), literal[string] )
identifier[package_module] = identifier[package_path] . identifier[replace] ( identifier[os] . identifier[sep] , literal[string] ). identifier[split] ( literal[string] , literal[int] )[ literal[int] ]. identifier[replace] ( literal[string] , literal[string] )
identifier[matches] . identifier[append] ( identifier[package_module] )
keyword[return] identifier[matches] | def detect_django_settings():
"""
Automatically try to discover Django settings files,
return them as relative module paths.
"""
matches = []
for (root, dirnames, filenames) in os.walk(os.getcwd()):
for filename in fnmatch.filter(filenames, '*settings.py'):
full = os.path.join(root, filename)
if 'site-packages' in full:
continue # depends on [control=['if'], data=[]]
full = os.path.join(root, filename)
package_path = full.replace(os.getcwd(), '')
package_module = package_path.replace(os.sep, '.').split('.', 1)[1].replace('.py', '')
matches.append(package_module) # depends on [control=['for'], data=['filename']] # depends on [control=['for'], data=[]]
return matches |
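
A hedged usage sketch that builds a throwaway project tree and runs the scan; it reuses the detect_django_settings defined above and assumes the temp path contains neither dots nor 'site-packages':

import os, tempfile

root = tempfile.mkdtemp()
os.makedirs(os.path.join(root, 'myproj'))
open(os.path.join(root, 'myproj', 'settings.py'), 'w').close()
os.chdir(root)
print(detect_django_settings())  # ['myproj.settings']
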
def get_axes(x, y):
"""
    computes the x and y axes of a given 2d grid
:param x:
:param y:
:return:
"""
n=int(np.sqrt(len(x)))
if n**2 != len(x):
        raise ValueError("length of input array given as %s is not the square of an integer!" % (len(x)))
x_image = x.reshape(n,n)
y_image = y.reshape(n,n)
x_axes = x_image[0,:]
y_axes = y_image[:,0]
return x_axes, y_axes | def function[get_axes, parameter[x, y]]:
constant[
computes the x and y axes of a given 2d grid
:param x:
:param y:
:return:
]
variable[n] assign[=] call[name[int], parameter[call[name[np].sqrt, parameter[call[name[len], parameter[name[x]]]]]]]
if compare[binary_operation[name[n] ** constant[2]] not_equal[!=] call[name[len], parameter[name[x]]]] begin[:]
<ast.Raise object at 0x7da2054a4e50>
variable[x_image] assign[=] call[name[x].reshape, parameter[name[n], name[n]]]
variable[y_image] assign[=] call[name[y].reshape, parameter[name[n], name[n]]]
variable[x_axes] assign[=] call[name[x_image]][tuple[[<ast.Constant object at 0x7da2054a51e0>, <ast.Slice object at 0x7da2054a6920>]]]
variable[y_axes] assign[=] call[name[y_image]][tuple[[<ast.Slice object at 0x7da2054a7d00>, <ast.Constant object at 0x7da2054a6740>]]]
return[tuple[[<ast.Name object at 0x7da2054a70d0>, <ast.Name object at 0x7da2054a6f50>]]] | keyword[def] identifier[get_axes] ( identifier[x] , identifier[y] ):
literal[string]
identifier[n] = identifier[int] ( identifier[np] . identifier[sqrt] ( identifier[len] ( identifier[x] )))
keyword[if] identifier[n] ** literal[int] != identifier[len] ( identifier[x] ):
keyword[raise] identifier[ValueError] ( literal[string] %( identifier[len] ( identifier[x] )))
identifier[x_image] = identifier[x] . identifier[reshape] ( identifier[n] , identifier[n] )
identifier[y_image] = identifier[y] . identifier[reshape] ( identifier[n] , identifier[n] )
identifier[x_axes] = identifier[x_image] [ literal[int] ,:]
identifier[y_axes] = identifier[y_image] [:, literal[int] ]
keyword[return] identifier[x_axes] , identifier[y_axes] | def get_axes(x, y):
"""
computes the axis x and y of a given 2d grid
:param x:
:param y:
:return:
"""
n = int(np.sqrt(len(x)))
if n ** 2 != len(x):
        raise ValueError('length of input array given as %s is not the square of an integer!' % len(x)) # depends on [control=['if'], data=[]]
x_image = x.reshape(n, n)
y_image = y.reshape(n, n)
x_axes = x_image[0, :]
y_axes = y_image[:, 0]
return (x_axes, y_axes) |
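
A runnable check of get_axes against a flattened numpy meshgrid, the input shape the function expects (n*n points from an n-by-n grid):

import numpy as np

xa, ya = np.linspace(0, 1, 4), np.linspace(10, 13, 4)
xg, yg = np.meshgrid(xa, ya)
x_axes, y_axes = get_axes(xg.ravel(), yg.ravel())
assert np.allclose(x_axes, xa) and np.allclose(y_axes, ya)
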
def edit(
request,
slug,
rev_id=None,
template_name='wakawaka/edit.html',
extra_context=None,
wiki_page_form=WikiPageForm,
wiki_delete_form=DeleteWikiPageForm,
):
"""
Displays the form for editing and deleting a page.
"""
# Get the page for slug and get a specific revision, if given
try:
queryset = WikiPage.objects.all()
page = queryset.get(slug=slug)
rev = page.current
initial = {'content': page.current.content}
# Do not allow editing wiki pages if the user has no permission
if not request.user.has_perms(
('wakawaka.change_wikipage', 'wakawaka.change_revision')
):
return HttpResponseForbidden(
ugettext('You don\'t have permission to edit pages.')
)
if rev_id:
# There is a specific revision, fetch this
rev_specific = Revision.objects.get(pk=rev_id)
if rev.pk != rev_specific.pk:
rev = rev_specific
rev.is_not_current = True
initial = {
'content': rev.content,
'message': _('Reverted to "%s"' % rev.message),
}
# This page does not exist, create a dummy page
# Note that it's not saved here
except WikiPage.DoesNotExist:
# Do not allow adding wiki pages if the user has no permission
if not request.user.has_perms(
('wakawaka.add_wikipage', 'wakawaka.add_revision')
):
return HttpResponseForbidden(
ugettext('You don\'t have permission to add wiki pages.')
)
page = WikiPage(slug=slug)
page.is_initial = True
rev = None
initial = {
'content': _('Describe your new page %s here...' % slug),
'message': _('Initial revision'),
}
    # Don't display the delete form if the user has no permission
    delete_form = None
    # If the user does have permission, build it
if request.user.has_perm(
'wakawaka.delete_wikipage'
) or request.user.has_perm('wakawaka.delete_revision'):
delete_form = wiki_delete_form(request)
if request.method == 'POST' and request.POST.get('delete'):
delete_form = wiki_delete_form(request, request.POST)
if delete_form.is_valid():
return delete_form.delete_wiki(request, page, rev)
# Page add/edit form
form = wiki_page_form(initial=initial)
if request.method == 'POST':
form = wiki_page_form(data=request.POST)
if form.is_valid():
# Check if the content is changed, except there is a rev_id and the
# user possibly only reverted the HEAD to it
if (
not rev_id
and initial['content'] == form.cleaned_data['content']
):
form.errors['content'] = (_('You have made no changes!'),)
# Save the form and redirect to the page view
else:
try:
# Check that the page already exist
queryset = WikiPage.objects.all()
page = queryset.get(slug=slug)
except WikiPage.DoesNotExist:
# Must be a new one, create that page
page = WikiPage(slug=slug)
page.save()
form.save(request, page)
kwargs = {'slug': page.slug}
redirect_to = reverse('wakawaka_page', kwargs=kwargs)
messages.success(
request,
ugettext('Your changes to %s were saved' % page.slug),
)
return HttpResponseRedirect(redirect_to)
template_context = {
'form': form,
'delete_form': delete_form,
'page': page,
'rev': rev,
}
template_context.update(extra_context or {})
return render(request, template_name, template_context) | def function[edit, parameter[request, slug, rev_id, template_name, extra_context, wiki_page_form, wiki_delete_form]]:
constant[
Displays the form for editing and deleting a page.
]
<ast.Try object at 0x7da1b0fd5540>
variable[delete_form] assign[=] constant[None]
if <ast.BoolOp object at 0x7da1b0fd5270> begin[:]
variable[delete_form] assign[=] call[name[wiki_delete_form], parameter[name[request]]]
if <ast.BoolOp object at 0x7da1b0fd7430> begin[:]
variable[delete_form] assign[=] call[name[wiki_delete_form], parameter[name[request], name[request].POST]]
if call[name[delete_form].is_valid, parameter[]] begin[:]
return[call[name[delete_form].delete_wiki, parameter[name[request], name[page], name[rev]]]]
variable[form] assign[=] call[name[wiki_page_form], parameter[]]
if compare[name[request].method equal[==] constant[POST]] begin[:]
variable[form] assign[=] call[name[wiki_page_form], parameter[]]
if call[name[form].is_valid, parameter[]] begin[:]
if <ast.BoolOp object at 0x7da18c4cf940> begin[:]
call[name[form].errors][constant[content]] assign[=] tuple[[<ast.Call object at 0x7da207f039d0>]]
variable[template_context] assign[=] dictionary[[<ast.Constant object at 0x7da1b26aed70>, <ast.Constant object at 0x7da1b26ac550>, <ast.Constant object at 0x7da1b26acdf0>, <ast.Constant object at 0x7da1b26accd0>], [<ast.Name object at 0x7da1b26afe50>, <ast.Name object at 0x7da1b26af550>, <ast.Name object at 0x7da1b26aec20>, <ast.Name object at 0x7da1b26ae7a0>]]
call[name[template_context].update, parameter[<ast.BoolOp object at 0x7da1b26afd90>]]
return[call[name[render], parameter[name[request], name[template_name], name[template_context]]]] | keyword[def] identifier[edit] (
identifier[request] ,
identifier[slug] ,
identifier[rev_id] = keyword[None] ,
identifier[template_name] = literal[string] ,
identifier[extra_context] = keyword[None] ,
identifier[wiki_page_form] = identifier[WikiPageForm] ,
identifier[wiki_delete_form] = identifier[DeleteWikiPageForm] ,
):
literal[string]
keyword[try] :
identifier[queryset] = identifier[WikiPage] . identifier[objects] . identifier[all] ()
identifier[page] = identifier[queryset] . identifier[get] ( identifier[slug] = identifier[slug] )
identifier[rev] = identifier[page] . identifier[current]
identifier[initial] ={ literal[string] : identifier[page] . identifier[current] . identifier[content] }
keyword[if] keyword[not] identifier[request] . identifier[user] . identifier[has_perms] (
( literal[string] , literal[string] )
):
keyword[return] identifier[HttpResponseForbidden] (
identifier[ugettext] ( literal[string] )
)
keyword[if] identifier[rev_id] :
identifier[rev_specific] = identifier[Revision] . identifier[objects] . identifier[get] ( identifier[pk] = identifier[rev_id] )
keyword[if] identifier[rev] . identifier[pk] != identifier[rev_specific] . identifier[pk] :
identifier[rev] = identifier[rev_specific]
identifier[rev] . identifier[is_not_current] = keyword[True]
identifier[initial] ={
literal[string] : identifier[rev] . identifier[content] ,
literal[string] : identifier[_] ( literal[string] % identifier[rev] . identifier[message] ),
}
keyword[except] identifier[WikiPage] . identifier[DoesNotExist] :
keyword[if] keyword[not] identifier[request] . identifier[user] . identifier[has_perms] (
( literal[string] , literal[string] )
):
keyword[return] identifier[HttpResponseForbidden] (
identifier[ugettext] ( literal[string] )
)
identifier[page] = identifier[WikiPage] ( identifier[slug] = identifier[slug] )
identifier[page] . identifier[is_initial] = keyword[True]
identifier[rev] = keyword[None]
identifier[initial] ={
literal[string] : identifier[_] ( literal[string] % identifier[slug] ),
literal[string] : identifier[_] ( literal[string] ),
}
identifier[delete_form] = keyword[None]
keyword[if] identifier[request] . identifier[user] . identifier[has_perm] (
literal[string]
) keyword[or] identifier[request] . identifier[user] . identifier[has_perm] ( literal[string] ):
identifier[delete_form] = identifier[wiki_delete_form] ( identifier[request] )
keyword[if] identifier[request] . identifier[method] == literal[string] keyword[and] identifier[request] . identifier[POST] . identifier[get] ( literal[string] ):
identifier[delete_form] = identifier[wiki_delete_form] ( identifier[request] , identifier[request] . identifier[POST] )
keyword[if] identifier[delete_form] . identifier[is_valid] ():
keyword[return] identifier[delete_form] . identifier[delete_wiki] ( identifier[request] , identifier[page] , identifier[rev] )
identifier[form] = identifier[wiki_page_form] ( identifier[initial] = identifier[initial] )
keyword[if] identifier[request] . identifier[method] == literal[string] :
identifier[form] = identifier[wiki_page_form] ( identifier[data] = identifier[request] . identifier[POST] )
keyword[if] identifier[form] . identifier[is_valid] ():
keyword[if] (
keyword[not] identifier[rev_id]
keyword[and] identifier[initial] [ literal[string] ]== identifier[form] . identifier[cleaned_data] [ literal[string] ]
):
identifier[form] . identifier[errors] [ literal[string] ]=( identifier[_] ( literal[string] ),)
keyword[else] :
keyword[try] :
identifier[queryset] = identifier[WikiPage] . identifier[objects] . identifier[all] ()
identifier[page] = identifier[queryset] . identifier[get] ( identifier[slug] = identifier[slug] )
keyword[except] identifier[WikiPage] . identifier[DoesNotExist] :
identifier[page] = identifier[WikiPage] ( identifier[slug] = identifier[slug] )
identifier[page] . identifier[save] ()
identifier[form] . identifier[save] ( identifier[request] , identifier[page] )
identifier[kwargs] ={ literal[string] : identifier[page] . identifier[slug] }
identifier[redirect_to] = identifier[reverse] ( literal[string] , identifier[kwargs] = identifier[kwargs] )
identifier[messages] . identifier[success] (
identifier[request] ,
identifier[ugettext] ( literal[string] % identifier[page] . identifier[slug] ),
)
keyword[return] identifier[HttpResponseRedirect] ( identifier[redirect_to] )
identifier[template_context] ={
literal[string] : identifier[form] ,
literal[string] : identifier[delete_form] ,
literal[string] : identifier[page] ,
literal[string] : identifier[rev] ,
}
identifier[template_context] . identifier[update] ( identifier[extra_context] keyword[or] {})
keyword[return] identifier[render] ( identifier[request] , identifier[template_name] , identifier[template_context] ) | def edit(request, slug, rev_id=None, template_name='wakawaka/edit.html', extra_context=None, wiki_page_form=WikiPageForm, wiki_delete_form=DeleteWikiPageForm):
"""
Displays the form for editing and deleting a page.
"""
# Get the page for slug and get a specific revision, if given
try:
queryset = WikiPage.objects.all()
page = queryset.get(slug=slug)
rev = page.current
initial = {'content': page.current.content}
# Do not allow editing wiki pages if the user has no permission
if not request.user.has_perms(('wakawaka.change_wikipage', 'wakawaka.change_revision')):
return HttpResponseForbidden(ugettext("You don't have permission to edit pages.")) # depends on [control=['if'], data=[]]
if rev_id:
# There is a specific revision, fetch this
rev_specific = Revision.objects.get(pk=rev_id)
if rev.pk != rev_specific.pk:
rev = rev_specific
rev.is_not_current = True
initial = {'content': rev.content, 'message': _('Reverted to "%s"' % rev.message)} # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['try'], data=[]]
# This page does not exist, create a dummy page
# Note that it's not saved here
except WikiPage.DoesNotExist:
# Do not allow adding wiki pages if the user has no permission
if not request.user.has_perms(('wakawaka.add_wikipage', 'wakawaka.add_revision')):
return HttpResponseForbidden(ugettext("You don't have permission to add wiki pages.")) # depends on [control=['if'], data=[]]
page = WikiPage(slug=slug)
page.is_initial = True
rev = None
initial = {'content': _('Describe your new page %s here...' % slug), 'message': _('Initial revision')} # depends on [control=['except'], data=[]]
    # Don't display the delete form if the user has no permission
    delete_form = None
    # If the user does have permission, build it
if request.user.has_perm('wakawaka.delete_wikipage') or request.user.has_perm('wakawaka.delete_revision'):
delete_form = wiki_delete_form(request)
if request.method == 'POST' and request.POST.get('delete'):
delete_form = wiki_delete_form(request, request.POST)
if delete_form.is_valid():
return delete_form.delete_wiki(request, page, rev) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
# Page add/edit form
form = wiki_page_form(initial=initial)
if request.method == 'POST':
form = wiki_page_form(data=request.POST)
if form.is_valid():
# Check if the content is changed, except there is a rev_id and the
# user possibly only reverted the HEAD to it
if not rev_id and initial['content'] == form.cleaned_data['content']:
form.errors['content'] = (_('You have made no changes!'),) # depends on [control=['if'], data=[]]
else:
# Save the form and redirect to the page view
try:
# Check that the page already exist
queryset = WikiPage.objects.all()
page = queryset.get(slug=slug) # depends on [control=['try'], data=[]]
except WikiPage.DoesNotExist:
# Must be a new one, create that page
page = WikiPage(slug=slug)
page.save() # depends on [control=['except'], data=[]]
form.save(request, page)
kwargs = {'slug': page.slug}
redirect_to = reverse('wakawaka_page', kwargs=kwargs)
messages.success(request, ugettext('Your changes to %s were saved' % page.slug))
return HttpResponseRedirect(redirect_to) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
template_context = {'form': form, 'delete_form': delete_form, 'page': page, 'rev': rev}
template_context.update(extra_context or {})
return render(request, template_name, template_context) |
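
A hedged wiring sketch: the view redirects via the named URL 'wakawaka_page', so a minimal urlconf might look like this (the import path and the companion page view are assumptions):

from django.urls import path
from wakawaka.views import edit, page  # module path assumed

urlpatterns = [
    path('<str:slug>/', page, name='wakawaka_page'),
    path('<str:slug>/edit/', edit, name='wakawaka_edit'),
]
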
def prepare_dependencies(self):
"""
    Prepares a FileBuildInfo object for explaining what changed.
    The bsources, bdepends and bimplicit lists have all been
stored on disk as paths relative to the top-level SConstruct
directory. Convert the strings to actual Nodes (for use by the
--debug=explain code and --implicit-cache).
"""
attrs = [
('bsources', 'bsourcesigs'),
('bdepends', 'bdependsigs'),
('bimplicit', 'bimplicitsigs'),
]
for (nattr, sattr) in attrs:
try:
strings = getattr(self, nattr)
nodeinfos = getattr(self, sattr)
except AttributeError:
continue
if strings is None or nodeinfos is None:
continue
nodes = []
for s, ni in zip(strings, nodeinfos):
if not isinstance(s, SCons.Node.Node):
s = ni.str_to_node(s)
nodes.append(s)
setattr(self, nattr, nodes) | def function[prepare_dependencies, parameter[self]]:
constant[
Prepares a FileBuildInfo object for explaining what changed.
The bsources, bdepends and bimplicit lists have all been
stored on disk as paths relative to the top-level SConstruct
directory. Convert the strings to actual Nodes (for use by the
--debug=explain code and --implicit-cache).
]
variable[attrs] assign[=] list[[<ast.Tuple object at 0x7da20cabf0d0>, <ast.Tuple object at 0x7da20cabd8d0>, <ast.Tuple object at 0x7da20e9b21d0>]]
for taget[tuple[[<ast.Name object at 0x7da20e9b3a00>, <ast.Name object at 0x7da20e9b0640>]]] in starred[name[attrs]] begin[:]
<ast.Try object at 0x7da20e9b37c0>
if <ast.BoolOp object at 0x7da20e9b2b60> begin[:]
continue
variable[nodes] assign[=] list[[]]
for taget[tuple[[<ast.Name object at 0x7da18fe91de0>, <ast.Name object at 0x7da18fe90e50>]]] in starred[call[name[zip], parameter[name[strings], name[nodeinfos]]]] begin[:]
if <ast.UnaryOp object at 0x7da18fe91330> begin[:]
variable[s] assign[=] call[name[ni].str_to_node, parameter[name[s]]]
call[name[nodes].append, parameter[name[s]]]
call[name[setattr], parameter[name[self], name[nattr], name[nodes]]] | keyword[def] identifier[prepare_dependencies] ( identifier[self] ):
literal[string]
identifier[attrs] =[
( literal[string] , literal[string] ),
( literal[string] , literal[string] ),
( literal[string] , literal[string] ),
]
keyword[for] ( identifier[nattr] , identifier[sattr] ) keyword[in] identifier[attrs] :
keyword[try] :
identifier[strings] = identifier[getattr] ( identifier[self] , identifier[nattr] )
identifier[nodeinfos] = identifier[getattr] ( identifier[self] , identifier[sattr] )
keyword[except] identifier[AttributeError] :
keyword[continue]
keyword[if] identifier[strings] keyword[is] keyword[None] keyword[or] identifier[nodeinfos] keyword[is] keyword[None] :
keyword[continue]
identifier[nodes] =[]
keyword[for] identifier[s] , identifier[ni] keyword[in] identifier[zip] ( identifier[strings] , identifier[nodeinfos] ):
keyword[if] keyword[not] identifier[isinstance] ( identifier[s] , identifier[SCons] . identifier[Node] . identifier[Node] ):
identifier[s] = identifier[ni] . identifier[str_to_node] ( identifier[s] )
identifier[nodes] . identifier[append] ( identifier[s] )
identifier[setattr] ( identifier[self] , identifier[nattr] , identifier[nodes] ) | def prepare_dependencies(self):
"""
Prepares a FileBuildInfo object for explaining what changed
The bsources, bdepends and bimplicit lists have all been
stored on disk as paths relative to the top-level SConstruct
directory. Convert the strings to actual Nodes (for use by the
--debug=explain code and --implicit-cache).
"""
attrs = [('bsources', 'bsourcesigs'), ('bdepends', 'bdependsigs'), ('bimplicit', 'bimplicitsigs')]
for (nattr, sattr) in attrs:
try:
strings = getattr(self, nattr)
nodeinfos = getattr(self, sattr) # depends on [control=['try'], data=[]]
except AttributeError:
continue # depends on [control=['except'], data=[]]
if strings is None or nodeinfos is None:
continue # depends on [control=['if'], data=[]]
nodes = []
for (s, ni) in zip(strings, nodeinfos):
if not isinstance(s, SCons.Node.Node):
s = ni.str_to_node(s) # depends on [control=['if'], data=[]]
nodes.append(s) # depends on [control=['for'], data=[]]
setattr(self, nattr, nodes) # depends on [control=['for'], data=[]] |
def values(self):
"""Returns a list of all values in the dictionary.
Returns:
list of str: [value1,value2,...,valueN]
"""
all_values = [v.decode('utf-8') for k,v in self.rdb.hgetall(self.session_hash).items()]
return all_values | def function[values, parameter[self]]:
constant[Returns a list of all values in the dictionary.
Returns:
list of str: [value1,value2,...,valueN]
]
variable[all_values] assign[=] <ast.ListComp object at 0x7da18f09e2f0>
return[name[all_values]] | keyword[def] identifier[values] ( identifier[self] ):
literal[string]
identifier[all_values] =[ identifier[v] . identifier[decode] ( literal[string] ) keyword[for] identifier[k] , identifier[v] keyword[in] identifier[self] . identifier[rdb] . identifier[hgetall] ( identifier[self] . identifier[session_hash] ). identifier[items] ()]
keyword[return] identifier[all_values] | def values(self):
"""Returns a list of all values in the dictionary.
Returns:
list of str: [value1,value2,...,valueN]
"""
all_values = [v.decode('utf-8') for (k, v) in self.rdb.hgetall(self.session_hash).items()]
return all_values |
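
A hedged usage sketch, assuming a local Redis server and redis-py >= 3.5: hgetall returns bytes, so each value is decoded exactly as in the method above:

import redis

rdb = redis.Redis()
rdb.hset('session:1', mapping={'a': 'x', 'b': 'y'})
values = [v.decode('utf-8') for v in rdb.hgetall('session:1').values()]
print(sorted(values))  # ['x', 'y']
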
def transform(self, X, lenscale=None):
"""
Apply the Fast Food RBF basis to X.
Parameters
----------
X: ndarray
(N, d) array of observations where N is the number of samples, and
d is the dimensionality of X.
lenscale: scalar or ndarray, optional
scalar or array of shape (d,) length scales (one for each dimension
        of X). If not input, this uses the value of the initial length
scale.
Returns
-------
ndarray:
of shape (N, 2*nbases) where nbases is number of random bases to
use, given in the constructor (to nearest larger two power).
"""
lenscale = self._check_dim(X.shape[1], lenscale)
VX = self._makeVX(X / lenscale)
Phi = np.hstack((np.cos(VX), np.sin(VX))) / np.sqrt(self.n)
return Phi | def function[transform, parameter[self, X, lenscale]]:
constant[
Apply the Fast Food RBF basis to X.
Parameters
----------
X: ndarray
(N, d) array of observations where N is the number of samples, and
d is the dimensionality of X.
lenscale: scalar or ndarray, optional
scalar or array of shape (d,) length scales (one for each dimension
of X).If not input, this uses the value of the initial length
scale.
Returns
-------
ndarray:
of shape (N, 2*nbases) where nbases is number of random bases to
use, given in the constructor (to nearest larger two power).
]
variable[lenscale] assign[=] call[name[self]._check_dim, parameter[call[name[X].shape][constant[1]], name[lenscale]]]
variable[VX] assign[=] call[name[self]._makeVX, parameter[binary_operation[name[X] / name[lenscale]]]]
variable[Phi] assign[=] binary_operation[call[name[np].hstack, parameter[tuple[[<ast.Call object at 0x7da2054a4fa0>, <ast.Call object at 0x7da2054a4580>]]]] / call[name[np].sqrt, parameter[name[self].n]]]
return[name[Phi]] | keyword[def] identifier[transform] ( identifier[self] , identifier[X] , identifier[lenscale] = keyword[None] ):
literal[string]
identifier[lenscale] = identifier[self] . identifier[_check_dim] ( identifier[X] . identifier[shape] [ literal[int] ], identifier[lenscale] )
identifier[VX] = identifier[self] . identifier[_makeVX] ( identifier[X] / identifier[lenscale] )
identifier[Phi] = identifier[np] . identifier[hstack] (( identifier[np] . identifier[cos] ( identifier[VX] ), identifier[np] . identifier[sin] ( identifier[VX] )))/ identifier[np] . identifier[sqrt] ( identifier[self] . identifier[n] )
keyword[return] identifier[Phi] | def transform(self, X, lenscale=None):
"""
Apply the Fast Food RBF basis to X.
Parameters
----------
X: ndarray
(N, d) array of observations where N is the number of samples, and
d is the dimensionality of X.
lenscale: scalar or ndarray, optional
scalar or array of shape (d,) length scales (one for each dimension
        of X). If not input, this uses the value of the initial length
scale.
Returns
-------
ndarray:
of shape (N, 2*nbases) where nbases is number of random bases to
use, given in the constructor (to nearest larger two power).
"""
lenscale = self._check_dim(X.shape[1], lenscale)
VX = self._makeVX(X / lenscale)
Phi = np.hstack((np.cos(VX), np.sin(VX))) / np.sqrt(self.n)
return Phi |
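
A hedged standalone analog of the final step: stacking cos/sin of the projected inputs and scaling by sqrt(n), as in random Fourier features; the structured fast-food projection is replaced by a dense Gaussian matrix:

import numpy as np

rng = np.random.default_rng(0)
X = rng.normal(size=(5, 3))        # N=5 observations, d=3 dimensions
n = 8                              # number of random bases
V = rng.normal(size=(3, n))        # dense stand-in for the fast-food V
lenscale = 1.0
VX = (X / lenscale) @ V
Phi = np.hstack((np.cos(VX), np.sin(VX))) / np.sqrt(n)
print(Phi.shape)                   # (5, 16), i.e. 2*n features
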
def _get_basilisp_bytecode(
fullname: str, mtime: int, source_size: int, cache_data: bytes
) -> List[types.CodeType]:
"""Unmarshal the bytes from a Basilisp bytecode cache file, validating the
file header prior to returning. If the file header does not match, throw
an exception."""
exc_details = {"name": fullname}
magic = cache_data[:4]
raw_timestamp = cache_data[4:8]
raw_size = cache_data[8:12]
if magic != MAGIC_NUMBER:
message = (
f"Incorrect magic number ({magic}) in {fullname}; expected {MAGIC_NUMBER}"
)
logger.debug(message)
raise ImportError(message, **exc_details) # type: ignore
elif len(raw_timestamp) != 4:
message = f"Reached EOF while reading timestamp in {fullname}"
logger.debug(message)
raise EOFError(message)
elif _r_long(raw_timestamp) != mtime:
message = f"Non-matching timestamp ({_r_long(raw_timestamp)}) in {fullname} bytecode cache; expected {mtime}"
logger.debug(message)
raise ImportError(message, **exc_details) # type: ignore
elif len(raw_size) != 4:
message = f"Reached EOF while reading size of source in {fullname}"
logger.debug(message)
raise EOFError(message)
elif _r_long(raw_size) != source_size:
message = f"Non-matching filesize ({_r_long(raw_size)}) in {fullname} bytecode cache; expected {source_size}"
logger.debug(message)
raise ImportError(message, **exc_details) # type: ignore
return marshal.loads(cache_data[12:]) | def function[_get_basilisp_bytecode, parameter[fullname, mtime, source_size, cache_data]]:
constant[Unmarshal the bytes from a Basilisp bytecode cache file, validating the
file header prior to returning. If the file header does not match, throw
an exception.]
variable[exc_details] assign[=] dictionary[[<ast.Constant object at 0x7da1b02127a0>], [<ast.Name object at 0x7da1b0211e40>]]
variable[magic] assign[=] call[name[cache_data]][<ast.Slice object at 0x7da1b02112d0>]
variable[raw_timestamp] assign[=] call[name[cache_data]][<ast.Slice object at 0x7da1b0213280>]
variable[raw_size] assign[=] call[name[cache_data]][<ast.Slice object at 0x7da1b0210070>]
if compare[name[magic] not_equal[!=] name[MAGIC_NUMBER]] begin[:]
variable[message] assign[=] <ast.JoinedStr object at 0x7da1b02103d0>
call[name[logger].debug, parameter[name[message]]]
<ast.Raise object at 0x7da1b0210a30>
return[call[name[marshal].loads, parameter[call[name[cache_data]][<ast.Slice object at 0x7da1b023d2a0>]]]] | keyword[def] identifier[_get_basilisp_bytecode] (
identifier[fullname] : identifier[str] , identifier[mtime] : identifier[int] , identifier[source_size] : identifier[int] , identifier[cache_data] : identifier[bytes]
)-> identifier[List] [ identifier[types] . identifier[CodeType] ]:
literal[string]
identifier[exc_details] ={ literal[string] : identifier[fullname] }
identifier[magic] = identifier[cache_data] [: literal[int] ]
identifier[raw_timestamp] = identifier[cache_data] [ literal[int] : literal[int] ]
identifier[raw_size] = identifier[cache_data] [ literal[int] : literal[int] ]
keyword[if] identifier[magic] != identifier[MAGIC_NUMBER] :
identifier[message] =(
literal[string]
)
identifier[logger] . identifier[debug] ( identifier[message] )
keyword[raise] identifier[ImportError] ( identifier[message] ,** identifier[exc_details] )
keyword[elif] identifier[len] ( identifier[raw_timestamp] )!= literal[int] :
identifier[message] = literal[string]
identifier[logger] . identifier[debug] ( identifier[message] )
keyword[raise] identifier[EOFError] ( identifier[message] )
keyword[elif] identifier[_r_long] ( identifier[raw_timestamp] )!= identifier[mtime] :
identifier[message] = literal[string]
identifier[logger] . identifier[debug] ( identifier[message] )
keyword[raise] identifier[ImportError] ( identifier[message] ,** identifier[exc_details] )
keyword[elif] identifier[len] ( identifier[raw_size] )!= literal[int] :
identifier[message] = literal[string]
identifier[logger] . identifier[debug] ( identifier[message] )
keyword[raise] identifier[EOFError] ( identifier[message] )
keyword[elif] identifier[_r_long] ( identifier[raw_size] )!= identifier[source_size] :
identifier[message] = literal[string]
identifier[logger] . identifier[debug] ( identifier[message] )
keyword[raise] identifier[ImportError] ( identifier[message] ,** identifier[exc_details] )
keyword[return] identifier[marshal] . identifier[loads] ( identifier[cache_data] [ literal[int] :]) | def _get_basilisp_bytecode(fullname: str, mtime: int, source_size: int, cache_data: bytes) -> List[types.CodeType]:
"""Unmarshal the bytes from a Basilisp bytecode cache file, validating the
file header prior to returning. If the file header does not match, throw
an exception."""
exc_details = {'name': fullname}
magic = cache_data[:4]
raw_timestamp = cache_data[4:8]
raw_size = cache_data[8:12]
if magic != MAGIC_NUMBER:
message = f'Incorrect magic number ({magic}) in {fullname}; expected {MAGIC_NUMBER}'
logger.debug(message)
raise ImportError(message, **exc_details) # type: ignore # depends on [control=['if'], data=['magic', 'MAGIC_NUMBER']]
elif len(raw_timestamp) != 4:
message = f'Reached EOF while reading timestamp in {fullname}'
logger.debug(message)
raise EOFError(message) # depends on [control=['if'], data=[]]
elif _r_long(raw_timestamp) != mtime:
message = f'Non-matching timestamp ({_r_long(raw_timestamp)}) in {fullname} bytecode cache; expected {mtime}'
logger.debug(message)
raise ImportError(message, **exc_details) # type: ignore # depends on [control=['if'], data=['mtime']]
elif len(raw_size) != 4:
message = f'Reached EOF while reading size of source in {fullname}'
logger.debug(message)
raise EOFError(message) # depends on [control=['if'], data=[]]
elif _r_long(raw_size) != source_size:
message = f'Non-matching filesize ({_r_long(raw_size)}) in {fullname} bytecode cache; expected {source_size}'
logger.debug(message)
raise ImportError(message, **exc_details) # type: ignore # depends on [control=['if'], data=['source_size']]
return marshal.loads(cache_data[12:]) |
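To make the 12-byte header concrete, here is a hedged round-trip sketch; MAGIC is a made-up stand-in for the module's MAGIC_NUMBER, and little-endian uint32 packing is only an assumption about _r_long:
import marshal
import struct
MAGIC = b'BSL\n'  # hypothetical magic number
code = compile('x = 1', '<demo>', 'exec')
mtime, source_size = 1700000000, 42
cache = MAGIC + struct.pack('<I', mtime) + struct.pack('<I', source_size) + marshal.dumps([code])
# the reader above slices the same offsets: [:4], [4:8], [8:12], [12:]
assert cache[:4] == MAGIC
assert struct.unpack('<I', cache[4:8])[0] == mtime
print(len(marshal.loads(cache[12:])))  # 1 code object recovered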
def view_assets_by_site(token, dstore):
"""
Display statistical information about the distribution of the assets
"""
taxonomies = dstore['assetcol/tagcol/taxonomy'].value
assets_by_site = dstore['assetcol'].assets_by_site()
data = ['taxonomy mean stddev min max num_sites num_assets'.split()]
num_assets = AccumDict()
for assets in assets_by_site:
num_assets += {k: [len(v)] for k, v in group_array(
assets, 'taxonomy').items()}
for taxo in sorted(num_assets):
val = numpy.array(num_assets[taxo])
data.append(stats(taxonomies[taxo], val, val.sum()))
if len(num_assets) > 1: # more than one taxonomy, add a summary
n_assets = numpy.array([len(assets) for assets in assets_by_site])
data.append(stats('*ALL*', n_assets, n_assets.sum()))
return rst_table(data) | def function[view_assets_by_site, parameter[token, dstore]]:
constant[
Display statistical information about the distribution of the assets
]
variable[taxonomies] assign[=] call[name[dstore]][constant[assetcol/tagcol/taxonomy]].value
variable[assets_by_site] assign[=] call[call[name[dstore]][constant[assetcol]].assets_by_site, parameter[]]
variable[data] assign[=] list[[<ast.Call object at 0x7da20c991c30>]]
variable[num_assets] assign[=] call[name[AccumDict], parameter[]]
for taget[name[assets]] in starred[name[assets_by_site]] begin[:]
<ast.AugAssign object at 0x7da20c991cc0>
for taget[name[taxo]] in starred[call[name[sorted], parameter[name[num_assets]]]] begin[:]
variable[val] assign[=] call[name[numpy].array, parameter[call[name[num_assets]][name[taxo]]]]
call[name[data].append, parameter[call[name[stats], parameter[call[name[taxonomies]][name[taxo]], name[val], call[name[val].sum, parameter[]]]]]]
if compare[call[name[len], parameter[name[num_assets]]] greater[>] constant[1]] begin[:]
variable[n_assets] assign[=] call[name[numpy].array, parameter[<ast.ListComp object at 0x7da18dc05780>]]
call[name[data].append, parameter[call[name[stats], parameter[constant[*ALL*], name[n_assets], call[name[n_assets].sum, parameter[]]]]]]
return[call[name[rst_table], parameter[name[data]]]] | keyword[def] identifier[view_assets_by_site] ( identifier[token] , identifier[dstore] ):
literal[string]
identifier[taxonomies] = identifier[dstore] [ literal[string] ]. identifier[value]
identifier[assets_by_site] = identifier[dstore] [ literal[string] ]. identifier[assets_by_site] ()
identifier[data] =[ literal[string] . identifier[split] ()]
identifier[num_assets] = identifier[AccumDict] ()
keyword[for] identifier[assets] keyword[in] identifier[assets_by_site] :
identifier[num_assets] +={ identifier[k] :[ identifier[len] ( identifier[v] )] keyword[for] identifier[k] , identifier[v] keyword[in] identifier[group_array] (
identifier[assets] , literal[string] ). identifier[items] ()}
keyword[for] identifier[taxo] keyword[in] identifier[sorted] ( identifier[num_assets] ):
identifier[val] = identifier[numpy] . identifier[array] ( identifier[num_assets] [ identifier[taxo] ])
identifier[data] . identifier[append] ( identifier[stats] ( identifier[taxonomies] [ identifier[taxo] ], identifier[val] , identifier[val] . identifier[sum] ()))
keyword[if] identifier[len] ( identifier[num_assets] )> literal[int] :
identifier[n_assets] = identifier[numpy] . identifier[array] ([ identifier[len] ( identifier[assets] ) keyword[for] identifier[assets] keyword[in] identifier[assets_by_site] ])
identifier[data] . identifier[append] ( identifier[stats] ( literal[string] , identifier[n_assets] , identifier[n_assets] . identifier[sum] ()))
keyword[return] identifier[rst_table] ( identifier[data] ) | def view_assets_by_site(token, dstore):
"""
Display statistical information about the distribution of the assets
"""
taxonomies = dstore['assetcol/tagcol/taxonomy'].value
assets_by_site = dstore['assetcol'].assets_by_site()
data = ['taxonomy mean stddev min max num_sites num_assets'.split()]
num_assets = AccumDict()
for assets in assets_by_site:
num_assets += {k: [len(v)] for (k, v) in group_array(assets, 'taxonomy').items()} # depends on [control=['for'], data=['assets']]
for taxo in sorted(num_assets):
val = numpy.array(num_assets[taxo])
data.append(stats(taxonomies[taxo], val, val.sum())) # depends on [control=['for'], data=['taxo']]
if len(num_assets) > 1: # more than one taxonomy, add a summary
n_assets = numpy.array([len(assets) for assets in assets_by_site])
data.append(stats('*ALL*', n_assets, n_assets.sum())) # depends on [control=['if'], data=[]]
return rst_table(data) |
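The per-taxonomy accumulation above can be sketched with a plain defaultdict standing in for openquake's AccumDict and group_array (the structured arrays are invented test data):
import numpy as np
from collections import defaultdict
dt = [('taxonomy', 'U8')]
assets_by_site = [np.array([('tax1',), ('tax1',), ('tax2',)], dtype=dt),
                  np.array([('tax2',)], dtype=dt)]
num_assets = defaultdict(list)
for assets in assets_by_site:
    for taxo in np.unique(assets['taxonomy']):
        num_assets[taxo].append(int((assets['taxonomy'] == taxo).sum()))
print(dict(num_assets))  # {'tax1': [2], 'tax2': [1, 1]}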
def delete_record(zone_id, record_id, profile):
'''
Delete a record.
:param zone_id: Zone containing the record.
:type zone_id: ``str``
:param record_id: Record to delete.
:type record_id: ``str``
:param profile: The profile key
:type profile: ``str``
:rtype: ``bool``
CLI Example:
.. code-block:: bash
salt myminion libcloud_dns.delete_record google.com www profile1
'''
conn = _get_driver(profile=profile)
record = conn.get_record(zone_id=zone_id, record_id=record_id)
return conn.delete_record(record) | def function[delete_record, parameter[zone_id, record_id, profile]]:
constant[
Delete a record.
:param zone_id: Zone containing the record.
:type zone_id: ``str``
:param record_id: Record to delete.
:type record_id: ``str``
:param profile: The profile key
:type profile: ``str``
:rtype: ``bool``
CLI Example:
.. code-block:: bash
salt myminion libcloud_dns.delete_record google.com www profile1
]
variable[conn] assign[=] call[name[_get_driver], parameter[]]
variable[record] assign[=] call[name[conn].get_record, parameter[]]
return[call[name[conn].delete_record, parameter[name[record]]]] | keyword[def] identifier[delete_record] ( identifier[zone_id] , identifier[record_id] , identifier[profile] ):
literal[string]
identifier[conn] = identifier[_get_driver] ( identifier[profile] = identifier[profile] )
identifier[record] = identifier[conn] . identifier[get_record] ( identifier[zone_id] = identifier[zone_id] , identifier[record_id] = identifier[record_id] )
keyword[return] identifier[conn] . identifier[delete_record] ( identifier[record] ) | def delete_record(zone_id, record_id, profile):
"""
Delete a record.
:param zone_id: Zone containing the record.
:type zone_id: ``str``
:param record_id: Record to delete.
:type record_id: ``str``
:param profile: The profile key
:type profile: ``str``
:rtype: ``bool``
CLI Example:
.. code-block:: bash
salt myminion libcloud_dns.delete_record google.com www profile1
"""
conn = _get_driver(profile=profile)
record = conn.get_record(zone_id=zone_id, record_id=record_id)
return conn.delete_record(record) |
def setup_scheduler(self, before_connect=False):
'''
Set up the scheduler.
This is safe to call multiple times.
'''
self._setup_core()
loop_interval = self.opts['loop_interval']
new_periodic_callbacks = {}
if 'schedule' not in self.periodic_callbacks:
if 'schedule' not in self.opts:
self.opts['schedule'] = {}
if not hasattr(self, 'schedule'):
self.schedule = salt.utils.schedule.Schedule(
self.opts,
self.functions,
self.returners,
utils=self.utils,
cleanup=[master_event(type='alive')])
try:
if self.opts['grains_refresh_every']: # In minutes, not seconds!
log.debug(
'Enabling the grains refresher. Will run every %d minute(s).',
self.opts['grains_refresh_every']
)
self._refresh_grains_watcher(abs(self.opts['grains_refresh_every']))
except Exception as exc:
log.error(
'Exception occurred in attempt to initialize grain refresh '
'routine during minion tune-in: %s', exc
)
# TODO: actually listen to the return and change period
def handle_schedule():
self.process_schedule(self, loop_interval)
new_periodic_callbacks['schedule'] = tornado.ioloop.PeriodicCallback(handle_schedule, 1000)
if before_connect:
# Make sure there is a chance for one iteration to occur before connect
handle_schedule()
if 'cleanup' not in self.periodic_callbacks:
new_periodic_callbacks['cleanup'] = tornado.ioloop.PeriodicCallback(
self._fallback_cleanups, loop_interval * 1000)
# start all the other callbacks
for periodic_cb in six.itervalues(new_periodic_callbacks):
periodic_cb.start()
self.periodic_callbacks.update(new_periodic_callbacks) | def function[setup_scheduler, parameter[self, before_connect]]:
constant[
Set up the scheduler.
This is safe to call multiple times.
]
call[name[self]._setup_core, parameter[]]
variable[loop_interval] assign[=] call[name[self].opts][constant[loop_interval]]
variable[new_periodic_callbacks] assign[=] dictionary[[], []]
if compare[constant[schedule] <ast.NotIn object at 0x7da2590d7190> name[self].periodic_callbacks] begin[:]
if compare[constant[schedule] <ast.NotIn object at 0x7da2590d7190> name[self].opts] begin[:]
call[name[self].opts][constant[schedule]] assign[=] dictionary[[], []]
if <ast.UnaryOp object at 0x7da1b1f3dea0> begin[:]
name[self].schedule assign[=] call[name[salt].utils.schedule.Schedule, parameter[name[self].opts, name[self].functions, name[self].returners]]
<ast.Try object at 0x7da1b1f3c370>
def function[handle_schedule, parameter[]]:
call[name[self].process_schedule, parameter[name[self], name[loop_interval]]]
call[name[new_periodic_callbacks]][constant[schedule]] assign[=] call[name[tornado].ioloop.PeriodicCallback, parameter[name[handle_schedule], constant[1000]]]
if name[before_connect] begin[:]
call[name[handle_schedule], parameter[]]
if compare[constant[cleanup] <ast.NotIn object at 0x7da2590d7190> name[self].periodic_callbacks] begin[:]
call[name[new_periodic_callbacks]][constant[cleanup]] assign[=] call[name[tornado].ioloop.PeriodicCallback, parameter[name[self]._fallback_cleanups, binary_operation[name[loop_interval] * constant[1000]]]]
for taget[name[periodic_cb]] in starred[call[name[six].itervalues, parameter[name[new_periodic_callbacks]]]] begin[:]
call[name[periodic_cb].start, parameter[]]
call[name[self].periodic_callbacks.update, parameter[name[new_periodic_callbacks]]] | keyword[def] identifier[setup_scheduler] ( identifier[self] , identifier[before_connect] = keyword[False] ):
literal[string]
identifier[self] . identifier[_setup_core] ()
identifier[loop_interval] = identifier[self] . identifier[opts] [ literal[string] ]
identifier[new_periodic_callbacks] ={}
keyword[if] literal[string] keyword[not] keyword[in] identifier[self] . identifier[periodic_callbacks] :
keyword[if] literal[string] keyword[not] keyword[in] identifier[self] . identifier[opts] :
identifier[self] . identifier[opts] [ literal[string] ]={}
keyword[if] keyword[not] identifier[hasattr] ( identifier[self] , literal[string] ):
identifier[self] . identifier[schedule] = identifier[salt] . identifier[utils] . identifier[schedule] . identifier[Schedule] (
identifier[self] . identifier[opts] ,
identifier[self] . identifier[functions] ,
identifier[self] . identifier[returners] ,
identifier[utils] = identifier[self] . identifier[utils] ,
identifier[cleanup] =[ identifier[master_event] ( identifier[type] = literal[string] )])
keyword[try] :
keyword[if] identifier[self] . identifier[opts] [ literal[string] ]:
identifier[log] . identifier[debug] (
literal[string] ,
identifier[self] . identifier[opts] [ literal[string] ]
)
identifier[self] . identifier[_refresh_grains_watcher] ( identifier[abs] ( identifier[self] . identifier[opts] [ literal[string] ]))
keyword[except] identifier[Exception] keyword[as] identifier[exc] :
identifier[log] . identifier[error] (
literal[string]
literal[string] , identifier[exc]
)
keyword[def] identifier[handle_schedule] ():
identifier[self] . identifier[process_schedule] ( identifier[self] , identifier[loop_interval] )
identifier[new_periodic_callbacks] [ literal[string] ]= identifier[tornado] . identifier[ioloop] . identifier[PeriodicCallback] ( identifier[handle_schedule] , literal[int] )
keyword[if] identifier[before_connect] :
identifier[handle_schedule] ()
keyword[if] literal[string] keyword[not] keyword[in] identifier[self] . identifier[periodic_callbacks] :
identifier[new_periodic_callbacks] [ literal[string] ]= identifier[tornado] . identifier[ioloop] . identifier[PeriodicCallback] (
identifier[self] . identifier[_fallback_cleanups] , identifier[loop_interval] * literal[int] )
keyword[for] identifier[periodic_cb] keyword[in] identifier[six] . identifier[itervalues] ( identifier[new_periodic_callbacks] ):
identifier[periodic_cb] . identifier[start] ()
identifier[self] . identifier[periodic_callbacks] . identifier[update] ( identifier[new_periodic_callbacks] ) | def setup_scheduler(self, before_connect=False):
"""
Set up the scheduler.
This is safe to call multiple times.
"""
self._setup_core()
loop_interval = self.opts['loop_interval']
new_periodic_callbacks = {}
if 'schedule' not in self.periodic_callbacks:
if 'schedule' not in self.opts:
self.opts['schedule'] = {} # depends on [control=['if'], data=[]]
if not hasattr(self, 'schedule'):
self.schedule = salt.utils.schedule.Schedule(self.opts, self.functions, self.returners, utils=self.utils, cleanup=[master_event(type='alive')]) # depends on [control=['if'], data=[]]
try:
if self.opts['grains_refresh_every']: # In minutes, not seconds!
log.debug('Enabling the grains refresher. Will run every %d minute(s).', self.opts['grains_refresh_every'])
self._refresh_grains_watcher(abs(self.opts['grains_refresh_every'])) # depends on [control=['if'], data=[]] # depends on [control=['try'], data=[]]
except Exception as exc:
log.error('Exception occurred in attempt to initialize grain refresh routine during minion tune-in: %s', exc) # depends on [control=['except'], data=['exc']]
# TODO: actually listen to the return and change period
def handle_schedule():
self.process_schedule(self, loop_interval)
new_periodic_callbacks['schedule'] = tornado.ioloop.PeriodicCallback(handle_schedule, 1000)
if before_connect:
# Make sure there is a chance for one iteration to occur before connect
handle_schedule() # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
if 'cleanup' not in self.periodic_callbacks:
new_periodic_callbacks['cleanup'] = tornado.ioloop.PeriodicCallback(self._fallback_cleanups, loop_interval * 1000) # depends on [control=['if'], data=[]]
# start all the other callbacks
for periodic_cb in six.itervalues(new_periodic_callbacks):
periodic_cb.start() # depends on [control=['for'], data=['periodic_cb']]
self.periodic_callbacks.update(new_periodic_callbacks) |
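A stripped-down sketch of the callback bookkeeping above (names are illustrative); tornado's PeriodicCallback takes the callback and an interval in milliseconds:
import tornado.ioloop
periodic_callbacks = {}
def handle_schedule():
    print('tick')  # stand-in for process_schedule()
new_periodic_callbacks = {'schedule': tornado.ioloop.PeriodicCallback(handle_schedule, 1000)}
for cb in new_periodic_callbacks.values():
    cb.start()
periodic_callbacks.update(new_periodic_callbacks)
# tornado.ioloop.IOLoop.current().start() would then drive the ticks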
def generate_timestamped_string(subject="test", number_of_random_chars=4):
"""
Generate time-stamped string. Format as follows...
`2013-01-31_14:12:23_SubjectString_a3Zg`
Kwargs:
subject (str): String to use as subject.
number_of_random_chars (int): Number of random characters to append.
This method is helpful for creating unique names with timestamps in them so
that when you have to troubleshoot an issue, the name is easier to find.::
project_name = generate_timestamped_string("project")
new_project_page.create_project(project_name)
"""
random_str = generate_random_string(number_of_random_chars)
timestamp = generate_timestamp()
return u"{timestamp}_{subject}_{random_str}".format(timestamp=timestamp,
subject=subject,
random_str=random_str) | def function[generate_timestamped_string, parameter[subject, number_of_random_chars]]:
constant[
Generate time-stamped string. Format as follows...
`2013-01-31_14:12:23_SubjectString_a3Zg`
Kwargs:
subject (str): String to use as subject.
number_of_random_chars (int): Number of random characters to append.
This method is helpful for creating unique names with timestamps in them so
that when you have to troubleshoot an issue, the name is easier to find.::
project_name = generate_timestamped_string("project")
new_project_page.create_project(project_name)
]
variable[random_str] assign[=] call[name[generate_random_string], parameter[name[number_of_random_chars]]]
variable[timestamp] assign[=] call[name[generate_timestamp], parameter[]]
return[call[constant[{timestamp}_{subject}_{random_str}].format, parameter[]]] | keyword[def] identifier[generate_timestamped_string] ( identifier[subject] = literal[string] , identifier[number_of_random_chars] = literal[int] ):
literal[string]
identifier[random_str] = identifier[generate_random_string] ( identifier[number_of_random_chars] )
identifier[timestamp] = identifier[generate_timestamp] ()
keyword[return] literal[string] . identifier[format] ( identifier[timestamp] = identifier[timestamp] ,
identifier[subject] = identifier[subject] ,
identifier[random_str] = identifier[random_str] ) | def generate_timestamped_string(subject='test', number_of_random_chars=4):
"""
Generate time-stamped string. Format as follows...
`2013-01-31_14:12:23_SubjectString_a3Zg`
Kwargs:
subject (str): String to use as subject.
number_of_random_chars (int): Number of random characters to append.
This method is helpful for creating unique names with timestamps in them so
that when you have to troubleshoot an issue, the name is easier to find.::
project_name = generate_timestamped_string("project")
new_project_page.create_project(project_name)
"""
random_str = generate_random_string(number_of_random_chars)
timestamp = generate_timestamp()
return u'{timestamp}_{subject}_{random_str}'.format(timestamp=timestamp, subject=subject, random_str=random_str) |
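The two helpers are assumed by the function above; plausible stand-ins make the output format concrete:
import random
import string
from datetime import datetime
def generate_random_string(n):
    return ''.join(random.choices(string.ascii_letters + string.digits, k=n))
def generate_timestamp():
    return datetime.now().strftime('%Y-%m-%d_%H:%M:%S')
print(u'{}_{}_{}'.format(generate_timestamp(), 'test', generate_random_string(4)))
# e.g. 2013-01-31_14:12:23_test_a3Zg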
def list(self):
"""Lists the keys
:return: Returns a list of all keys (not just key names, but rather
the keys themselves).
"""
response = self.client.list_objects_v2(Bucket=self.db_path)
if u'Contents' in response:
# Filter out everything but the key names
keys = [key[u'Key'] for key in response[u'Contents']]
keys_list = []
for key_name in keys:
key = self.get(key_name)
keys_list.append(key)
return keys_list
return [] | def function[list, parameter[self]]:
constant[Lists the keys
:return: Returns a list of all keys (not just key names, but rather
the keys themselves).
]
variable[response] assign[=] call[name[self].client.list_objects_v2, parameter[]]
if compare[constant[Contents] in name[response]] begin[:]
variable[keys] assign[=] <ast.ListComp object at 0x7da1b00f5600>
variable[keys_list] assign[=] list[[]]
for taget[name[key_name]] in starred[name[keys]] begin[:]
variable[key] assign[=] call[name[self].get, parameter[name[key_name]]]
call[name[keys_list].append, parameter[name[key]]]
return[name[keys_list]]
return[list[[]]] | keyword[def] identifier[list] ( identifier[self] ):
literal[string]
identifier[response] = identifier[self] . identifier[client] . identifier[list_objects_v2] ( identifier[Bucket] = identifier[self] . identifier[db_path] )
keyword[if] literal[string] keyword[in] identifier[response] :
identifier[keys] =[ identifier[key] [ literal[string] ] keyword[for] identifier[key] keyword[in] identifier[response] [ literal[string] ]]
identifier[keys_list] =[]
keyword[for] identifier[key_name] keyword[in] identifier[keys] :
identifier[key] = identifier[self] . identifier[get] ( identifier[key_name] )
identifier[keys_list] . identifier[append] ( identifier[key] )
keyword[return] identifier[keys_list]
keyword[return] [] | def list(self):
"""Lists the keys
:return: Returns a list of all keys (not just key names, but rather
the keys themselves).
"""
response = self.client.list_objects_v2(Bucket=self.db_path)
if u'Contents' in response:
# Filter out everything but the key names
keys = [key[u'Key'] for key in response[u'Contents']]
keys_list = []
for key_name in keys:
key = self.get(key_name)
keys_list.append(key) # depends on [control=['for'], data=['key_name']]
return keys_list # depends on [control=['if'], data=['response']]
return [] |
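A hedged boto3 sketch of the listing step; 'my-bucket' is a placeholder, and note that list_objects_v2 returns at most 1000 keys per call, so a paginator would be needed for larger buckets:
import boto3
client = boto3.client('s3')
response = client.list_objects_v2(Bucket='my-bucket')  # placeholder bucket name
names = [obj['Key'] for obj in response.get('Contents', [])]
print(names)  # key names; the class above then fetches each full key via .get()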
def calc_avr_uvr_v1(self):
"""Calculate the flown through area and the wetted perimeter of both
outer embankments.
Note that each outer embankment lies beyond its foreland and that all
water flowing exactly above an embankment is added to |AVR|.
The theoretical surface separating water above the foreland from water
above its embankment is not contributing to |UVR|.
Required control parameters:
|HM|
|BNVR|
Required derived parameter:
|HV|
Required flux sequence:
|H|
Calculated flux sequence:
|AVR|
|UVR|
Examples:
Generally, triangles are assumed, with the vertical side
separating the foreland from its outer embankment. Here, for
simplicity, both forelands are assumed to be symmetrical. Their
inclinations are 1 meter per 4 meters and their lowest point is
1 meter above the foreland's bottom and 2 meters above the main
channel's bottom:
>>> from hydpy.models.lstream import *
>>> parameterstep()
>>> hm(1.0)
>>> bnvr(4.0)
>>> derived.hv(1.0)
The first example deals with moderate high flow conditions, where
water flows over the forelands, but not over their outer embankments
(|HM| < |H| < (|HM| + |HV|)):
>>> fluxes.h = 1.5
>>> model.calc_avr_uvr_v1()
>>> fluxes.avr
avr(0.0, 0.0)
>>> fluxes.uvr
uvr(0.0, 0.0)
The second example deals with extreme high flow conditions, where
water flows over both the forelands and their outer embankments
((|HM| + |HV|) < |H|):
>>> fluxes.h = 2.5
>>> model.calc_avr_uvr_v1()
>>> fluxes.avr
avr(0.5, 0.5)
>>> fluxes.uvr
uvr(2.061553, 2.061553)
"""
con = self.parameters.control.fastaccess
der = self.parameters.derived.fastaccess
flu = self.sequences.fluxes.fastaccess
for i in range(2):
if flu.h <= (con.hm+der.hv[i]):
flu.avr[i] = 0.
flu.uvr[i] = 0.
else:
flu.avr[i] = (flu.h-(con.hm+der.hv[i]))**2*con.bnvr[i]/2.
flu.uvr[i] = (flu.h-(con.hm+der.hv[i]))*(1.+con.bnvr[i]**2)**.5 | def function[calc_avr_uvr_v1, parameter[self]]:
constant[Calculate the flown through area and the wetted perimeter of both
outer embankments.
Note that each outer embankment lies beyond its foreland and that all
water flowing exactly above an embankment is added to |AVR|.
The theoretical surface separating water above the foreland from water
above its embankment is not contributing to |UVR|.
Required control parameters:
|HM|
|BNVR|
Required derived parameter:
|HV|
Required flux sequence:
|H|
Calculated flux sequence:
|AVR|
|UVR|
Examples:
Generally, triangles are assumed, with the vertical side
separating the foreland from its outer embankment. Here, for
simplicity, both forelands are assumed to be symmetrical. Their
inclinations are 1 meter per 4 meters and their lowest point is
1 meter above the foreland's bottom and 2 meters above the main
channel's bottom:
>>> from hydpy.models.lstream import *
>>> parameterstep()
>>> hm(1.0)
>>> bnvr(4.0)
>>> derived.hv(1.0)
The first example deals with moderate high flow conditions, where
water flows over the forelands, but not over their outer embankments
(|HM| < |H| < (|HM| + |HV|)):
>>> fluxes.h = 1.5
>>> model.calc_avr_uvr_v1()
>>> fluxes.avr
avr(0.0, 0.0)
>>> fluxes.uvr
uvr(0.0, 0.0)
The second example deals with extreme high flow conditions, where
water flows over both the forelands and their outer embankments
((|HM| + |HV|) < |H|):
>>> fluxes.h = 2.5
>>> model.calc_avr_uvr_v1()
>>> fluxes.avr
avr(0.5, 0.5)
>>> fluxes.uvr
uvr(2.061553, 2.061553)
]
variable[con] assign[=] name[self].parameters.control.fastaccess
variable[der] assign[=] name[self].parameters.derived.fastaccess
variable[flu] assign[=] name[self].sequences.fluxes.fastaccess
for taget[name[i]] in starred[call[name[range], parameter[constant[2]]]] begin[:]
if compare[name[flu].h less_or_equal[<=] binary_operation[name[con].hm + call[name[der].hv][name[i]]]] begin[:]
call[name[flu].avr][name[i]] assign[=] constant[0.0]
call[name[flu].uvr][name[i]] assign[=] constant[0.0] | keyword[def] identifier[calc_avr_uvr_v1] ( identifier[self] ):
literal[string]
identifier[con] = identifier[self] . identifier[parameters] . identifier[control] . identifier[fastaccess]
identifier[der] = identifier[self] . identifier[parameters] . identifier[derived] . identifier[fastaccess]
identifier[flu] = identifier[self] . identifier[sequences] . identifier[fluxes] . identifier[fastaccess]
keyword[for] identifier[i] keyword[in] identifier[range] ( literal[int] ):
keyword[if] identifier[flu] . identifier[h] <=( identifier[con] . identifier[hm] + identifier[der] . identifier[hv] [ identifier[i] ]):
identifier[flu] . identifier[avr] [ identifier[i] ]= literal[int]
identifier[flu] . identifier[uvr] [ identifier[i] ]= literal[int]
keyword[else] :
identifier[flu] . identifier[avr] [ identifier[i] ]=( identifier[flu] . identifier[h] -( identifier[con] . identifier[hm] + identifier[der] . identifier[hv] [ identifier[i] ]))** literal[int] * identifier[con] . identifier[bnvr] [ identifier[i] ]/ literal[int]
identifier[flu] . identifier[uvr] [ identifier[i] ]=( identifier[flu] . identifier[h] -( identifier[con] . identifier[hm] + identifier[der] . identifier[hv] [ identifier[i] ]))*( literal[int] + identifier[con] . identifier[bnvr] [ identifier[i] ]** literal[int] )** literal[int] | def calc_avr_uvr_v1(self):
"""Calculate the flown through area and the wetted perimeter of both
outer embankments.
Note that each outer embankment lies beyond its foreland and that all
water flowing exactly above an embankment is added to |AVR|.
The theoretical surface separating water above the foreland from water
above its embankment is not contributing to |UVR|.
Required control parameters:
|HM|
|BNVR|
Required derived parameter:
|HV|
Required flux sequence:
|H|
Calculated flux sequence:
|AVR|
|UVR|
Examples:
Generally, triangles are assumed, with the vertical side
separating the foreland from its outer embankment. Here, for
simplicity, both forelands are assumed to be symmetrical. Their
inclinations are 1 meter per 4 meters and their lowest point is
1 meter above the foreland's bottom and 2 meters above the main
channel's bottom:
>>> from hydpy.models.lstream import *
>>> parameterstep()
>>> hm(1.0)
>>> bnvr(4.0)
>>> derived.hv(1.0)
The first example deals with moderate high flow conditions, where
water flows over the forelands, but not over their outer embankments
(|HM| < |H| < (|HM| + |HV|)):
>>> fluxes.h = 1.5
>>> model.calc_avr_uvr_v1()
>>> fluxes.avr
avr(0.0, 0.0)
>>> fluxes.uvr
uvr(0.0, 0.0)
The second example deals with extreme high flow conditions, where
water flows over both the forelands and their outer embankments
((|HM| + |HV|) < |H|):
>>> fluxes.h = 2.5
>>> model.calc_avr_uvr_v1()
>>> fluxes.avr
avr(0.5, 0.5)
>>> fluxes.uvr
uvr(2.061553, 2.061553)
"""
con = self.parameters.control.fastaccess
der = self.parameters.derived.fastaccess
flu = self.sequences.fluxes.fastaccess
for i in range(2):
if flu.h <= con.hm + der.hv[i]:
flu.avr[i] = 0.0
flu.uvr[i] = 0.0 # depends on [control=['if'], data=[]]
else:
flu.avr[i] = (flu.h - (con.hm + der.hv[i])) ** 2 * con.bnvr[i] / 2.0
flu.uvr[i] = (flu.h - (con.hm + der.hv[i])) * (1.0 + con.bnvr[i] ** 2) ** 0.5 # depends on [control=['for'], data=['i']] |
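The doctest's second example can be checked by hand: with h=2.5 the water stands 0.5 m above the embankment toe, giving the triangle area and wetted leg:
import numpy as np
h, hm, hv, bnvr = 2.5, 1.0, 1.0, 4.0
d = h - (hm + hv)                   # 0.5 m of water above the embankment toe
avr = d ** 2 * bnvr / 2.0           # triangle area: 0.5
uvr = d * np.sqrt(1.0 + bnvr ** 2)  # wetted leg: 0.5 * sqrt(17)
print(round(avr, 6), round(uvr, 6))  # 0.5 2.061553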
def on_new_line(self):
"""On new input line"""
self.set_cursor_position('eof')
self.current_prompt_pos = self.get_position('cursor')
self.new_input_line = False | def function[on_new_line, parameter[self]]:
constant[On new input line]
call[name[self].set_cursor_position, parameter[constant[eof]]]
name[self].current_prompt_pos assign[=] call[name[self].get_position, parameter[constant[cursor]]]
name[self].new_input_line assign[=] constant[False] | keyword[def] identifier[on_new_line] ( identifier[self] ):
literal[string]
identifier[self] . identifier[set_cursor_position] ( literal[string] )
identifier[self] . identifier[current_prompt_pos] = identifier[self] . identifier[get_position] ( literal[string] )
identifier[self] . identifier[new_input_line] = keyword[False] | def on_new_line(self):
"""On new input line"""
self.set_cursor_position('eof')
self.current_prompt_pos = self.get_position('cursor')
self.new_input_line = False |
def get_report_parts(self, apps, formats):
"""
Make report item texts in a specified format.
"""
for fmt in formats:
width = 100 if fmt is not None else tui.get_terminal_size()[0]
for sr in self.subreports:
sr.make_format(fmt, width)
logger.debug('Build a map for arguments and run\'s statistics ...')
value_mapping = {
'title': self.title,
'patterns': ', '.join([repr(pattern) for pattern in self.args.patterns]) or None,
'pattern_files': ', '.join(self.args.pattern_files) or None,
'hosts': ', '.join(self.args.hosts) or None,
'apps': u', '.join([
u'%s(%d)' % (app.name, app.matches) for app in apps.values() if app.matches > 0
]),
'version': __version__
}
filters = []
for flt in self.args.filters:
filters.append(' AND '.join(['%s=%r' % (k, v.pattern) for k, v in flt.items()]))
if filters:
value_mapping['filters'] = ' OR '.join(['(%s)' % item for item in filters])
else:
value_mapping['filters'] = filters[0] if filters else None
value_mapping.update(self.stats)
report = []
for fmt in formats:
if fmt == 'text':
logger.info('appends a text page report')
report.append(self.make_text_page(value_mapping))
elif fmt == 'html':
logger.info('appends a html page report')
report.append(self.make_html_page(value_mapping))
elif fmt == 'csv':
logger.info('extends with a list of csv subreports')
report.extend(self.make_csv_tables())
return report | def function[get_report_parts, parameter[self, apps, formats]]:
constant[
Make report item texts in a specified format.
]
for taget[name[fmt]] in starred[name[formats]] begin[:]
variable[width] assign[=] <ast.IfExp object at 0x7da18eb54100>
for taget[name[sr]] in starred[name[self].subreports] begin[:]
call[name[sr].make_format, parameter[name[fmt], name[width]]]
call[name[logger].debug, parameter[constant[Build a map for arguments and run's statistics ...]]]
variable[value_mapping] assign[=] dictionary[[<ast.Constant object at 0x7da18eb566e0>, <ast.Constant object at 0x7da18eb56590>, <ast.Constant object at 0x7da18eb57610>, <ast.Constant object at 0x7da18eb560b0>, <ast.Constant object at 0x7da18eb54be0>, <ast.Constant object at 0x7da18eb57ee0>], [<ast.Attribute object at 0x7da18eb54520>, <ast.BoolOp object at 0x7da18eb54b80>, <ast.BoolOp object at 0x7da18eb54d00>, <ast.BoolOp object at 0x7da18eb57940>, <ast.Call object at 0x7da18eb571c0>, <ast.Name object at 0x7da18eb55660>]]
variable[filters] assign[=] list[[]]
for taget[name[flt]] in starred[name[self].args.filters] begin[:]
call[name[filters].append, parameter[call[constant[ AND ].join, parameter[<ast.ListComp object at 0x7da18eb548e0>]]]]
if name[filters] begin[:]
call[name[value_mapping]][constant[filters]] assign[=] call[constant[ OR ].join, parameter[<ast.ListComp object at 0x7da18eb54fa0>]]
call[name[value_mapping].update, parameter[name[self].stats]]
variable[report] assign[=] list[[]]
for taget[name[fmt]] in starred[name[formats]] begin[:]
if compare[name[fmt] equal[==] constant[text]] begin[:]
call[name[logger].info, parameter[constant[appends a text page report]]]
call[name[report].append, parameter[call[name[self].make_text_page, parameter[name[value_mapping]]]]]
return[name[report]] | keyword[def] identifier[get_report_parts] ( identifier[self] , identifier[apps] , identifier[formats] ):
literal[string]
keyword[for] identifier[fmt] keyword[in] identifier[formats] :
identifier[width] = literal[int] keyword[if] identifier[fmt] keyword[is] keyword[not] keyword[None] keyword[else] identifier[tui] . identifier[get_terminal_size] ()[ literal[int] ]
keyword[for] identifier[sr] keyword[in] identifier[self] . identifier[subreports] :
identifier[sr] . identifier[make_format] ( identifier[fmt] , identifier[width] )
identifier[logger] . identifier[debug] ( literal[string] )
identifier[value_mapping] ={
literal[string] : identifier[self] . identifier[title] ,
literal[string] : literal[string] . identifier[join] ([ identifier[repr] ( identifier[pattern] ) keyword[for] identifier[pattern] keyword[in] identifier[self] . identifier[args] . identifier[patterns] ]) keyword[or] keyword[None] ,
literal[string] : literal[string] . identifier[join] ( identifier[self] . identifier[args] . identifier[pattern_files] ) keyword[or] keyword[None] ,
literal[string] : literal[string] . identifier[join] ( identifier[self] . identifier[args] . identifier[hosts] ) keyword[or] keyword[None] ,
literal[string] : literal[string] . identifier[join] ([
literal[string] %( identifier[app] . identifier[name] , identifier[app] . identifier[matches] ) keyword[for] identifier[app] keyword[in] identifier[apps] . identifier[values] () keyword[if] identifier[app] . identifier[matches] > literal[int]
]),
literal[string] : identifier[__version__]
}
identifier[filters] =[]
keyword[for] identifier[flt] keyword[in] identifier[self] . identifier[args] . identifier[filters] :
identifier[filters] . identifier[append] ( literal[string] . identifier[join] ([ literal[string] %( identifier[k] , identifier[v] . identifier[pattern] ) keyword[for] identifier[k] , identifier[v] keyword[in] identifier[flt] . identifier[items] ()]))
keyword[if] identifier[filters] :
identifier[value_mapping] [ literal[string] ]= literal[string] . identifier[join] ([ literal[string] % identifier[item] keyword[for] identifier[item] keyword[in] identifier[filters] ])
keyword[else] :
identifier[value_mapping] [ literal[string] ]= identifier[filters] [ literal[int] ] keyword[if] identifier[filters] keyword[else] keyword[None]
identifier[value_mapping] . identifier[update] ( identifier[self] . identifier[stats] )
identifier[report] =[]
keyword[for] identifier[fmt] keyword[in] identifier[formats] :
keyword[if] identifier[fmt] == literal[string] :
identifier[logger] . identifier[info] ( literal[string] )
identifier[report] . identifier[append] ( identifier[self] . identifier[make_text_page] ( identifier[value_mapping] ))
keyword[elif] identifier[fmt] == literal[string] :
identifier[logger] . identifier[info] ( literal[string] )
identifier[report] . identifier[append] ( identifier[self] . identifier[make_html_page] ( identifier[value_mapping] ))
keyword[elif] identifier[fmt] == literal[string] :
identifier[logger] . identifier[info] ( literal[string] )
identifier[report] . identifier[extend] ( identifier[self] . identifier[make_csv_tables] ())
keyword[return] identifier[report] | def get_report_parts(self, apps, formats):
"""
Make report item texts in a specified format.
"""
for fmt in formats:
width = 100 if fmt is not None else tui.get_terminal_size()[0]
for sr in self.subreports:
sr.make_format(fmt, width) # depends on [control=['for'], data=['sr']] # depends on [control=['for'], data=['fmt']]
logger.debug("Build a map for arguments and run's statistics ...")
value_mapping = {'title': self.title, 'patterns': ', '.join([repr(pattern) for pattern in self.args.patterns]) or None, 'pattern_files': ', '.join(self.args.pattern_files) or None, 'hosts': ', '.join(self.args.hosts) or None, 'apps': u', '.join([u'%s(%d)' % (app.name, app.matches) for app in apps.values() if app.matches > 0]), 'version': __version__}
filters = []
for flt in self.args.filters:
filters.append(' AND '.join(['%s=%r' % (k, v.pattern) for (k, v) in flt.items()])) # depends on [control=['for'], data=['flt']]
if filters:
value_mapping['filters'] = ' OR '.join(['(%s)' % item for item in filters]) # depends on [control=['if'], data=[]]
else:
value_mapping['filters'] = filters[0] if filters else None
value_mapping.update(self.stats)
report = []
for fmt in formats:
if fmt == 'text':
logger.info('appends a text page report')
report.append(self.make_text_page(value_mapping)) # depends on [control=['if'], data=[]]
elif fmt == 'html':
logger.info('appends a html page report')
report.append(self.make_html_page(value_mapping)) # depends on [control=['if'], data=[]]
elif fmt == 'csv':
logger.info('extends with a list of csv subreports')
report.extend(self.make_csv_tables()) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['fmt']]
return report |
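How the filter expressions are flattened into one string can be seen in isolation (the patterns are invented):
import re
filters_in = [{'host': re.compile('web.*'), 'level': re.compile('ERROR')}]
parts = [' AND '.join('%s=%r' % (k, v.pattern) for k, v in flt.items()) for flt in filters_in]
print(' OR '.join('(%s)' % p for p in parts))
# (host='web.*' AND level='ERROR')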
def noise_gaussian(self, mean, std):
"""Create a gaussian noise variable"""
assert std > 0
ng = self.sym.sym('ng_{:d}'.format(len(self.scope['ng'])))
self.scope['ng'].append(ng)
return mean + std*ng | def function[noise_gaussian, parameter[self, mean, std]]:
constant[Create a Gaussian noise variable]
assert[compare[name[std] greater[>] constant[0]]]
variable[ng] assign[=] call[name[self].sym.sym, parameter[call[constant[ng_{:d}].format, parameter[call[name[len], parameter[call[name[self].scope][constant[ng]]]]]]]]
call[call[name[self].scope][constant[ng]].append, parameter[name[ng]]]
return[binary_operation[name[mean] + binary_operation[name[std] * name[ng]]]] | keyword[def] identifier[noise_gaussian] ( identifier[self] , identifier[mean] , identifier[std] ):
literal[string]
keyword[assert] identifier[std] > literal[int]
identifier[ng] = identifier[self] . identifier[sym] . identifier[sym] ( literal[string] . identifier[format] ( identifier[len] ( identifier[self] . identifier[scope] [ literal[string] ])))
identifier[self] . identifier[scope] [ literal[string] ]. identifier[append] ( identifier[ng] )
keyword[return] identifier[mean] + identifier[std] * identifier[ng] | def noise_gaussian(self, mean, std):
"""Create a gaussian noise variable"""
assert std > 0
ng = self.sym.sym('ng_{:d}'.format(len(self.scope['ng'])))
self.scope['ng'].append(ng)
return mean + std * ng |
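A sketch with casadi standing in for self.sym (an assumption; any symbolic backend exposing a .sym factory would do):
import casadi
scope = {'ng': []}
ng = casadi.SX.sym('ng_{:d}'.format(len(scope['ng'])))
scope['ng'].append(ng)
mean, std = 0.0, 2.0
print(mean + std * ng)  # symbolic expression in the fresh noise variable ng_0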
def saveSheets(fn, *vsheets, confirm_overwrite=False):
'Save the given sheets `vsheets` under filename `fn`.'
givenpath = Path(fn)
# determine filetype to save as
filetype = ''
basename, ext = os.path.splitext(fn)
if ext:
filetype = ext[1:]
filetype = filetype or options.save_filetype
if len(vsheets) > 1:
if not fn.endswith('/'): # ending the path with / forces saving individual files into a directory
savefunc = getGlobals().get('multisave_' + filetype, None)
if savefunc:
# use specific multisave function
return savefunc(givenpath, *vsheets)
# more than one sheet; either no specific multisave for save filetype, or path ends with /
# save as individual files in the givenpath directory
if not givenpath.exists():
try:
os.makedirs(givenpath.resolve(), exist_ok=True)
except FileExistsError:
pass
assert givenpath.is_dir(), filetype + ' cannot save multiple sheets to non-dir'
# get save function to call
savefunc = getGlobals().get('save_' + filetype) or fail('no function save_'+filetype)
if givenpath.exists():
if confirm_overwrite:
confirm('%s already exists. overwrite? ' % fn)
status('saving %s sheets to %s' % (len(vsheets), givenpath.fqpn))
for vs in vsheets:
p = Path(os.path.join(givenpath.fqpn, vs.name+'.'+filetype))
savefunc(p, vs)
else:
# get save function to call
savefunc = getGlobals().get('save_' + filetype) or fail('no function save_'+filetype)
if givenpath.exists():
if confirm_overwrite:
confirm('%s already exists. overwrite? ' % fn)
status('saving to %s as %s' % (givenpath.fqpn, filetype))
savefunc(givenpath, vsheets[0]) | def function[saveSheets, parameter[fn]]:
constant[Save the given sheets `vsheets` under filename `fn`.]
variable[givenpath] assign[=] call[name[Path], parameter[name[fn]]]
variable[filetype] assign[=] constant[]
<ast.Tuple object at 0x7da20e9b21d0> assign[=] call[name[os].path.splitext, parameter[name[fn]]]
if name[ext] begin[:]
variable[filetype] assign[=] call[name[ext]][<ast.Slice object at 0x7da20e9b0460>]
variable[filetype] assign[=] <ast.BoolOp object at 0x7da20e9b18d0>
if compare[call[name[len], parameter[name[vsheets]]] greater[>] constant[1]] begin[:]
if <ast.UnaryOp object at 0x7da20e9b2d40> begin[:]
variable[savefunc] assign[=] call[call[name[getGlobals], parameter[]].get, parameter[binary_operation[constant[multisave_] + name[filetype]], constant[None]]]
if name[savefunc] begin[:]
return[call[name[savefunc], parameter[name[givenpath], <ast.Starred object at 0x7da18ede6440>]]]
if <ast.UnaryOp object at 0x7da18ede50c0> begin[:]
<ast.Try object at 0x7da18ede4d60>
assert[call[name[givenpath].is_dir, parameter[]]]
variable[savefunc] assign[=] <ast.BoolOp object at 0x7da18ede5510>
if call[name[givenpath].exists, parameter[]] begin[:]
if name[confirm_overwrite] begin[:]
call[name[confirm], parameter[binary_operation[constant[%s already exists. overwrite? ] <ast.Mod object at 0x7da2590d6920> name[fn]]]]
call[name[status], parameter[binary_operation[constant[saving %s sheets to %s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Call object at 0x7da1b1dfa260>, <ast.Attribute object at 0x7da1b1df8cd0>]]]]]
for taget[name[vs]] in starred[name[vsheets]] begin[:]
variable[p] assign[=] call[name[Path], parameter[call[name[os].path.join, parameter[name[givenpath].fqpn, binary_operation[binary_operation[name[vs].name + constant[.]] + name[filetype]]]]]]
call[name[savefunc], parameter[name[p], name[vs]]] | keyword[def] identifier[saveSheets] ( identifier[fn] ,* identifier[vsheets] , identifier[confirm_overwrite] = keyword[False] ):
literal[string]
identifier[givenpath] = identifier[Path] ( identifier[fn] )
identifier[filetype] = literal[string]
identifier[basename] , identifier[ext] = identifier[os] . identifier[path] . identifier[splitext] ( identifier[fn] )
keyword[if] identifier[ext] :
identifier[filetype] = identifier[ext] [ literal[int] :]
identifier[filetype] = identifier[filetype] keyword[or] identifier[options] . identifier[save_filetype]
keyword[if] identifier[len] ( identifier[vsheets] )> literal[int] :
keyword[if] keyword[not] identifier[fn] . identifier[endswith] ( literal[string] ):
identifier[savefunc] = identifier[getGlobals] (). identifier[get] ( literal[string] + identifier[filetype] , keyword[None] )
keyword[if] identifier[savefunc] :
keyword[return] identifier[savefunc] ( identifier[givenpath] ,* identifier[vsheets] )
keyword[if] keyword[not] identifier[givenpath] . identifier[exists] ():
keyword[try] :
identifier[os] . identifier[makedirs] ( identifier[givenpath] . identifier[resolve] (), identifier[exist_ok] = keyword[True] )
keyword[except] identifier[FileExistsError] :
keyword[pass]
keyword[assert] identifier[givenpath] . identifier[is_dir] (), identifier[filetype] + literal[string]
identifier[savefunc] = identifier[getGlobals] (). identifier[get] ( literal[string] + identifier[filetype] ) keyword[or] identifier[fail] ( literal[string] + identifier[filetype] )
keyword[if] identifier[givenpath] . identifier[exists] ():
keyword[if] identifier[confirm_overwrite] :
identifier[confirm] ( literal[string] % identifier[fn] )
identifier[status] ( literal[string] %( identifier[len] ( identifier[vsheets] ), identifier[givenpath] . identifier[fqpn] ))
keyword[for] identifier[vs] keyword[in] identifier[vsheets] :
identifier[p] = identifier[Path] ( identifier[os] . identifier[path] . identifier[join] ( identifier[givenpath] . identifier[fqpn] , identifier[vs] . identifier[name] + literal[string] + identifier[filetype] ))
identifier[savefunc] ( identifier[p] , identifier[vs] )
keyword[else] :
identifier[savefunc] = identifier[getGlobals] (). identifier[get] ( literal[string] + identifier[filetype] ) keyword[or] identifier[fail] ( literal[string] + identifier[filetype] )
keyword[if] identifier[givenpath] . identifier[exists] ():
keyword[if] identifier[confirm_overwrite] :
identifier[confirm] ( literal[string] % identifier[fn] )
identifier[status] ( literal[string] %( identifier[givenpath] . identifier[fqpn] , identifier[filetype] ))
identifier[savefunc] ( identifier[givenpath] , identifier[vsheets] [ literal[int] ]) | def saveSheets(fn, *vsheets, confirm_overwrite=False):
"""Save sheet `vs` with given filename `fn`."""
givenpath = Path(fn)
# determine filetype to save as
filetype = ''
(basename, ext) = os.path.splitext(fn)
if ext:
filetype = ext[1:] # depends on [control=['if'], data=[]]
filetype = filetype or options.save_filetype
if len(vsheets) > 1:
if not fn.endswith('/'): # ending the path with / forces saving individual files into a directory
savefunc = getGlobals().get('multisave_' + filetype, None)
if savefunc:
# use specific multisave function
return savefunc(givenpath, *vsheets) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
# more than one sheet; either no specific multisave for save filetype, or path ends with /
# save as individual files in the givenpath directory
if not givenpath.exists():
try:
os.makedirs(givenpath.resolve(), exist_ok=True) # depends on [control=['try'], data=[]]
except FileExistsError:
pass # depends on [control=['except'], data=[]] # depends on [control=['if'], data=[]]
assert givenpath.is_dir(), filetype + ' cannot save multiple sheets to non-dir'
# get save function to call
savefunc = getGlobals().get('save_' + filetype) or fail('no function save_' + filetype)
if givenpath.exists():
if confirm_overwrite:
confirm('%s already exists. overwrite? ' % fn) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
status('saving %s sheets to %s' % (len(vsheets), givenpath.fqpn))
for vs in vsheets:
p = Path(os.path.join(givenpath.fqpn, vs.name + '.' + filetype))
savefunc(p, vs) # depends on [control=['for'], data=['vs']] # depends on [control=['if'], data=[]]
else:
# get save function to call
savefunc = getGlobals().get('save_' + filetype) or fail('no function save_' + filetype)
if givenpath.exists():
if confirm_overwrite:
confirm('%s already exists. overwrite? ' % fn) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
status('saving to %s as %s' % (givenpath.fqpn, filetype))
savefunc(givenpath, vsheets[0]) |
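The 'save_' + filetype lookup pattern above, isolated (save_csv here is a hypothetical registered saver, and globals() stands in for getGlobals()):
import os
def save_csv(path, sheet):
    print('would write', sheet, 'to', path)
def get_saver(fn, default_filetype='tsv'):
    ext = os.path.splitext(fn)[1]
    filetype = ext[1:] if ext else default_filetype
    return globals().get('save_' + filetype)
saver = get_saver('report.csv')
saver('report.csv', 'sheet1')  # would write sheet1 to report.csv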
def get_cam_bounds(self):
"""Return the bounds of the camera in x, y, xMax, and yMax format."""
world_pos = self.get_world_pos()
screen_res = Ragnarok.get_world().get_backbuffer_size() * .5
return (self.pan.X - screen_res.X), (self.pan.Y - screen_res.Y), (self.pan.X + screen_res.X), (
self.pan.Y + screen_res.Y) | def function[get_cam_bounds, parameter[self]]:
constant[Return the bounds of the camera in x, y, xMax, and yMax format.]
variable[world_pos] assign[=] call[name[self].get_world_pos, parameter[]]
variable[screen_res] assign[=] binary_operation[call[call[name[Ragnarok].get_world, parameter[]].get_backbuffer_size, parameter[]] * constant[0.5]]
return[tuple[[<ast.BinOp object at 0x7da18f720040>, <ast.BinOp object at 0x7da18f720820>, <ast.BinOp object at 0x7da18f723a30>, <ast.BinOp object at 0x7da18f7221d0>]]] | keyword[def] identifier[get_cam_bounds] ( identifier[self] ):
literal[string]
identifier[world_pos] = identifier[self] . identifier[get_world_pos] ()
identifier[screen_res] = identifier[Ragnarok] . identifier[get_world] (). identifier[get_backbuffer_size] ()* literal[int]
keyword[return] ( identifier[self] . identifier[pan] . identifier[X] - identifier[screen_res] . identifier[X] ),( identifier[self] . identifier[pan] . identifier[Y] - identifier[screen_res] . identifier[Y] ),( identifier[self] . identifier[pan] . identifier[X] + identifier[screen_res] . identifier[X] ),(
identifier[self] . identifier[pan] . identifier[Y] + identifier[screen_res] . identifier[Y] ) | def get_cam_bounds(self):
"""Return the bounds of the camera in x, y, xMax, and yMax format."""
world_pos = self.get_world_pos()
screen_res = Ragnarok.get_world().get_backbuffer_size() * 0.5
return (self.pan.X - screen_res.X, self.pan.Y - screen_res.Y, self.pan.X + screen_res.X, self.pan.Y + screen_res.Y) |
def wrap(item, args=None, krgs=None, **kwargs):
"""Wraps the given item content between horizontal lines. Item can be a
string or a function.
**Examples**:
::
qprompt.wrap("Hi, this will be wrapped.") # String item.
qprompt.wrap(myfunc, [arg1, arg2], {'krgk': krgv}) # Func item.
"""
with Wrap(**kwargs):
if callable(item):
args = args or []
krgs = krgs or {}
item(*args, **krgs)
else:
echo(item) | def function[wrap, parameter[item, args, krgs]]:
constant[Wraps the given item content between horizontal lines. Item can be a
string or a function.
**Examples**:
::
qprompt.wrap("Hi, this will be wrapped.") # String item.
qprompt.wrap(myfunc, [arg1, arg2], {'krgk': krgv}) # Func item.
]
with call[name[Wrap], parameter[]] begin[:]
if call[name[callable], parameter[name[item]]] begin[:]
variable[args] assign[=] <ast.BoolOp object at 0x7da20c990340>
variable[krgs] assign[=] <ast.BoolOp object at 0x7da20c991e10>
call[name[item], parameter[<ast.Starred object at 0x7da20c993f10>]] | keyword[def] identifier[wrap] ( identifier[item] , identifier[args] = keyword[None] , identifier[krgs] = keyword[None] ,** identifier[kwargs] ):
literal[string]
keyword[with] identifier[Wrap] (** identifier[kwargs] ):
keyword[if] identifier[callable] ( identifier[item] ):
identifier[args] = identifier[args] keyword[or] []
identifier[krgs] = identifier[krgs] keyword[or] {}
identifier[item] (* identifier[args] ,** identifier[krgs] )
keyword[else] :
identifier[echo] ( identifier[item] ) | def wrap(item, args=None, krgs=None, **kwargs):
"""Wraps the given item content between horizontal lines. Item can be a
string or a function.
**Examples**:
::
qprompt.wrap("Hi, this will be wrapped.") # String item.
qprompt.wrap(myfunc, [arg1, arg2], {'krgk': krgv}) # Func item.
"""
with Wrap(**kwargs):
if callable(item):
args = args or []
krgs = krgs or {}
item(*args, **krgs) # depends on [control=['if'], data=[]]
else:
echo(item) # depends on [control=['with'], data=[]] |
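Usage mirroring the docstring, assuming the qprompt module above is importable (greet and its arguments are made up):
import qprompt
def greet(name, punct='!'):
    print('Hello, ' + name + punct)
qprompt.wrap('Hi, this will be wrapped.')       # string item
qprompt.wrap(greet, ['world'], {'punct': '?'})  # function item with args/kwargs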
def saveVirtualOutputs(self,outdict):
""" Assign in-memory versions of generated products for this
``imageObject`` based on dictionary 'outdict'.
"""
if not self.inmemory:
return
for outname in outdict:
self.virtualOutputs[outname] = outdict[outname] | def function[saveVirtualOutputs, parameter[self, outdict]]:
constant[ Assign in-memory versions of generated products for this
``imageObject`` based on dictionary 'outdict'.
]
if <ast.UnaryOp object at 0x7da1b1b4b8e0> begin[:]
return[None]
for taget[name[outname]] in starred[name[outdict]] begin[:]
call[name[self].virtualOutputs][name[outname]] assign[=] call[name[outdict]][name[outname]] | keyword[def] identifier[saveVirtualOutputs] ( identifier[self] , identifier[outdict] ):
literal[string]
keyword[if] keyword[not] identifier[self] . identifier[inmemory] :
keyword[return]
keyword[for] identifier[outname] keyword[in] identifier[outdict] :
identifier[self] . identifier[virtualOutputs] [ identifier[outname] ]= identifier[outdict] [ identifier[outname] ] | def saveVirtualOutputs(self, outdict):
""" Assign in-memory versions of generated products for this
``imageObject`` based on dictionary 'outdict'.
"""
if not self.inmemory:
return # depends on [control=['if'], data=[]]
for outname in outdict:
self.virtualOutputs[outname] = outdict[outname] # depends on [control=['for'], data=['outname']] |
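A tiny illustration of the guard above: nothing is stored unless the object was built with inmemory=True (the class stub is a stand-in for imageObject):
class Img:
    def __init__(self, inmemory):
        self.inmemory, self.virtualOutputs = inmemory, {}
    def saveVirtualOutputs(self, outdict):
        if not self.inmemory:
            return
        for outname in outdict:
            self.virtualOutputs[outname] = outdict[outname]
img = Img(inmemory=True)
img.saveVirtualOutputs({'drz.fits': object()})
print(list(img.virtualOutputs))  # ['drz.fits']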
def get_jp2_bit_depth(stream):
"""Reads bit encoding depth of jpeg2000 file in binary stream format
:param stream: binary stream format
:type stream: Binary I/O (e.g. io.BytesIO, io.BufferedReader, ...)
:return: bit depth
:rtype: int
"""
stream.seek(0)
while True:
read_buffer = stream.read(8)
if len(read_buffer) < 8:
raise ValueError('Image Header Box not found in Jpeg2000 file')
_, box_id = struct.unpack('>I4s', read_buffer)
if box_id == b'ihdr':
read_buffer = stream.read(14)
params = struct.unpack('>IIHBBBB', read_buffer)
return (params[3] & 0x7f) + 1 | def function[get_jp2_bit_depth, parameter[stream]]:
constant[Reads bit encoding depth of jpeg2000 file in binary stream format
:param stream: binary stream format
:type stream: Binary I/O (e.g. io.BytesIO, io.BufferedReader, ...)
:return: bit depth
:rtype: int
]
call[name[stream].seek, parameter[constant[0]]]
while constant[True] begin[:]
variable[read_buffer] assign[=] call[name[stream].read, parameter[constant[8]]]
if compare[call[name[len], parameter[name[read_buffer]]] less[<] constant[8]] begin[:]
<ast.Raise object at 0x7da1b18b4040>
<ast.Tuple object at 0x7da1b18b4d00> assign[=] call[name[struct].unpack, parameter[constant[>I4s], name[read_buffer]]]
if compare[name[box_id] equal[==] constant[b'ihdr']] begin[:]
variable[read_buffer] assign[=] call[name[stream].read, parameter[constant[14]]]
variable[params] assign[=] call[name[struct].unpack, parameter[constant[>IIHBBBB], name[read_buffer]]]
return[binary_operation[binary_operation[call[name[params]][constant[3]] <ast.BitAnd object at 0x7da2590d6b60> constant[127]] + constant[1]]] | keyword[def] identifier[get_jp2_bit_depth] ( identifier[stream] ):
literal[string]
identifier[stream] . identifier[seek] ( literal[int] )
keyword[while] keyword[True] :
identifier[read_buffer] = identifier[stream] . identifier[read] ( literal[int] )
keyword[if] identifier[len] ( identifier[read_buffer] )< literal[int] :
keyword[raise] identifier[ValueError] ( literal[string] )
identifier[_] , identifier[box_id] = identifier[struct] . identifier[unpack] ( literal[string] , identifier[read_buffer] )
keyword[if] identifier[box_id] == literal[string] :
identifier[read_buffer] = identifier[stream] . identifier[read] ( literal[int] )
identifier[params] = identifier[struct] . identifier[unpack] ( literal[string] , identifier[read_buffer] )
keyword[return] ( identifier[params] [ literal[int] ]& literal[int] )+ literal[int] | def get_jp2_bit_depth(stream):
"""Reads bit encoding depth of jpeg2000 file in binary stream format
:param stream: binary stream format
:type stream: Binary I/O (e.g. io.BytesIO, io.BufferedReader, ...)
:return: bit depth
:rtype: int
"""
stream.seek(0)
while True:
read_buffer = stream.read(8)
if len(read_buffer) < 8:
raise ValueError('Image Header Box not found in Jpeg2000 file') # depends on [control=['if'], data=[]]
(_, box_id) = struct.unpack('>I4s', read_buffer)
if box_id == b'ihdr':
read_buffer = stream.read(14)
params = struct.unpack('>IIHBBBB', read_buffer)
return (params[3] & 127) + 1 # depends on [control=['if'], data=[]] # depends on [control=['while'], data=[]] |
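A runnable check of the parser above on a hand-built stream holding a bare 'ihdr' box; a real JPEG2000 file wraps this box in a signature and a 'jp2h' superbox, so the payload below is an illustration, not a valid file.

import io
import struct

ihdr = struct.pack('>IIHBBBB', 512, 512, 3, 7, 7, 0, 0)  # bpc field 7 -> 8-bit depth
box = struct.pack('>I4s', 8 + len(ihdr), b'ihdr') + ihdr
assert get_jp2_bit_depth(io.BytesIO(box)) == 8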
def append(self, exp):
"""
Args:
exp (Experience):
"""
if self._curr_size < self.max_size:
self._assign(self._curr_pos, exp)
self._curr_pos = (self._curr_pos + 1) % self.max_size
self._curr_size += 1
else:
self._assign(self._curr_pos, exp)
self._curr_pos = (self._curr_pos + 1) % self.max_size | def function[append, parameter[self, exp]]:
constant[
Args:
exp (Experience):
]
if compare[name[self]._curr_size less[<] name[self].max_size] begin[:]
call[name[self]._assign, parameter[name[self]._curr_pos, name[exp]]]
name[self]._curr_pos assign[=] binary_operation[binary_operation[name[self]._curr_pos + constant[1]] <ast.Mod object at 0x7da2590d6920> name[self].max_size]
<ast.AugAssign object at 0x7da20e9b14e0> | keyword[def] identifier[append] ( identifier[self] , identifier[exp] ):
literal[string]
keyword[if] identifier[self] . identifier[_curr_size] < identifier[self] . identifier[max_size] :
identifier[self] . identifier[_assign] ( identifier[self] . identifier[_curr_pos] , identifier[exp] )
identifier[self] . identifier[_curr_pos] =( identifier[self] . identifier[_curr_pos] + literal[int] )% identifier[self] . identifier[max_size]
identifier[self] . identifier[_curr_size] += literal[int]
keyword[else] :
identifier[self] . identifier[_assign] ( identifier[self] . identifier[_curr_pos] , identifier[exp] )
identifier[self] . identifier[_curr_pos] =( identifier[self] . identifier[_curr_pos] + literal[int] )% identifier[self] . identifier[max_size] | def append(self, exp):
"""
Args:
exp (Experience):
"""
if self._curr_size < self.max_size:
self._assign(self._curr_pos, exp)
self._curr_pos = (self._curr_pos + 1) % self.max_size
self._curr_size += 1 # depends on [control=['if'], data=[]]
else:
self._assign(self._curr_pos, exp)
self._curr_pos = (self._curr_pos + 1) % self.max_size |
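A self-contained ring-buffer sketch demonstrating the circular overwrite that append implements; RingBuffer and its list-backed _assign are assumptions standing in for the real Experience store.

class RingBuffer:
    def __init__(self, max_size):
        self.max_size = max_size
        self._curr_size = 0
        self._curr_pos = 0
        self._data = [None] * max_size

    def _assign(self, pos, exp):
        self._data[pos] = exp

    def append(self, exp):
        # Same behaviour as above: once full, the oldest slot is overwritten.
        self._assign(self._curr_pos, exp)
        self._curr_pos = (self._curr_pos + 1) % self.max_size
        if self._curr_size < self.max_size:
            self._curr_size += 1

buf = RingBuffer(3)
for exp in 'abcd':
    buf.append(exp)
print(buf._data)  # ['d', 'b', 'c'] -- 'a' was overwritten first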
def _fix(interval):
'''
Helper function for ``GenomeIntervalTree.from_bed`` and ``.from_table``.
Data tables may contain intervals with begin >= end. Such intervals lead to infinite recursions and
other unpleasant behaviour, so something has to be done about them. We 'fix' them by simply setting end = begin+1.
'''
if interval.begin >= interval.end:
warnings.warn("Interval with reversed coordinates (begin >= end) detected when reading data. Interval was automatically fixed to point interval [begin, begin+1).")
return Interval(interval.begin, interval.begin+1, interval.data)
else:
return interval | def function[_fix, parameter[interval]]:
constant[
Helper function for ``GenomeIntervalTree.from_bed`` and ``.from_table``.
Data tables may contain intervals with begin >= end. Such intervals lead to infinite recursions and
other unpleasant behaviour, so something has to be done about them. We 'fix' them by simply setting end = begin+1.
]
if compare[name[interval].begin greater_or_equal[>=] name[interval].end] begin[:]
call[name[warnings].warn, parameter[constant[Interval with reversed coordinates (begin >= end) detected when reading data. Interval was automatically fixed to point interval [begin, begin+1).]]]
return[call[name[Interval], parameter[name[interval].begin, binary_operation[name[interval].begin + constant[1]], name[interval].data]]] | keyword[def] identifier[_fix] ( identifier[interval] ):
literal[string]
keyword[if] identifier[interval] . identifier[begin] >= identifier[interval] . identifier[end] :
identifier[warnings] . identifier[warn] ( literal[string] )
keyword[return] identifier[Interval] ( identifier[interval] . identifier[begin] , identifier[interval] . identifier[begin] + literal[int] , identifier[interval] . identifier[data] )
keyword[else] :
keyword[return] identifier[interval] | def _fix(interval):
"""
Helper function for ``GenomeIntervalTree.from_bed`` and ``.from_table``.
Data tables may contain intervals with begin >= end. Such intervals lead to infinite recursions and
other unpleasant behaviour, so something has to be done about them. We 'fix' them by simply setting end = begin+1.
"""
if interval.begin >= interval.end:
warnings.warn('Interval with reversed coordinates (begin >= end) detected when reading data. Interval was automatically fixed to point interval [begin, begin+1).')
return Interval(interval.begin, interval.begin + 1, interval.data) # depends on [control=['if'], data=[]]
else:
return interval |
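Assuming the intervaltree package's Interval type, which the signature suggests, the fix-up behaves as follows:

import warnings
from intervaltree import Interval

good = Interval(5, 10, 'ok')
bad = Interval(10, 10, 'degenerate')  # begin >= end
assert _fix(good) is good             # well-formed intervals pass through unchanged
with warnings.catch_warnings():
    warnings.simplefilter('ignore')   # suppress the fix-up warning for the demo
    fixed = _fix(bad)
assert (fixed.begin, fixed.end) == (10, 11)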
def parseExtensionArgs(self, args, strict=False):
"""Parse the unqualified simple registration request
parameters and add them to this object.
This method is essentially the inverse of
C{L{getExtensionArgs}}. This method restores the serialized simple
registration request fields.
If you are extracting arguments from a standard OpenID
checkid_* request, you probably want to use C{L{fromOpenIDRequest}},
which will extract the sreg namespace and arguments from the
OpenID request. This method is intended for cases where the
OpenID server needs more control over how the arguments are
parsed than that method provides.
>>> args = message.getArgs(ns_uri)
>>> request.parseExtensionArgs(args)
@param args: The unqualified simple registration arguments
@type args: {str:str}
@param strict: Whether requests with fields that are not
defined in the simple registration specification should be
tolerated (and ignored)
@type strict: bool
@returns: None; updates this object
"""
for list_name in ['required', 'optional']:
required = (list_name == 'required')
items = args.get(list_name)
if items:
for field_name in items.split(','):
try:
self.requestField(field_name, required, strict)
except ValueError:
if strict:
raise
self.policy_url = args.get('policy_url') | def function[parseExtensionArgs, parameter[self, args, strict]]:
constant[Parse the unqualified simple registration request
parameters and add them to this object.
This method is essentially the inverse of
C{L{getExtensionArgs}}. This method restores the serialized simple
registration request fields.
If you are extracting arguments from a standard OpenID
checkid_* request, you probably want to use C{L{fromOpenIDRequest}},
which will extract the sreg namespace and arguments from the
OpenID request. This method is intended for cases where the
OpenID server needs more control over how the arguments are
parsed than that method provides.
>>> args = message.getArgs(ns_uri)
>>> request.parseExtensionArgs(args)
@param args: The unqualified simple registration arguments
@type args: {str:str}
@param strict: Whether requests with fields that are not
defined in the simple registration specification should be
tolerated (and ignored)
@type strict: bool
@returns: None; updates this object
]
for taget[name[list_name]] in starred[list[[<ast.Constant object at 0x7da18f722320>, <ast.Constant object at 0x7da18f721b70>]]] begin[:]
variable[required] assign[=] compare[name[list_name] equal[==] constant[required]]
variable[items] assign[=] call[name[args].get, parameter[name[list_name]]]
if name[items] begin[:]
for taget[name[field_name]] in starred[call[name[items].split, parameter[constant[,]]]] begin[:]
<ast.Try object at 0x7da18f723040>
name[self].policy_url assign[=] call[name[args].get, parameter[constant[policy_url]]] | keyword[def] identifier[parseExtensionArgs] ( identifier[self] , identifier[args] , identifier[strict] = keyword[False] ):
literal[string]
keyword[for] identifier[list_name] keyword[in] [ literal[string] , literal[string] ]:
identifier[required] =( identifier[list_name] == literal[string] )
identifier[items] = identifier[args] . identifier[get] ( identifier[list_name] )
keyword[if] identifier[items] :
keyword[for] identifier[field_name] keyword[in] identifier[items] . identifier[split] ( literal[string] ):
keyword[try] :
identifier[self] . identifier[requestField] ( identifier[field_name] , identifier[required] , identifier[strict] )
keyword[except] identifier[ValueError] :
keyword[if] identifier[strict] :
keyword[raise]
identifier[self] . identifier[policy_url] = identifier[args] . identifier[get] ( literal[string] ) | def parseExtensionArgs(self, args, strict=False):
"""Parse the unqualified simple registration request
parameters and add them to this object.
This method is essentially the inverse of
C{L{getExtensionArgs}}. This method restores the serialized simple
registration request fields.
If you are extracting arguments from a standard OpenID
checkid_* request, you probably want to use C{L{fromOpenIDRequest}},
which will extract the sreg namespace and arguments from the
OpenID request. This method is intended for cases where the
OpenID server needs more control over how the arguments are
parsed than that method provides.
>>> args = message.getArgs(ns_uri)
>>> request.parseExtensionArgs(args)
@param args: The unqualified simple registration arguments
@type args: {str:str}
@param strict: Whether requests with fields that are not
defined in the simple registration specification should be
tolerated (and ignored)
@type strict: bool
@returns: None; updates this object
"""
for list_name in ['required', 'optional']:
required = list_name == 'required'
items = args.get(list_name)
if items:
for field_name in items.split(','):
try:
self.requestField(field_name, required, strict) # depends on [control=['try'], data=[]]
except ValueError:
if strict:
raise # depends on [control=['if'], data=[]] # depends on [control=['except'], data=[]] # depends on [control=['for'], data=['field_name']] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['list_name']]
self.policy_url = args.get('policy_url') |
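A hypothetical round trip, assuming this method lives on python-openid's SRegRequest; email, fullname, and nickname are standard sreg field names.

from openid.extensions.sreg import SRegRequest

req = SRegRequest()
req.parseExtensionArgs({
    'required': 'email',
    'optional': 'fullname,nickname',
    'policy_url': 'http://example.com/policy',
})
assert req.required == ['email']
assert req.optional == ['fullname', 'nickname']
assert req.policy_url == 'http://example.com/policy'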
def process_slice(self, b_rot90=None):
'''
Processes a single slice.
'''
if b_rot90:
self._Mnp_2Dslice = np.rot90(self._Mnp_2Dslice)
if self.func == 'invertIntensities':
self.invert_slice_intensities() | def function[process_slice, parameter[self, b_rot90]]:
constant[
Processes a single slice.
]
if name[b_rot90] begin[:]
name[self]._Mnp_2Dslice assign[=] call[name[np].rot90, parameter[name[self]._Mnp_2Dslice]]
if compare[name[self].func equal[==] constant[invertIntensities]] begin[:]
call[name[self].invert_slice_intensities, parameter[]] | keyword[def] identifier[process_slice] ( identifier[self] , identifier[b_rot90] = keyword[None] ):
literal[string]
keyword[if] identifier[b_rot90] :
identifier[self] . identifier[_Mnp_2Dslice] = identifier[np] . identifier[rot90] ( identifier[self] . identifier[_Mnp_2Dslice] )
keyword[if] identifier[self] . identifier[func] == literal[string] :
identifier[self] . identifier[invert_slice_intensities] () | def process_slice(self, b_rot90=None):
"""
Processes a single slice.
"""
if b_rot90:
self._Mnp_2Dslice = np.rot90(self._Mnp_2Dslice) # depends on [control=['if'], data=[]]
if self.func == 'invertIntensities':
self.invert_slice_intensities() # depends on [control=['if'], data=[]] |
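A minimal host object to exercise process_slice; the class name and the max-minus-value rule used here for invert_slice_intensities are assumptions for the sketch.

import numpy as np

class _SliceHolder:
    def __init__(self, slice2d, func=''):
        self._Mnp_2Dslice = slice2d
        self.func = func

    def invert_slice_intensities(self):
        # Assumed inversion rule, for illustration only.
        self._Mnp_2Dslice = self._Mnp_2Dslice.max() - self._Mnp_2Dslice

    def process_slice(self, b_rot90=None):
        if b_rot90:
            self._Mnp_2Dslice = np.rot90(self._Mnp_2Dslice)
        if self.func == 'invertIntensities':
            self.invert_slice_intensities()

s = _SliceHolder(np.arange(4).reshape(2, 2), func='invertIntensities')
s.process_slice(b_rot90=True)
print(s._Mnp_2Dslice)  # rotated 90 degrees, then intensity-inverted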
def parse_raw_token(self, raw_token):
"""Parse token and secret from raw token response."""
if raw_token is None:
return (None, None, None)
# Load as json first then parse as query string
try:
token_data = json.loads(raw_token)
except ValueError:
qs = parse_qs(raw_token)
token = qs.get('access_token', [None])[0]
refresh_token = qs.get('refresh_token', [None])[0]
expires_at = qs.get(self.expires_in_key, [None])[0]
else:
token = token_data.get('access_token', None)
refresh_token = token_data.get('refresh_token', None)
expires_at = token_data.get(self.expires_in_key, None)
if expires_at:
expires_at = timezone.now() + timedelta(seconds=int(expires_at))
return (token, refresh_token, expires_at) | def function[parse_raw_token, parameter[self, raw_token]]:
constant[Parse token and secret from raw token response.]
if compare[name[raw_token] is constant[None]] begin[:]
return[tuple[[<ast.Constant object at 0x7da1b26524a0>, <ast.Constant object at 0x7da1b2652470>, <ast.Constant object at 0x7da1b2652440>]]]
<ast.Try object at 0x7da1b26523e0>
if name[expires_at] begin[:]
variable[expires_at] assign[=] binary_operation[call[name[timezone].now, parameter[]] + call[name[timedelta], parameter[]]]
return[tuple[[<ast.Name object at 0x7da1b2651c60>, <ast.Name object at 0x7da1b26527d0>, <ast.Name object at 0x7da1b2652800>]]] | keyword[def] identifier[parse_raw_token] ( identifier[self] , identifier[raw_token] ):
literal[string]
keyword[if] identifier[raw_token] keyword[is] keyword[None] :
keyword[return] ( keyword[None] , keyword[None] , keyword[None] )
keyword[try] :
identifier[token_data] = identifier[json] . identifier[loads] ( identifier[raw_token] )
keyword[except] identifier[ValueError] :
identifier[qs] = identifier[parse_qs] ( identifier[raw_token] )
identifier[token] = identifier[qs] . identifier[get] ( literal[string] ,[ keyword[None] ])[ literal[int] ]
identifier[refresh_token] = identifier[qs] . identifier[get] ( literal[string] ,[ keyword[None] ])[ literal[int] ]
identifier[expires_at] = identifier[qs] . identifier[get] ( identifier[self] . identifier[expires_in_key] ,[ keyword[None] ])[ literal[int] ]
keyword[else] :
identifier[token] = identifier[token_data] . identifier[get] ( literal[string] , keyword[None] )
identifier[refresh_token] = identifier[token_data] . identifier[get] ( literal[string] , keyword[None] )
identifier[expires_at] = identifier[token_data] . identifier[get] ( identifier[self] . identifier[expires_in_key] , keyword[None] )
keyword[if] identifier[expires_at] :
identifier[expires_at] = identifier[timezone] . identifier[now] ()+ identifier[timedelta] ( identifier[seconds] = identifier[int] ( identifier[expires_at] ))
keyword[return] ( identifier[token] , identifier[refresh_token] , identifier[expires_at] ) | def parse_raw_token(self, raw_token):
"""Parse token and secret from raw token response."""
if raw_token is None:
return (None, None, None) # depends on [control=['if'], data=[]]
# Load as json first then parse as query string
try:
token_data = json.loads(raw_token) # depends on [control=['try'], data=[]]
except ValueError:
qs = parse_qs(raw_token)
token = qs.get('access_token', [None])[0]
refresh_token = qs.get('refresh_token', [None])[0]
expires_at = qs.get(self.expires_in_key, [None])[0] # depends on [control=['except'], data=[]]
else:
token = token_data.get('access_token', None)
refresh_token = token_data.get('refresh_token', None)
expires_at = token_data.get(self.expires_in_key, None)
if expires_at:
expires_at = timezone.now() + timedelta(seconds=int(expires_at)) # depends on [control=['if'], data=[]]
return (token, refresh_token, expires_at) |
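The fallback branch leans on parse_qs returning lists; a standalone illustration of that idiom (expires_in is assumed to be the usual value of self.expires_in_key):

from urllib.parse import parse_qs

qs = parse_qs('access_token=abc&refresh_token=xyz&expires_in=3600')
print(qs.get('access_token', [None])[0])   # 'abc'
print(qs.get('refresh_token', [None])[0])  # 'xyz'
print(qs.get('missing', [None])[0])        # None -- the [None] default in action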
def concat(objs, axis=0, join='outer', join_axes=None, ignore_index=False,
keys=None, levels=None, names=None, verify_integrity=False,
sort=None, copy=True):
"""
Concatenate pandas objects along a particular axis with optional set logic
along the other axes.
Can also add a layer of hierarchical indexing on the concatenation axis,
which may be useful if the labels are the same (or overlapping) on
the passed axis number.
Parameters
----------
objs : a sequence or mapping of Series, DataFrame, or Panel objects
If a dict is passed, the sorted keys will be used as the `keys`
argument, unless it is passed, in which case the values will be
selected (see below). Any None objects will be dropped silently unless
they are all None in which case a ValueError will be raised.
axis : {0/'index', 1/'columns'}, default 0
The axis to concatenate along.
join : {'inner', 'outer'}, default 'outer'
How to handle indexes on other axis (or axes).
join_axes : list of Index objects
Specific indexes to use for the other n - 1 axes instead of performing
inner/outer set logic.
ignore_index : bool, default False
If True, do not use the index values along the concatenation axis. The
resulting axis will be labeled 0, ..., n - 1. This is useful if you are
concatenating objects where the concatenation axis does not have
meaningful indexing information. Note the index values on the other
axes are still respected in the join.
keys : sequence, default None
If multiple levels passed, should contain tuples. Construct
hierarchical index using the passed keys as the outermost level.
levels : list of sequences, default None
Specific levels (unique values) to use for constructing a
MultiIndex. Otherwise they will be inferred from the keys.
names : list, default None
Names for the levels in the resulting hierarchical index.
verify_integrity : bool, default False
Check whether the new concatenated axis contains duplicates. This can
be very expensive relative to the actual data concatenation.
sort : bool, default None
Sort non-concatenation axis if it is not already aligned when `join`
is 'outer'. The current default of sorting is deprecated and will
change to not-sorting in a future version of pandas.
Explicitly pass ``sort=True`` to silence the warning and sort.
Explicitly pass ``sort=False`` to silence the warning and not sort.
This has no effect when ``join='inner'``, which already preserves
the order of the non-concatenation axis.
.. versionadded:: 0.23.0
copy : bool, default True
If False, do not copy data unnecessarily.
Returns
-------
object, type of objs
When concatenating all ``Series`` along the index (axis=0), a
``Series`` is returned. When ``objs`` contains at least one
``DataFrame``, a ``DataFrame`` is returned. When concatenating along
the columns (axis=1), a ``DataFrame`` is returned.
See Also
--------
Series.append : Concatenate Series.
DataFrame.append : Concatenate DataFrames.
DataFrame.join : Join DataFrames using indexes.
DataFrame.merge : Merge DataFrames by indexes or columns.
Notes
-----
The keys, levels, and names arguments are all optional.
A walkthrough of how this method fits in with other tools for combining
pandas objects can be found `here
<http://pandas.pydata.org/pandas-docs/stable/merging.html>`__.
Examples
--------
Combine two ``Series``.
>>> s1 = pd.Series(['a', 'b'])
>>> s2 = pd.Series(['c', 'd'])
>>> pd.concat([s1, s2])
0 a
1 b
0 c
1 d
dtype: object
Clear the existing index and reset it in the result
by setting the ``ignore_index`` option to ``True``.
>>> pd.concat([s1, s2], ignore_index=True)
0 a
1 b
2 c
3 d
dtype: object
Add a hierarchical index at the outermost level of
the data with the ``keys`` option.
>>> pd.concat([s1, s2], keys=['s1', 's2'])
s1 0 a
1 b
s2 0 c
1 d
dtype: object
Label the index keys you create with the ``names`` option.
>>> pd.concat([s1, s2], keys=['s1', 's2'],
... names=['Series name', 'Row ID'])
Series name Row ID
s1 0 a
1 b
s2 0 c
1 d
dtype: object
Combine two ``DataFrame`` objects with identical columns.
>>> df1 = pd.DataFrame([['a', 1], ['b', 2]],
... columns=['letter', 'number'])
>>> df1
letter number
0 a 1
1 b 2
>>> df2 = pd.DataFrame([['c', 3], ['d', 4]],
... columns=['letter', 'number'])
>>> df2
letter number
0 c 3
1 d 4
>>> pd.concat([df1, df2])
letter number
0 a 1
1 b 2
0 c 3
1 d 4
Combine ``DataFrame`` objects with overlapping columns
and return everything. Columns outside the intersection will
be filled with ``NaN`` values.
>>> df3 = pd.DataFrame([['c', 3, 'cat'], ['d', 4, 'dog']],
... columns=['letter', 'number', 'animal'])
>>> df3
letter number animal
0 c 3 cat
1 d 4 dog
>>> pd.concat([df1, df3], sort=False)
letter number animal
0 a 1 NaN
1 b 2 NaN
0 c 3 cat
1 d 4 dog
Combine ``DataFrame`` objects with overlapping columns
and return only those that are shared by passing ``inner`` to
the ``join`` keyword argument.
>>> pd.concat([df1, df3], join="inner")
letter number
0 a 1
1 b 2
0 c 3
1 d 4
Combine ``DataFrame`` objects horizontally along the x axis by
passing in ``axis=1``.
>>> df4 = pd.DataFrame([['bird', 'polly'], ['monkey', 'george']],
... columns=['animal', 'name'])
>>> pd.concat([df1, df4], axis=1)
letter number animal name
0 a 1 bird polly
1 b 2 monkey george
Prevent the result from including duplicate index values with the
``verify_integrity`` option.
>>> df5 = pd.DataFrame([1], index=['a'])
>>> df5
0
a 1
>>> df6 = pd.DataFrame([2], index=['a'])
>>> df6
0
a 2
>>> pd.concat([df5, df6], verify_integrity=True)
Traceback (most recent call last):
...
ValueError: Indexes have overlapping values: ['a']
"""
op = _Concatenator(objs, axis=axis, join_axes=join_axes,
ignore_index=ignore_index, join=join,
keys=keys, levels=levels, names=names,
verify_integrity=verify_integrity,
copy=copy, sort=sort)
return op.get_result() | def function[concat, parameter[objs, axis, join, join_axes, ignore_index, keys, levels, names, verify_integrity, sort, copy]]:
constant[
Concatenate pandas objects along a particular axis with optional set logic
along the other axes.
Can also add a layer of hierarchical indexing on the concatenation axis,
which may be useful if the labels are the same (or overlapping) on
the passed axis number.
Parameters
----------
objs : a sequence or mapping of Series, DataFrame, or Panel objects
If a dict is passed, the sorted keys will be used as the `keys`
argument, unless it is passed, in which case the values will be
selected (see below). Any None objects will be dropped silently unless
they are all None in which case a ValueError will be raised.
axis : {0/'index', 1/'columns'}, default 0
The axis to concatenate along.
join : {'inner', 'outer'}, default 'outer'
How to handle indexes on other axis (or axes).
join_axes : list of Index objects
Specific indexes to use for the other n - 1 axes instead of performing
inner/outer set logic.
ignore_index : bool, default False
If True, do not use the index values along the concatenation axis. The
resulting axis will be labeled 0, ..., n - 1. This is useful if you are
concatenating objects where the concatenation axis does not have
meaningful indexing information. Note the index values on the other
axes are still respected in the join.
keys : sequence, default None
If multiple levels passed, should contain tuples. Construct
hierarchical index using the passed keys as the outermost level.
levels : list of sequences, default None
Specific levels (unique values) to use for constructing a
MultiIndex. Otherwise they will be inferred from the keys.
names : list, default None
Names for the levels in the resulting hierarchical index.
verify_integrity : bool, default False
Check whether the new concatenated axis contains duplicates. This can
be very expensive relative to the actual data concatenation.
sort : bool, default None
Sort non-concatenation axis if it is not already aligned when `join`
is 'outer'. The current default of sorting is deprecated and will
change to not-sorting in a future version of pandas.
Explicitly pass ``sort=True`` to silence the warning and sort.
Explicitly pass ``sort=False`` to silence the warning and not sort.
This has no effect when ``join='inner'``, which already preserves
the order of the non-concatenation axis.
.. versionadded:: 0.23.0
copy : bool, default True
If False, do not copy data unnecessarily.
Returns
-------
object, type of objs
When concatenating all ``Series`` along the index (axis=0), a
``Series`` is returned. When ``objs`` contains at least one
``DataFrame``, a ``DataFrame`` is returned. When concatenating along
the columns (axis=1), a ``DataFrame`` is returned.
See Also
--------
Series.append : Concatenate Series.
DataFrame.append : Concatenate DataFrames.
DataFrame.join : Join DataFrames using indexes.
DataFrame.merge : Merge DataFrames by indexes or columns.
Notes
-----
The keys, levels, and names arguments are all optional.
A walkthrough of how this method fits in with other tools for combining
pandas objects can be found `here
<http://pandas.pydata.org/pandas-docs/stable/merging.html>`__.
Examples
--------
Combine two ``Series``.
>>> s1 = pd.Series(['a', 'b'])
>>> s2 = pd.Series(['c', 'd'])
>>> pd.concat([s1, s2])
0 a
1 b
0 c
1 d
dtype: object
Clear the existing index and reset it in the result
by setting the ``ignore_index`` option to ``True``.
>>> pd.concat([s1, s2], ignore_index=True)
0 a
1 b
2 c
3 d
dtype: object
Add a hierarchical index at the outermost level of
the data with the ``keys`` option.
>>> pd.concat([s1, s2], keys=['s1', 's2'])
s1 0 a
1 b
s2 0 c
1 d
dtype: object
Label the index keys you create with the ``names`` option.
>>> pd.concat([s1, s2], keys=['s1', 's2'],
... names=['Series name', 'Row ID'])
Series name Row ID
s1 0 a
1 b
s2 0 c
1 d
dtype: object
Combine two ``DataFrame`` objects with identical columns.
>>> df1 = pd.DataFrame([['a', 1], ['b', 2]],
... columns=['letter', 'number'])
>>> df1
letter number
0 a 1
1 b 2
>>> df2 = pd.DataFrame([['c', 3], ['d', 4]],
... columns=['letter', 'number'])
>>> df2
letter number
0 c 3
1 d 4
>>> pd.concat([df1, df2])
letter number
0 a 1
1 b 2
0 c 3
1 d 4
Combine ``DataFrame`` objects with overlapping columns
and return everything. Columns outside the intersection will
be filled with ``NaN`` values.
>>> df3 = pd.DataFrame([['c', 3, 'cat'], ['d', 4, 'dog']],
... columns=['letter', 'number', 'animal'])
>>> df3
letter number animal
0 c 3 cat
1 d 4 dog
>>> pd.concat([df1, df3], sort=False)
letter number animal
0 a 1 NaN
1 b 2 NaN
0 c 3 cat
1 d 4 dog
Combine ``DataFrame`` objects with overlapping columns
and return only those that are shared by passing ``inner`` to
the ``join`` keyword argument.
>>> pd.concat([df1, df3], join="inner")
letter number
0 a 1
1 b 2
0 c 3
1 d 4
Combine ``DataFrame`` objects horizontally along the x axis by
passing in ``axis=1``.
>>> df4 = pd.DataFrame([['bird', 'polly'], ['monkey', 'george']],
... columns=['animal', 'name'])
>>> pd.concat([df1, df4], axis=1)
letter number animal name
0 a 1 bird polly
1 b 2 monkey george
Prevent the result from including duplicate index values with the
``verify_integrity`` option.
>>> df5 = pd.DataFrame([1], index=['a'])
>>> df5
0
a 1
>>> df6 = pd.DataFrame([2], index=['a'])
>>> df6
0
a 2
>>> pd.concat([df5, df6], verify_integrity=True)
Traceback (most recent call last):
...
ValueError: Indexes have overlapping values: ['a']
]
variable[op] assign[=] call[name[_Concatenator], parameter[name[objs]]]
return[call[name[op].get_result, parameter[]]] | keyword[def] identifier[concat] ( identifier[objs] , identifier[axis] = literal[int] , identifier[join] = literal[string] , identifier[join_axes] = keyword[None] , identifier[ignore_index] = keyword[False] ,
identifier[keys] = keyword[None] , identifier[levels] = keyword[None] , identifier[names] = keyword[None] , identifier[verify_integrity] = keyword[False] ,
identifier[sort] = keyword[None] , identifier[copy] = keyword[True] ):
literal[string]
identifier[op] = identifier[_Concatenator] ( identifier[objs] , identifier[axis] = identifier[axis] , identifier[join_axes] = identifier[join_axes] ,
identifier[ignore_index] = identifier[ignore_index] , identifier[join] = identifier[join] ,
identifier[keys] = identifier[keys] , identifier[levels] = identifier[levels] , identifier[names] = identifier[names] ,
identifier[verify_integrity] = identifier[verify_integrity] ,
identifier[copy] = identifier[copy] , identifier[sort] = identifier[sort] )
keyword[return] identifier[op] . identifier[get_result] () | def concat(objs, axis=0, join='outer', join_axes=None, ignore_index=False, keys=None, levels=None, names=None, verify_integrity=False, sort=None, copy=True):
"""
Concatenate pandas objects along a particular axis with optional set logic
along the other axes.
Can also add a layer of hierarchical indexing on the concatenation axis,
which may be useful if the labels are the same (or overlapping) on
the passed axis number.
Parameters
----------
objs : a sequence or mapping of Series, DataFrame, or Panel objects
If a dict is passed, the sorted keys will be used as the `keys`
argument, unless it is passed, in which case the values will be
selected (see below). Any None objects will be dropped silently unless
they are all None in which case a ValueError will be raised.
axis : {0/'index', 1/'columns'}, default 0
The axis to concatenate along.
join : {'inner', 'outer'}, default 'outer'
How to handle indexes on other axis (or axes).
join_axes : list of Index objects
Specific indexes to use for the other n - 1 axes instead of performing
inner/outer set logic.
ignore_index : bool, default False
If True, do not use the index values along the concatenation axis. The
resulting axis will be labeled 0, ..., n - 1. This is useful if you are
concatenating objects where the concatenation axis does not have
meaningful indexing information. Note the index values on the other
axes are still respected in the join.
keys : sequence, default None
If multiple levels passed, should contain tuples. Construct
hierarchical index using the passed keys as the outermost level.
levels : list of sequences, default None
Specific levels (unique values) to use for constructing a
MultiIndex. Otherwise they will be inferred from the keys.
names : list, default None
Names for the levels in the resulting hierarchical index.
verify_integrity : bool, default False
Check whether the new concatenated axis contains duplicates. This can
be very expensive relative to the actual data concatenation.
sort : bool, default None
Sort non-concatenation axis if it is not already aligned when `join`
is 'outer'. The current default of sorting is deprecated and will
change to not-sorting in a future version of pandas.
Explicitly pass ``sort=True`` to silence the warning and sort.
Explicitly pass ``sort=False`` to silence the warning and not sort.
This has no effect when ``join='inner'``, which already preserves
the order of the non-concatenation axis.
.. versionadded:: 0.23.0
copy : bool, default True
If False, do not copy data unnecessarily.
Returns
-------
object, type of objs
When concatenating all ``Series`` along the index (axis=0), a
``Series`` is returned. When ``objs`` contains at least one
``DataFrame``, a ``DataFrame`` is returned. When concatenating along
the columns (axis=1), a ``DataFrame`` is returned.
See Also
--------
Series.append : Concatenate Series.
DataFrame.append : Concatenate DataFrames.
DataFrame.join : Join DataFrames using indexes.
DataFrame.merge : Merge DataFrames by indexes or columns.
Notes
-----
The keys, levels, and names arguments are all optional.
A walkthrough of how this method fits in with other tools for combining
pandas objects can be found `here
<http://pandas.pydata.org/pandas-docs/stable/merging.html>`__.
Examples
--------
Combine two ``Series``.
>>> s1 = pd.Series(['a', 'b'])
>>> s2 = pd.Series(['c', 'd'])
>>> pd.concat([s1, s2])
0 a
1 b
0 c
1 d
dtype: object
Clear the existing index and reset it in the result
by setting the ``ignore_index`` option to ``True``.
>>> pd.concat([s1, s2], ignore_index=True)
0 a
1 b
2 c
3 d
dtype: object
Add a hierarchical index at the outermost level of
the data with the ``keys`` option.
>>> pd.concat([s1, s2], keys=['s1', 's2'])
s1 0 a
1 b
s2 0 c
1 d
dtype: object
Label the index keys you create with the ``names`` option.
>>> pd.concat([s1, s2], keys=['s1', 's2'],
... names=['Series name', 'Row ID'])
Series name Row ID
s1 0 a
1 b
s2 0 c
1 d
dtype: object
Combine two ``DataFrame`` objects with identical columns.
>>> df1 = pd.DataFrame([['a', 1], ['b', 2]],
... columns=['letter', 'number'])
>>> df1
letter number
0 a 1
1 b 2
>>> df2 = pd.DataFrame([['c', 3], ['d', 4]],
... columns=['letter', 'number'])
>>> df2
letter number
0 c 3
1 d 4
>>> pd.concat([df1, df2])
letter number
0 a 1
1 b 2
0 c 3
1 d 4
Combine ``DataFrame`` objects with overlapping columns
and return everything. Columns outside the intersection will
be filled with ``NaN`` values.
>>> df3 = pd.DataFrame([['c', 3, 'cat'], ['d', 4, 'dog']],
... columns=['letter', 'number', 'animal'])
>>> df3
letter number animal
0 c 3 cat
1 d 4 dog
>>> pd.concat([df1, df3], sort=False)
letter number animal
0 a 1 NaN
1 b 2 NaN
0 c 3 cat
1 d 4 dog
Combine ``DataFrame`` objects with overlapping columns
and return only those that are shared by passing ``inner`` to
the ``join`` keyword argument.
>>> pd.concat([df1, df3], join="inner")
letter number
0 a 1
1 b 2
0 c 3
1 d 4
Combine ``DataFrame`` objects horizontally along the x axis by
passing in ``axis=1``.
>>> df4 = pd.DataFrame([['bird', 'polly'], ['monkey', 'george']],
... columns=['animal', 'name'])
>>> pd.concat([df1, df4], axis=1)
letter number animal name
0 a 1 bird polly
1 b 2 monkey george
Prevent the result from including duplicate index values with the
``verify_integrity`` option.
>>> df5 = pd.DataFrame([1], index=['a'])
>>> df5
0
a 1
>>> df6 = pd.DataFrame([2], index=['a'])
>>> df6
0
a 2
>>> pd.concat([df5, df6], verify_integrity=True)
Traceback (most recent call last):
...
ValueError: Indexes have overlapping values: ['a']
"""
op = _Concatenator(objs, axis=axis, join_axes=join_axes, ignore_index=ignore_index, join=join, keys=keys, levels=levels, names=names, verify_integrity=verify_integrity, copy=copy, sort=sort)
return op.get_result() |
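One facet the docstring examples above do not show directly: passing a mapping instead of keys=, in which case this pandas version uses the sorted dict keys as the outer index level.

import pandas as pd

pieces = {'s2': pd.Series(['c', 'd']), 's1': pd.Series(['a', 'b'])}
print(pd.concat(pieces))
# s1  0    a
#     1    b
# s2  0    c
#     1    d
# dtype: object  -- keys come out sorted, per the docstring above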
def inv(matrix):
'''
The 5x5 case has way too many multiplies, so larger matrices fall back to numpy.
>> from sympy import *
>> from sympy.abc import *
>> Matrix([a]).inv()
Matrix([[1/a]])
>> cse(Matrix([[a, b], [c, d]]).inv())
Matrix([
[1/a + b*c/(a**2*(d - b*c/a)), -b/(a*(d - b*c/a))],
[ -c/(a*(d - b*c/a)), 1/(d - b*c/a)]])
>> m_3 = Matrix([[a, b, c], [d, e, f], [g, h, i]])
>> #cse(m_3.inv())
>> m_4 = Matrix([[a, b, c, d], [e, f, g, h], [i, j, k, l], [m, n, o, p]])
>> cse(m_4.inv())
# Note: for 3, 4 - forgot to generate code using optimizations='basic'
'''
size = len(matrix)
if size == 1:
return [1.0/matrix[0]]
elif size == 2:
(a, b), (c, d) = matrix
x0 = 1.0/a
x1 = b*x0
x2 = 1.0/(d - c*x1)
x3 = c*x2
return [[x0 + b*x3*x0*x0, -x1*x2],
[-x0*x3, x2]]
elif size == 3:
(a, b, c), (d, e, f), (g, h, i) = matrix
x0 = 1./a
x1 = b*d
x2 = e - x0*x1
x3 = 1./x2
x4 = b*g
x5 = h - x0*x4
x6 = x0*x3
x7 = d*x6
x8 = -g*x0 + x5*x7
x9 = c*d
x10 = f - x0*x9
x11 = b*x6
x12 = c*x0 - x10*x11
x13 = a*e
x14 = -x1 + x13
x15 = 1./(-a*f*h - c*e*g + f*x4 + h*x9 - i*x1 + i*x13)
x16 = x14*x15
x17 = x12*x16
x18 = x14*x15*x3
x19 = x18*x5
x20 = x10*x18
return [[x0 - x17*x8 + x1*x3*x0*x0, -x11 + x12*x19, -x17],
[-x20*x8 - x7, x10*x16*x5*x2**-2 + x3, -x20],
[ x16*x8, -x19, x16]]
elif size == 4:
(a, b, c, d), (e, f, g, h), (i, j, k, l), (m, n, o, p) = matrix
x0 = 1./a
x1 = b*e
x2 = f - x0*x1
x3 = 1./x2
x4 = i*x0
x5 = -b*x4 + j
x6 = x0*x3
x7 = e*x6
x8 = -x4 + x5*x7
x9 = c*x0
x10 = -e*x9 + g
x11 = b*x6
x12 = -x10*x11 + x9
x13 = a*f
x14 = -x1 + x13
x15 = k*x13
x16 = b*g*i
x17 = c*e*j
x18 = a*g*j
x19 = k*x1
x20 = c*f*i
x21 = x15 + x16 + x17 - x18 - x19 - x20
x22 = 1/x21
x23 = x14*x22
x24 = x12*x23
x25 = m*x0
x26 = -b*x25 + n
x27 = x26*x3
x28 = -m*x9 + o - x10*x27
x29 = x23*x8
x30 = -x25 + x26*x7 - x28*x29
x31 = d*x0
x32 = -e*x31 + h
x33 = x3*x32
x34 = -i*x31 + l - x33*x5
x35 = -x11*x32 - x24*x34 + x31
x36 = a*n
x37 = g*l
x38 = h*o
x39 = l*o
x40 = b*m
x41 = h*k
x42 = c*l
x43 = f*m
x44 = c*h
x45 = i*n
x46 = d*k
x47 = e*n
x48 = d*o
x49 = d*g
x50 = j*m
x51 = 1.0/(a*j*x38 - b*i*x38 - e*j*x48 + f*i*x48 + p*x15
+ p*x16 + p*x17 - p*x18 - p*x19 - p*x20 + x1*x39
- x13*x39 + x36*x37 - x36*x41 - x37*x40 + x40*x41
+ x42*x43 - x42*x47 - x43*x46 + x44*x45 - x44*x50
- x45*x49 + x46*x47 + x49*x50)
x52 = x21*x51
x53 = x35*x52
x54 = x14*x22*x3
x55 = x5*x54
x56 = -x27 + x28*x55
x57 = x52*x56
x58 = x14*x51
x59 = x28*x58
x60 = x10*x54
x61 = x33 - x34*x60
x62 = x52*x61
x63 = x34*x58
return [[x0 - x24*x8 - x30*x53 + x1*x3*x0*x0, -x11 + x12*x55 - x35*x57, -x24 + x35*x59, -x53],
[-x30*x62 - x60*x8 - x7, x10*x23*x5*x2**-2 + x3 - x56*x62, x59*x61 - x60, -x62],
[x29 - x30*x63, -x55 - x56*x63, x14**2*x22*x28*x34*x51 + x23, -x63],
[x30*x52, x57, -x59, x52]]
else:
# TODO algorithm?
import numpy as np
return np.linalg.inv(matrix) | def function[inv, parameter[matrix]]:
constant[
The 5x5 case has way too many multiplies, so larger matrices fall back to numpy.
>> from sympy import *
>> from sympy.abc import *
>> Matrix([a]).inv()
Matrix([[1/a]])
>> cse(Matrix([[a, b], [c, d]]).inv())
Matrix([
[1/a + b*c/(a**2*(d - b*c/a)), -b/(a*(d - b*c/a))],
[ -c/(a*(d - b*c/a)), 1/(d - b*c/a)]])
>> m_3 = Matrix([[a, b, c], [d, e, f], [g, h, i]])
>> #cse(m_3.inv())
>> m_4 = Matrix([[a, b, c, d], [e, f, g, h], [i, j, k, l], [m, n, o, p]])
>> cse(m_4.inv())
# Note: for 3, 4 - forgot to generate code using optimizations='basic'
]
variable[size] assign[=] call[name[len], parameter[name[matrix]]]
if compare[name[size] equal[==] constant[1]] begin[:]
return[list[[<ast.BinOp object at 0x7da1b12f26e0>]]] | keyword[def] identifier[inv] ( identifier[matrix] ):
literal[string]
identifier[size] = identifier[len] ( identifier[matrix] )
keyword[if] identifier[size] == literal[int] :
keyword[return] [ literal[int] / identifier[matrix] [ literal[int] ]]
keyword[elif] identifier[size] == literal[int] :
( identifier[a] , identifier[b] ),( identifier[c] , identifier[d] )= identifier[matrix]
identifier[x0] = literal[int] / identifier[a]
identifier[x1] = identifier[b] * identifier[x0]
identifier[x2] = literal[int] /( identifier[d] - identifier[c] * identifier[x1] )
identifier[x3] = identifier[c] * identifier[x2]
keyword[return] [[ identifier[x0] + identifier[b] * identifier[x3] * identifier[x0] * identifier[x0] ,- identifier[x1] * identifier[x2] ],
[- identifier[x0] * identifier[x3] , identifier[x2] ]]
keyword[elif] identifier[size] == literal[int] :
( identifier[a] , identifier[b] , identifier[c] ),( identifier[d] , identifier[e] , identifier[f] ),( identifier[g] , identifier[h] , identifier[i] )= identifier[matrix]
identifier[x0] = literal[int] / identifier[a]
identifier[x1] = identifier[b] * identifier[d]
identifier[x2] = identifier[e] - identifier[x0] * identifier[x1]
identifier[x3] = literal[int] / identifier[x2]
identifier[x4] = identifier[b] * identifier[g]
identifier[x5] = identifier[h] - identifier[x0] * identifier[x4]
identifier[x6] = identifier[x0] * identifier[x3]
identifier[x7] = identifier[d] * identifier[x6]
identifier[x8] =- identifier[g] * identifier[x0] + identifier[x5] * identifier[x7]
identifier[x9] = identifier[c] * identifier[d]
identifier[x10] = identifier[f] - identifier[x0] * identifier[x9]
identifier[x11] = identifier[b] * identifier[x6]
identifier[x12] = identifier[c] * identifier[x0] - identifier[x10] * identifier[x11]
identifier[x13] = identifier[a] * identifier[e]
identifier[x14] =- identifier[x1] + identifier[x13]
identifier[x15] = literal[int] /(- identifier[a] * identifier[f] * identifier[h] - identifier[c] * identifier[e] * identifier[g] + identifier[f] * identifier[x4] + identifier[h] * identifier[x9] - identifier[i] * identifier[x1] + identifier[i] * identifier[x13] )
identifier[x16] = identifier[x14] * identifier[x15]
identifier[x17] = identifier[x12] * identifier[x16]
identifier[x18] = identifier[x14] * identifier[x15] * identifier[x3]
identifier[x19] = identifier[x18] * identifier[x5]
identifier[x20] = identifier[x10] * identifier[x18]
keyword[return] [[ identifier[x0] - identifier[x17] * identifier[x8] + identifier[x1] * identifier[x3] * identifier[x0] * identifier[x0] ,- identifier[x11] + identifier[x12] * identifier[x19] ,- identifier[x17] ],
[- identifier[x20] * identifier[x8] - identifier[x7] , identifier[x10] * identifier[x16] * identifier[x5] * identifier[x2] **- literal[int] + identifier[x3] ,- identifier[x20] ],
[ identifier[x16] * identifier[x8] ,- identifier[x19] , identifier[x16] ]]
keyword[elif] identifier[size] == literal[int] :
( identifier[a] , identifier[b] , identifier[c] , identifier[d] ),( identifier[e] , identifier[f] , identifier[g] , identifier[h] ),( identifier[i] , identifier[j] , identifier[k] , identifier[l] ),( identifier[m] , identifier[n] , identifier[o] , identifier[p] )= identifier[matrix]
identifier[x0] = literal[int] / identifier[a]
identifier[x1] = identifier[b] * identifier[e]
identifier[x2] = identifier[f] - identifier[x0] * identifier[x1]
identifier[x3] = literal[int] / identifier[x2]
identifier[x4] = identifier[i] * identifier[x0]
identifier[x5] =- identifier[b] * identifier[x4] + identifier[j]
identifier[x6] = identifier[x0] * identifier[x3]
identifier[x7] = identifier[e] * identifier[x6]
identifier[x8] =- identifier[x4] + identifier[x5] * identifier[x7]
identifier[x9] = identifier[c] * identifier[x0]
identifier[x10] =- identifier[e] * identifier[x9] + identifier[g]
identifier[x11] = identifier[b] * identifier[x6]
identifier[x12] =- identifier[x10] * identifier[x11] + identifier[x9]
identifier[x13] = identifier[a] * identifier[f]
identifier[x14] =- identifier[x1] + identifier[x13]
identifier[x15] = identifier[k] * identifier[x13]
identifier[x16] = identifier[b] * identifier[g] * identifier[i]
identifier[x17] = identifier[c] * identifier[e] * identifier[j]
identifier[x18] = identifier[a] * identifier[g] * identifier[j]
identifier[x19] = identifier[k] * identifier[x1]
identifier[x20] = identifier[c] * identifier[f] * identifier[i]
identifier[x21] = identifier[x15] + identifier[x16] + identifier[x17] - identifier[x18] - identifier[x19] - identifier[x20]
identifier[x22] = literal[int] / identifier[x21]
identifier[x23] = identifier[x14] * identifier[x22]
identifier[x24] = identifier[x12] * identifier[x23]
identifier[x25] = identifier[m] * identifier[x0]
identifier[x26] =- identifier[b] * identifier[x25] + identifier[n]
identifier[x27] = identifier[x26] * identifier[x3]
identifier[x28] =- identifier[m] * identifier[x9] + identifier[o] - identifier[x10] * identifier[x27]
identifier[x29] = identifier[x23] * identifier[x8]
identifier[x30] =- identifier[x25] + identifier[x26] * identifier[x7] - identifier[x28] * identifier[x29]
identifier[x31] = identifier[d] * identifier[x0]
identifier[x32] =- identifier[e] * identifier[x31] + identifier[h]
identifier[x33] = identifier[x3] * identifier[x32]
identifier[x34] =- identifier[i] * identifier[x31] + identifier[l] - identifier[x33] * identifier[x5]
identifier[x35] =- identifier[x11] * identifier[x32] - identifier[x24] * identifier[x34] + identifier[x31]
identifier[x36] = identifier[a] * identifier[n]
identifier[x37] = identifier[g] * identifier[l]
identifier[x38] = identifier[h] * identifier[o]
identifier[x39] = identifier[l] * identifier[o]
identifier[x40] = identifier[b] * identifier[m]
identifier[x41] = identifier[h] * identifier[k]
identifier[x42] = identifier[c] * identifier[l]
identifier[x43] = identifier[f] * identifier[m]
identifier[x44] = identifier[c] * identifier[h]
identifier[x45] = identifier[i] * identifier[n]
identifier[x46] = identifier[d] * identifier[k]
identifier[x47] = identifier[e] * identifier[n]
identifier[x48] = identifier[d] * identifier[o]
identifier[x49] = identifier[d] * identifier[g]
identifier[x50] = identifier[j] * identifier[m]
identifier[x51] = literal[int] /( identifier[a] * identifier[j] * identifier[x38] - identifier[b] * identifier[i] * identifier[x38] - identifier[e] * identifier[j] * identifier[x48] + identifier[f] * identifier[i] * identifier[x48] + identifier[p] * identifier[x15]
+ identifier[p] * identifier[x16] + identifier[p] * identifier[x17] - identifier[p] * identifier[x18] - identifier[p] * identifier[x19] - identifier[p] * identifier[x20] + identifier[x1] * identifier[x39]
- identifier[x13] * identifier[x39] + identifier[x36] * identifier[x37] - identifier[x36] * identifier[x41] - identifier[x37] * identifier[x40] + identifier[x40] * identifier[x41]
+ identifier[x42] * identifier[x43] - identifier[x42] * identifier[x47] - identifier[x43] * identifier[x46] + identifier[x44] * identifier[x45] - identifier[x44] * identifier[x50]
- identifier[x45] * identifier[x49] + identifier[x46] * identifier[x47] + identifier[x49] * identifier[x50] )
identifier[x52] = identifier[x21] * identifier[x51]
identifier[x53] = identifier[x35] * identifier[x52]
identifier[x54] = identifier[x14] * identifier[x22] * identifier[x3]
identifier[x55] = identifier[x5] * identifier[x54]
identifier[x56] =- identifier[x27] + identifier[x28] * identifier[x55]
identifier[x57] = identifier[x52] * identifier[x56]
identifier[x58] = identifier[x14] * identifier[x51]
identifier[x59] = identifier[x28] * identifier[x58]
identifier[x60] = identifier[x10] * identifier[x54]
identifier[x61] = identifier[x33] - identifier[x34] * identifier[x60]
identifier[x62] = identifier[x52] * identifier[x61]
identifier[x63] = identifier[x34] * identifier[x58]
keyword[return] [[ identifier[x0] - identifier[x24] * identifier[x8] - identifier[x30] * identifier[x53] + identifier[x1] * identifier[x3] * identifier[x0] * identifier[x0] ,- identifier[x11] + identifier[x12] * identifier[x55] - identifier[x35] * identifier[x57] ,- identifier[x24] + identifier[x35] * identifier[x59] ,- identifier[x53] ],
[- identifier[x30] * identifier[x62] - identifier[x60] * identifier[x8] - identifier[x7] , identifier[x10] * identifier[x23] * identifier[x5] * identifier[x2] **- literal[int] + identifier[x3] - identifier[x56] * identifier[x62] , identifier[x59] * identifier[x61] - identifier[x60] ,- identifier[x62] ],
[ identifier[x29] - identifier[x30] * identifier[x63] ,- identifier[x55] - identifier[x56] * identifier[x63] , identifier[x14] ** literal[int] * identifier[x22] * identifier[x28] * identifier[x34] * identifier[x51] + identifier[x23] ,- identifier[x63] ],
[ identifier[x30] * identifier[x52] , identifier[x57] ,- identifier[x59] , identifier[x52] ]]
keyword[else] :
keyword[import] identifier[numpy] keyword[as] identifier[np]
keyword[return] identifier[np] . identifier[linalg] . identifier[inv] ( identifier[matrix] ) | def inv(matrix):
"""
The 5x5 case has way too many multiplies, so larger matrices fall back to numpy.
>> from sympy import *
>> from sympy.abc import *
>> Matrix([a]).inv()
Matrix([[1/a]])
>> cse(Matrix([[a, b], [c, d]]).inv())
Matrix([
[1/a + b*c/(a**2*(d - b*c/a)), -b/(a*(d - b*c/a))],
[ -c/(a*(d - b*c/a)), 1/(d - b*c/a)]])
>> m_3 = Matrix([[a, b, c], [d, e, f], [g, h, i]])
>> #cse(m_3.inv())
>> m_4 = Matrix([[a, b, c, d], [e, f, g, h], [i, j, k, l], [m, n, o, p]])
>> cse(m_4.inv())
# Note: for 3, 4 - forgot to generate code using optimizations='basic'
"""
size = len(matrix)
if size == 1:
return [1.0 / matrix[0]] # depends on [control=['if'], data=[]]
elif size == 2:
((a, b), (c, d)) = matrix
x0 = 1.0 / a
x1 = b * x0
x2 = 1.0 / (d - c * x1)
x3 = c * x2
return [[x0 + b * x3 * x0 * x0, -x1 * x2], [-x0 * x3, x2]] # depends on [control=['if'], data=[]]
elif size == 3:
((a, b, c), (d, e, f), (g, h, i)) = matrix
x0 = 1.0 / a
x1 = b * d
x2 = e - x0 * x1
x3 = 1.0 / x2
x4 = b * g
x5 = h - x0 * x4
x6 = x0 * x3
x7 = d * x6
x8 = -g * x0 + x5 * x7
x9 = c * d
x10 = f - x0 * x9
x11 = b * x6
x12 = c * x0 - x10 * x11
x13 = a * e
x14 = -x1 + x13
x15 = 1.0 / (-a * f * h - c * e * g + f * x4 + h * x9 - i * x1 + i * x13)
x16 = x14 * x15
x17 = x12 * x16
x18 = x14 * x15 * x3
x19 = x18 * x5
x20 = x10 * x18
return [[x0 - x17 * x8 + x1 * x3 * x0 * x0, -x11 + x12 * x19, -x17], [-x20 * x8 - x7, x10 * x16 * x5 * x2 ** (-2) + x3, -x20], [x16 * x8, -x19, x16]] # depends on [control=['if'], data=[]]
elif size == 4:
((a, b, c, d), (e, f, g, h), (i, j, k, l), (m, n, o, p)) = matrix
x0 = 1.0 / a
x1 = b * e
x2 = f - x0 * x1
x3 = 1.0 / x2
x4 = i * x0
x5 = -b * x4 + j
x6 = x0 * x3
x7 = e * x6
x8 = -x4 + x5 * x7
x9 = c * x0
x10 = -e * x9 + g
x11 = b * x6
x12 = -x10 * x11 + x9
x13 = a * f
x14 = -x1 + x13
x15 = k * x13
x16 = b * g * i
x17 = c * e * j
x18 = a * g * j
x19 = k * x1
x20 = c * f * i
x21 = x15 + x16 + x17 - x18 - x19 - x20
x22 = 1 / x21
x23 = x14 * x22
x24 = x12 * x23
x25 = m * x0
x26 = -b * x25 + n
x27 = x26 * x3
x28 = -m * x9 + o - x10 * x27
x29 = x23 * x8
x30 = -x25 + x26 * x7 - x28 * x29
x31 = d * x0
x32 = -e * x31 + h
x33 = x3 * x32
x34 = -i * x31 + l - x33 * x5
x35 = -x11 * x32 - x24 * x34 + x31
x36 = a * n
x37 = g * l
x38 = h * o
x39 = l * o
x40 = b * m
x41 = h * k
x42 = c * l
x43 = f * m
x44 = c * h
x45 = i * n
x46 = d * k
x47 = e * n
x48 = d * o
x49 = d * g
x50 = j * m
x51 = 1.0 / (a * j * x38 - b * i * x38 - e * j * x48 + f * i * x48 + p * x15 + p * x16 + p * x17 - p * x18 - p * x19 - p * x20 + x1 * x39 - x13 * x39 + x36 * x37 - x36 * x41 - x37 * x40 + x40 * x41 + x42 * x43 - x42 * x47 - x43 * x46 + x44 * x45 - x44 * x50 - x45 * x49 + x46 * x47 + x49 * x50)
x52 = x21 * x51
x53 = x35 * x52
x54 = x14 * x22 * x3
x55 = x5 * x54
x56 = -x27 + x28 * x55
x57 = x52 * x56
x58 = x14 * x51
x59 = x28 * x58
x60 = x10 * x54
x61 = x33 - x34 * x60
x62 = x52 * x61
x63 = x34 * x58
return [[x0 - x24 * x8 - x30 * x53 + x1 * x3 * x0 * x0, -x11 + x12 * x55 - x35 * x57, -x24 + x35 * x59, -x53], [-x30 * x62 - x60 * x8 - x7, x10 * x23 * x5 * x2 ** (-2) + x3 - x56 * x62, x59 * x61 - x60, -x62], [x29 - x30 * x63, -x55 - x56 * x63, x14 ** 2 * x22 * x28 * x34 * x51 + x23, -x63], [x30 * x52, x57, -x59, x52]] # depends on [control=['if'], data=[]]
else:
# TODO algorithm?
import numpy as np
return np.linalg.inv(matrix) |
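A quick numerical check of the hand-unrolled 2x2 and 3x3 branches against numpy; the test matrices are arbitrary but non-singular.

import numpy as np

m2 = [[4.0, 7.0], [2.0, 6.0]]
m3 = [[4.0, 7.0, 2.0], [3.0, 6.0, 1.0], [2.0, 5.0, 3.0]]
assert np.allclose(inv(m2), np.linalg.inv(m2))
assert np.allclose(inv(m3), np.linalg.inv(m3))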
def glob(self, filename):
"""Returns a list of files that match the given pattern(s)."""
# Only support prefix with * at the end and no ? in the string
star_i = filename.find('*')
quest_i = filename.find('?')
if quest_i >= 0:
raise NotImplementedError(
"{} not supported by compat glob".format(filename))
if star_i != len(filename) - 1:
# Just return empty so we can use glob from directory watcher
#
# TODO: Remove and instead handle in GetLogdirSubdirectories.
# However, we would need to handle it for all non-local registered
# filesystems in some way.
return []
filename = filename[:-1]
client = boto3.client("s3")
bucket, path = self.bucket_and_path(filename)
p = client.get_paginator("list_objects")
keys = []
for r in p.paginate(Bucket=bucket, Prefix=path):
for o in r.get("Contents", []):
key = o["Key"][len(path):]
if key: # Skip the base dir, which would add an empty string
keys.append(filename + key)
return keys | def function[glob, parameter[self, filename]]:
constant[Returns a list of files that match the given pattern(s).]
variable[star_i] assign[=] call[name[filename].find, parameter[constant[*]]]
variable[quest_i] assign[=] call[name[filename].find, parameter[constant[?]]]
if compare[name[quest_i] greater_or_equal[>=] constant[0]] begin[:]
<ast.Raise object at 0x7da20e9b0730>
if compare[name[star_i] not_equal[!=] binary_operation[call[name[len], parameter[name[filename]]] - constant[1]]] begin[:]
return[list[[]]]
variable[filename] assign[=] call[name[filename]][<ast.Slice object at 0x7da20e9b05e0>]
variable[client] assign[=] call[name[boto3].client, parameter[constant[s3]]]
<ast.Tuple object at 0x7da20e9b07c0> assign[=] call[name[self].bucket_and_path, parameter[name[filename]]]
variable[p] assign[=] call[name[client].get_paginator, parameter[constant[list_objects]]]
variable[keys] assign[=] list[[]]
for taget[name[r]] in starred[call[name[p].paginate, parameter[]]] begin[:]
for taget[name[o]] in starred[call[name[r].get, parameter[constant[Contents], list[[]]]]] begin[:]
variable[key] assign[=] call[call[name[o]][constant[Key]]][<ast.Slice object at 0x7da20e9b38b0>]
if name[key] begin[:]
call[name[keys].append, parameter[binary_operation[name[filename] + name[key]]]]
return[name[keys]] | keyword[def] identifier[glob] ( identifier[self] , identifier[filename] ):
literal[string]
identifier[star_i] = identifier[filename] . identifier[find] ( literal[string] )
identifier[quest_i] = identifier[filename] . identifier[find] ( literal[string] )
keyword[if] identifier[quest_i] >= literal[int] :
keyword[raise] identifier[NotImplementedError] (
literal[string] . identifier[format] ( identifier[filename] ))
keyword[if] identifier[star_i] != identifier[len] ( identifier[filename] )- literal[int] :
keyword[return] []
identifier[filename] = identifier[filename] [:- literal[int] ]
identifier[client] = identifier[boto3] . identifier[client] ( literal[string] )
identifier[bucket] , identifier[path] = identifier[self] . identifier[bucket_and_path] ( identifier[filename] )
identifier[p] = identifier[client] . identifier[get_paginator] ( literal[string] )
identifier[keys] =[]
keyword[for] identifier[r] keyword[in] identifier[p] . identifier[paginate] ( identifier[Bucket] = identifier[bucket] , identifier[Prefix] = identifier[path] ):
keyword[for] identifier[o] keyword[in] identifier[r] . identifier[get] ( literal[string] ,[]):
identifier[key] = identifier[o] [ literal[string] ][ identifier[len] ( identifier[path] ):]
keyword[if] identifier[key] :
identifier[keys] . identifier[append] ( identifier[filename] + identifier[key] )
keyword[return] identifier[keys] | def glob(self, filename):
"""Returns a list of files that match the given pattern(s)."""
# Only support prefix with * at the end and no ? in the string
star_i = filename.find('*')
quest_i = filename.find('?')
if quest_i >= 0:
raise NotImplementedError('{} not supported by compat glob'.format(filename)) # depends on [control=['if'], data=[]]
if star_i != len(filename) - 1:
# Just return empty so we can use glob from directory watcher
#
# TODO: Remove and instead handle in GetLogdirSubdirectories.
# However, we would need to handle it for all non-local registered
# filesystems in some way.
return [] # depends on [control=['if'], data=[]]
filename = filename[:-1]
client = boto3.client('s3')
(bucket, path) = self.bucket_and_path(filename)
p = client.get_paginator('list_objects')
keys = []
for r in p.paginate(Bucket=bucket, Prefix=path):
for o in r.get('Contents', []):
key = o['Key'][len(path):]
if key: # Skip the base dir, which would add an empty string
keys.append(filename + key) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['o']] # depends on [control=['for'], data=['r']]
return keys |
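Only a single trailing '*' is supported: '?' raises, and an interior '*' returns []. A credential-free illustration of the branch logic:

for pattern in ['logs/run_*', 'logs/run_?', 'logs/*/events']:
    star_i, quest_i = pattern.find('*'), pattern.find('?')
    if quest_i >= 0:
        print(pattern, '-> raises NotImplementedError')
    elif star_i != len(pattern) - 1:
        print(pattern, '-> [] (interior wildcard unsupported)')
    else:
        print(pattern, '-> S3 list_objects under prefix', pattern[:-1])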
def parse(cls, filename, root=None):
"""Parses the file at filename and returns a PythonFile.
If root is specified, it will open the file with root prepended to the path. The idea is to
allow for errors to contain a friendlier file path than the full absolute path.
"""
if root is not None:
if os.path.isabs(filename):
raise ValueError("filename must be a relative path if root is specified")
full_filename = os.path.join(root, filename)
else:
full_filename = filename
with io.open(full_filename, 'rb') as fp:
blob = fp.read()
tree = cls._parse(blob, filename)
return cls(blob=blob, tree=tree, root=root, filename=filename) | def function[parse, parameter[cls, filename, root]]:
constant[Parses the file at filename and returns a PythonFile.
If root is specified, it will open the file with root prepended to the path. The idea is to
allow for errors to contain a friendlier file path than the full absolute path.
]
if compare[name[root] is_not constant[None]] begin[:]
if call[name[os].path.isabs, parameter[name[filename]]] begin[:]
<ast.Raise object at 0x7da1b1e6bdc0>
variable[full_filename] assign[=] call[name[os].path.join, parameter[name[root], name[filename]]]
with call[name[io].open, parameter[name[full_filename], constant[rb]]] begin[:]
variable[blob] assign[=] call[name[fp].read, parameter[]]
variable[tree] assign[=] call[name[cls]._parse, parameter[name[blob], name[filename]]]
return[call[name[cls], parameter[]]] | keyword[def] identifier[parse] ( identifier[cls] , identifier[filename] , identifier[root] = keyword[None] ):
literal[string]
keyword[if] identifier[root] keyword[is] keyword[not] keyword[None] :
keyword[if] identifier[os] . identifier[path] . identifier[isabs] ( identifier[filename] ):
keyword[raise] identifier[ValueError] ( literal[string] )
identifier[full_filename] = identifier[os] . identifier[path] . identifier[join] ( identifier[root] , identifier[filename] )
keyword[else] :
identifier[full_filename] = identifier[filename]
keyword[with] identifier[io] . identifier[open] ( identifier[full_filename] , literal[string] ) keyword[as] identifier[fp] :
identifier[blob] = identifier[fp] . identifier[read] ()
identifier[tree] = identifier[cls] . identifier[_parse] ( identifier[blob] , identifier[filename] )
keyword[return] identifier[cls] ( identifier[blob] = identifier[blob] , identifier[tree] = identifier[tree] , identifier[root] = identifier[root] , identifier[filename] = identifier[filename] ) | def parse(cls, filename, root=None):
"""Parses the file at filename and returns a PythonFile.
If root is specified, it will open the file with root prepended to the path. The idea is to
allow for errors to contain a friendlier file path than the full absolute path.
"""
if root is not None:
if os.path.isabs(filename):
raise ValueError('filename must be a relative path if root is specified') # depends on [control=['if'], data=[]]
full_filename = os.path.join(root, filename) # depends on [control=['if'], data=['root']]
else:
full_filename = filename
with io.open(full_filename, 'rb') as fp:
blob = fp.read() # depends on [control=['with'], data=['fp']]
tree = cls._parse(blob, filename)
return cls(blob=blob, tree=tree, root=root, filename=filename) |
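A self-contained round trip, assuming PythonFile is the hosting class (the docstring says this classmethod returns one):

import os
import tempfile

root = tempfile.mkdtemp()
with open(os.path.join(root, 'hello.py'), 'w') as f:
    f.write('print("hi")\n')

pf = PythonFile.parse('hello.py', root=root)  # opens root/hello.py
# Passing an absolute filename together with root raises ValueError instead.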
def do_package(self, line):
"""package <package-pid> <science-metadata-pid> <science-pid> [science- pid.
...] Create a simple OAI-ORE Resource Map on a Member Node.
"""
pids = self._split_args(line, 3, -1, pad=False)
self._command_processor.create_package(pids)
self._print_info_if_verbose(
'Added package create operation for identifier "{}" to write queue'.format(
pids[0]
)
) | def function[do_package, parameter[self, line]]:
constant[package <package-pid> <science-metadata-pid> <science-pid> [science-pid ...]
Create a simple OAI-ORE Resource Map on a Member Node.
]
variable[pids] assign[=] call[name[self]._split_args, parameter[name[line], constant[3], <ast.UnaryOp object at 0x7da1b1addd80>]]
call[name[self]._command_processor.create_package, parameter[name[pids]]]
call[name[self]._print_info_if_verbose, parameter[call[constant[Added package create operation for identifier "{}" to write queue].format, parameter[call[name[pids]][constant[0]]]]]] | keyword[def] identifier[do_package] ( identifier[self] , identifier[line] ):
literal[string]
identifier[pids] = identifier[self] . identifier[_split_args] ( identifier[line] , literal[int] ,- literal[int] , identifier[pad] = keyword[False] )
identifier[self] . identifier[_command_processor] . identifier[create_package] ( identifier[pids] )
identifier[self] . identifier[_print_info_if_verbose] (
literal[string] . identifier[format] (
identifier[pids] [ literal[int] ]
)
) | def do_package(self, line):
"""package <package-pid> <science-metadata-pid> <science-pid> [science- pid.
...] Create a simple OAI-ORE Resource Map on a Member Node.
"""
pids = self._split_args(line, 3, -1, pad=False)
self._command_processor.create_package(pids)
self._print_info_if_verbose('Added package create operation for identifier "{}" to write queue'.format(pids[0])) |
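A brief invocation sketch for do_package; the shell instance and all identifiers here are hypothetical, and _split_args(line, 3, -1, pad=False) is read as requiring at least three space-separated arguments with no upper bound:

# pids[0] is the package identifier, pids[1] the science metadata pid,
# and pids[2:] the one or more science data pids.
cli.do_package('pkg-pid sci-meta-pid sci-data-pid-1 sci-data-pid-2')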
def infer_genome(genome_object_string_or_int):
"""
If given an integer, return associated human EnsemblRelease for that
Ensembl version.
If given a string, return latest EnsemblRelease which has a reference
of the same name.
If given a PyEnsembl Genome, simply return it.
"""
if isinstance(genome_object_string_or_int, Genome):
return genome_object_string_or_int
if is_integer(genome_object_string_or_int):
return cached_release(genome_object_string_or_int)
elif is_string(genome_object_string_or_int):
# first infer the canonical reference name, e.g. mapping hg19 -> GRCh37
# and then get the associated PyEnsembl Genome object
reference_name = infer_reference_name(genome_object_string_or_int)
return genome_for_reference_name(reference_name)
else:
raise TypeError(
("Expected genome to be an int, string, or pyensembl.Genome "
"instance, got %s : %s") % (
str(genome_object_string_or_int),
type(genome_object_string_or_int))) | def function[infer_genome, parameter[genome_object_string_or_int]]:
constant[
If given an integer, return associated human EnsemblRelease for that
Ensembl version.
If given a string, return latest EnsemblRelease which has a reference
of the same name.
If given a PyEnsembl Genome, simply return it.
]
if call[name[isinstance], parameter[name[genome_object_string_or_int], name[Genome]]] begin[:]
return[name[genome_object_string_or_int]]
if call[name[is_integer], parameter[name[genome_object_string_or_int]]] begin[:]
return[call[name[cached_release], parameter[name[genome_object_string_or_int]]]] | keyword[def] identifier[infer_genome] ( identifier[genome_object_string_or_int] ):
literal[string]
keyword[if] identifier[isinstance] ( identifier[genome_object_string_or_int] , identifier[Genome] ):
keyword[return] identifier[genome_object_string_or_int]
keyword[if] identifier[is_integer] ( identifier[genome_object_string_or_int] ):
keyword[return] identifier[cached_release] ( identifier[genome_object_string_or_int] )
keyword[elif] identifier[is_string] ( identifier[genome_object_string_or_int] ):
identifier[reference_name] = identifier[infer_reference_name] ( identifier[genome_object_string_or_int] )
keyword[return] identifier[genome_for_reference_name] ( identifier[reference_name] )
keyword[else] :
keyword[raise] identifier[TypeError] (
( literal[string]
literal[string] )%(
identifier[str] ( identifier[genome_object_string_or_int] ),
identifier[type] ( identifier[genome_object_string_or_int] ))) | def infer_genome(genome_object_string_or_int):
"""
If given an integer, return associated human EnsemblRelease for that
Ensembl version.
If given a string, return latest EnsemblRelease which has a reference
of the same name.
If given a PyEnsembl Genome, simply return it.
"""
if isinstance(genome_object_string_or_int, Genome):
return genome_object_string_or_int # depends on [control=['if'], data=[]]
if is_integer(genome_object_string_or_int):
return cached_release(genome_object_string_or_int) # depends on [control=['if'], data=[]]
elif is_string(genome_object_string_or_int):
# first infer the canonical reference name, e.g. mapping hg19 -> GRCh37
# and then get the associated PyEnsembl Genome object
reference_name = infer_reference_name(genome_object_string_or_int)
return genome_for_reference_name(reference_name) # depends on [control=['if'], data=[]]
else:
raise TypeError('Expected genome to be an int, string, or pyensembl.Genome instance, got %s : %s' % (str(genome_object_string_or_int), type(genome_object_string_or_int))) |
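A minimal usage sketch for infer_genome, assuming the pyensembl helpers it calls (cached_release, infer_reference_name, genome_for_reference_name) are available:

genome = infer_genome(75)       # int: human EnsemblRelease for version 75
genome = infer_genome('hg19')   # str: reference name inferred as GRCh37
genome = infer_genome(genome)   # Genome instance: returned unchanged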