code
stringlengths
75
104k
code_sememe
stringlengths
47
309k
token_type
stringlengths
215
214k
code_dependency
stringlengths
75
155k
def _kernel(kernel_spec):
    """Normalize a kernel specification to a two-element list.

    Args:
        kernel_spec: An integer, or a sequence of length 1 or 2.

    Returns:
        A length 2 list (or the original length-2 sequence unchanged).
    """
    if isinstance(kernel_spec, tf.compat.integral_types):
        # A bare integer means a square kernel.
        return [kernel_spec] * 2
    if len(kernel_spec) == 1:
        # A single-element sequence is also expanded to a square kernel.
        return [kernel_spec[0]] * 2
    assert len(kernel_spec) == 2
    return kernel_spec
def function[_kernel, parameter[kernel_spec]]: constant[Expands the kernel spec into a length 2 list. Args: kernel_spec: An integer or a length 1 or 2 sequence that is expanded to a list. Returns: A length 2 list. ] if call[name[isinstance], parameter[name[kernel_spec], name[tf].compat.integral_types]] begin[:] return[list[[<ast.Name object at 0x7da20c993490>, <ast.Name object at 0x7da20c993fa0>]]]
keyword[def] identifier[_kernel] ( identifier[kernel_spec] ): literal[string] keyword[if] identifier[isinstance] ( identifier[kernel_spec] , identifier[tf] . identifier[compat] . identifier[integral_types] ): keyword[return] [ identifier[kernel_spec] , identifier[kernel_spec] ] keyword[elif] identifier[len] ( identifier[kernel_spec] )== literal[int] : keyword[return] [ identifier[kernel_spec] [ literal[int] ], identifier[kernel_spec] [ literal[int] ]] keyword[else] : keyword[assert] identifier[len] ( identifier[kernel_spec] )== literal[int] keyword[return] identifier[kernel_spec]
def _kernel(kernel_spec): """Expands the kernel spec into a length 2 list. Args: kernel_spec: An integer or a length 1 or 2 sequence that is expanded to a list. Returns: A length 2 list. """ if isinstance(kernel_spec, tf.compat.integral_types): return [kernel_spec, kernel_spec] # depends on [control=['if'], data=[]] elif len(kernel_spec) == 1: return [kernel_spec[0], kernel_spec[0]] # depends on [control=['if'], data=[]] else: assert len(kernel_spec) == 2 return kernel_spec
def haurwitz(apparent_zenith):
    '''Determine clear sky GHI using the Haurwitz model.

    Implements the Haurwitz clear sky model for global horizontal
    irradiance (GHI) as presented in [1, 2]. A report on clear sky models
    found the Haurwitz model to have the best performance in terms of
    average monthly error among models which require only zenith angle
    [3].

    Parameters
    ----------
    apparent_zenith : Series
        The apparent (refraction corrected) sun zenith angle in degrees.

    Returns
    -------
    ghi : DataFrame
        The modeled global horizonal irradiance in W/m^2 provided
        by the Haurwitz clear-sky model.

    References
    ----------
    [1] B. Haurwitz, "Insolation in Relation to Cloudiness and Cloud
     Density," Journal of Meteorology, vol. 2, pp. 154-166, 1945.

    [2] B. Haurwitz, "Insolation in Relation to Cloud Type," Journal of
     Meteorology, vol. 3, pp. 123-124, 1946.

    [3] M. Reno, C. Hansen, and J. Stein, "Global Horizontal Irradiance
     Clear Sky Models: Implementation and Analysis", Sandia National
     Laboratories, SAND2012-2389, 2012.
    '''
    cos_zen = tools.cosd(apparent_zenith.values)

    # Irradiance defaults to zero wherever the sun is at or below the
    # horizon (cos(zenith) <= 0); only daytime points get the model value.
    ghi = np.zeros_like(apparent_zenith.values)
    day = cos_zen > 0
    ghi[day] = 1098.0 * cos_zen[day] * np.exp(-0.059 / cos_zen[day])

    return pd.DataFrame(data=ghi,
                        index=apparent_zenith.index,
                        columns=['ghi'])
def function[haurwitz, parameter[apparent_zenith]]: constant[ Determine clear sky GHI using the Haurwitz model. Implements the Haurwitz clear sky model for global horizontal irradiance (GHI) as presented in [1, 2]. A report on clear sky models found the Haurwitz model to have the best performance in terms of average monthly error among models which require only zenith angle [3]. Parameters ---------- apparent_zenith : Series The apparent (refraction corrected) sun zenith angle in degrees. Returns ------- ghi : DataFrame The modeled global horizonal irradiance in W/m^2 provided by the Haurwitz clear-sky model. References ---------- [1] B. Haurwitz, "Insolation in Relation to Cloudiness and Cloud Density," Journal of Meteorology, vol. 2, pp. 154-166, 1945. [2] B. Haurwitz, "Insolation in Relation to Cloud Type," Journal of Meteorology, vol. 3, pp. 123-124, 1946. [3] M. Reno, C. Hansen, and J. Stein, "Global Horizontal Irradiance Clear Sky Models: Implementation and Analysis", Sandia National Laboratories, SAND2012-2389, 2012. ] variable[cos_zenith] assign[=] call[name[tools].cosd, parameter[name[apparent_zenith].values]] variable[clearsky_ghi] assign[=] call[name[np].zeros_like, parameter[name[apparent_zenith].values]] variable[cos_zen_gte_0] assign[=] compare[name[cos_zenith] greater[>] constant[0]] call[name[clearsky_ghi]][name[cos_zen_gte_0]] assign[=] binary_operation[binary_operation[constant[1098.0] * call[name[cos_zenith]][name[cos_zen_gte_0]]] * call[name[np].exp, parameter[binary_operation[<ast.UnaryOp object at 0x7da1b1aa5240> / call[name[cos_zenith]][name[cos_zen_gte_0]]]]]] variable[df_out] assign[=] call[name[pd].DataFrame, parameter[]] return[name[df_out]]
keyword[def] identifier[haurwitz] ( identifier[apparent_zenith] ): literal[string] identifier[cos_zenith] = identifier[tools] . identifier[cosd] ( identifier[apparent_zenith] . identifier[values] ) identifier[clearsky_ghi] = identifier[np] . identifier[zeros_like] ( identifier[apparent_zenith] . identifier[values] ) identifier[cos_zen_gte_0] = identifier[cos_zenith] > literal[int] identifier[clearsky_ghi] [ identifier[cos_zen_gte_0] ]=( literal[int] * identifier[cos_zenith] [ identifier[cos_zen_gte_0] ]* identifier[np] . identifier[exp] (- literal[int] / identifier[cos_zenith] [ identifier[cos_zen_gte_0] ])) identifier[df_out] = identifier[pd] . identifier[DataFrame] ( identifier[index] = identifier[apparent_zenith] . identifier[index] , identifier[data] = identifier[clearsky_ghi] , identifier[columns] =[ literal[string] ]) keyword[return] identifier[df_out]
def haurwitz(apparent_zenith): """ Determine clear sky GHI using the Haurwitz model. Implements the Haurwitz clear sky model for global horizontal irradiance (GHI) as presented in [1, 2]. A report on clear sky models found the Haurwitz model to have the best performance in terms of average monthly error among models which require only zenith angle [3]. Parameters ---------- apparent_zenith : Series The apparent (refraction corrected) sun zenith angle in degrees. Returns ------- ghi : DataFrame The modeled global horizonal irradiance in W/m^2 provided by the Haurwitz clear-sky model. References ---------- [1] B. Haurwitz, "Insolation in Relation to Cloudiness and Cloud Density," Journal of Meteorology, vol. 2, pp. 154-166, 1945. [2] B. Haurwitz, "Insolation in Relation to Cloud Type," Journal of Meteorology, vol. 3, pp. 123-124, 1946. [3] M. Reno, C. Hansen, and J. Stein, "Global Horizontal Irradiance Clear Sky Models: Implementation and Analysis", Sandia National Laboratories, SAND2012-2389, 2012. """ cos_zenith = tools.cosd(apparent_zenith.values) clearsky_ghi = np.zeros_like(apparent_zenith.values) cos_zen_gte_0 = cos_zenith > 0 clearsky_ghi[cos_zen_gte_0] = 1098.0 * cos_zenith[cos_zen_gte_0] * np.exp(-0.059 / cos_zenith[cos_zen_gte_0]) df_out = pd.DataFrame(index=apparent_zenith.index, data=clearsky_ghi, columns=['ghi']) return df_out
def reverse_whois(self, query, exclude=None, scope='current', mode=None, **kwargs):
    """Search Whois records for one or more terms.

    Args:
        query: One or more terms to search for in the Whois record,
            as a Python list or separated with the pipe character ( | ).
        exclude: Optional term(s) whose matches should be filtered out,
            in the same format as ``query``. Defaults to no exclusions.
        scope: Search scope passed to the API; defaults to ``'current'``.
        mode: Optional mode parameter forwarded to the API.
        **kwargs: Additional query parameters forwarded verbatim.

    Returns:
        Whatever ``self._results`` produces for the
        '/v1/reverse-whois' endpoint.
    """
    # Use None instead of a mutable default argument ([]); an omitted
    # ``exclude`` still means "exclude nothing", exactly as before.
    if exclude is None:
        exclude = []
    return self._results('reverse-whois', '/v1/reverse-whois',
                         terms=delimited(query),
                         exclude=delimited(exclude),
                         scope=scope,
                         mode=mode,
                         **kwargs)
def function[reverse_whois, parameter[self, query, exclude, scope, mode]]: constant[List of one or more terms to search for in the Whois record, as a Python list or separated with the pipe character ( | ). ] return[call[name[self]._results, parameter[constant[reverse-whois], constant[/v1/reverse-whois]]]]
keyword[def] identifier[reverse_whois] ( identifier[self] , identifier[query] , identifier[exclude] =[], identifier[scope] = literal[string] , identifier[mode] = keyword[None] ,** identifier[kwargs] ): literal[string] keyword[return] identifier[self] . identifier[_results] ( literal[string] , literal[string] , identifier[terms] = identifier[delimited] ( identifier[query] ), identifier[exclude] = identifier[delimited] ( identifier[exclude] ), identifier[scope] = identifier[scope] , identifier[mode] = identifier[mode] ,** identifier[kwargs] )
def reverse_whois(self, query, exclude=[], scope='current', mode=None, **kwargs): """List of one or more terms to search for in the Whois record, as a Python list or separated with the pipe character ( | ). """ return self._results('reverse-whois', '/v1/reverse-whois', terms=delimited(query), exclude=delimited(exclude), scope=scope, mode=mode, **kwargs)
def loads(string):
    """
    Deserializes Java objects and primitive data serialized by
    ObjectOutputStream from a string.
    """
    # Wrap the raw string in a file-like stream for the unmarshaller.
    stream = StringIO.StringIO(string)
    unmarshaller = JavaObjectUnmarshaller(stream)
    unmarshaller.add_transformer(DefaultObjectTransformer())
    return unmarshaller.readObject()
def function[loads, parameter[string]]: constant[ Deserializes Java objects and primitive data serialized by ObjectOutputStream from a string. ] variable[f] assign[=] call[name[StringIO].StringIO, parameter[name[string]]] variable[marshaller] assign[=] call[name[JavaObjectUnmarshaller], parameter[name[f]]] call[name[marshaller].add_transformer, parameter[call[name[DefaultObjectTransformer], parameter[]]]] return[call[name[marshaller].readObject, parameter[]]]
keyword[def] identifier[loads] ( identifier[string] ): literal[string] identifier[f] = identifier[StringIO] . identifier[StringIO] ( identifier[string] ) identifier[marshaller] = identifier[JavaObjectUnmarshaller] ( identifier[f] ) identifier[marshaller] . identifier[add_transformer] ( identifier[DefaultObjectTransformer] ()) keyword[return] identifier[marshaller] . identifier[readObject] ()
def loads(string): """ Deserializes Java objects and primitive data serialized by ObjectOutputStream from a string. """ f = StringIO.StringIO(string) marshaller = JavaObjectUnmarshaller(f) marshaller.add_transformer(DefaultObjectTransformer()) return marshaller.readObject()
def get_frequency_grid(times, samplesperpeak=5, nyquistfactor=5,
                       minfreq=None, maxfreq=None, returnf0dfnf=False):
    '''This calculates a frequency grid for the period finding functions in
    this module.

    Based on the autofrequency function in astropy.stats.lombscargle.

    http://docs.astropy.org/en/stable/_modules/astropy/stats/lombscargle/core.html#LombScargle.autofrequency
    '''
    time_span = times.max() - times.min()
    npoints = times.size

    # Frequency resolution: oversample the natural 1/T spacing by
    # samplesperpeak.
    freq_step = 1.0 / time_span / samplesperpeak

    # Start half a frequency step above zero unless a minimum is given.
    if minfreq is None:
        start_freq = 0.5 * freq_step
    else:
        start_freq = minfreq

    # Number of grid points: either up to the requested maximum frequency,
    # or up to nyquistfactor times the (pseudo-)Nyquist frequency.
    if maxfreq is None:
        nfreq = int(0.5 * samplesperpeak * nyquistfactor * npoints)
    else:
        nfreq = int(npceil((maxfreq - start_freq) / freq_step))

    freq_grid = start_freq + freq_step * nparange(nfreq)

    if returnf0dfnf:
        return start_freq, freq_step, nfreq, freq_grid
    return freq_grid
def function[get_frequency_grid, parameter[times, samplesperpeak, nyquistfactor, minfreq, maxfreq, returnf0dfnf]]: constant[This calculates a frequency grid for the period finding functions in this module. Based on the autofrequency function in astropy.stats.lombscargle. http://docs.astropy.org/en/stable/_modules/astropy/stats/lombscargle/core.html#LombScargle.autofrequency ] variable[baseline] assign[=] binary_operation[call[name[times].max, parameter[]] - call[name[times].min, parameter[]]] variable[nsamples] assign[=] name[times].size variable[df] assign[=] binary_operation[binary_operation[constant[1.0] / name[baseline]] / name[samplesperpeak]] if compare[name[minfreq] is_not constant[None]] begin[:] variable[f0] assign[=] name[minfreq] if compare[name[maxfreq] is_not constant[None]] begin[:] variable[Nf] assign[=] call[name[int], parameter[call[name[npceil], parameter[binary_operation[binary_operation[name[maxfreq] - name[f0]] / name[df]]]]]] if name[returnf0dfnf] begin[:] return[tuple[[<ast.Name object at 0x7da20c992290>, <ast.Name object at 0x7da20c992ce0>, <ast.Name object at 0x7da20c992110>, <ast.BinOp object at 0x7da20c9904f0>]]]
keyword[def] identifier[get_frequency_grid] ( identifier[times] , identifier[samplesperpeak] = literal[int] , identifier[nyquistfactor] = literal[int] , identifier[minfreq] = keyword[None] , identifier[maxfreq] = keyword[None] , identifier[returnf0dfnf] = keyword[False] ): literal[string] identifier[baseline] = identifier[times] . identifier[max] ()- identifier[times] . identifier[min] () identifier[nsamples] = identifier[times] . identifier[size] identifier[df] = literal[int] / identifier[baseline] / identifier[samplesperpeak] keyword[if] identifier[minfreq] keyword[is] keyword[not] keyword[None] : identifier[f0] = identifier[minfreq] keyword[else] : identifier[f0] = literal[int] * identifier[df] keyword[if] identifier[maxfreq] keyword[is] keyword[not] keyword[None] : identifier[Nf] = identifier[int] ( identifier[npceil] (( identifier[maxfreq] - identifier[f0] )/ identifier[df] )) keyword[else] : identifier[Nf] = identifier[int] ( literal[int] * identifier[samplesperpeak] * identifier[nyquistfactor] * identifier[nsamples] ) keyword[if] identifier[returnf0dfnf] : keyword[return] identifier[f0] , identifier[df] , identifier[Nf] , identifier[f0] + identifier[df] * identifier[nparange] ( identifier[Nf] ) keyword[else] : keyword[return] identifier[f0] + identifier[df] * identifier[nparange] ( identifier[Nf] )
def get_frequency_grid(times, samplesperpeak=5, nyquistfactor=5, minfreq=None, maxfreq=None, returnf0dfnf=False): """This calculates a frequency grid for the period finding functions in this module. Based on the autofrequency function in astropy.stats.lombscargle. http://docs.astropy.org/en/stable/_modules/astropy/stats/lombscargle/core.html#LombScargle.autofrequency """ baseline = times.max() - times.min() nsamples = times.size df = 1.0 / baseline / samplesperpeak if minfreq is not None: f0 = minfreq # depends on [control=['if'], data=['minfreq']] else: f0 = 0.5 * df if maxfreq is not None: Nf = int(npceil((maxfreq - f0) / df)) # depends on [control=['if'], data=['maxfreq']] else: Nf = int(0.5 * samplesperpeak * nyquistfactor * nsamples) if returnf0dfnf: return (f0, df, Nf, f0 + df * nparange(Nf)) # depends on [control=['if'], data=[]] else: return f0 + df * nparange(Nf)
def remove_file_data(file_id, silent=True):
    """Remove file instance and associated data.

    :param file_id: The :class:`invenio_files_rest.models.FileInstance` ID.
    :param silent: It stops propagation of a possible arised IntegrityError
        exception. (Default: ``True``)
    :raises sqlalchemy.exc.IntegrityError: Raised if the database removal
        goes wrong and silent is set to ``False``.
    """
    try:
        # Delete the FileInstance row and commit first, so database
        # integrity constraints are checked and enforced before any
        # on-disk data is touched.
        file_instance = FileInstance.get(file_id)
        if not file_instance.writable:
            return
        file_instance.delete()
        db.session.commit()
        # Remove the on-disk data last. If this step fails after the
        # commit above succeeded, a dangling file may remain on disk.
        file_instance.storage().delete()
    except IntegrityError:
        if not silent:
            raise
def function[remove_file_data, parameter[file_id, silent]]: constant[Remove file instance and associated data. :param file_id: The :class:`invenio_files_rest.models.FileInstance` ID. :param silent: It stops propagation of a possible arised IntegrityError exception. (Default: ``True``) :raises sqlalchemy.exc.IntegrityError: Raised if the database removal goes wrong and silent is set to ``False``. ] <ast.Try object at 0x7da18f58c250>
keyword[def] identifier[remove_file_data] ( identifier[file_id] , identifier[silent] = keyword[True] ): literal[string] keyword[try] : identifier[f] = identifier[FileInstance] . identifier[get] ( identifier[file_id] ) keyword[if] keyword[not] identifier[f] . identifier[writable] : keyword[return] identifier[f] . identifier[delete] () identifier[db] . identifier[session] . identifier[commit] () identifier[f] . identifier[storage] (). identifier[delete] () keyword[except] identifier[IntegrityError] : keyword[if] keyword[not] identifier[silent] : keyword[raise]
def remove_file_data(file_id, silent=True): """Remove file instance and associated data. :param file_id: The :class:`invenio_files_rest.models.FileInstance` ID. :param silent: It stops propagation of a possible arised IntegrityError exception. (Default: ``True``) :raises sqlalchemy.exc.IntegrityError: Raised if the database removal goes wrong and silent is set to ``False``. """ try: # First remove FileInstance from database and commit transaction to # ensure integrity constraints are checked and enforced. f = FileInstance.get(file_id) if not f.writable: return # depends on [control=['if'], data=[]] f.delete() db.session.commit() # Next, remove the file on disk. This leaves the possibility of having # a file on disk dangling in case the database removal works, and the # disk file removal doesn't work. f.storage().delete() # depends on [control=['try'], data=[]] except IntegrityError: if not silent: raise # depends on [control=['if'], data=[]] # depends on [control=['except'], data=[]]
def add_lvl_to_ui(self, level, header):
    """Insert the level and header into the ui.

    :param level: a newly created level
    :type level: :class:`jukeboxcore.gui.widgets.browser.AbstractLevel`
    :param header: a newly created header
    :type header: QtCore.QWidget|None
    :returns: None
    :rtype: None
    :raises: None
    """
    grid = self.layout()
    row = grid.rowCount() + 1
    # Levels live in column 1; the optional header sits beside them in
    # column 0.
    grid.addWidget(level, row, 1)
    if header is not None:
        grid.addWidget(header, row, 0)
    # Let the level column absorb any extra horizontal space.
    grid.setColumnStretch(1, 1)
def function[add_lvl_to_ui, parameter[self, level, header]]: constant[Insert the level and header into the ui. :param level: a newly created level :type level: :class:`jukeboxcore.gui.widgets.browser.AbstractLevel` :param header: a newly created header :type header: QtCore.QWidget|None :returns: None :rtype: None :raises: None ] variable[lay] assign[=] call[name[self].layout, parameter[]] variable[rc] assign[=] call[name[lay].rowCount, parameter[]] call[name[lay].addWidget, parameter[name[level], binary_operation[name[rc] + constant[1]], constant[1]]] if compare[name[header] is_not constant[None]] begin[:] call[name[lay].addWidget, parameter[name[header], binary_operation[name[rc] + constant[1]], constant[0]]] call[name[lay].setColumnStretch, parameter[constant[1], constant[1]]]
keyword[def] identifier[add_lvl_to_ui] ( identifier[self] , identifier[level] , identifier[header] ): literal[string] identifier[lay] = identifier[self] . identifier[layout] () identifier[rc] = identifier[lay] . identifier[rowCount] () identifier[lay] . identifier[addWidget] ( identifier[level] , identifier[rc] + literal[int] , literal[int] ) keyword[if] identifier[header] keyword[is] keyword[not] keyword[None] : identifier[lay] . identifier[addWidget] ( identifier[header] , identifier[rc] + literal[int] , literal[int] ) identifier[lay] . identifier[setColumnStretch] ( literal[int] , literal[int] )
def add_lvl_to_ui(self, level, header): """Insert the level and header into the ui. :param level: a newly created level :type level: :class:`jukeboxcore.gui.widgets.browser.AbstractLevel` :param header: a newly created header :type header: QtCore.QWidget|None :returns: None :rtype: None :raises: None """ lay = self.layout() rc = lay.rowCount() lay.addWidget(level, rc + 1, 1) if header is not None: lay.addWidget(header, rc + 1, 0) # depends on [control=['if'], data=['header']] lay.setColumnStretch(1, 1)
def load_healthchecks(self):
    """
    Loads healthchecks.
    """
    self.load_default_healthchecks()
    # Autodiscovery is on by default but can be disabled via settings.
    autodiscover = getattr(settings, 'AUTODISCOVER_HEALTHCHECKS', True)
    if autodiscover:
        self.autodiscover_healthchecks()
    self._registry_loaded = True
def function[load_healthchecks, parameter[self]]: constant[ Loads healthchecks. ] call[name[self].load_default_healthchecks, parameter[]] if call[name[getattr], parameter[name[settings], constant[AUTODISCOVER_HEALTHCHECKS], constant[True]]] begin[:] call[name[self].autodiscover_healthchecks, parameter[]] name[self]._registry_loaded assign[=] constant[True]
keyword[def] identifier[load_healthchecks] ( identifier[self] ): literal[string] identifier[self] . identifier[load_default_healthchecks] () keyword[if] identifier[getattr] ( identifier[settings] , literal[string] , keyword[True] ): identifier[self] . identifier[autodiscover_healthchecks] () identifier[self] . identifier[_registry_loaded] = keyword[True]
def load_healthchecks(self): """ Loads healthchecks. """ self.load_default_healthchecks() if getattr(settings, 'AUTODISCOVER_HEALTHCHECKS', True): self.autodiscover_healthchecks() # depends on [control=['if'], data=[]] self._registry_loaded = True
def is_valid_bucket_notification_config(notifications):
    """
    Validate the notifications config structure

    :param notifications: Dictionary with specific structure. Top-level
        keys name a notification target type; each maps to a list of
        service-configuration dictionaries.
    :return: True if input is a valid bucket notifications structure.
       Raise :exc:`InvalidArgumentError` otherwise.
    :raises TypeError: If ``notifications`` is not a dictionary.
    """
    # check if notifications is a dict.
    if not isinstance(notifications, dict):
        raise TypeError('notifications configuration must be a dictionary')

    if len(notifications) == 0:
        raise InvalidArgumentError(
            'notifications configuration may not be empty'
        )

    VALID_NOTIFICATION_KEYS = set([
        "TopicConfigurations",
        "QueueConfigurations",
        "CloudFunctionConfigurations",
    ])

    VALID_SERVICE_CONFIG_KEYS = set([
        'Id',
        'Arn',
        'Events',
        'Filter',
    ])

    NOTIFICATION_EVENTS = set([
        's3:ReducedRedundancyLostObject',
        's3:ObjectCreated:*',
        's3:ObjectCreated:Put',
        's3:ObjectCreated:Post',
        's3:ObjectCreated:Copy',
        's3:ObjectCreated:CompleteMultipartUpload',
        's3:ObjectRemoved:*',
        's3:ObjectRemoved:Delete',
        's3:ObjectRemoved:DeleteMarkerCreated',
    ])

    for key, value in notifications.items():
        # check if key names are valid
        if key not in VALID_NOTIFICATION_KEYS:
            raise InvalidArgumentError((
                '{} is an invalid key '
                'for notifications configuration').format(key))

        # check if config values conform
        # first check if value is a list
        if not isinstance(value, list):
            raise InvalidArgumentError((
                'The value for key "{}" in the notifications '
                'configuration must be a list.').format(key))

        for service_config in value:
            # check type matches
            if not isinstance(service_config, dict):
                raise InvalidArgumentError((
                    'Each service configuration item for "{}" must be a '
                    'dictionary').format(key))

            # check keys are valid
            for skey in service_config.keys():
                if skey not in VALID_SERVICE_CONFIG_KEYS:
                    raise InvalidArgumentError((
                        '{} is an invalid key for a service '
                        'configuration item').format(skey))

            # check for required keys
            arn = service_config.get('Arn', '')
            if arn == '':
                raise InvalidArgumentError(
                    'Arn key in service config must be present and has to be '
                    'non-empty string'
                )
            events = service_config.get('Events', [])
            if len(events) < 1:
                raise InvalidArgumentError(
                    'At least one event must be specified in a service config'
                )
            if not isinstance(events, list):
                raise InvalidArgumentError('"Events" must be a list of strings '
                                           'in a service configuration')

            # check if 'Id' key is present, it should be string or bytes.
            # NOTE(review): ``basestring`` is Python 2 only; on Python 3
            # this line raises NameError -- confirm supported interpreter.
            if not isinstance(service_config.get('Id', ''), basestring):
                raise InvalidArgumentError('"Id" key must be a string')

            for event in events:
                if event not in NOTIFICATION_EVENTS:
                    raise InvalidArgumentError(
                        '{} is not a valid event. Valid '
                        'events are: {}'.format(event, NOTIFICATION_EVENTS))

            if 'Filter' in service_config:
                exception_msg = (
                    '{} - If a Filter key is given, it must be a '
                    'dictionary, the dictionary must have the '
                    'key "Key", and its value must be an object, with '
                    'a key named "FilterRules" which must be a non-empty list.'
                ).format(
                    service_config['Filter']
                )
                try:
                    filter_rules = service_config.get('Filter', {}).get(
                        'Key', {}).get('FilterRules', [])
                    if not isinstance(filter_rules, list):
                        raise InvalidArgumentError(exception_msg)
                    if len(filter_rules) < 1:
                        raise InvalidArgumentError(exception_msg)
                except AttributeError:
                    raise InvalidArgumentError(exception_msg)

                for filter_rule in filter_rules:
                    try:
                        # Bind rule-local names here: the previous code
                        # reused ``value``, shadowing the outer
                        # ``notifications.items()`` loop variable.
                        rule_name = filter_rule['Name']
                        rule_value = filter_rule['Value']  # presence check only
                    except KeyError:
                        raise InvalidArgumentError((
                            '{} - a FilterRule dictionary must have "Name" '
                            'and "Value" keys').format(filter_rule))

                    if rule_name not in ['prefix', 'suffix']:
                        raise InvalidArgumentError((
                            '{} - The "Name" key in a filter rule must be '
                            'either "prefix" or "suffix"').format(rule_name))

    return True
def function[is_valid_bucket_notification_config, parameter[notifications]]: constant[ Validate the notifications config structure :param notifications: Dictionary with specific structure. :return: True if input is a valid bucket notifications structure. Raise :exc:`InvalidArgumentError` otherwise. ] if <ast.UnaryOp object at 0x7da1b1ece3e0> begin[:] <ast.Raise object at 0x7da1b1ecd000> if compare[call[name[len], parameter[name[notifications]]] equal[==] constant[0]] begin[:] <ast.Raise object at 0x7da1b1ecc160> variable[VALID_NOTIFICATION_KEYS] assign[=] call[name[set], parameter[list[[<ast.Constant object at 0x7da1b1ece320>, <ast.Constant object at 0x7da1b1ece350>, <ast.Constant object at 0x7da1b1ecda80>]]]] variable[VALID_SERVICE_CONFIG_KEYS] assign[=] call[name[set], parameter[list[[<ast.Constant object at 0x7da1b1ecd7b0>, <ast.Constant object at 0x7da1b1ece170>, <ast.Constant object at 0x7da1b1ecc670>, <ast.Constant object at 0x7da1b1eccd90>]]]] variable[NOTIFICATION_EVENTS] assign[=] call[name[set], parameter[list[[<ast.Constant object at 0x7da1b1ecd510>, <ast.Constant object at 0x7da1b1ecebf0>, <ast.Constant object at 0x7da1b1ecc1c0>, <ast.Constant object at 0x7da1b1ecf0d0>, <ast.Constant object at 0x7da1b1eccca0>, <ast.Constant object at 0x7da1b1ece470>, <ast.Constant object at 0x7da1b1ecdea0>, <ast.Constant object at 0x7da1b1ece530>, <ast.Constant object at 0x7da1b1ece740>]]]] for taget[tuple[[<ast.Name object at 0x7da1b1ecdbd0>, <ast.Name object at 0x7da1b1ecc6d0>]]] in starred[call[name[notifications].items, parameter[]]] begin[:] if compare[name[key] <ast.NotIn object at 0x7da2590d7190> name[VALID_NOTIFICATION_KEYS]] begin[:] <ast.Raise object at 0x7da1b1ecd180> if <ast.UnaryOp object at 0x7da1b1ecc070> begin[:] <ast.Raise object at 0x7da1b1eceda0> for taget[name[service_config]] in starred[name[value]] begin[:] if <ast.UnaryOp object at 0x7da1b1eccb20> begin[:] <ast.Raise object at 0x7da1b1ecd8a0> for taget[name[skey]] in 
starred[call[name[service_config].keys, parameter[]]] begin[:] if compare[name[skey] <ast.NotIn object at 0x7da2590d7190> name[VALID_SERVICE_CONFIG_KEYS]] begin[:] <ast.Raise object at 0x7da20c9932e0> variable[arn] assign[=] call[name[service_config].get, parameter[constant[Arn], constant[]]] if compare[name[arn] equal[==] constant[]] begin[:] <ast.Raise object at 0x7da20c990370> variable[events] assign[=] call[name[service_config].get, parameter[constant[Events], list[[]]]] if compare[call[name[len], parameter[name[events]]] less[<] constant[1]] begin[:] <ast.Raise object at 0x7da1b22ebcd0> if <ast.UnaryOp object at 0x7da1b22e8520> begin[:] <ast.Raise object at 0x7da1b22e93f0> if <ast.UnaryOp object at 0x7da1b22e81c0> begin[:] <ast.Raise object at 0x7da1b22e8190> for taget[name[event]] in starred[name[events]] begin[:] if compare[name[event] <ast.NotIn object at 0x7da2590d7190> name[NOTIFICATION_EVENTS]] begin[:] <ast.Raise object at 0x7da1b22e95a0> if compare[constant[Filter] in name[service_config]] begin[:] variable[exception_msg] assign[=] call[constant[{} - If a Filter key is given, it must be a dictionary, the dictionary must have the key "Key", and its value must be an object, with a key named "FilterRules" which must be a non-empty list.].format, parameter[call[name[service_config]][constant[Filter]]]] <ast.Try object at 0x7da1b1ecc970> for taget[name[filter_rule]] in starred[name[filter_rules]] begin[:] <ast.Try object at 0x7da1b1ece290> if compare[name[name] <ast.NotIn object at 0x7da2590d7190> list[[<ast.Constant object at 0x7da1b1ecee90>, <ast.Constant object at 0x7da1b1ecc9d0>]]] begin[:] <ast.Raise object at 0x7da1b1ecdb70> return[constant[True]]
keyword[def] identifier[is_valid_bucket_notification_config] ( identifier[notifications] ): literal[string] keyword[if] keyword[not] identifier[isinstance] ( identifier[notifications] , identifier[dict] ): keyword[raise] identifier[TypeError] ( literal[string] ) keyword[if] identifier[len] ( identifier[notifications] )== literal[int] : keyword[raise] identifier[InvalidArgumentError] ( literal[string] ) identifier[VALID_NOTIFICATION_KEYS] = identifier[set] ([ literal[string] , literal[string] , literal[string] , ]) identifier[VALID_SERVICE_CONFIG_KEYS] = identifier[set] ([ literal[string] , literal[string] , literal[string] , literal[string] , ]) identifier[NOTIFICATION_EVENTS] = identifier[set] ([ literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , ]) keyword[for] identifier[key] , identifier[value] keyword[in] identifier[notifications] . identifier[items] (): keyword[if] identifier[key] keyword[not] keyword[in] identifier[VALID_NOTIFICATION_KEYS] : keyword[raise] identifier[InvalidArgumentError] (( literal[string] literal[string] ). identifier[format] ( identifier[key] )) keyword[if] keyword[not] identifier[isinstance] ( identifier[value] , identifier[list] ): keyword[raise] identifier[InvalidArgumentError] (( literal[string] literal[string] ). identifier[format] ( identifier[key] )) keyword[for] identifier[service_config] keyword[in] identifier[value] : keyword[if] keyword[not] identifier[isinstance] ( identifier[service_config] , identifier[dict] ): keyword[raise] identifier[InvalidArgumentError] (( literal[string] literal[string] ). identifier[format] ( identifier[key] )) keyword[for] identifier[skey] keyword[in] identifier[service_config] . identifier[keys] (): keyword[if] identifier[skey] keyword[not] keyword[in] identifier[VALID_SERVICE_CONFIG_KEYS] : keyword[raise] identifier[InvalidArgumentError] (( literal[string] literal[string] ). 
identifier[format] ( identifier[skey] )) identifier[arn] = identifier[service_config] . identifier[get] ( literal[string] , literal[string] ) keyword[if] identifier[arn] == literal[string] : keyword[raise] identifier[InvalidArgumentError] ( literal[string] literal[string] ) identifier[events] = identifier[service_config] . identifier[get] ( literal[string] ,[]) keyword[if] identifier[len] ( identifier[events] )< literal[int] : keyword[raise] identifier[InvalidArgumentError] ( literal[string] ) keyword[if] keyword[not] identifier[isinstance] ( identifier[events] , identifier[list] ): keyword[raise] identifier[InvalidArgumentError] ( literal[string] literal[string] ) keyword[if] keyword[not] identifier[isinstance] ( identifier[service_config] . identifier[get] ( literal[string] , literal[string] ), identifier[basestring] ): keyword[raise] identifier[InvalidArgumentError] ( literal[string] ) keyword[for] identifier[event] keyword[in] identifier[events] : keyword[if] identifier[event] keyword[not] keyword[in] identifier[NOTIFICATION_EVENTS] : keyword[raise] identifier[InvalidArgumentError] ( literal[string] literal[string] . identifier[format] ( identifier[event] , identifier[NOTIFICATION_EVENTS] )) keyword[if] literal[string] keyword[in] identifier[service_config] : identifier[exception_msg] =( literal[string] literal[string] literal[string] literal[string] ). identifier[format] ( identifier[service_config] [ literal[string] ] ) keyword[try] : identifier[filter_rules] = identifier[service_config] . identifier[get] ( literal[string] ,{}). identifier[get] ( literal[string] ,{}). 
identifier[get] ( literal[string] ,[]) keyword[if] keyword[not] identifier[isinstance] ( identifier[filter_rules] , identifier[list] ): keyword[raise] identifier[InvalidArgumentError] ( identifier[exception_msg] ) keyword[if] identifier[len] ( identifier[filter_rules] )< literal[int] : keyword[raise] identifier[InvalidArgumentError] ( identifier[exception_msg] ) keyword[except] identifier[AttributeError] : keyword[raise] identifier[InvalidArgumentError] ( identifier[exception_msg] ) keyword[for] identifier[filter_rule] keyword[in] identifier[filter_rules] : keyword[try] : identifier[name] = identifier[filter_rule] [ literal[string] ] identifier[value] = identifier[filter_rule] [ literal[string] ] keyword[except] identifier[KeyError] : keyword[raise] identifier[InvalidArgumentError] (( literal[string] literal[string] ). identifier[format] ( identifier[filter_rule] )) keyword[if] identifier[name] keyword[not] keyword[in] [ literal[string] , literal[string] ]: keyword[raise] identifier[InvalidArgumentError] (( literal[string] literal[string] ). identifier[format] ( identifier[name] )) keyword[return] keyword[True]
def is_valid_bucket_notification_config(notifications): """ Validate the notifications config structure :param notifications: Dictionary with specific structure. :return: True if input is a valid bucket notifications structure. Raise :exc:`InvalidArgumentError` otherwise. """ # check if notifications is a dict. if not isinstance(notifications, dict): raise TypeError('notifications configuration must be a dictionary') # depends on [control=['if'], data=[]] if len(notifications) == 0: raise InvalidArgumentError('notifications configuration may not be empty') # depends on [control=['if'], data=[]] VALID_NOTIFICATION_KEYS = set(['TopicConfigurations', 'QueueConfigurations', 'CloudFunctionConfigurations']) VALID_SERVICE_CONFIG_KEYS = set(['Id', 'Arn', 'Events', 'Filter']) NOTIFICATION_EVENTS = set(['s3:ReducedRedundancyLostObject', 's3:ObjectCreated:*', 's3:ObjectCreated:Put', 's3:ObjectCreated:Post', 's3:ObjectCreated:Copy', 's3:ObjectCreated:CompleteMultipartUpload', 's3:ObjectRemoved:*', 's3:ObjectRemoved:Delete', 's3:ObjectRemoved:DeleteMarkerCreated']) for (key, value) in notifications.items(): # check if key names are valid if key not in VALID_NOTIFICATION_KEYS: raise InvalidArgumentError('{} is an invalid key for notifications configuration'.format(key)) # depends on [control=['if'], data=['key']] # check if config values conform # first check if value is a list if not isinstance(value, list): raise InvalidArgumentError('The value for key "{}" in the notifications configuration must be a list.'.format(key)) # depends on [control=['if'], data=[]] for service_config in value: # check type matches if not isinstance(service_config, dict): raise InvalidArgumentError('Each service configuration item for "{}" must be a dictionary'.format(key)) # depends on [control=['if'], data=[]] # check keys are valid for skey in service_config.keys(): if skey not in VALID_SERVICE_CONFIG_KEYS: raise InvalidArgumentError('{} is an invalid key for a service configuration 
item'.format(skey)) # depends on [control=['if'], data=['skey']] # depends on [control=['for'], data=['skey']] # check for required keys arn = service_config.get('Arn', '') if arn == '': raise InvalidArgumentError('Arn key in service config must be present and has to be non-empty string') # depends on [control=['if'], data=[]] events = service_config.get('Events', []) if len(events) < 1: raise InvalidArgumentError('At least one event must be specified in a service config') # depends on [control=['if'], data=[]] if not isinstance(events, list): raise InvalidArgumentError('"Events" must be a list of strings in a service configuration') # depends on [control=['if'], data=[]] # check if 'Id' key is present, it should be string or bytes. if not isinstance(service_config.get('Id', ''), basestring): raise InvalidArgumentError('"Id" key must be a string') # depends on [control=['if'], data=[]] for event in events: if event not in NOTIFICATION_EVENTS: raise InvalidArgumentError('{} is not a valid event. 
Valid events are: {}'.format(event, NOTIFICATION_EVENTS)) # depends on [control=['if'], data=['event', 'NOTIFICATION_EVENTS']] # depends on [control=['for'], data=['event']] if 'Filter' in service_config: exception_msg = '{} - If a Filter key is given, it must be a dictionary, the dictionary must have the key "Key", and its value must be an object, with a key named "FilterRules" which must be a non-empty list.'.format(service_config['Filter']) try: filter_rules = service_config.get('Filter', {}).get('Key', {}).get('FilterRules', []) if not isinstance(filter_rules, list): raise InvalidArgumentError(exception_msg) # depends on [control=['if'], data=[]] if len(filter_rules) < 1: raise InvalidArgumentError(exception_msg) # depends on [control=['if'], data=[]] # depends on [control=['try'], data=[]] except AttributeError: raise InvalidArgumentError(exception_msg) # depends on [control=['except'], data=[]] for filter_rule in filter_rules: try: name = filter_rule['Name'] value = filter_rule['Value'] # depends on [control=['try'], data=[]] except KeyError: raise InvalidArgumentError('{} - a FilterRule dictionary must have "Name" and "Value" keys'.format(filter_rule)) # depends on [control=['except'], data=[]] if name not in ['prefix', 'suffix']: raise InvalidArgumentError('{} - The "Name" key in a filter rule must be either "prefix" or "suffix"'.format(name)) # depends on [control=['if'], data=['name']] # depends on [control=['for'], data=['filter_rule']] # depends on [control=['if'], data=['service_config']] # depends on [control=['for'], data=['service_config']] # depends on [control=['for'], data=[]] return True
def parse(input_string, prefix=''): """Parses the given DSL string and returns parsed results. Args: input_string (str): DSL string prefix (str): Optional prefix to add to every element name, useful to namespace things Returns: dict: Parsed content """ tree = parser.parse(input_string) visitor = ChatlVisitor(prefix) visit_parse_tree(tree, visitor) return visitor.parsed
def function[parse, parameter[input_string, prefix]]: constant[Parses the given DSL string and returns parsed results. Args: input_string (str): DSL string prefix (str): Optional prefix to add to every element name, useful to namespace things Returns: dict: Parsed content ] variable[tree] assign[=] call[name[parser].parse, parameter[name[input_string]]] variable[visitor] assign[=] call[name[ChatlVisitor], parameter[name[prefix]]] call[name[visit_parse_tree], parameter[name[tree], name[visitor]]] return[name[visitor].parsed]
keyword[def] identifier[parse] ( identifier[input_string] , identifier[prefix] = literal[string] ): literal[string] identifier[tree] = identifier[parser] . identifier[parse] ( identifier[input_string] ) identifier[visitor] = identifier[ChatlVisitor] ( identifier[prefix] ) identifier[visit_parse_tree] ( identifier[tree] , identifier[visitor] ) keyword[return] identifier[visitor] . identifier[parsed]
def parse(input_string, prefix=''): """Parses the given DSL string and returns parsed results. Args: input_string (str): DSL string prefix (str): Optional prefix to add to every element name, useful to namespace things Returns: dict: Parsed content """ tree = parser.parse(input_string) visitor = ChatlVisitor(prefix) visit_parse_tree(tree, visitor) return visitor.parsed
def exempt(self, obj): """ decorator to mark a view as exempt from htmlmin. """ name = '%s.%s' % (obj.__module__, obj.__name__) @wraps(obj) def __inner(*a, **k): return obj(*a, **k) self._exempt_routes.add(name) return __inner
def function[exempt, parameter[self, obj]]: constant[ decorator to mark a view as exempt from htmlmin. ] variable[name] assign[=] binary_operation[constant[%s.%s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Attribute object at 0x7da18ede6890>, <ast.Attribute object at 0x7da18ede6860>]]] def function[__inner, parameter[]]: return[call[name[obj], parameter[<ast.Starred object at 0x7da18ede43a0>]]] call[name[self]._exempt_routes.add, parameter[name[name]]] return[name[__inner]]
keyword[def] identifier[exempt] ( identifier[self] , identifier[obj] ): literal[string] identifier[name] = literal[string] %( identifier[obj] . identifier[__module__] , identifier[obj] . identifier[__name__] ) @ identifier[wraps] ( identifier[obj] ) keyword[def] identifier[__inner] (* identifier[a] ,** identifier[k] ): keyword[return] identifier[obj] (* identifier[a] ,** identifier[k] ) identifier[self] . identifier[_exempt_routes] . identifier[add] ( identifier[name] ) keyword[return] identifier[__inner]
def exempt(self, obj): """ decorator to mark a view as exempt from htmlmin. """ name = '%s.%s' % (obj.__module__, obj.__name__) @wraps(obj) def __inner(*a, **k): return obj(*a, **k) self._exempt_routes.add(name) return __inner
def _write_response_to_file(self, response): """ Write response to the appropriate section of the file at self.local_path. :param response: requests.Response: response containing stream-able data """ with open(self.local_path, 'r+b') as outfile: # open file for read/write (no truncate) outfile.seek(self.seek_amt) for chunk in response.iter_content(chunk_size=self.bytes_per_chunk): if chunk: # filter out keep-alive chunks outfile.write(chunk) self._on_bytes_read(len(chunk))
def function[_write_response_to_file, parameter[self, response]]: constant[ Write response to the appropriate section of the file at self.local_path. :param response: requests.Response: response containing stream-able data ] with call[name[open], parameter[name[self].local_path, constant[r+b]]] begin[:] call[name[outfile].seek, parameter[name[self].seek_amt]] for taget[name[chunk]] in starred[call[name[response].iter_content, parameter[]]] begin[:] if name[chunk] begin[:] call[name[outfile].write, parameter[name[chunk]]] call[name[self]._on_bytes_read, parameter[call[name[len], parameter[name[chunk]]]]]
keyword[def] identifier[_write_response_to_file] ( identifier[self] , identifier[response] ): literal[string] keyword[with] identifier[open] ( identifier[self] . identifier[local_path] , literal[string] ) keyword[as] identifier[outfile] : identifier[outfile] . identifier[seek] ( identifier[self] . identifier[seek_amt] ) keyword[for] identifier[chunk] keyword[in] identifier[response] . identifier[iter_content] ( identifier[chunk_size] = identifier[self] . identifier[bytes_per_chunk] ): keyword[if] identifier[chunk] : identifier[outfile] . identifier[write] ( identifier[chunk] ) identifier[self] . identifier[_on_bytes_read] ( identifier[len] ( identifier[chunk] ))
def _write_response_to_file(self, response): """ Write response to the appropriate section of the file at self.local_path. :param response: requests.Response: response containing stream-able data """ with open(self.local_path, 'r+b') as outfile: # open file for read/write (no truncate) outfile.seek(self.seek_amt) for chunk in response.iter_content(chunk_size=self.bytes_per_chunk): if chunk: # filter out keep-alive chunks outfile.write(chunk) self._on_bytes_read(len(chunk)) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['chunk']] # depends on [control=['with'], data=['outfile']]
def compute_checksum(line): """Compute the TLE checksum for the given line.""" return sum((int(c) if c.isdigit() else c == '-') for c in line[0:68]) % 10
def function[compute_checksum, parameter[line]]: constant[Compute the TLE checksum for the given line.] return[binary_operation[call[name[sum], parameter[<ast.GeneratorExp object at 0x7da1b0c35840>]] <ast.Mod object at 0x7da2590d6920> constant[10]]]
keyword[def] identifier[compute_checksum] ( identifier[line] ): literal[string] keyword[return] identifier[sum] (( identifier[int] ( identifier[c] ) keyword[if] identifier[c] . identifier[isdigit] () keyword[else] identifier[c] == literal[string] ) keyword[for] identifier[c] keyword[in] identifier[line] [ literal[int] : literal[int] ])% literal[int]
def compute_checksum(line): """Compute the TLE checksum for the given line.""" return sum((int(c) if c.isdigit() else c == '-' for c in line[0:68])) % 10
def commandify(use_argcomplete=False, exit=True, *args, **kwargs): '''Turns decorated functions into command line args Finds the main_command and all commands and generates command line args from these.''' parser = CommandifyArgumentParser(*args, **kwargs) parser.setup_arguments() if use_argcomplete: try: import argcomplete except ImportError: print('argcomplete not installed, please install it.') parser.exit(status=2) # Must happen between setup_arguments() and parse_args(). argcomplete.autocomplete(parser) args = parser.parse_args() if exit: parser.dispatch_commands() parser.exit(0) else: return parser.dispatch_commands()
def function[commandify, parameter[use_argcomplete, exit]]: constant[Turns decorated functions into command line args Finds the main_command and all commands and generates command line args from these.] variable[parser] assign[=] call[name[CommandifyArgumentParser], parameter[<ast.Starred object at 0x7da18f00ffd0>]] call[name[parser].setup_arguments, parameter[]] if name[use_argcomplete] begin[:] <ast.Try object at 0x7da2045678e0> call[name[argcomplete].autocomplete, parameter[name[parser]]] variable[args] assign[=] call[name[parser].parse_args, parameter[]] if name[exit] begin[:] call[name[parser].dispatch_commands, parameter[]] call[name[parser].exit, parameter[constant[0]]]
keyword[def] identifier[commandify] ( identifier[use_argcomplete] = keyword[False] , identifier[exit] = keyword[True] ,* identifier[args] ,** identifier[kwargs] ): literal[string] identifier[parser] = identifier[CommandifyArgumentParser] (* identifier[args] ,** identifier[kwargs] ) identifier[parser] . identifier[setup_arguments] () keyword[if] identifier[use_argcomplete] : keyword[try] : keyword[import] identifier[argcomplete] keyword[except] identifier[ImportError] : identifier[print] ( literal[string] ) identifier[parser] . identifier[exit] ( identifier[status] = literal[int] ) identifier[argcomplete] . identifier[autocomplete] ( identifier[parser] ) identifier[args] = identifier[parser] . identifier[parse_args] () keyword[if] identifier[exit] : identifier[parser] . identifier[dispatch_commands] () identifier[parser] . identifier[exit] ( literal[int] ) keyword[else] : keyword[return] identifier[parser] . identifier[dispatch_commands] ()
def commandify(use_argcomplete=False, exit=True, *args, **kwargs): """Turns decorated functions into command line args Finds the main_command and all commands and generates command line args from these.""" parser = CommandifyArgumentParser(*args, **kwargs) parser.setup_arguments() if use_argcomplete: try: import argcomplete # depends on [control=['try'], data=[]] except ImportError: print('argcomplete not installed, please install it.') parser.exit(status=2) # depends on [control=['except'], data=[]] # Must happen between setup_arguments() and parse_args(). argcomplete.autocomplete(parser) # depends on [control=['if'], data=[]] args = parser.parse_args() if exit: parser.dispatch_commands() parser.exit(0) # depends on [control=['if'], data=[]] else: return parser.dispatch_commands()
def init_instance(self, key): """ Create an empty instance if it doesn't exist. If the instance already exists, this is a noop. """ with self._lock: if key not in self._metadata: self._metadata[key] = {} self._metric_ids[key] = []
def function[init_instance, parameter[self, key]]: constant[ Create an empty instance if it doesn't exist. If the instance already exists, this is a noop. ] with name[self]._lock begin[:] if compare[name[key] <ast.NotIn object at 0x7da2590d7190> name[self]._metadata] begin[:] call[name[self]._metadata][name[key]] assign[=] dictionary[[], []] call[name[self]._metric_ids][name[key]] assign[=] list[[]]
keyword[def] identifier[init_instance] ( identifier[self] , identifier[key] ): literal[string] keyword[with] identifier[self] . identifier[_lock] : keyword[if] identifier[key] keyword[not] keyword[in] identifier[self] . identifier[_metadata] : identifier[self] . identifier[_metadata] [ identifier[key] ]={} identifier[self] . identifier[_metric_ids] [ identifier[key] ]=[]
def init_instance(self, key): """ Create an empty instance if it doesn't exist. If the instance already exists, this is a noop. """ with self._lock: if key not in self._metadata: self._metadata[key] = {} self._metric_ids[key] = [] # depends on [control=['if'], data=['key']] # depends on [control=['with'], data=[]]
def inurl(needles, haystack, position='any'): """convenience function to make string.find return bool""" count = 0 # lowercase everything to do case-insensitive search haystack2 = haystack.lower() for needle in needles: needle2 = needle.lower() if position == 'any': if haystack2.find(needle2) > -1: count += 1 elif position == 'end': if haystack2.endswith(needle2): count += 1 elif position == 'begin': if haystack2.startswith(needle2): count += 1 # assessment if count > 0: return True return False
def function[inurl, parameter[needles, haystack, position]]: constant[convenience function to make string.find return bool] variable[count] assign[=] constant[0] variable[haystack2] assign[=] call[name[haystack].lower, parameter[]] for taget[name[needle]] in starred[name[needles]] begin[:] variable[needle2] assign[=] call[name[needle].lower, parameter[]] if compare[name[position] equal[==] constant[any]] begin[:] if compare[call[name[haystack2].find, parameter[name[needle2]]] greater[>] <ast.UnaryOp object at 0x7da207f981f0>] begin[:] <ast.AugAssign object at 0x7da207f9b430> if compare[name[count] greater[>] constant[0]] begin[:] return[constant[True]] return[constant[False]]
keyword[def] identifier[inurl] ( identifier[needles] , identifier[haystack] , identifier[position] = literal[string] ): literal[string] identifier[count] = literal[int] identifier[haystack2] = identifier[haystack] . identifier[lower] () keyword[for] identifier[needle] keyword[in] identifier[needles] : identifier[needle2] = identifier[needle] . identifier[lower] () keyword[if] identifier[position] == literal[string] : keyword[if] identifier[haystack2] . identifier[find] ( identifier[needle2] )>- literal[int] : identifier[count] += literal[int] keyword[elif] identifier[position] == literal[string] : keyword[if] identifier[haystack2] . identifier[endswith] ( identifier[needle2] ): identifier[count] += literal[int] keyword[elif] identifier[position] == literal[string] : keyword[if] identifier[haystack2] . identifier[startswith] ( identifier[needle2] ): identifier[count] += literal[int] keyword[if] identifier[count] > literal[int] : keyword[return] keyword[True] keyword[return] keyword[False]
def inurl(needles, haystack, position='any'): """convenience function to make string.find return bool""" count = 0 # lowercase everything to do case-insensitive search haystack2 = haystack.lower() for needle in needles: needle2 = needle.lower() if position == 'any': if haystack2.find(needle2) > -1: count += 1 # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] elif position == 'end': if haystack2.endswith(needle2): count += 1 # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] elif position == 'begin': if haystack2.startswith(needle2): count += 1 # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['needle']] # assessment if count > 0: return True # depends on [control=['if'], data=[]] return False
def get_defaults(self): """Get a dictionary that contains all the available defaults.""" defaults = self._defaults.copy() for field_key, default in self._default_callables.items(): defaults[field_key] = default() return defaults
def function[get_defaults, parameter[self]]: constant[Get a dictionary that contains all the available defaults.] variable[defaults] assign[=] call[name[self]._defaults.copy, parameter[]] for taget[tuple[[<ast.Name object at 0x7da204567970>, <ast.Name object at 0x7da2045677f0>]]] in starred[call[name[self]._default_callables.items, parameter[]]] begin[:] call[name[defaults]][name[field_key]] assign[=] call[name[default], parameter[]] return[name[defaults]]
keyword[def] identifier[get_defaults] ( identifier[self] ): literal[string] identifier[defaults] = identifier[self] . identifier[_defaults] . identifier[copy] () keyword[for] identifier[field_key] , identifier[default] keyword[in] identifier[self] . identifier[_default_callables] . identifier[items] (): identifier[defaults] [ identifier[field_key] ]= identifier[default] () keyword[return] identifier[defaults]
def get_defaults(self): """Get a dictionary that contains all the available defaults.""" defaults = self._defaults.copy() for (field_key, default) in self._default_callables.items(): defaults[field_key] = default() # depends on [control=['for'], data=[]] return defaults
def translate(self, addr): """ Reverse DNS the public broadcast_address, then lookup that hostname to get the AWS-resolved IP, which will point to the private IP address within the same datacenter. """ # get family of this address so we translate to the same family = socket.getaddrinfo(addr, 0, socket.AF_UNSPEC, socket.SOCK_STREAM)[0][0] host = socket.getfqdn(addr) for a in socket.getaddrinfo(host, 0, family, socket.SOCK_STREAM): try: return a[4][0] except Exception: pass return addr
def function[translate, parameter[self, addr]]: constant[ Reverse DNS the public broadcast_address, then lookup that hostname to get the AWS-resolved IP, which will point to the private IP address within the same datacenter. ] variable[family] assign[=] call[call[call[name[socket].getaddrinfo, parameter[name[addr], constant[0], name[socket].AF_UNSPEC, name[socket].SOCK_STREAM]]][constant[0]]][constant[0]] variable[host] assign[=] call[name[socket].getfqdn, parameter[name[addr]]] for taget[name[a]] in starred[call[name[socket].getaddrinfo, parameter[name[host], constant[0], name[family], name[socket].SOCK_STREAM]]] begin[:] <ast.Try object at 0x7da1b22b8790> return[name[addr]]
keyword[def] identifier[translate] ( identifier[self] , identifier[addr] ): literal[string] identifier[family] = identifier[socket] . identifier[getaddrinfo] ( identifier[addr] , literal[int] , identifier[socket] . identifier[AF_UNSPEC] , identifier[socket] . identifier[SOCK_STREAM] )[ literal[int] ][ literal[int] ] identifier[host] = identifier[socket] . identifier[getfqdn] ( identifier[addr] ) keyword[for] identifier[a] keyword[in] identifier[socket] . identifier[getaddrinfo] ( identifier[host] , literal[int] , identifier[family] , identifier[socket] . identifier[SOCK_STREAM] ): keyword[try] : keyword[return] identifier[a] [ literal[int] ][ literal[int] ] keyword[except] identifier[Exception] : keyword[pass] keyword[return] identifier[addr]
def translate(self, addr): """ Reverse DNS the public broadcast_address, then lookup that hostname to get the AWS-resolved IP, which will point to the private IP address within the same datacenter. """ # get family of this address so we translate to the same family = socket.getaddrinfo(addr, 0, socket.AF_UNSPEC, socket.SOCK_STREAM)[0][0] host = socket.getfqdn(addr) for a in socket.getaddrinfo(host, 0, family, socket.SOCK_STREAM): try: return a[4][0] # depends on [control=['try'], data=[]] except Exception: pass # depends on [control=['except'], data=[]] # depends on [control=['for'], data=['a']] return addr
def export_csv(self, table, output=None, columns="*", **kwargs): """ Export a table to a CSV file. If an output path is provided, write to file. Else, return a string. Wrapper around pandas.sql.to_csv(). See: http://pandas.pydata.org/pandas-docs/stable/io.html#io-store-in-csv Arguments: table (str): Name of the table to export. output (str, optional): Path of the file to write. columns (str, optional): A comma separated list of columns to export. **kwargs: Additional args passed to pandas.sql.to_csv() Returns: str: CSV string, or None if writing to file. Raises: IOError: In case of error writing to file. SchemaError: If the named table is not found. """ import pandas.io.sql as panda # Determine if we're writing to a file or returning a string. isfile = output is not None output = output or StringIO() if table not in self.tables: raise SchemaError("Cannot find table '{table}'" .format(table=table)) # Don't print row indexes by default. if "index" not in kwargs: kwargs["index"] = False table = panda.read_sql("SELECT {columns} FROM {table}" .format(columns=columns, table=table), self.connection) table.to_csv(output, **kwargs) return None if isfile else output.getvalue()
def function[export_csv, parameter[self, table, output, columns]]: constant[ Export a table to a CSV file. If an output path is provided, write to file. Else, return a string. Wrapper around pandas.sql.to_csv(). See: http://pandas.pydata.org/pandas-docs/stable/io.html#io-store-in-csv Arguments: table (str): Name of the table to export. output (str, optional): Path of the file to write. columns (str, optional): A comma separated list of columns to export. **kwargs: Additional args passed to pandas.sql.to_csv() Returns: str: CSV string, or None if writing to file. Raises: IOError: In case of error writing to file. SchemaError: If the named table is not found. ] import module[pandas.io.sql] as alias[panda] variable[isfile] assign[=] compare[name[output] is_not constant[None]] variable[output] assign[=] <ast.BoolOp object at 0x7da1b085c400> if compare[name[table] <ast.NotIn object at 0x7da2590d7190> name[self].tables] begin[:] <ast.Raise object at 0x7da1b085c490> if compare[constant[index] <ast.NotIn object at 0x7da2590d7190> name[kwargs]] begin[:] call[name[kwargs]][constant[index]] assign[=] constant[False] variable[table] assign[=] call[name[panda].read_sql, parameter[call[constant[SELECT {columns} FROM {table}].format, parameter[]], name[self].connection]] call[name[table].to_csv, parameter[name[output]]] return[<ast.IfExp object at 0x7da1b085f670>]
keyword[def] identifier[export_csv] ( identifier[self] , identifier[table] , identifier[output] = keyword[None] , identifier[columns] = literal[string] ,** identifier[kwargs] ): literal[string] keyword[import] identifier[pandas] . identifier[io] . identifier[sql] keyword[as] identifier[panda] identifier[isfile] = identifier[output] keyword[is] keyword[not] keyword[None] identifier[output] = identifier[output] keyword[or] identifier[StringIO] () keyword[if] identifier[table] keyword[not] keyword[in] identifier[self] . identifier[tables] : keyword[raise] identifier[SchemaError] ( literal[string] . identifier[format] ( identifier[table] = identifier[table] )) keyword[if] literal[string] keyword[not] keyword[in] identifier[kwargs] : identifier[kwargs] [ literal[string] ]= keyword[False] identifier[table] = identifier[panda] . identifier[read_sql] ( literal[string] . identifier[format] ( identifier[columns] = identifier[columns] , identifier[table] = identifier[table] ), identifier[self] . identifier[connection] ) identifier[table] . identifier[to_csv] ( identifier[output] ,** identifier[kwargs] ) keyword[return] keyword[None] keyword[if] identifier[isfile] keyword[else] identifier[output] . identifier[getvalue] ()
def export_csv(self, table, output=None, columns='*', **kwargs): """ Export a table to a CSV file. If an output path is provided, write to file. Else, return a string. Wrapper around pandas.sql.to_csv(). See: http://pandas.pydata.org/pandas-docs/stable/io.html#io-store-in-csv Arguments: table (str): Name of the table to export. output (str, optional): Path of the file to write. columns (str, optional): A comma separated list of columns to export. **kwargs: Additional args passed to pandas.sql.to_csv() Returns: str: CSV string, or None if writing to file. Raises: IOError: In case of error writing to file. SchemaError: If the named table is not found. """ import pandas.io.sql as panda # Determine if we're writing to a file or returning a string. isfile = output is not None output = output or StringIO() if table not in self.tables: raise SchemaError("Cannot find table '{table}'".format(table=table)) # depends on [control=['if'], data=['table']] # Don't print row indexes by default. if 'index' not in kwargs: kwargs['index'] = False # depends on [control=['if'], data=['kwargs']] table = panda.read_sql('SELECT {columns} FROM {table}'.format(columns=columns, table=table), self.connection) table.to_csv(output, **kwargs) return None if isfile else output.getvalue()
def add_terms_to_whitelist(self, terms): """ Add a list of terms to the user's company's whitelist. :param terms: The list of terms to whitelist. :return: The list of extracted |Indicator| objects that were whitelisted. """ resp = self._client.post("whitelist", json=terms) return [Indicator.from_dict(indicator) for indicator in resp.json()]
def function[add_terms_to_whitelist, parameter[self, terms]]: constant[ Add a list of terms to the user's company's whitelist. :param terms: The list of terms to whitelist. :return: The list of extracted |Indicator| objects that were whitelisted. ] variable[resp] assign[=] call[name[self]._client.post, parameter[constant[whitelist]]] return[<ast.ListComp object at 0x7da1b2346110>]
keyword[def] identifier[add_terms_to_whitelist] ( identifier[self] , identifier[terms] ): literal[string] identifier[resp] = identifier[self] . identifier[_client] . identifier[post] ( literal[string] , identifier[json] = identifier[terms] ) keyword[return] [ identifier[Indicator] . identifier[from_dict] ( identifier[indicator] ) keyword[for] identifier[indicator] keyword[in] identifier[resp] . identifier[json] ()]
def add_terms_to_whitelist(self, terms): """ Add a list of terms to the user's company's whitelist. :param terms: The list of terms to whitelist. :return: The list of extracted |Indicator| objects that were whitelisted. """ resp = self._client.post('whitelist', json=terms) return [Indicator.from_dict(indicator) for indicator in resp.json()]
def firmware_download_input_protocol_type_scp_protocol_scp_file(self, **kwargs): """Auto Generated Code """ config = ET.Element("config") firmware_download = ET.Element("firmware_download") config = firmware_download input = ET.SubElement(firmware_download, "input") protocol_type = ET.SubElement(input, "protocol-type") scp_protocol = ET.SubElement(protocol_type, "scp-protocol") scp = ET.SubElement(scp_protocol, "scp") file = ET.SubElement(scp, "file") file.text = kwargs.pop('file') callback = kwargs.pop('callback', self._callback) return callback(config)
def function[firmware_download_input_protocol_type_scp_protocol_scp_file, parameter[self]]: constant[Auto Generated Code ] variable[config] assign[=] call[name[ET].Element, parameter[constant[config]]] variable[firmware_download] assign[=] call[name[ET].Element, parameter[constant[firmware_download]]] variable[config] assign[=] name[firmware_download] variable[input] assign[=] call[name[ET].SubElement, parameter[name[firmware_download], constant[input]]] variable[protocol_type] assign[=] call[name[ET].SubElement, parameter[name[input], constant[protocol-type]]] variable[scp_protocol] assign[=] call[name[ET].SubElement, parameter[name[protocol_type], constant[scp-protocol]]] variable[scp] assign[=] call[name[ET].SubElement, parameter[name[scp_protocol], constant[scp]]] variable[file] assign[=] call[name[ET].SubElement, parameter[name[scp], constant[file]]] name[file].text assign[=] call[name[kwargs].pop, parameter[constant[file]]] variable[callback] assign[=] call[name[kwargs].pop, parameter[constant[callback], name[self]._callback]] return[call[name[callback], parameter[name[config]]]]
keyword[def] identifier[firmware_download_input_protocol_type_scp_protocol_scp_file] ( identifier[self] ,** identifier[kwargs] ): literal[string] identifier[config] = identifier[ET] . identifier[Element] ( literal[string] ) identifier[firmware_download] = identifier[ET] . identifier[Element] ( literal[string] ) identifier[config] = identifier[firmware_download] identifier[input] = identifier[ET] . identifier[SubElement] ( identifier[firmware_download] , literal[string] ) identifier[protocol_type] = identifier[ET] . identifier[SubElement] ( identifier[input] , literal[string] ) identifier[scp_protocol] = identifier[ET] . identifier[SubElement] ( identifier[protocol_type] , literal[string] ) identifier[scp] = identifier[ET] . identifier[SubElement] ( identifier[scp_protocol] , literal[string] ) identifier[file] = identifier[ET] . identifier[SubElement] ( identifier[scp] , literal[string] ) identifier[file] . identifier[text] = identifier[kwargs] . identifier[pop] ( literal[string] ) identifier[callback] = identifier[kwargs] . identifier[pop] ( literal[string] , identifier[self] . identifier[_callback] ) keyword[return] identifier[callback] ( identifier[config] )
def firmware_download_input_protocol_type_scp_protocol_scp_file(self, **kwargs): """Auto Generated Code """ config = ET.Element('config') firmware_download = ET.Element('firmware_download') config = firmware_download input = ET.SubElement(firmware_download, 'input') protocol_type = ET.SubElement(input, 'protocol-type') scp_protocol = ET.SubElement(protocol_type, 'scp-protocol') scp = ET.SubElement(scp_protocol, 'scp') file = ET.SubElement(scp, 'file') file.text = kwargs.pop('file') callback = kwargs.pop('callback', self._callback) return callback(config)
def info(self): """Return cache information. .. note:: This is not the cache info for the entire Redis key space. """ info = redis_conn.info() return _CacheInfo(info['keyspace_hits'], info['keyspace_misses'], self.size())
def function[info, parameter[self]]: constant[Return cache information. .. note:: This is not the cache info for the entire Redis key space. ] variable[info] assign[=] call[name[redis_conn].info, parameter[]] return[call[name[_CacheInfo], parameter[call[name[info]][constant[keyspace_hits]], call[name[info]][constant[keyspace_misses]], call[name[self].size, parameter[]]]]]
keyword[def] identifier[info] ( identifier[self] ): literal[string] identifier[info] = identifier[redis_conn] . identifier[info] () keyword[return] identifier[_CacheInfo] ( identifier[info] [ literal[string] ], identifier[info] [ literal[string] ], identifier[self] . identifier[size] ())
def info(self): """Return cache information. .. note:: This is not the cache info for the entire Redis key space. """ info = redis_conn.info() return _CacheInfo(info['keyspace_hits'], info['keyspace_misses'], self.size())
def value(self,ascode=None): """Return text cast to the correct type or the selected type""" if ascode is None: ascode = self.code return self.cast[ascode](self.text)
def function[value, parameter[self, ascode]]: constant[Return text cast to the correct type or the selected type] if compare[name[ascode] is constant[None]] begin[:] variable[ascode] assign[=] name[self].code return[call[call[name[self].cast][name[ascode]], parameter[name[self].text]]]
keyword[def] identifier[value] ( identifier[self] , identifier[ascode] = keyword[None] ): literal[string] keyword[if] identifier[ascode] keyword[is] keyword[None] : identifier[ascode] = identifier[self] . identifier[code] keyword[return] identifier[self] . identifier[cast] [ identifier[ascode] ]( identifier[self] . identifier[text] )
def value(self, ascode=None): """Return text cast to the correct type or the selected type""" if ascode is None: ascode = self.code # depends on [control=['if'], data=['ascode']] return self.cast[ascode](self.text)
def send_message(self, message): """Send chat message to this steam user :param message: message to send :type message: str """ self._steam.send(MsgProto(EMsg.ClientFriendMsg), { 'steamid': self.steam_id, 'chat_entry_type': EChatEntryType.ChatMsg, 'message': message.encode('utf8'), })
def function[send_message, parameter[self, message]]: constant[Send chat message to this steam user :param message: message to send :type message: str ] call[name[self]._steam.send, parameter[call[name[MsgProto], parameter[name[EMsg].ClientFriendMsg]], dictionary[[<ast.Constant object at 0x7da18f00fac0>, <ast.Constant object at 0x7da18f00f0d0>, <ast.Constant object at 0x7da18f00ec20>], [<ast.Attribute object at 0x7da18f00d810>, <ast.Attribute object at 0x7da18f00f760>, <ast.Call object at 0x7da18f00f520>]]]]
keyword[def] identifier[send_message] ( identifier[self] , identifier[message] ): literal[string] identifier[self] . identifier[_steam] . identifier[send] ( identifier[MsgProto] ( identifier[EMsg] . identifier[ClientFriendMsg] ),{ literal[string] : identifier[self] . identifier[steam_id] , literal[string] : identifier[EChatEntryType] . identifier[ChatMsg] , literal[string] : identifier[message] . identifier[encode] ( literal[string] ), })
def send_message(self, message): """Send chat message to this steam user :param message: message to send :type message: str """ self._steam.send(MsgProto(EMsg.ClientFriendMsg), {'steamid': self.steam_id, 'chat_entry_type': EChatEntryType.ChatMsg, 'message': message.encode('utf8')})
def get_default_triple(): """ Return the default target triple LLVM is configured to produce code for. """ with ffi.OutputString() as out: ffi.lib.LLVMPY_GetDefaultTargetTriple(out) return str(out)
def function[get_default_triple, parameter[]]: constant[ Return the default target triple LLVM is configured to produce code for. ] with call[name[ffi].OutputString, parameter[]] begin[:] call[name[ffi].lib.LLVMPY_GetDefaultTargetTriple, parameter[name[out]]] return[call[name[str], parameter[name[out]]]]
keyword[def] identifier[get_default_triple] (): literal[string] keyword[with] identifier[ffi] . identifier[OutputString] () keyword[as] identifier[out] : identifier[ffi] . identifier[lib] . identifier[LLVMPY_GetDefaultTargetTriple] ( identifier[out] ) keyword[return] identifier[str] ( identifier[out] )
def get_default_triple(): """ Return the default target triple LLVM is configured to produce code for. """ with ffi.OutputString() as out: ffi.lib.LLVMPY_GetDefaultTargetTriple(out) return str(out) # depends on [control=['with'], data=['out']]
def updated(self): 'return datetime.datetime' return dateutil.parser.parse(str(self.f.latestRevision.updated))
def function[updated, parameter[self]]: constant[return datetime.datetime] return[call[name[dateutil].parser.parse, parameter[call[name[str], parameter[name[self].f.latestRevision.updated]]]]]
keyword[def] identifier[updated] ( identifier[self] ): literal[string] keyword[return] identifier[dateutil] . identifier[parser] . identifier[parse] ( identifier[str] ( identifier[self] . identifier[f] . identifier[latestRevision] . identifier[updated] ))
def updated(self): """return datetime.datetime""" return dateutil.parser.parse(str(self.f.latestRevision.updated))
def viewBoxAxisRange(viewBox, axisNumber): """ Calculates the range of an axis of a viewBox. """ rect = viewBox.childrenBoundingRect() # taken from viewBox.autoRange() if rect is not None: if axisNumber == X_AXIS: return rect.left(), rect.right() elif axisNumber == Y_AXIS: return rect.bottom(), rect.top() else: raise ValueError("axisNumber should be 0 or 1, got: {}".format(axisNumber)) else: # Does this happen? Probably when the plot is empty. raise AssertionError("No children bbox. Plot range not updated.")
def function[viewBoxAxisRange, parameter[viewBox, axisNumber]]: constant[ Calculates the range of an axis of a viewBox. ] variable[rect] assign[=] call[name[viewBox].childrenBoundingRect, parameter[]] if compare[name[rect] is_not constant[None]] begin[:] if compare[name[axisNumber] equal[==] name[X_AXIS]] begin[:] return[tuple[[<ast.Call object at 0x7da1b0416b90>, <ast.Call object at 0x7da1b0415090>]]]
keyword[def] identifier[viewBoxAxisRange] ( identifier[viewBox] , identifier[axisNumber] ): literal[string] identifier[rect] = identifier[viewBox] . identifier[childrenBoundingRect] () keyword[if] identifier[rect] keyword[is] keyword[not] keyword[None] : keyword[if] identifier[axisNumber] == identifier[X_AXIS] : keyword[return] identifier[rect] . identifier[left] (), identifier[rect] . identifier[right] () keyword[elif] identifier[axisNumber] == identifier[Y_AXIS] : keyword[return] identifier[rect] . identifier[bottom] (), identifier[rect] . identifier[top] () keyword[else] : keyword[raise] identifier[ValueError] ( literal[string] . identifier[format] ( identifier[axisNumber] )) keyword[else] : keyword[raise] identifier[AssertionError] ( literal[string] )
def viewBoxAxisRange(viewBox, axisNumber): """ Calculates the range of an axis of a viewBox. """ rect = viewBox.childrenBoundingRect() # taken from viewBox.autoRange() if rect is not None: if axisNumber == X_AXIS: return (rect.left(), rect.right()) # depends on [control=['if'], data=[]] elif axisNumber == Y_AXIS: return (rect.bottom(), rect.top()) # depends on [control=['if'], data=[]] else: raise ValueError('axisNumber should be 0 or 1, got: {}'.format(axisNumber)) # depends on [control=['if'], data=['rect']] else: # Does this happen? Probably when the plot is empty. raise AssertionError('No children bbox. Plot range not updated.')
def start_tag(self, alt=None, empty=False): """Return XML start tag for the receiver.""" if alt: name = alt else: name = self.name result = "<" + name for it in self.attr: result += ' %s="%s"' % (it, escape(self.attr[it], {'"':"&quot;", '%': "%%"})) if empty: return result + "/>%s" else: return result + ">"
def function[start_tag, parameter[self, alt, empty]]: constant[Return XML start tag for the receiver.] if name[alt] begin[:] variable[name] assign[=] name[alt] variable[result] assign[=] binary_operation[constant[<] + name[name]] for taget[name[it]] in starred[name[self].attr] begin[:] <ast.AugAssign object at 0x7da20c6aa290> if name[empty] begin[:] return[binary_operation[name[result] + constant[/>%s]]]
keyword[def] identifier[start_tag] ( identifier[self] , identifier[alt] = keyword[None] , identifier[empty] = keyword[False] ): literal[string] keyword[if] identifier[alt] : identifier[name] = identifier[alt] keyword[else] : identifier[name] = identifier[self] . identifier[name] identifier[result] = literal[string] + identifier[name] keyword[for] identifier[it] keyword[in] identifier[self] . identifier[attr] : identifier[result] += literal[string] %( identifier[it] , identifier[escape] ( identifier[self] . identifier[attr] [ identifier[it] ],{ literal[string] : literal[string] , literal[string] : literal[string] })) keyword[if] identifier[empty] : keyword[return] identifier[result] + literal[string] keyword[else] : keyword[return] identifier[result] + literal[string]
def start_tag(self, alt=None, empty=False): """Return XML start tag for the receiver.""" if alt: name = alt # depends on [control=['if'], data=[]] else: name = self.name result = '<' + name for it in self.attr: result += ' %s="%s"' % (it, escape(self.attr[it], {'"': '&quot;', '%': '%%'})) # depends on [control=['for'], data=['it']] if empty: return result + '/>%s' # depends on [control=['if'], data=[]] else: return result + '>'
def _name_to_tensor(self, tensor_name): """The tensor with the given name. Args: tensor_name: a string, name of a tensor in the graph. Returns: a tf.Tensor or mtf.Tensor """ id1, id2 = self._tensor_name_to_ids[tensor_name] return self._operations[id1].outputs[id2]
def function[_name_to_tensor, parameter[self, tensor_name]]: constant[The tensor with the given name. Args: tensor_name: a string, name of a tensor in the graph. Returns: a tf.Tensor or mtf.Tensor ] <ast.Tuple object at 0x7da204564100> assign[=] call[name[self]._tensor_name_to_ids][name[tensor_name]] return[call[call[name[self]._operations][name[id1]].outputs][name[id2]]]
keyword[def] identifier[_name_to_tensor] ( identifier[self] , identifier[tensor_name] ): literal[string] identifier[id1] , identifier[id2] = identifier[self] . identifier[_tensor_name_to_ids] [ identifier[tensor_name] ] keyword[return] identifier[self] . identifier[_operations] [ identifier[id1] ]. identifier[outputs] [ identifier[id2] ]
def _name_to_tensor(self, tensor_name): """The tensor with the given name. Args: tensor_name: a string, name of a tensor in the graph. Returns: a tf.Tensor or mtf.Tensor """ (id1, id2) = self._tensor_name_to_ids[tensor_name] return self._operations[id1].outputs[id2]
def _run_cnvkit_shared(inputs, backgrounds): """Shared functionality to run CNVkit, parallelizing over multiple BAM files. Handles new style cases where we have pre-normalized inputs and old cases where we run CNVkit individually. """ if tz.get_in(["depth", "bins", "normalized"], inputs[0]): ckouts = [] for data in inputs: cnr_file = tz.get_in(["depth", "bins", "normalized"], data) cns_file = os.path.join(_sv_workdir(data), "%s.cns" % dd.get_sample_name(data)) cns_file = _cnvkit_segment(cnr_file, dd.get_coverage_interval(data), data, inputs + backgrounds, cns_file) ckouts.append({"cnr": cnr_file, "cns": cns_file, "background": tz.get_in(["depth", "bins", "background"], data)}) return ckouts else: return _run_cnvkit_shared_orig(inputs, backgrounds)
def function[_run_cnvkit_shared, parameter[inputs, backgrounds]]: constant[Shared functionality to run CNVkit, parallelizing over multiple BAM files. Handles new style cases where we have pre-normalized inputs and old cases where we run CNVkit individually. ] if call[name[tz].get_in, parameter[list[[<ast.Constant object at 0x7da20c6e5000>, <ast.Constant object at 0x7da20c6e5240>, <ast.Constant object at 0x7da20c6e5870>]], call[name[inputs]][constant[0]]]] begin[:] variable[ckouts] assign[=] list[[]] for taget[name[data]] in starred[name[inputs]] begin[:] variable[cnr_file] assign[=] call[name[tz].get_in, parameter[list[[<ast.Constant object at 0x7da20c6e4a90>, <ast.Constant object at 0x7da20c6e60b0>, <ast.Constant object at 0x7da20c6e75e0>]], name[data]]] variable[cns_file] assign[=] call[name[os].path.join, parameter[call[name[_sv_workdir], parameter[name[data]]], binary_operation[constant[%s.cns] <ast.Mod object at 0x7da2590d6920> call[name[dd].get_sample_name, parameter[name[data]]]]]] variable[cns_file] assign[=] call[name[_cnvkit_segment], parameter[name[cnr_file], call[name[dd].get_coverage_interval, parameter[name[data]]], name[data], binary_operation[name[inputs] + name[backgrounds]], name[cns_file]]] call[name[ckouts].append, parameter[dictionary[[<ast.Constant object at 0x7da1b1985990>, <ast.Constant object at 0x7da1b1985660>, <ast.Constant object at 0x7da1b19873a0>], [<ast.Name object at 0x7da1b1984a90>, <ast.Name object at 0x7da1b1987cd0>, <ast.Call object at 0x7da1b1986f50>]]]] return[name[ckouts]]
keyword[def] identifier[_run_cnvkit_shared] ( identifier[inputs] , identifier[backgrounds] ): literal[string] keyword[if] identifier[tz] . identifier[get_in] ([ literal[string] , literal[string] , literal[string] ], identifier[inputs] [ literal[int] ]): identifier[ckouts] =[] keyword[for] identifier[data] keyword[in] identifier[inputs] : identifier[cnr_file] = identifier[tz] . identifier[get_in] ([ literal[string] , literal[string] , literal[string] ], identifier[data] ) identifier[cns_file] = identifier[os] . identifier[path] . identifier[join] ( identifier[_sv_workdir] ( identifier[data] ), literal[string] % identifier[dd] . identifier[get_sample_name] ( identifier[data] )) identifier[cns_file] = identifier[_cnvkit_segment] ( identifier[cnr_file] , identifier[dd] . identifier[get_coverage_interval] ( identifier[data] ), identifier[data] , identifier[inputs] + identifier[backgrounds] , identifier[cns_file] ) identifier[ckouts] . identifier[append] ({ literal[string] : identifier[cnr_file] , literal[string] : identifier[cns_file] , literal[string] : identifier[tz] . identifier[get_in] ([ literal[string] , literal[string] , literal[string] ], identifier[data] )}) keyword[return] identifier[ckouts] keyword[else] : keyword[return] identifier[_run_cnvkit_shared_orig] ( identifier[inputs] , identifier[backgrounds] )
def _run_cnvkit_shared(inputs, backgrounds): """Shared functionality to run CNVkit, parallelizing over multiple BAM files. Handles new style cases where we have pre-normalized inputs and old cases where we run CNVkit individually. """ if tz.get_in(['depth', 'bins', 'normalized'], inputs[0]): ckouts = [] for data in inputs: cnr_file = tz.get_in(['depth', 'bins', 'normalized'], data) cns_file = os.path.join(_sv_workdir(data), '%s.cns' % dd.get_sample_name(data)) cns_file = _cnvkit_segment(cnr_file, dd.get_coverage_interval(data), data, inputs + backgrounds, cns_file) ckouts.append({'cnr': cnr_file, 'cns': cns_file, 'background': tz.get_in(['depth', 'bins', 'background'], data)}) # depends on [control=['for'], data=['data']] return ckouts # depends on [control=['if'], data=[]] else: return _run_cnvkit_shared_orig(inputs, backgrounds)
def get_style_id(self, style_or_name, style_type): """ Return the id of the style corresponding to *style_or_name*, or |None| if *style_or_name* is |None|. If *style_or_name* is not a style object, the style is looked up using *style_or_name* as a style name, raising |ValueError| if no style with that name is defined. Raises |ValueError| if the target style is not of *style_type*. """ if style_or_name is None: return None elif isinstance(style_or_name, BaseStyle): return self._get_style_id_from_style(style_or_name, style_type) else: return self._get_style_id_from_name(style_or_name, style_type)
def function[get_style_id, parameter[self, style_or_name, style_type]]: constant[ Return the id of the style corresponding to *style_or_name*, or |None| if *style_or_name* is |None|. If *style_or_name* is not a style object, the style is looked up using *style_or_name* as a style name, raising |ValueError| if no style with that name is defined. Raises |ValueError| if the target style is not of *style_type*. ] if compare[name[style_or_name] is constant[None]] begin[:] return[constant[None]]
keyword[def] identifier[get_style_id] ( identifier[self] , identifier[style_or_name] , identifier[style_type] ): literal[string] keyword[if] identifier[style_or_name] keyword[is] keyword[None] : keyword[return] keyword[None] keyword[elif] identifier[isinstance] ( identifier[style_or_name] , identifier[BaseStyle] ): keyword[return] identifier[self] . identifier[_get_style_id_from_style] ( identifier[style_or_name] , identifier[style_type] ) keyword[else] : keyword[return] identifier[self] . identifier[_get_style_id_from_name] ( identifier[style_or_name] , identifier[style_type] )
def get_style_id(self, style_or_name, style_type): """ Return the id of the style corresponding to *style_or_name*, or |None| if *style_or_name* is |None|. If *style_or_name* is not a style object, the style is looked up using *style_or_name* as a style name, raising |ValueError| if no style with that name is defined. Raises |ValueError| if the target style is not of *style_type*. """ if style_or_name is None: return None # depends on [control=['if'], data=[]] elif isinstance(style_or_name, BaseStyle): return self._get_style_id_from_style(style_or_name, style_type) # depends on [control=['if'], data=[]] else: return self._get_style_id_from_name(style_or_name, style_type)
def argshash(self, args, kwargs): "Hash mutable arguments' containers with immutable keys and values." a = repr(args) b = repr(sorted((repr(k), repr(v)) for k, v in kwargs.items())) return a + b
def function[argshash, parameter[self, args, kwargs]]: constant[Hash mutable arguments' containers with immutable keys and values.] variable[a] assign[=] call[name[repr], parameter[name[args]]] variable[b] assign[=] call[name[repr], parameter[call[name[sorted], parameter[<ast.GeneratorExp object at 0x7da1b0feb460>]]]] return[binary_operation[name[a] + name[b]]]
keyword[def] identifier[argshash] ( identifier[self] , identifier[args] , identifier[kwargs] ): literal[string] identifier[a] = identifier[repr] ( identifier[args] ) identifier[b] = identifier[repr] ( identifier[sorted] (( identifier[repr] ( identifier[k] ), identifier[repr] ( identifier[v] )) keyword[for] identifier[k] , identifier[v] keyword[in] identifier[kwargs] . identifier[items] ())) keyword[return] identifier[a] + identifier[b]
def argshash(self, args, kwargs): """Hash mutable arguments' containers with immutable keys and values.""" a = repr(args) b = repr(sorted(((repr(k), repr(v)) for (k, v) in kwargs.items()))) return a + b
def create(cls, type): """ Return the created tag by specifying an integer """ if type == 0: return TagEnd() elif type == 1: return TagShowFrame() elif type == 2: return TagDefineShape() elif type == 4: return TagPlaceObject() elif type == 5: return TagRemoveObject() elif type == 6: return TagDefineBits() elif type == 7: return TagDefineButton() elif type == 8: return TagJPEGTables() elif type == 9: return TagSetBackgroundColor() elif type == 10: return TagDefineFont() elif type == 11: return TagDefineText() elif type == 12: return TagDoAction() elif type == 13: return TagDefineFontInfo() elif type == 14: return TagDefineSound() elif type == 15: return TagStartSound() elif type == 17: return TagDefineButtonSound() elif type == 18: return TagSoundStreamHead() elif type == 19: return TagSoundStreamBlock() elif type == 20: return TagDefineBitsLossless() elif type == 21: return TagDefineBitsJPEG2() elif type == 22: return TagDefineShape2() elif type == 24: return TagProtect() elif type == 26: return TagPlaceObject2() elif type == 28: return TagRemoveObject2() elif type == 32: return TagDefineShape3() elif type == 33: return TagDefineText2() elif type == 34: return TagDefineButton2() elif type == 35: return TagDefineBitsJPEG3() elif type == 36: return TagDefineBitsLossless2() elif type == 37: return TagDefineEditText() elif type == 39: return TagDefineSprite() elif type == 41: return TagProductInfo() elif type == 43: return TagFrameLabel() elif type == 45: return TagSoundStreamHead2() elif type == 46: return TagDefineMorphShape() elif type == 48: return TagDefineFont2() elif type == 56: return TagExportAssets() elif type == 58: return TagEnableDebugger() elif type == 59: return TagDoInitAction() elif type == 60: return TagDefineVideoStream() elif type == 61: return TagVideoFrame() elif type == 63: return TagDebugID() elif type == 64: return TagEnableDebugger2() elif type == 65: return TagScriptLimits() elif type == 69: return TagFileAttributes() elif type == 70: return 
TagPlaceObject3() elif type == 73: return TagDefineFontAlignZones() elif type == 74: return TagCSMTextSettings() elif type == 75: return TagDefineFont3() elif type == 76: return TagSymbolClass() elif type == 77: return TagMetadata() elif type == 78: return TagDefineScalingGrid() elif type == 82: return TagDoABC() elif type == 83: return TagDefineShape4() elif type == 84: return TagDefineMorphShape2() elif type == 86: return TagDefineSceneAndFrameLabelData() elif type == 87: return TagDefineBinaryData() elif type == 88: return TagDefineFontName() elif type == 89: return TagStartSound2() else: return None
def function[create, parameter[cls, type]]: constant[ Return the created tag by specifying an integer ] if compare[name[type] equal[==] constant[0]] begin[:] return[call[name[TagEnd], parameter[]]]
keyword[def] identifier[create] ( identifier[cls] , identifier[type] ): literal[string] keyword[if] identifier[type] == literal[int] : keyword[return] identifier[TagEnd] () keyword[elif] identifier[type] == literal[int] : keyword[return] identifier[TagShowFrame] () keyword[elif] identifier[type] == literal[int] : keyword[return] identifier[TagDefineShape] () keyword[elif] identifier[type] == literal[int] : keyword[return] identifier[TagPlaceObject] () keyword[elif] identifier[type] == literal[int] : keyword[return] identifier[TagRemoveObject] () keyword[elif] identifier[type] == literal[int] : keyword[return] identifier[TagDefineBits] () keyword[elif] identifier[type] == literal[int] : keyword[return] identifier[TagDefineButton] () keyword[elif] identifier[type] == literal[int] : keyword[return] identifier[TagJPEGTables] () keyword[elif] identifier[type] == literal[int] : keyword[return] identifier[TagSetBackgroundColor] () keyword[elif] identifier[type] == literal[int] : keyword[return] identifier[TagDefineFont] () keyword[elif] identifier[type] == literal[int] : keyword[return] identifier[TagDefineText] () keyword[elif] identifier[type] == literal[int] : keyword[return] identifier[TagDoAction] () keyword[elif] identifier[type] == literal[int] : keyword[return] identifier[TagDefineFontInfo] () keyword[elif] identifier[type] == literal[int] : keyword[return] identifier[TagDefineSound] () keyword[elif] identifier[type] == literal[int] : keyword[return] identifier[TagStartSound] () keyword[elif] identifier[type] == literal[int] : keyword[return] identifier[TagDefineButtonSound] () keyword[elif] identifier[type] == literal[int] : keyword[return] identifier[TagSoundStreamHead] () keyword[elif] identifier[type] == literal[int] : keyword[return] identifier[TagSoundStreamBlock] () keyword[elif] identifier[type] == literal[int] : keyword[return] identifier[TagDefineBitsLossless] () keyword[elif] identifier[type] == literal[int] : keyword[return] 
identifier[TagDefineBitsJPEG2] () keyword[elif] identifier[type] == literal[int] : keyword[return] identifier[TagDefineShape2] () keyword[elif] identifier[type] == literal[int] : keyword[return] identifier[TagProtect] () keyword[elif] identifier[type] == literal[int] : keyword[return] identifier[TagPlaceObject2] () keyword[elif] identifier[type] == literal[int] : keyword[return] identifier[TagRemoveObject2] () keyword[elif] identifier[type] == literal[int] : keyword[return] identifier[TagDefineShape3] () keyword[elif] identifier[type] == literal[int] : keyword[return] identifier[TagDefineText2] () keyword[elif] identifier[type] == literal[int] : keyword[return] identifier[TagDefineButton2] () keyword[elif] identifier[type] == literal[int] : keyword[return] identifier[TagDefineBitsJPEG3] () keyword[elif] identifier[type] == literal[int] : keyword[return] identifier[TagDefineBitsLossless2] () keyword[elif] identifier[type] == literal[int] : keyword[return] identifier[TagDefineEditText] () keyword[elif] identifier[type] == literal[int] : keyword[return] identifier[TagDefineSprite] () keyword[elif] identifier[type] == literal[int] : keyword[return] identifier[TagProductInfo] () keyword[elif] identifier[type] == literal[int] : keyword[return] identifier[TagFrameLabel] () keyword[elif] identifier[type] == literal[int] : keyword[return] identifier[TagSoundStreamHead2] () keyword[elif] identifier[type] == literal[int] : keyword[return] identifier[TagDefineMorphShape] () keyword[elif] identifier[type] == literal[int] : keyword[return] identifier[TagDefineFont2] () keyword[elif] identifier[type] == literal[int] : keyword[return] identifier[TagExportAssets] () keyword[elif] identifier[type] == literal[int] : keyword[return] identifier[TagEnableDebugger] () keyword[elif] identifier[type] == literal[int] : keyword[return] identifier[TagDoInitAction] () keyword[elif] identifier[type] == literal[int] : keyword[return] identifier[TagDefineVideoStream] () keyword[elif] 
identifier[type] == literal[int] : keyword[return] identifier[TagVideoFrame] () keyword[elif] identifier[type] == literal[int] : keyword[return] identifier[TagDebugID] () keyword[elif] identifier[type] == literal[int] : keyword[return] identifier[TagEnableDebugger2] () keyword[elif] identifier[type] == literal[int] : keyword[return] identifier[TagScriptLimits] () keyword[elif] identifier[type] == literal[int] : keyword[return] identifier[TagFileAttributes] () keyword[elif] identifier[type] == literal[int] : keyword[return] identifier[TagPlaceObject3] () keyword[elif] identifier[type] == literal[int] : keyword[return] identifier[TagDefineFontAlignZones] () keyword[elif] identifier[type] == literal[int] : keyword[return] identifier[TagCSMTextSettings] () keyword[elif] identifier[type] == literal[int] : keyword[return] identifier[TagDefineFont3] () keyword[elif] identifier[type] == literal[int] : keyword[return] identifier[TagSymbolClass] () keyword[elif] identifier[type] == literal[int] : keyword[return] identifier[TagMetadata] () keyword[elif] identifier[type] == literal[int] : keyword[return] identifier[TagDefineScalingGrid] () keyword[elif] identifier[type] == literal[int] : keyword[return] identifier[TagDoABC] () keyword[elif] identifier[type] == literal[int] : keyword[return] identifier[TagDefineShape4] () keyword[elif] identifier[type] == literal[int] : keyword[return] identifier[TagDefineMorphShape2] () keyword[elif] identifier[type] == literal[int] : keyword[return] identifier[TagDefineSceneAndFrameLabelData] () keyword[elif] identifier[type] == literal[int] : keyword[return] identifier[TagDefineBinaryData] () keyword[elif] identifier[type] == literal[int] : keyword[return] identifier[TagDefineFontName] () keyword[elif] identifier[type] == literal[int] : keyword[return] identifier[TagStartSound2] () keyword[else] : keyword[return] keyword[None]
def create(cls, type): """ Return the created tag by specifying an integer """ if type == 0: return TagEnd() # depends on [control=['if'], data=[]] elif type == 1: return TagShowFrame() # depends on [control=['if'], data=[]] elif type == 2: return TagDefineShape() # depends on [control=['if'], data=[]] elif type == 4: return TagPlaceObject() # depends on [control=['if'], data=[]] elif type == 5: return TagRemoveObject() # depends on [control=['if'], data=[]] elif type == 6: return TagDefineBits() # depends on [control=['if'], data=[]] elif type == 7: return TagDefineButton() # depends on [control=['if'], data=[]] elif type == 8: return TagJPEGTables() # depends on [control=['if'], data=[]] elif type == 9: return TagSetBackgroundColor() # depends on [control=['if'], data=[]] elif type == 10: return TagDefineFont() # depends on [control=['if'], data=[]] elif type == 11: return TagDefineText() # depends on [control=['if'], data=[]] elif type == 12: return TagDoAction() # depends on [control=['if'], data=[]] elif type == 13: return TagDefineFontInfo() # depends on [control=['if'], data=[]] elif type == 14: return TagDefineSound() # depends on [control=['if'], data=[]] elif type == 15: return TagStartSound() # depends on [control=['if'], data=[]] elif type == 17: return TagDefineButtonSound() # depends on [control=['if'], data=[]] elif type == 18: return TagSoundStreamHead() # depends on [control=['if'], data=[]] elif type == 19: return TagSoundStreamBlock() # depends on [control=['if'], data=[]] elif type == 20: return TagDefineBitsLossless() # depends on [control=['if'], data=[]] elif type == 21: return TagDefineBitsJPEG2() # depends on [control=['if'], data=[]] elif type == 22: return TagDefineShape2() # depends on [control=['if'], data=[]] elif type == 24: return TagProtect() # depends on [control=['if'], data=[]] elif type == 26: return TagPlaceObject2() # depends on [control=['if'], data=[]] elif type == 28: return TagRemoveObject2() # depends on [control=['if'], 
data=[]] elif type == 32: return TagDefineShape3() # depends on [control=['if'], data=[]] elif type == 33: return TagDefineText2() # depends on [control=['if'], data=[]] elif type == 34: return TagDefineButton2() # depends on [control=['if'], data=[]] elif type == 35: return TagDefineBitsJPEG3() # depends on [control=['if'], data=[]] elif type == 36: return TagDefineBitsLossless2() # depends on [control=['if'], data=[]] elif type == 37: return TagDefineEditText() # depends on [control=['if'], data=[]] elif type == 39: return TagDefineSprite() # depends on [control=['if'], data=[]] elif type == 41: return TagProductInfo() # depends on [control=['if'], data=[]] elif type == 43: return TagFrameLabel() # depends on [control=['if'], data=[]] elif type == 45: return TagSoundStreamHead2() # depends on [control=['if'], data=[]] elif type == 46: return TagDefineMorphShape() # depends on [control=['if'], data=[]] elif type == 48: return TagDefineFont2() # depends on [control=['if'], data=[]] elif type == 56: return TagExportAssets() # depends on [control=['if'], data=[]] elif type == 58: return TagEnableDebugger() # depends on [control=['if'], data=[]] elif type == 59: return TagDoInitAction() # depends on [control=['if'], data=[]] elif type == 60: return TagDefineVideoStream() # depends on [control=['if'], data=[]] elif type == 61: return TagVideoFrame() # depends on [control=['if'], data=[]] elif type == 63: return TagDebugID() # depends on [control=['if'], data=[]] elif type == 64: return TagEnableDebugger2() # depends on [control=['if'], data=[]] elif type == 65: return TagScriptLimits() # depends on [control=['if'], data=[]] elif type == 69: return TagFileAttributes() # depends on [control=['if'], data=[]] elif type == 70: return TagPlaceObject3() # depends on [control=['if'], data=[]] elif type == 73: return TagDefineFontAlignZones() # depends on [control=['if'], data=[]] elif type == 74: return TagCSMTextSettings() # depends on [control=['if'], data=[]] elif type == 
75: return TagDefineFont3() # depends on [control=['if'], data=[]] elif type == 76: return TagSymbolClass() # depends on [control=['if'], data=[]] elif type == 77: return TagMetadata() # depends on [control=['if'], data=[]] elif type == 78: return TagDefineScalingGrid() # depends on [control=['if'], data=[]] elif type == 82: return TagDoABC() # depends on [control=['if'], data=[]] elif type == 83: return TagDefineShape4() # depends on [control=['if'], data=[]] elif type == 84: return TagDefineMorphShape2() # depends on [control=['if'], data=[]] elif type == 86: return TagDefineSceneAndFrameLabelData() # depends on [control=['if'], data=[]] elif type == 87: return TagDefineBinaryData() # depends on [control=['if'], data=[]] elif type == 88: return TagDefineFontName() # depends on [control=['if'], data=[]] elif type == 89: return TagStartSound2() # depends on [control=['if'], data=[]] else: return None
def plotEzJz(self,*args,**kwargs): """ NAME: plotEzJz PURPOSE: plot E_z(.)/sqrt(dens(R)) along the orbit INPUT: pot= Potential instance or list of instances in which the orbit was integrated d1= - plot Ez vs d1: e.g., 't', 'z', 'R', 'vR', 'vT', 'vz' +bovy_plot.bovy_plot inputs OUTPUT: figure to output device HISTORY: 2010-08-08 - Written - Bovy (NYU) """ labeldict= {'t':r'$t$','R':r'$R$','vR':r'$v_R$','vT':r'$v_T$', 'z':r'$z$','vz':r'$v_z$','phi':r'$\phi$', 'x':r'$x$','y':r'$y$','vx':r'$v_x$','vy':r'$v_y$'} if not 'pot' in kwargs: try: pot= self._pot except AttributeError: raise AttributeError("Integrate orbit first or specify pot=") else: pot= kwargs.pop('pot') d1= kwargs.pop('d1','t') self.EzJz= [(evaluatePotentials(pot,self.orbit[ii,0],self.orbit[ii,3], t=self.t[ii],use_physical=False)- evaluatePotentials(pot,self.orbit[ii,0],0., phi= self.orbit[ii,5],t=self.t[ii], use_physical=False)+ self.orbit[ii,4]**2./2.)/\ nu.sqrt(evaluateDensities(pot,self.orbit[ii,0],0., phi=self.orbit[ii,5], t=self.t[ii], use_physical=False))\ for ii in range(len(self.t))] if not 'xlabel' in kwargs: kwargs['xlabel']= labeldict[d1] if not 'ylabel' in kwargs: kwargs['ylabel']= r'$E_z/\sqrt{\rho}$' if d1 == 't': return plot.bovy_plot(nu.array(self.t), nu.array(self.EzJz)/self.EzJz[0], *args,**kwargs) elif d1 == 'z': return plot.bovy_plot(self.orbit[:,3], nu.array(self.EzJz)/self.EzJz[0], *args,**kwargs) elif d1 == 'R': return plot.bovy_plot(self.orbit[:,0], nu.array(self.EzJz)/self.EzJz[0], *args,**kwargs) elif d1 == 'vR': return plot.bovy_plot(self.orbit[:,1], nu.array(self.EzJz)/self.EzJz[0], *args,**kwargs) elif d1 == 'vT': return plot.bovy_plot(self.orbit[:,2], nu.array(self.EzJz)/self.EzJz[0], *args,**kwargs) elif d1 == 'vz': return plot.bovy_plot(self.orbit[:,4], nu.array(self.EzJz)/self.EzJz[0], *args,**kwargs)
def function[plotEzJz, parameter[self]]: constant[ NAME: plotEzJz PURPOSE: plot E_z(.)/sqrt(dens(R)) along the orbit INPUT: pot= Potential instance or list of instances in which the orbit was integrated d1= - plot Ez vs d1: e.g., 't', 'z', 'R', 'vR', 'vT', 'vz' +bovy_plot.bovy_plot inputs OUTPUT: figure to output device HISTORY: 2010-08-08 - Written - Bovy (NYU) ] variable[labeldict] assign[=] dictionary[[<ast.Constant object at 0x7da1b0e15a50>, <ast.Constant object at 0x7da1b0e167d0>, <ast.Constant object at 0x7da1b0e16680>, <ast.Constant object at 0x7da1b0e14220>, <ast.Constant object at 0x7da1b0e17370>, <ast.Constant object at 0x7da1b0e16890>, <ast.Constant object at 0x7da1b0e153c0>, <ast.Constant object at 0x7da1b0e17d60>, <ast.Constant object at 0x7da1b0e16560>, <ast.Constant object at 0x7da1b0e15180>, <ast.Constant object at 0x7da1b0e149d0>], [<ast.Constant object at 0x7da1b0e15db0>, <ast.Constant object at 0x7da1b0e14e80>, <ast.Constant object at 0x7da1b0e14a30>, <ast.Constant object at 0x7da1b0e15ab0>, <ast.Constant object at 0x7da1b0e15a80>, <ast.Constant object at 0x7da1b0e14d30>, <ast.Constant object at 0x7da1b0e14c70>, <ast.Constant object at 0x7da1b0e14430>, <ast.Constant object at 0x7da1b0e17280>, <ast.Constant object at 0x7da1b0e15240>, <ast.Constant object at 0x7da1b0e174f0>]] if <ast.UnaryOp object at 0x7da1b0e17a60> begin[:] <ast.Try object at 0x7da1b0e15720> variable[d1] assign[=] call[name[kwargs].pop, parameter[constant[d1], constant[t]]] name[self].EzJz assign[=] <ast.ListComp object at 0x7da1b0e16800> if <ast.UnaryOp object at 0x7da1b0e15450> begin[:] call[name[kwargs]][constant[xlabel]] assign[=] call[name[labeldict]][name[d1]] if <ast.UnaryOp object at 0x7da1b0e173d0> begin[:] call[name[kwargs]][constant[ylabel]] assign[=] constant[$E_z/\sqrt{\rho}$] if compare[name[d1] equal[==] constant[t]] begin[:] return[call[name[plot].bovy_plot, parameter[call[name[nu].array, parameter[name[self].t]], binary_operation[call[name[nu].array, 
parameter[name[self].EzJz]] / call[name[self].EzJz][constant[0]]], <ast.Starred object at 0x7da20c991990>]]]
keyword[def] identifier[plotEzJz] ( identifier[self] ,* identifier[args] ,** identifier[kwargs] ): literal[string] identifier[labeldict] ={ literal[string] : literal[string] , literal[string] : literal[string] , literal[string] : literal[string] , literal[string] : literal[string] , literal[string] : literal[string] , literal[string] : literal[string] , literal[string] : literal[string] , literal[string] : literal[string] , literal[string] : literal[string] , literal[string] : literal[string] , literal[string] : literal[string] } keyword[if] keyword[not] literal[string] keyword[in] identifier[kwargs] : keyword[try] : identifier[pot] = identifier[self] . identifier[_pot] keyword[except] identifier[AttributeError] : keyword[raise] identifier[AttributeError] ( literal[string] ) keyword[else] : identifier[pot] = identifier[kwargs] . identifier[pop] ( literal[string] ) identifier[d1] = identifier[kwargs] . identifier[pop] ( literal[string] , literal[string] ) identifier[self] . identifier[EzJz] =[( identifier[evaluatePotentials] ( identifier[pot] , identifier[self] . identifier[orbit] [ identifier[ii] , literal[int] ], identifier[self] . identifier[orbit] [ identifier[ii] , literal[int] ], identifier[t] = identifier[self] . identifier[t] [ identifier[ii] ], identifier[use_physical] = keyword[False] )- identifier[evaluatePotentials] ( identifier[pot] , identifier[self] . identifier[orbit] [ identifier[ii] , literal[int] ], literal[int] , identifier[phi] = identifier[self] . identifier[orbit] [ identifier[ii] , literal[int] ], identifier[t] = identifier[self] . identifier[t] [ identifier[ii] ], identifier[use_physical] = keyword[False] )+ identifier[self] . identifier[orbit] [ identifier[ii] , literal[int] ]** literal[int] / literal[int] )/ identifier[nu] . identifier[sqrt] ( identifier[evaluateDensities] ( identifier[pot] , identifier[self] . identifier[orbit] [ identifier[ii] , literal[int] ], literal[int] , identifier[phi] = identifier[self] . 
identifier[orbit] [ identifier[ii] , literal[int] ], identifier[t] = identifier[self] . identifier[t] [ identifier[ii] ], identifier[use_physical] = keyword[False] )) keyword[for] identifier[ii] keyword[in] identifier[range] ( identifier[len] ( identifier[self] . identifier[t] ))] keyword[if] keyword[not] literal[string] keyword[in] identifier[kwargs] : identifier[kwargs] [ literal[string] ]= identifier[labeldict] [ identifier[d1] ] keyword[if] keyword[not] literal[string] keyword[in] identifier[kwargs] : identifier[kwargs] [ literal[string] ]= literal[string] keyword[if] identifier[d1] == literal[string] : keyword[return] identifier[plot] . identifier[bovy_plot] ( identifier[nu] . identifier[array] ( identifier[self] . identifier[t] ), identifier[nu] . identifier[array] ( identifier[self] . identifier[EzJz] )/ identifier[self] . identifier[EzJz] [ literal[int] ], * identifier[args] ,** identifier[kwargs] ) keyword[elif] identifier[d1] == literal[string] : keyword[return] identifier[plot] . identifier[bovy_plot] ( identifier[self] . identifier[orbit] [:, literal[int] ], identifier[nu] . identifier[array] ( identifier[self] . identifier[EzJz] )/ identifier[self] . identifier[EzJz] [ literal[int] ], * identifier[args] ,** identifier[kwargs] ) keyword[elif] identifier[d1] == literal[string] : keyword[return] identifier[plot] . identifier[bovy_plot] ( identifier[self] . identifier[orbit] [:, literal[int] ], identifier[nu] . identifier[array] ( identifier[self] . identifier[EzJz] )/ identifier[self] . identifier[EzJz] [ literal[int] ], * identifier[args] ,** identifier[kwargs] ) keyword[elif] identifier[d1] == literal[string] : keyword[return] identifier[plot] . identifier[bovy_plot] ( identifier[self] . identifier[orbit] [:, literal[int] ], identifier[nu] . identifier[array] ( identifier[self] . identifier[EzJz] )/ identifier[self] . 
identifier[EzJz] [ literal[int] ], * identifier[args] ,** identifier[kwargs] ) keyword[elif] identifier[d1] == literal[string] : keyword[return] identifier[plot] . identifier[bovy_plot] ( identifier[self] . identifier[orbit] [:, literal[int] ], identifier[nu] . identifier[array] ( identifier[self] . identifier[EzJz] )/ identifier[self] . identifier[EzJz] [ literal[int] ], * identifier[args] ,** identifier[kwargs] ) keyword[elif] identifier[d1] == literal[string] : keyword[return] identifier[plot] . identifier[bovy_plot] ( identifier[self] . identifier[orbit] [:, literal[int] ], identifier[nu] . identifier[array] ( identifier[self] . identifier[EzJz] )/ identifier[self] . identifier[EzJz] [ literal[int] ], * identifier[args] ,** identifier[kwargs] )
def plotEzJz(self, *args, **kwargs): """ NAME: plotEzJz PURPOSE: plot E_z(.)/sqrt(dens(R)) along the orbit INPUT: pot= Potential instance or list of instances in which the orbit was integrated d1= - plot Ez vs d1: e.g., 't', 'z', 'R', 'vR', 'vT', 'vz' +bovy_plot.bovy_plot inputs OUTPUT: figure to output device HISTORY: 2010-08-08 - Written - Bovy (NYU) """ labeldict = {'t': '$t$', 'R': '$R$', 'vR': '$v_R$', 'vT': '$v_T$', 'z': '$z$', 'vz': '$v_z$', 'phi': '$\\phi$', 'x': '$x$', 'y': '$y$', 'vx': '$v_x$', 'vy': '$v_y$'} if not 'pot' in kwargs: try: pot = self._pot # depends on [control=['try'], data=[]] except AttributeError: raise AttributeError('Integrate orbit first or specify pot=') # depends on [control=['except'], data=[]] # depends on [control=['if'], data=[]] else: pot = kwargs.pop('pot') d1 = kwargs.pop('d1', 't') self.EzJz = [(evaluatePotentials(pot, self.orbit[ii, 0], self.orbit[ii, 3], t=self.t[ii], use_physical=False) - evaluatePotentials(pot, self.orbit[ii, 0], 0.0, phi=self.orbit[ii, 5], t=self.t[ii], use_physical=False) + self.orbit[ii, 4] ** 2.0 / 2.0) / nu.sqrt(evaluateDensities(pot, self.orbit[ii, 0], 0.0, phi=self.orbit[ii, 5], t=self.t[ii], use_physical=False)) for ii in range(len(self.t))] if not 'xlabel' in kwargs: kwargs['xlabel'] = labeldict[d1] # depends on [control=['if'], data=[]] if not 'ylabel' in kwargs: kwargs['ylabel'] = '$E_z/\\sqrt{\\rho}$' # depends on [control=['if'], data=[]] if d1 == 't': return plot.bovy_plot(nu.array(self.t), nu.array(self.EzJz) / self.EzJz[0], *args, **kwargs) # depends on [control=['if'], data=[]] elif d1 == 'z': return plot.bovy_plot(self.orbit[:, 3], nu.array(self.EzJz) / self.EzJz[0], *args, **kwargs) # depends on [control=['if'], data=[]] elif d1 == 'R': return plot.bovy_plot(self.orbit[:, 0], nu.array(self.EzJz) / self.EzJz[0], *args, **kwargs) # depends on [control=['if'], data=[]] elif d1 == 'vR': return plot.bovy_plot(self.orbit[:, 1], nu.array(self.EzJz) / self.EzJz[0], *args, **kwargs) # depends 
on [control=['if'], data=[]] elif d1 == 'vT': return plot.bovy_plot(self.orbit[:, 2], nu.array(self.EzJz) / self.EzJz[0], *args, **kwargs) # depends on [control=['if'], data=[]] elif d1 == 'vz': return plot.bovy_plot(self.orbit[:, 4], nu.array(self.EzJz) / self.EzJz[0], *args, **kwargs) # depends on [control=['if'], data=[]]
def getChanges(self, request): """Catch a POST request from BitBucket and start a build process Check the URL below if you require more information about payload https://confluence.atlassian.com/display/BITBUCKET/POST+Service+Management :param request: the http request Twisted object :param options: additional options """ event_type = request.getHeader(_HEADER_EVENT) event_type = bytes2unicode(event_type) payload = json.loads(bytes2unicode(request.args[b'payload'][0])) repo_url = '{}{}'.format( payload['canon_url'], payload['repository']['absolute_url']) project = request.args.get(b'project', [b''])[0] project = bytes2unicode(project) changes = [] for commit in payload['commits']: changes.append({ 'author': commit['raw_author'], 'files': [f['file'] for f in commit['files']], 'comments': commit['message'], 'revision': commit['raw_node'], 'when_timestamp': dateparse(commit['utctimestamp']), 'branch': commit['branch'], 'revlink': '{}commits/{}'.format(repo_url, commit['raw_node']), 'repository': repo_url, 'project': project, 'properties': { 'event': event_type, }, }) log.msg('New revision: {}'.format(commit['node'])) log.msg('Received {} changes from bitbucket'.format(len(changes))) return (changes, payload['repository']['scm'])
def function[getChanges, parameter[self, request]]: constant[Catch a POST request from BitBucket and start a build process Check the URL below if you require more information about payload https://confluence.atlassian.com/display/BITBUCKET/POST+Service+Management :param request: the http request Twisted object :param options: additional options ] variable[event_type] assign[=] call[name[request].getHeader, parameter[name[_HEADER_EVENT]]] variable[event_type] assign[=] call[name[bytes2unicode], parameter[name[event_type]]] variable[payload] assign[=] call[name[json].loads, parameter[call[name[bytes2unicode], parameter[call[call[name[request].args][constant[b'payload']]][constant[0]]]]]] variable[repo_url] assign[=] call[constant[{}{}].format, parameter[call[name[payload]][constant[canon_url]], call[call[name[payload]][constant[repository]]][constant[absolute_url]]]] variable[project] assign[=] call[call[name[request].args.get, parameter[constant[b'project'], list[[<ast.Constant object at 0x7da18f810a90>]]]]][constant[0]] variable[project] assign[=] call[name[bytes2unicode], parameter[name[project]]] variable[changes] assign[=] list[[]] for taget[name[commit]] in starred[call[name[payload]][constant[commits]]] begin[:] call[name[changes].append, parameter[dictionary[[<ast.Constant object at 0x7da18f811240>, <ast.Constant object at 0x7da18f813e50>, <ast.Constant object at 0x7da18f813160>, <ast.Constant object at 0x7da18f811990>, <ast.Constant object at 0x7da18f8121a0>, <ast.Constant object at 0x7da18f811ea0>, <ast.Constant object at 0x7da18f810670>, <ast.Constant object at 0x7da18f813790>, <ast.Constant object at 0x7da18f8128f0>, <ast.Constant object at 0x7da18f813d30>], [<ast.Subscript object at 0x7da18f8121d0>, <ast.ListComp object at 0x7da18f811390>, <ast.Subscript object at 0x7da18f811c60>, <ast.Subscript object at 0x7da18f812590>, <ast.Call object at 0x7da18f8122f0>, <ast.Subscript object at 0x7da18f812110>, <ast.Call object at 0x7da18f811e10>, <ast.Name object 
at 0x7da18f810430>, <ast.Name object at 0x7da18f8136d0>, <ast.Dict object at 0x7da18f812920>]]]] call[name[log].msg, parameter[call[constant[New revision: {}].format, parameter[call[name[commit]][constant[node]]]]]] call[name[log].msg, parameter[call[constant[Received {} changes from bitbucket].format, parameter[call[name[len], parameter[name[changes]]]]]]] return[tuple[[<ast.Name object at 0x7da18f813c70>, <ast.Subscript object at 0x7da18f8104f0>]]]
keyword[def] identifier[getChanges] ( identifier[self] , identifier[request] ): literal[string] identifier[event_type] = identifier[request] . identifier[getHeader] ( identifier[_HEADER_EVENT] ) identifier[event_type] = identifier[bytes2unicode] ( identifier[event_type] ) identifier[payload] = identifier[json] . identifier[loads] ( identifier[bytes2unicode] ( identifier[request] . identifier[args] [ literal[string] ][ literal[int] ])) identifier[repo_url] = literal[string] . identifier[format] ( identifier[payload] [ literal[string] ], identifier[payload] [ literal[string] ][ literal[string] ]) identifier[project] = identifier[request] . identifier[args] . identifier[get] ( literal[string] ,[ literal[string] ])[ literal[int] ] identifier[project] = identifier[bytes2unicode] ( identifier[project] ) identifier[changes] =[] keyword[for] identifier[commit] keyword[in] identifier[payload] [ literal[string] ]: identifier[changes] . identifier[append] ({ literal[string] : identifier[commit] [ literal[string] ], literal[string] :[ identifier[f] [ literal[string] ] keyword[for] identifier[f] keyword[in] identifier[commit] [ literal[string] ]], literal[string] : identifier[commit] [ literal[string] ], literal[string] : identifier[commit] [ literal[string] ], literal[string] : identifier[dateparse] ( identifier[commit] [ literal[string] ]), literal[string] : identifier[commit] [ literal[string] ], literal[string] : literal[string] . identifier[format] ( identifier[repo_url] , identifier[commit] [ literal[string] ]), literal[string] : identifier[repo_url] , literal[string] : identifier[project] , literal[string] :{ literal[string] : identifier[event_type] , }, }) identifier[log] . identifier[msg] ( literal[string] . identifier[format] ( identifier[commit] [ literal[string] ])) identifier[log] . identifier[msg] ( literal[string] . 
identifier[format] ( identifier[len] ( identifier[changes] ))) keyword[return] ( identifier[changes] , identifier[payload] [ literal[string] ][ literal[string] ])
def getChanges(self, request): """Catch a POST request from BitBucket and start a build process Check the URL below if you require more information about payload https://confluence.atlassian.com/display/BITBUCKET/POST+Service+Management :param request: the http request Twisted object :param options: additional options """ event_type = request.getHeader(_HEADER_EVENT) event_type = bytes2unicode(event_type) payload = json.loads(bytes2unicode(request.args[b'payload'][0])) repo_url = '{}{}'.format(payload['canon_url'], payload['repository']['absolute_url']) project = request.args.get(b'project', [b''])[0] project = bytes2unicode(project) changes = [] for commit in payload['commits']: changes.append({'author': commit['raw_author'], 'files': [f['file'] for f in commit['files']], 'comments': commit['message'], 'revision': commit['raw_node'], 'when_timestamp': dateparse(commit['utctimestamp']), 'branch': commit['branch'], 'revlink': '{}commits/{}'.format(repo_url, commit['raw_node']), 'repository': repo_url, 'project': project, 'properties': {'event': event_type}}) log.msg('New revision: {}'.format(commit['node'])) # depends on [control=['for'], data=['commit']] log.msg('Received {} changes from bitbucket'.format(len(changes))) return (changes, payload['repository']['scm'])
def cmdline_split(s: str, platform: Union[int, str] = 'this') -> List[str]: """ As per https://stackoverflow.com/questions/33560364/python-windows-parsing-command-lines-with-shlex. Multi-platform variant of ``shlex.split()`` for command-line splitting. For use with ``subprocess``, for ``argv`` injection etc. Using fast REGEX. Args: s: string to split platform: - ``'this'`` = auto from current platform; - ``1`` = POSIX; - ``0`` = Windows/CMD - (other values reserved) """ # noqa if platform == 'this': platform = (sys.platform != 'win32') # RNC: includes 64-bit Windows if platform == 1: # POSIX re_cmd_lex = r'''"((?:\\["\\]|[^"])*)"|'([^']*)'|(\\.)|(&&?|\|\|?|\d?\>|[<])|([^\s'"\\&|<>]+)|(\s+)|(.)''' # noqa elif platform == 0: # Windows/CMD re_cmd_lex = r'''"((?:""|\\["\\]|[^"])*)"?()|(\\\\(?=\\*")|\\")|(&&?|\|\|?|\d?>|[<])|([^\s"&|<>]+)|(\s+)|(.)''' # noqa else: raise AssertionError('unknown platform %r' % platform) args = [] accu = None # collects pieces of one arg for qs, qss, esc, pipe, word, white, fail in re.findall(re_cmd_lex, s): if word: pass # most frequent elif esc: word = esc[1] elif white or pipe: if accu is not None: args.append(accu) if pipe: args.append(pipe) accu = None continue elif fail: raise ValueError("invalid or incomplete shell string") elif qs: word = qs.replace('\\"', '"').replace('\\\\', '\\') if platform == 0: word = word.replace('""', '"') else: word = qss # may be even empty; must be last accu = (accu or '') + word if accu is not None: args.append(accu) return args
def function[cmdline_split, parameter[s, platform]]: constant[ As per https://stackoverflow.com/questions/33560364/python-windows-parsing-command-lines-with-shlex. Multi-platform variant of ``shlex.split()`` for command-line splitting. For use with ``subprocess``, for ``argv`` injection etc. Using fast REGEX. Args: s: string to split platform: - ``'this'`` = auto from current platform; - ``1`` = POSIX; - ``0`` = Windows/CMD - (other values reserved) ] if compare[name[platform] equal[==] constant[this]] begin[:] variable[platform] assign[=] compare[name[sys].platform not_equal[!=] constant[win32]] if compare[name[platform] equal[==] constant[1]] begin[:] variable[re_cmd_lex] assign[=] constant["((?:\\["\\]|[^"])*)"|'([^']*)'|(\\.)|(&&?|\|\|?|\d?\>|[<])|([^\s'"\\&|<>]+)|(\s+)|(.)] variable[args] assign[=] list[[]] variable[accu] assign[=] constant[None] for taget[tuple[[<ast.Name object at 0x7da1b190f370>, <ast.Name object at 0x7da1b190e1d0>, <ast.Name object at 0x7da1b190c280>, <ast.Name object at 0x7da1b190f580>, <ast.Name object at 0x7da1b190f220>, <ast.Name object at 0x7da1b190ff70>, <ast.Name object at 0x7da1b190ffa0>]]] in starred[call[name[re].findall, parameter[name[re_cmd_lex], name[s]]]] begin[:] if name[word] begin[:] pass variable[accu] assign[=] binary_operation[<ast.BoolOp object at 0x7da1b189d510> + name[word]] if compare[name[accu] is_not constant[None]] begin[:] call[name[args].append, parameter[name[accu]]] return[name[args]]
keyword[def] identifier[cmdline_split] ( identifier[s] : identifier[str] , identifier[platform] : identifier[Union] [ identifier[int] , identifier[str] ]= literal[string] )-> identifier[List] [ identifier[str] ]: literal[string] keyword[if] identifier[platform] == literal[string] : identifier[platform] =( identifier[sys] . identifier[platform] != literal[string] ) keyword[if] identifier[platform] == literal[int] : identifier[re_cmd_lex] = literal[string] keyword[elif] identifier[platform] == literal[int] : identifier[re_cmd_lex] = literal[string] keyword[else] : keyword[raise] identifier[AssertionError] ( literal[string] % identifier[platform] ) identifier[args] =[] identifier[accu] = keyword[None] keyword[for] identifier[qs] , identifier[qss] , identifier[esc] , identifier[pipe] , identifier[word] , identifier[white] , identifier[fail] keyword[in] identifier[re] . identifier[findall] ( identifier[re_cmd_lex] , identifier[s] ): keyword[if] identifier[word] : keyword[pass] keyword[elif] identifier[esc] : identifier[word] = identifier[esc] [ literal[int] ] keyword[elif] identifier[white] keyword[or] identifier[pipe] : keyword[if] identifier[accu] keyword[is] keyword[not] keyword[None] : identifier[args] . identifier[append] ( identifier[accu] ) keyword[if] identifier[pipe] : identifier[args] . identifier[append] ( identifier[pipe] ) identifier[accu] = keyword[None] keyword[continue] keyword[elif] identifier[fail] : keyword[raise] identifier[ValueError] ( literal[string] ) keyword[elif] identifier[qs] : identifier[word] = identifier[qs] . identifier[replace] ( literal[string] , literal[string] ). identifier[replace] ( literal[string] , literal[string] ) keyword[if] identifier[platform] == literal[int] : identifier[word] = identifier[word] . 
identifier[replace] ( literal[string] , literal[string] ) keyword[else] : identifier[word] = identifier[qss] identifier[accu] =( identifier[accu] keyword[or] literal[string] )+ identifier[word] keyword[if] identifier[accu] keyword[is] keyword[not] keyword[None] : identifier[args] . identifier[append] ( identifier[accu] ) keyword[return] identifier[args]
def cmdline_split(s: str, platform: Union[int, str]='this') -> List[str]: """ As per https://stackoverflow.com/questions/33560364/python-windows-parsing-command-lines-with-shlex. Multi-platform variant of ``shlex.split()`` for command-line splitting. For use with ``subprocess``, for ``argv`` injection etc. Using fast REGEX. Args: s: string to split platform: - ``'this'`` = auto from current platform; - ``1`` = POSIX; - ``0`` = Windows/CMD - (other values reserved) """ # noqa if platform == 'this': platform = sys.platform != 'win32' # RNC: includes 64-bit Windows # depends on [control=['if'], data=['platform']] if platform == 1: # POSIX re_cmd_lex = '"((?:\\\\["\\\\]|[^"])*)"|\'([^\']*)\'|(\\\\.)|(&&?|\\|\\|?|\\d?\\>|[<])|([^\\s\'"\\\\&|<>]+)|(\\s+)|(.)' # noqa # depends on [control=['if'], data=[]] elif platform == 0: # Windows/CMD re_cmd_lex = '"((?:""|\\\\["\\\\]|[^"])*)"?()|(\\\\\\\\(?=\\\\*")|\\\\")|(&&?|\\|\\|?|\\d?>|[<])|([^\\s"&|<>]+)|(\\s+)|(.)' # noqa # depends on [control=['if'], data=[]] else: raise AssertionError('unknown platform %r' % platform) args = [] accu = None # collects pieces of one arg for (qs, qss, esc, pipe, word, white, fail) in re.findall(re_cmd_lex, s): if word: pass # most frequent # depends on [control=['if'], data=[]] elif esc: word = esc[1] # depends on [control=['if'], data=[]] elif white or pipe: if accu is not None: args.append(accu) # depends on [control=['if'], data=['accu']] if pipe: args.append(pipe) # depends on [control=['if'], data=[]] accu = None continue # depends on [control=['if'], data=[]] elif fail: raise ValueError('invalid or incomplete shell string') # depends on [control=['if'], data=[]] elif qs: word = qs.replace('\\"', '"').replace('\\\\', '\\') if platform == 0: word = word.replace('""', '"') # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] else: word = qss # may be even empty; must be last accu = (accu or '') + word # depends on [control=['for'], data=[]] if accu is not None: 
args.append(accu) # depends on [control=['if'], data=['accu']] return args
def set_optional_elements(self): """Sets elements considered option by RSS spec""" self.set_categories() self.set_copyright() self.set_generator() self.set_image() self.set_language() self.set_last_build_date() self.set_managing_editor() self.set_published_date() self.set_pubsubhubbub() self.set_ttl() self.set_web_master()
def function[set_optional_elements, parameter[self]]: constant[Sets elements considered option by RSS spec] call[name[self].set_categories, parameter[]] call[name[self].set_copyright, parameter[]] call[name[self].set_generator, parameter[]] call[name[self].set_image, parameter[]] call[name[self].set_language, parameter[]] call[name[self].set_last_build_date, parameter[]] call[name[self].set_managing_editor, parameter[]] call[name[self].set_published_date, parameter[]] call[name[self].set_pubsubhubbub, parameter[]] call[name[self].set_ttl, parameter[]] call[name[self].set_web_master, parameter[]]
keyword[def] identifier[set_optional_elements] ( identifier[self] ): literal[string] identifier[self] . identifier[set_categories] () identifier[self] . identifier[set_copyright] () identifier[self] . identifier[set_generator] () identifier[self] . identifier[set_image] () identifier[self] . identifier[set_language] () identifier[self] . identifier[set_last_build_date] () identifier[self] . identifier[set_managing_editor] () identifier[self] . identifier[set_published_date] () identifier[self] . identifier[set_pubsubhubbub] () identifier[self] . identifier[set_ttl] () identifier[self] . identifier[set_web_master] ()
def set_optional_elements(self): """Sets elements considered option by RSS spec""" self.set_categories() self.set_copyright() self.set_generator() self.set_image() self.set_language() self.set_last_build_date() self.set_managing_editor() self.set_published_date() self.set_pubsubhubbub() self.set_ttl() self.set_web_master()
def ipoib_interfaces(): """Return a list of IPOIB capable ethernet interfaces""" interfaces = [] for interface in network_interfaces(): try: driver = re.search('^driver: (.+)$', subprocess.check_output([ 'ethtool', '-i', interface]), re.M).group(1) if driver in IPOIB_DRIVERS: interfaces.append(interface) except Exception: log("Skipping interface %s" % interface, level=INFO) continue return interfaces
def function[ipoib_interfaces, parameter[]]: constant[Return a list of IPOIB capable ethernet interfaces] variable[interfaces] assign[=] list[[]] for taget[name[interface]] in starred[call[name[network_interfaces], parameter[]]] begin[:] <ast.Try object at 0x7da18f812a40> return[name[interfaces]]
keyword[def] identifier[ipoib_interfaces] (): literal[string] identifier[interfaces] =[] keyword[for] identifier[interface] keyword[in] identifier[network_interfaces] (): keyword[try] : identifier[driver] = identifier[re] . identifier[search] ( literal[string] , identifier[subprocess] . identifier[check_output] ([ literal[string] , literal[string] , identifier[interface] ]), identifier[re] . identifier[M] ). identifier[group] ( literal[int] ) keyword[if] identifier[driver] keyword[in] identifier[IPOIB_DRIVERS] : identifier[interfaces] . identifier[append] ( identifier[interface] ) keyword[except] identifier[Exception] : identifier[log] ( literal[string] % identifier[interface] , identifier[level] = identifier[INFO] ) keyword[continue] keyword[return] identifier[interfaces]
def ipoib_interfaces(): """Return a list of IPOIB capable ethernet interfaces""" interfaces = [] for interface in network_interfaces(): try: driver = re.search('^driver: (.+)$', subprocess.check_output(['ethtool', '-i', interface]), re.M).group(1) if driver in IPOIB_DRIVERS: interfaces.append(interface) # depends on [control=['if'], data=[]] # depends on [control=['try'], data=[]] except Exception: log('Skipping interface %s' % interface, level=INFO) continue # depends on [control=['except'], data=[]] # depends on [control=['for'], data=['interface']] return interfaces
def get_tab(self, model_alias, object, tab_code): """ Get tab for given object and tab code :param model_alias: :param object: Object used to render tab :param tab_code: Tab code to use :return: """ model_alias = self.get_model_alias(model_alias) for item in self.tabs[model_alias]: if item.code == tab_code and item.display_filter(object): return item raise Exception('Given tab does not exits or is filtered')
def function[get_tab, parameter[self, model_alias, object, tab_code]]: constant[ Get tab for given object and tab code :param model_alias: :param object: Object used to render tab :param tab_code: Tab code to use :return: ] variable[model_alias] assign[=] call[name[self].get_model_alias, parameter[name[model_alias]]] for taget[name[item]] in starred[call[name[self].tabs][name[model_alias]]] begin[:] if <ast.BoolOp object at 0x7da20c7cb850> begin[:] return[name[item]] <ast.Raise object at 0x7da20c7c9d50>
keyword[def] identifier[get_tab] ( identifier[self] , identifier[model_alias] , identifier[object] , identifier[tab_code] ): literal[string] identifier[model_alias] = identifier[self] . identifier[get_model_alias] ( identifier[model_alias] ) keyword[for] identifier[item] keyword[in] identifier[self] . identifier[tabs] [ identifier[model_alias] ]: keyword[if] identifier[item] . identifier[code] == identifier[tab_code] keyword[and] identifier[item] . identifier[display_filter] ( identifier[object] ): keyword[return] identifier[item] keyword[raise] identifier[Exception] ( literal[string] )
def get_tab(self, model_alias, object, tab_code): """ Get tab for given object and tab code :param model_alias: :param object: Object used to render tab :param tab_code: Tab code to use :return: """ model_alias = self.get_model_alias(model_alias) for item in self.tabs[model_alias]: if item.code == tab_code and item.display_filter(object): return item # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['item']] raise Exception('Given tab does not exits or is filtered')
def list_all(self, per_page=10, omit=None): """List all groups. Since the order of groups is determined by recent activity, this is the recommended way to obtain a list of all groups. See :func:`~groupy.api.groups.Groups.list` for details about ``omit``. :param int per_page: number of groups per page :param int omit: a comma-separated list of fields to exclude :return: a list of groups :rtype: :class:`~groupy.pagers.GroupList` """ return self.list(per_page=per_page, omit=omit).autopage()
def function[list_all, parameter[self, per_page, omit]]: constant[List all groups. Since the order of groups is determined by recent activity, this is the recommended way to obtain a list of all groups. See :func:`~groupy.api.groups.Groups.list` for details about ``omit``. :param int per_page: number of groups per page :param int omit: a comma-separated list of fields to exclude :return: a list of groups :rtype: :class:`~groupy.pagers.GroupList` ] return[call[call[name[self].list, parameter[]].autopage, parameter[]]]
keyword[def] identifier[list_all] ( identifier[self] , identifier[per_page] = literal[int] , identifier[omit] = keyword[None] ): literal[string] keyword[return] identifier[self] . identifier[list] ( identifier[per_page] = identifier[per_page] , identifier[omit] = identifier[omit] ). identifier[autopage] ()
def list_all(self, per_page=10, omit=None): """List all groups. Since the order of groups is determined by recent activity, this is the recommended way to obtain a list of all groups. See :func:`~groupy.api.groups.Groups.list` for details about ``omit``. :param int per_page: number of groups per page :param int omit: a comma-separated list of fields to exclude :return: a list of groups :rtype: :class:`~groupy.pagers.GroupList` """ return self.list(per_page=per_page, omit=omit).autopage()
def delay(
        self,
        identifier: typing.Any,
        until: typing.Union[int, float]=-1,
) -> bool:
    """Reschedule a deferred call to run no earlier than ``until``.

    Args:
        identifier (typing.Any): The handle returned by a previous call
            to defer or defer_for identifying the call to reschedule.
        until (typing.Union[int, float]): Clock time at which the
            callback becomes eligible to run. Values earlier than the
            current time mean "run at the next opportunity".

    Returns:
        bool: True when the call was delayed. False when the handle is
        invalid or the deferred call has already executed.
    """
    # Abstract hook: concrete schedulers must override this.
    raise NotImplementedError()
def function[delay, parameter[self, identifier, until]]: constant[Delay a deferred function until the given time. Args: identifier (typing.Any): The identifier returned from a call to defer or defer_for. until (typing.Union[int, float]): A numeric value that represents the clock time when the callback becomes available for execution. Values that are less than the current time result in the function being called at the next opportunity. Returns: bool: True if the call is delayed. False if the identifier is invalid or if the deferred call is already executed. ] <ast.Raise object at 0x7da2046216c0>
keyword[def] identifier[delay] ( identifier[self] , identifier[identifier] : identifier[typing] . identifier[Any] , identifier[until] : identifier[typing] . identifier[Union] [ identifier[int] , identifier[float] ]=- literal[int] , )-> identifier[bool] : literal[string] keyword[raise] identifier[NotImplementedError] ()
def delay(self, identifier: typing.Any, until: typing.Union[int, float]=-1) -> bool: """Delay a deferred function until the given time. Args: identifier (typing.Any): The identifier returned from a call to defer or defer_for. until (typing.Union[int, float]): A numeric value that represents the clock time when the callback becomes available for execution. Values that are less than the current time result in the function being called at the next opportunity. Returns: bool: True if the call is delayed. False if the identifier is invalid or if the deferred call is already executed. """ raise NotImplementedError()
def session_dump(self, cell, hash, fname_session):
        """
        Dump the state of the running kernel session to a file.

        Injects a throwaway code cell into the kernel that serializes the
        whole interpreter session with ``dill`` so it can be restored later
        as a per-cell cache.

        :param cell: notebook cell being cached (used only for error reporting)
        :param hash: cell hash (used only in log messages)
        :param fname_session: output filename for the serialized session
        :return: True if the dump succeeded; False if serialization failed,
            in which case caching is disabled and the partial file removed
        """
        logging.debug('Cell {}: Dumping session to {}'.format(hash, fname_session))
        # This code runs inside the kernel process, not in this one.
        inject_code = ['import dill',
                       'dill.dump_session(filename="{}")'.format(fname_session),
                       ]
        inject_cell = nbf.v4.new_code_cell('\n'.join(inject_code))
        reply, outputs = super().run_cell(inject_cell)
        errors = list(filter(lambda out: out.output_type == 'error', outputs))
        if len(errors):
            # dill could not serialize something in the session; fall back to
            # running everything without a cache from here on.
            logging.info('Cell {}: Warning: serialization failed, cache disabled'.format(hash))
            logging.debug(
                'Cell {}: Serialization error: {}'.format(hash, CellExecutionError.from_cell_and_msg(cell, errors[0])))
            # disable attempts to retrieve cache for subsequent cells
            self.disable_cache = True
            # remove partial cache for current cell
            os.remove(fname_session)
            return False
        return True
def function[session_dump, parameter[self, cell, hash, fname_session]]: constant[ Dump ipython session to file :param hash: cell hash :param fname_session: output filename :return: ] call[name[logging].debug, parameter[call[constant[Cell {}: Dumping session to {}].format, parameter[name[hash], name[fname_session]]]]] variable[inject_code] assign[=] list[[<ast.Constant object at 0x7da1b0b59a50>, <ast.Call object at 0x7da1b0b5a860>]] variable[inject_cell] assign[=] call[name[nbf].v4.new_code_cell, parameter[call[constant[ ].join, parameter[name[inject_code]]]]] <ast.Tuple object at 0x7da1b0cf75b0> assign[=] call[call[name[super], parameter[]].run_cell, parameter[name[inject_cell]]] variable[errors] assign[=] call[name[list], parameter[call[name[filter], parameter[<ast.Lambda object at 0x7da1b0cf70d0>, name[outputs]]]]] if call[name[len], parameter[name[errors]]] begin[:] call[name[logging].info, parameter[call[constant[Cell {}: Warning: serialization failed, cache disabled].format, parameter[name[hash]]]]] call[name[logging].debug, parameter[call[constant[Cell {}: Serialization error: {}].format, parameter[name[hash], call[name[CellExecutionError].from_cell_and_msg, parameter[name[cell], call[name[errors]][constant[0]]]]]]]] name[self].disable_cache assign[=] constant[True] call[name[os].remove, parameter[name[fname_session]]] return[constant[False]] return[constant[True]]
keyword[def] identifier[session_dump] ( identifier[self] , identifier[cell] , identifier[hash] , identifier[fname_session] ): literal[string] identifier[logging] . identifier[debug] ( literal[string] . identifier[format] ( identifier[hash] , identifier[fname_session] )) identifier[inject_code] =[ literal[string] , literal[string] . identifier[format] ( identifier[fname_session] ), ] identifier[inject_cell] = identifier[nbf] . identifier[v4] . identifier[new_code_cell] ( literal[string] . identifier[join] ( identifier[inject_code] )) identifier[reply] , identifier[outputs] = identifier[super] (). identifier[run_cell] ( identifier[inject_cell] ) identifier[errors] = identifier[list] ( identifier[filter] ( keyword[lambda] identifier[out] : identifier[out] . identifier[output_type] == literal[string] , identifier[outputs] )) keyword[if] identifier[len] ( identifier[errors] ): identifier[logging] . identifier[info] ( literal[string] . identifier[format] ( identifier[hash] )) identifier[logging] . identifier[debug] ( literal[string] . identifier[format] ( identifier[hash] , identifier[CellExecutionError] . identifier[from_cell_and_msg] ( identifier[cell] , identifier[errors] [ literal[int] ]))) identifier[self] . identifier[disable_cache] = keyword[True] identifier[os] . identifier[remove] ( identifier[fname_session] ) keyword[return] keyword[False] keyword[return] keyword[True]
def session_dump(self, cell, hash, fname_session): """ Dump ipython session to file :param hash: cell hash :param fname_session: output filename :return: """ logging.debug('Cell {}: Dumping session to {}'.format(hash, fname_session)) inject_code = ['import dill', 'dill.dump_session(filename="{}")'.format(fname_session)] inject_cell = nbf.v4.new_code_cell('\n'.join(inject_code)) (reply, outputs) = super().run_cell(inject_cell) errors = list(filter(lambda out: out.output_type == 'error', outputs)) if len(errors): logging.info('Cell {}: Warning: serialization failed, cache disabled'.format(hash)) logging.debug('Cell {}: Serialization error: {}'.format(hash, CellExecutionError.from_cell_and_msg(cell, errors[0]))) # disable attempts to retrieve cache for subsequent cells self.disable_cache = True # remove partial cache for current cell os.remove(fname_session) return False # depends on [control=['if'], data=[]] return True
def dist_abs(self, src, tar, *args, **kwargs):
    """Return the absolute distance between two strings.

    This base implementation simply delegates to ``dist``, forwarding any
    extra positional and keyword arguments unchanged.

    Parameters
    ----------
    src : str
        Source string for comparison
    tar : str
        Target string for comparison
    *args
        Variable length argument list.
    **kwargs
        Arbitrary keyword arguments.

    Returns
    -------
    int
        Absolute distance
    """
    distance = self.dist(src, tar, *args, **kwargs)
    return distance
def function[dist_abs, parameter[self, src, tar]]: constant[Return absolute distance. Parameters ---------- src : str Source string for comparison tar : str Target string for comparison *args Variable length argument list. **kwargs Arbitrary keyword arguments. Returns ------- int Absolute distance ] return[call[name[self].dist, parameter[name[src], name[tar], <ast.Starred object at 0x7da18f723a30>]]]
keyword[def] identifier[dist_abs] ( identifier[self] , identifier[src] , identifier[tar] ,* identifier[args] ,** identifier[kwargs] ): literal[string] keyword[return] identifier[self] . identifier[dist] ( identifier[src] , identifier[tar] ,* identifier[args] ,** identifier[kwargs] )
def dist_abs(self, src, tar, *args, **kwargs): """Return absolute distance. Parameters ---------- src : str Source string for comparison tar : str Target string for comparison *args Variable length argument list. **kwargs Arbitrary keyword arguments. Returns ------- int Absolute distance """ return self.dist(src, tar, *args, **kwargs)
def send_calibrate_accelerometer(self, simple=False):
    """Request accelerometer calibration.

    :param simple: if True, perform simple accelerometer calibration
    """
    # MAV_CMD_PREFLIGHT_CALIBRATION param 5 selects the accelerometer
    # routine: 1 = full calibration, 4 = simple calibration.
    accel_cal_mode = 4 if simple else 1
    calibration_command = self.message_factory.command_long_encode(
        self._handler.target_system,  # target_system
        0,  # target_component
        mavutil.mavlink.MAV_CMD_PREFLIGHT_CALIBRATION,  # command
        0,  # confirmation
        0,  # param 1: gyro / gyro temperature calibration (unused here)
        0,  # param 2: magnetometer calibration (unused here)
        0,  # param 3: ground pressure calibration (unused here)
        0,  # param 4: radio RC calibration (unused here)
        accel_cal_mode,  # param 5: accelerometer calibration mode
        0,  # param 6: airspeed calibration (unused here)
        0,  # param 7: ESC / barometer temperature calibration (unused here)
    )
    self.send_mavlink(calibration_command)
def function[send_calibrate_accelerometer, parameter[self, simple]]: constant[Request accelerometer calibration. :param simple: if True, perform simple accelerometer calibration ] variable[calibration_command] assign[=] call[name[self].message_factory.command_long_encode, parameter[name[self]._handler.target_system, constant[0], name[mavutil].mavlink.MAV_CMD_PREFLIGHT_CALIBRATION, constant[0], constant[0], constant[0], constant[0], constant[0], <ast.IfExp object at 0x7da18dc074c0>, constant[0], constant[0]]] call[name[self].send_mavlink, parameter[name[calibration_command]]]
keyword[def] identifier[send_calibrate_accelerometer] ( identifier[self] , identifier[simple] = keyword[False] ): literal[string] identifier[calibration_command] = identifier[self] . identifier[message_factory] . identifier[command_long_encode] ( identifier[self] . identifier[_handler] . identifier[target_system] , literal[int] , identifier[mavutil] . identifier[mavlink] . identifier[MAV_CMD_PREFLIGHT_CALIBRATION] , literal[int] , literal[int] , literal[int] , literal[int] , literal[int] , literal[int] keyword[if] identifier[simple] keyword[else] literal[int] , literal[int] , literal[int] , ) identifier[self] . identifier[send_mavlink] ( identifier[calibration_command] )
def send_calibrate_accelerometer(self, simple=False): """Request accelerometer calibration. :param simple: if True, perform simple accelerometer calibration """ # target_system, target_component # command # confirmation # param 1, 1: gyro calibration, 3: gyro temperature calibration # param 2, 1: magnetometer calibration # param 3, 1: ground pressure calibration # param 4, 1: radio RC calibration, 2: RC trim calibration # param 5, 1: accelerometer calibration, 2: board level calibration, 3: accelerometer temperature calibration, 4: simple accelerometer calibration # param 6, 2: airspeed calibration # param 7, 1: ESC calibration, 3: barometer temperature calibration calibration_command = self.message_factory.command_long_encode(self._handler.target_system, 0, mavutil.mavlink.MAV_CMD_PREFLIGHT_CALIBRATION, 0, 0, 0, 0, 0, 4 if simple else 1, 0, 0) self.send_mavlink(calibration_command)
def hacked_pep257(to_lint):
    """
    Run pep257 docstring checks with several noisy rules disabled.

    :param to_lint: iterable of filenames to check
    :return: newline-joined, sorted pep257 error report
    """
    def _noop(*args, **kwargs):
        pass

    # Silence the checks we deliberately do not care about by monkey-patching
    # them out of the pep257 module (global, but fine for a one-shot run).
    _disabled_checks = (
        'check_blank_before_after_class',
        'check_blank_after_last_paragraph',
        'check_blank_after_summary',
        'check_ends_with_period',
        'check_one_liners',
        'check_imperative_mood',
    )
    for _check_name in _disabled_checks:
        setattr(pep257, _check_name, _noop)

    _real_check_return_type = pep257.check_return_type

    def _check_return_type_private_only(def_docstring, context, is_script):
        """
        Run the return-type check only for private, non-dunder defs.
        """
        def_name = context.split()[1]
        if def_name.startswith('_') and not def_name.endswith('__'):
            _real_check_return_type(def_docstring, context, is_script)

    pep257.check_return_type = _check_return_type_private_only

    errors = []
    for filename in to_lint:
        with open(filename) as source_file:
            source = source_file.read()
            if source:
                errors.extend(pep257.check_source(source, filename))

    return '\n'.join(str(error) for error in sorted(errors))
def function[hacked_pep257, parameter[to_lint]]: constant[ Check for the presence of docstrings, but ignore some of the options ] def function[ignore, parameter[]]: pass name[pep257].check_blank_before_after_class assign[=] name[ignore] name[pep257].check_blank_after_last_paragraph assign[=] name[ignore] name[pep257].check_blank_after_summary assign[=] name[ignore] name[pep257].check_ends_with_period assign[=] name[ignore] name[pep257].check_one_liners assign[=] name[ignore] name[pep257].check_imperative_mood assign[=] name[ignore] variable[original_check_return_type] assign[=] name[pep257].check_return_type def function[better_check_return_type, parameter[def_docstring, context, is_script]]: constant[ Ignore private methods ] variable[def_name] assign[=] call[call[name[context].split, parameter[]]][constant[1]] if <ast.BoolOp object at 0x7da20c6a9660> begin[:] call[name[original_check_return_type], parameter[name[def_docstring], name[context], name[is_script]]] name[pep257].check_return_type assign[=] name[better_check_return_type] variable[errors] assign[=] list[[]] for taget[name[filename]] in starred[name[to_lint]] begin[:] with call[name[open], parameter[name[filename]]] begin[:] variable[source] assign[=] call[name[f].read, parameter[]] if name[source] begin[:] call[name[errors].extend, parameter[call[name[pep257].check_source, parameter[name[source], name[filename]]]]] return[call[constant[ ].join, parameter[<ast.ListComp object at 0x7da20c6aa260>]]]
keyword[def] identifier[hacked_pep257] ( identifier[to_lint] ): literal[string] keyword[def] identifier[ignore] (* identifier[args] ,** identifier[kwargs] ): keyword[pass] identifier[pep257] . identifier[check_blank_before_after_class] = identifier[ignore] identifier[pep257] . identifier[check_blank_after_last_paragraph] = identifier[ignore] identifier[pep257] . identifier[check_blank_after_summary] = identifier[ignore] identifier[pep257] . identifier[check_ends_with_period] = identifier[ignore] identifier[pep257] . identifier[check_one_liners] = identifier[ignore] identifier[pep257] . identifier[check_imperative_mood] = identifier[ignore] identifier[original_check_return_type] = identifier[pep257] . identifier[check_return_type] keyword[def] identifier[better_check_return_type] ( identifier[def_docstring] , identifier[context] , identifier[is_script] ): literal[string] identifier[def_name] = identifier[context] . identifier[split] ()[ literal[int] ] keyword[if] identifier[def_name] . identifier[startswith] ( literal[string] ) keyword[and] keyword[not] identifier[def_name] . identifier[endswith] ( literal[string] ): identifier[original_check_return_type] ( identifier[def_docstring] , identifier[context] , identifier[is_script] ) identifier[pep257] . identifier[check_return_type] = identifier[better_check_return_type] identifier[errors] =[] keyword[for] identifier[filename] keyword[in] identifier[to_lint] : keyword[with] identifier[open] ( identifier[filename] ) keyword[as] identifier[f] : identifier[source] = identifier[f] . identifier[read] () keyword[if] identifier[source] : identifier[errors] . identifier[extend] ( identifier[pep257] . identifier[check_source] ( identifier[source] , identifier[filename] )) keyword[return] literal[string] . identifier[join] ([ identifier[str] ( identifier[error] ) keyword[for] identifier[error] keyword[in] identifier[sorted] ( identifier[errors] )])
def hacked_pep257(to_lint): """ Check for the presence of docstrings, but ignore some of the options """ def ignore(*args, **kwargs): pass pep257.check_blank_before_after_class = ignore pep257.check_blank_after_last_paragraph = ignore pep257.check_blank_after_summary = ignore pep257.check_ends_with_period = ignore pep257.check_one_liners = ignore pep257.check_imperative_mood = ignore original_check_return_type = pep257.check_return_type def better_check_return_type(def_docstring, context, is_script): """ Ignore private methods """ def_name = context.split()[1] if def_name.startswith('_') and (not def_name.endswith('__')): original_check_return_type(def_docstring, context, is_script) # depends on [control=['if'], data=[]] pep257.check_return_type = better_check_return_type errors = [] for filename in to_lint: with open(filename) as f: source = f.read() if source: errors.extend(pep257.check_source(source, filename)) # depends on [control=['if'], data=[]] # depends on [control=['with'], data=['f']] # depends on [control=['for'], data=['filename']] return '\n'.join([str(error) for error in sorted(errors)])
def head(self, urls=None, **overrides):
    """Sets the acceptable HTTP method to HEAD.

    :param urls: optional URL(s) the route applies to; folded into the
        override mapping when given
    :param overrides: additional routing options forwarded to ``where``
    :return: the result of ``self.where`` with ``accept='HEAD'``
    """
    options = dict(overrides)
    if urls is not None:
        options['urls'] = urls
    return self.where(accept='HEAD', **options)
def function[head, parameter[self, urls]]: constant[Sets the acceptable HTTP method to HEAD] if compare[name[urls] is_not constant[None]] begin[:] call[name[overrides]][constant[urls]] assign[=] name[urls] return[call[name[self].where, parameter[]]]
keyword[def] identifier[head] ( identifier[self] , identifier[urls] = keyword[None] ,** identifier[overrides] ): literal[string] keyword[if] identifier[urls] keyword[is] keyword[not] keyword[None] : identifier[overrides] [ literal[string] ]= identifier[urls] keyword[return] identifier[self] . identifier[where] ( identifier[accept] = literal[string] ,** identifier[overrides] )
def head(self, urls=None, **overrides): """Sets the acceptable HTTP method to HEAD""" if urls is not None: overrides['urls'] = urls # depends on [control=['if'], data=['urls']] return self.where(accept='HEAD', **overrides)
async def close(self) -> None:
        """Complete queued queries/cursors and close the connection.

        Delegates the blocking ``close`` call to ``self._execute`` (which
        presumably runs it off the event loop -- confirm), then clears the
        running flag and the cached connection reference so further use of
        this object fails fast.
        """
        await self._execute(self._conn.close)
        self._running = False
        self._connection = None
<ast.AsyncFunctionDef object at 0x7da1b1dc77c0>
keyword[async] keyword[def] identifier[close] ( identifier[self] )-> keyword[None] : literal[string] keyword[await] identifier[self] . identifier[_execute] ( identifier[self] . identifier[_conn] . identifier[close] ) identifier[self] . identifier[_running] = keyword[False] identifier[self] . identifier[_connection] = keyword[None]
async def close(self) -> None: """Complete queued queries/cursors and close the connection.""" await self._execute(self._conn.close) self._running = False self._connection = None
def _build_url(*args, **kwargs) -> str:
    """Return a full API url.

    Walks ``API_RESOURCES_URLS`` with the given keys, optionally fills the
    resulting template with ``kwargs``, and joins it onto the base ``URL``.
    """
    template = API_RESOURCES_URLS
    for segment in args:
        template = template[segment]
    resource = template.format(**kwargs) if kwargs else template
    return urljoin(URL, resource)
def function[_build_url, parameter[]]: constant[ Return a valid url. ] variable[resource_url] assign[=] name[API_RESOURCES_URLS] for taget[name[key]] in starred[name[args]] begin[:] variable[resource_url] assign[=] call[name[resource_url]][name[key]] if name[kwargs] begin[:] variable[resource_url] assign[=] call[name[resource_url].format, parameter[]] return[call[name[urljoin], parameter[name[URL], name[resource_url]]]]
keyword[def] identifier[_build_url] (* identifier[args] ,** identifier[kwargs] )-> identifier[str] : literal[string] identifier[resource_url] = identifier[API_RESOURCES_URLS] keyword[for] identifier[key] keyword[in] identifier[args] : identifier[resource_url] = identifier[resource_url] [ identifier[key] ] keyword[if] identifier[kwargs] : identifier[resource_url] = identifier[resource_url] . identifier[format] (** identifier[kwargs] ) keyword[return] identifier[urljoin] ( identifier[URL] , identifier[resource_url] )
def _build_url(*args, **kwargs) -> str: """ Return a valid url. """ resource_url = API_RESOURCES_URLS for key in args: resource_url = resource_url[key] # depends on [control=['for'], data=['key']] if kwargs: resource_url = resource_url.format(**kwargs) # depends on [control=['if'], data=[]] return urljoin(URL, resource_url)
def update(self, heap):
        """Update the item descriptors and items from an incoming heap.

        Parameters
        ----------
        heap : :class:`spead2.recv.Heap`
            Incoming heap

        Returns
        -------
        dict
            Items that have been updated from this heap, indexed by name
        """
        # Register any new descriptors first, so values in this same heap can
        # resolve against them below.
        for descriptor in heap.get_descriptors():
            item = Item.from_raw(descriptor, flavour=heap.flavour)
            self._add_item(item)
        updated_items = {}
        for raw_item in heap.get_items():
            if raw_item.id <= STREAM_CTRL_ID:
                continue   # Special fields, not real items
            try:
                item = self._by_id[raw_item.id]
            except KeyError:
                # Value arrived before (or without) its descriptor: skip it.
                _logger.warning('Item with ID %#x received but there is no descriptor', raw_item.id)
            else:
                item.set_from_raw(raw_item)
                item.version += 1
                updated_items[item.name] = item
        return updated_items
def function[update, parameter[self, heap]]: constant[Update the item descriptors and items from an incoming heap. Parameters ---------- heap : :class:`spead2.recv.Heap` Incoming heap Returns ------- dict Items that have been updated from this heap, indexed by name ] for taget[name[descriptor]] in starred[call[name[heap].get_descriptors, parameter[]]] begin[:] variable[item] assign[=] call[name[Item].from_raw, parameter[name[descriptor]]] call[name[self]._add_item, parameter[name[item]]] variable[updated_items] assign[=] dictionary[[], []] for taget[name[raw_item]] in starred[call[name[heap].get_items, parameter[]]] begin[:] if compare[name[raw_item].id less_or_equal[<=] name[STREAM_CTRL_ID]] begin[:] continue <ast.Try object at 0x7da1b0b2b1c0> return[name[updated_items]]
keyword[def] identifier[update] ( identifier[self] , identifier[heap] ): literal[string] keyword[for] identifier[descriptor] keyword[in] identifier[heap] . identifier[get_descriptors] (): identifier[item] = identifier[Item] . identifier[from_raw] ( identifier[descriptor] , identifier[flavour] = identifier[heap] . identifier[flavour] ) identifier[self] . identifier[_add_item] ( identifier[item] ) identifier[updated_items] ={} keyword[for] identifier[raw_item] keyword[in] identifier[heap] . identifier[get_items] (): keyword[if] identifier[raw_item] . identifier[id] <= identifier[STREAM_CTRL_ID] : keyword[continue] keyword[try] : identifier[item] = identifier[self] . identifier[_by_id] [ identifier[raw_item] . identifier[id] ] keyword[except] identifier[KeyError] : identifier[_logger] . identifier[warning] ( literal[string] , identifier[raw_item] . identifier[id] ) keyword[else] : identifier[item] . identifier[set_from_raw] ( identifier[raw_item] ) identifier[item] . identifier[version] += literal[int] identifier[updated_items] [ identifier[item] . identifier[name] ]= identifier[item] keyword[return] identifier[updated_items]
def update(self, heap): """Update the item descriptors and items from an incoming heap. Parameters ---------- heap : :class:`spead2.recv.Heap` Incoming heap Returns ------- dict Items that have been updated from this heap, indexed by name """ for descriptor in heap.get_descriptors(): item = Item.from_raw(descriptor, flavour=heap.flavour) self._add_item(item) # depends on [control=['for'], data=['descriptor']] updated_items = {} for raw_item in heap.get_items(): if raw_item.id <= STREAM_CTRL_ID: continue # Special fields, not real items # depends on [control=['if'], data=[]] try: item = self._by_id[raw_item.id] # depends on [control=['try'], data=[]] except KeyError: _logger.warning('Item with ID %#x received but there is no descriptor', raw_item.id) # depends on [control=['except'], data=[]] else: item.set_from_raw(raw_item) item.version += 1 updated_items[item.name] = item # depends on [control=['for'], data=['raw_item']] return updated_items
def solve_loop(slsp, u_span, j_coup):
    """Sweep the interaction strength and collect quasiparticle data.

    @param slsp: slave-spin object (mutated: solved once per U value)
    @param u_span: iterable of local Coulomb interaction strengths U
    @param j_coup: fraction of U used as the Hund coupling strength
    @return: tuple of (array stacking [z, lambda, eps] per U, the
        self-consistency logs, and the mean fields including the
        initial ``None`` seed)
    """
    quasiparticle = []
    lagrange = []
    energies = []
    history = []
    # Seeded with None: the first self-consistency call gets no prior
    # mean field.
    fields = [None]
    for coulomb_u in u_span:
        print(coulomb_u, j_coup)
        history.append(slsp.selfconsistency(coulomb_u, j_coup, fields[-1]))
        fields.append(slsp.mean_field())
        quasiparticle.append(slsp.quasiparticle_weight())
        lagrange.append(slsp.param['lambda'])
        energies.append(orbital_energies(slsp.param, quasiparticle[-1]))
    return np.asarray([quasiparticle, lagrange, energies]), history, fields
def function[solve_loop, parameter[slsp, u_span, j_coup]]: constant[Calculates the quasiparticle for the input loop of: @param slsp: Slave spin Object @param Uspan: local Couloumb interation @param J_coup: Fraction of Uspan of Hund coupling strength] <ast.Tuple object at 0x7da207f023e0> assign[=] tuple[[<ast.List object at 0x7da207f031f0>, <ast.List object at 0x7da207f007f0>, <ast.List object at 0x7da207f008b0>, <ast.List object at 0x7da207f016f0>, <ast.List object at 0x7da207f02920>]] for taget[name[u]] in starred[name[u_span]] begin[:] call[name[print], parameter[name[u], name[j_coup]]] call[name[hlog].append, parameter[call[name[slsp].selfconsistency, parameter[name[u], name[j_coup], call[name[mean_f]][<ast.UnaryOp object at 0x7da207f029b0>]]]]] call[name[mean_f].append, parameter[call[name[slsp].mean_field, parameter[]]]] call[name[zet].append, parameter[call[name[slsp].quasiparticle_weight, parameter[]]]] call[name[lam].append, parameter[call[name[slsp].param][constant[lambda]]]] call[name[eps].append, parameter[call[name[orbital_energies], parameter[name[slsp].param, call[name[zet]][<ast.UnaryOp object at 0x7da207f013f0>]]]]] return[tuple[[<ast.Call object at 0x7da207f01690>, <ast.Name object at 0x7da207f03400>, <ast.Name object at 0x7da207f00e50>]]]
keyword[def] identifier[solve_loop] ( identifier[slsp] , identifier[u_span] , identifier[j_coup] ): literal[string] identifier[zet] , identifier[lam] , identifier[eps] , identifier[hlog] , identifier[mean_f] =[],[],[],[],[ keyword[None] ] keyword[for] identifier[u] keyword[in] identifier[u_span] : identifier[print] ( identifier[u] , identifier[j_coup] ) identifier[hlog] . identifier[append] ( identifier[slsp] . identifier[selfconsistency] ( identifier[u] , identifier[j_coup] , identifier[mean_f] [- literal[int] ])) identifier[mean_f] . identifier[append] ( identifier[slsp] . identifier[mean_field] ()) identifier[zet] . identifier[append] ( identifier[slsp] . identifier[quasiparticle_weight] ()) identifier[lam] . identifier[append] ( identifier[slsp] . identifier[param] [ literal[string] ]) identifier[eps] . identifier[append] ( identifier[orbital_energies] ( identifier[slsp] . identifier[param] , identifier[zet] [- literal[int] ])) keyword[return] identifier[np] . identifier[asarray] ([ identifier[zet] , identifier[lam] , identifier[eps] ]), identifier[hlog] , identifier[mean_f]
def solve_loop(slsp, u_span, j_coup): """Calculates the quasiparticle for the input loop of: @param slsp: Slave spin Object @param Uspan: local Couloumb interation @param J_coup: Fraction of Uspan of Hund coupling strength""" (zet, lam, eps, hlog, mean_f) = ([], [], [], [], [None]) for u in u_span: print(u, j_coup) hlog.append(slsp.selfconsistency(u, j_coup, mean_f[-1])) mean_f.append(slsp.mean_field()) zet.append(slsp.quasiparticle_weight()) lam.append(slsp.param['lambda']) eps.append(orbital_energies(slsp.param, zet[-1])) # depends on [control=['for'], data=['u']] return (np.asarray([zet, lam, eps]), hlog, mean_f)
def asfreq(self, freq, method=None, how=None, normalize=False,
               fill_value=None):
        """
        Convert TimeSeries to specified frequency.

        Optionally provide filling method to pad/backfill missing values.

        Returns the original data conformed to a new index with the specified
        frequency. ``resample`` is more appropriate if an operation, such as
        summarization, is necessary to represent the data at the new
        frequency.

        Parameters
        ----------
        freq : DateOffset object, or string
        method : {'backfill'/'bfill', 'pad'/'ffill'}, default None
            Method to use for filling holes in reindexed Series (note this
            does not fill NaNs that already were present):

            * 'pad' / 'ffill': propagate last valid observation forward to
              next valid
            * 'backfill' / 'bfill': use NEXT valid observation to fill
        how : {'start', 'end'}, default end
            For PeriodIndex only, see PeriodIndex.asfreq
        normalize : bool, default False
            Whether to reset output index to midnight
        fill_value : scalar, optional
            Value to use for missing values, applied during upsampling (note
            this does not fill NaNs that already were present).

            .. versionadded:: 0.20.0

        Returns
        -------
        converted : same type as caller

        See Also
        --------
        reindex

        Notes
        -----
        To learn more about the frequency strings, please see `this link
        <http://pandas.pydata.org/pandas-docs/stable/timeseries.html#offset-aliases>`__.

        Examples
        --------
        Start by creating a series with 4 one minute timestamps.

        >>> index = pd.date_range('1/1/2000', periods=4, freq='T')
        >>> series = pd.Series([0.0, None, 2.0, 3.0], index=index)
        >>> df = pd.DataFrame({'s':series})
        >>> df
                               s
        2000-01-01 00:00:00    0.0
        2000-01-01 00:01:00    NaN
        2000-01-01 00:02:00    2.0
        2000-01-01 00:03:00    3.0

        Upsample the series into 30 second bins.

        >>> df.asfreq(freq='30S')
                               s
        2000-01-01 00:00:00    0.0
        2000-01-01 00:00:30    NaN
        2000-01-01 00:01:00    NaN
        2000-01-01 00:01:30    NaN
        2000-01-01 00:02:00    2.0
        2000-01-01 00:02:30    NaN
        2000-01-01 00:03:00    3.0

        Upsample again, providing a ``fill value``.

        >>> df.asfreq(freq='30S', fill_value=9.0)
                               s
        2000-01-01 00:00:00    0.0
        2000-01-01 00:00:30    9.0
        2000-01-01 00:01:00    NaN
        2000-01-01 00:01:30    9.0
        2000-01-01 00:02:00    2.0
        2000-01-01 00:02:30    9.0
        2000-01-01 00:03:00    3.0

        Upsample again, providing a ``method``.

        >>> df.asfreq(freq='30S', method='bfill')
                               s
        2000-01-01 00:00:00    0.0
        2000-01-01 00:00:30    NaN
        2000-01-01 00:01:00    NaN
        2000-01-01 00:01:30    2.0
        2000-01-01 00:02:00    2.0
        2000-01-01 00:02:30    3.0
        2000-01-01 00:03:00    3.0
        """
        # Imported locally rather than at module level, presumably to avoid a
        # circular import with the resample module -- confirm.
        from pandas.core.resample import asfreq
        return asfreq(self, freq, method=method, how=how, normalize=normalize,
                      fill_value=fill_value)
def function[asfreq, parameter[self, freq, method, how, normalize, fill_value]]: constant[ Convert TimeSeries to specified frequency. Optionally provide filling method to pad/backfill missing values. Returns the original data conformed to a new index with the specified frequency. ``resample`` is more appropriate if an operation, such as summarization, is necessary to represent the data at the new frequency. Parameters ---------- freq : DateOffset object, or string method : {'backfill'/'bfill', 'pad'/'ffill'}, default None Method to use for filling holes in reindexed Series (note this does not fill NaNs that already were present): * 'pad' / 'ffill': propagate last valid observation forward to next valid * 'backfill' / 'bfill': use NEXT valid observation to fill how : {'start', 'end'}, default end For PeriodIndex only, see PeriodIndex.asfreq normalize : bool, default False Whether to reset output index to midnight fill_value : scalar, optional Value to use for missing values, applied during upsampling (note this does not fill NaNs that already were present). .. versionadded:: 0.20.0 Returns ------- converted : same type as caller See Also -------- reindex Notes ----- To learn more about the frequency strings, please see `this link <http://pandas.pydata.org/pandas-docs/stable/timeseries.html#offset-aliases>`__. Examples -------- Start by creating a series with 4 one minute timestamps. >>> index = pd.date_range('1/1/2000', periods=4, freq='T') >>> series = pd.Series([0.0, None, 2.0, 3.0], index=index) >>> df = pd.DataFrame({'s':series}) >>> df s 2000-01-01 00:00:00 0.0 2000-01-01 00:01:00 NaN 2000-01-01 00:02:00 2.0 2000-01-01 00:03:00 3.0 Upsample the series into 30 second bins. >>> df.asfreq(freq='30S') s 2000-01-01 00:00:00 0.0 2000-01-01 00:00:30 NaN 2000-01-01 00:01:00 NaN 2000-01-01 00:01:30 NaN 2000-01-01 00:02:00 2.0 2000-01-01 00:02:30 NaN 2000-01-01 00:03:00 3.0 Upsample again, providing a ``fill value``. 
>>> df.asfreq(freq='30S', fill_value=9.0) s 2000-01-01 00:00:00 0.0 2000-01-01 00:00:30 9.0 2000-01-01 00:01:00 NaN 2000-01-01 00:01:30 9.0 2000-01-01 00:02:00 2.0 2000-01-01 00:02:30 9.0 2000-01-01 00:03:00 3.0 Upsample again, providing a ``method``. >>> df.asfreq(freq='30S', method='bfill') s 2000-01-01 00:00:00 0.0 2000-01-01 00:00:30 NaN 2000-01-01 00:01:00 NaN 2000-01-01 00:01:30 2.0 2000-01-01 00:02:00 2.0 2000-01-01 00:02:30 3.0 2000-01-01 00:03:00 3.0 ] from relative_module[pandas.core.resample] import module[asfreq] return[call[name[asfreq], parameter[name[self], name[freq]]]]
keyword[def] identifier[asfreq] ( identifier[self] , identifier[freq] , identifier[method] = keyword[None] , identifier[how] = keyword[None] , identifier[normalize] = keyword[False] , identifier[fill_value] = keyword[None] ): literal[string] keyword[from] identifier[pandas] . identifier[core] . identifier[resample] keyword[import] identifier[asfreq] keyword[return] identifier[asfreq] ( identifier[self] , identifier[freq] , identifier[method] = identifier[method] , identifier[how] = identifier[how] , identifier[normalize] = identifier[normalize] , identifier[fill_value] = identifier[fill_value] )
def asfreq(self, freq, method=None, how=None, normalize=False, fill_value=None): """ Convert TimeSeries to specified frequency. Optionally provide filling method to pad/backfill missing values. Returns the original data conformed to a new index with the specified frequency. ``resample`` is more appropriate if an operation, such as summarization, is necessary to represent the data at the new frequency. Parameters ---------- freq : DateOffset object, or string method : {'backfill'/'bfill', 'pad'/'ffill'}, default None Method to use for filling holes in reindexed Series (note this does not fill NaNs that already were present): * 'pad' / 'ffill': propagate last valid observation forward to next valid * 'backfill' / 'bfill': use NEXT valid observation to fill how : {'start', 'end'}, default end For PeriodIndex only, see PeriodIndex.asfreq normalize : bool, default False Whether to reset output index to midnight fill_value : scalar, optional Value to use for missing values, applied during upsampling (note this does not fill NaNs that already were present). .. versionadded:: 0.20.0 Returns ------- converted : same type as caller See Also -------- reindex Notes ----- To learn more about the frequency strings, please see `this link <http://pandas.pydata.org/pandas-docs/stable/timeseries.html#offset-aliases>`__. Examples -------- Start by creating a series with 4 one minute timestamps. >>> index = pd.date_range('1/1/2000', periods=4, freq='T') >>> series = pd.Series([0.0, None, 2.0, 3.0], index=index) >>> df = pd.DataFrame({'s':series}) >>> df s 2000-01-01 00:00:00 0.0 2000-01-01 00:01:00 NaN 2000-01-01 00:02:00 2.0 2000-01-01 00:03:00 3.0 Upsample the series into 30 second bins. >>> df.asfreq(freq='30S') s 2000-01-01 00:00:00 0.0 2000-01-01 00:00:30 NaN 2000-01-01 00:01:00 NaN 2000-01-01 00:01:30 NaN 2000-01-01 00:02:00 2.0 2000-01-01 00:02:30 NaN 2000-01-01 00:03:00 3.0 Upsample again, providing a ``fill value``. 
>>> df.asfreq(freq='30S', fill_value=9.0) s 2000-01-01 00:00:00 0.0 2000-01-01 00:00:30 9.0 2000-01-01 00:01:00 NaN 2000-01-01 00:01:30 9.0 2000-01-01 00:02:00 2.0 2000-01-01 00:02:30 9.0 2000-01-01 00:03:00 3.0 Upsample again, providing a ``method``. >>> df.asfreq(freq='30S', method='bfill') s 2000-01-01 00:00:00 0.0 2000-01-01 00:00:30 NaN 2000-01-01 00:01:00 NaN 2000-01-01 00:01:30 2.0 2000-01-01 00:02:00 2.0 2000-01-01 00:02:30 3.0 2000-01-01 00:03:00 3.0 """ from pandas.core.resample import asfreq return asfreq(self, freq, method=method, how=how, normalize=normalize, fill_value=fill_value)
def refresh_db(full=False, **kwargs): ''' Updates the remote repos database. full : False Set to ``True`` to force a refresh of the pkg DB from all publishers, regardless of the last refresh time. CLI Example: .. code-block:: bash salt '*' pkg.refresh_db salt '*' pkg.refresh_db full=True ''' # Remove rtag file to keep multiple refreshes from happening in pkg states salt.utils.pkg.clear_rtag(__opts__) if full: return __salt__['cmd.retcode']('/bin/pkg refresh --full') == 0 else: return __salt__['cmd.retcode']('/bin/pkg refresh') == 0
def function[refresh_db, parameter[full]]: constant[ Updates the remote repos database. full : False Set to ``True`` to force a refresh of the pkg DB from all publishers, regardless of the last refresh time. CLI Example: .. code-block:: bash salt '*' pkg.refresh_db salt '*' pkg.refresh_db full=True ] call[name[salt].utils.pkg.clear_rtag, parameter[name[__opts__]]] if name[full] begin[:] return[compare[call[call[name[__salt__]][constant[cmd.retcode]], parameter[constant[/bin/pkg refresh --full]]] equal[==] constant[0]]]
keyword[def] identifier[refresh_db] ( identifier[full] = keyword[False] ,** identifier[kwargs] ): literal[string] identifier[salt] . identifier[utils] . identifier[pkg] . identifier[clear_rtag] ( identifier[__opts__] ) keyword[if] identifier[full] : keyword[return] identifier[__salt__] [ literal[string] ]( literal[string] )== literal[int] keyword[else] : keyword[return] identifier[__salt__] [ literal[string] ]( literal[string] )== literal[int]
def refresh_db(full=False, **kwargs): """ Updates the remote repos database. full : False Set to ``True`` to force a refresh of the pkg DB from all publishers, regardless of the last refresh time. CLI Example: .. code-block:: bash salt '*' pkg.refresh_db salt '*' pkg.refresh_db full=True """ # Remove rtag file to keep multiple refreshes from happening in pkg states salt.utils.pkg.clear_rtag(__opts__) if full: return __salt__['cmd.retcode']('/bin/pkg refresh --full') == 0 # depends on [control=['if'], data=[]] else: return __salt__['cmd.retcode']('/bin/pkg refresh') == 0
def recv_msg(self): '''message receive routine for UDP link''' self.pre_message() s = self.recv() if len(s) > 0: if self.first_byte: self.auto_mavlink_version(s) m = self.mav.parse_char(s) if m is not None: self.post_message(m) return m
def function[recv_msg, parameter[self]]: constant[message receive routine for UDP link] call[name[self].pre_message, parameter[]] variable[s] assign[=] call[name[self].recv, parameter[]] if compare[call[name[len], parameter[name[s]]] greater[>] constant[0]] begin[:] if name[self].first_byte begin[:] call[name[self].auto_mavlink_version, parameter[name[s]]] variable[m] assign[=] call[name[self].mav.parse_char, parameter[name[s]]] if compare[name[m] is_not constant[None]] begin[:] call[name[self].post_message, parameter[name[m]]] return[name[m]]
keyword[def] identifier[recv_msg] ( identifier[self] ): literal[string] identifier[self] . identifier[pre_message] () identifier[s] = identifier[self] . identifier[recv] () keyword[if] identifier[len] ( identifier[s] )> literal[int] : keyword[if] identifier[self] . identifier[first_byte] : identifier[self] . identifier[auto_mavlink_version] ( identifier[s] ) identifier[m] = identifier[self] . identifier[mav] . identifier[parse_char] ( identifier[s] ) keyword[if] identifier[m] keyword[is] keyword[not] keyword[None] : identifier[self] . identifier[post_message] ( identifier[m] ) keyword[return] identifier[m]
def recv_msg(self): """message receive routine for UDP link""" self.pre_message() s = self.recv() if len(s) > 0: if self.first_byte: self.auto_mavlink_version(s) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] m = self.mav.parse_char(s) if m is not None: self.post_message(m) # depends on [control=['if'], data=['m']] return m
def get(self, path_info): """Gets the checksum for the specified path info. Checksum will be retrieved from the state database if available. Args: path_info (dict): path info to get the checksum for. Returns: str or None: checksum for the specified path info or None if it doesn't exist in the state database. """ assert path_info["scheme"] == "local" path = path_info["path"] if not os.path.exists(path): return None actual_mtime, actual_size = get_mtime_and_size(path) actual_inode = get_inode(path) existing_record = self.get_state_record_for_inode(actual_inode) if not existing_record: return None mtime, size, checksum, _ = existing_record if self._file_metadata_changed(actual_mtime, mtime, actual_size, size): return None self._update_state_record_timestamp_for_inode(actual_inode) return checksum
def function[get, parameter[self, path_info]]: constant[Gets the checksum for the specified path info. Checksum will be retrieved from the state database if available. Args: path_info (dict): path info to get the checksum for. Returns: str or None: checksum for the specified path info or None if it doesn't exist in the state database. ] assert[compare[call[name[path_info]][constant[scheme]] equal[==] constant[local]]] variable[path] assign[=] call[name[path_info]][constant[path]] if <ast.UnaryOp object at 0x7da1b20b9d20> begin[:] return[constant[None]] <ast.Tuple object at 0x7da1b20ba9e0> assign[=] call[name[get_mtime_and_size], parameter[name[path]]] variable[actual_inode] assign[=] call[name[get_inode], parameter[name[path]]] variable[existing_record] assign[=] call[name[self].get_state_record_for_inode, parameter[name[actual_inode]]] if <ast.UnaryOp object at 0x7da1b20bbdc0> begin[:] return[constant[None]] <ast.Tuple object at 0x7da1b20b8a90> assign[=] name[existing_record] if call[name[self]._file_metadata_changed, parameter[name[actual_mtime], name[mtime], name[actual_size], name[size]]] begin[:] return[constant[None]] call[name[self]._update_state_record_timestamp_for_inode, parameter[name[actual_inode]]] return[name[checksum]]
keyword[def] identifier[get] ( identifier[self] , identifier[path_info] ): literal[string] keyword[assert] identifier[path_info] [ literal[string] ]== literal[string] identifier[path] = identifier[path_info] [ literal[string] ] keyword[if] keyword[not] identifier[os] . identifier[path] . identifier[exists] ( identifier[path] ): keyword[return] keyword[None] identifier[actual_mtime] , identifier[actual_size] = identifier[get_mtime_and_size] ( identifier[path] ) identifier[actual_inode] = identifier[get_inode] ( identifier[path] ) identifier[existing_record] = identifier[self] . identifier[get_state_record_for_inode] ( identifier[actual_inode] ) keyword[if] keyword[not] identifier[existing_record] : keyword[return] keyword[None] identifier[mtime] , identifier[size] , identifier[checksum] , identifier[_] = identifier[existing_record] keyword[if] identifier[self] . identifier[_file_metadata_changed] ( identifier[actual_mtime] , identifier[mtime] , identifier[actual_size] , identifier[size] ): keyword[return] keyword[None] identifier[self] . identifier[_update_state_record_timestamp_for_inode] ( identifier[actual_inode] ) keyword[return] identifier[checksum]
def get(self, path_info): """Gets the checksum for the specified path info. Checksum will be retrieved from the state database if available. Args: path_info (dict): path info to get the checksum for. Returns: str or None: checksum for the specified path info or None if it doesn't exist in the state database. """ assert path_info['scheme'] == 'local' path = path_info['path'] if not os.path.exists(path): return None # depends on [control=['if'], data=[]] (actual_mtime, actual_size) = get_mtime_and_size(path) actual_inode = get_inode(path) existing_record = self.get_state_record_for_inode(actual_inode) if not existing_record: return None # depends on [control=['if'], data=[]] (mtime, size, checksum, _) = existing_record if self._file_metadata_changed(actual_mtime, mtime, actual_size, size): return None # depends on [control=['if'], data=[]] self._update_state_record_timestamp_for_inode(actual_inode) return checksum
def pl_resolve(ci, cj): """Return all clauses that can be obtained by resolving clauses ci and cj. >>> for res in pl_resolve(to_cnf(A|B|C), to_cnf(~B|~C|F)): ... ppset(disjuncts(res)) set([A, C, F, ~C]) set([A, B, F, ~B]) """ clauses = [] for di in disjuncts(ci): for dj in disjuncts(cj): if di == ~dj or ~di == dj: dnew = unique(removeall(di, disjuncts(ci)) + removeall(dj, disjuncts(cj))) clauses.append(associate('|', dnew)) return clauses
def function[pl_resolve, parameter[ci, cj]]: constant[Return all clauses that can be obtained by resolving clauses ci and cj. >>> for res in pl_resolve(to_cnf(A|B|C), to_cnf(~B|~C|F)): ... ppset(disjuncts(res)) set([A, C, F, ~C]) set([A, B, F, ~B]) ] variable[clauses] assign[=] list[[]] for taget[name[di]] in starred[call[name[disjuncts], parameter[name[ci]]]] begin[:] for taget[name[dj]] in starred[call[name[disjuncts], parameter[name[cj]]]] begin[:] if <ast.BoolOp object at 0x7da1afe70a90> begin[:] variable[dnew] assign[=] call[name[unique], parameter[binary_operation[call[name[removeall], parameter[name[di], call[name[disjuncts], parameter[name[ci]]]]] + call[name[removeall], parameter[name[dj], call[name[disjuncts], parameter[name[cj]]]]]]]] call[name[clauses].append, parameter[call[name[associate], parameter[constant[|], name[dnew]]]]] return[name[clauses]]
keyword[def] identifier[pl_resolve] ( identifier[ci] , identifier[cj] ): literal[string] identifier[clauses] =[] keyword[for] identifier[di] keyword[in] identifier[disjuncts] ( identifier[ci] ): keyword[for] identifier[dj] keyword[in] identifier[disjuncts] ( identifier[cj] ): keyword[if] identifier[di] ==~ identifier[dj] keyword[or] ~ identifier[di] == identifier[dj] : identifier[dnew] = identifier[unique] ( identifier[removeall] ( identifier[di] , identifier[disjuncts] ( identifier[ci] ))+ identifier[removeall] ( identifier[dj] , identifier[disjuncts] ( identifier[cj] ))) identifier[clauses] . identifier[append] ( identifier[associate] ( literal[string] , identifier[dnew] )) keyword[return] identifier[clauses]
def pl_resolve(ci, cj): """Return all clauses that can be obtained by resolving clauses ci and cj. >>> for res in pl_resolve(to_cnf(A|B|C), to_cnf(~B|~C|F)): ... ppset(disjuncts(res)) set([A, C, F, ~C]) set([A, B, F, ~B]) """ clauses = [] for di in disjuncts(ci): for dj in disjuncts(cj): if di == ~dj or ~di == dj: dnew = unique(removeall(di, disjuncts(ci)) + removeall(dj, disjuncts(cj))) clauses.append(associate('|', dnew)) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['dj']] # depends on [control=['for'], data=['di']] return clauses
def write(self, fptr): """Write a fragment list box to file. """ self._validate(writing=True) num_items = len(self.fragment_offset) length = 8 + 2 + num_items * 14 fptr.write(struct.pack('>I4s', length, b'flst')) fptr.write(struct.pack('>H', num_items)) for j in range(num_items): write_buffer = struct.pack('>QIH', self.fragment_offset[j], self.fragment_length[j], self.data_reference[j]) fptr.write(write_buffer)
def function[write, parameter[self, fptr]]: constant[Write a fragment list box to file. ] call[name[self]._validate, parameter[]] variable[num_items] assign[=] call[name[len], parameter[name[self].fragment_offset]] variable[length] assign[=] binary_operation[binary_operation[constant[8] + constant[2]] + binary_operation[name[num_items] * constant[14]]] call[name[fptr].write, parameter[call[name[struct].pack, parameter[constant[>I4s], name[length], constant[b'flst']]]]] call[name[fptr].write, parameter[call[name[struct].pack, parameter[constant[>H], name[num_items]]]]] for taget[name[j]] in starred[call[name[range], parameter[name[num_items]]]] begin[:] variable[write_buffer] assign[=] call[name[struct].pack, parameter[constant[>QIH], call[name[self].fragment_offset][name[j]], call[name[self].fragment_length][name[j]], call[name[self].data_reference][name[j]]]] call[name[fptr].write, parameter[name[write_buffer]]]
keyword[def] identifier[write] ( identifier[self] , identifier[fptr] ): literal[string] identifier[self] . identifier[_validate] ( identifier[writing] = keyword[True] ) identifier[num_items] = identifier[len] ( identifier[self] . identifier[fragment_offset] ) identifier[length] = literal[int] + literal[int] + identifier[num_items] * literal[int] identifier[fptr] . identifier[write] ( identifier[struct] . identifier[pack] ( literal[string] , identifier[length] , literal[string] )) identifier[fptr] . identifier[write] ( identifier[struct] . identifier[pack] ( literal[string] , identifier[num_items] )) keyword[for] identifier[j] keyword[in] identifier[range] ( identifier[num_items] ): identifier[write_buffer] = identifier[struct] . identifier[pack] ( literal[string] , identifier[self] . identifier[fragment_offset] [ identifier[j] ], identifier[self] . identifier[fragment_length] [ identifier[j] ], identifier[self] . identifier[data_reference] [ identifier[j] ]) identifier[fptr] . identifier[write] ( identifier[write_buffer] )
def write(self, fptr): """Write a fragment list box to file. """ self._validate(writing=True) num_items = len(self.fragment_offset) length = 8 + 2 + num_items * 14 fptr.write(struct.pack('>I4s', length, b'flst')) fptr.write(struct.pack('>H', num_items)) for j in range(num_items): write_buffer = struct.pack('>QIH', self.fragment_offset[j], self.fragment_length[j], self.data_reference[j]) fptr.write(write_buffer) # depends on [control=['for'], data=['j']]
def CLASSDEF(self, node): """Check names used in a class definition, including its decorators, base classes, and the body of its definition. Additionally, add its name to the current scope. """ for deco in node.decorator_list: self.handleNode(deco, node) for baseNode in node.bases: self.handleNode(baseNode, node) if not PY2: for keywordNode in node.keywords: self.handleNode(keywordNode, node) self.push_scope(ClassScope) if self.settings.get('run_doctests', False): self.defer_function(lambda: self.handle_doctests(node)) for stmt in node.body: self.handleNode(stmt, node) self.pop_scope() self.add_binding(node, ClassDefinition(node.name, node))
def function[CLASSDEF, parameter[self, node]]: constant[Check names used in a class definition, including its decorators, base classes, and the body of its definition. Additionally, add its name to the current scope. ] for taget[name[deco]] in starred[name[node].decorator_list] begin[:] call[name[self].handleNode, parameter[name[deco], name[node]]] for taget[name[baseNode]] in starred[name[node].bases] begin[:] call[name[self].handleNode, parameter[name[baseNode], name[node]]] if <ast.UnaryOp object at 0x7da1b1939570> begin[:] for taget[name[keywordNode]] in starred[name[node].keywords] begin[:] call[name[self].handleNode, parameter[name[keywordNode], name[node]]] call[name[self].push_scope, parameter[name[ClassScope]]] if call[name[self].settings.get, parameter[constant[run_doctests], constant[False]]] begin[:] call[name[self].defer_function, parameter[<ast.Lambda object at 0x7da1b193b460>]] for taget[name[stmt]] in starred[name[node].body] begin[:] call[name[self].handleNode, parameter[name[stmt], name[node]]] call[name[self].pop_scope, parameter[]] call[name[self].add_binding, parameter[name[node], call[name[ClassDefinition], parameter[name[node].name, name[node]]]]]
keyword[def] identifier[CLASSDEF] ( identifier[self] , identifier[node] ): literal[string] keyword[for] identifier[deco] keyword[in] identifier[node] . identifier[decorator_list] : identifier[self] . identifier[handleNode] ( identifier[deco] , identifier[node] ) keyword[for] identifier[baseNode] keyword[in] identifier[node] . identifier[bases] : identifier[self] . identifier[handleNode] ( identifier[baseNode] , identifier[node] ) keyword[if] keyword[not] identifier[PY2] : keyword[for] identifier[keywordNode] keyword[in] identifier[node] . identifier[keywords] : identifier[self] . identifier[handleNode] ( identifier[keywordNode] , identifier[node] ) identifier[self] . identifier[push_scope] ( identifier[ClassScope] ) keyword[if] identifier[self] . identifier[settings] . identifier[get] ( literal[string] , keyword[False] ): identifier[self] . identifier[defer_function] ( keyword[lambda] : identifier[self] . identifier[handle_doctests] ( identifier[node] )) keyword[for] identifier[stmt] keyword[in] identifier[node] . identifier[body] : identifier[self] . identifier[handleNode] ( identifier[stmt] , identifier[node] ) identifier[self] . identifier[pop_scope] () identifier[self] . identifier[add_binding] ( identifier[node] , identifier[ClassDefinition] ( identifier[node] . identifier[name] , identifier[node] ))
def CLASSDEF(self, node): """Check names used in a class definition, including its decorators, base classes, and the body of its definition. Additionally, add its name to the current scope. """ for deco in node.decorator_list: self.handleNode(deco, node) # depends on [control=['for'], data=['deco']] for baseNode in node.bases: self.handleNode(baseNode, node) # depends on [control=['for'], data=['baseNode']] if not PY2: for keywordNode in node.keywords: self.handleNode(keywordNode, node) # depends on [control=['for'], data=['keywordNode']] # depends on [control=['if'], data=[]] self.push_scope(ClassScope) if self.settings.get('run_doctests', False): self.defer_function(lambda : self.handle_doctests(node)) # depends on [control=['if'], data=[]] for stmt in node.body: self.handleNode(stmt, node) # depends on [control=['for'], data=['stmt']] self.pop_scope() self.add_binding(node, ClassDefinition(node.name, node))
def resolve_ssl_version(candidate): """ like resolve_cert_reqs """ if candidate is None: return PROTOCOL_SSLv23 if isinstance(candidate, str): res = getattr(ssl, candidate, None) if res is None: res = getattr(ssl, 'PROTOCOL_' + candidate) return res return candidate
def function[resolve_ssl_version, parameter[candidate]]: constant[ like resolve_cert_reqs ] if compare[name[candidate] is constant[None]] begin[:] return[name[PROTOCOL_SSLv23]] if call[name[isinstance], parameter[name[candidate], name[str]]] begin[:] variable[res] assign[=] call[name[getattr], parameter[name[ssl], name[candidate], constant[None]]] if compare[name[res] is constant[None]] begin[:] variable[res] assign[=] call[name[getattr], parameter[name[ssl], binary_operation[constant[PROTOCOL_] + name[candidate]]]] return[name[res]] return[name[candidate]]
keyword[def] identifier[resolve_ssl_version] ( identifier[candidate] ): literal[string] keyword[if] identifier[candidate] keyword[is] keyword[None] : keyword[return] identifier[PROTOCOL_SSLv23] keyword[if] identifier[isinstance] ( identifier[candidate] , identifier[str] ): identifier[res] = identifier[getattr] ( identifier[ssl] , identifier[candidate] , keyword[None] ) keyword[if] identifier[res] keyword[is] keyword[None] : identifier[res] = identifier[getattr] ( identifier[ssl] , literal[string] + identifier[candidate] ) keyword[return] identifier[res] keyword[return] identifier[candidate]
def resolve_ssl_version(candidate): """ like resolve_cert_reqs """ if candidate is None: return PROTOCOL_SSLv23 # depends on [control=['if'], data=[]] if isinstance(candidate, str): res = getattr(ssl, candidate, None) if res is None: res = getattr(ssl, 'PROTOCOL_' + candidate) # depends on [control=['if'], data=['res']] return res # depends on [control=['if'], data=[]] return candidate
def _old_forney(self, omega, X, k=None): '''Computes the error magnitudes (only works with errors or erasures under t = floor((n-k)/2), not with erasures above (n-k)//2)''' # XXX Is floor division okay here? Should this be ceiling? if not k: k = self.k t = (self.n - k) // 2 Y = [] for l, Xl in enumerate(X): # Compute the sequence product and multiply its inverse in prod = GF2int(1) # just to init the product (1 is the neutral term for multiplication) Xl_inv = Xl.inverse() for ji in _range(t): # do not change to _range(len(X)) as can be seen in some papers, it won't give the correct result! (sometimes yes, but not always) if ji == l: continue if ji < len(X): Xj = X[ji] else: # if above the maximum degree of the polynomial, then all coefficients above are just 0 (that's logical...) Xj = GF2int(0) prod = prod * (Xl - Xj) #if (ji != l): # prod = prod * (GF2int(1) - X[ji]*(Xl.inverse())) # Compute Yl Yl = Xl**t * omega.evaluate(Xl_inv) * Xl_inv * prod.inverse() Y.append(Yl) return Y
def function[_old_forney, parameter[self, omega, X, k]]: constant[Computes the error magnitudes (only works with errors or erasures under t = floor((n-k)/2), not with erasures above (n-k)//2)] if <ast.UnaryOp object at 0x7da18dc07340> begin[:] variable[k] assign[=] name[self].k variable[t] assign[=] binary_operation[binary_operation[name[self].n - name[k]] <ast.FloorDiv object at 0x7da2590d6bc0> constant[2]] variable[Y] assign[=] list[[]] for taget[tuple[[<ast.Name object at 0x7da18dc06cb0>, <ast.Name object at 0x7da18dc042e0>]]] in starred[call[name[enumerate], parameter[name[X]]]] begin[:] variable[prod] assign[=] call[name[GF2int], parameter[constant[1]]] variable[Xl_inv] assign[=] call[name[Xl].inverse, parameter[]] for taget[name[ji]] in starred[call[name[_range], parameter[name[t]]]] begin[:] if compare[name[ji] equal[==] name[l]] begin[:] continue if compare[name[ji] less[<] call[name[len], parameter[name[X]]]] begin[:] variable[Xj] assign[=] call[name[X]][name[ji]] variable[prod] assign[=] binary_operation[name[prod] * binary_operation[name[Xl] - name[Xj]]] variable[Yl] assign[=] binary_operation[binary_operation[binary_operation[binary_operation[name[Xl] ** name[t]] * call[name[omega].evaluate, parameter[name[Xl_inv]]]] * name[Xl_inv]] * call[name[prod].inverse, parameter[]]] call[name[Y].append, parameter[name[Yl]]] return[name[Y]]
keyword[def] identifier[_old_forney] ( identifier[self] , identifier[omega] , identifier[X] , identifier[k] = keyword[None] ): literal[string] keyword[if] keyword[not] identifier[k] : identifier[k] = identifier[self] . identifier[k] identifier[t] =( identifier[self] . identifier[n] - identifier[k] )// literal[int] identifier[Y] =[] keyword[for] identifier[l] , identifier[Xl] keyword[in] identifier[enumerate] ( identifier[X] ): identifier[prod] = identifier[GF2int] ( literal[int] ) identifier[Xl_inv] = identifier[Xl] . identifier[inverse] () keyword[for] identifier[ji] keyword[in] identifier[_range] ( identifier[t] ): keyword[if] identifier[ji] == identifier[l] : keyword[continue] keyword[if] identifier[ji] < identifier[len] ( identifier[X] ): identifier[Xj] = identifier[X] [ identifier[ji] ] keyword[else] : identifier[Xj] = identifier[GF2int] ( literal[int] ) identifier[prod] = identifier[prod] *( identifier[Xl] - identifier[Xj] ) identifier[Yl] = identifier[Xl] ** identifier[t] * identifier[omega] . identifier[evaluate] ( identifier[Xl_inv] )* identifier[Xl_inv] * identifier[prod] . identifier[inverse] () identifier[Y] . identifier[append] ( identifier[Yl] ) keyword[return] identifier[Y]
def _old_forney(self, omega, X, k=None): """Computes the error magnitudes (only works with errors or erasures under t = floor((n-k)/2), not with erasures above (n-k)//2)""" # XXX Is floor division okay here? Should this be ceiling? if not k: k = self.k # depends on [control=['if'], data=[]] t = (self.n - k) // 2 Y = [] for (l, Xl) in enumerate(X): # Compute the sequence product and multiply its inverse in prod = GF2int(1) # just to init the product (1 is the neutral term for multiplication) Xl_inv = Xl.inverse() for ji in _range(t): # do not change to _range(len(X)) as can be seen in some papers, it won't give the correct result! (sometimes yes, but not always) if ji == l: continue # depends on [control=['if'], data=[]] if ji < len(X): Xj = X[ji] # depends on [control=['if'], data=['ji']] else: # if above the maximum degree of the polynomial, then all coefficients above are just 0 (that's logical...) Xj = GF2int(0) prod = prod * (Xl - Xj) # depends on [control=['for'], data=['ji']] #if (ji != l): # prod = prod * (GF2int(1) - X[ji]*(Xl.inverse())) # Compute Yl Yl = Xl ** t * omega.evaluate(Xl_inv) * Xl_inv * prod.inverse() Y.append(Yl) # depends on [control=['for'], data=[]] return Y
def mean_squared_error(data, ground_truth, mask=None, normalized=False, force_lower_is_better=True): r"""Return mean squared L2 distance between ``data`` and ``ground_truth``. See also `this Wikipedia article <https://en.wikipedia.org/wiki/Mean_squared_error>`_. Parameters ---------- data : `Tensor` or `array-like` Input data to compare to the ground truth. If not a `Tensor`, an unweighted tensor space will be assumed. ground_truth : `array-like` Reference to which ``data`` should be compared. mask : `array-like`, optional If given, ``data * mask`` is compared to ``ground_truth * mask``. normalized : bool, optional If ``True``, the output values are mapped to the interval :math:`[0, 1]` (see `Notes` for details). force_lower_is_better : bool, optional If ``True``, it is ensured that lower values correspond to better matches. For the mean squared error, this is already the case, and the flag is only present for compatibility to other figures of merit. Returns ------- mse : float FOM value, where a lower value means a better match. Notes ----- The FOM evaluates .. math:: \mathrm{MSE}(f, g) = \frac{\| f - g \|_2^2}{\| 1 \|_2^2}, where :math:`\| 1 \|^2_2` is the volume of the domain of definition of the functions. For :math:`\mathbb{R}^n` type spaces, this is equal to the number of elements :math:`n`. The normalized form is .. math:: \mathrm{MSE_N} = \frac{\| f - g \|_2^2}{(\| f \|_2 + \| g \|_2)^2}. The normalized variant takes values in :math:`[0, 1]`. """ if not hasattr(data, 'space'): data = odl.vector(data) space = data.space ground_truth = space.element(ground_truth) l2norm = odl.solvers.L2Norm(space) if mask is not None: data = data * mask ground_truth = ground_truth * mask diff = data - ground_truth fom = l2norm(diff) ** 2 if normalized: fom /= (l2norm(data) + l2norm(ground_truth)) ** 2 else: fom /= l2norm(space.one()) ** 2 # Ignore `force_lower_is_better` since that's already the case return fom
def function[mean_squared_error, parameter[data, ground_truth, mask, normalized, force_lower_is_better]]: constant[Return mean squared L2 distance between ``data`` and ``ground_truth``. See also `this Wikipedia article <https://en.wikipedia.org/wiki/Mean_squared_error>`_. Parameters ---------- data : `Tensor` or `array-like` Input data to compare to the ground truth. If not a `Tensor`, an unweighted tensor space will be assumed. ground_truth : `array-like` Reference to which ``data`` should be compared. mask : `array-like`, optional If given, ``data * mask`` is compared to ``ground_truth * mask``. normalized : bool, optional If ``True``, the output values are mapped to the interval :math:`[0, 1]` (see `Notes` for details). force_lower_is_better : bool, optional If ``True``, it is ensured that lower values correspond to better matches. For the mean squared error, this is already the case, and the flag is only present for compatibility to other figures of merit. Returns ------- mse : float FOM value, where a lower value means a better match. Notes ----- The FOM evaluates .. math:: \mathrm{MSE}(f, g) = \frac{\| f - g \|_2^2}{\| 1 \|_2^2}, where :math:`\| 1 \|^2_2` is the volume of the domain of definition of the functions. For :math:`\mathbb{R}^n` type spaces, this is equal to the number of elements :math:`n`. The normalized form is .. math:: \mathrm{MSE_N} = \frac{\| f - g \|_2^2}{(\| f \|_2 + \| g \|_2)^2}. The normalized variant takes values in :math:`[0, 1]`. 
] if <ast.UnaryOp object at 0x7da1b1eed750> begin[:] variable[data] assign[=] call[name[odl].vector, parameter[name[data]]] variable[space] assign[=] name[data].space variable[ground_truth] assign[=] call[name[space].element, parameter[name[ground_truth]]] variable[l2norm] assign[=] call[name[odl].solvers.L2Norm, parameter[name[space]]] if compare[name[mask] is_not constant[None]] begin[:] variable[data] assign[=] binary_operation[name[data] * name[mask]] variable[ground_truth] assign[=] binary_operation[name[ground_truth] * name[mask]] variable[diff] assign[=] binary_operation[name[data] - name[ground_truth]] variable[fom] assign[=] binary_operation[call[name[l2norm], parameter[name[diff]]] ** constant[2]] if name[normalized] begin[:] <ast.AugAssign object at 0x7da1b1e9ba60> return[name[fom]]
keyword[def] identifier[mean_squared_error] ( identifier[data] , identifier[ground_truth] , identifier[mask] = keyword[None] , identifier[normalized] = keyword[False] , identifier[force_lower_is_better] = keyword[True] ): literal[string] keyword[if] keyword[not] identifier[hasattr] ( identifier[data] , literal[string] ): identifier[data] = identifier[odl] . identifier[vector] ( identifier[data] ) identifier[space] = identifier[data] . identifier[space] identifier[ground_truth] = identifier[space] . identifier[element] ( identifier[ground_truth] ) identifier[l2norm] = identifier[odl] . identifier[solvers] . identifier[L2Norm] ( identifier[space] ) keyword[if] identifier[mask] keyword[is] keyword[not] keyword[None] : identifier[data] = identifier[data] * identifier[mask] identifier[ground_truth] = identifier[ground_truth] * identifier[mask] identifier[diff] = identifier[data] - identifier[ground_truth] identifier[fom] = identifier[l2norm] ( identifier[diff] )** literal[int] keyword[if] identifier[normalized] : identifier[fom] /=( identifier[l2norm] ( identifier[data] )+ identifier[l2norm] ( identifier[ground_truth] ))** literal[int] keyword[else] : identifier[fom] /= identifier[l2norm] ( identifier[space] . identifier[one] ())** literal[int] keyword[return] identifier[fom]
def mean_squared_error(data, ground_truth, mask=None, normalized=False, force_lower_is_better=True): """Return mean squared L2 distance between ``data`` and ``ground_truth``. See also `this Wikipedia article <https://en.wikipedia.org/wiki/Mean_squared_error>`_. Parameters ---------- data : `Tensor` or `array-like` Input data to compare to the ground truth. If not a `Tensor`, an unweighted tensor space will be assumed. ground_truth : `array-like` Reference to which ``data`` should be compared. mask : `array-like`, optional If given, ``data * mask`` is compared to ``ground_truth * mask``. normalized : bool, optional If ``True``, the output values are mapped to the interval :math:`[0, 1]` (see `Notes` for details). force_lower_is_better : bool, optional If ``True``, it is ensured that lower values correspond to better matches. For the mean squared error, this is already the case, and the flag is only present for compatibility to other figures of merit. Returns ------- mse : float FOM value, where a lower value means a better match. Notes ----- The FOM evaluates .. math:: \\mathrm{MSE}(f, g) = \\frac{\\| f - g \\|_2^2}{\\| 1 \\|_2^2}, where :math:`\\| 1 \\|^2_2` is the volume of the domain of definition of the functions. For :math:`\\mathbb{R}^n` type spaces, this is equal to the number of elements :math:`n`. The normalized form is .. math:: \\mathrm{MSE_N} = \\frac{\\| f - g \\|_2^2}{(\\| f \\|_2 + \\| g \\|_2)^2}. The normalized variant takes values in :math:`[0, 1]`. 
""" if not hasattr(data, 'space'): data = odl.vector(data) # depends on [control=['if'], data=[]] space = data.space ground_truth = space.element(ground_truth) l2norm = odl.solvers.L2Norm(space) if mask is not None: data = data * mask ground_truth = ground_truth * mask # depends on [control=['if'], data=['mask']] diff = data - ground_truth fom = l2norm(diff) ** 2 if normalized: fom /= (l2norm(data) + l2norm(ground_truth)) ** 2 # depends on [control=['if'], data=[]] else: fom /= l2norm(space.one()) ** 2 # Ignore `force_lower_is_better` since that's already the case return fom
def convert_html_to_text(value, preserve_urls=False): r""" >>> convert_html_to_text( ... ''' ... <html><body> ... Look &amp; click ... <a href="https://example.com">here</a> ... </body></html>''', preserve_urls=True) 'Look & click here (https://example.com)' >>> convert_html_to_text( ... ''' ... <html><body> ... Look &amp; click ... <a href="https://example.com?timestamp=1234">here</a> ... </body></html>''', preserve_urls=True) 'Look & click here (https://example.com?timestamp=1234)' >>> convert_html_to_text( ... ''' ... <html><body> ... Look &#38; click here ... </body></html>''', preserve_urls=True) 'Look & click here' >>> convert_html_to_text( ... ''' ... <html><body> ... Look &amp; click on ... <a href="https://example.com">https://example.com</a> ... </body></html>''', preserve_urls=True) 'Look & click on https://example.com' >>> convert_html_to_text( ... ''' ... <html><body> ... I'm here, <br> click ... <a href="https://example.com">me</a> ... </body></html>''', preserve_urls=True) "I'm here,\nclick me (https://example.com)" >>> convert_html_to_text( ... ''' ... <html><body> ... I'm here, <br/> click ... <a href="https://example.com">me</a> ... </body></html>''', preserve_urls=True) "I'm here,\nclick me (https://example.com)" >>> convert_html_to_text( ... ''' ... <html><body> ... I'm here, <br/> click ... <a href="https://example.com">me</a> ... </body></html>''') "I'm here,\nclick me" >>> convert_html_to_text( ... ''' ... <html><body> ... <p>I'm here!</p> ... <p>Click <a href="https://example.com">me</a></p> ... </body></html>''', preserve_urls=True) "I'm here!\nClick me (https://example.com)\n" >>> convert_html_to_text( ... ''' ... <html> ... <head> ... <title>I'm here</title> ... </head> ... <body> ... <p>I'm here!</p> ... <p>Click <a href="https://example.com">me</a></p> ... </body> ... </html>''', preserve_urls=True) "I'm here!\nClick me (https://example.com)\n" """ s = MLStripper(preserve_urls=preserve_urls) s.feed(value) s.close() return s.get_data()
def function[convert_html_to_text, parameter[value, preserve_urls]]: constant[ >>> convert_html_to_text( ... ''' ... <html><body> ... Look &amp; click ... <a href="https://example.com">here</a> ... </body></html>''', preserve_urls=True) 'Look & click here (https://example.com)' >>> convert_html_to_text( ... ''' ... <html><body> ... Look &amp; click ... <a href="https://example.com?timestamp=1234">here</a> ... </body></html>''', preserve_urls=True) 'Look & click here (https://example.com?timestamp=1234)' >>> convert_html_to_text( ... ''' ... <html><body> ... Look &#38; click here ... </body></html>''', preserve_urls=True) 'Look & click here' >>> convert_html_to_text( ... ''' ... <html><body> ... Look &amp; click on ... <a href="https://example.com">https://example.com</a> ... </body></html>''', preserve_urls=True) 'Look & click on https://example.com' >>> convert_html_to_text( ... ''' ... <html><body> ... I'm here, <br> click ... <a href="https://example.com">me</a> ... </body></html>''', preserve_urls=True) "I'm here,\nclick me (https://example.com)" >>> convert_html_to_text( ... ''' ... <html><body> ... I'm here, <br/> click ... <a href="https://example.com">me</a> ... </body></html>''', preserve_urls=True) "I'm here,\nclick me (https://example.com)" >>> convert_html_to_text( ... ''' ... <html><body> ... I'm here, <br/> click ... <a href="https://example.com">me</a> ... </body></html>''') "I'm here,\nclick me" >>> convert_html_to_text( ... ''' ... <html><body> ... <p>I'm here!</p> ... <p>Click <a href="https://example.com">me</a></p> ... </body></html>''', preserve_urls=True) "I'm here!\nClick me (https://example.com)\n" >>> convert_html_to_text( ... ''' ... <html> ... <head> ... <title>I'm here</title> ... </head> ... <body> ... <p>I'm here!</p> ... <p>Click <a href="https://example.com">me</a></p> ... </body> ... 
</html>''', preserve_urls=True) "I'm here!\nClick me (https://example.com)\n" ] variable[s] assign[=] call[name[MLStripper], parameter[]] call[name[s].feed, parameter[name[value]]] call[name[s].close, parameter[]] return[call[name[s].get_data, parameter[]]]
keyword[def] identifier[convert_html_to_text] ( identifier[value] , identifier[preserve_urls] = keyword[False] ): literal[string] identifier[s] = identifier[MLStripper] ( identifier[preserve_urls] = identifier[preserve_urls] ) identifier[s] . identifier[feed] ( identifier[value] ) identifier[s] . identifier[close] () keyword[return] identifier[s] . identifier[get_data] ()
def convert_html_to_text(value, preserve_urls=False): """ >>> convert_html_to_text( ... ''' ... <html><body> ... Look &amp; click ... <a href="https://example.com">here</a> ... </body></html>''', preserve_urls=True) 'Look & click here (https://example.com)' >>> convert_html_to_text( ... ''' ... <html><body> ... Look &amp; click ... <a href="https://example.com?timestamp=1234">here</a> ... </body></html>''', preserve_urls=True) 'Look & click here (https://example.com?timestamp=1234)' >>> convert_html_to_text( ... ''' ... <html><body> ... Look &#38; click here ... </body></html>''', preserve_urls=True) 'Look & click here' >>> convert_html_to_text( ... ''' ... <html><body> ... Look &amp; click on ... <a href="https://example.com">https://example.com</a> ... </body></html>''', preserve_urls=True) 'Look & click on https://example.com' >>> convert_html_to_text( ... ''' ... <html><body> ... I'm here, <br> click ... <a href="https://example.com">me</a> ... </body></html>''', preserve_urls=True) "I'm here,\\nclick me (https://example.com)" >>> convert_html_to_text( ... ''' ... <html><body> ... I'm here, <br/> click ... <a href="https://example.com">me</a> ... </body></html>''', preserve_urls=True) "I'm here,\\nclick me (https://example.com)" >>> convert_html_to_text( ... ''' ... <html><body> ... I'm here, <br/> click ... <a href="https://example.com">me</a> ... </body></html>''') "I'm here,\\nclick me" >>> convert_html_to_text( ... ''' ... <html><body> ... <p>I'm here!</p> ... <p>Click <a href="https://example.com">me</a></p> ... </body></html>''', preserve_urls=True) "I'm here!\\nClick me (https://example.com)\\n" >>> convert_html_to_text( ... ''' ... <html> ... <head> ... <title>I'm here</title> ... </head> ... <body> ... <p>I'm here!</p> ... <p>Click <a href="https://example.com">me</a></p> ... </body> ... 
</html>''', preserve_urls=True) "I'm here!\\nClick me (https://example.com)\\n" """ s = MLStripper(preserve_urls=preserve_urls) s.feed(value) s.close() return s.get_data()
def get_config_groups(self, groups_conf, groups_pillar_name): ''' get info from groups in config, and from the named pillar todo: add specification for the minion to use to recover pillar ''' # Get groups # Default to returning something that'll never match ret_groups = { 'default': { 'users': set(), 'commands': set(), 'aliases': {}, 'default_target': {}, 'targets': {} } } # allow for empty groups in the config file, and instead let some/all of this come # from pillar data. if not groups_conf: use_groups = {} else: use_groups = groups_conf # First obtain group lists from pillars, then in case there is any overlap, iterate over the groups # that come from pillars. The configuration in files on disk/from startup # will override any configs from pillars. They are meant to be complementary not to provide overrides. log.debug('use_groups %s', use_groups) try: groups_gen = itertools.chain(self._groups_from_pillar(groups_pillar_name).items(), use_groups.items()) except AttributeError: log.warning('Failed to get groups from %s: %s or from config: %s', groups_pillar_name, self._groups_from_pillar(groups_pillar_name), use_groups ) groups_gen = [] for name, config in groups_gen: log.info('Trying to get %s and %s to be useful', name, config) ret_groups.setdefault(name, { 'users': set(), 'commands': set(), 'aliases': {}, 'default_target': {}, 'targets': {} }) try: ret_groups[name]['users'].update(set(config.get('users', []))) ret_groups[name]['commands'].update(set(config.get('commands', []))) ret_groups[name]['aliases'].update(config.get('aliases', {})) ret_groups[name]['default_target'].update(config.get('default_target', {})) ret_groups[name]['targets'].update(config.get('targets', {})) except (IndexError, AttributeError): log.warning("Couldn't use group %s. Check that targets is a dictionary and not a list", name) log.debug('Got the groups: %s', ret_groups) return ret_groups
def function[get_config_groups, parameter[self, groups_conf, groups_pillar_name]]: constant[ get info from groups in config, and from the named pillar todo: add specification for the minion to use to recover pillar ] variable[ret_groups] assign[=] dictionary[[<ast.Constant object at 0x7da1b2344400>], [<ast.Dict object at 0x7da1b2347cd0>]] if <ast.UnaryOp object at 0x7da1b23472e0> begin[:] variable[use_groups] assign[=] dictionary[[], []] call[name[log].debug, parameter[constant[use_groups %s], name[use_groups]]] <ast.Try object at 0x7da1b2344550> for taget[tuple[[<ast.Name object at 0x7da1b20ba950>, <ast.Name object at 0x7da1b20ba920>]]] in starred[name[groups_gen]] begin[:] call[name[log].info, parameter[constant[Trying to get %s and %s to be useful], name[name], name[config]]] call[name[ret_groups].setdefault, parameter[name[name], dictionary[[<ast.Constant object at 0x7da1b20bae90>, <ast.Constant object at 0x7da1b20ba8f0>, <ast.Constant object at 0x7da1b20b8910>, <ast.Constant object at 0x7da1b20bb520>, <ast.Constant object at 0x7da1b20ba5c0>], [<ast.Call object at 0x7da1b20b9f60>, <ast.Call object at 0x7da1b20b9810>, <ast.Dict object at 0x7da1b20b9e70>, <ast.Dict object at 0x7da1b20ba410>, <ast.Dict object at 0x7da1b20bb010>]]]] <ast.Try object at 0x7da1b20b97e0> call[name[log].debug, parameter[constant[Got the groups: %s], name[ret_groups]]] return[name[ret_groups]]
keyword[def] identifier[get_config_groups] ( identifier[self] , identifier[groups_conf] , identifier[groups_pillar_name] ): literal[string] identifier[ret_groups] ={ literal[string] :{ literal[string] : identifier[set] (), literal[string] : identifier[set] (), literal[string] :{}, literal[string] :{}, literal[string] :{} } } keyword[if] keyword[not] identifier[groups_conf] : identifier[use_groups] ={} keyword[else] : identifier[use_groups] = identifier[groups_conf] identifier[log] . identifier[debug] ( literal[string] , identifier[use_groups] ) keyword[try] : identifier[groups_gen] = identifier[itertools] . identifier[chain] ( identifier[self] . identifier[_groups_from_pillar] ( identifier[groups_pillar_name] ). identifier[items] (), identifier[use_groups] . identifier[items] ()) keyword[except] identifier[AttributeError] : identifier[log] . identifier[warning] ( literal[string] , identifier[groups_pillar_name] , identifier[self] . identifier[_groups_from_pillar] ( identifier[groups_pillar_name] ), identifier[use_groups] ) identifier[groups_gen] =[] keyword[for] identifier[name] , identifier[config] keyword[in] identifier[groups_gen] : identifier[log] . identifier[info] ( literal[string] , identifier[name] , identifier[config] ) identifier[ret_groups] . identifier[setdefault] ( identifier[name] ,{ literal[string] : identifier[set] (), literal[string] : identifier[set] (), literal[string] :{}, literal[string] :{}, literal[string] :{} }) keyword[try] : identifier[ret_groups] [ identifier[name] ][ literal[string] ]. identifier[update] ( identifier[set] ( identifier[config] . identifier[get] ( literal[string] ,[]))) identifier[ret_groups] [ identifier[name] ][ literal[string] ]. identifier[update] ( identifier[set] ( identifier[config] . identifier[get] ( literal[string] ,[]))) identifier[ret_groups] [ identifier[name] ][ literal[string] ]. identifier[update] ( identifier[config] . 
identifier[get] ( literal[string] ,{})) identifier[ret_groups] [ identifier[name] ][ literal[string] ]. identifier[update] ( identifier[config] . identifier[get] ( literal[string] ,{})) identifier[ret_groups] [ identifier[name] ][ literal[string] ]. identifier[update] ( identifier[config] . identifier[get] ( literal[string] ,{})) keyword[except] ( identifier[IndexError] , identifier[AttributeError] ): identifier[log] . identifier[warning] ( literal[string] , identifier[name] ) identifier[log] . identifier[debug] ( literal[string] , identifier[ret_groups] ) keyword[return] identifier[ret_groups]
def get_config_groups(self, groups_conf, groups_pillar_name): """ get info from groups in config, and from the named pillar todo: add specification for the minion to use to recover pillar """ # Get groups # Default to returning something that'll never match ret_groups = {'default': {'users': set(), 'commands': set(), 'aliases': {}, 'default_target': {}, 'targets': {}}} # allow for empty groups in the config file, and instead let some/all of this come # from pillar data. if not groups_conf: use_groups = {} # depends on [control=['if'], data=[]] else: use_groups = groups_conf # First obtain group lists from pillars, then in case there is any overlap, iterate over the groups # that come from pillars. The configuration in files on disk/from startup # will override any configs from pillars. They are meant to be complementary not to provide overrides. log.debug('use_groups %s', use_groups) try: groups_gen = itertools.chain(self._groups_from_pillar(groups_pillar_name).items(), use_groups.items()) # depends on [control=['try'], data=[]] except AttributeError: log.warning('Failed to get groups from %s: %s or from config: %s', groups_pillar_name, self._groups_from_pillar(groups_pillar_name), use_groups) groups_gen = [] # depends on [control=['except'], data=[]] for (name, config) in groups_gen: log.info('Trying to get %s and %s to be useful', name, config) ret_groups.setdefault(name, {'users': set(), 'commands': set(), 'aliases': {}, 'default_target': {}, 'targets': {}}) try: ret_groups[name]['users'].update(set(config.get('users', []))) ret_groups[name]['commands'].update(set(config.get('commands', []))) ret_groups[name]['aliases'].update(config.get('aliases', {})) ret_groups[name]['default_target'].update(config.get('default_target', {})) ret_groups[name]['targets'].update(config.get('targets', {})) # depends on [control=['try'], data=[]] except (IndexError, AttributeError): log.warning("Couldn't use group %s. 
Check that targets is a dictionary and not a list", name) # depends on [control=['except'], data=[]] # depends on [control=['for'], data=[]] log.debug('Got the groups: %s', ret_groups) return ret_groups
def readFile(cls, filepath): """Try different encoding to open a file in readonly mode""" for mode in ("utf-8", 'gbk', 'cp1252', 'windows-1252', 'latin-1'): try: with open(filepath, mode='r', encoding=mode) as f: content = f.read() cit.info('以 {} 格式打开文件'.format(mode)) return content except UnicodeDecodeError: cit.warn('打开文件:尝试 {} 格式失败'.format(mode)) return None
def function[readFile, parameter[cls, filepath]]: constant[Try different encoding to open a file in readonly mode] for taget[name[mode]] in starred[tuple[[<ast.Constant object at 0x7da20c6c4a60>, <ast.Constant object at 0x7da20c6c79d0>, <ast.Constant object at 0x7da20c6c42e0>, <ast.Constant object at 0x7da20c6c7e50>, <ast.Constant object at 0x7da20c6c6560>]]] begin[:] <ast.Try object at 0x7da20c6c77f0> return[constant[None]]
keyword[def] identifier[readFile] ( identifier[cls] , identifier[filepath] ): literal[string] keyword[for] identifier[mode] keyword[in] ( literal[string] , literal[string] , literal[string] , literal[string] , literal[string] ): keyword[try] : keyword[with] identifier[open] ( identifier[filepath] , identifier[mode] = literal[string] , identifier[encoding] = identifier[mode] ) keyword[as] identifier[f] : identifier[content] = identifier[f] . identifier[read] () identifier[cit] . identifier[info] ( literal[string] . identifier[format] ( identifier[mode] )) keyword[return] identifier[content] keyword[except] identifier[UnicodeDecodeError] : identifier[cit] . identifier[warn] ( literal[string] . identifier[format] ( identifier[mode] )) keyword[return] keyword[None]
def readFile(cls, filepath): """Try different encoding to open a file in readonly mode""" for mode in ('utf-8', 'gbk', 'cp1252', 'windows-1252', 'latin-1'): try: with open(filepath, mode='r', encoding=mode) as f: content = f.read() cit.info('以 {} 格式打开文件'.format(mode)) return content # depends on [control=['with'], data=['f']] # depends on [control=['try'], data=[]] except UnicodeDecodeError: cit.warn('打开文件:尝试 {} 格式失败'.format(mode)) # depends on [control=['except'], data=[]] # depends on [control=['for'], data=['mode']] return None
def check_permission(permission, brain_or_object): """Check whether the security context allows the given permission on the given brain or object. N.B.: This includes also acquired permissions :param permission: Permission name :brain_or_object: Catalog brain or object :returns: True if the permission is granted """ sm = get_security_manager() obj = api.get_object(brain_or_object) return sm.checkPermission(permission, obj) == 1
def function[check_permission, parameter[permission, brain_or_object]]: constant[Check whether the security context allows the given permission on the given brain or object. N.B.: This includes also acquired permissions :param permission: Permission name :brain_or_object: Catalog brain or object :returns: True if the permission is granted ] variable[sm] assign[=] call[name[get_security_manager], parameter[]] variable[obj] assign[=] call[name[api].get_object, parameter[name[brain_or_object]]] return[compare[call[name[sm].checkPermission, parameter[name[permission], name[obj]]] equal[==] constant[1]]]
keyword[def] identifier[check_permission] ( identifier[permission] , identifier[brain_or_object] ): literal[string] identifier[sm] = identifier[get_security_manager] () identifier[obj] = identifier[api] . identifier[get_object] ( identifier[brain_or_object] ) keyword[return] identifier[sm] . identifier[checkPermission] ( identifier[permission] , identifier[obj] )== literal[int]
def check_permission(permission, brain_or_object): """Check whether the security context allows the given permission on the given brain or object. N.B.: This includes also acquired permissions :param permission: Permission name :brain_or_object: Catalog brain or object :returns: True if the permission is granted """ sm = get_security_manager() obj = api.get_object(brain_or_object) return sm.checkPermission(permission, obj) == 1
def p_expr_trig(p): """ bexpr : math_fn bexpr %prec UMINUS """ p[0] = make_builtin(p.lineno(1), p[1], make_typecast(TYPE.float_, p[2], p.lineno(1)), {'SIN': math.sin, 'COS': math.cos, 'TAN': math.tan, 'ASN': math.asin, 'ACS': math.acos, 'ATN': math.atan, 'LN': lambda y: math.log(y, math.exp(1)), # LN(x) 'EXP': math.exp, 'SQR': math.sqrt }[p[1]])
def function[p_expr_trig, parameter[p]]: constant[ bexpr : math_fn bexpr %prec UMINUS ] call[name[p]][constant[0]] assign[=] call[name[make_builtin], parameter[call[name[p].lineno, parameter[constant[1]]], call[name[p]][constant[1]], call[name[make_typecast], parameter[name[TYPE].float_, call[name[p]][constant[2]], call[name[p].lineno, parameter[constant[1]]]]], call[dictionary[[<ast.Constant object at 0x7da1b0651930>, <ast.Constant object at 0x7da1b0651bd0>, <ast.Constant object at 0x7da1b0653dc0>, <ast.Constant object at 0x7da1b06507f0>, <ast.Constant object at 0x7da1b0651690>, <ast.Constant object at 0x7da1b0653d30>, <ast.Constant object at 0x7da1b0651720>, <ast.Constant object at 0x7da1b0652e00>, <ast.Constant object at 0x7da1b0651660>], [<ast.Attribute object at 0x7da1b0653d00>, <ast.Attribute object at 0x7da1b0650880>, <ast.Attribute object at 0x7da1b0651b40>, <ast.Attribute object at 0x7da1b0651c60>, <ast.Attribute object at 0x7da1b06fa440>, <ast.Attribute object at 0x7da1b06f9450>, <ast.Lambda object at 0x7da1b06f8460>, <ast.Attribute object at 0x7da1b06faad0>, <ast.Attribute object at 0x7da1b06f8be0>]]][call[name[p]][constant[1]]]]]
keyword[def] identifier[p_expr_trig] ( identifier[p] ): literal[string] identifier[p] [ literal[int] ]= identifier[make_builtin] ( identifier[p] . identifier[lineno] ( literal[int] ), identifier[p] [ literal[int] ], identifier[make_typecast] ( identifier[TYPE] . identifier[float_] , identifier[p] [ literal[int] ], identifier[p] . identifier[lineno] ( literal[int] )), { literal[string] : identifier[math] . identifier[sin] , literal[string] : identifier[math] . identifier[cos] , literal[string] : identifier[math] . identifier[tan] , literal[string] : identifier[math] . identifier[asin] , literal[string] : identifier[math] . identifier[acos] , literal[string] : identifier[math] . identifier[atan] , literal[string] : keyword[lambda] identifier[y] : identifier[math] . identifier[log] ( identifier[y] , identifier[math] . identifier[exp] ( literal[int] )), literal[string] : identifier[math] . identifier[exp] , literal[string] : identifier[math] . identifier[sqrt] }[ identifier[p] [ literal[int] ]])
def p_expr_trig(p): """ bexpr : math_fn bexpr %prec UMINUS """ # LN(x) p[0] = make_builtin(p.lineno(1), p[1], make_typecast(TYPE.float_, p[2], p.lineno(1)), {'SIN': math.sin, 'COS': math.cos, 'TAN': math.tan, 'ASN': math.asin, 'ACS': math.acos, 'ATN': math.atan, 'LN': lambda y: math.log(y, math.exp(1)), 'EXP': math.exp, 'SQR': math.sqrt}[p[1]])
def _parse_session_run_index(self, event): """Parses the session_run_index value from the event proto. Args: event: The event with metadata that contains the session_run_index. Returns: The int session_run_index value. Or constants.SENTINEL_FOR_UNDETERMINED_STEP if it could not be determined. """ metadata_string = event.log_message.message try: metadata = json.loads(metadata_string) except ValueError as e: logger.error( "Could not decode metadata string '%s' for step value: %s", metadata_string, e) return constants.SENTINEL_FOR_UNDETERMINED_STEP try: return metadata["session_run_index"] except KeyError: logger.error( "The session_run_index is missing from the metadata: %s", metadata_string) return constants.SENTINEL_FOR_UNDETERMINED_STEP
def function[_parse_session_run_index, parameter[self, event]]: constant[Parses the session_run_index value from the event proto. Args: event: The event with metadata that contains the session_run_index. Returns: The int session_run_index value. Or constants.SENTINEL_FOR_UNDETERMINED_STEP if it could not be determined. ] variable[metadata_string] assign[=] name[event].log_message.message <ast.Try object at 0x7da1b21e88e0> <ast.Try object at 0x7da1b21e9630>
keyword[def] identifier[_parse_session_run_index] ( identifier[self] , identifier[event] ): literal[string] identifier[metadata_string] = identifier[event] . identifier[log_message] . identifier[message] keyword[try] : identifier[metadata] = identifier[json] . identifier[loads] ( identifier[metadata_string] ) keyword[except] identifier[ValueError] keyword[as] identifier[e] : identifier[logger] . identifier[error] ( literal[string] , identifier[metadata_string] , identifier[e] ) keyword[return] identifier[constants] . identifier[SENTINEL_FOR_UNDETERMINED_STEP] keyword[try] : keyword[return] identifier[metadata] [ literal[string] ] keyword[except] identifier[KeyError] : identifier[logger] . identifier[error] ( literal[string] , identifier[metadata_string] ) keyword[return] identifier[constants] . identifier[SENTINEL_FOR_UNDETERMINED_STEP]
def _parse_session_run_index(self, event): """Parses the session_run_index value from the event proto. Args: event: The event with metadata that contains the session_run_index. Returns: The int session_run_index value. Or constants.SENTINEL_FOR_UNDETERMINED_STEP if it could not be determined. """ metadata_string = event.log_message.message try: metadata = json.loads(metadata_string) # depends on [control=['try'], data=[]] except ValueError as e: logger.error("Could not decode metadata string '%s' for step value: %s", metadata_string, e) return constants.SENTINEL_FOR_UNDETERMINED_STEP # depends on [control=['except'], data=['e']] try: return metadata['session_run_index'] # depends on [control=['try'], data=[]] except KeyError: logger.error('The session_run_index is missing from the metadata: %s', metadata_string) return constants.SENTINEL_FOR_UNDETERMINED_STEP # depends on [control=['except'], data=[]]
def _save_yaml_file(self, file, val): """ Save data to yaml file :param file: Writable object or path to file :type file: FileIO | str | unicode :param val: Value or struct to save :type val: None | int | float | str | unicode | list | dict :raises IOError: Failed to save """ try: save_yaml_file(file, val) except: self.exception("Failed to save to {}".format(file)) raise IOError("Saving file failed")
def function[_save_yaml_file, parameter[self, file, val]]: constant[ Save data to yaml file :param file: Writable object or path to file :type file: FileIO | str | unicode :param val: Value or struct to save :type val: None | int | float | str | unicode | list | dict :raises IOError: Failed to save ] <ast.Try object at 0x7da1b10cdc60>
keyword[def] identifier[_save_yaml_file] ( identifier[self] , identifier[file] , identifier[val] ): literal[string] keyword[try] : identifier[save_yaml_file] ( identifier[file] , identifier[val] ) keyword[except] : identifier[self] . identifier[exception] ( literal[string] . identifier[format] ( identifier[file] )) keyword[raise] identifier[IOError] ( literal[string] )
def _save_yaml_file(self, file, val): """ Save data to yaml file :param file: Writable object or path to file :type file: FileIO | str | unicode :param val: Value or struct to save :type val: None | int | float | str | unicode | list | dict :raises IOError: Failed to save """ try: save_yaml_file(file, val) # depends on [control=['try'], data=[]] except: self.exception('Failed to save to {}'.format(file)) raise IOError('Saving file failed') # depends on [control=['except'], data=[]]
def safe_add(a, b): """safe version of add""" if isinstance(a, str) and isinstance(b, str) and len(a) + len(b) > MAX_STR_LEN: raise RuntimeError("String length exceeded, max string length is {}".format(MAX_STR_LEN)) return a + b
def function[safe_add, parameter[a, b]]: constant[safe version of add] if <ast.BoolOp object at 0x7da1b12c6d10> begin[:] <ast.Raise object at 0x7da1b12c44c0> return[binary_operation[name[a] + name[b]]]
keyword[def] identifier[safe_add] ( identifier[a] , identifier[b] ): literal[string] keyword[if] identifier[isinstance] ( identifier[a] , identifier[str] ) keyword[and] identifier[isinstance] ( identifier[b] , identifier[str] ) keyword[and] identifier[len] ( identifier[a] )+ identifier[len] ( identifier[b] )> identifier[MAX_STR_LEN] : keyword[raise] identifier[RuntimeError] ( literal[string] . identifier[format] ( identifier[MAX_STR_LEN] )) keyword[return] identifier[a] + identifier[b]
def safe_add(a, b): """safe version of add""" if isinstance(a, str) and isinstance(b, str) and (len(a) + len(b) > MAX_STR_LEN): raise RuntimeError('String length exceeded, max string length is {}'.format(MAX_STR_LEN)) # depends on [control=['if'], data=[]] return a + b
def deprecated(message=None): """A decorator for deprecated functions""" def _decorator(func, message=message): if message is None: message = '%s is deprecated' % func.__name__ def newfunc(*args, **kwds): warnings.warn(message, DeprecationWarning, stacklevel=2) return func(*args, **kwds) return newfunc return _decorator
def function[deprecated, parameter[message]]: constant[A decorator for deprecated functions] def function[_decorator, parameter[func, message]]: if compare[name[message] is constant[None]] begin[:] variable[message] assign[=] binary_operation[constant[%s is deprecated] <ast.Mod object at 0x7da2590d6920> name[func].__name__] def function[newfunc, parameter[]]: call[name[warnings].warn, parameter[name[message], name[DeprecationWarning]]] return[call[name[func], parameter[<ast.Starred object at 0x7da1b2344790>]]] return[name[newfunc]] return[name[_decorator]]
keyword[def] identifier[deprecated] ( identifier[message] = keyword[None] ): literal[string] keyword[def] identifier[_decorator] ( identifier[func] , identifier[message] = identifier[message] ): keyword[if] identifier[message] keyword[is] keyword[None] : identifier[message] = literal[string] % identifier[func] . identifier[__name__] keyword[def] identifier[newfunc] (* identifier[args] ,** identifier[kwds] ): identifier[warnings] . identifier[warn] ( identifier[message] , identifier[DeprecationWarning] , identifier[stacklevel] = literal[int] ) keyword[return] identifier[func] (* identifier[args] ,** identifier[kwds] ) keyword[return] identifier[newfunc] keyword[return] identifier[_decorator]
def deprecated(message=None): """A decorator for deprecated functions""" def _decorator(func, message=message): if message is None: message = '%s is deprecated' % func.__name__ # depends on [control=['if'], data=['message']] def newfunc(*args, **kwds): warnings.warn(message, DeprecationWarning, stacklevel=2) return func(*args, **kwds) return newfunc return _decorator
def sample_u(self, q): r"""Extract a sample from random variates uniform on :math:`[0, 1]`. For a univariate distribution, this is simply evaluating the inverse CDF. To facilitate efficient sampling, this function returns a *vector* of PPF values, one value for each variable. Basically, the idea is that, given a vector :math:`q` of `num_params` values each of which is distributed uniformly on :math:`[0, 1]`, this function will return corresponding samples for each variable. Parameters ---------- q : array of float Values between 0 and 1 to evaluate inverse CDF at. """ q = scipy.atleast_1d(q) if len(q) != len(self.sigma): raise ValueError("length of q must equal the number of parameters!") if q.ndim != 1: raise ValueError("q must be one-dimensional!") if (q < 0).any() or (q > 1).any(): raise ValueError("q must be within [0, 1]!") return scipy.asarray([scipy.stats.lognorm.ppf(v, s, loc=0, scale=em) for v, s, em in zip(q, self.sigma, self.emu)])
def function[sample_u, parameter[self, q]]: constant[Extract a sample from random variates uniform on :math:`[0, 1]`. For a univariate distribution, this is simply evaluating the inverse CDF. To facilitate efficient sampling, this function returns a *vector* of PPF values, one value for each variable. Basically, the idea is that, given a vector :math:`q` of `num_params` values each of which is distributed uniformly on :math:`[0, 1]`, this function will return corresponding samples for each variable. Parameters ---------- q : array of float Values between 0 and 1 to evaluate inverse CDF at. ] variable[q] assign[=] call[name[scipy].atleast_1d, parameter[name[q]]] if compare[call[name[len], parameter[name[q]]] not_equal[!=] call[name[len], parameter[name[self].sigma]]] begin[:] <ast.Raise object at 0x7da20c795420> if compare[name[q].ndim not_equal[!=] constant[1]] begin[:] <ast.Raise object at 0x7da20c7963e0> if <ast.BoolOp object at 0x7da20c795480> begin[:] <ast.Raise object at 0x7da20c795510> return[call[name[scipy].asarray, parameter[<ast.ListComp object at 0x7da20c794f10>]]]
keyword[def] identifier[sample_u] ( identifier[self] , identifier[q] ): literal[string] identifier[q] = identifier[scipy] . identifier[atleast_1d] ( identifier[q] ) keyword[if] identifier[len] ( identifier[q] )!= identifier[len] ( identifier[self] . identifier[sigma] ): keyword[raise] identifier[ValueError] ( literal[string] ) keyword[if] identifier[q] . identifier[ndim] != literal[int] : keyword[raise] identifier[ValueError] ( literal[string] ) keyword[if] ( identifier[q] < literal[int] ). identifier[any] () keyword[or] ( identifier[q] > literal[int] ). identifier[any] (): keyword[raise] identifier[ValueError] ( literal[string] ) keyword[return] identifier[scipy] . identifier[asarray] ([ identifier[scipy] . identifier[stats] . identifier[lognorm] . identifier[ppf] ( identifier[v] , identifier[s] , identifier[loc] = literal[int] , identifier[scale] = identifier[em] ) keyword[for] identifier[v] , identifier[s] , identifier[em] keyword[in] identifier[zip] ( identifier[q] , identifier[self] . identifier[sigma] , identifier[self] . identifier[emu] )])
def sample_u(self, q): """Extract a sample from random variates uniform on :math:`[0, 1]`. For a univariate distribution, this is simply evaluating the inverse CDF. To facilitate efficient sampling, this function returns a *vector* of PPF values, one value for each variable. Basically, the idea is that, given a vector :math:`q` of `num_params` values each of which is distributed uniformly on :math:`[0, 1]`, this function will return corresponding samples for each variable. Parameters ---------- q : array of float Values between 0 and 1 to evaluate inverse CDF at. """ q = scipy.atleast_1d(q) if len(q) != len(self.sigma): raise ValueError('length of q must equal the number of parameters!') # depends on [control=['if'], data=[]] if q.ndim != 1: raise ValueError('q must be one-dimensional!') # depends on [control=['if'], data=[]] if (q < 0).any() or (q > 1).any(): raise ValueError('q must be within [0, 1]!') # depends on [control=['if'], data=[]] return scipy.asarray([scipy.stats.lognorm.ppf(v, s, loc=0, scale=em) for (v, s, em) in zip(q, self.sigma, self.emu)])
def process_exception(self, request, e): """ Logs exception error message and sends email to ADMINS if hostname is not testserver and DEBUG=False. :param request: HttpRequest :param e: Exception """ from jutil.email import send_email assert isinstance(request, HttpRequest) full_path = request.get_full_path() user = request.user msg = '{full_path}\n{err} (IP={ip}, user={user}) {trace}'.format(full_path=full_path, user=user, ip=get_real_ip(request), err=e, trace=str(traceback.format_exc())) logger.error(msg) hostname = request.get_host() if not settings.DEBUG and hostname != 'testserver': send_email(settings.ADMINS, 'Error @ {}'.format(hostname), msg) return None
def function[process_exception, parameter[self, request, e]]: constant[ Logs exception error message and sends email to ADMINS if hostname is not testserver and DEBUG=False. :param request: HttpRequest :param e: Exception ] from relative_module[jutil.email] import module[send_email] assert[call[name[isinstance], parameter[name[request], name[HttpRequest]]]] variable[full_path] assign[=] call[name[request].get_full_path, parameter[]] variable[user] assign[=] name[request].user variable[msg] assign[=] call[constant[{full_path} {err} (IP={ip}, user={user}) {trace}].format, parameter[]] call[name[logger].error, parameter[name[msg]]] variable[hostname] assign[=] call[name[request].get_host, parameter[]] if <ast.BoolOp object at 0x7da1b1076710> begin[:] call[name[send_email], parameter[name[settings].ADMINS, call[constant[Error @ {}].format, parameter[name[hostname]]], name[msg]]] return[constant[None]]
keyword[def] identifier[process_exception] ( identifier[self] , identifier[request] , identifier[e] ): literal[string] keyword[from] identifier[jutil] . identifier[email] keyword[import] identifier[send_email] keyword[assert] identifier[isinstance] ( identifier[request] , identifier[HttpRequest] ) identifier[full_path] = identifier[request] . identifier[get_full_path] () identifier[user] = identifier[request] . identifier[user] identifier[msg] = literal[string] . identifier[format] ( identifier[full_path] = identifier[full_path] , identifier[user] = identifier[user] , identifier[ip] = identifier[get_real_ip] ( identifier[request] ), identifier[err] = identifier[e] , identifier[trace] = identifier[str] ( identifier[traceback] . identifier[format_exc] ())) identifier[logger] . identifier[error] ( identifier[msg] ) identifier[hostname] = identifier[request] . identifier[get_host] () keyword[if] keyword[not] identifier[settings] . identifier[DEBUG] keyword[and] identifier[hostname] != literal[string] : identifier[send_email] ( identifier[settings] . identifier[ADMINS] , literal[string] . identifier[format] ( identifier[hostname] ), identifier[msg] ) keyword[return] keyword[None]
def process_exception(self, request, e): """ Logs exception error message and sends email to ADMINS if hostname is not testserver and DEBUG=False. :param request: HttpRequest :param e: Exception """ from jutil.email import send_email assert isinstance(request, HttpRequest) full_path = request.get_full_path() user = request.user msg = '{full_path}\n{err} (IP={ip}, user={user}) {trace}'.format(full_path=full_path, user=user, ip=get_real_ip(request), err=e, trace=str(traceback.format_exc())) logger.error(msg) hostname = request.get_host() if not settings.DEBUG and hostname != 'testserver': send_email(settings.ADMINS, 'Error @ {}'.format(hostname), msg) # depends on [control=['if'], data=[]] return None
def get_offset(self): """ Return offset from tus server. This is different from the instance attribute 'offset' because this makes an http request to the tus server to retrieve the offset. """ resp = requests.head(self.url, headers=self.headers) offset = resp.headers.get('upload-offset') if offset is None: msg = 'Attempt to retrieve offset fails with status {}'.format(resp.status_code) raise TusCommunicationError(msg, resp.status_code, resp.content) return int(offset)
def function[get_offset, parameter[self]]: constant[ Return offset from tus server. This is different from the instance attribute 'offset' because this makes an http request to the tus server to retrieve the offset. ] variable[resp] assign[=] call[name[requests].head, parameter[name[self].url]] variable[offset] assign[=] call[name[resp].headers.get, parameter[constant[upload-offset]]] if compare[name[offset] is constant[None]] begin[:] variable[msg] assign[=] call[constant[Attempt to retrieve offset fails with status {}].format, parameter[name[resp].status_code]] <ast.Raise object at 0x7da20e954280> return[call[name[int], parameter[name[offset]]]]
keyword[def] identifier[get_offset] ( identifier[self] ): literal[string] identifier[resp] = identifier[requests] . identifier[head] ( identifier[self] . identifier[url] , identifier[headers] = identifier[self] . identifier[headers] ) identifier[offset] = identifier[resp] . identifier[headers] . identifier[get] ( literal[string] ) keyword[if] identifier[offset] keyword[is] keyword[None] : identifier[msg] = literal[string] . identifier[format] ( identifier[resp] . identifier[status_code] ) keyword[raise] identifier[TusCommunicationError] ( identifier[msg] , identifier[resp] . identifier[status_code] , identifier[resp] . identifier[content] ) keyword[return] identifier[int] ( identifier[offset] )
def get_offset(self): """ Return offset from tus server. This is different from the instance attribute 'offset' because this makes an http request to the tus server to retrieve the offset. """ resp = requests.head(self.url, headers=self.headers) offset = resp.headers.get('upload-offset') if offset is None: msg = 'Attempt to retrieve offset fails with status {}'.format(resp.status_code) raise TusCommunicationError(msg, resp.status_code, resp.content) # depends on [control=['if'], data=[]] return int(offset)
def _flush_ignored_control(self): """flush ignored control replies""" while self._ignored_control_replies > 0: self.session.recv(self._control_socket) self._ignored_control_replies -= 1
def function[_flush_ignored_control, parameter[self]]: constant[flush ignored control replies] while compare[name[self]._ignored_control_replies greater[>] constant[0]] begin[:] call[name[self].session.recv, parameter[name[self]._control_socket]] <ast.AugAssign object at 0x7da1b26ae2c0>
keyword[def] identifier[_flush_ignored_control] ( identifier[self] ): literal[string] keyword[while] identifier[self] . identifier[_ignored_control_replies] > literal[int] : identifier[self] . identifier[session] . identifier[recv] ( identifier[self] . identifier[_control_socket] ) identifier[self] . identifier[_ignored_control_replies] -= literal[int]
def _flush_ignored_control(self): """flush ignored control replies""" while self._ignored_control_replies > 0: self.session.recv(self._control_socket) self._ignored_control_replies -= 1 # depends on [control=['while'], data=[]]
def item_options(self, **kwargs): """ Handle collection OPTIONS request. Singular route requests are handled a bit differently because singular views may handle POST requests despite being registered as item routes. """ actions = self._item_actions.copy() if self._resource.is_singular: actions['create'] = ('POST',) methods = self._get_handled_methods(actions) return self._set_options_headers(methods)
def function[item_options, parameter[self]]: constant[ Handle collection OPTIONS request. Singular route requests are handled a bit differently because singular views may handle POST requests despite being registered as item routes. ] variable[actions] assign[=] call[name[self]._item_actions.copy, parameter[]] if name[self]._resource.is_singular begin[:] call[name[actions]][constant[create]] assign[=] tuple[[<ast.Constant object at 0x7da2041d9180>]] variable[methods] assign[=] call[name[self]._get_handled_methods, parameter[name[actions]]] return[call[name[self]._set_options_headers, parameter[name[methods]]]]
keyword[def] identifier[item_options] ( identifier[self] ,** identifier[kwargs] ): literal[string] identifier[actions] = identifier[self] . identifier[_item_actions] . identifier[copy] () keyword[if] identifier[self] . identifier[_resource] . identifier[is_singular] : identifier[actions] [ literal[string] ]=( literal[string] ,) identifier[methods] = identifier[self] . identifier[_get_handled_methods] ( identifier[actions] ) keyword[return] identifier[self] . identifier[_set_options_headers] ( identifier[methods] )
def item_options(self, **kwargs): """ Handle collection OPTIONS request. Singular route requests are handled a bit differently because singular views may handle POST requests despite being registered as item routes. """ actions = self._item_actions.copy() if self._resource.is_singular: actions['create'] = ('POST',) # depends on [control=['if'], data=[]] methods = self._get_handled_methods(actions) return self._set_options_headers(methods)
def git_status_all_repos(cat, hard=True, origin=False, clean=True): """Perform a 'git status' in each data repository. """ log = cat.log log.debug("gitter.git_status_all_repos()") all_repos = cat.PATHS.get_all_repo_folders() for repo_name in all_repos: log.info("Repo in: '{}'".format(repo_name)) # Get the initial git SHA sha_beg = get_sha(repo_name) log.debug("Current SHA: '{}'".format(sha_beg)) log.info("Fetching") fetch(repo_name, log=cat.log) git_comm = ["git", "status"] _call_command_in_repo( git_comm, repo_name, cat.log, fail=True, log_flag=True) sha_end = get_sha(repo_name) if sha_end != sha_beg: log.info("Updated SHA: '{}'".format(sha_end)) return
def function[git_status_all_repos, parameter[cat, hard, origin, clean]]: constant[Perform a 'git status' in each data repository. ] variable[log] assign[=] name[cat].log call[name[log].debug, parameter[constant[gitter.git_status_all_repos()]]] variable[all_repos] assign[=] call[name[cat].PATHS.get_all_repo_folders, parameter[]] for taget[name[repo_name]] in starred[name[all_repos]] begin[:] call[name[log].info, parameter[call[constant[Repo in: '{}'].format, parameter[name[repo_name]]]]] variable[sha_beg] assign[=] call[name[get_sha], parameter[name[repo_name]]] call[name[log].debug, parameter[call[constant[Current SHA: '{}'].format, parameter[name[sha_beg]]]]] call[name[log].info, parameter[constant[Fetching]]] call[name[fetch], parameter[name[repo_name]]] variable[git_comm] assign[=] list[[<ast.Constant object at 0x7da1b0fc7c40>, <ast.Constant object at 0x7da1b0fc4e20>]] call[name[_call_command_in_repo], parameter[name[git_comm], name[repo_name], name[cat].log]] variable[sha_end] assign[=] call[name[get_sha], parameter[name[repo_name]]] if compare[name[sha_end] not_equal[!=] name[sha_beg]] begin[:] call[name[log].info, parameter[call[constant[Updated SHA: '{}'].format, parameter[name[sha_end]]]]] return[None]
keyword[def] identifier[git_status_all_repos] ( identifier[cat] , identifier[hard] = keyword[True] , identifier[origin] = keyword[False] , identifier[clean] = keyword[True] ): literal[string] identifier[log] = identifier[cat] . identifier[log] identifier[log] . identifier[debug] ( literal[string] ) identifier[all_repos] = identifier[cat] . identifier[PATHS] . identifier[get_all_repo_folders] () keyword[for] identifier[repo_name] keyword[in] identifier[all_repos] : identifier[log] . identifier[info] ( literal[string] . identifier[format] ( identifier[repo_name] )) identifier[sha_beg] = identifier[get_sha] ( identifier[repo_name] ) identifier[log] . identifier[debug] ( literal[string] . identifier[format] ( identifier[sha_beg] )) identifier[log] . identifier[info] ( literal[string] ) identifier[fetch] ( identifier[repo_name] , identifier[log] = identifier[cat] . identifier[log] ) identifier[git_comm] =[ literal[string] , literal[string] ] identifier[_call_command_in_repo] ( identifier[git_comm] , identifier[repo_name] , identifier[cat] . identifier[log] , identifier[fail] = keyword[True] , identifier[log_flag] = keyword[True] ) identifier[sha_end] = identifier[get_sha] ( identifier[repo_name] ) keyword[if] identifier[sha_end] != identifier[sha_beg] : identifier[log] . identifier[info] ( literal[string] . identifier[format] ( identifier[sha_end] )) keyword[return]
def git_status_all_repos(cat, hard=True, origin=False, clean=True): """Perform a 'git status' in each data repository. """ log = cat.log log.debug('gitter.git_status_all_repos()') all_repos = cat.PATHS.get_all_repo_folders() for repo_name in all_repos: log.info("Repo in: '{}'".format(repo_name)) # Get the initial git SHA sha_beg = get_sha(repo_name) log.debug("Current SHA: '{}'".format(sha_beg)) log.info('Fetching') fetch(repo_name, log=cat.log) git_comm = ['git', 'status'] _call_command_in_repo(git_comm, repo_name, cat.log, fail=True, log_flag=True) sha_end = get_sha(repo_name) if sha_end != sha_beg: log.info("Updated SHA: '{}'".format(sha_end)) # depends on [control=['if'], data=['sha_end']] # depends on [control=['for'], data=['repo_name']] return
def get(self, section, option): """Gets an option value for a given section. Args: section (str): section name option (str): option name Returns: :class:`Option`: Option object holding key/value pair """ if not self.has_section(section): raise NoSectionError(section) from None section = self.__getitem__(section) option = self.optionxform(option) try: value = section[option] except KeyError: raise NoOptionError(option, section) return value
def function[get, parameter[self, section, option]]: constant[Gets an option value for a given section. Args: section (str): section name option (str): option name Returns: :class:`Option`: Option object holding key/value pair ] if <ast.UnaryOp object at 0x7da20e963880> begin[:] <ast.Raise object at 0x7da20e960c40> variable[section] assign[=] call[name[self].__getitem__, parameter[name[section]]] variable[option] assign[=] call[name[self].optionxform, parameter[name[option]]] <ast.Try object at 0x7da1b0fde2c0> return[name[value]]
keyword[def] identifier[get] ( identifier[self] , identifier[section] , identifier[option] ): literal[string] keyword[if] keyword[not] identifier[self] . identifier[has_section] ( identifier[section] ): keyword[raise] identifier[NoSectionError] ( identifier[section] ) keyword[from] keyword[None] identifier[section] = identifier[self] . identifier[__getitem__] ( identifier[section] ) identifier[option] = identifier[self] . identifier[optionxform] ( identifier[option] ) keyword[try] : identifier[value] = identifier[section] [ identifier[option] ] keyword[except] identifier[KeyError] : keyword[raise] identifier[NoOptionError] ( identifier[option] , identifier[section] ) keyword[return] identifier[value]
def get(self, section, option): """Gets an option value for a given section. Args: section (str): section name option (str): option name Returns: :class:`Option`: Option object holding key/value pair """ if not self.has_section(section): raise NoSectionError(section) from None # depends on [control=['if'], data=[]] section = self.__getitem__(section) option = self.optionxform(option) try: value = section[option] # depends on [control=['try'], data=[]] except KeyError: raise NoOptionError(option, section) # depends on [control=['except'], data=[]] return value
def get_default_calendar(self): """ Returns the default calendar for the current user :rtype: Calendar """ url = self.build_url(self._endpoints.get('default_calendar')) response = self.con.get(url) if not response: return None data = response.json() # Everything received from cloud must be passed as self._cloud_data_key return self.calendar_constructor(parent=self, **{self._cloud_data_key: data})
def function[get_default_calendar, parameter[self]]: constant[ Returns the default calendar for the current user :rtype: Calendar ] variable[url] assign[=] call[name[self].build_url, parameter[call[name[self]._endpoints.get, parameter[constant[default_calendar]]]]] variable[response] assign[=] call[name[self].con.get, parameter[name[url]]] if <ast.UnaryOp object at 0x7da1b1b2b940> begin[:] return[constant[None]] variable[data] assign[=] call[name[response].json, parameter[]] return[call[name[self].calendar_constructor, parameter[]]]
keyword[def] identifier[get_default_calendar] ( identifier[self] ): literal[string] identifier[url] = identifier[self] . identifier[build_url] ( identifier[self] . identifier[_endpoints] . identifier[get] ( literal[string] )) identifier[response] = identifier[self] . identifier[con] . identifier[get] ( identifier[url] ) keyword[if] keyword[not] identifier[response] : keyword[return] keyword[None] identifier[data] = identifier[response] . identifier[json] () keyword[return] identifier[self] . identifier[calendar_constructor] ( identifier[parent] = identifier[self] , **{ identifier[self] . identifier[_cloud_data_key] : identifier[data] })
def get_default_calendar(self): """ Returns the default calendar for the current user :rtype: Calendar """ url = self.build_url(self._endpoints.get('default_calendar')) response = self.con.get(url) if not response: return None # depends on [control=['if'], data=[]] data = response.json() # Everything received from cloud must be passed as self._cloud_data_key return self.calendar_constructor(parent=self, **{self._cloud_data_key: data})
def make_csv_tables(self): """ Builds the report as a list of csv tables with titles. """ logger.info('Generate csv report tables') report_parts = [] for sr in self.subreports: for data_item in sr.report_data: report_parts.append(TextPart(fmt='csv', text=data_item.csv, ext='csv')) return report_parts
def function[make_csv_tables, parameter[self]]: constant[ Builds the report as a list of csv tables with titles. ] call[name[logger].info, parameter[constant[Generate csv report tables]]] variable[report_parts] assign[=] list[[]] for taget[name[sr]] in starred[name[self].subreports] begin[:] for taget[name[data_item]] in starred[name[sr].report_data] begin[:] call[name[report_parts].append, parameter[call[name[TextPart], parameter[]]]] return[name[report_parts]]
keyword[def] identifier[make_csv_tables] ( identifier[self] ): literal[string] identifier[logger] . identifier[info] ( literal[string] ) identifier[report_parts] =[] keyword[for] identifier[sr] keyword[in] identifier[self] . identifier[subreports] : keyword[for] identifier[data_item] keyword[in] identifier[sr] . identifier[report_data] : identifier[report_parts] . identifier[append] ( identifier[TextPart] ( identifier[fmt] = literal[string] , identifier[text] = identifier[data_item] . identifier[csv] , identifier[ext] = literal[string] )) keyword[return] identifier[report_parts]
def make_csv_tables(self): """ Builds the report as a list of csv tables with titles. """ logger.info('Generate csv report tables') report_parts = [] for sr in self.subreports: for data_item in sr.report_data: report_parts.append(TextPart(fmt='csv', text=data_item.csv, ext='csv')) # depends on [control=['for'], data=['data_item']] # depends on [control=['for'], data=['sr']] return report_parts
def write(self, stream, message): '''write will write a message to a stream, first checking the encoding ''' if isinstance(message, bytes): message = message.decode('utf-8') stream.write(message)
def function[write, parameter[self, stream, message]]: constant[write will write a message to a stream, first checking the encoding ] if call[name[isinstance], parameter[name[message], name[bytes]]] begin[:] variable[message] assign[=] call[name[message].decode, parameter[constant[utf-8]]] call[name[stream].write, parameter[name[message]]]
keyword[def] identifier[write] ( identifier[self] , identifier[stream] , identifier[message] ): literal[string] keyword[if] identifier[isinstance] ( identifier[message] , identifier[bytes] ): identifier[message] = identifier[message] . identifier[decode] ( literal[string] ) identifier[stream] . identifier[write] ( identifier[message] )
def write(self, stream, message): """write will write a message to a stream, first checking the encoding """ if isinstance(message, bytes): message = message.decode('utf-8') # depends on [control=['if'], data=[]] stream.write(message)
def clone(estimator, safe=True): """Constructs a new estimator with the same parameters. Clone does a deep copy of the model in an estimator without actually copying attached data. It yields a new estimator with the same parameters that has not been fit on any data. Parameters ---------- estimator : estimator object, or list, tuple or set of objects The estimator or group of estimators to be cloned safe : boolean, optional If safe is false, clone will fall back to a deepcopy on objects that are not estimators. """ estimator_type = type(estimator) # XXX: not handling dictionaries if estimator_type in (list, tuple, set, frozenset): return estimator_type([clone(e, safe=safe) for e in estimator]) elif not hasattr(estimator, 'get_params'): if not safe: return copy.deepcopy(estimator) else: raise TypeError("Cannot clone object '%s' (type %s): " "it does not seem to be a scikit-learn estimator " "as it does not implement a 'get_params' methods." % (repr(estimator), type(estimator))) # TODO: this is a brute force method to make things work for parameter studies. #1135 # But this can potentially use a lot of memory in case of large input data, which is also copied then. # we need a way to distinguish input parameters from derived model parameters, which is currently only ensured for # estimators in the coordinates package. 
if hasattr(estimator, '_estimated') and estimator._estimated: return copy.deepcopy(estimator) klass = estimator.__class__ new_object_params = estimator.get_params(deep=False) for name, param in new_object_params.items(): new_object_params[name] = clone(param, safe=False) new_object = klass(**new_object_params) params_set = new_object.get_params(deep=False) # quick sanity check of the parameters of the clone for name in new_object_params: param1 = new_object_params[name] param2 = params_set[name] if param1 is param2: # this should always happen continue if isinstance(param1, np.ndarray): # For most ndarrays, we do not test for complete equality if not isinstance(param2, type(param1)): equality_test = False elif (param1.ndim > 0 and param1.shape[0] > 0 and isinstance(param2, np.ndarray) and param2.ndim > 0 and param2.shape[0] > 0): equality_test = ( param1.shape == param2.shape and param1.dtype == param2.dtype and (_first_and_last_element(param1) == _first_and_last_element(param2)) ) else: equality_test = np.all(param1 == param2) elif sparse.issparse(param1): # For sparse matrices equality doesn't work if not sparse.issparse(param2): equality_test = False elif param1.size == 0 or param2.size == 0: equality_test = ( param1.__class__ == param2.__class__ and param1.size == 0 and param2.size == 0 ) else: equality_test = ( param1.__class__ == param2.__class__ and (_first_and_last_element(param1) == _first_and_last_element(param2)) and param1.nnz == param2.nnz and param1.shape == param2.shape ) else: # fall back on standard equality equality_test = param1 == param2 if equality_test: warnings.warn("Estimator %s modifies parameters in __init__." " This behavior is deprecated as of 0.18 and " "support for this behavior will be removed in 0.20." % type(estimator).__name__, DeprecationWarning) else: raise RuntimeError('Cannot clone object %s, as the constructor ' 'does not seem to set parameter %s' % (estimator, name)) return new_object
def function[clone, parameter[estimator, safe]]: constant[Constructs a new estimator with the same parameters. Clone does a deep copy of the model in an estimator without actually copying attached data. It yields a new estimator with the same parameters that has not been fit on any data. Parameters ---------- estimator : estimator object, or list, tuple or set of objects The estimator or group of estimators to be cloned safe : boolean, optional If safe is false, clone will fall back to a deepcopy on objects that are not estimators. ] variable[estimator_type] assign[=] call[name[type], parameter[name[estimator]]] if compare[name[estimator_type] in tuple[[<ast.Name object at 0x7da18f00f340>, <ast.Name object at 0x7da18f00d7b0>, <ast.Name object at 0x7da18f00e4d0>, <ast.Name object at 0x7da18f00e110>]]] begin[:] return[call[name[estimator_type], parameter[<ast.ListComp object at 0x7da18f00c070>]]] if <ast.BoolOp object at 0x7da18f00fa60> begin[:] return[call[name[copy].deepcopy, parameter[name[estimator]]]] variable[klass] assign[=] name[estimator].__class__ variable[new_object_params] assign[=] call[name[estimator].get_params, parameter[]] for taget[tuple[[<ast.Name object at 0x7da18f00f8b0>, <ast.Name object at 0x7da18f00dc60>]]] in starred[call[name[new_object_params].items, parameter[]]] begin[:] call[name[new_object_params]][name[name]] assign[=] call[name[clone], parameter[name[param]]] variable[new_object] assign[=] call[name[klass], parameter[]] variable[params_set] assign[=] call[name[new_object].get_params, parameter[]] for taget[name[name]] in starred[name[new_object_params]] begin[:] variable[param1] assign[=] call[name[new_object_params]][name[name]] variable[param2] assign[=] call[name[params_set]][name[name]] if compare[name[param1] is name[param2]] begin[:] continue if call[name[isinstance], parameter[name[param1], name[np].ndarray]] begin[:] if <ast.UnaryOp object at 0x7da18f00e500> begin[:] variable[equality_test] assign[=] constant[False] if 
name[equality_test] begin[:] call[name[warnings].warn, parameter[binary_operation[constant[Estimator %s modifies parameters in __init__. This behavior is deprecated as of 0.18 and support for this behavior will be removed in 0.20.] <ast.Mod object at 0x7da2590d6920> call[name[type], parameter[name[estimator]]].__name__], name[DeprecationWarning]]] return[name[new_object]]
keyword[def] identifier[clone] ( identifier[estimator] , identifier[safe] = keyword[True] ): literal[string] identifier[estimator_type] = identifier[type] ( identifier[estimator] ) keyword[if] identifier[estimator_type] keyword[in] ( identifier[list] , identifier[tuple] , identifier[set] , identifier[frozenset] ): keyword[return] identifier[estimator_type] ([ identifier[clone] ( identifier[e] , identifier[safe] = identifier[safe] ) keyword[for] identifier[e] keyword[in] identifier[estimator] ]) keyword[elif] keyword[not] identifier[hasattr] ( identifier[estimator] , literal[string] ): keyword[if] keyword[not] identifier[safe] : keyword[return] identifier[copy] . identifier[deepcopy] ( identifier[estimator] ) keyword[else] : keyword[raise] identifier[TypeError] ( literal[string] literal[string] literal[string] %( identifier[repr] ( identifier[estimator] ), identifier[type] ( identifier[estimator] ))) keyword[if] identifier[hasattr] ( identifier[estimator] , literal[string] ) keyword[and] identifier[estimator] . identifier[_estimated] : keyword[return] identifier[copy] . identifier[deepcopy] ( identifier[estimator] ) identifier[klass] = identifier[estimator] . identifier[__class__] identifier[new_object_params] = identifier[estimator] . identifier[get_params] ( identifier[deep] = keyword[False] ) keyword[for] identifier[name] , identifier[param] keyword[in] identifier[new_object_params] . identifier[items] (): identifier[new_object_params] [ identifier[name] ]= identifier[clone] ( identifier[param] , identifier[safe] = keyword[False] ) identifier[new_object] = identifier[klass] (** identifier[new_object_params] ) identifier[params_set] = identifier[new_object] . 
identifier[get_params] ( identifier[deep] = keyword[False] ) keyword[for] identifier[name] keyword[in] identifier[new_object_params] : identifier[param1] = identifier[new_object_params] [ identifier[name] ] identifier[param2] = identifier[params_set] [ identifier[name] ] keyword[if] identifier[param1] keyword[is] identifier[param2] : keyword[continue] keyword[if] identifier[isinstance] ( identifier[param1] , identifier[np] . identifier[ndarray] ): keyword[if] keyword[not] identifier[isinstance] ( identifier[param2] , identifier[type] ( identifier[param1] )): identifier[equality_test] = keyword[False] keyword[elif] ( identifier[param1] . identifier[ndim] > literal[int] keyword[and] identifier[param1] . identifier[shape] [ literal[int] ]> literal[int] keyword[and] identifier[isinstance] ( identifier[param2] , identifier[np] . identifier[ndarray] ) keyword[and] identifier[param2] . identifier[ndim] > literal[int] keyword[and] identifier[param2] . identifier[shape] [ literal[int] ]> literal[int] ): identifier[equality_test] =( identifier[param1] . identifier[shape] == identifier[param2] . identifier[shape] keyword[and] identifier[param1] . identifier[dtype] == identifier[param2] . identifier[dtype] keyword[and] ( identifier[_first_and_last_element] ( identifier[param1] )== identifier[_first_and_last_element] ( identifier[param2] )) ) keyword[else] : identifier[equality_test] = identifier[np] . identifier[all] ( identifier[param1] == identifier[param2] ) keyword[elif] identifier[sparse] . identifier[issparse] ( identifier[param1] ): keyword[if] keyword[not] identifier[sparse] . identifier[issparse] ( identifier[param2] ): identifier[equality_test] = keyword[False] keyword[elif] identifier[param1] . identifier[size] == literal[int] keyword[or] identifier[param2] . identifier[size] == literal[int] : identifier[equality_test] =( identifier[param1] . identifier[__class__] == identifier[param2] . identifier[__class__] keyword[and] identifier[param1] . 
identifier[size] == literal[int] keyword[and] identifier[param2] . identifier[size] == literal[int] ) keyword[else] : identifier[equality_test] =( identifier[param1] . identifier[__class__] == identifier[param2] . identifier[__class__] keyword[and] ( identifier[_first_and_last_element] ( identifier[param1] )== identifier[_first_and_last_element] ( identifier[param2] )) keyword[and] identifier[param1] . identifier[nnz] == identifier[param2] . identifier[nnz] keyword[and] identifier[param1] . identifier[shape] == identifier[param2] . identifier[shape] ) keyword[else] : identifier[equality_test] = identifier[param1] == identifier[param2] keyword[if] identifier[equality_test] : identifier[warnings] . identifier[warn] ( literal[string] literal[string] literal[string] % identifier[type] ( identifier[estimator] ). identifier[__name__] , identifier[DeprecationWarning] ) keyword[else] : keyword[raise] identifier[RuntimeError] ( literal[string] literal[string] % ( identifier[estimator] , identifier[name] )) keyword[return] identifier[new_object]
def clone(estimator, safe=True): """Constructs a new estimator with the same parameters. Clone does a deep copy of the model in an estimator without actually copying attached data. It yields a new estimator with the same parameters that has not been fit on any data. Parameters ---------- estimator : estimator object, or list, tuple or set of objects The estimator or group of estimators to be cloned safe : boolean, optional If safe is false, clone will fall back to a deepcopy on objects that are not estimators. """ estimator_type = type(estimator) # XXX: not handling dictionaries if estimator_type in (list, tuple, set, frozenset): return estimator_type([clone(e, safe=safe) for e in estimator]) # depends on [control=['if'], data=['estimator_type']] elif not hasattr(estimator, 'get_params'): if not safe: return copy.deepcopy(estimator) # depends on [control=['if'], data=[]] else: raise TypeError("Cannot clone object '%s' (type %s): it does not seem to be a scikit-learn estimator as it does not implement a 'get_params' methods." % (repr(estimator), type(estimator))) # depends on [control=['if'], data=[]] # TODO: this is a brute force method to make things work for parameter studies. #1135 # But this can potentially use a lot of memory in case of large input data, which is also copied then. # we need a way to distinguish input parameters from derived model parameters, which is currently only ensured for # estimators in the coordinates package. 
if hasattr(estimator, '_estimated') and estimator._estimated: return copy.deepcopy(estimator) # depends on [control=['if'], data=[]] klass = estimator.__class__ new_object_params = estimator.get_params(deep=False) for (name, param) in new_object_params.items(): new_object_params[name] = clone(param, safe=False) # depends on [control=['for'], data=[]] new_object = klass(**new_object_params) params_set = new_object.get_params(deep=False) # quick sanity check of the parameters of the clone for name in new_object_params: param1 = new_object_params[name] param2 = params_set[name] if param1 is param2: # this should always happen continue # depends on [control=['if'], data=[]] if isinstance(param1, np.ndarray): # For most ndarrays, we do not test for complete equality if not isinstance(param2, type(param1)): equality_test = False # depends on [control=['if'], data=[]] elif param1.ndim > 0 and param1.shape[0] > 0 and isinstance(param2, np.ndarray) and (param2.ndim > 0) and (param2.shape[0] > 0): equality_test = param1.shape == param2.shape and param1.dtype == param2.dtype and (_first_and_last_element(param1) == _first_and_last_element(param2)) # depends on [control=['if'], data=[]] else: equality_test = np.all(param1 == param2) # depends on [control=['if'], data=[]] elif sparse.issparse(param1): # For sparse matrices equality doesn't work if not sparse.issparse(param2): equality_test = False # depends on [control=['if'], data=[]] elif param1.size == 0 or param2.size == 0: equality_test = param1.__class__ == param2.__class__ and param1.size == 0 and (param2.size == 0) # depends on [control=['if'], data=[]] else: equality_test = param1.__class__ == param2.__class__ and _first_and_last_element(param1) == _first_and_last_element(param2) and (param1.nnz == param2.nnz) and (param1.shape == param2.shape) # depends on [control=['if'], data=[]] else: # fall back on standard equality equality_test = param1 == param2 if equality_test: warnings.warn('Estimator %s modifies parameters 
in __init__. This behavior is deprecated as of 0.18 and support for this behavior will be removed in 0.20.' % type(estimator).__name__, DeprecationWarning) # depends on [control=['if'], data=[]] else: raise RuntimeError('Cannot clone object %s, as the constructor does not seem to set parameter %s' % (estimator, name)) # depends on [control=['for'], data=['name']] return new_object
def add_cssfile(self, src: str) -> None: """Add CSS file to load at this document's header.""" self.head.appendChild(Link(rel='stylesheet', href=src))
def function[add_cssfile, parameter[self, src]]: constant[Add CSS file to load at this document's header.] call[name[self].head.appendChild, parameter[call[name[Link], parameter[]]]]
keyword[def] identifier[add_cssfile] ( identifier[self] , identifier[src] : identifier[str] )-> keyword[None] : literal[string] identifier[self] . identifier[head] . identifier[appendChild] ( identifier[Link] ( identifier[rel] = literal[string] , identifier[href] = identifier[src] ))
def add_cssfile(self, src: str) -> None: """Add CSS file to load at this document's header.""" self.head.appendChild(Link(rel='stylesheet', href=src))
def today(self, symbol): """ GET /today/:symbol curl "https://api.bitfinex.com/v1/today/btcusd" {"low":"550.09","high":"572.2398","volume":"7305.33119836"} """ data = self._get(self.url_for(PATH_TODAY, (symbol))) # convert all values to floats return self._convert_to_floats(data)
def function[today, parameter[self, symbol]]: constant[ GET /today/:symbol curl "https://api.bitfinex.com/v1/today/btcusd" {"low":"550.09","high":"572.2398","volume":"7305.33119836"} ] variable[data] assign[=] call[name[self]._get, parameter[call[name[self].url_for, parameter[name[PATH_TODAY], name[symbol]]]]] return[call[name[self]._convert_to_floats, parameter[name[data]]]]
keyword[def] identifier[today] ( identifier[self] , identifier[symbol] ): literal[string] identifier[data] = identifier[self] . identifier[_get] ( identifier[self] . identifier[url_for] ( identifier[PATH_TODAY] ,( identifier[symbol] ))) keyword[return] identifier[self] . identifier[_convert_to_floats] ( identifier[data] )
def today(self, symbol): """ GET /today/:symbol curl "https://api.bitfinex.com/v1/today/btcusd" {"low":"550.09","high":"572.2398","volume":"7305.33119836"} """ data = self._get(self.url_for(PATH_TODAY, symbol)) # convert all values to floats return self._convert_to_floats(data)
def listTheExtras(self, deleteAlso): """ Use ConfigObj's get_extra_values() call to find any extra/unknown parameters we may have loaded. Return a string similar to findTheLost. If deleteAlso is True, this will also delete any extra/unknown items. """ # get list of extras extras = configobj.get_extra_values(self) # extras is in format: [(sections, key), (sections, key), ] # but we need: [(sections, key, result), ...] - set all results to # a bool just to make it the right shape. BUT, since we are in # here anyway, make that bool mean something - hide info in it about # whether that extra item is a section (1) or just a single par (0) # # simplified, this is: expanded = [ (x+(abool,)) for x in extras] expanded = [ (x+ \ ( bool(len(x[0])<1 and hasattr(self[x[1]], 'keys')), ) \ ) for x in extras] retval = '' if expanded: retval = flattened2str(expanded, extra=1) # but before we return, delete them (from ourself!) if requested to if deleteAlso: for tup_to_del in extras: target = self # descend the tree to the dict where this items is located. # (this works because target is not a copy (because the dict # type is mutable)) location = tup_to_del[0] for subdict in location: target = target[subdict] # delete it target.pop(tup_to_del[1]) return retval
def function[listTheExtras, parameter[self, deleteAlso]]: constant[ Use ConfigObj's get_extra_values() call to find any extra/unknown parameters we may have loaded. Return a string similar to findTheLost. If deleteAlso is True, this will also delete any extra/unknown items. ] variable[extras] assign[=] call[name[configobj].get_extra_values, parameter[name[self]]] variable[expanded] assign[=] <ast.ListComp object at 0x7da1b0e31870> variable[retval] assign[=] constant[] if name[expanded] begin[:] variable[retval] assign[=] call[name[flattened2str], parameter[name[expanded]]] if name[deleteAlso] begin[:] for taget[name[tup_to_del]] in starred[name[extras]] begin[:] variable[target] assign[=] name[self] variable[location] assign[=] call[name[tup_to_del]][constant[0]] for taget[name[subdict]] in starred[name[location]] begin[:] variable[target] assign[=] call[name[target]][name[subdict]] call[name[target].pop, parameter[call[name[tup_to_del]][constant[1]]]] return[name[retval]]
keyword[def] identifier[listTheExtras] ( identifier[self] , identifier[deleteAlso] ): literal[string] identifier[extras] = identifier[configobj] . identifier[get_extra_values] ( identifier[self] ) identifier[expanded] =[( identifier[x] +( identifier[bool] ( identifier[len] ( identifier[x] [ literal[int] ])< literal[int] keyword[and] identifier[hasattr] ( identifier[self] [ identifier[x] [ literal[int] ]], literal[string] )),)) keyword[for] identifier[x] keyword[in] identifier[extras] ] identifier[retval] = literal[string] keyword[if] identifier[expanded] : identifier[retval] = identifier[flattened2str] ( identifier[expanded] , identifier[extra] = literal[int] ) keyword[if] identifier[deleteAlso] : keyword[for] identifier[tup_to_del] keyword[in] identifier[extras] : identifier[target] = identifier[self] identifier[location] = identifier[tup_to_del] [ literal[int] ] keyword[for] identifier[subdict] keyword[in] identifier[location] : identifier[target] = identifier[target] [ identifier[subdict] ] identifier[target] . identifier[pop] ( identifier[tup_to_del] [ literal[int] ]) keyword[return] identifier[retval]
def listTheExtras(self, deleteAlso): """ Use ConfigObj's get_extra_values() call to find any extra/unknown parameters we may have loaded. Return a string similar to findTheLost. If deleteAlso is True, this will also delete any extra/unknown items. """ # get list of extras extras = configobj.get_extra_values(self) # extras is in format: [(sections, key), (sections, key), ] # but we need: [(sections, key, result), ...] - set all results to # a bool just to make it the right shape. BUT, since we are in # here anyway, make that bool mean something - hide info in it about # whether that extra item is a section (1) or just a single par (0) # # simplified, this is: expanded = [ (x+(abool,)) for x in extras] expanded = [x + (bool(len(x[0]) < 1 and hasattr(self[x[1]], 'keys')),) for x in extras] retval = '' if expanded: retval = flattened2str(expanded, extra=1) # depends on [control=['if'], data=[]] # but before we return, delete them (from ourself!) if requested to if deleteAlso: for tup_to_del in extras: target = self # descend the tree to the dict where this items is located. # (this works because target is not a copy (because the dict # type is mutable)) location = tup_to_del[0] for subdict in location: target = target[subdict] # depends on [control=['for'], data=['subdict']] # delete it target.pop(tup_to_del[1]) # depends on [control=['for'], data=['tup_to_del']] # depends on [control=['if'], data=[]] return retval
def get_backend(config): """ Returns a backend instance based on the Daemon config file. """ backend_string = get_conf(config, 'Dagobahd.backend', None) if backend_string is None: from ..backend.base import BaseBackend return BaseBackend() elif backend_string.lower() == 'mongo': backend_kwargs = {} for conf_kwarg in ['host', 'port', 'db', 'dagobah_collection', 'job_collection', 'log_collection']: backend_kwargs[conf_kwarg] = get_conf(config, 'MongoBackend.%s' % conf_kwarg) backend_kwargs['port'] = int(backend_kwargs['port']) try: from ..backend.mongo import MongoBackend except: raise ImportError('Could not initialize the MongoDB Backend. Are you sure' + ' the optional drivers are installed? If not, try running ' + '"pip install pymongo" to install them.') return MongoBackend(**backend_kwargs) raise ValueError('unknown backend type specified in conf')
def function[get_backend, parameter[config]]: constant[ Returns a backend instance based on the Daemon config file. ] variable[backend_string] assign[=] call[name[get_conf], parameter[name[config], constant[Dagobahd.backend], constant[None]]] if compare[name[backend_string] is constant[None]] begin[:] from relative_module[backend.base] import module[BaseBackend] return[call[name[BaseBackend], parameter[]]] <ast.Raise object at 0x7da1b0b7e080>
keyword[def] identifier[get_backend] ( identifier[config] ): literal[string] identifier[backend_string] = identifier[get_conf] ( identifier[config] , literal[string] , keyword[None] ) keyword[if] identifier[backend_string] keyword[is] keyword[None] : keyword[from] .. identifier[backend] . identifier[base] keyword[import] identifier[BaseBackend] keyword[return] identifier[BaseBackend] () keyword[elif] identifier[backend_string] . identifier[lower] ()== literal[string] : identifier[backend_kwargs] ={} keyword[for] identifier[conf_kwarg] keyword[in] [ literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] ]: identifier[backend_kwargs] [ identifier[conf_kwarg] ]= identifier[get_conf] ( identifier[config] , literal[string] % identifier[conf_kwarg] ) identifier[backend_kwargs] [ literal[string] ]= identifier[int] ( identifier[backend_kwargs] [ literal[string] ]) keyword[try] : keyword[from] .. identifier[backend] . identifier[mongo] keyword[import] identifier[MongoBackend] keyword[except] : keyword[raise] identifier[ImportError] ( literal[string] + literal[string] + literal[string] ) keyword[return] identifier[MongoBackend] (** identifier[backend_kwargs] ) keyword[raise] identifier[ValueError] ( literal[string] )
def get_backend(config): """ Returns a backend instance based on the Daemon config file. """ backend_string = get_conf(config, 'Dagobahd.backend', None) if backend_string is None: from ..backend.base import BaseBackend return BaseBackend() # depends on [control=['if'], data=[]] elif backend_string.lower() == 'mongo': backend_kwargs = {} for conf_kwarg in ['host', 'port', 'db', 'dagobah_collection', 'job_collection', 'log_collection']: backend_kwargs[conf_kwarg] = get_conf(config, 'MongoBackend.%s' % conf_kwarg) # depends on [control=['for'], data=['conf_kwarg']] backend_kwargs['port'] = int(backend_kwargs['port']) try: from ..backend.mongo import MongoBackend # depends on [control=['try'], data=[]] except: raise ImportError('Could not initialize the MongoDB Backend. Are you sure' + ' the optional drivers are installed? If not, try running ' + '"pip install pymongo" to install them.') # depends on [control=['except'], data=[]] return MongoBackend(**backend_kwargs) # depends on [control=['if'], data=[]] raise ValueError('unknown backend type specified in conf')
def isScreenOn(self): """ Checks if the screen is ON. @return: True if the device screen is ON """ self.__checkTransport() screenOnRE = re.compile('mScreenOnFully=(true|false)') m = screenOnRE.search(self.shell('dumpsys window policy')) if m: return m.group(1) == 'true' raise RuntimeError("Couldn't determine screen ON state")
def function[isScreenOn, parameter[self]]: constant[ Checks if the screen is ON. @return: True if the device screen is ON ] call[name[self].__checkTransport, parameter[]] variable[screenOnRE] assign[=] call[name[re].compile, parameter[constant[mScreenOnFully=(true|false)]]] variable[m] assign[=] call[name[screenOnRE].search, parameter[call[name[self].shell, parameter[constant[dumpsys window policy]]]]] if name[m] begin[:] return[compare[call[name[m].group, parameter[constant[1]]] equal[==] constant[true]]] <ast.Raise object at 0x7da1b1e68ac0>
keyword[def] identifier[isScreenOn] ( identifier[self] ): literal[string] identifier[self] . identifier[__checkTransport] () identifier[screenOnRE] = identifier[re] . identifier[compile] ( literal[string] ) identifier[m] = identifier[screenOnRE] . identifier[search] ( identifier[self] . identifier[shell] ( literal[string] )) keyword[if] identifier[m] : keyword[return] identifier[m] . identifier[group] ( literal[int] )== literal[string] keyword[raise] identifier[RuntimeError] ( literal[string] )
def isScreenOn(self): """ Checks if the screen is ON. @return: True if the device screen is ON """ self.__checkTransport() screenOnRE = re.compile('mScreenOnFully=(true|false)') m = screenOnRE.search(self.shell('dumpsys window policy')) if m: return m.group(1) == 'true' # depends on [control=['if'], data=[]] raise RuntimeError("Couldn't determine screen ON state")
def write_roc(roc_structure, inputfilename, options, fw_type = None): """ writes ROC output :param roc_structure: a.k.a auc_structure, generated in /common_tools/classification/make_auc_structure() :param inputfilename: name of the file specified on the command line that contains the ensemble. This will be used to generate the name of the file that contains the ROC data. :param options: :return: """ # check if ./ROC_DATA exists. If not, create it rocdir = os.path.join(os.getcwd(), 'ROC_DATA') if not os.path.exists(rocdir): os.makedirs(rocdir) # generate the root name of the file that will hold the roc data if len(inputfilename.split('.')) == 1: rootname = inputfilename.split('.') else: rootname = inputfilename.split('.csv')[0] # add '.csv' to the root name to give the name of the file if fw_type: filename = '%s_fw_roc.csv' % rootname else: filename = '%s_roc.csv' % rootname # the path to the file file = os.path.join(rocdir, filename) # open file and create a csv writer object f = open(file, 'w') rocwriter = csv.writer(f) # create header header = ['id', 'score', 'score source', 'status', 'fpf', 'tpf'] rocwriter.writerow(header) # write contents for tup in roc_structure: rocwriter.writerow(list(tup)) f.close()
def function[write_roc, parameter[roc_structure, inputfilename, options, fw_type]]: constant[ writes ROC output :param roc_structure: a.k.a auc_structure, generated in /common_tools/classification/make_auc_structure() :param inputfilename: name of the file specified on the command line that contains the ensemble. This will be used to generate the name of the file that contains the ROC data. :param options: :return: ] variable[rocdir] assign[=] call[name[os].path.join, parameter[call[name[os].getcwd, parameter[]], constant[ROC_DATA]]] if <ast.UnaryOp object at 0x7da20e9577c0> begin[:] call[name[os].makedirs, parameter[name[rocdir]]] if compare[call[name[len], parameter[call[name[inputfilename].split, parameter[constant[.]]]]] equal[==] constant[1]] begin[:] variable[rootname] assign[=] call[name[inputfilename].split, parameter[constant[.]]] if name[fw_type] begin[:] variable[filename] assign[=] binary_operation[constant[%s_fw_roc.csv] <ast.Mod object at 0x7da2590d6920> name[rootname]] variable[file] assign[=] call[name[os].path.join, parameter[name[rocdir], name[filename]]] variable[f] assign[=] call[name[open], parameter[name[file], constant[w]]] variable[rocwriter] assign[=] call[name[csv].writer, parameter[name[f]]] variable[header] assign[=] list[[<ast.Constant object at 0x7da20e957c10>, <ast.Constant object at 0x7da20e956740>, <ast.Constant object at 0x7da20e957760>, <ast.Constant object at 0x7da20e957c40>, <ast.Constant object at 0x7da20e956a40>, <ast.Constant object at 0x7da20e956b30>]] call[name[rocwriter].writerow, parameter[name[header]]] for taget[name[tup]] in starred[name[roc_structure]] begin[:] call[name[rocwriter].writerow, parameter[call[name[list], parameter[name[tup]]]]] call[name[f].close, parameter[]]
keyword[def] identifier[write_roc] ( identifier[roc_structure] , identifier[inputfilename] , identifier[options] , identifier[fw_type] = keyword[None] ): literal[string] identifier[rocdir] = identifier[os] . identifier[path] . identifier[join] ( identifier[os] . identifier[getcwd] (), literal[string] ) keyword[if] keyword[not] identifier[os] . identifier[path] . identifier[exists] ( identifier[rocdir] ): identifier[os] . identifier[makedirs] ( identifier[rocdir] ) keyword[if] identifier[len] ( identifier[inputfilename] . identifier[split] ( literal[string] ))== literal[int] : identifier[rootname] = identifier[inputfilename] . identifier[split] ( literal[string] ) keyword[else] : identifier[rootname] = identifier[inputfilename] . identifier[split] ( literal[string] )[ literal[int] ] keyword[if] identifier[fw_type] : identifier[filename] = literal[string] % identifier[rootname] keyword[else] : identifier[filename] = literal[string] % identifier[rootname] identifier[file] = identifier[os] . identifier[path] . identifier[join] ( identifier[rocdir] , identifier[filename] ) identifier[f] = identifier[open] ( identifier[file] , literal[string] ) identifier[rocwriter] = identifier[csv] . identifier[writer] ( identifier[f] ) identifier[header] =[ literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] ] identifier[rocwriter] . identifier[writerow] ( identifier[header] ) keyword[for] identifier[tup] keyword[in] identifier[roc_structure] : identifier[rocwriter] . identifier[writerow] ( identifier[list] ( identifier[tup] )) identifier[f] . identifier[close] ()
def write_roc(roc_structure, inputfilename, options, fw_type=None): """ writes ROC output :param roc_structure: a.k.a auc_structure, generated in /common_tools/classification/make_auc_structure() :param inputfilename: name of the file specified on the command line that contains the ensemble. This will be used to generate the name of the file that contains the ROC data. :param options: :return: """ # check if ./ROC_DATA exists. If not, create it rocdir = os.path.join(os.getcwd(), 'ROC_DATA') if not os.path.exists(rocdir): os.makedirs(rocdir) # depends on [control=['if'], data=[]] # generate the root name of the file that will hold the roc data if len(inputfilename.split('.')) == 1: rootname = inputfilename.split('.') # depends on [control=['if'], data=[]] else: rootname = inputfilename.split('.csv')[0] # add '.csv' to the root name to give the name of the file if fw_type: filename = '%s_fw_roc.csv' % rootname # depends on [control=['if'], data=[]] else: filename = '%s_roc.csv' % rootname # the path to the file file = os.path.join(rocdir, filename) # open file and create a csv writer object f = open(file, 'w') rocwriter = csv.writer(f) # create header header = ['id', 'score', 'score source', 'status', 'fpf', 'tpf'] rocwriter.writerow(header) # write contents for tup in roc_structure: rocwriter.writerow(list(tup)) # depends on [control=['for'], data=['tup']] f.close()
def init(ctx, client, directory, name, force, use_external_storage): """Initialize a project.""" if not client.use_external_storage: use_external_storage = False ctx.obj = client = attr.evolve( client, path=directory, use_external_storage=use_external_storage, ) msg = 'Initialized empty project in {path}' branch_name = None stack = contextlib.ExitStack() if force and client.repo: msg = 'Initialized project in {path} (branch {branch_name})' merge_args = ['--no-ff', '-s', 'recursive', '-X', 'ours'] try: commit = client.find_previous_commit( str(client.renku_metadata_path), ) branch_name = 'renku/init/' + str(commit) except KeyError: from git import NULL_TREE commit = NULL_TREE branch_name = 'renku/init/root' merge_args.append('--allow-unrelated-histories') ctx.obj = client = stack.enter_context( client.worktree( branch_name=branch_name, commit=commit, merge_args=merge_args, ) ) try: with client.lock: path = client.init_repository(name=name, force=force) except FileExistsError: raise click.UsageError( 'Renku repository is not empty. ' 'Please use --force flag to use the directory as Renku ' 'repository.' ) stack.enter_context(client.commit()) with stack: # Install Git hooks. from .githooks import install ctx.invoke(install, force=force) # Create all necessary template files. from .runner import template ctx.invoke(template, force=force) click.echo(msg.format(path=path, branch_name=branch_name))
def function[init, parameter[ctx, client, directory, name, force, use_external_storage]]: constant[Initialize a project.] if <ast.UnaryOp object at 0x7da1b02e4c70> begin[:] variable[use_external_storage] assign[=] constant[False] name[ctx].obj assign[=] call[name[attr].evolve, parameter[name[client]]] variable[msg] assign[=] constant[Initialized empty project in {path}] variable[branch_name] assign[=] constant[None] variable[stack] assign[=] call[name[contextlib].ExitStack, parameter[]] if <ast.BoolOp object at 0x7da1b02e75b0> begin[:] variable[msg] assign[=] constant[Initialized project in {path} (branch {branch_name})] variable[merge_args] assign[=] list[[<ast.Constant object at 0x7da1b02e4af0>, <ast.Constant object at 0x7da1b02e67d0>, <ast.Constant object at 0x7da1b02e43a0>, <ast.Constant object at 0x7da1b02e6fb0>, <ast.Constant object at 0x7da1b02e4460>]] <ast.Try object at 0x7da1b02e63b0> name[ctx].obj assign[=] call[name[stack].enter_context, parameter[call[name[client].worktree, parameter[]]]] <ast.Try object at 0x7da1b02e52d0> call[name[stack].enter_context, parameter[call[name[client].commit, parameter[]]]] with name[stack] begin[:] from relative_module[githooks] import module[install] call[name[ctx].invoke, parameter[name[install]]] from relative_module[runner] import module[template] call[name[ctx].invoke, parameter[name[template]]] call[name[click].echo, parameter[call[name[msg].format, parameter[]]]]
keyword[def] identifier[init] ( identifier[ctx] , identifier[client] , identifier[directory] , identifier[name] , identifier[force] , identifier[use_external_storage] ): literal[string] keyword[if] keyword[not] identifier[client] . identifier[use_external_storage] : identifier[use_external_storage] = keyword[False] identifier[ctx] . identifier[obj] = identifier[client] = identifier[attr] . identifier[evolve] ( identifier[client] , identifier[path] = identifier[directory] , identifier[use_external_storage] = identifier[use_external_storage] , ) identifier[msg] = literal[string] identifier[branch_name] = keyword[None] identifier[stack] = identifier[contextlib] . identifier[ExitStack] () keyword[if] identifier[force] keyword[and] identifier[client] . identifier[repo] : identifier[msg] = literal[string] identifier[merge_args] =[ literal[string] , literal[string] , literal[string] , literal[string] , literal[string] ] keyword[try] : identifier[commit] = identifier[client] . identifier[find_previous_commit] ( identifier[str] ( identifier[client] . identifier[renku_metadata_path] ), ) identifier[branch_name] = literal[string] + identifier[str] ( identifier[commit] ) keyword[except] identifier[KeyError] : keyword[from] identifier[git] keyword[import] identifier[NULL_TREE] identifier[commit] = identifier[NULL_TREE] identifier[branch_name] = literal[string] identifier[merge_args] . identifier[append] ( literal[string] ) identifier[ctx] . identifier[obj] = identifier[client] = identifier[stack] . identifier[enter_context] ( identifier[client] . identifier[worktree] ( identifier[branch_name] = identifier[branch_name] , identifier[commit] = identifier[commit] , identifier[merge_args] = identifier[merge_args] , ) ) keyword[try] : keyword[with] identifier[client] . identifier[lock] : identifier[path] = identifier[client] . 
identifier[init_repository] ( identifier[name] = identifier[name] , identifier[force] = identifier[force] ) keyword[except] identifier[FileExistsError] : keyword[raise] identifier[click] . identifier[UsageError] ( literal[string] literal[string] literal[string] ) identifier[stack] . identifier[enter_context] ( identifier[client] . identifier[commit] ()) keyword[with] identifier[stack] : keyword[from] . identifier[githooks] keyword[import] identifier[install] identifier[ctx] . identifier[invoke] ( identifier[install] , identifier[force] = identifier[force] ) keyword[from] . identifier[runner] keyword[import] identifier[template] identifier[ctx] . identifier[invoke] ( identifier[template] , identifier[force] = identifier[force] ) identifier[click] . identifier[echo] ( identifier[msg] . identifier[format] ( identifier[path] = identifier[path] , identifier[branch_name] = identifier[branch_name] ))
def init(ctx, client, directory, name, force, use_external_storage): """Initialize a project.""" if not client.use_external_storage: use_external_storage = False # depends on [control=['if'], data=[]] ctx.obj = client = attr.evolve(client, path=directory, use_external_storage=use_external_storage) msg = 'Initialized empty project in {path}' branch_name = None stack = contextlib.ExitStack() if force and client.repo: msg = 'Initialized project in {path} (branch {branch_name})' merge_args = ['--no-ff', '-s', 'recursive', '-X', 'ours'] try: commit = client.find_previous_commit(str(client.renku_metadata_path)) branch_name = 'renku/init/' + str(commit) # depends on [control=['try'], data=[]] except KeyError: from git import NULL_TREE commit = NULL_TREE branch_name = 'renku/init/root' merge_args.append('--allow-unrelated-histories') # depends on [control=['except'], data=[]] ctx.obj = client = stack.enter_context(client.worktree(branch_name=branch_name, commit=commit, merge_args=merge_args)) # depends on [control=['if'], data=[]] try: with client.lock: path = client.init_repository(name=name, force=force) # depends on [control=['with'], data=[]] # depends on [control=['try'], data=[]] except FileExistsError: raise click.UsageError('Renku repository is not empty. Please use --force flag to use the directory as Renku repository.') # depends on [control=['except'], data=[]] stack.enter_context(client.commit()) with stack: # Install Git hooks. from .githooks import install ctx.invoke(install, force=force) # Create all necessary template files. from .runner import template ctx.invoke(template, force=force) # depends on [control=['with'], data=[]] click.echo(msg.format(path=path, branch_name=branch_name))
def _get_package_name(prefix=settings.TEMP_DIR, book_id=None): """ Return package path. Use uuid to generate package's directory name. Args: book_id (str, default None): UUID of the book. prefix (str, default settings.TEMP_DIR): Where the package will be stored. Default :attr:`settings.TEMP_DIR`. Returns: str: Path to the root directory. """ if book_id is None: book_id = str(uuid.uuid4()) return os.path.join(prefix, book_id)
def function[_get_package_name, parameter[prefix, book_id]]: constant[ Return package path. Use uuid to generate package's directory name. Args: book_id (str, default None): UUID of the book. prefix (str, default settings.TEMP_DIR): Where the package will be stored. Default :attr:`settings.TEMP_DIR`. Returns: str: Path to the root directory. ] if compare[name[book_id] is constant[None]] begin[:] variable[book_id] assign[=] call[name[str], parameter[call[name[uuid].uuid4, parameter[]]]] return[call[name[os].path.join, parameter[name[prefix], name[book_id]]]]
keyword[def] identifier[_get_package_name] ( identifier[prefix] = identifier[settings] . identifier[TEMP_DIR] , identifier[book_id] = keyword[None] ): literal[string] keyword[if] identifier[book_id] keyword[is] keyword[None] : identifier[book_id] = identifier[str] ( identifier[uuid] . identifier[uuid4] ()) keyword[return] identifier[os] . identifier[path] . identifier[join] ( identifier[prefix] , identifier[book_id] )
def _get_package_name(prefix=settings.TEMP_DIR, book_id=None): """ Return package path. Use uuid to generate package's directory name. Args: book_id (str, default None): UUID of the book. prefix (str, default settings.TEMP_DIR): Where the package will be stored. Default :attr:`settings.TEMP_DIR`. Returns: str: Path to the root directory. """ if book_id is None: book_id = str(uuid.uuid4()) # depends on [control=['if'], data=['book_id']] return os.path.join(prefix, book_id)
def Target_setDiscoverTargets(self, discover): """ Function path: Target.setDiscoverTargets Domain: Target Method name: setDiscoverTargets Parameters: Required arguments: 'discover' (type: boolean) -> Whether to discover available targets. No return value. Description: Controls whether to discover available targets and notify via <code>targetCreated/targetInfoChanged/targetDestroyed</code> events. """ assert isinstance(discover, (bool,) ), "Argument 'discover' must be of type '['bool']'. Received type: '%s'" % type( discover) subdom_funcs = self.synchronous_command('Target.setDiscoverTargets', discover=discover) return subdom_funcs
def function[Target_setDiscoverTargets, parameter[self, discover]]: constant[ Function path: Target.setDiscoverTargets Domain: Target Method name: setDiscoverTargets Parameters: Required arguments: 'discover' (type: boolean) -> Whether to discover available targets. No return value. Description: Controls whether to discover available targets and notify via <code>targetCreated/targetInfoChanged/targetDestroyed</code> events. ] assert[call[name[isinstance], parameter[name[discover], tuple[[<ast.Name object at 0x7da1b101f1f0>]]]]] variable[subdom_funcs] assign[=] call[name[self].synchronous_command, parameter[constant[Target.setDiscoverTargets]]] return[name[subdom_funcs]]
keyword[def] identifier[Target_setDiscoverTargets] ( identifier[self] , identifier[discover] ): literal[string] keyword[assert] identifier[isinstance] ( identifier[discover] ,( identifier[bool] ,) ), literal[string] % identifier[type] ( identifier[discover] ) identifier[subdom_funcs] = identifier[self] . identifier[synchronous_command] ( literal[string] , identifier[discover] = identifier[discover] ) keyword[return] identifier[subdom_funcs]
def Target_setDiscoverTargets(self, discover): """ Function path: Target.setDiscoverTargets Domain: Target Method name: setDiscoverTargets Parameters: Required arguments: 'discover' (type: boolean) -> Whether to discover available targets. No return value. Description: Controls whether to discover available targets and notify via <code>targetCreated/targetInfoChanged/targetDestroyed</code> events. """ assert isinstance(discover, (bool,)), "Argument 'discover' must be of type '['bool']'. Received type: '%s'" % type(discover) subdom_funcs = self.synchronous_command('Target.setDiscoverTargets', discover=discover) return subdom_funcs
def run_game_of_life(years, width, height, time_delay, silent="N"): """ run a single game of life for 'years' and log start and end living cells to aikif """ lfe = mod_grid.GameOfLife(width, height, ['.', 'x'], 1) set_random_starting_grid(lfe) lg.record_source(lfe, 'game_of_life_console.py') print(lfe) start_cells = lfe.count_filled_positions() for ndx, dummy_idx in enumerate(range(years)): lfe.update_gol() if silent == "N": print_there(1,1, "Game of Life - Iteration # " + str(ndx)) print_there(1, 2, lfe) time.sleep(time_delay) end_cells = lfe.count_filled_positions() return start_cells, end_cells
def function[run_game_of_life, parameter[years, width, height, time_delay, silent]]: constant[ run a single game of life for 'years' and log start and end living cells to aikif ] variable[lfe] assign[=] call[name[mod_grid].GameOfLife, parameter[name[width], name[height], list[[<ast.Constant object at 0x7da18fe914e0>, <ast.Constant object at 0x7da18fe913c0>]], constant[1]]] call[name[set_random_starting_grid], parameter[name[lfe]]] call[name[lg].record_source, parameter[name[lfe], constant[game_of_life_console.py]]] call[name[print], parameter[name[lfe]]] variable[start_cells] assign[=] call[name[lfe].count_filled_positions, parameter[]] for taget[tuple[[<ast.Name object at 0x7da18fe929b0>, <ast.Name object at 0x7da18fe91f90>]]] in starred[call[name[enumerate], parameter[call[name[range], parameter[name[years]]]]]] begin[:] call[name[lfe].update_gol, parameter[]] if compare[name[silent] equal[==] constant[N]] begin[:] call[name[print_there], parameter[constant[1], constant[1], binary_operation[constant[Game of Life - Iteration # ] + call[name[str], parameter[name[ndx]]]]]] call[name[print_there], parameter[constant[1], constant[2], name[lfe]]] call[name[time].sleep, parameter[name[time_delay]]] variable[end_cells] assign[=] call[name[lfe].count_filled_positions, parameter[]] return[tuple[[<ast.Name object at 0x7da18fe905e0>, <ast.Name object at 0x7da18fe92e30>]]]
keyword[def] identifier[run_game_of_life] ( identifier[years] , identifier[width] , identifier[height] , identifier[time_delay] , identifier[silent] = literal[string] ): literal[string] identifier[lfe] = identifier[mod_grid] . identifier[GameOfLife] ( identifier[width] , identifier[height] ,[ literal[string] , literal[string] ], literal[int] ) identifier[set_random_starting_grid] ( identifier[lfe] ) identifier[lg] . identifier[record_source] ( identifier[lfe] , literal[string] ) identifier[print] ( identifier[lfe] ) identifier[start_cells] = identifier[lfe] . identifier[count_filled_positions] () keyword[for] identifier[ndx] , identifier[dummy_idx] keyword[in] identifier[enumerate] ( identifier[range] ( identifier[years] )): identifier[lfe] . identifier[update_gol] () keyword[if] identifier[silent] == literal[string] : identifier[print_there] ( literal[int] , literal[int] , literal[string] + identifier[str] ( identifier[ndx] )) identifier[print_there] ( literal[int] , literal[int] , identifier[lfe] ) identifier[time] . identifier[sleep] ( identifier[time_delay] ) identifier[end_cells] = identifier[lfe] . identifier[count_filled_positions] () keyword[return] identifier[start_cells] , identifier[end_cells]
def run_game_of_life(years, width, height, time_delay, silent='N'): """ run a single game of life for 'years' and log start and end living cells to aikif """ lfe = mod_grid.GameOfLife(width, height, ['.', 'x'], 1) set_random_starting_grid(lfe) lg.record_source(lfe, 'game_of_life_console.py') print(lfe) start_cells = lfe.count_filled_positions() for (ndx, dummy_idx) in enumerate(range(years)): lfe.update_gol() if silent == 'N': print_there(1, 1, 'Game of Life - Iteration # ' + str(ndx)) print_there(1, 2, lfe) time.sleep(time_delay) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]] end_cells = lfe.count_filled_positions() return (start_cells, end_cells)
def set(self, key, value): """Set a value in the `Bison` configuration. Args: key (str): The configuration key to set a new value for. value: The value to set. """ # the configuration changes, so we invalidate the cached config self._full_config = None self._override[key] = value
def function[set, parameter[self, key, value]]: constant[Set a value in the `Bison` configuration. Args: key (str): The configuration key to set a new value for. value: The value to set. ] name[self]._full_config assign[=] constant[None] call[name[self]._override][name[key]] assign[=] name[value]
keyword[def] identifier[set] ( identifier[self] , identifier[key] , identifier[value] ): literal[string] identifier[self] . identifier[_full_config] = keyword[None] identifier[self] . identifier[_override] [ identifier[key] ]= identifier[value]
def set(self, key, value): """Set a value in the `Bison` configuration. Args: key (str): The configuration key to set a new value for. value: The value to set. """ # the configuration changes, so we invalidate the cached config self._full_config = None self._override[key] = value
def cp(hdfs_src, hdfs_dst): """Copy a file :param hdfs_src: Source (str) :param hdfs_dst: Destination (str) :raises: IOError: If unsuccessful """ cmd = "hadoop fs -cp %s %s" % (hdfs_src, hdfs_dst) rcode, stdout, stderr = _checked_hadoop_fs_command(cmd)
def function[cp, parameter[hdfs_src, hdfs_dst]]: constant[Copy a file :param hdfs_src: Source (str) :param hdfs_dst: Destination (str) :raises: IOError: If unsuccessful ] variable[cmd] assign[=] binary_operation[constant[hadoop fs -cp %s %s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da1b0e25fc0>, <ast.Name object at 0x7da1b0e27b20>]]] <ast.Tuple object at 0x7da1b0e25990> assign[=] call[name[_checked_hadoop_fs_command], parameter[name[cmd]]]
keyword[def] identifier[cp] ( identifier[hdfs_src] , identifier[hdfs_dst] ): literal[string] identifier[cmd] = literal[string] %( identifier[hdfs_src] , identifier[hdfs_dst] ) identifier[rcode] , identifier[stdout] , identifier[stderr] = identifier[_checked_hadoop_fs_command] ( identifier[cmd] )
def cp(hdfs_src, hdfs_dst): """Copy a file :param hdfs_src: Source (str) :param hdfs_dst: Destination (str) :raises: IOError: If unsuccessful """ cmd = 'hadoop fs -cp %s %s' % (hdfs_src, hdfs_dst) (rcode, stdout, stderr) = _checked_hadoop_fs_command(cmd)
def update_fw_local_cache(self, net, direc, start): """Update the fw dict with Net ID and service IP. """ fw_dict = self.get_fw_dict() if direc == 'in': fw_dict.update({'in_network_id': net, 'in_service_ip': start}) else: fw_dict.update({'out_network_id': net, 'out_service_ip': start}) self.update_fw_dict(fw_dict)
def function[update_fw_local_cache, parameter[self, net, direc, start]]: constant[Update the fw dict with Net ID and service IP. ] variable[fw_dict] assign[=] call[name[self].get_fw_dict, parameter[]] if compare[name[direc] equal[==] constant[in]] begin[:] call[name[fw_dict].update, parameter[dictionary[[<ast.Constant object at 0x7da2041dbeb0>, <ast.Constant object at 0x7da2041dab00>], [<ast.Name object at 0x7da2041d8bb0>, <ast.Name object at 0x7da2041da350>]]]] call[name[self].update_fw_dict, parameter[name[fw_dict]]]
keyword[def] identifier[update_fw_local_cache] ( identifier[self] , identifier[net] , identifier[direc] , identifier[start] ): literal[string] identifier[fw_dict] = identifier[self] . identifier[get_fw_dict] () keyword[if] identifier[direc] == literal[string] : identifier[fw_dict] . identifier[update] ({ literal[string] : identifier[net] , literal[string] : identifier[start] }) keyword[else] : identifier[fw_dict] . identifier[update] ({ literal[string] : identifier[net] , literal[string] : identifier[start] }) identifier[self] . identifier[update_fw_dict] ( identifier[fw_dict] )
def update_fw_local_cache(self, net, direc, start): """Update the fw dict with Net ID and service IP. """ fw_dict = self.get_fw_dict() if direc == 'in': fw_dict.update({'in_network_id': net, 'in_service_ip': start}) # depends on [control=['if'], data=[]] else: fw_dict.update({'out_network_id': net, 'out_service_ip': start}) self.update_fw_dict(fw_dict)
def _get_splunk_search_props(search): ''' Get splunk search properties from an object ''' props = search.content props["app"] = search.access.app props["sharing"] = search.access.sharing return props
def function[_get_splunk_search_props, parameter[search]]: constant[ Get splunk search properties from an object ] variable[props] assign[=] name[search].content call[name[props]][constant[app]] assign[=] name[search].access.app call[name[props]][constant[sharing]] assign[=] name[search].access.sharing return[name[props]]
keyword[def] identifier[_get_splunk_search_props] ( identifier[search] ): literal[string] identifier[props] = identifier[search] . identifier[content] identifier[props] [ literal[string] ]= identifier[search] . identifier[access] . identifier[app] identifier[props] [ literal[string] ]= identifier[search] . identifier[access] . identifier[sharing] keyword[return] identifier[props]
def _get_splunk_search_props(search): """ Get splunk search properties from an object """ props = search.content props['app'] = search.access.app props['sharing'] = search.access.sharing return props
def parse_args(self, *args, **kwargs): """Parse the arguments as usual, then add default processing.""" if _debug: ConfigArgumentParser._debug("parse_args") # pass along to the parent class result_args = ArgumentParser.parse_args(self, *args, **kwargs) # read in the configuration file config = _ConfigParser() config.read(result_args.ini) if _debug: _log.debug(" - config: %r", config) # check for BACpypes section if not config.has_section('BACpypes'): raise RuntimeError("INI file with BACpypes section required") # convert the contents to an object ini_obj = type('ini', (object,), dict(config.items('BACpypes'))) if _debug: _log.debug(" - ini_obj: %r", ini_obj) # add the object to the parsed arguments setattr(result_args, 'ini', ini_obj) # return what was parsed return result_args
def function[parse_args, parameter[self]]: constant[Parse the arguments as usual, then add default processing.] if name[_debug] begin[:] call[name[ConfigArgumentParser]._debug, parameter[constant[parse_args]]] variable[result_args] assign[=] call[name[ArgumentParser].parse_args, parameter[name[self], <ast.Starred object at 0x7da18ede6c80>]] variable[config] assign[=] call[name[_ConfigParser], parameter[]] call[name[config].read, parameter[name[result_args].ini]] if name[_debug] begin[:] call[name[_log].debug, parameter[constant[ - config: %r], name[config]]] if <ast.UnaryOp object at 0x7da18ede4c40> begin[:] <ast.Raise object at 0x7da18ede5ae0> variable[ini_obj] assign[=] call[name[type], parameter[constant[ini], tuple[[<ast.Name object at 0x7da18ede59c0>]], call[name[dict], parameter[call[name[config].items, parameter[constant[BACpypes]]]]]]] if name[_debug] begin[:] call[name[_log].debug, parameter[constant[ - ini_obj: %r], name[ini_obj]]] call[name[setattr], parameter[name[result_args], constant[ini], name[ini_obj]]] return[name[result_args]]
keyword[def] identifier[parse_args] ( identifier[self] ,* identifier[args] ,** identifier[kwargs] ): literal[string] keyword[if] identifier[_debug] : identifier[ConfigArgumentParser] . identifier[_debug] ( literal[string] ) identifier[result_args] = identifier[ArgumentParser] . identifier[parse_args] ( identifier[self] ,* identifier[args] ,** identifier[kwargs] ) identifier[config] = identifier[_ConfigParser] () identifier[config] . identifier[read] ( identifier[result_args] . identifier[ini] ) keyword[if] identifier[_debug] : identifier[_log] . identifier[debug] ( literal[string] , identifier[config] ) keyword[if] keyword[not] identifier[config] . identifier[has_section] ( literal[string] ): keyword[raise] identifier[RuntimeError] ( literal[string] ) identifier[ini_obj] = identifier[type] ( literal[string] ,( identifier[object] ,), identifier[dict] ( identifier[config] . identifier[items] ( literal[string] ))) keyword[if] identifier[_debug] : identifier[_log] . identifier[debug] ( literal[string] , identifier[ini_obj] ) identifier[setattr] ( identifier[result_args] , literal[string] , identifier[ini_obj] ) keyword[return] identifier[result_args]
def parse_args(self, *args, **kwargs): """Parse the arguments as usual, then add default processing.""" if _debug: ConfigArgumentParser._debug('parse_args') # depends on [control=['if'], data=[]] # pass along to the parent class result_args = ArgumentParser.parse_args(self, *args, **kwargs) # read in the configuration file config = _ConfigParser() config.read(result_args.ini) if _debug: _log.debug(' - config: %r', config) # depends on [control=['if'], data=[]] # check for BACpypes section if not config.has_section('BACpypes'): raise RuntimeError('INI file with BACpypes section required') # depends on [control=['if'], data=[]] # convert the contents to an object ini_obj = type('ini', (object,), dict(config.items('BACpypes'))) if _debug: _log.debug(' - ini_obj: %r', ini_obj) # depends on [control=['if'], data=[]] # add the object to the parsed arguments setattr(result_args, 'ini', ini_obj) # return what was parsed return result_args
def _to_dict(self): """Return a json dictionary representing this model.""" _dict = {} if hasattr(self, 'environment_id') and self.environment_id is not None: _dict['environment_id'] = self.environment_id if hasattr(self, 'session_token') and self.session_token is not None: _dict['session_token'] = self.session_token if hasattr(self, 'client_timestamp') and self.client_timestamp is not None: _dict['client_timestamp'] = datetime_to_string( self.client_timestamp) if hasattr(self, 'display_rank') and self.display_rank is not None: _dict['display_rank'] = self.display_rank if hasattr(self, 'collection_id') and self.collection_id is not None: _dict['collection_id'] = self.collection_id if hasattr(self, 'document_id') and self.document_id is not None: _dict['document_id'] = self.document_id if hasattr(self, 'query_id') and self.query_id is not None: _dict['query_id'] = self.query_id return _dict
def function[_to_dict, parameter[self]]: constant[Return a json dictionary representing this model.] variable[_dict] assign[=] dictionary[[], []] if <ast.BoolOp object at 0x7da18f09d3c0> begin[:] call[name[_dict]][constant[environment_id]] assign[=] name[self].environment_id if <ast.BoolOp object at 0x7da18f09de10> begin[:] call[name[_dict]][constant[session_token]] assign[=] name[self].session_token if <ast.BoolOp object at 0x7da18f09e0e0> begin[:] call[name[_dict]][constant[client_timestamp]] assign[=] call[name[datetime_to_string], parameter[name[self].client_timestamp]] if <ast.BoolOp object at 0x7da18f09efb0> begin[:] call[name[_dict]][constant[display_rank]] assign[=] name[self].display_rank if <ast.BoolOp object at 0x7da18f09c5b0> begin[:] call[name[_dict]][constant[collection_id]] assign[=] name[self].collection_id if <ast.BoolOp object at 0x7da18f09e740> begin[:] call[name[_dict]][constant[document_id]] assign[=] name[self].document_id if <ast.BoolOp object at 0x7da18f09ce50> begin[:] call[name[_dict]][constant[query_id]] assign[=] name[self].query_id return[name[_dict]]
keyword[def] identifier[_to_dict] ( identifier[self] ): literal[string] identifier[_dict] ={} keyword[if] identifier[hasattr] ( identifier[self] , literal[string] ) keyword[and] identifier[self] . identifier[environment_id] keyword[is] keyword[not] keyword[None] : identifier[_dict] [ literal[string] ]= identifier[self] . identifier[environment_id] keyword[if] identifier[hasattr] ( identifier[self] , literal[string] ) keyword[and] identifier[self] . identifier[session_token] keyword[is] keyword[not] keyword[None] : identifier[_dict] [ literal[string] ]= identifier[self] . identifier[session_token] keyword[if] identifier[hasattr] ( identifier[self] , literal[string] ) keyword[and] identifier[self] . identifier[client_timestamp] keyword[is] keyword[not] keyword[None] : identifier[_dict] [ literal[string] ]= identifier[datetime_to_string] ( identifier[self] . identifier[client_timestamp] ) keyword[if] identifier[hasattr] ( identifier[self] , literal[string] ) keyword[and] identifier[self] . identifier[display_rank] keyword[is] keyword[not] keyword[None] : identifier[_dict] [ literal[string] ]= identifier[self] . identifier[display_rank] keyword[if] identifier[hasattr] ( identifier[self] , literal[string] ) keyword[and] identifier[self] . identifier[collection_id] keyword[is] keyword[not] keyword[None] : identifier[_dict] [ literal[string] ]= identifier[self] . identifier[collection_id] keyword[if] identifier[hasattr] ( identifier[self] , literal[string] ) keyword[and] identifier[self] . identifier[document_id] keyword[is] keyword[not] keyword[None] : identifier[_dict] [ literal[string] ]= identifier[self] . identifier[document_id] keyword[if] identifier[hasattr] ( identifier[self] , literal[string] ) keyword[and] identifier[self] . identifier[query_id] keyword[is] keyword[not] keyword[None] : identifier[_dict] [ literal[string] ]= identifier[self] . identifier[query_id] keyword[return] identifier[_dict]
def _to_dict(self): """Return a json dictionary representing this model.""" _dict = {} if hasattr(self, 'environment_id') and self.environment_id is not None: _dict['environment_id'] = self.environment_id # depends on [control=['if'], data=[]] if hasattr(self, 'session_token') and self.session_token is not None: _dict['session_token'] = self.session_token # depends on [control=['if'], data=[]] if hasattr(self, 'client_timestamp') and self.client_timestamp is not None: _dict['client_timestamp'] = datetime_to_string(self.client_timestamp) # depends on [control=['if'], data=[]] if hasattr(self, 'display_rank') and self.display_rank is not None: _dict['display_rank'] = self.display_rank # depends on [control=['if'], data=[]] if hasattr(self, 'collection_id') and self.collection_id is not None: _dict['collection_id'] = self.collection_id # depends on [control=['if'], data=[]] if hasattr(self, 'document_id') and self.document_id is not None: _dict['document_id'] = self.document_id # depends on [control=['if'], data=[]] if hasattr(self, 'query_id') and self.query_id is not None: _dict['query_id'] = self.query_id # depends on [control=['if'], data=[]] return _dict
def get_fqhostname(): ''' Returns the fully qualified hostname ''' # try getaddrinfo() fqdn = None try: addrinfo = socket.getaddrinfo( socket.gethostname(), 0, socket.AF_UNSPEC, socket.SOCK_STREAM, socket.SOL_TCP, socket.AI_CANONNAME ) for info in addrinfo: # info struct [family, socktype, proto, canonname, sockaddr] # On Windows `canonname` can be an empty string # This can cause the function to return `None` if len(info) > 3 and info[3]: fqdn = info[3] break except socket.gaierror: pass # NOTE: this used to log.error() but it was later disabled except socket.error as err: log.debug('socket.getaddrinfo() failure while finding fqdn: %s', err) if fqdn is None: fqdn = socket.getfqdn() return fqdn
def function[get_fqhostname, parameter[]]: constant[ Returns the fully qualified hostname ] variable[fqdn] assign[=] constant[None] <ast.Try object at 0x7da1b1f7b100> if compare[name[fqdn] is constant[None]] begin[:] variable[fqdn] assign[=] call[name[socket].getfqdn, parameter[]] return[name[fqdn]]
keyword[def] identifier[get_fqhostname] (): literal[string] identifier[fqdn] = keyword[None] keyword[try] : identifier[addrinfo] = identifier[socket] . identifier[getaddrinfo] ( identifier[socket] . identifier[gethostname] (), literal[int] , identifier[socket] . identifier[AF_UNSPEC] , identifier[socket] . identifier[SOCK_STREAM] , identifier[socket] . identifier[SOL_TCP] , identifier[socket] . identifier[AI_CANONNAME] ) keyword[for] identifier[info] keyword[in] identifier[addrinfo] : keyword[if] identifier[len] ( identifier[info] )> literal[int] keyword[and] identifier[info] [ literal[int] ]: identifier[fqdn] = identifier[info] [ literal[int] ] keyword[break] keyword[except] identifier[socket] . identifier[gaierror] : keyword[pass] keyword[except] identifier[socket] . identifier[error] keyword[as] identifier[err] : identifier[log] . identifier[debug] ( literal[string] , identifier[err] ) keyword[if] identifier[fqdn] keyword[is] keyword[None] : identifier[fqdn] = identifier[socket] . identifier[getfqdn] () keyword[return] identifier[fqdn]
def get_fqhostname(): """ Returns the fully qualified hostname """ # try getaddrinfo() fqdn = None try: addrinfo = socket.getaddrinfo(socket.gethostname(), 0, socket.AF_UNSPEC, socket.SOCK_STREAM, socket.SOL_TCP, socket.AI_CANONNAME) for info in addrinfo: # info struct [family, socktype, proto, canonname, sockaddr] # On Windows `canonname` can be an empty string # This can cause the function to return `None` if len(info) > 3 and info[3]: fqdn = info[3] break # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['info']] # depends on [control=['try'], data=[]] except socket.gaierror: pass # NOTE: this used to log.error() but it was later disabled # depends on [control=['except'], data=[]] except socket.error as err: log.debug('socket.getaddrinfo() failure while finding fqdn: %s', err) # depends on [control=['except'], data=['err']] if fqdn is None: fqdn = socket.getfqdn() # depends on [control=['if'], data=['fqdn']] return fqdn
def derivative(self, x): """Return the derivative at ``x``. The derivative of the right scalar operator multiplication follows the chain rule: ``OperatorRightScalarMult(op, s).derivative(y) == OperatorLeftScalarMult(op.derivative(s * y), s)`` Parameters ---------- x : `domain` `element-like` Evaluation point of the derivative. Examples -------- >>> space = odl.rn(3) >>> operator = odl.IdentityOperator(space) - space.element([1, 1, 1]) >>> left_mul_op = OperatorRightScalarMult(operator, 3) >>> derivative = left_mul_op.derivative([0, 0, 0]) >>> derivative([1, 1, 1]) rn(3).element([ 3., 3., 3.]) """ return self.scalar * self.operator.derivative(self.scalar * x)
def function[derivative, parameter[self, x]]: constant[Return the derivative at ``x``. The derivative of the right scalar operator multiplication follows the chain rule: ``OperatorRightScalarMult(op, s).derivative(y) == OperatorLeftScalarMult(op.derivative(s * y), s)`` Parameters ---------- x : `domain` `element-like` Evaluation point of the derivative. Examples -------- >>> space = odl.rn(3) >>> operator = odl.IdentityOperator(space) - space.element([1, 1, 1]) >>> left_mul_op = OperatorRightScalarMult(operator, 3) >>> derivative = left_mul_op.derivative([0, 0, 0]) >>> derivative([1, 1, 1]) rn(3).element([ 3., 3., 3.]) ] return[binary_operation[name[self].scalar * call[name[self].operator.derivative, parameter[binary_operation[name[self].scalar * name[x]]]]]]
keyword[def] identifier[derivative] ( identifier[self] , identifier[x] ): literal[string] keyword[return] identifier[self] . identifier[scalar] * identifier[self] . identifier[operator] . identifier[derivative] ( identifier[self] . identifier[scalar] * identifier[x] )
def derivative(self, x): """Return the derivative at ``x``. The derivative of the right scalar operator multiplication follows the chain rule: ``OperatorRightScalarMult(op, s).derivative(y) == OperatorLeftScalarMult(op.derivative(s * y), s)`` Parameters ---------- x : `domain` `element-like` Evaluation point of the derivative. Examples -------- >>> space = odl.rn(3) >>> operator = odl.IdentityOperator(space) - space.element([1, 1, 1]) >>> left_mul_op = OperatorRightScalarMult(operator, 3) >>> derivative = left_mul_op.derivative([0, 0, 0]) >>> derivative([1, 1, 1]) rn(3).element([ 3., 3., 3.]) """ return self.scalar * self.operator.derivative(self.scalar * x)
def flush(self):
    """Delete all run-time data generated by this crawler.

    Delegates to the three backing stores in turn, passing this crawler
    instance so each store can scope the deletion to it.
    """
    # Drop this crawler's queued work items.
    Queue.flush(self)
    # Remove events recorded for this crawler — NOTE(review): Event uses
    # ``delete`` rather than ``flush``; confirm the semantics match the
    # other two calls.
    Event.delete(self)
    # Clear stored crawl state/results.
    Crawl.flush(self)
def function[flush, parameter[self]]: constant[Delete all run-time data generated by this crawler.] call[name[Queue].flush, parameter[name[self]]] call[name[Event].delete, parameter[name[self]]] call[name[Crawl].flush, parameter[name[self]]]
keyword[def] identifier[flush] ( identifier[self] ): literal[string] identifier[Queue] . identifier[flush] ( identifier[self] ) identifier[Event] . identifier[delete] ( identifier[self] ) identifier[Crawl] . identifier[flush] ( identifier[self] )
def flush(self): """Delete all run-time data generated by this crawler.""" Queue.flush(self) Event.delete(self) Crawl.flush(self)
def close(self):
    """Close the tough connection.

    You are allowed to close a tough connection by default and it will
    not complain if you close it more than once.

    You can disallow closing connections by setting
    the closeable parameter to something false.  In this case,
    closing tough connections will be silently ignored.
    """
    # Guard-clause form: a closeable connection is really closed and we
    # are done; otherwise an open transaction is rolled back instead.
    if self._closeable:
        self._close()
        return
    if self._transaction:
        self._reset()
def function[close, parameter[self]]: constant[Close the tough connection. You are allowed to close a tough connection by default and it will not complain if you close it more than once. You can disallow closing connections by setting the closeable parameter to something false. In this case, closing tough connections will be silently ignored. ] if name[self]._closeable begin[:] call[name[self]._close, parameter[]]
keyword[def] identifier[close] ( identifier[self] ): literal[string] keyword[if] identifier[self] . identifier[_closeable] : identifier[self] . identifier[_close] () keyword[elif] identifier[self] . identifier[_transaction] : identifier[self] . identifier[_reset] ()
def close(self): """Close the tough connection. You are allowed to close a tough connection by default and it will not complain if you close it more than once. You can disallow closing connections by setting the closeable parameter to something false. In this case, closing tough connections will be silently ignored. """ if self._closeable: self._close() # depends on [control=['if'], data=[]] elif self._transaction: self._reset() # depends on [control=['if'], data=[]]