Dataset schema (column: type, observed value range):

Unnamed: 0: int64, 0 to 10k
repository_name: string, 7 to 54 characters
func_path_in_repository: string, 5 to 223 characters
func_name: string, 1 to 134 characters
whole_func_string: string, 100 to 30.3k characters
language: string, 1 distinct value
func_code_string: string, 100 to 30.3k characters
func_code_tokens: string, 138 to 33.2k characters
func_documentation_string: string, 1 to 15k characters
func_documentation_tokens: string, 5 to 5.14k characters
split_name: string, 1 distinct value
func_code_url: string, 91 to 315 characters

Record 4,300
repository_name: BoGoEngine/bogo-python
func_path_in_repository: bogo/core.py
func_name: _transform
language: python
whole_func_string / func_code_string:

def _transform(comps, trans):
    """
    Transform the given string with transform type trans
    """
    logging.debug("== In _transform(%s, %s) ==", comps, trans)

    components = list(comps)

    action, parameter = _get_action(trans)
    if action == _Action.ADD_MARK and \
            components[2] == "" and \
            mark.strip(components[1]).lower() in ['oe', 'oa'] and trans == "o^":
        action, parameter = _Action.ADD_CHAR, trans[0]

    if action == _Action.ADD_ACCENT:
        logging.debug("add_accent(%s, %s)", components, parameter)
        components = accent.add_accent(components, parameter)
    elif action == _Action.ADD_MARK and mark.is_valid_mark(components, trans):
        logging.debug("add_mark(%s, %s)", components, parameter)
        components = mark.add_mark(components, parameter)

        # Handle uơ in "huơ", "thuở", "quở"
        # If the current word has no last consonant and the first consonant
        # is one of "h", "th" and the vowel is "ươ" then change the vowel into
        # "uơ", keeping case and accent. If an alphabet character is then added
        # into the word then change back to "ươ".
        #
        # NOTE: In the dictionary, these are the only words having this strange
        # vowel so we don't need to worry about other cases.
        if accent.remove_accent_string(components[1]).lower() == "ươ" and \
                not components[2] and components[0].lower() in ["", "h", "th", "kh"]:
            # Backup accents
            ac = accent.get_accent_string(components[1])
            components[1] = ("u", "U")[components[1][0].isupper()] + components[1][1]
            components = accent.add_accent(components, ac)
    elif action == _Action.ADD_CHAR:
        if trans[0] == "<":
            if not components[2]:
                # Only allow ư, ơ or ươ sitting alone in the middle part
                # and ['g', 'i', '']. If we want to type giowf = 'giờ', separate()
                # will create ['g', 'i', '']. Therefore we have to allow
                # components[1] == 'i'.
                if (components[0].lower(), components[1].lower()) == ('g', 'i'):
                    components[0] += components[1]
                    components[1] = ''
                if not components[1] or \
                        (components[1].lower(), trans[1].lower()) == ('ư', 'ơ'):
                    components[1] += trans[1]
        else:
            components = utils.append_comps(components, parameter)
            if parameter.isalpha() and \
                    accent.remove_accent_string(components[1]).lower().startswith("uơ"):
                ac = accent.get_accent_string(components[1])
                components[1] = ('ư', 'Ư')[components[1][0].isupper()] + \
                    ('ơ', 'Ơ')[components[1][1].isupper()] + components[1][2:]
                components = accent.add_accent(components, ac)
    elif action == _Action.UNDO:
        components = _reverse(components, trans[1:])

    if action == _Action.ADD_MARK or (action == _Action.ADD_CHAR and parameter.isalpha()):
        # If there is any accent, remove and reapply it
        # because it is likely to be misplaced in previous transformations
        ac = accent.get_accent_string(components[1])
        if ac != accent.Accent.NONE:
            components = accent.add_accent(components, Accent.NONE)
            components = accent.add_accent(components, ac)

    logging.debug("After transform: %s", components)
    return components

func_documentation_string: Transform the given string with transform type trans
split_name: train
func_code_url: https://github.com/BoGoEngine/bogo-python/blob/9b85329a408ded4cead3539cecba12984d5d7650/bogo/core.py#L365-L434
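
The private _transform above is driven by bogo's public API. A minimal usage sketch, assuming the installed package exposes process_sequence as shown in the project README:

import bogo

# Feed the Telex key sequence "meof"; bogo composes the keystrokes
# into the accented Vietnamese word.
print(bogo.process_sequence('meof'))  # expected: 'mèo'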

Record 4,301
repository_name: inveniosoftware/invenio-access
func_path_in_repository: invenio_access/alembic/2069a982633b_add_on_delete_cascade_constraint.py
func_name: upgrade
language: python
whole_func_string / func_code_string:

def upgrade():
    """Upgrade database."""
    op.create_index(op.f('ix_access_actionsroles_role_id'),
                    'access_actionsroles', ['role_id'], unique=False)
    op.drop_constraint(u'fk_access_actionsroles_role_id_accounts_role',
                       'access_actionsroles', type_='foreignkey')
    op.create_foreign_key(op.f('fk_access_actionsroles_role_id_accounts_role'),
                          'access_actionsroles', 'accounts_role', ['role_id'],
                          ['id'], ondelete='CASCADE')
    op.create_index(op.f('ix_access_actionsusers_user_id'),
                    'access_actionsusers', ['user_id'], unique=False)
    op.drop_constraint(u'fk_access_actionsusers_user_id_accounts_user',
                       'access_actionsusers', type_='foreignkey')
    op.create_foreign_key(op.f('fk_access_actionsusers_user_id_accounts_user'),
                          'access_actionsusers', 'accounts_user', ['user_id'],
                          ['id'], ondelete='CASCADE')

func_documentation_string: Upgrade database.
split_name: train
func_code_url: https://github.com/inveniosoftware/invenio-access/blob/3b033a4bdc110eb2f7e9f08f0744a780884bfc80/invenio_access/alembic/2069a982633b_add_on_delete_cascade_constraint.py#L20-L35

Record 4,302
repository_name: delph-in/pydelphin
func_path_in_repository: delphin/tdl.py
func_name: Conjunction.features
language: python
whole_func_string / func_code_string:

def features(self, expand=False):
    """Return the list of feature-value pairs in the conjunction."""
    featvals = []
    for term in self._terms:
        if isinstance(term, AVM):
            featvals.extend(term.features(expand=expand))
    return featvals

func_documentation_string: Return the list of feature-value pairs in the conjunction.
split_name: train
func_code_url: https://github.com/delph-in/pydelphin/blob/7bd2cd63ab7cf74803e1d6547b9ebc014b382abd/delphin/tdl.py#L599-L605

Record 4,303
repository_name: ziwenxie/netease-dl
func_path_in_repository: netease/weapi.py
func_name: Crawler.search_user
language: python
whole_func_string / func_code_string:

def search_user(self, user_name, quiet=False, limit=9):
    """Search user by user name.

    :params user_name: user name.
    :params quiet: automatically select the best one.
    :params limit: user count returned by weapi.
    :return: a User object.
    """
    result = self.search(user_name, search_type=1002, limit=limit)

    if result['result']['userprofileCount'] <= 0:
        LOG.warning('User %s not existed!', user_name)
        raise SearchNotFound('user {} not existed'.format(user_name))
    else:
        users = result['result']['userprofiles']
        if quiet:
            user_id, user_name = users[0]['userId'], users[0]['nickname']
            user = User(user_id, user_name)
            return user
        else:
            return self.display.select_one_user(users)

func_documentation_string: Search user by user name. :params user_name: user name. :params quiet: automatically select the best one. :params limit: user count returned by weapi. :return: a User object.
split_name: train
func_code_url: https://github.com/ziwenxie/netease-dl/blob/84b226fc07b10f7f66580f0fc69f10356f66b5c3/netease/weapi.py#L210-L231

Record 4,304
repository_name: JdeRobot/base
func_path_in_repository: src/drivers/MAVLinkServer/MAVProxy/modules/mavproxy_misc.py
func_name: MiscModule.cmd_alt
language: python
whole_func_string / func_code_string:

def cmd_alt(self, args):
    '''show altitude'''
    print("Altitude: %.1f" % self.status.altitude)
    qnh_pressure = self.get_mav_param('AFS_QNH_PRESSURE', None)
    if qnh_pressure is not None and qnh_pressure > 0:
        ground_temp = self.get_mav_param('GND_TEMP', 21)
        pressure = self.master.field('SCALED_PRESSURE', 'press_abs', 0)
        qnh_alt = self.altitude_difference(qnh_pressure, pressure, ground_temp)
        print("QNH Alt: %u meters %u feet for QNH pressure %.1f" %
              (qnh_alt, qnh_alt*3.2808, qnh_pressure))
        print("QNH Estimate: %.1f millibars" % self.qnh_estimate())

func_documentation_string: show altitude
split_name: train
func_code_url: https://github.com/JdeRobot/base/blob/303b18992785b2fe802212f2d758a60873007f1f/src/drivers/MAVLinkServer/MAVProxy/modules/mavproxy_misc.py#L92-L101

Record 4,305
repository_name: litaotao/IPython-Dashboard
func_path_in_repository: dashboard/server/resources/sql.py
func_name: SqlData.post
language: python
whole_func_string / func_code_string:

def post(self):
    '''return executed sql result to client.

    post data format:
        {"options": ['all', 'last', 'first', 'format'],
         "sql_raw": "raw sql ..."}

    Returns:
        sql result.
    '''
    ## format sql
    data = request.get_json()
    options, sql_raw = data.get('options'), data.get('sql_raw')
    if options == 'format':
        sql_formmated = sqlparse.format(sql_raw, keyword_case='upper',
                                        reindent=True)
        return build_response(dict(data=sql_formmated, code=200))
    elif options in ('all', 'selected'):
        conn = SQL(config.sql_host, config.sql_port, config.sql_user,
                   config.sql_pwd, config.sql_db)
        result = conn.run(sql_raw)
        return build_response(dict(data=result, code=200))
    else:
        pass
    pass

func_documentation_string: return executed sql result to client. post data format: {"options": ['all', 'last', 'first', 'format'], "sql_raw": "raw sql ..."} Returns: sql result.
split_name: train
func_code_url: https://github.com/litaotao/IPython-Dashboard/blob/b28a6b447c86bcec562e554efe96c64660ddf7a2/dashboard/server/resources/sql.py#L40-L75

Record 4,306
repository_name: openstack/proliantutils
func_path_in_repository: proliantutils/redfish/resources/system/iscsi.py
func_name: ISCSIResource.iscsi_settings
language: python
whole_func_string / func_code_string:

def iscsi_settings(self):
    """Property to provide reference to iSCSI settings instance

    It is calculated once when the first time it is queried. On refresh,
    this property gets reset.
    """
    return ISCSISettings(
        self._conn,
        utils.get_subresource_path_by(
            self, ["@Redfish.Settings", "SettingsObject"]),
        redfish_version=self.redfish_version)

func_documentation_string: Property to provide reference to iSCSI settings instance. It is calculated once when the first time it is queried. On refresh, this property gets reset.
split_name: train
func_code_url: https://github.com/openstack/proliantutils/blob/86ef3b47b4eca97c221577e3570b0240d6a25f22/proliantutils/redfish/resources/system/iscsi.py#L42-L51

Record 4,307
repository_name: quora/qcore
func_path_in_repository: qcore/asserts.py
func_name: assert_is_instance
language: python
whole_func_string / func_code_string:

def assert_is_instance(value, types, message=None, extra=None):
    """Raises an AssertionError if value is not an instance of type(s)."""
    assert isinstance(value, types), _assert_fail_message(
        message, value, types, "is not an instance of", extra
    )

func_documentation_string: Raises an AssertionError if value is not an instance of type(s).
split_name: train
func_code_url: https://github.com/quora/qcore/blob/fa5cd438eea554db35fd29cbc8dfbde69f09961c/qcore/asserts.py#L97-L101
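
A minimal usage sketch for the assertion above, assuming qcore is installed; the import path follows the file path shown (qcore/asserts.py):

from qcore.asserts import assert_is_instance

assert_is_instance(5, int)  # passes silently
# A failing check raises AssertionError with a formatted message:
# assert_is_instance('5', (int, float))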

Record 4,308
repository_name: python-gitlab/python-gitlab
func_path_in_repository: gitlab/__init__.py
func_name: Gitlab.auth
language: python
whole_func_string / func_code_string:

def auth(self):
    """Performs an authentication.

    Uses either the private token, or the email/password pair.

    The `user` attribute will hold a `gitlab.objects.CurrentUser` object on
    success.
    """
    if self.private_token or self.oauth_token:
        self._token_auth()
    else:
        self._credentials_auth()

func_documentation_string: Performs an authentication. Uses either the private token, or the email/password pair. The `user` attribute will hold a `gitlab.objects.CurrentUser` object on success.
split_name: train
func_code_url: https://github.com/python-gitlab/python-gitlab/blob/16de1b03fde3dbbe8f851614dd1d8c09de102fe5/gitlab/__init__.py#L192-L203
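
A short usage sketch for auth(), following the standard python-gitlab flow; the URL and token below are placeholders:

import gitlab

gl = gitlab.Gitlab('https://gitlab.example.com', private_token='PLACEHOLDER')
# With a private token set, auth() takes the _token_auth() branch and
# populates gl.user with the current user on success.
gl.auth()
print(gl.user.username)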

Record 4,309
repository_name: materialsproject/pymatgen
func_path_in_repository: pymatgen/analysis/wulff.py
func_name: WulffShape.get_line_in_facet
language: python
whole_func_string / func_code_string:

def get_line_in_facet(self, facet):
    """
    Returns the sorted pts in a facet used to draw a line
    """
    lines = list(facet.outer_lines)
    pt = []
    prev = None
    while len(lines) > 0:
        if prev is None:
            l = lines.pop(0)
        else:
            for i, l in enumerate(lines):
                if prev in l:
                    l = lines.pop(i)
                    if l[1] == prev:
                        l.reverse()
                    break
        # make sure the lines are connected one by one.
        # find the way covering all pts and facets
        pt.append(self.wulff_pt_list[l[0]].tolist())
        pt.append(self.wulff_pt_list[l[1]].tolist())
        prev = l[1]
    return pt

func_documentation_string: Returns the sorted pts in a facet used to draw a line
split_name: train
func_code_url: https://github.com/materialsproject/pymatgen/blob/4ca558cf72f8d5f8a1f21dfdfc0181a971c186da/pymatgen/analysis/wulff.py#L353-L377

Record 4,310
repository_name: pjuren/pyokit
func_path_in_repository: src/pyokit/io/genomeAlignment.py
func_name: __split_genomic_interval_filename
language: python
whole_func_string / func_code_string:

def __split_genomic_interval_filename(fn):
    """
    Split a filename of the format chrom:start-end.ext or chrom.ext
    (full chrom).

    :return: tuple of (chrom, start, end) -- 'start' and 'end' are None if
             not present in the filename.
    """
    if fn is None or fn == "":
        raise ValueError("invalid filename: " + str(fn))
    fn = ".".join(fn.split(".")[:-1])
    parts = fn.split(":")
    if len(parts) == 1:
        return (parts[0].strip(), None, None)
    else:
        r_parts = parts[1].split("-")
        if len(r_parts) != 2:
            raise ValueError("Invalid filename: " + str(fn))
        return (parts[0].strip(), int(r_parts[0]), int(r_parts[1]))

func_documentation_string: Split a filename of the format chrom:start-end.ext or chrom.ext (full chrom). :return: tuple of (chrom, start, end) -- 'start' and 'end' are None if not present in the filename.
split_name: train
func_code_url: https://github.com/pjuren/pyokit/blob/fddae123b5d817daa39496183f19c000d9c3791f/src/pyokit/io/genomeAlignment.py#L75-L92
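
The function above is private by convention (double leading underscore), so this sketch just spells out the mapping it implements, by example:

# Filename              -> return value
# "chr1:1000-2000.maf"  -> ("chr1", 1000, 2000)   range parsed to ints
# "chr1.maf"            -> ("chr1", None, None)   full chromosome
# "chr1:1000.maf"       -> raises ValueError      range must be start-end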

Record 4,311
repository_name: NoneGG/aredis
func_path_in_repository: aredis/commands/hash.py
func_name: HashCommandMixin.hincrbyfloat
language: python
whole_func_string / func_code_string:

async def hincrbyfloat(self, name, key, amount=1.0):
    """
    Increment the value of ``key`` in hash ``name`` by floating ``amount``
    """
    return await self.execute_command('HINCRBYFLOAT', name, key, amount)

func_documentation_string: Increment the value of ``key`` in hash ``name`` by floating ``amount``
split_name: train
func_code_url: https://github.com/NoneGG/aredis/blob/204caad740ac13e5760d46444a2ba7632982a046/aredis/commands/hash.py#L47-L51
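
A minimal async sketch for hincrbyfloat, assuming a Redis server on localhost and aredis's StrictRedis client (the class this mixin feeds into):

import asyncio
from aredis import StrictRedis

async def main():
    client = StrictRedis(host='127.0.0.1', port=6379)
    # HINCRBYFLOAT creates the field if it is missing, then adds the amount.
    await client.hincrbyfloat('prices', 'apple', 0.25)
    print(await client.hget('prices', 'apple'))

asyncio.get_event_loop().run_until_complete(main())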

Record 4,312
repository_name: tensorflow/tensor2tensor
func_path_in_repository: tensor2tensor/models/transformer.py
func_name: transformer_wikitext103_l4k_v0
language: python
whole_func_string / func_code_string:

def transformer_wikitext103_l4k_v0():
  """HParams for training languagemodel_wikitext103_l4k."""
  hparams = transformer_big()

  # Adafactor uses less memory than Adam.
  # switch to Adafactor with its recommended learning rate scheme.
  hparams.optimizer = "Adafactor"
  hparams.learning_rate_schedule = "rsqrt_decay"
  hparams.learning_rate_warmup_steps = 10000

  hparams.num_heads = 4
  hparams.max_length = 4096
  hparams.batch_size = 4096
  hparams.shared_embedding_and_softmax_weights = False

  hparams.num_hidden_layers = 8
  hparams.attention_dropout = 0.1
  hparams.layer_prepostprocess_dropout = 0.2
  hparams.relu_dropout = 0.1
  hparams.label_smoothing = 0.0

  # Using noise broadcast in the dropout layers saves memory during training.
  hparams.attention_dropout_broadcast_dims = "0,1"  # batch, heads
  hparams.relu_dropout_broadcast_dims = "1"  # length
  hparams.layer_prepostprocess_dropout_broadcast_dims = "1"  # length

  # Avoid an expensive concat on TPU.
  # >1 shards helps with faster parameter distribution on multi-GPU machines
  hparams.symbol_modality_num_shards = 1

  return hparams

func_documentation_string: HParams for training languagemodel_wikitext103_l4k.
split_name: train
func_code_url: https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/transformer.py#L2615-L2645

Record 4,313
repository_name: programa-stic/barf-project
func_path_in_repository: barf/analysis/symbolic/emulator.py
func_name: ReilSymbolicEmulator.__process_instr
language: python
whole_func_string / func_code_string:

def __process_instr(self, instr, avoid, next_addr, initial_state,
                    execution_state, trace_current):
    """Process a REIL instruction.

    Args:
        instr (ReilInstruction): Instruction to process.
        avoid (list): List of addresses to avoid while executing the code.
        next_addr (int): Address of the following instruction.
        initial_state (State): Initial execution state.
        execution_state (Queue): Queue of execution states.
        trace_current (list): Current trace.

    Returns:
        int: Returns the next address to execute.
    """
    # Process branch (JCC oprnd0, empty, oprnd2).
    if instr.mnemonic == ReilMnemonic.JCC:
        not_taken_addr = next_addr

        address, index = split_address(instr.address)

        logger.debug("[+] Processing branch: {:#08x}:{:02x} : {}".format(address, index, instr))

        # Process conditional branch (oprnd0 is a REGISTER).
        if isinstance(instr.operands[0], ReilRegisterOperand):
            next_ip = self.__process_branch_cond(instr, avoid, initial_state,
                                                 execution_state, trace_current,
                                                 not_taken_addr)
        # Process unconditional branch (oprnd0 is an INTEGER).
        else:
            next_ip = self.__process_branch_uncond(instr, trace_current,
                                                   not_taken_addr)
    # Process the rest of the instructions.
    else:
        trace_current += [(instr, None)]

        self.__cpu.execute(instr)

        next_ip = next_addr

    return next_ip

func_documentation_string: Process a REIL instruction. Args: instr (ReilInstruction): Instruction to process. avoid (list): List of addresses to avoid while executing the code. next_addr (int): Address of the following instruction. initial_state (State): Initial execution state. execution_state (Queue): Queue of execution states. trace_current (list): Current trace. Returns: int: Returns the next address to execute.
split_name: train
func_code_url: https://github.com/programa-stic/barf-project/blob/18ed9e5eace55f7bf6015ec57f037c364099021c/barf/analysis/symbolic/emulator.py#L396-L433

Record 4,314
repository_name: openvax/pyensembl
func_path_in_repository: pyensembl/genome.py
func_name: Genome.transcript_names
language: python
whole_func_string / func_code_string:

def transcript_names(self, contig=None, strand=None):
    """
    What are all the transcript names in the database
    (optionally, restrict to a given chromosome and/or strand)
    """
    return self._all_feature_values(
        column="transcript_name",
        feature="transcript",
        contig=contig,
        strand=strand)

func_documentation_string: What are all the transcript names in the database (optionally, restrict to a given chromosome and/or strand)
split_name: train
func_code_url: https://github.com/openvax/pyensembl/blob/4b995fb72e848206d6fbf11950cf30964cd9b3aa/pyensembl/genome.py#L926-L935
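
A usage sketch for transcript_names, assuming pyensembl is installed; the release number is an example, and the first call downloads and indexes the annotation data:

from pyensembl import EnsemblRelease

genome = EnsemblRelease(77)
# Restrict to chromosome 1, forward strand.
names = genome.transcript_names(contig="1", strand="+")
print(len(names))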

Record 4,315
repository_name: jxtech/wechatpy
func_path_in_repository: wechatpy/client/__init__.py
func_name: WeChatComponentClient.fetch_access_token
language: python
whole_func_string / func_code_string (docstring translated from Chinese):

def fetch_access_token(self):
    """Fetch the access token.

    For details see
    https://open.weixin.qq.com/cgi-bin/showdocument?action=dir_list\
    &t=resource/res_list&verify=1&id=open1419318587&token=&lang=zh_CN

    This is an internal refresh mechanism. Do not rely on it completely,
    because the official account may see no activity while the token is
    cached, which can cause the refresh_token to expire.

    :return: the JSON payload returned by the API
    """
    expires_in = 7200
    result = self.component.refresh_authorizer_token(
        self.appid, self.refresh_token)
    if 'expires_in' in result:
        expires_in = result['expires_in']
    self.session.set(
        self.access_token_key,
        result['authorizer_access_token'],
        expires_in
    )
    self.expires_at = int(time.time()) + expires_in
    return result

func_documentation_string (translated from Chinese): Fetch the access token. For details see https://open.weixin.qq.com/cgi-bin/showdocument?action=dir_list&t=resource/res_list&verify=1&id=open1419318587&token=&lang=zh_CN. This is an internal refresh mechanism; do not rely on it completely, because the official account may see no activity while the token is cached, which can cause the refresh_token to expire. :return: the JSON payload returned by the API
split_name: train
func_code_url: https://github.com/jxtech/wechatpy/blob/4df0da795618c0895a10f1c2cde9e9d5c0a93aaa/wechatpy/client/__init__.py#L113-L135

Record 4,316
repository_name: hardbyte/python-can
func_path_in_repository: can/interfaces/ixxat/canlib.py
func_name: __vciFormatError
language: python
whole_func_string / func_code_string:

def __vciFormatError(library_instance, function, HRESULT):
    """Format a VCI error and attach failed function and decoded HRESULT

    :param CLibrary library_instance: Mapped instance of IXXAT vcinpl library
    :param callable function: Failed function
    :param HRESULT HRESULT: HRESULT returned by vcinpl call
    :return: Formatted string
    """
    buf = ctypes.create_string_buffer(constants.VCI_MAX_ERRSTRLEN)
    ctypes.memset(buf, 0, constants.VCI_MAX_ERRSTRLEN)
    library_instance.vciFormatError(HRESULT, buf, constants.VCI_MAX_ERRSTRLEN)
    return "function {} failed ({})".format(
        function._name, buf.value.decode('utf-8', 'replace'))

func_documentation_string: Format a VCI error and attach failed function and decoded HRESULT :param CLibrary library_instance: Mapped instance of IXXAT vcinpl library :param callable function: Failed function :param HRESULT HRESULT: HRESULT returned by vcinpl call :return: Formatted string
split_name: train
func_code_url: https://github.com/hardbyte/python-can/blob/cdc5254d96072df7739263623f3e920628a7d214/can/interfaces/ixxat/canlib.py#L82-L96

Record 4,317
repository_name: mulkieran/justbases
func_path_in_repository: src/justbases/_division.py
func_name: NatDivision._division
language: python
whole_func_string / func_code_string:

def _division(divisor, dividend, remainder, base):
    """
    Get the quotient and remainder

    :param int divisor: the divisor
    :param dividend: the dividend
    :type dividend: sequence of int
    :param int remainder: initial remainder
    :param int base: the base

    :returns: quotient and remainder
    :rtype: tuple of (list of int) * int

    Complexity: O(log_{divisor}(quotient))
    """
    quotient = []
    for value in dividend:
        remainder = remainder * base + value
        (quot, rem) = divmod(remainder, divisor)
        quotient.append(quot)
        if quot > 0:
            remainder = rem
    return (quotient, remainder)

func_documentation_string: Get the quotient and remainder :param int divisor: the divisor :param dividend: the dividend :type dividend: sequence of int :param int remainder: initial remainder :param int base: the base :returns: quotient and remainder :rtype: tuple of (list of int) * int Complexity: O(log_{divisor}(quotient))
split_name: train
func_code_url: https://github.com/mulkieran/justbases/blob/dd52ff4b3d11609f54b2673599ee4eeb20f9734f/src/justbases/_division.py#L192-L214
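
The loop above is ordinary long division, one digit at a time. A worked check, assuming _division is callable as the staticmethod its self-less signature suggests:

from justbases._division import NatDivision

# 123 (digits [1, 2, 3] in base 10) divided by 4:
#   divmod(1, 4) = (0, 1), divmod(12, 4) = (3, 0), divmod(3, 4) = (0, 3)
quotient, remainder = NatDivision._division(4, [1, 2, 3], 0, 10)
print(quotient, remainder)  # [0, 3, 0] 3, i.e. 4 * 30 + 3 == 123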

Record 4,318
repository_name: Fantomas42/django-blog-zinnia
func_path_in_repository: zinnia/signals.py
func_name: disconnect_entry_signals
language: python
whole_func_string / func_code_string:

def disconnect_entry_signals():
    """
    Disconnect all the signals on Entry model.
    """
    post_save.disconnect(
        sender=Entry,
        dispatch_uid=ENTRY_PS_PING_DIRECTORIES)
    post_save.disconnect(
        sender=Entry,
        dispatch_uid=ENTRY_PS_PING_EXTERNAL_URLS)
    post_save.disconnect(
        sender=Entry,
        dispatch_uid=ENTRY_PS_FLUSH_SIMILAR_CACHE)
    post_delete.disconnect(
        sender=Entry,
        dispatch_uid=ENTRY_PD_FLUSH_SIMILAR_CACHE)

func_documentation_string: Disconnect all the signals on Entry model.
split_name: train
func_code_url: https://github.com/Fantomas42/django-blog-zinnia/blob/b4949304b104a8e1a7a7a0773cbfd024313c3a15/zinnia/signals.py#L153-L168
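
A short usage sketch: the typical reason to call disconnect_entry_signals is a test suite or bulk import where saving entries should stay side-effect free. Assumes a configured Django project with zinnia installed:

from zinnia.signals import disconnect_entry_signals

disconnect_entry_signals()
# ... create or import Entry objects here without triggering
# directory pings, external URL pings, or similarity-cache flushes ...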

Record 4,319
repository_name: rigetti/pyquil
func_path_in_repository: pyquil/api/_compiler.py
func_name: _extract_program_from_pyquil_executable_response
language: python
whole_func_string / func_code_string:

def _extract_program_from_pyquil_executable_response(response: PyQuilExecutableResponse) -> Program:
    """
    Unpacks a rpcq PyQuilExecutableResponse object into a pyQuil Program object.

    :param response: PyQuilExecutableResponse object to be unpacked.
    :return: Resulting pyQuil Program object.
    """
    p = Program(response.program)
    for attr, val in response.attributes.items():
        setattr(p, attr, val)
    return p

func_documentation_string: Unpacks a rpcq PyQuilExecutableResponse object into a pyQuil Program object. :param response: PyQuilExecutableResponse object to be unpacked. :return: Resulting pyQuil Program object.
split_name: train
func_code_url: https://github.com/rigetti/pyquil/blob/ec98e453084b0037d69d8c3245f6822a5422593d/pyquil/api/_compiler.py#L55-L65

Record 4,320
repository_name: atdt/afraid
func_path_in_repository: afraid/__init__.py
func_name: parse_args
language: python
whole_func_string / func_code_string:

def parse_args(args=None):
    """Parse command-line arguments"""
    parser = argparse.ArgumentParser(description='afraid.org dyndns client')

    ## positional arguments
    parser.add_argument('user')
    parser.add_argument('password')
    parser.add_argument('hosts',
        nargs='*',
        help='(default: all associated hosts)',
        default=None
    )

    ## optional arguments

    # should we fork?
    parser.add_argument('--daemonize', '-d',
        action='store_true',
        default=False,
        help='run in background (default: no)',
    )

    # log to a file or stdout
    parser.add_argument('--log',
        help='log to file (default: log to stdout)',
        type=argparse.FileType('w'),
        default=sys.stdout,
        metavar='file'
    )

    # how long to sleep between updates
    parser.add_argument('--interval',
        help='update interval, in seconds (default: 21600)',
        metavar='seconds',
        default=6 * 60 * 60,  # 6 hours
        type=int
    )

    return parser.parse_args(args)

func_documentation_string: Parse command-line arguments
split_name: train
func_code_url: https://github.com/atdt/afraid/blob/d74b2d4e41ed14e420da2793a89bef5d9b26ea26/afraid/__init__.py#L117-L156
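
Because parse_args takes an argv-style list, it is easy to exercise without touching sys.argv; the credentials and hostname below are placeholders:

from afraid import parse_args

args = parse_args(['alice', 's3cret', 'example.mooo.com', '--interval', '3600'])
print(args.user, args.hosts, args.interval)  # alice ['example.mooo.com'] 3600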

Record 4,321
repository_name: jssimporter/python-jss
func_path_in_repository: jss/jssobject.py
func_name: JSSObject.set_bool
language: python
whole_func_string / func_code_string:

def set_bool(self, location, value):
    """Set a boolean value.

    Casper booleans in XML are string literals of "true" or "false".
    This method sets the text value of "location" to the correct string
    representation of a boolean.

    Args:
        location: Element or a string path argument to find()
        value: Boolean or string value to set. (Accepts
            "true"/"True"/"TRUE"; all other strings are False).
    """
    element = self._handle_location(location)
    if isinstance(value, basestring):
        value = True if value.upper() == "TRUE" else False
    elif not isinstance(value, bool):
        raise ValueError
    if value is True:
        element.text = "true"
    else:
        element.text = "false"
def set_bool(self, location, value): """Set a boolean value. Casper booleans in XML are string literals of "true" or "false". This method sets the text value of "location" to the correct string representation of a boolean. Args: location: Element or a string path argument to find() value: Boolean or string value to set. (Accepts "true"/"True"/"TRUE"; all other strings are False). """ element = self._handle_location(location) if isinstance(value, basestring): value = True if value.upper() == "TRUE" else False elif not isinstance(value, bool): raise ValueError if value is True: element.text = "true" else: element.text = "false"
['def', 'set_bool', '(', 'self', ',', 'location', ',', 'value', ')', ':', 'element', '=', 'self', '.', '_handle_location', '(', 'location', ')', 'if', 'isinstance', '(', 'value', ',', 'basestring', ')', ':', 'value', '=', 'True', 'if', 'value', '.', 'upper', '(', ')', '==', '"TRUE"', 'else', 'False', 'elif', 'not', 'isinstance', '(', 'value', ',', 'bool', ')', ':', 'raise', 'ValueError', 'if', 'value', 'is', 'True', ':', 'element', '.', 'text', '=', '"true"', 'else', ':', 'element', '.', 'text', '=', '"false"']
Set a boolean value. Casper booleans in XML are string literals of "true" or "false". This method sets the text value of "location" to the correct string representation of a boolean. Args: location: Element or a string path argument to find() value: Boolean or string value to set. (Accepts "true"/"True"/"TRUE"; all other strings are False).
['Set', 'a', 'boolean', 'value', '.']
train
https://github.com/jssimporter/python-jss/blob/b95185d74e0c0531b0b563f280d4129e21d5fe5d/jss/jssobject.py#L391-L411
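A standalone sketch of the normalization rule set_bool applies, in Python 3 spelling (basestring in the original is the Python 2 equivalent of str):

def to_casper_bool(value):
    # Casper XML expects the literal strings "true"/"false".
    if isinstance(value, str):
        value = value.upper() == "TRUE"   # anything but TRUE/True/true is False
    elif not isinstance(value, bool):
        raise ValueError("expected a bool or a string")
    return "true" if value else "false"

assert to_casper_bool("TRUE") == "true"
assert to_casper_bool("nope") == "false"
assert to_casper_bool(False) == "false"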
4,322
tych0/xcffib
module/__init__.py
Connection.hoist_event
def hoist_event(self, e): """ Hoist an xcb_generic_event_t to the right xcffib structure. """ if e.response_type == 0: return self._process_error(ffi.cast("xcb_generic_error_t *", e)) # We mask off the high bit here because events sent with SendEvent have # this bit set. We don't actually care where the event came from, so we # just throw this away. Maybe we could expose this, if anyone actually # cares about it. event = self._event_offsets[e.response_type & 0x7f] buf = CffiUnpacker(e) return event(buf)
python
def hoist_event(self, e): """ Hoist an xcb_generic_event_t to the right xcffib structure. """ if e.response_type == 0: return self._process_error(ffi.cast("xcb_generic_error_t *", e)) # We mask off the high bit here because events sent with SendEvent have # this bit set. We don't actually care where the event came from, so we # just throw this away. Maybe we could expose this, if anyone actually # cares about it. event = self._event_offsets[e.response_type & 0x7f] buf = CffiUnpacker(e) return event(buf)
['def', 'hoist_event', '(', 'self', ',', 'e', ')', ':', 'if', 'e', '.', 'response_type', '==', '0', ':', 'return', 'self', '.', '_process_error', '(', 'ffi', '.', 'cast', '(', '"xcb_generic_error_t *"', ',', 'e', ')', ')', '# We mask off the high bit here because events sent with SendEvent have', "# this bit set. We don't actually care where the event came from, so we", '# just throw this away. Maybe we could expose this, if anyone actually', '# cares about it.', 'event', '=', 'self', '.', '_event_offsets', '[', 'e', '.', 'response_type', '&', '0x7f', ']', 'buf', '=', 'CffiUnpacker', '(', 'e', ')', 'return', 'event', '(', 'buf', ')']
Hoist an xcb_generic_event_t to the right xcffib structure.
['Hoist', 'an', 'xcb_generic_event_t', 'to', 'the', 'right', 'xcffib', 'structure', '.']
train
https://github.com/tych0/xcffib/blob/c9c50c3ce513b130821f430be78c4c733626c707/module/__init__.py#L681-L693
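The 0x7f mask in one line: events delivered via SendEvent arrive with the high bit (0x80) set, and masking it off recovers the plain event code used as the handler-table index:

EVENT_CODE = 22                    # e.g. ConfigureNotify in the core protocol
sent = 0x80 | EVENT_CODE           # the same event as delivered via SendEvent
assert sent & 0x7f == EVENT_CODE   # identical lookup index either way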
4,323
RJT1990/pyflux
pyflux/families/skewt.py
Skewt.approximating_model_reg
def approximating_model_reg(self, beta, T, Z, R, Q, h_approx, data, X, state_no): """ Creates approximating Gaussian state space model for Skewt measurement density Parameters ---------- beta : np.array Contains untransformed starting values for latent variables T, Z, R, Q : np.array State space matrices used in KFS algorithm h_approx : float The variance of the measurement density data: np.array The univariate time series data X: np.array The regressors state_no : int Number of states Returns ---------- H : np.array Approximating measurement variance matrix mu : np.array Approximating measurement constants """ H = np.ones(data.shape[0])*h_approx mu = np.zeros(data.shape[0]) return H, mu
python
def approximating_model_reg(self, beta, T, Z, R, Q, h_approx, data, X, state_no): """ Creates approximating Gaussian state space model for Skewt measurement density Parameters ---------- beta : np.array Contains untransformed starting values for latent variables T, Z, R, Q : np.array State space matrices used in KFS algorithm h_approx : float The variance of the measurement density data: np.array The univariate time series data X: np.array The regressors state_no : int Number of states Returns ---------- H : np.array Approximating measurement variance matrix mu : np.array Approximating measurement constants """ H = np.ones(data.shape[0])*h_approx mu = np.zeros(data.shape[0]) return H, mu
['def', 'approximating_model_reg', '(', 'self', ',', 'beta', ',', 'T', ',', 'Z', ',', 'R', ',', 'Q', ',', 'h_approx', ',', 'data', ',', 'X', ',', 'state_no', ')', ':', 'H', '=', 'np', '.', 'ones', '(', 'data', '.', 'shape', '[', '0', ']', ')', '*', 'h_approx', 'mu', '=', 'np', '.', 'zeros', '(', 'data', '.', 'shape', '[', '0', ']', ')', 'return', 'H', ',', 'mu']
Creates approximating Gaussian state space model for Skewt measurement density Parameters ---------- beta : np.array Contains untransformed starting values for latent variables T, Z, R, Q : np.array State space matrices used in KFS algorithm h_approx : float The variance of the measurement density data: np.array The univariate time series data X: np.array The regressors state_no : int Number of states Returns ---------- H : np.array Approximating measurement variance matrix mu : np.array Approximating measurement constants
['Creates', 'approximating', 'Gaussian', 'state', 'space', 'model', 'for', 'Skewt', 'measurement', 'density', 'Parameters', '----------', 'beta', ':', 'np', '.', 'array', 'Contains', 'untransformed', 'starting', 'values', 'for', 'latent', 'variables', 'T', 'Z', 'R', 'Q', ':', 'np', '.', 'array', 'State', 'space', 'matrices', 'used', 'in', 'KFS', 'algorithm', 'h_approx', ':', 'float', 'The', 'variance', 'of', 'the', 'measurement', 'density', 'data', ':', 'np', '.', 'array', 'The', 'univariate', 'time', 'series', 'data']
train
https://github.com/RJT1990/pyflux/blob/297f2afc2095acd97c12e827dd500e8ea5da0c0f/pyflux/families/skewt.py#L86-L121
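A quick check of what the method returns; only data.shape[0] and h_approx actually influence the result, since the other arguments are unused in the body:

import numpy as np

data = np.zeros(5)                      # a series of length 5
h_approx = 0.5
H = np.ones(data.shape[0]) * h_approx   # constant approximating variances
mu = np.zeros(data.shape[0])            # zero measurement constants
print(H)   # [0.5 0.5 0.5 0.5 0.5]
print(mu)  # [0. 0. 0. 0. 0.]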
4,324
pinax/pinax-blog
pinax/blog/parsers/creole_parser.py
HtmlEmitter.emit_node
def emit_node(self, node): """Emit a single node.""" emit = getattr(self, "%s_emit" % node.kind, self.default_emit) return emit(node)
python
def emit_node(self, node): """Emit a single node.""" emit = getattr(self, "%s_emit" % node.kind, self.default_emit) return emit(node)
['def', 'emit_node', '(', 'self', ',', 'node', ')', ':', 'emit', '=', 'getattr', '(', 'self', ',', '"%s_emit"', '%', 'node', '.', 'kind', ',', 'self', '.', 'default_emit', ')', 'return', 'emit', '(', 'node', ')']
Emit a single node.
['Emit', 'a', 'single', 'node', '.']
train
https://github.com/pinax/pinax-blog/blob/be1d64946381b47d197b258a488d5de56aacccce/pinax/blog/parsers/creole_parser.py#L142-L145
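The same getattr-based dispatch, reduced to a self-contained sketch: look up "<kind>_emit" on the instance and fall back to a default handler when no specialized emitter exists.

class Node:
    def __init__(self, kind, content=""):
        self.kind, self.content = kind, content

class Emitter:
    def default_emit(self, node):
        return "<no emitter for %s>" % node.kind
    def text_emit(self, node):
        return node.content
    def emit_node(self, node):
        emit = getattr(self, "%s_emit" % node.kind, self.default_emit)
        return emit(node)

e = Emitter()
print(e.emit_node(Node("text", "hello")))  # hello
print(e.emit_node(Node("table")))          # <no emitter for table>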
4,325
KarchinLab/probabilistic2020
prob2020/python/scores.py
read_vest_pickle
def read_vest_pickle(gname, score_dir): """Read in VEST scores for given gene. Parameters ---------- gname : str name of gene score_dir : str directory containing vest scores Returns ------- gene_vest : dict or None dict containing vest scores for gene. Returns None if not found. """ vest_path = os.path.join(score_dir, gname+".vest.pickle") if os.path.exists(vest_path): if sys.version_info < (3,): with open(vest_path) as handle: gene_vest = pickle.load(handle) else: with open(vest_path, 'rb') as handle: gene_vest = pickle.load(handle, encoding='latin-1') return gene_vest else: return None
python
def read_vest_pickle(gname, score_dir): """Read in VEST scores for given gene. Parameters ---------- gname : str name of gene score_dir : str directory containing vest scores Returns ------- gene_vest : dict or None dict containing vest scores for gene. Returns None if not found. """ vest_path = os.path.join(score_dir, gname+".vest.pickle") if os.path.exists(vest_path): if sys.version_info < (3,): with open(vest_path) as handle: gene_vest = pickle.load(handle) else: with open(vest_path, 'rb') as handle: gene_vest = pickle.load(handle, encoding='latin-1') return gene_vest else: return None
['def', 'read_vest_pickle', '(', 'gname', ',', 'score_dir', ')', ':', 'vest_path', '=', 'os', '.', 'path', '.', 'join', '(', 'score_dir', ',', 'gname', '+', '".vest.pickle"', ')', 'if', 'os', '.', 'path', '.', 'exists', '(', 'vest_path', ')', ':', 'if', 'sys', '.', 'version_info', '<', '(', '3', ',', ')', ':', 'with', 'open', '(', 'vest_path', ')', 'as', 'handle', ':', 'gene_vest', '=', 'pickle', '.', 'load', '(', 'handle', ')', 'else', ':', 'with', 'open', '(', 'vest_path', ',', "'rb'", ')', 'as', 'handle', ':', 'gene_vest', '=', 'pickle', '.', 'load', '(', 'handle', ',', 'encoding', '=', "'latin-1'", ')', 'return', 'gene_vest', 'else', ':', 'return', 'None']
Read in VEST scores for given gene. Parameters ---------- gname : str name of gene score_dir : str directory containing vest scores Returns ------- gene_vest : dict or None dict containing vest scores for gene. Returns None if not found.
['Read', 'in', 'VEST', 'scores', 'for', 'given', 'gene', '.']
train
https://github.com/KarchinLab/probabilistic2020/blob/5d70583b0a7c07cfe32e95f3a70e05df412acb84/prob2020/python/scores.py#L82-L107
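The version branch exists because pickles written by Python 2 may contain 8-bit strings; encoding='latin-1' lets Python 3 read those bytes losslessly. The same pattern, Python 3 side only:

import os
import pickle

def load_legacy_pickle(path):
    # Returns None when the file is missing, mirroring read_vest_pickle.
    if not os.path.exists(path):
        return None
    with open(path, 'rb') as handle:
        # latin-1 maps bytes 0-255 one-to-one, so py2 str data survives.
        return pickle.load(handle, encoding='latin-1')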
4,326
apache/incubator-mxnet
python/mxnet/gluon/data/dataset.py
Dataset.transform
def transform(self, fn, lazy=True): """Returns a new dataset with each sample transformed by the transformer function `fn`. Parameters ---------- fn : callable A transformer function that takes a sample as input and returns the transformed sample. lazy : bool, default True If False, transforms all samples at once. Otherwise, transforms each sample on demand. Note that if `fn` is stochastic, you must set lazy to True or you will get the same result on all epochs. Returns ------- Dataset The transformed dataset. """ trans = _LazyTransformDataset(self, fn) if lazy: return trans return SimpleDataset([i for i in trans])
python
def transform(self, fn, lazy=True): """Returns a new dataset with each sample transformed by the transformer function `fn`. Parameters ---------- fn : callable A transformer function that takes a sample as input and returns the transformed sample. lazy : bool, default True If False, transforms all samples at once. Otherwise, transforms each sample on demand. Note that if `fn` is stochastic, you must set lazy to True or you will get the same result on all epochs. Returns ------- Dataset The transformed dataset. """ trans = _LazyTransformDataset(self, fn) if lazy: return trans return SimpleDataset([i for i in trans])
['def', 'transform', '(', 'self', ',', 'fn', ',', 'lazy', '=', 'True', ')', ':', 'trans', '=', '_LazyTransformDataset', '(', 'self', ',', 'fn', ')', 'if', 'lazy', ':', 'return', 'trans', 'return', 'SimpleDataset', '(', '[', 'i', 'for', 'i', 'in', 'trans', ']', ')']
Returns a new dataset with each sample transformed by the transformer function `fn`. Parameters ---------- fn : callable A transformer function that takes a sample as input and returns the transformed sample. lazy : bool, default True If False, transforms all samples at once. Otherwise, transforms each sample on demand. Note that if `fn` is stochastic, you must set lazy to True or you will get the same result on all epochs. Returns ------- Dataset The transformed dataset.
['Returns', 'a', 'new', 'dataset', 'with', 'each', 'sample', 'transformed', 'by', 'the', 'transformer', 'function', 'fn', '.']
train
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/gluon/data/dataset.py#L43-L66
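A hedged usage sketch (assumes mxnet is installed; SimpleDataset is the in-memory dataset from the same module):

from mxnet.gluon.data import SimpleDataset

ds = SimpleDataset(list(range(5)))
lazy = ds.transform(lambda x: x * 10)               # applied on each access
eager = ds.transform(lambda x: x * 10, lazy=False)  # applied once, up front
print(lazy[3], eager[3])  # 30 30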
4,327
IBM/ibm-cos-sdk-python-s3transfer
ibm_s3transfer/aspera/futures.py
AsperaTransferListener.stop
def stop(self, free_resource=False): ''' send a stop transfer request to the Aspera sdk, can be done for: cancel - stop an in progress transfer free_resource - request to the Aspera sdk free resources related to transfer_id ''' if not self.is_stopped(): self._is_stopping = True try: if free_resource or self.is_running(False): if not free_resource: logger.info("StopTransfer called - %s" % self.get_transfer_id()) self._is_stopped = faspmanager2.stopTransfer(self.get_transfer_id()) if not free_resource: logger.info("StopTransfer returned %s - %s" % ( self._is_stopped, self.get_transfer_id())) except Exception as ex: self.notify_exception(ex) self._is_stopping = False return self.is_stopped(False)
python
def stop(self, free_resource=False): ''' send a stop transfer request to the Aspera sdk, can be done for: cancel - stop an in progress transfer free_resource - request to the Aspera sdk free resources related to transfer_id ''' if not self.is_stopped(): self._is_stopping = True try: if free_resource or self.is_running(False): if not free_resource: logger.info("StopTransfer called - %s" % self.get_transfer_id()) self._is_stopped = faspmanager2.stopTransfer(self.get_transfer_id()) if not free_resource: logger.info("StopTransfer returned %s - %s" % ( self._is_stopped, self.get_transfer_id())) except Exception as ex: self.notify_exception(ex) self._is_stopping = False return self.is_stopped(False)
['def', 'stop', '(', 'self', ',', 'free_resource', '=', 'False', ')', ':', 'if', 'not', 'self', '.', 'is_stopped', '(', ')', ':', 'self', '.', '_is_stopping', '=', 'True', 'try', ':', 'if', 'free_resource', 'or', 'self', '.', 'is_running', '(', 'False', ')', ':', 'if', 'not', 'free_resource', ':', 'logger', '.', 'info', '(', '"StopTransfer called - %s"', '%', 'self', '.', 'get_transfer_id', '(', ')', ')', 'self', '.', '_is_stopped', '=', 'faspmanager2', '.', 'stopTransfer', '(', 'self', '.', 'get_transfer_id', '(', ')', ')', 'if', 'not', 'free_resource', ':', 'logger', '.', 'info', '(', '"StopTransfer returned %s - %s"', '%', '(', 'self', '.', '_is_stopped', ',', 'self', '.', 'get_transfer_id', '(', ')', ')', ')', 'except', 'Exception', 'as', 'ex', ':', 'self', '.', 'notify_exception', '(', 'ex', ')', 'self', '.', '_is_stopping', '=', 'False', 'return', 'self', '.', 'is_stopped', '(', 'False', ')']
send a stop transfer request to the Aspera sdk, can be done for: cancel - stop an in progress transfer free_resource - request to the Aspera sdk free resources related to transfer_id
['send', 'a', 'stop', 'transfer', 'request', 'to', 'the', 'Aspera', 'sdk', 'can', 'be', 'done', 'for', ':', 'cancel', '-', 'stop', 'an', 'in', 'progress', 'transfer', 'free_resource', '-', 'request', 'to', 'the', 'Aspera', 'sdk', 'free', 'resources', 'related', 'to', 'transfer_id']
train
https://github.com/IBM/ibm-cos-sdk-python-s3transfer/blob/24ba53137213e26e6b8fc2c3ec1e8198d507d22b/ibm_s3transfer/aspera/futures.py#L254-L274
4,328
FutunnOpen/futuquant
futuquant/quote/open_quote_context.py
OpenQuoteContext.get_order_book
def get_order_book(self, code): """ Get real-time order book data :param code: stock code :return: (ret, data) ret == RET_OK returns a dict, formatted as follows ret != RET_OK returns an error string {'code': stock code 'Ask':[ (ask_price1, ask_volume1, order_num), (ask_price2, ask_volume2, order_num), ...] 'Bid': [ (bid_price1, bid_volume1, order_num), (bid_price2, bid_volume2, order_num), ...] } 'Ask' is the sell side, 'Bid' the buy side. Each tuple means (order price, order volume, number of orders) """ if code is None or is_str(code) is False: error_str = ERROR_STR_PREFIX + "the type of code param is wrong" return RET_ERROR, error_str query_processor = self._get_sync_query_processor( OrderBookQuery.pack_req, OrderBookQuery.unpack_rsp, ) kargs = { "code": code, "conn_id": self.get_sync_conn_id() } ret_code, msg, orderbook = query_processor(**kargs) if ret_code == RET_ERROR: return ret_code, msg return RET_OK, orderbook
python
def get_order_book(self, code): """ Get real-time order book data :param code: stock code :return: (ret, data) ret == RET_OK returns a dict, formatted as follows ret != RET_OK returns an error string {'code': stock code 'Ask':[ (ask_price1, ask_volume1, order_num), (ask_price2, ask_volume2, order_num), ...] 'Bid': [ (bid_price1, bid_volume1, order_num), (bid_price2, bid_volume2, order_num), ...] } 'Ask' is the sell side, 'Bid' the buy side. Each tuple means (order price, order volume, number of orders) """ if code is None or is_str(code) is False: error_str = ERROR_STR_PREFIX + "the type of code param is wrong" return RET_ERROR, error_str query_processor = self._get_sync_query_processor( OrderBookQuery.pack_req, OrderBookQuery.unpack_rsp, ) kargs = { "code": code, "conn_id": self.get_sync_conn_id() } ret_code, msg, orderbook = query_processor(**kargs) if ret_code == RET_ERROR: return ret_code, msg return RET_OK, orderbook
['def', 'get_order_book', '(', 'self', ',', 'code', ')', ':', 'if', 'code', 'is', 'None', 'or', 'is_str', '(', 'code', ')', 'is', 'False', ':', 'error_str', '=', 'ERROR_STR_PREFIX', '+', '"the type of code param is wrong"', 'return', 'RET_ERROR', ',', 'error_str', 'query_processor', '=', 'self', '.', '_get_sync_query_processor', '(', 'OrderBookQuery', '.', 'pack_req', ',', 'OrderBookQuery', '.', 'unpack_rsp', ',', ')', 'kargs', '=', '{', '"code"', ':', 'code', ',', '"conn_id"', ':', 'self', '.', 'get_sync_conn_id', '(', ')', '}', 'ret_code', ',', 'msg', ',', 'orderbook', '=', 'query_processor', '(', '*', '*', 'kargs', ')', 'if', 'ret_code', '==', 'RET_ERROR', ':', 'return', 'ret_code', ',', 'msg', 'return', 'RET_OK', ',', 'orderbook']
Get real-time order book data :param code: stock code :return: (ret, data) ret == RET_OK returns a dict, formatted as follows ret != RET_OK returns an error string {'code': stock code 'Ask':[ (ask_price1, ask_volume1, order_num), (ask_price2, ask_volume2, order_num), ...] 'Bid': [ (bid_price1, bid_volume1, order_num), (bid_price2, bid_volume2, order_num), ...] } 'Ask' is the sell side, 'Bid' the buy side. Each tuple means (order price, order volume, number of orders)
['Get', 'real', '-', 'time', 'order', 'book', 'data']
train
https://github.com/FutunnOpen/futuquant/blob/1512b321845f92ec9c578ce2689aa4e8482669e4/futuquant/quote/open_quote_context.py#L1410-L1445
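A hedged usage sketch; it assumes a FutuOpenD gateway listening on the default local port, and that RET_OK compares equal to 0:

from futuquant import OpenQuoteContext

quote_ctx = OpenQuoteContext(host='127.0.0.1', port=11111)
ret, data = quote_ctx.get_order_book('HK.00700')
if ret == 0:  # RET_OK
    print(data['Ask'][:3], data['Bid'][:3])
quote_ctx.close()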
4,329
KenjiTakahashi/td
td/model.py
Model._modifyInternal
def _modifyInternal(self, *, sort=None, purge=False, done=None): """Creates a whole new database from existing one, based on given modifiers. :sort: pattern should look like this: ([(<index>, True|False)], {<level_index>: [(<index>, True|False)]}), where True|False indicate whether to reverse or not, <index> are one of Model.indexes and <level_index> indicate a number of level to sort. Of course, the lists above may contain multiple items. :done: pattern looks similar to :sort:, except that it has additional <regexp> values and that True|False means to mark as done|undone. @note: Should not be used directly. It was defined here, because :save: decorator needs undecorated version of Model.modify. :sort: Pattern on which to sort the database. :purge: Whether to purge done items. :done: Pattern on which to mark items as done/undone. :returns: New database, modified according to supplied arguments. """ sortAll, sortLevels = sort is not None and sort or ([], {}) doneAll, doneLevels = done is not None and done or ([], {}) def _mark(v, i): if done is None: return v[:4] def _mark_(index, regexp, du): if du is None: return v[:4] if index is None: for v_ in v[:3]: if regexp is None or re.match(regexp, str(v_)): return v[:3] + [du] return v[:4] if regexp is None or re.match(regexp, str(v[index])): return v[:3] + [du] try: for doneLevel in doneLevels[i]: result = _mark_(*doneLevel) if result is not None: return result except KeyError: pass for doneAll_ in doneAll: result = _mark_(*doneAll_) if result is None: return v[:4] return result def _modify(submodel, i): _new = list() for v in submodel: if purge: if not v[3]: _new.append(_mark(v, i) + [_modify(v[4], i + 1)]) else: _new.append(_mark(v, i) + [_modify(v[4], i + 1)]) levels = sortLevels.get(i) or sortLevels.get(str(i)) for index, reverse in levels or sortAll: _new = sorted(_new, key=lambda e: e[index], reverse=reverse) return _new return _modify(self.data, 1)
python
def _modifyInternal(self, *, sort=None, purge=False, done=None): """Creates a whole new database from existing one, based on given modifiers. :sort: pattern should look like this: ([(<index>, True|False)], {<level_index>: [(<index>, True|False)]}), where True|False indicate whether to reverse or not, <index> are one of Model.indexes and <level_index> indicate a number of level to sort. Of course, the lists above may contain multiple items. :done: pattern looks similar to :sort:, except that it has additional <regexp> values and that True|False means to mark as done|undone. @note: Should not be used directly. It was defined here, because :save: decorator needs undecorated version of Model.modify. :sort: Pattern on which to sort the database. :purge: Whether to purge done items. :done: Pattern on which to mark items as done/undone. :returns: New database, modified according to supplied arguments. """ sortAll, sortLevels = sort is not None and sort or ([], {}) doneAll, doneLevels = done is not None and done or ([], {}) def _mark(v, i): if done is None: return v[:4] def _mark_(index, regexp, du): if du is None: return v[:4] if index is None: for v_ in v[:3]: if regexp is None or re.match(regexp, str(v_)): return v[:3] + [du] return v[:4] if regexp is None or re.match(regexp, str(v[index])): return v[:3] + [du] try: for doneLevel in doneLevels[i]: result = _mark_(*doneLevel) if result is not None: return result except KeyError: pass for doneAll_ in doneAll: result = _mark_(*doneAll_) if result is None: return v[:4] return result def _modify(submodel, i): _new = list() for v in submodel: if purge: if not v[3]: _new.append(_mark(v, i) + [_modify(v[4], i + 1)]) else: _new.append(_mark(v, i) + [_modify(v[4], i + 1)]) levels = sortLevels.get(i) or sortLevels.get(str(i)) for index, reverse in levels or sortAll: _new = sorted(_new, key=lambda e: e[index], reverse=reverse) return _new return _modify(self.data, 1)
['def', '_modifyInternal', '(', 'self', ',', '*', ',', 'sort', '=', 'None', ',', 'purge', '=', 'False', ',', 'done', '=', 'None', ')', ':', 'sortAll', ',', 'sortLevels', '=', 'sort', 'is', 'not', 'None', 'and', 'sort', 'or', '(', '[', ']', ',', '{', '}', ')', 'doneAll', ',', 'doneLevels', '=', 'done', 'is', 'not', 'None', 'and', 'done', 'or', '(', '[', ']', ',', '{', '}', ')', 'def', '_mark', '(', 'v', ',', 'i', ')', ':', 'if', 'done', 'is', 'None', ':', 'return', 'v', '[', ':', '4', ']', 'def', '_mark_', '(', 'index', ',', 'regexp', ',', 'du', ')', ':', 'if', 'du', 'is', 'None', ':', 'return', 'v', '[', ':', '4', ']', 'if', 'index', 'is', 'None', ':', 'for', 'v_', 'in', 'v', '[', ':', '3', ']', ':', 'if', 'regexp', 'is', 'None', 'or', 're', '.', 'match', '(', 'regexp', ',', 'str', '(', 'v_', ')', ')', ':', 'return', 'v', '[', ':', '3', ']', '+', '[', 'du', ']', 'return', 'v', '[', ':', '4', ']', 'if', 'regexp', 'is', 'None', 'or', 're', '.', 'match', '(', 'regexp', ',', 'str', '(', 'v', '[', 'index', ']', ')', ')', ':', 'return', 'v', '[', ':', '3', ']', '+', '[', 'du', ']', 'try', ':', 'for', 'doneLevel', 'in', 'doneLevels', '[', 'i', ']', ':', 'result', '=', '_mark_', '(', '*', 'doneLevel', ')', 'if', 'result', 'is', 'not', 'None', ':', 'return', 'result', 'except', 'KeyError', ':', 'pass', 'for', 'doneAll_', 'in', 'doneAll', ':', 'result', '=', '_mark_', '(', '*', 'doneAll_', ')', 'if', 'result', 'is', 'None', ':', 'return', 'v', '[', ':', '4', ']', 'return', 'result', 'def', '_modify', '(', 'submodel', ',', 'i', ')', ':', '_new', '=', 'list', '(', ')', 'for', 'v', 'in', 'submodel', ':', 'if', 'purge', ':', 'if', 'not', 'v', '[', '3', ']', ':', '_new', '.', 'append', '(', '_mark', '(', 'v', ',', 'i', ')', '+', '[', '_modify', '(', 'v', '[', '4', ']', ',', 'i', '+', '1', ')', ']', ')', 'else', ':', '_new', '.', 'append', '(', '_mark', '(', 'v', ',', 'i', ')', '+', '[', '_modify', '(', 'v', '[', '4', ']', ',', 'i', '+', '1', ')', ']', ')', 'levels', '=', 'sortLevels', '.', 'get', '(', 'i', ')', 'or', 'sortLevels', '.', 'get', '(', 'str', '(', 'i', ')', ')', 'for', 'index', ',', 'reverse', 'in', 'levels', 'or', 'sortAll', ':', '_new', '=', 'sorted', '(', '_new', ',', 'key', '=', 'lambda', 'e', ':', 'e', '[', 'index', ']', ',', 'reverse', '=', 'reverse', ')', 'return', '_new', 'return', '_modify', '(', 'self', '.', 'data', ',', '1', ')']
Creates a whole new database from existing one, based on given modifiers. :sort: pattern should look like this: ([(<index>, True|False)], {<level_index>: [(<index>, True|False)]}), where True|False indicate whether to reverse or not, <index> are one of Model.indexes and <level_index> indicate a number of level to sort. Of course, the lists above may contain multiple items. :done: pattern looks similar to :sort:, except that it has additional <regexp> values and that True|False means to mark as done|undone. @note: Should not be used directly. It was defined here, because :save: decorator needs undecorated version of Model.modify. :sort: Pattern on which to sort the database. :purge: Whether to purge done items. :done: Pattern on which to mark items as done/undone. :returns: New database, modified according to supplied arguments.
['Creates', 'a', 'whole', 'new', 'database', 'from', 'existing', 'one', 'based', 'on', 'given', 'modifiers', '.']
train
https://github.com/KenjiTakahashi/td/blob/7311eabc63efe6fe6600687c3026f0837454c2e4/td/model.py#L281-L346
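The modifier patterns described in the docstring are plain tuples and dicts; the values below are illustrative only (the indexes and the regexp are made up):

# Sort every level by column 0 ascending; additionally sort level 2
# by column 1 descending.
sort = ([(0, False)], {2: [(1, True)]})

# Mark any item whose first three fields match 'urgent' as done; per the
# docstring and _mark_, each entry is (index, regexp, done_or_undone).
done = ([(None, r'urgent', True)], {})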
4,330
wbond/vat_moss-python
vat_moss/geoip2.py
calculate_rate
def calculate_rate(country_code, subdivision, city, address_country_code=None, address_exception=None): """ Calculates the VAT rate from the data returned by a GeoLite2 database :param country_code: Two-character country code :param subdivision: The first subdivision name :param city: The city name :param address_country_code: The user's country_code, as detected from billing_address or declared_residence. This prevents an UndefinitiveError from being raised. :param address_exception: The user's exception name, as detected from billing_address or declared_residence. This prevents an UndefinitiveError from being raised. :raises: ValueError - if country code is not two characters, or subdivision or city are not strings UndefinitiveError - when no address_country_code and address_exception are provided and the geoip2 information is not specific enough :return: A tuple of (Decimal percentage rate, country code, exception name [or None]) """ if not country_code or not isinstance(country_code, str_cls) or len(country_code) != 2: raise ValueError('Invalidly formatted country code') if not isinstance(subdivision, str_cls): raise ValueError('Subdivision is not a string') if not isinstance(city, str_cls): raise ValueError('City is not a string') country_code = country_code.upper() subdivision = subdivision.lower() city = city.lower() if country_code not in rates.BY_COUNTRY: return (Decimal('0.0'), country_code, None) country_default = rates.BY_COUNTRY[country_code]['rate'] if country_code not in GEOIP2_EXCEPTIONS: return (country_default, country_code, None) exceptions = GEOIP2_EXCEPTIONS[country_code] for matcher in exceptions: # Subdivision-only match if isinstance(matcher, str_cls): sub_match = matcher city_match = None else: sub_match, city_match = matcher if sub_match != subdivision: continue if city_match and city_match != city: continue info = exceptions[matcher] exception_name = info['name'] if not info['definitive']: if address_country_code is None: raise UndefinitiveError('It is not possible to determine the users VAT rates based on the information provided') if address_country_code != country_code: continue if address_exception != exception_name: continue rate = rates.BY_COUNTRY[country_code]['exceptions'][exception_name] return (rate, country_code, exception_name) return (country_default, country_code, None)
python
def calculate_rate(country_code, subdivision, city, address_country_code=None, address_exception=None): """ Calculates the VAT rate from the data returned by a GeoLite2 database :param country_code: Two-character country code :param subdivision: The first subdivision name :param city: The city name :param address_country_code: The user's country_code, as detected from billing_address or declared_residence. This prevents an UndefinitiveError from being raised. :param address_exception: The user's exception name, as detected from billing_address or declared_residence. This prevents an UndefinitiveError from being raised. :raises: ValueError - if country code is not two characters, or subdivision or city are not strings UndefinitiveError - when no address_country_code and address_exception are provided and the geoip2 information is not specific enough :return: A tuple of (Decimal percentage rate, country code, exception name [or None]) """ if not country_code or not isinstance(country_code, str_cls) or len(country_code) != 2: raise ValueError('Invalidly formatted country code') if not isinstance(subdivision, str_cls): raise ValueError('Subdivision is not a string') if not isinstance(city, str_cls): raise ValueError('City is not a string') country_code = country_code.upper() subdivision = subdivision.lower() city = city.lower() if country_code not in rates.BY_COUNTRY: return (Decimal('0.0'), country_code, None) country_default = rates.BY_COUNTRY[country_code]['rate'] if country_code not in GEOIP2_EXCEPTIONS: return (country_default, country_code, None) exceptions = GEOIP2_EXCEPTIONS[country_code] for matcher in exceptions: # Subdivision-only match if isinstance(matcher, str_cls): sub_match = matcher city_match = None else: sub_match, city_match = matcher if sub_match != subdivision: continue if city_match and city_match != city: continue info = exceptions[matcher] exception_name = info['name'] if not info['definitive']: if address_country_code is None: raise UndefinitiveError('It is not possible to determine the users VAT rates based on the information provided') if address_country_code != country_code: continue if address_exception != exception_name: continue rate = rates.BY_COUNTRY[country_code]['exceptions'][exception_name] return (rate, country_code, exception_name) return (country_default, country_code, None)
['def', 'calculate_rate', '(', 'country_code', ',', 'subdivision', ',', 'city', ',', 'address_country_code', '=', 'None', ',', 'address_exception', '=', 'None', ')', ':', 'if', 'not', 'country_code', 'or', 'not', 'isinstance', '(', 'country_code', ',', 'str_cls', ')', 'or', 'len', '(', 'country_code', ')', '!=', '2', ':', 'raise', 'ValueError', '(', "'Invalidly formatted country code'", ')', 'if', 'not', 'isinstance', '(', 'subdivision', ',', 'str_cls', ')', ':', 'raise', 'ValueError', '(', "'Subdivision is not a string'", ')', 'if', 'not', 'isinstance', '(', 'city', ',', 'str_cls', ')', ':', 'raise', 'ValueError', '(', "'City is not a string'", ')', 'country_code', '=', 'country_code', '.', 'upper', '(', ')', 'subdivision', '=', 'subdivision', '.', 'lower', '(', ')', 'city', '=', 'city', '.', 'lower', '(', ')', 'if', 'country_code', 'not', 'in', 'rates', '.', 'BY_COUNTRY', ':', 'return', '(', 'Decimal', '(', "'0.0'", ')', ',', 'country_code', ',', 'None', ')', 'country_default', '=', 'rates', '.', 'BY_COUNTRY', '[', 'country_code', ']', '[', "'rate'", ']', 'if', 'country_code', 'not', 'in', 'GEOIP2_EXCEPTIONS', ':', 'return', '(', 'country_default', ',', 'country_code', ',', 'None', ')', 'exceptions', '=', 'GEOIP2_EXCEPTIONS', '[', 'country_code', ']', 'for', 'matcher', 'in', 'exceptions', ':', '# Subdivision-only match', 'if', 'isinstance', '(', 'matcher', ',', 'str_cls', ')', ':', 'sub_match', '=', 'matcher', 'city_match', '=', 'None', 'else', ':', 'sub_match', ',', 'city_match', '=', 'matcher', 'if', 'sub_match', '!=', 'subdivision', ':', 'continue', 'if', 'city_match', 'and', 'city_match', '!=', 'city', ':', 'continue', 'info', '=', 'exceptions', '[', 'matcher', ']', 'exception_name', '=', 'info', '[', "'name'", ']', 'if', 'not', 'info', '[', "'definitive'", ']', ':', 'if', 'address_country_code', 'is', 'None', ':', 'raise', 'UndefinitiveError', '(', "'It is not possible to determine the users VAT rates based on the information provided'", ')', 'if', 'address_country_code', '!=', 'country_code', ':', 'continue', 'if', 'address_exception', '!=', 'exception_name', ':', 'continue', 'rate', '=', 'rates', '.', 'BY_COUNTRY', '[', 'country_code', ']', '[', "'exceptions'", ']', '[', 'exception_name', ']', 'return', '(', 'rate', ',', 'country_code', ',', 'exception_name', ')', 'return', '(', 'country_default', ',', 'country_code', ',', 'None', ')']
Calculates the VAT rate from the data returned by a GeoLite2 database :param country_code: Two-character country code :param subdivision: The first subdivision name :param city: The city name :param address_country_code: The user's country_code, as detected from billing_address or declared_residence. This prevents an UndefinitiveError from being raised. :param address_exception: The user's exception name, as detected from billing_address or declared_residence. This prevents an UndefinitiveError from being raised. :raises: ValueError - if country code is not two characters, or subdivision or city are not strings UndefinitiveError - when no address_country_code and address_exception are provided and the geoip2 information is not specific enough :return: A tuple of (Decimal percentage rate, country code, exception name [or None])
['Calculates', 'the', 'VAT', 'rate', 'from', 'the', 'data', 'returned', 'by', 'a', 'GeoLite2', 'database']
train
https://github.com/wbond/vat_moss-python/blob/5089dcf036eb2e9abc58e78186fd46b522a50620/vat_moss/geoip2.py#L17-L99
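A hedged usage sketch (assumes vat_moss is installed; the concrete rate comes from the bundled tables, so the printed value is illustrative):

from vat_moss.geoip2 import calculate_rate

rate, country_code, exception = calculate_rate('DE', 'Berlin', 'Berlin')
print(rate, country_code, exception)  # e.g. Decimal('0.19') DE None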
4,331
pylast/pylast
src/pylast/__init__.py
_Network.scrobble
def scrobble( self, artist, title, timestamp, album=None, album_artist=None, track_number=None, duration=None, stream_id=None, context=None, mbid=None, ): """Used to add a track-play to a user's profile. Parameters: artist (Required) : The artist name. title (Required) : The track name. timestamp (Required) : The time the track started playing, in UNIX timestamp format (integer number of seconds since 00:00:00, January 1st 1970 UTC). This must be in the UTC time zone. album (Optional) : The album name. album_artist (Optional) : The album artist - if this differs from the track artist. context (Optional) : Sub-client version (not public, only enabled for certain API keys) stream_id (Optional) : The stream id for this track received from the radio.getPlaylist service. track_number (Optional) : The track number of the track on the album. mbid (Optional) : The MusicBrainz Track ID. duration (Optional) : The length of the track in seconds. """ return self.scrobble_many( ( { "artist": artist, "title": title, "timestamp": timestamp, "album": album, "album_artist": album_artist, "track_number": track_number, "duration": duration, "stream_id": stream_id, "context": context, "mbid": mbid, }, ) )
python
def scrobble( self, artist, title, timestamp, album=None, album_artist=None, track_number=None, duration=None, stream_id=None, context=None, mbid=None, ): """Used to add a track-play to a user's profile. Parameters: artist (Required) : The artist name. title (Required) : The track name. timestamp (Required) : The time the track started playing, in UNIX timestamp format (integer number of seconds since 00:00:00, January 1st 1970 UTC). This must be in the UTC time zone. album (Optional) : The album name. album_artist (Optional) : The album artist - if this differs from the track artist. context (Optional) : Sub-client version (not public, only enabled for certain API keys) stream_id (Optional) : The stream id for this track received from the radio.getPlaylist service. track_number (Optional) : The track number of the track on the album. mbid (Optional) : The MusicBrainz Track ID. duration (Optional) : The length of the track in seconds. """ return self.scrobble_many( ( { "artist": artist, "title": title, "timestamp": timestamp, "album": album, "album_artist": album_artist, "track_number": track_number, "duration": duration, "stream_id": stream_id, "context": context, "mbid": mbid, }, ) )
['def', 'scrobble', '(', 'self', ',', 'artist', ',', 'title', ',', 'timestamp', ',', 'album', '=', 'None', ',', 'album_artist', '=', 'None', ',', 'track_number', '=', 'None', ',', 'duration', '=', 'None', ',', 'stream_id', '=', 'None', ',', 'context', '=', 'None', ',', 'mbid', '=', 'None', ',', ')', ':', 'return', 'self', '.', 'scrobble_many', '(', '(', '{', '"artist"', ':', 'artist', ',', '"title"', ':', 'title', ',', '"timestamp"', ':', 'timestamp', ',', '"album"', ':', 'album', ',', '"album_artist"', ':', 'album_artist', ',', '"track_number"', ':', 'track_number', ',', '"duration"', ':', 'duration', ',', '"stream_id"', ':', 'stream_id', ',', '"context"', ':', 'context', ',', '"mbid"', ':', 'mbid', ',', '}', ',', ')', ')']
Used to add a track-play to a user's profile. Parameters: artist (Required) : The artist name. title (Required) : The track name. timestamp (Required) : The time the track started playing, in UNIX timestamp format (integer number of seconds since 00:00:00, January 1st 1970 UTC). This must be in the UTC time zone. album (Optional) : The album name. album_artist (Optional) : The album artist - if this differs from the track artist. context (Optional) : Sub-client version (not public, only enabled for certain API keys) stream_id (Optional) : The stream id for this track received from the radio.getPlaylist service. track_number (Optional) : The track number of the track on the album. mbid (Optional) : The MusicBrainz Track ID. duration (Optional) : The length of the track in seconds.
['Used', 'to', 'add', 'a', 'track', '-', 'play', 'to', 'a', 'user', 's', 'profile', '.']
train
https://github.com/pylast/pylast/blob/a52f66d316797fc819b5f1d186d77f18ba97b4ff/src/pylast/__init__.py#L568-L618
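A hedged usage sketch; the credentials are placeholders, and pylast.md5 is the password-hashing helper the library ships:

import time
import pylast

network = pylast.LastFMNetwork(
    api_key='API_KEY', api_secret='API_SECRET',
    username='user', password_hash=pylast.md5('password'))
network.scrobble(artist='Nirvana', title='Lithium',
                 timestamp=int(time.time()))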
4,332
RudolfCardinal/pythonlib
cardinal_pythonlib/rnc_web.py
get_png_data_url
def get_png_data_url(blob: Optional[bytes]) -> str: """ Converts a PNG blob into a local URL encapsulating the PNG. """ return BASE64_PNG_URL_PREFIX + base64.b64encode(blob).decode('ascii')
python
def get_png_data_url(blob: Optional[bytes]) -> str: """ Converts a PNG blob into a local URL encapsulating the PNG. """ return BASE64_PNG_URL_PREFIX + base64.b64encode(blob).decode('ascii')
['def', 'get_png_data_url', '(', 'blob', ':', 'Optional', '[', 'bytes', ']', ')', '->', 'str', ':', 'return', 'BASE64_PNG_URL_PREFIX', '+', 'base64', '.', 'b64encode', '(', 'blob', ')', '.', 'decode', '(', "'ascii'", ')']
Converts a PNG blob into a local URL encapsulating the PNG.
['Converts', 'a', 'PNG', 'blob', 'into', 'a', 'local', 'URL', 'encapsulating', 'the', 'PNG', '.']
train
https://github.com/RudolfCardinal/pythonlib/blob/0b84cb35f38bd7d8723958dae51b480a829b7227/cardinal_pythonlib/rnc_web.py#L401-L405
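A standalone equivalent; the concrete prefix value is an assumption spelled out inline (the library keeps it in BASE64_PNG_URL_PREFIX):

import base64

def png_data_url(blob: bytes) -> str:
    # 'data:image/png;base64,' is the standard data-URL prefix for PNG images.
    return 'data:image/png;base64,' + base64.b64encode(blob).decode('ascii')

print(png_data_url(b'\x89PNG\r\n\x1a\n')[:40])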
4,333
xypnox/email_purifier
epurifier/email_checker.py
EmailPurifier.CheckEmail
def CheckEmail(self, email, checkTypo=False): '''Checks whether a single email address is valid''' contents = email.split('@') if len(contents) == 2: if contents[1] in self.valid: return True return False
python
def CheckEmail(self, email, checkTypo=False): '''Checks whether a single email address is valid''' contents = email.split('@') if len(contents) == 2: if contents[1] in self.valid: return True return False
['def', 'CheckEmail', '(', 'self', ',', 'email', ',', 'checkTypo', '=', 'False', ')', ':', 'contents', '=', 'email', '.', 'split', '(', "'@'", ')', 'if', 'len', '(', 'contents', ')', '==', '2', ':', 'if', 'contents', '[', '1', ']', 'in', 'self', '.', 'valid', ':', 'return', 'True', 'return', 'False']
Checks whether a single email address is valid
['Checks', 'whether', 'a', 'single', 'email', 'address', 'is', 'valid']
train
https://github.com/xypnox/email_purifier/blob/a9ecde9c5293b5c283e0c5b4cf8744c76418fb6f/epurifier/email_checker.py#L38-L44
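The check reduced to a self-contained sketch: split on '@' and test the domain against an allowlist (the domains stand in for self.valid):

VALID_DOMAINS = {'gmail.com', 'outlook.com'}

def check_email(email):
    contents = email.split('@')
    return len(contents) == 2 and contents[1] in VALID_DOMAINS

assert check_email('a@gmail.com')
assert not check_email('a@gmial.com')    # unknown domain fails the allowlist
assert not check_email('a@b@gmail.com')  # more than one '@' fails the split test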
4,334
T-002/pycast
pycast/methods/exponentialsmoothing.py
HoltMethod._get_parameter_intervals
def _get_parameter_intervals(self): """Returns the intervals for the method's parameters. Only parameters with defined intervals can be used for optimization! :return: Returns a dictionary containing the parameter intervals, using the parameter name as key, while the value has the following format: [minValue, maxValue, minIntervalClosed, maxIntervalClosed] - minValue Minimal value for the parameter - maxValue Maximal value for the parameter - minIntervalClosed :py:const:`True`, if minValue represents a valid value for the parameter. :py:const:`False` otherwise. - maxIntervalClosed: :py:const:`True`, if maxValue represents a valid value for the parameter. :py:const:`False` otherwise. :rtype: dictionary """ parameterIntervals = {} parameterIntervals["smoothingFactor"] = [0.0, 1.0, False, False] parameterIntervals["trendSmoothingFactor"] = [0.0, 1.0, False, False] return parameterIntervals
python
def _get_parameter_intervals(self): """Returns the intervals for the method's parameters. Only parameters with defined intervals can be used for optimization! :return: Returns a dictionary containing the parameter intervals, using the parameter name as key, while the value has the following format: [minValue, maxValue, minIntervalClosed, maxIntervalClosed] - minValue Minimal value for the parameter - maxValue Maximal value for the parameter - minIntervalClosed :py:const:`True`, if minValue represents a valid value for the parameter. :py:const:`False` otherwise. - maxIntervalClosed: :py:const:`True`, if maxValue represents a valid value for the parameter. :py:const:`False` otherwise. :rtype: dictionary """ parameterIntervals = {} parameterIntervals["smoothingFactor"] = [0.0, 1.0, False, False] parameterIntervals["trendSmoothingFactor"] = [0.0, 1.0, False, False] return parameterIntervals
['def', '_get_parameter_intervals', '(', 'self', ')', ':', 'parameterIntervals', '=', '{', '}', 'parameterIntervals', '[', '"smoothingFactor"', ']', '=', '[', '0.0', ',', '1.0', ',', 'False', ',', 'False', ']', 'parameterIntervals', '[', '"trendSmoothingFactor"', ']', '=', '[', '0.0', ',', '1.0', ',', 'False', ',', 'False', ']', 'return', 'parameterIntervals']
Returns the intervals for the method's parameters. Only parameters with defined intervals can be used for optimization! :return: Returns a dictionary containing the parameter intervals, using the parameter name as key, while the value has the following format: [minValue, maxValue, minIntervalClosed, maxIntervalClosed] - minValue Minimal value for the parameter - maxValue Maximal value for the parameter - minIntervalClosed :py:const:`True`, if minValue represents a valid value for the parameter. :py:const:`False` otherwise. - maxIntervalClosed: :py:const:`True`, if maxValue represents a valid value for the parameter. :py:const:`False` otherwise. :rtype: dictionary
['Returns', 'the', 'intervals', 'for', 'the', 'method', 's', 'parameters', '.']
train
https://github.com/T-002/pycast/blob/8a53505c6d8367e0ea572e8af768e80b29e1cc41/pycast/methods/exponentialsmoothing.py#L178-L204
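Reading the interval format back: [minValue, maxValue, minIntervalClosed, maxIntervalClosed], so both Holt parameters live in the open interval (0, 1):

intervals = {
    "smoothingFactor":      [0.0, 1.0, False, False],
    "trendSmoothingFactor": [0.0, 1.0, False, False],
}

lo, hi, lo_closed, hi_closed = intervals["smoothingFactor"]

def in_interval(x):
    above = x > lo or (lo_closed and x == lo)
    below = x < hi or (hi_closed and x == hi)
    return above and below

assert in_interval(0.5) and not in_interval(0.0) and not in_interval(1.0)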
4,335
senaite/senaite.core
bika/lims/catalog/catalog_utilities.py
setup_catalogs
def setup_catalogs( portal, catalogs_definition={}, force_reindex=False, catalogs_extension={}, force_no_reindex=False): """ Setup the given catalogs. Redefines the map between content types and catalogs and then checks the indexes and metacolumns, if one index/column doesn't exist in the catalog_definition any more it will be removed, otherwise, if a new index/column is found, it will be created. :param portal: The Plone's Portal object :param catalogs_definition: a dictionary with the following structure { CATALOG_ID: { 'types': ['ContentType', ...], 'indexes': { 'UID': 'FieldIndex', ... }, 'columns': [ 'Title', ... ] } } :type catalogs_definition: dict :param force_reindex: Force to reindex the catalogs even if there's no need :type force_reindex: bool :param force_no_reindex: Force reindexing NOT to happen. :param catalogs_extension: An extension for the primary catalogs definition Same dict structure as param catalogs_definition. Allows to add columns and indexes required by Bika-specific add-ons. :type catalogs_extension: dict """ # If not given catalogs_definition, use the LIMS one if not catalogs_definition: catalogs_definition = getCatalogDefinitions() # Merge the catalogs definition of the extension with the primary # catalog definition definition = _merge_catalog_definitions(catalogs_definition, catalogs_extension) # Mapping content types in catalogs # This variable will be used to clean reindex the catalog. Saves the # catalogs ids archetype_tool = getToolByName(portal, 'archetype_tool') clean_and_rebuild = _map_content_types(archetype_tool, definition) # Indexing for cat_id in definition.keys(): reindex = False reindex = _setup_catalog( portal, cat_id, definition.get(cat_id, {})) if (reindex or force_reindex) and (cat_id not in clean_and_rebuild): # add the catalog if it has not been added before clean_and_rebuild.append(cat_id) # Reindex the catalogs which needs it if not force_no_reindex: _cleanAndRebuildIfNeeded(portal, clean_and_rebuild) return clean_and_rebuild
python
def setup_catalogs( portal, catalogs_definition={}, force_reindex=False, catalogs_extension={}, force_no_reindex=False): """ Setup the given catalogs. Redefines the map between content types and catalogs and then checks the indexes and metacolumns, if one index/column doesn't exist in the catalog_definition any more it will be removed, otherwise, if a new index/column is found, it will be created. :param portal: The Plone's Portal object :param catalogs_definition: a dictionary with the following structure { CATALOG_ID: { 'types': ['ContentType', ...], 'indexes': { 'UID': 'FieldIndex', ... }, 'columns': [ 'Title', ... ] } } :type catalogs_definition: dict :param force_reindex: Force to reindex the catalogs even if there's no need :type force_reindex: bool :param force_no_reindex: Force reindexing NOT to happen. :param catalogs_extension: An extension for the primary catalogs definition Same dict structure as param catalogs_definition. Allows to add columns and indexes required by Bika-specific add-ons. :type catalogs_extension: dict """ # If not given catalogs_definition, use the LIMS one if not catalogs_definition: catalogs_definition = getCatalogDefinitions() # Merge the catalogs definition of the extension with the primary # catalog definition definition = _merge_catalog_definitions(catalogs_definition, catalogs_extension) # Mapping content types in catalogs # This variable will be used to clean reindex the catalog. Saves the # catalogs ids archetype_tool = getToolByName(portal, 'archetype_tool') clean_and_rebuild = _map_content_types(archetype_tool, definition) # Indexing for cat_id in definition.keys(): reindex = False reindex = _setup_catalog( portal, cat_id, definition.get(cat_id, {})) if (reindex or force_reindex) and (cat_id not in clean_and_rebuild): # add the catalog if it has not been added before clean_and_rebuild.append(cat_id) # Reindex the catalogs which needs it if not force_no_reindex: _cleanAndRebuildIfNeeded(portal, clean_and_rebuild) return clean_and_rebuild
['def', 'setup_catalogs', '(', 'portal', ',', 'catalogs_definition', '=', '{', '}', ',', 'force_reindex', '=', 'False', ',', 'catalogs_extension', '=', '{', '}', ',', 'force_no_reindex', '=', 'False', ')', ':', '# If not given catalogs_definition, use the LIMS one', 'if', 'not', 'catalogs_definition', ':', 'catalogs_definition', '=', 'getCatalogDefinitions', '(', ')', '# Merge the catalogs definition of the extension with the primary', '# catalog definition', 'definition', '=', '_merge_catalog_definitions', '(', 'catalogs_definition', ',', 'catalogs_extension', ')', '# Mapping content types in catalogs', '# This variable will be used to clean reindex the catalog. Saves the', '# catalogs ids', 'archetype_tool', '=', 'getToolByName', '(', 'portal', ',', "'archetype_tool'", ')', 'clean_and_rebuild', '=', '_map_content_types', '(', 'archetype_tool', ',', 'definition', ')', '# Indexing', 'for', 'cat_id', 'in', 'definition', '.', 'keys', '(', ')', ':', 'reindex', '=', 'False', 'reindex', '=', '_setup_catalog', '(', 'portal', ',', 'cat_id', ',', 'definition', '.', 'get', '(', 'cat_id', ',', '{', '}', ')', ')', 'if', '(', 'reindex', 'or', 'force_reindex', ')', 'and', '(', 'cat_id', 'not', 'in', 'clean_and_rebuild', ')', ':', '# add the catalog if it has not been added before', 'clean_and_rebuild', '.', 'append', '(', 'cat_id', ')', '# Reindex the catalogs which needs it', 'if', 'not', 'force_no_reindex', ':', '_cleanAndRebuildIfNeeded', '(', 'portal', ',', 'clean_and_rebuild', ')', 'return', 'clean_and_rebuild']
Setup the given catalogs. Redefines the map between content types and catalogs and then checks the indexes and metacolumns, if one index/column doesn't exist in the catalog_definition any more it will be removed, otherwise, if a new index/column is found, it will be created. :param portal: The Plone's Portal object :param catalogs_definition: a dictionary with the following structure { CATALOG_ID: { 'types': ['ContentType', ...], 'indexes': { 'UID': 'FieldIndex', ... }, 'columns': [ 'Title', ... ] } } :type catalogs_definition: dict :param force_reindex: Force to reindex the catalogs even if there's no need :type force_reindex: bool :param force_no_reindex: Force reindexing NOT to happen. :param catalogs_extension: An extension for the primary catalogs definition Same dict structure as param catalogs_definition. Allows to add columns and indexes required by Bika-specific add-ons. :type catalogs_extension: dict
['Setup', 'the', 'given', 'catalogs', '.', 'Redefines', 'the', 'map', 'between', 'content', 'types', 'and', 'catalogs', 'and', 'then', 'checks', 'the', 'indexes', 'and', 'metacolumns', 'if', 'one', 'index', '/', 'column', 'doesn', 't', 'exist', 'in', 'the', 'catalog_definition', 'any', 'more', 'it', 'will', 'be', 'removed', 'otherwise', 'if', 'a', 'new', 'index', '/', 'column', 'is', 'found', 'it', 'will', 'be', 'created', '.']
train
https://github.com/senaite/senaite.core/blob/7602ce2ea2f9e81eb34e20ce17b98a3e70713f85/bika/lims/catalog/catalog_utilities.py#L78-L137
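The shape of a catalogs_definition entry, as described in the docstring (the catalog id, type, index, and column names are illustrative):

catalogs_definition = {
    'example_catalog': {
        'types':   ['AnalysisRequest'],
        'indexes': {'UID': 'FieldIndex', 'Title': 'FieldIndex'},
        'columns': ['Title', 'UID'],
    },
}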
4,336
Erotemic/utool
utool/util_time.py
get_timestats_str
def get_timestats_str(unixtime_list, newlines=1, full=True, isutc=True): r""" Args: unixtime_list (list): newlines (bool): Returns: str: timestat_str CommandLine: python -m utool.util_time --test-get_timestats_str Example: >>> # ENABLE_DOCTEST >>> from utool.util_time import * # NOQA >>> import utool as ut >>> unixtime_list = [0, 0 + 60 * 60 * 5 , 10 + 60 * 60 * 5, 100 + 60 * 60 * 5, 1000 + 60 * 60 * 5] >>> newlines = 1 >>> full = False >>> timestat_str = get_timestats_str(unixtime_list, newlines, full=full, isutc=True) >>> result = ut.align(str(timestat_str), ':') >>> print(result) { 'max' : '1970/01/01 05:16:40', 'mean' : '1970/01/01 04:03:42', 'min' : '1970/01/01 00:00:00', 'range': '5:16:40', 'std' : '2:02:01', } Example2: >>> # ENABLE_DOCTEST >>> from utool.util_time import * # NOQA >>> import utool as ut >>> unixtime_list = [0, 0 + 60 * 60 * 5 , 10 + 60 * 60 * 5, 100 + 60 * 60 * 5, 1000 + 60 * 60 * 5, float('nan'), 0] >>> newlines = 1 >>> timestat_str = get_timestats_str(unixtime_list, newlines, isutc=True) >>> result = ut.align(str(timestat_str), ':') >>> print(result) { 'max' : '1970/01/01 05:16:40', 'mean' : '1970/01/01 03:23:05', 'min' : '1970/01/01 00:00:00', 'nMax' : 1, 'nMin' : 2, 'num_nan': 1, 'range' : '5:16:40', 'shape' : (7,), 'std' : '2:23:43', } """ import utool as ut datetime_stats = get_timestats_dict(unixtime_list, full=full, isutc=isutc) timestat_str = ut.repr4(datetime_stats, newlines=newlines) return timestat_str
python
def get_timestats_str(unixtime_list, newlines=1, full=True, isutc=True): r""" Args: unixtime_list (list): newlines (bool): Returns: str: timestat_str CommandLine: python -m utool.util_time --test-get_timestats_str Example: >>> # ENABLE_DOCTEST >>> from utool.util_time import * # NOQA >>> import utool as ut >>> unixtime_list = [0, 0 + 60 * 60 * 5 , 10 + 60 * 60 * 5, 100 + 60 * 60 * 5, 1000 + 60 * 60 * 5] >>> newlines = 1 >>> full = False >>> timestat_str = get_timestats_str(unixtime_list, newlines, full=full, isutc=True) >>> result = ut.align(str(timestat_str), ':') >>> print(result) { 'max' : '1970/01/01 05:16:40', 'mean' : '1970/01/01 04:03:42', 'min' : '1970/01/01 00:00:00', 'range': '5:16:40', 'std' : '2:02:01', } Example2: >>> # ENABLE_DOCTEST >>> from utool.util_time import * # NOQA >>> import utool as ut >>> unixtime_list = [0, 0 + 60 * 60 * 5 , 10 + 60 * 60 * 5, 100 + 60 * 60 * 5, 1000 + 60 * 60 * 5, float('nan'), 0] >>> newlines = 1 >>> timestat_str = get_timestats_str(unixtime_list, newlines, isutc=True) >>> result = ut.align(str(timestat_str), ':') >>> print(result) { 'max' : '1970/01/01 05:16:40', 'mean' : '1970/01/01 03:23:05', 'min' : '1970/01/01 00:00:00', 'nMax' : 1, 'nMin' : 2, 'num_nan': 1, 'range' : '5:16:40', 'shape' : (7,), 'std' : '2:23:43', } """ import utool as ut datetime_stats = get_timestats_dict(unixtime_list, full=full, isutc=isutc) timestat_str = ut.repr4(datetime_stats, newlines=newlines) return timestat_str
['def', 'get_timestats_str', '(', 'unixtime_list', ',', 'newlines', '=', '1', ',', 'full', '=', 'True', ',', 'isutc', '=', 'True', ')', ':', 'import', 'utool', 'as', 'ut', 'datetime_stats', '=', 'get_timestats_dict', '(', 'unixtime_list', ',', 'full', '=', 'full', ',', 'isutc', '=', 'isutc', ')', 'timestat_str', '=', 'ut', '.', 'repr4', '(', 'datetime_stats', ',', 'newlines', '=', 'newlines', ')', 'return', 'timestat_str']
r""" Args: unixtime_list (list): newlines (bool): Returns: str: timestat_str CommandLine: python -m utool.util_time --test-get_timestats_str Example: >>> # ENABLE_DOCTEST >>> from utool.util_time import * # NOQA >>> import utool as ut >>> unixtime_list = [0, 0 + 60 * 60 * 5 , 10 + 60 * 60 * 5, 100 + 60 * 60 * 5, 1000 + 60 * 60 * 5] >>> newlines = 1 >>> full = False >>> timestat_str = get_timestats_str(unixtime_list, newlines, full=full, isutc=True) >>> result = ut.align(str(timestat_str), ':') >>> print(result) { 'max' : '1970/01/01 05:16:40', 'mean' : '1970/01/01 04:03:42', 'min' : '1970/01/01 00:00:00', 'range': '5:16:40', 'std' : '2:02:01', } Example2: >>> # ENABLE_DOCTEST >>> from utool.util_time import * # NOQA >>> import utool as ut >>> unixtime_list = [0, 0 + 60 * 60 * 5 , 10 + 60 * 60 * 5, 100 + 60 * 60 * 5, 1000 + 60 * 60 * 5, float('nan'), 0] >>> newlines = 1 >>> timestat_str = get_timestats_str(unixtime_list, newlines, isutc=True) >>> result = ut.align(str(timestat_str), ':') >>> print(result) { 'max' : '1970/01/01 05:16:40', 'mean' : '1970/01/01 03:23:05', 'min' : '1970/01/01 00:00:00', 'nMax' : 1, 'nMin' : 2, 'num_nan': 1, 'range' : '5:16:40', 'shape' : (7,), 'std' : '2:23:43', }
['r', 'Args', ':', 'unixtime_list', '(', 'list', ')', ':', 'newlines', '(', 'bool', ')', ':']
train
https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_time.py#L1015-L1070
4,337
federico123579/Trading212-API
tradingAPI/api.py
API.checkPos
def checkPos(self): """check all positions""" soup = BeautifulSoup(self.css1(path['movs-table']).html, 'html.parser') poss = [] for label in soup.find_all("tr"): pos_id = label['id'] # init an empty list # check if it already exists pos_list = [x for x in self.positions if x.id == pos_id] if pos_list: # and update it pos = pos_list[0] pos.update(label) else: pos = self.new_pos(label) pos.get_gain() poss.append(pos) # remove old positions self.positions.clear() self.positions.extend(poss) logger.debug("%d positions updated" % len(poss)) return self.positions
python
def checkPos(self): """check all positions""" soup = BeautifulSoup(self.css1(path['movs-table']).html, 'html.parser') poss = [] for label in soup.find_all("tr"): pos_id = label['id'] # init an empty list # check if it already exists pos_list = [x for x in self.positions if x.id == pos_id] if pos_list: # and update it pos = pos_list[0] pos.update(label) else: pos = self.new_pos(label) pos.get_gain() poss.append(pos) # remove old positions self.positions.clear() self.positions.extend(poss) logger.debug("%d positions updated" % len(poss)) return self.positions
['def', 'checkPos', '(', 'self', ')', ':', 'soup', '=', 'BeautifulSoup', '(', 'self', '.', 'css1', '(', 'path', '[', "'movs-table'", ']', ')', '.', 'html', ',', "'html.parser'", ')', 'poss', '=', '[', ']', 'for', 'label', 'in', 'soup', '.', 'find_all', '(', '"tr"', ')', ':', 'pos_id', '=', 'label', '[', "'id'", ']', '# init an empty list', '# check if it already exists', 'pos_list', '=', '[', 'x', 'for', 'x', 'in', 'self', '.', 'positions', 'if', 'x', '.', 'id', '==', 'pos_id', ']', 'if', 'pos_list', ':', '# and update it', 'pos', '=', 'pos_list', '[', '0', ']', 'pos', '.', 'update', '(', 'label', ')', 'else', ':', 'pos', '=', 'self', '.', 'new_pos', '(', 'label', ')', 'pos', '.', 'get_gain', '(', ')', 'poss', '.', 'append', '(', 'pos', ')', '# remove old positions', 'self', '.', 'positions', '.', 'clear', '(', ')', 'self', '.', 'positions', '.', 'extend', '(', 'poss', ')', 'logger', '.', 'debug', '(', '"%d positions updated"', '%', 'len', '(', 'poss', ')', ')', 'return', 'self', '.', 'positions']
check all positions
['check', 'all', 'positions']
train
https://github.com/federico123579/Trading212-API/blob/0fab20b71a2348e72bbe76071b81f3692128851f/tradingAPI/api.py#L76-L97
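Note on the record above: the diff-style refresh (reuse a known position when the row id matches, otherwise create one) can be exercised standalone with bs4. A minimal sketch with toy HTML and hypothetical row ids:

from bs4 import BeautifulSoup

html = '<table><tr id="pos-1"><td>EURUSD</td></tr><tr id="pos-2"><td>GOLD</td></tr></table>'
soup = BeautifulSoup(html, 'html.parser')
known = {'pos-1'}  # ids of positions already tracked
for row in soup.find_all('tr'):
    row_id = row['id']
    if row_id in known:
        print('update existing position', row_id)
    else:
        print('create new position', row_id)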
4,338
bububa/pyTOP
pyTOP/trade.py
Trade.receivetime_delay
def receivetime_delay(self, tid, days, session):
    '''taobao.trade.receivetime.delay Extend the receipt confirmation time of a trade

    Extend the receipt confirmation time of a trade'''
    request = TOPRequest('taobao.trade.receivetime.delay')
    request['tid'] = tid
    request['days'] = days
    self.create(self.execute(request, session)['trade'])
    return self
python
def receivetime_delay(self, tid, days, session):
    '''taobao.trade.receivetime.delay Extend the receipt confirmation time of a trade

    Extend the receipt confirmation time of a trade'''
    request = TOPRequest('taobao.trade.receivetime.delay')
    request['tid'] = tid
    request['days'] = days
    self.create(self.execute(request, session)['trade'])
    return self
['def', 'receivetime_delay', '(', 'self', ',', 'tid', ',', 'days', ',', 'session', ')', ':', 'request', '=', 'TOPRequest', '(', "'taobao.trade.receivetime.delay'", ')', 'request', '[', "'tid'", ']', '=', 'tid', 'request', '[', "'days'", ']', '=', 'days', 'self', '.', 'create', '(', 'self', '.', 'execute', '(', 'request', ',', 'session', ')', '[', "'trade'", ']', ')', 'return', 'self']
taobao.trade.receivetime.delay Extend the receipt confirmation time of a trade Extend the receipt confirmation time of a trade
['taobao', '.', 'trade', '.', 'receivetime', '.', 'delay', 'Extend the receipt confirmation time of a trade', 'Extend the receipt confirmation time of a trade']
train
https://github.com/bububa/pyTOP/blob/1e48009bcfe886be392628244b370e6374e1f2b2/pyTOP/trade.py#L215-L223
4,339
gregoil/ipdbugger
ipdbugger/__init__.py
IPDBugger.do_raise
def do_raise(self, arg):
    """Raise the last exception caught."""
    self.do_continue(arg)

    # Annotating the exception for a continual re-raise
    _, exc_value, _ = self.exc_info
    exc_value._ipdbugger_let_raise = True

    raise_(*self.exc_info)
python
def do_raise(self, arg):
    """Raise the last exception caught."""
    self.do_continue(arg)

    # Annotating the exception for a continual re-raise
    _, exc_value, _ = self.exc_info
    exc_value._ipdbugger_let_raise = True

    raise_(*self.exc_info)
['def', 'do_raise', '(', 'self', ',', 'arg', ')', ':', 'self', '.', 'do_continue', '(', 'arg', ')', '# Annotating the exception for a continual re-raise', '_', ',', 'exc_value', ',', '_', '=', 'self', '.', 'exc_info', 'exc_value', '.', '_ipdbugger_let_raise', '=', 'True', 'raise_', '(', '*', 'self', '.', 'exc_info', ')']
Raise the last exception caught.
['Raise', 'the', 'last', 'exception', 'caught', '.']
train
https://github.com/gregoil/ipdbugger/blob/9575734ec26f6be86ae263496d50eb60bb988b21/ipdbugger/__init__.py#L48-L56
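The `raise_(*self.exc_info)` call in the record above comes from a py2/py3 compatibility shim; on Python 3 the same annotate-then-re-raise pattern can be sketched with `with_traceback` (the `_let_raise` marker below is a hypothetical stand-in for the library's attribute):

import sys

def annotate_and_reraise():
    try:
        1 / 0
    except ZeroDivisionError:
        exc_type, exc_value, tb = sys.exc_info()
        exc_value._let_raise = True            # hypothetical marker, like _ipdbugger_let_raise
        raise exc_value.with_traceback(tb)     # Python 3 spelling of six-style raise_(*exc_info)

try:
    annotate_and_reraise()
except ZeroDivisionError as err:
    print(getattr(err, '_let_raise', False))   # True: the annotation survived the re-raise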
4,340
google/grr
api_client/python/grr_api_client/hunt.py
HuntBase.CreateApproval
def CreateApproval(self, reason=None, notified_users=None,
                   email_cc_addresses=None):
    """Create a new approval for the current user to access this hunt."""
    if not reason:
        raise ValueError("reason can't be empty")

    if not notified_users:
        raise ValueError("notified_users list can't be empty.")

    approval = user_pb2.ApiHuntApproval(
        reason=reason,
        notified_users=notified_users,
        email_cc_addresses=email_cc_addresses or [])

    args = user_pb2.ApiCreateHuntApprovalArgs(
        hunt_id=self.hunt_id, approval=approval)

    data = self._context.SendRequest("CreateHuntApproval", args)
    return HuntApproval(
        data=data, username=self._context.username, context=self._context)
python
def CreateApproval(self, reason=None, notified_users=None,
                   email_cc_addresses=None):
    """Create a new approval for the current user to access this hunt."""
    if not reason:
        raise ValueError("reason can't be empty")

    if not notified_users:
        raise ValueError("notified_users list can't be empty.")

    approval = user_pb2.ApiHuntApproval(
        reason=reason,
        notified_users=notified_users,
        email_cc_addresses=email_cc_addresses or [])

    args = user_pb2.ApiCreateHuntApprovalArgs(
        hunt_id=self.hunt_id, approval=approval)

    data = self._context.SendRequest("CreateHuntApproval", args)
    return HuntApproval(
        data=data, username=self._context.username, context=self._context)
['def', 'CreateApproval', '(', 'self', ',', 'reason', '=', 'None', ',', 'notified_users', '=', 'None', ',', 'email_cc_addresses', '=', 'None', ')', ':', 'if', 'not', 'reason', ':', 'raise', 'ValueError', '(', '"reason can\'t be empty"', ')', 'if', 'not', 'notified_users', ':', 'raise', 'ValueError', '(', '"notified_users list can\'t be empty."', ')', 'approval', '=', 'user_pb2', '.', 'ApiHuntApproval', '(', 'reason', '=', 'reason', ',', 'notified_users', '=', 'notified_users', ',', 'email_cc_addresses', '=', 'email_cc_addresses', 'or', '[', ']', ')', 'args', '=', 'user_pb2', '.', 'ApiCreateHuntApprovalArgs', '(', 'hunt_id', '=', 'self', '.', 'hunt_id', ',', 'approval', '=', 'approval', ')', 'data', '=', 'self', '.', '_context', '.', 'SendRequest', '(', '"CreateHuntApproval"', ',', 'args', ')', 'return', 'HuntApproval', '(', 'data', '=', 'data', ',', 'username', '=', 'self', '.', '_context', '.', 'username', ',', 'context', '=', 'self', '.', '_context', ')']
Create a new approval for the current user to access this hunt.
['Create', 'a', 'new', 'approval', 'for', 'the', 'current', 'user', 'to', 'access', 'this', 'hunt', '.']
train
https://github.com/google/grr/blob/5cef4e8e2f0d5df43ea4877e9c798e0bf60bfe74/api_client/python/grr_api_client/hunt.py#L172-L193
4,341
ga4gh/ga4gh-server
ga4gh/server/repo/rnaseq2ga.py
RnaSqliteStore.addRNAQuantification
def addRNAQuantification(self, datafields):
    """
    Adds an RNAQuantification to the db.

    Datafields is a tuple in the order:
    id, feature_set_ids, description, name, read_group_ids, programs,
    biosample_id
    """
    self._rnaValueList.append(datafields)
    if len(self._rnaValueList) >= self._batchSize:
        self.batchaddRNAQuantification()
python
def addRNAQuantification(self, datafields):
    """
    Adds an RNAQuantification to the db.

    Datafields is a tuple in the order:
    id, feature_set_ids, description, name, read_group_ids, programs,
    biosample_id
    """
    self._rnaValueList.append(datafields)
    if len(self._rnaValueList) >= self._batchSize:
        self.batchaddRNAQuantification()
['def', 'addRNAQuantification', '(', 'self', ',', 'datafields', ')', ':', 'self', '.', '_rnaValueList', '.', 'append', '(', 'datafields', ')', 'if', 'len', '(', 'self', '.', '_rnaValueList', ')', '>=', 'self', '.', '_batchSize', ':', 'self', '.', 'batchaddRNAQuantification', '(', ')']
Adds an RNAQuantification to the db. Datafields is a tuple in the order: id, feature_set_ids, description, name, read_group_ids, programs, biosample_id
['Adds', 'an', 'RNAQuantification', 'to', 'the', 'db', '.', 'Datafields', 'is', 'a', 'tuple', 'in', 'the', 'order', ':', 'id', 'feature_set_ids', 'description', 'name', 'read_group_ids', 'programs', 'biosample_id']
train
https://github.com/ga4gh/ga4gh-server/blob/1aa18922ef136db8604f6f098cb1732cba6f2a76/ga4gh/server/repo/rnaseq2ga.py#L50-L59
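The batching idiom in the record above (append, then flush once the buffer reaches `_batchSize`) is independent of the GA4GH schema. A self-contained sketch with hypothetical names, where `print` stands in for the real batch insert:

class BatchWriter(object):
    """Accumulate rows and flush them to storage in fixed-size batches."""
    def __init__(self, batch_size=100):
        self._rows = []
        self._batch_size = batch_size

    def add(self, row):
        self._rows.append(row)
        if len(self._rows) >= self._batch_size:
            self.flush()

    def flush(self):
        if self._rows:
            print('writing %d rows' % len(self._rows))  # stand-in for an executemany() call
            del self._rows[:]

w = BatchWriter(batch_size=2)
for row in [('r1',), ('r2',), ('r3',)]:
    w.add(row)
w.flush()  # flush the remainder explicitly at the end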
4,342
intake/intake
intake/source/utils.py
reverse_format
def reverse_format(format_string, resolved_string):
    """
    Reverse the string method format.

    Given format_string and resolved_string, find arguments that would
    give ``format_string.format(**arguments) == resolved_string``

    Parameters
    ----------
    format_string : str
        Format template string as used with str.format method
    resolved_string : str
        String with same pattern as format_string but with fields filled out.

    Returns
    -------
    args : dict
        Dict of the form {field_name: value} such that
        ``format_string.(**args) == resolved_string``

    Examples
    --------
    >>> reverse_format('data_{year}_{month}_{day}.csv', 'data_2014_01_03.csv')
    {'year': '2014', 'month': '01', 'day': '03'}
    >>> reverse_format('data_{year:d}_{month:d}_{day:d}.csv', 'data_2014_01_03.csv')
    {'year': 2014, 'month': 1, 'day': 3}
    >>> reverse_format('data_{date:%Y_%m_%d}.csv', 'data_2016_10_01.csv')
    {'date': datetime.datetime(2016, 10, 1, 0, 0)}
    >>> reverse_format('{state:2}{zip:5}', 'PA19104')
    {'state': 'PA', 'zip': '19104'}

    See also
    --------
    str.format : method that this reverses
    reverse_formats : method for reversing a list of strings using one pattern
    """
    from string import Formatter
    from datetime import datetime

    fmt = Formatter()
    args = {}

    # ensure that format_string is in posix format
    format_string = make_path_posix(format_string)

    # split the string into bits
    literal_texts, field_names, format_specs, conversions = zip(*fmt.parse(format_string))
    if not any(field_names):
        return {}

    for i, conversion in enumerate(conversions):
        if conversion:
            raise ValueError(('Conversion not allowed. Found on {}.'
                              .format(field_names[i])))

    # ensure that resolved string is in posix format
    resolved_string = make_path_posix(resolved_string)

    # get a list of the parts that matter
    bits = _get_parts_of_format_string(resolved_string, literal_texts, format_specs)

    for i, (field_name, format_spec) in enumerate(zip(field_names, format_specs)):
        if field_name:
            try:
                if format_spec.startswith('%'):
                    args[field_name] = datetime.strptime(bits[i], format_spec)
                elif format_spec[-1] in list('bcdoxX'):
                    args[field_name] = int(bits[i])
                elif format_spec[-1] in list('eEfFgGn'):
                    args[field_name] = float(bits[i])
                elif format_spec[-1] == '%':
                    args[field_name] = float(bits[i][:-1])/100
                else:
                    args[field_name] = fmt.format_field(bits[i], format_spec)
            except:
                args[field_name] = bits[i]

    return args
python
def reverse_format(format_string, resolved_string):
    """
    Reverse the string method format.

    Given format_string and resolved_string, find arguments that would
    give ``format_string.format(**arguments) == resolved_string``

    Parameters
    ----------
    format_string : str
        Format template string as used with str.format method
    resolved_string : str
        String with same pattern as format_string but with fields filled out.

    Returns
    -------
    args : dict
        Dict of the form {field_name: value} such that
        ``format_string.(**args) == resolved_string``

    Examples
    --------
    >>> reverse_format('data_{year}_{month}_{day}.csv', 'data_2014_01_03.csv')
    {'year': '2014', 'month': '01', 'day': '03'}
    >>> reverse_format('data_{year:d}_{month:d}_{day:d}.csv', 'data_2014_01_03.csv')
    {'year': 2014, 'month': 1, 'day': 3}
    >>> reverse_format('data_{date:%Y_%m_%d}.csv', 'data_2016_10_01.csv')
    {'date': datetime.datetime(2016, 10, 1, 0, 0)}
    >>> reverse_format('{state:2}{zip:5}', 'PA19104')
    {'state': 'PA', 'zip': '19104'}

    See also
    --------
    str.format : method that this reverses
    reverse_formats : method for reversing a list of strings using one pattern
    """
    from string import Formatter
    from datetime import datetime

    fmt = Formatter()
    args = {}

    # ensure that format_string is in posix format
    format_string = make_path_posix(format_string)

    # split the string into bits
    literal_texts, field_names, format_specs, conversions = zip(*fmt.parse(format_string))
    if not any(field_names):
        return {}

    for i, conversion in enumerate(conversions):
        if conversion:
            raise ValueError(('Conversion not allowed. Found on {}.'
                              .format(field_names[i])))

    # ensure that resolved string is in posix format
    resolved_string = make_path_posix(resolved_string)

    # get a list of the parts that matter
    bits = _get_parts_of_format_string(resolved_string, literal_texts, format_specs)

    for i, (field_name, format_spec) in enumerate(zip(field_names, format_specs)):
        if field_name:
            try:
                if format_spec.startswith('%'):
                    args[field_name] = datetime.strptime(bits[i], format_spec)
                elif format_spec[-1] in list('bcdoxX'):
                    args[field_name] = int(bits[i])
                elif format_spec[-1] in list('eEfFgGn'):
                    args[field_name] = float(bits[i])
                elif format_spec[-1] == '%':
                    args[field_name] = float(bits[i][:-1])/100
                else:
                    args[field_name] = fmt.format_field(bits[i], format_spec)
            except:
                args[field_name] = bits[i]

    return args
['def', 'reverse_format', '(', 'format_string', ',', 'resolved_string', ')', ':', 'from', 'string', 'import', 'Formatter', 'from', 'datetime', 'import', 'datetime', 'fmt', '=', 'Formatter', '(', ')', 'args', '=', '{', '}', '# ensure that format_string is in posix format', 'format_string', '=', 'make_path_posix', '(', 'format_string', ')', '# split the string into bits', 'literal_texts', ',', 'field_names', ',', 'format_specs', ',', 'conversions', '=', 'zip', '(', '*', 'fmt', '.', 'parse', '(', 'format_string', ')', ')', 'if', 'not', 'any', '(', 'field_names', ')', ':', 'return', '{', '}', 'for', 'i', ',', 'conversion', 'in', 'enumerate', '(', 'conversions', ')', ':', 'if', 'conversion', ':', 'raise', 'ValueError', '(', '(', "'Conversion not allowed. Found on {}.'", '.', 'format', '(', 'field_names', '[', 'i', ']', ')', ')', ')', '# ensure that resolved string is in posix format', 'resolved_string', '=', 'make_path_posix', '(', 'resolved_string', ')', '# get a list of the parts that matter', 'bits', '=', '_get_parts_of_format_string', '(', 'resolved_string', ',', 'literal_texts', ',', 'format_specs', ')', 'for', 'i', ',', '(', 'field_name', ',', 'format_spec', ')', 'in', 'enumerate', '(', 'zip', '(', 'field_names', ',', 'format_specs', ')', ')', ':', 'if', 'field_name', ':', 'try', ':', 'if', 'format_spec', '.', 'startswith', '(', "'%'", ')', ':', 'args', '[', 'field_name', ']', '=', 'datetime', '.', 'strptime', '(', 'bits', '[', 'i', ']', ',', 'format_spec', ')', 'elif', 'format_spec', '[', '-', '1', ']', 'in', 'list', '(', "'bcdoxX'", ')', ':', 'args', '[', 'field_name', ']', '=', 'int', '(', 'bits', '[', 'i', ']', ')', 'elif', 'format_spec', '[', '-', '1', ']', 'in', 'list', '(', "'eEfFgGn'", ')', ':', 'args', '[', 'field_name', ']', '=', 'float', '(', 'bits', '[', 'i', ']', ')', 'elif', 'format_spec', '[', '-', '1', ']', '==', "'%'", ':', 'args', '[', 'field_name', ']', '=', 'float', '(', 'bits', '[', 'i', ']', '[', ':', '-', '1', ']', ')', '/', '100', 'else', ':', 'args', '[', 'field_name', ']', '=', 'fmt', '.', 'format_field', '(', 'bits', '[', 'i', ']', ',', 'format_spec', ')', 'except', ':', 'args', '[', 'field_name', ']', '=', 'bits', '[', 'i', ']', 'return', 'args']
Reverse the string method format.

Given format_string and resolved_string, find arguments that would
give ``format_string.format(**arguments) == resolved_string``

Parameters
----------
format_string : str
    Format template string as used with str.format method
resolved_string : str
    String with same pattern as format_string but with fields filled out.

Returns
-------
args : dict
    Dict of the form {field_name: value} such that
    ``format_string.(**args) == resolved_string``

Examples
--------
>>> reverse_format('data_{year}_{month}_{day}.csv', 'data_2014_01_03.csv')
{'year': '2014', 'month': '01', 'day': '03'}
>>> reverse_format('data_{year:d}_{month:d}_{day:d}.csv', 'data_2014_01_03.csv')
{'year': 2014, 'month': 1, 'day': 3}
>>> reverse_format('data_{date:%Y_%m_%d}.csv', 'data_2016_10_01.csv')
{'date': datetime.datetime(2016, 10, 1, 0, 0)}
>>> reverse_format('{state:2}{zip:5}', 'PA19104')
{'state': 'PA', 'zip': '19104'}

See also
--------
str.format : method that this reverses
reverse_formats : method for reversing a list of strings using one pattern
['Reverse', 'the', 'string', 'method', 'format', '.']
train
https://github.com/intake/intake/blob/277b96bfdee39d8a3048ea5408c6d6716d568336/intake/source/utils.py#L134-L213
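For plain `{name}` fields with no format spec, the inversion in the record above reduces to a regex match. A minimal self-contained sketch of that core idea (hypothetical `reverse_simple_format` name; no posix-path handling or type coercion):

import re

def reverse_simple_format(template, resolved):
    """Invert str.format for plain '{name}' fields only."""
    out, last = [], 0
    for m in re.finditer(r'\{(\w+)\}', template):
        out.append(re.escape(template[last:m.start()]))   # literal text between fields
        out.append('(?P<%s>.+?)' % m.group(1))            # one named capture per field
        last = m.end()
    out.append(re.escape(template[last:]) + '$')
    match = re.match(''.join(out), resolved)
    return match.groupdict() if match else None

print(reverse_simple_format('data_{year}_{month}_{day}.csv', 'data_2014_01_03.csv'))
# {'year': '2014', 'month': '01', 'day': '03'}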
4,343
AndrewAnnex/SpiceyPy
spiceypy/spiceypy.py
mxmt
def mxmt(m1, m2):
    """
    Multiply a 3x3 matrix and the transpose of another 3x3 matrix.

    http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/mxmt_c.html

    :param m1: 3x3 double precision matrix.
    :type m1: 3x3-Element Array of floats
    :param m2: 3x3 double precision matrix.
    :type m2: 3x3-Element Array of floats
    :return: The product m1 times m2 transpose.
    :rtype: float
    """
    m1 = stypes.toDoubleMatrix(m1)
    m2 = stypes.toDoubleMatrix(m2)
    mout = stypes.emptyDoubleMatrix()
    libspice.mxmt_c(m1, m2, mout)
    return stypes.cMatrixToNumpy(mout)
python
def mxmt(m1, m2):
    """
    Multiply a 3x3 matrix and the transpose of another 3x3 matrix.

    http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/mxmt_c.html

    :param m1: 3x3 double precision matrix.
    :type m1: 3x3-Element Array of floats
    :param m2: 3x3 double precision matrix.
    :type m2: 3x3-Element Array of floats
    :return: The product m1 times m2 transpose.
    :rtype: float
    """
    m1 = stypes.toDoubleMatrix(m1)
    m2 = stypes.toDoubleMatrix(m2)
    mout = stypes.emptyDoubleMatrix()
    libspice.mxmt_c(m1, m2, mout)
    return stypes.cMatrixToNumpy(mout)
['def', 'mxmt', '(', 'm1', ',', 'm2', ')', ':', 'm1', '=', 'stypes', '.', 'toDoubleMatrix', '(', 'm1', ')', 'm2', '=', 'stypes', '.', 'toDoubleMatrix', '(', 'm2', ')', 'mout', '=', 'stypes', '.', 'emptyDoubleMatrix', '(', ')', 'libspice', '.', 'mxmt_c', '(', 'm1', ',', 'm2', ',', 'mout', ')', 'return', 'stypes', '.', 'cMatrixToNumpy', '(', 'mout', ')']
Multiply a 3x3 matrix and the transpose of another 3x3 matrix.

http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/mxmt_c.html

:param m1: 3x3 double precision matrix.
:type m1: 3x3-Element Array of floats
:param m2: 3x3 double precision matrix.
:type m2: 3x3-Element Array of floats
:return: The product m1 times m2 transpose.
:rtype: float
['Multiply', 'a', '3x3', 'matrix', 'and', 'the', 'transpose', 'of', 'another', '3x3', 'matrix', '.']
train
https://github.com/AndrewAnnex/SpiceyPy/blob/fc20a9b9de68b58eed5b332f0c051fb343a6e335/spiceypy/spiceypy.py#L8693-L8710
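Since `cMatrixToNumpy` returns a NumPy array, the product in the record above should agree with plain NumPy for ordinary float inputs; `m1 @ m2.T` expresses the same operation. A quick sketch:

import numpy as np

m1 = np.arange(9.0).reshape(3, 3)
m2 = np.eye(3)
print(m1 @ m2.T)                              # same shape and values as the CSPICE routine here
print(np.allclose(m1 @ m2.T, m1.dot(m2.T)))   # True: @ and dot agree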
4,344
ramses-tech/ramses
ramses/utils.py
is_callable_tag
def is_callable_tag(tag):
    """ Determine whether :tag: is a valid callable string tag.

    String is assumed to be valid callable if it starts with '{{'
    and ends with '}}'.

    :param tag: String name of tag.
    """
    return (isinstance(tag, six.string_types) and
            tag.strip().startswith('{{') and
            tag.strip().endswith('}}'))
python
def is_callable_tag(tag):
    """ Determine whether :tag: is a valid callable string tag.

    String is assumed to be valid callable if it starts with '{{'
    and ends with '}}'.

    :param tag: String name of tag.
    """
    return (isinstance(tag, six.string_types) and
            tag.strip().startswith('{{') and
            tag.strip().endswith('}}'))
['def', 'is_callable_tag', '(', 'tag', ')', ':', 'return', '(', 'isinstance', '(', 'tag', ',', 'six', '.', 'string_types', ')', 'and', 'tag', '.', 'strip', '(', ')', '.', 'startswith', '(', "'{{'", ')', 'and', 'tag', '.', 'strip', '(', ')', '.', 'endswith', '(', "'}}'", ')', ')']
Determine whether :tag: is a valid callable string tag. String is assumed to be valid callable if it starts with '{{' and ends with '}}'. :param tag: String name of tag.
['Determine', 'whether', ':', 'tag', ':', 'is', 'a', 'valid', 'callable', 'string', 'tag', '.']
train
https://github.com/ramses-tech/ramses/blob/ea2e1e896325b7256cdf5902309e05fd98e0c14c/ramses/utils.py#L254-L264
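A six-free sketch of the same check, assuming Python 3 where `str` is the only relevant string type:

def is_callable_tag(tag):
    # on Python 3, isinstance(tag, str) replaces six.string_types
    return (isinstance(tag, str)
            and tag.strip().startswith('{{')
            and tag.strip().endswith('}}'))

print(is_callable_tag('{{ myproject.handlers.on_create }}'))  # True
print(is_callable_tag('plain string'))                        # False
print(is_callable_tag(42))                                    # False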
4,345
backtrader/backtrader
contrib/utils/influxdb-import.py
InfluxDBTool.write_dataframe_to_idb
def write_dataframe_to_idb(self, ticker):
    """Write Pandas Dataframe to InfluxDB database"""
    cachepath = self._cache
    cachefile = ('%s/%s-1M.csv.gz' % (cachepath, ticker))
    if not os.path.exists(cachefile):
        log.warn('Import file does not exist: %s' % (cachefile))
        return

    df = pd.read_csv(cachefile, compression='infer', header=0,
                     infer_datetime_format=True)
    df['Datetime'] = pd.to_datetime(df['Date'] + ' ' + df['Time'])
    df = df.set_index('Datetime')
    df = df.drop(['Date', 'Time'], axis=1)
    try:
        self.dfdb.write_points(df, ticker)
    except InfluxDBClientError as err:
        log.error('Write to database failed: %s' % err)
python
def write_dataframe_to_idb(self, ticker):
    """Write Pandas Dataframe to InfluxDB database"""
    cachepath = self._cache
    cachefile = ('%s/%s-1M.csv.gz' % (cachepath, ticker))
    if not os.path.exists(cachefile):
        log.warn('Import file does not exist: %s' % (cachefile))
        return

    df = pd.read_csv(cachefile, compression='infer', header=0,
                     infer_datetime_format=True)
    df['Datetime'] = pd.to_datetime(df['Date'] + ' ' + df['Time'])
    df = df.set_index('Datetime')
    df = df.drop(['Date', 'Time'], axis=1)
    try:
        self.dfdb.write_points(df, ticker)
    except InfluxDBClientError as err:
        log.error('Write to database failed: %s' % err)
['def', 'write_dataframe_to_idb', '(', 'self', ',', 'ticker', ')', ':', 'cachepath', '=', 'self', '.', '_cache', 'cachefile', '=', '(', "'%s/%s-1M.csv.gz'", '%', '(', 'cachepath', ',', 'ticker', ')', ')', 'if', 'not', 'os', '.', 'path', '.', 'exists', '(', 'cachefile', ')', ':', 'log', '.', 'warn', '(', "'Import file does not exist: %s'", '%', '(', 'cachefile', ')', ')', 'return', 'df', '=', 'pd', '.', 'read_csv', '(', 'cachefile', ',', 'compression', '=', "'infer'", ',', 'header', '=', '0', ',', 'infer_datetime_format', '=', 'True', ')', 'df', '[', "'Datetime'", ']', '=', 'pd', '.', 'to_datetime', '(', 'df', '[', "'Date'", ']', '+', "' '", '+', 'df', '[', "'Time'", ']', ')', 'df', '=', 'df', '.', 'set_index', '(', "'Datetime'", ')', 'df', '=', 'df', '.', 'drop', '(', '[', "'Date'", ',', "'Time'", ']', ',', 'axis', '=', '1', ')', 'try', ':', 'self', '.', 'dfdb', '.', 'write_points', '(', 'df', ',', 'ticker', ')', 'except', 'InfluxDBClientError', 'as', 'err', ':', 'log', '.', 'error', '(', "'Write to database failed: %s'", '%', 'err', ')']
Write Pandas Dataframe to InfluxDB database
['Write', 'Pandas', 'Dataframe', 'to', 'InfluxDB', 'database']
train
https://github.com/backtrader/backtrader/blob/59ee9521f9887c2a1030c6f1db8c918a5816fd64/contrib/utils/influxdb-import.py#L29-L49
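The Date+Time-to-DatetimeIndex reshaping in the record above can be tried without InfluxDB or the cache files. A sketch using an in-memory CSV with made-up quotes:

import io
import pandas as pd

raw = io.StringIO("Date,Time,Open\n2017-01-02,09:30,1.05\n2017-01-02,09:31,1.06\n")
df = pd.read_csv(raw)
# combine the two string columns into one timestamp column and index on it
df['Datetime'] = pd.to_datetime(df['Date'] + ' ' + df['Time'])
df = df.set_index('Datetime').drop(['Date', 'Time'], axis=1)
print(df.index[0])  # 2017-01-02 09:30:00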
4,346
openstack/networking-cisco
networking_cisco/ml2_drivers/nexus/nexus_db_v2.py
get_nve_vni_switch_bindings
def get_nve_vni_switch_bindings(vni, switch_ip):
    """Return the nexus nve binding(s) per switch."""
    LOG.debug("get_nve_vni_switch_bindings() called")
    session = bc.get_reader_session()
    try:
        return (session.query(nexus_models_v2.NexusNVEBinding).
                filter_by(vni=vni, switch_ip=switch_ip).all())
    except sa_exc.NoResultFound:
        return None
python
def get_nve_vni_switch_bindings(vni, switch_ip):
    """Return the nexus nve binding(s) per switch."""
    LOG.debug("get_nve_vni_switch_bindings() called")
    session = bc.get_reader_session()
    try:
        return (session.query(nexus_models_v2.NexusNVEBinding).
                filter_by(vni=vni, switch_ip=switch_ip).all())
    except sa_exc.NoResultFound:
        return None
['def', 'get_nve_vni_switch_bindings', '(', 'vni', ',', 'switch_ip', ')', ':', 'LOG', '.', 'debug', '(', '"get_nve_vni_switch_bindings() called"', ')', 'session', '=', 'bc', '.', 'get_reader_session', '(', ')', 'try', ':', 'return', '(', 'session', '.', 'query', '(', 'nexus_models_v2', '.', 'NexusNVEBinding', ')', '.', 'filter_by', '(', 'vni', '=', 'vni', ',', 'switch_ip', '=', 'switch_ip', ')', '.', 'all', '(', ')', ')', 'except', 'sa_exc', '.', 'NoResultFound', ':', 'return', 'None']
Return the nexus nve binding(s) per switch.
['Return', 'the', 'nexus', 'nve', 'binding', '(', 's', ')', 'per', 'switch', '.']
train
https://github.com/openstack/networking-cisco/blob/aa58a30aec25b86f9aa5952b0863045975debfa9/networking_cisco/ml2_drivers/nexus/nexus_db_v2.py#L399-L407
4,347
dj-stripe/dj-stripe
djstripe/models/billing.py
Invoice.retry
def retry(self):
    """ Retry payment on this invoice if it isn't paid, closed, or forgiven."""

    if not self.paid and not self.forgiven and not self.closed:
        stripe_invoice = self.api_retrieve()
        updated_stripe_invoice = (
            stripe_invoice.pay()
        )  # pay() throws an exception if the charge is not successful.
        type(self).sync_from_stripe_data(updated_stripe_invoice)
        return True
    return False
python
def retry(self):
    """ Retry payment on this invoice if it isn't paid, closed, or forgiven."""

    if not self.paid and not self.forgiven and not self.closed:
        stripe_invoice = self.api_retrieve()
        updated_stripe_invoice = (
            stripe_invoice.pay()
        )  # pay() throws an exception if the charge is not successful.
        type(self).sync_from_stripe_data(updated_stripe_invoice)
        return True
    return False
['def', 'retry', '(', 'self', ')', ':', 'if', 'not', 'self', '.', 'paid', 'and', 'not', 'self', '.', 'forgiven', 'and', 'not', 'self', '.', 'closed', ':', 'stripe_invoice', '=', 'self', '.', 'api_retrieve', '(', ')', 'updated_stripe_invoice', '=', '(', 'stripe_invoice', '.', 'pay', '(', ')', ')', '# pay() throws an exception if the charge is not successful.', 'type', '(', 'self', ')', '.', 'sync_from_stripe_data', '(', 'updated_stripe_invoice', ')', 'return', 'True', 'return', 'False']
Retry payment on this invoice if it isn't paid, closed, or forgiven.
['Retry', 'payment', 'on', 'this', 'invoice', 'if', 'it', 'isn', 't', 'paid', 'closed', 'or', 'forgiven', '.']
train
https://github.com/dj-stripe/dj-stripe/blob/a5308a3808cd6e2baba49482f7a699f3a8992518/djstripe/models/billing.py#L437-L447
4,348
sdispater/cachy
cachy/tagged_cache.py
TaggedCache.add
def add(self, key, val, minutes):
    """
    Store an item in the cache if it does not exist.

    :param key: The cache key
    :type key: str

    :param val: The cache value
    :type val: mixed

    :param minutes: The lifetime in minutes of the cached value
    :type minutes: int|datetime

    :rtype: bool
    """
    if not self.has(key):
        self.put(key, val, minutes)

        return True

    return False
python
def add(self, key, val, minutes):
    """
    Store an item in the cache if it does not exist.

    :param key: The cache key
    :type key: str

    :param val: The cache value
    :type val: mixed

    :param minutes: The lifetime in minutes of the cached value
    :type minutes: int|datetime

    :rtype: bool
    """
    if not self.has(key):
        self.put(key, val, minutes)

        return True

    return False
['def', 'add', '(', 'self', ',', 'key', ',', 'val', ',', 'minutes', ')', ':', 'if', 'not', 'self', '.', 'has', '(', 'key', ')', ':', 'self', '.', 'put', '(', 'key', ',', 'val', ',', 'minutes', ')', 'return', 'True', 'return', 'False']
Store an item in the cache if it does not exist.

:param key: The cache key
:type key: str

:param val: The cache value
:type val: mixed

:param minutes: The lifetime in minutes of the cached value
:type minutes: int|datetime

:rtype: bool
['Store', 'an', 'item', 'in', 'the', 'cache', 'if', 'it', 'does', 'not', 'exist', '.']
train
https://github.com/sdispater/cachy/blob/ee4b044d6aafa80125730a00b1f679a7bd852b8a/cachy/tagged_cache.py#L74-L94
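The add-if-absent contract in the record above is easy to see against a plain dict. A toy sketch that ignores tags and expiry (`TinyCache` is a hypothetical name, not part of cachy):

class TinyCache(object):
    """Dict-backed sketch of the add-if-absent contract (no TTL handling)."""
    def __init__(self):
        self._store = {}

    def has(self, key):
        return key in self._store

    def put(self, key, val, minutes):
        self._store[key] = val  # a real store would also record the lifetime

    def add(self, key, val, minutes):
        if not self.has(key):
            self.put(key, val, minutes)
            return True
        return False

c = TinyCache()
print(c.add('user:1', 'alice', 10))  # True: stored
print(c.add('user:1', 'bob', 10))    # False: existing value kept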
4,349
calmjs/calmjs
src/calmjs/toolchain.py
Toolchain.compile_bundle_entry
def compile_bundle_entry(self, spec, entry):
    """
    Handler for each entry for the bundle method of the compile
    process.  This copies the source file or directory into the
    build directory.
    """

    modname, source, target, modpath = entry
    bundled_modpath = {modname: modpath}
    bundled_target = {modname: target}
    export_module_name = []
    if isfile(source):
        export_module_name.append(modname)
        copy_target = join(spec[BUILD_DIR], target)
        if not exists(dirname(copy_target)):
            makedirs(dirname(copy_target))
        shutil.copy(source, copy_target)
    elif isdir(source):
        copy_target = join(spec[BUILD_DIR], modname)
        shutil.copytree(source, copy_target)

    return bundled_modpath, bundled_target, export_module_name
python
def compile_bundle_entry(self, spec, entry):
    """
    Handler for each entry for the bundle method of the compile
    process.  This copies the source file or directory into the
    build directory.
    """

    modname, source, target, modpath = entry
    bundled_modpath = {modname: modpath}
    bundled_target = {modname: target}
    export_module_name = []
    if isfile(source):
        export_module_name.append(modname)
        copy_target = join(spec[BUILD_DIR], target)
        if not exists(dirname(copy_target)):
            makedirs(dirname(copy_target))
        shutil.copy(source, copy_target)
    elif isdir(source):
        copy_target = join(spec[BUILD_DIR], modname)
        shutil.copytree(source, copy_target)

    return bundled_modpath, bundled_target, export_module_name
['def', 'compile_bundle_entry', '(', 'self', ',', 'spec', ',', 'entry', ')', ':', 'modname', ',', 'source', ',', 'target', ',', 'modpath', '=', 'entry', 'bundled_modpath', '=', '{', 'modname', ':', 'modpath', '}', 'bundled_target', '=', '{', 'modname', ':', 'target', '}', 'export_module_name', '=', '[', ']', 'if', 'isfile', '(', 'source', ')', ':', 'export_module_name', '.', 'append', '(', 'modname', ')', 'copy_target', '=', 'join', '(', 'spec', '[', 'BUILD_DIR', ']', ',', 'target', ')', 'if', 'not', 'exists', '(', 'dirname', '(', 'copy_target', ')', ')', ':', 'makedirs', '(', 'dirname', '(', 'copy_target', ')', ')', 'shutil', '.', 'copy', '(', 'source', ',', 'copy_target', ')', 'elif', 'isdir', '(', 'source', ')', ':', 'copy_target', '=', 'join', '(', 'spec', '[', 'BUILD_DIR', ']', ',', 'modname', ')', 'shutil', '.', 'copytree', '(', 'source', ',', 'copy_target', ')', 'return', 'bundled_modpath', ',', 'bundled_target', ',', 'export_module_name']
Handler for each entry for the bundle method of the compile process. This copies the source file or directory into the build directory.
['Handler', 'for', 'each', 'entry', 'for', 'the', 'bundle', 'method', 'of', 'the', 'compile', 'process', '.', 'This', 'copies', 'the', 'source', 'file', 'or', 'directory', 'into', 'the', 'build', 'directory', '.']
train
https://github.com/calmjs/calmjs/blob/b9b407c2b6a7662da64bccba93bb8d92e7a5fafd/src/calmjs/toolchain.py#L1202-L1223
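The file-vs-directory copy logic above can be exercised outside the toolchain. A sketch with hypothetical names, using `exist_ok=True` in place of the original's explicit `exists()` check:

import os
import shutil

def copy_into_build(build_dir, source, target):
    """Copy a single file or a whole directory tree into build_dir."""
    dest = os.path.join(build_dir, target)
    if os.path.isfile(source):
        # ensure the parent directory exists before copying the file
        os.makedirs(os.path.dirname(dest), exist_ok=True)
        shutil.copy(source, dest)
    elif os.path.isdir(source):
        shutil.copytree(source, dest)
    return dest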
4,350
openstack/horizon
openstack_dashboard/api/keystone.py
remove_group_roles
def remove_group_roles(request, group, domain=None, project=None):
    """Removes all roles from a group on a domain or project."""
    client = keystoneclient(request, admin=True)
    roles = client.roles.list(group=group, domain=domain, project=project)
    for role in roles:
        remove_group_role(request, role=role.id, group=group,
                          domain=domain, project=project)
python
def remove_group_roles(request, group, domain=None, project=None):
    """Removes all roles from a group on a domain or project."""
    client = keystoneclient(request, admin=True)
    roles = client.roles.list(group=group, domain=domain, project=project)
    for role in roles:
        remove_group_role(request, role=role.id, group=group,
                          domain=domain, project=project)
['def', 'remove_group_roles', '(', 'request', ',', 'group', ',', 'domain', '=', 'None', ',', 'project', '=', 'None', ')', ':', 'client', '=', 'keystoneclient', '(', 'request', ',', 'admin', '=', 'True', ')', 'roles', '=', 'client', '.', 'roles', '.', 'list', '(', 'group', '=', 'group', ',', 'domain', '=', 'domain', ',', 'project', '=', 'project', ')', 'for', 'role', 'in', 'roles', ':', 'remove_group_role', '(', 'request', ',', 'role', '=', 'role', '.', 'id', ',', 'group', '=', 'group', ',', 'domain', '=', 'domain', ',', 'project', '=', 'project', ')']
Removes all roles from a group on a domain or project.
['Removes', 'all', 'roles', 'from', 'a', 'group', 'on', 'a', 'domain', 'or', 'project', '.']
train
https://github.com/openstack/horizon/blob/5601ea9477323e599d9b766fcac1f8be742935b2/openstack_dashboard/api/keystone.py#L886-L892
4,351
wright-group/WrightTools
WrightTools/data/_channel.py
Channel.normalize
def normalize(self, mag=1.):
    """Normalize a Channel, set `null` to 0 and the mag to given value.

    Parameters
    ----------
    mag : float (optional)
        New value of mag. Default is 1.
    """

    def f(dataset, s, null, mag):
        dataset[s] -= null
        dataset[s] /= mag

    if self.signed:
        mag = self.mag() / mag
    else:
        mag = self.max() / mag
    self.chunkwise(f, null=self.null, mag=mag)
    self._null = 0
python
def normalize(self, mag=1.):
    """Normalize a Channel, set `null` to 0 and the mag to given value.

    Parameters
    ----------
    mag : float (optional)
        New value of mag. Default is 1.
    """

    def f(dataset, s, null, mag):
        dataset[s] -= null
        dataset[s] /= mag

    if self.signed:
        mag = self.mag() / mag
    else:
        mag = self.max() / mag
    self.chunkwise(f, null=self.null, mag=mag)
    self._null = 0
['def', 'normalize', '(', 'self', ',', 'mag', '=', '1.', ')', ':', 'def', 'f', '(', 'dataset', ',', 's', ',', 'null', ',', 'mag', ')', ':', 'dataset', '[', 's', ']', '-=', 'null', 'dataset', '[', 's', ']', '/=', 'mag', 'if', 'self', '.', 'signed', ':', 'mag', '=', 'self', '.', 'mag', '(', ')', '/', 'mag', 'else', ':', 'mag', '=', 'self', '.', 'max', '(', ')', '/', 'mag', 'self', '.', 'chunkwise', '(', 'f', ',', 'null', '=', 'self', '.', 'null', ',', 'mag', '=', 'mag', ')', 'self', '.', '_null', '=', '0']
Normalize a Channel, set `null` to 0 and the mag to given value.

Parameters
----------
mag : float (optional)
    New value of mag. Default is 1.
['Normalize', 'a', 'Channel', 'set', 'null', 'to', '0', 'and', 'the', 'mag', 'to', 'given', 'value', '.']
train
https://github.com/wright-group/WrightTools/blob/80d3ddd5074d8d5c1bc03fd5a0e0f10d4b424aeb/WrightTools/data/_channel.py#L115-L133
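Numerically, the normalize step in the record above is a shift by `null` followed by division by the magnitude. A NumPy sketch on a toy unsigned channel (made-up values):

import numpy as np

signal = np.array([2.0, 3.0, 5.0])
null = 2.0                        # value that should map to zero
mag = (signal - null).max()       # magnitude of the shifted data
normalized = (signal - null) / mag
print(normalized)                 # [0.         0.33333333 1.        ]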
4,352
syrusakbary/promise
promise/dataloader.py
DataLoader.load_many
def load_many(self, keys):
    # type: (Iterable[Hashable]) -> Promise
    """
    Loads multiple keys, promising an array of values

    >>> a, b = await my_loader.load_many([ 'a', 'b' ])

    This is equivalent to the more verbose:

    >>> a, b = await Promise.all([
    >>>     my_loader.load('a'),
    >>>     my_loader.load('b')
    >>> ])
    """
    if not isinstance(keys, Iterable):
        raise TypeError(
            (
                "The loader.loadMany() function must be called with Array<key> "
                + "but got: {}."
            ).format(keys)
        )

    return Promise.all([self.load(key) for key in keys])
python
def load_many(self, keys):
    # type: (Iterable[Hashable]) -> Promise
    """
    Loads multiple keys, promising an array of values

    >>> a, b = await my_loader.load_many([ 'a', 'b' ])

    This is equivalent to the more verbose:

    >>> a, b = await Promise.all([
    >>>     my_loader.load('a'),
    >>>     my_loader.load('b')
    >>> ])
    """
    if not isinstance(keys, Iterable):
        raise TypeError(
            (
                "The loader.loadMany() function must be called with Array<key> "
                + "but got: {}."
            ).format(keys)
        )

    return Promise.all([self.load(key) for key in keys])
['def', 'load_many', '(', 'self', ',', 'keys', ')', ':', '# type: (Iterable[Hashable]) -> Promise', 'if', 'not', 'isinstance', '(', 'keys', ',', 'Iterable', ')', ':', 'raise', 'TypeError', '(', '(', '"The loader.loadMany() function must be called with Array<key> "', '+', '"but got: {}."', ')', '.', 'format', '(', 'keys', ')', ')', 'return', 'Promise', '.', 'all', '(', '[', 'self', '.', 'load', '(', 'key', ')', 'for', 'key', 'in', 'keys', ']', ')']
Loads multiple keys, promising an array of values

>>> a, b = await my_loader.load_many([ 'a', 'b' ])

This is equivalent to the more verbose:

>>> a, b = await Promise.all([
>>>     my_loader.load('a'),
>>>     my_loader.load('b')
>>> ])
['Loads', 'multiple', 'keys', 'promising', 'an', 'array', 'of', 'values']
train
https://github.com/syrusakbary/promise/blob/d80d791fcc86c89713dac57b55e56c0a9024f153/promise/dataloader.py#L126-L148
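The same many-from-one composition exists in asyncio, where `gather` plays the role of `Promise.all`. A toy analogue of the semantics (not the promise library's own API):

import asyncio

class ToyLoader:
    async def load(self, key):
        return key.upper()        # stand-in for a cached, batched fetch

    async def load_many(self, keys):
        # gather() resolves all loads and preserves input order, like Promise.all
        return await asyncio.gather(*(self.load(k) for k in keys))

print(asyncio.run(ToyLoader().load_many(['a', 'b'])))  # ['A', 'B']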
4,353
saltstack/salt
salt/modules/pcs.py
resource_show
def resource_show(resource_id, extra_args=None, cibfile=None):
    '''
    Show a resource via pcs command

    resource_id
        name of the resource

    extra_args
        additional options for the pcs command

    cibfile
        use cibfile instead of the live CIB

    CLI Example:

    .. code-block:: bash

        salt '*' pcs.resource_show resource_id='galera' cibfile='/tmp/cib_for_galera.cib'
    '''
    return item_show(item='resource', item_id=resource_id,
                     extra_args=extra_args, cibfile=cibfile)
python
def resource_show(resource_id, extra_args=None, cibfile=None):
    '''
    Show a resource via pcs command

    resource_id
        name of the resource

    extra_args
        additional options for the pcs command

    cibfile
        use cibfile instead of the live CIB

    CLI Example:

    .. code-block:: bash

        salt '*' pcs.resource_show resource_id='galera' cibfile='/tmp/cib_for_galera.cib'
    '''
    return item_show(item='resource', item_id=resource_id,
                     extra_args=extra_args, cibfile=cibfile)
['def', 'resource_show', '(', 'resource_id', ',', 'extra_args', '=', 'None', ',', 'cibfile', '=', 'None', ')', ':', 'return', 'item_show', '(', 'item', '=', "'resource'", ',', 'item_id', '=', 'resource_id', ',', 'extra_args', '=', 'extra_args', ',', 'cibfile', '=', 'cibfile', ')']
Show a resource via pcs command

resource_id
    name of the resource

extra_args
    additional options for the pcs command

cibfile
    use cibfile instead of the live CIB

CLI Example:

.. code-block:: bash

    salt '*' pcs.resource_show resource_id='galera' cibfile='/tmp/cib_for_galera.cib'
['Show', 'a', 'resource', 'via', 'pcs', 'command']
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/pcs.py#L400-L417
4,354
dmlc/gluon-nlp
src/gluonnlp/data/transforms.py
BERTTokenizer._tokenize_wordpiece
def _tokenize_wordpiece(self, text):
    """Tokenizes a piece of text into its word pieces.

    This uses a greedy longest-match-first algorithm to perform tokenization
    using the given vocabulary.

    For example:
        input = "unaffable"
        output = ["un", "##aff", "##able"]

    Args:
        text: A single token or whitespace separated tokens. This should have
            already been passed through `BERTBasicTokenizer`.

    Returns:
        A list of wordpiece tokens.
    """
    output_tokens = []
    for token in self.basic_tokenizer._whitespace_tokenize(text):
        chars = list(token)
        if len(chars) > self.max_input_chars_per_word:
            output_tokens.append(self.vocab.unknown_token)
            continue

        is_bad = False
        start = 0
        sub_tokens = []
        while start < len(chars):
            end = len(chars)
            cur_substr = None
            while start < end:
                substr = ''.join(chars[start:end])
                if start > 0:
                    substr = '##' + substr
                if substr in self.vocab:
                    cur_substr = substr
                    break
                end -= 1
            if cur_substr is None:
                is_bad = True
                break
            sub_tokens.append(cur_substr)
            start = end

        if is_bad:
            output_tokens.append(self.vocab.unknown_token)
        else:
            output_tokens.extend(sub_tokens)
    return output_tokens
python
def _tokenize_wordpiece(self, text):
    """Tokenizes a piece of text into its word pieces.

    This uses a greedy longest-match-first algorithm to perform tokenization
    using the given vocabulary.

    For example:
        input = "unaffable"
        output = ["un", "##aff", "##able"]

    Args:
        text: A single token or whitespace separated tokens. This should have
            already been passed through `BERTBasicTokenizer`.

    Returns:
        A list of wordpiece tokens.
    """
    output_tokens = []
    for token in self.basic_tokenizer._whitespace_tokenize(text):
        chars = list(token)
        if len(chars) > self.max_input_chars_per_word:
            output_tokens.append(self.vocab.unknown_token)
            continue

        is_bad = False
        start = 0
        sub_tokens = []
        while start < len(chars):
            end = len(chars)
            cur_substr = None
            while start < end:
                substr = ''.join(chars[start:end])
                if start > 0:
                    substr = '##' + substr
                if substr in self.vocab:
                    cur_substr = substr
                    break
                end -= 1
            if cur_substr is None:
                is_bad = True
                break
            sub_tokens.append(cur_substr)
            start = end

        if is_bad:
            output_tokens.append(self.vocab.unknown_token)
        else:
            output_tokens.extend(sub_tokens)
    return output_tokens
['def', '_tokenize_wordpiece', '(', 'self', ',', 'text', ')', ':', 'output_tokens', '=', '[', ']', 'for', 'token', 'in', 'self', '.', 'basic_tokenizer', '.', '_whitespace_tokenize', '(', 'text', ')', ':', 'chars', '=', 'list', '(', 'token', ')', 'if', 'len', '(', 'chars', ')', '>', 'self', '.', 'max_input_chars_per_word', ':', 'output_tokens', '.', 'append', '(', 'self', '.', 'vocab', '.', 'unknown_token', ')', 'continue', 'is_bad', '=', 'False', 'start', '=', '0', 'sub_tokens', '=', '[', ']', 'while', 'start', '<', 'len', '(', 'chars', ')', ':', 'end', '=', 'len', '(', 'chars', ')', 'cur_substr', '=', 'None', 'while', 'start', '<', 'end', ':', 'substr', '=', "''", '.', 'join', '(', 'chars', '[', 'start', ':', 'end', ']', ')', 'if', 'start', '>', '0', ':', 'substr', '=', "'##'", '+', 'substr', 'if', 'substr', 'in', 'self', '.', 'vocab', ':', 'cur_substr', '=', 'substr', 'break', 'end', '-=', '1', 'if', 'cur_substr', 'is', 'None', ':', 'is_bad', '=', 'True', 'break', 'sub_tokens', '.', 'append', '(', 'cur_substr', ')', 'start', '=', 'end', 'if', 'is_bad', ':', 'output_tokens', '.', 'append', '(', 'self', '.', 'vocab', '.', 'unknown_token', ')', 'else', ':', 'output_tokens', '.', 'extend', '(', 'sub_tokens', ')', 'return', 'output_tokens']
Tokenizes a piece of text into its word pieces.

This uses a greedy longest-match-first algorithm to perform tokenization
using the given vocabulary.

For example:
    input = "unaffable"
    output = ["un", "##aff", "##able"]

Args:
    text: A single token or whitespace separated tokens. This should have
        already been passed through `BERTBasicTokenizer`.

Returns:
    A list of wordpiece tokens.
['Tokenizes', 'a', 'piece', 'of', 'text', 'into', 'its', 'word', 'pieces', '.']
train
https://github.com/dmlc/gluon-nlp/blob/4b83eb6bcc8881e5f1081a3675adaa19fac5c0ba/src/gluonnlp/data/transforms.py#L960-L1007
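The greedy longest-match-first loop above can be run standalone against a toy vocabulary. A self-contained sketch (hypothetical `wordpiece` name; '[UNK]' stands in for `vocab.unknown_token`):

def wordpiece(token, vocab, unk='[UNK]'):
    """Greedy longest-match-first WordPiece over a toy vocabulary."""
    pieces, start = [], 0
    while start < len(token):
        end = len(token)
        cur = None
        while start < end:
            sub = token[start:end]
            if start > 0:
                sub = '##' + sub      # continuation pieces carry the ## prefix
            if sub in vocab:
                cur = sub             # longest match wins
                break
            end -= 1
        if cur is None:
            return [unk]              # no piece matched: the whole token is unknown
        pieces.append(cur)
        start = end
    return pieces

vocab = {'un', '##aff', '##able'}
print(wordpiece('unaffable', vocab))  # ['un', '##aff', '##able']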
4,355
pgmpy/pgmpy
pgmpy/readwrite/ProbModelXML.py
ProbModelXMLReader.get_model
def get_model(self):
    """
    Returns the model instance of the ProbModel.

    Return
    ---------------
    model: an instance of BayesianModel.

    Examples
    -------
    >>> reader = ProbModelXMLReader()
    >>> reader.get_model()
    """
    if self.probnet.get('type') == "BayesianNetwork":
        model = BayesianModel()
        model.add_nodes_from(self.probnet['Variables'].keys())
        model.add_edges_from(self.probnet['edges'].keys())

        tabular_cpds = []
        cpds = self.probnet['Potentials']
        for cpd in cpds:
            var = list(cpd['Variables'].keys())[0]
            states = self.probnet['Variables'][var]['States']
            evidence = cpd['Variables'][var]
            evidence_card = [len(self.probnet['Variables'][evidence_var]['States'])
                             for evidence_var in evidence]
            arr = list(map(float, cpd['Values'].split()))
            values = np.array(arr)
            values = values.reshape((len(states), values.size//len(states)))
            tabular_cpds.append(TabularCPD(var, len(states), values, evidence, evidence_card))

        model.add_cpds(*tabular_cpds)

        variables = model.nodes()
        for var in variables:
            for prop_name, prop_value in self.probnet['Variables'][var].items():
                model.node[var][prop_name] = prop_value

        edges = model.edges()
        if nx.__version__.startswith('1'):
            for edge in edges:
                for prop_name, prop_value in self.probnet['edges'][edge].items():
                    model.edge[edge[0]][edge[1]][prop_name] = prop_value
        else:
            for edge in edges:
                for prop_name, prop_value in self.probnet['edges'][edge].items():
                    model.adj[edge[0]][edge[1]][prop_name] = prop_value
        return model
    else:
        raise ValueError("Please specify only Bayesian Network.")
python
def get_model(self):
    """
    Returns the model instance of the ProbModel.

    Return
    ---------------
    model: an instance of BayesianModel.

    Examples
    -------
    >>> reader = ProbModelXMLReader()
    >>> reader.get_model()
    """
    if self.probnet.get('type') == "BayesianNetwork":
        model = BayesianModel()
        model.add_nodes_from(self.probnet['Variables'].keys())
        model.add_edges_from(self.probnet['edges'].keys())

        tabular_cpds = []
        cpds = self.probnet['Potentials']
        for cpd in cpds:
            var = list(cpd['Variables'].keys())[0]
            states = self.probnet['Variables'][var]['States']
            evidence = cpd['Variables'][var]
            evidence_card = [len(self.probnet['Variables'][evidence_var]['States'])
                             for evidence_var in evidence]
            arr = list(map(float, cpd['Values'].split()))
            values = np.array(arr)
            values = values.reshape((len(states), values.size//len(states)))
            tabular_cpds.append(TabularCPD(var, len(states), values, evidence, evidence_card))

        model.add_cpds(*tabular_cpds)

        variables = model.nodes()
        for var in variables:
            for prop_name, prop_value in self.probnet['Variables'][var].items():
                model.node[var][prop_name] = prop_value

        edges = model.edges()
        if nx.__version__.startswith('1'):
            for edge in edges:
                for prop_name, prop_value in self.probnet['edges'][edge].items():
                    model.edge[edge[0]][edge[1]][prop_name] = prop_value
        else:
            for edge in edges:
                for prop_name, prop_value in self.probnet['edges'][edge].items():
                    model.adj[edge[0]][edge[1]][prop_name] = prop_value
        return model
    else:
        raise ValueError("Please specify only Bayesian Network.")
['def', 'get_model', '(', 'self', ')', ':', 'if', 'self', '.', 'probnet', '.', 'get', '(', "'type'", ')', '==', '"BayesianNetwork"', ':', 'model', '=', 'BayesianModel', '(', ')', 'model', '.', 'add_nodes_from', '(', 'self', '.', 'probnet', '[', "'Variables'", ']', '.', 'keys', '(', ')', ')', 'model', '.', 'add_edges_from', '(', 'self', '.', 'probnet', '[', "'edges'", ']', '.', 'keys', '(', ')', ')', 'tabular_cpds', '=', '[', ']', 'cpds', '=', 'self', '.', 'probnet', '[', "'Potentials'", ']', 'for', 'cpd', 'in', 'cpds', ':', 'var', '=', 'list', '(', 'cpd', '[', "'Variables'", ']', '.', 'keys', '(', ')', ')', '[', '0', ']', 'states', '=', 'self', '.', 'probnet', '[', "'Variables'", ']', '[', 'var', ']', '[', "'States'", ']', 'evidence', '=', 'cpd', '[', "'Variables'", ']', '[', 'var', ']', 'evidence_card', '=', '[', 'len', '(', 'self', '.', 'probnet', '[', "'Variables'", ']', '[', 'evidence_var', ']', '[', "'States'", ']', ')', 'for', 'evidence_var', 'in', 'evidence', ']', 'arr', '=', 'list', '(', 'map', '(', 'float', ',', 'cpd', '[', "'Values'", ']', '.', 'split', '(', ')', ')', ')', 'values', '=', 'np', '.', 'array', '(', 'arr', ')', 'values', '=', 'values', '.', 'reshape', '(', '(', 'len', '(', 'states', ')', ',', 'values', '.', 'size', '//', 'len', '(', 'states', ')', ')', ')', 'tabular_cpds', '.', 'append', '(', 'TabularCPD', '(', 'var', ',', 'len', '(', 'states', ')', ',', 'values', ',', 'evidence', ',', 'evidence_card', ')', ')', 'model', '.', 'add_cpds', '(', '*', 'tabular_cpds', ')', 'variables', '=', 'model', '.', 'nodes', '(', ')', 'for', 'var', 'in', 'variables', ':', 'for', 'prop_name', ',', 'prop_value', 'in', 'self', '.', 'probnet', '[', "'Variables'", ']', '[', 'var', ']', '.', 'items', '(', ')', ':', 'model', '.', 'node', '[', 'var', ']', '[', 'prop_name', ']', '=', 'prop_value', 'edges', '=', 'model', '.', 'edges', '(', ')', 'if', 'nx', '.', '__version__', '.', 'startswith', '(', "'1'", ')', ':', 'for', 'edge', 'in', 'edges', ':', 'for', 'prop_name', ',', 'prop_value', 'in', 'self', '.', 'probnet', '[', "'edges'", ']', '[', 'edge', ']', '.', 'items', '(', ')', ':', 'model', '.', 'edge', '[', 'edge', '[', '0', ']', ']', '[', 'edge', '[', '1', ']', ']', '[', 'prop_name', ']', '=', 'prop_value', 'else', ':', 'for', 'edge', 'in', 'edges', ':', 'for', 'prop_name', ',', 'prop_value', 'in', 'self', '.', 'probnet', '[', "'edges'", ']', '[', 'edge', ']', '.', 'items', '(', ')', ':', 'model', '.', 'adj', '[', 'edge', '[', '0', ']', ']', '[', 'edge', '[', '1', ']', ']', '[', 'prop_name', ']', '=', 'prop_value', 'return', 'model', 'else', ':', 'raise', 'ValueError', '(', '"Please specify only Bayesian Network."', ')']
Returns the model instance of the ProbModel.

Return
---------------
model: an instance of BayesianModel.

Examples
-------
>>> reader = ProbModelXMLReader()
>>> reader.get_model()
['Returns', 'the', 'model', 'instance', 'of', 'the', 'ProbModel', '.']
train
https://github.com/pgmpy/pgmpy/blob/9381a66aba3c3871d3ccd00672b148d17d63239e/pgmpy/readwrite/ProbModelXML.py#L1019-L1068
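In the record above, each `Values` field is a flat whitespace-separated list that gets reshaped into a (n_states, n_parent_configurations) table. A sketch of just that reshape, with made-up probabilities for a 2-state variable with a 3-state parent:

import numpy as np

arr = list(map(float, '0.2 0.5 0.9 0.8 0.5 0.1'.split()))
values = np.array(arr)
values = values.reshape((2, values.size // 2))  # 2 states -> 2 rows, columns follow
print(values)
# [[0.2 0.5 0.9]
#  [0.8 0.5 0.1]]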
4,356
manns/pyspread
pyspread/src/actions/_main_window_actions.py
PrintActions.print_preview
def print_preview(self, print_area, print_data):
    """Launch print preview"""

    if cairo is None:
        return

    print_info = \
        self.main_window.interfaces.get_cairo_export_info("Print")

    if print_info is None:
        # Dialog has been canceled
        return

    printout_preview = Printout(self.grid, print_data, print_info)
    printout_printing = Printout(self.grid, print_data, print_info)

    preview = wx.PrintPreview(printout_preview, printout_printing,
                              print_data)

    if not preview.Ok():
        # Printout preview failed
        return

    pfrm = wx.PreviewFrame(preview, self.main_window, _("Print preview"))

    pfrm.Initialize()
    pfrm.SetPosition(self.main_window.GetPosition())
    pfrm.SetSize(self.main_window.GetSize())
    pfrm.Show(True)
python
def print_preview(self, print_area, print_data):
    """Launch print preview"""

    if cairo is None:
        return

    print_info = \
        self.main_window.interfaces.get_cairo_export_info("Print")

    if print_info is None:
        # Dialog has been canceled
        return

    printout_preview = Printout(self.grid, print_data, print_info)
    printout_printing = Printout(self.grid, print_data, print_info)

    preview = wx.PrintPreview(printout_preview, printout_printing,
                              print_data)

    if not preview.Ok():
        # Printout preview failed
        return

    pfrm = wx.PreviewFrame(preview, self.main_window, _("Print preview"))

    pfrm.Initialize()
    pfrm.SetPosition(self.main_window.GetPosition())
    pfrm.SetSize(self.main_window.GetSize())
    pfrm.Show(True)
['def', 'print_preview', '(', 'self', ',', 'print_area', ',', 'print_data', ')', ':', 'if', 'cairo', 'is', 'None', ':', 'return', 'print_info', '=', 'self', '.', 'main_window', '.', 'interfaces', '.', 'get_cairo_export_info', '(', '"Print"', ')', 'if', 'print_info', 'is', 'None', ':', '# Dialog has been canceled', 'return', 'printout_preview', '=', 'Printout', '(', 'self', '.', 'grid', ',', 'print_data', ',', 'print_info', ')', 'printout_printing', '=', 'Printout', '(', 'self', '.', 'grid', ',', 'print_data', ',', 'print_info', ')', 'preview', '=', 'wx', '.', 'PrintPreview', '(', 'printout_preview', ',', 'printout_printing', ',', 'print_data', ')', 'if', 'not', 'preview', '.', 'Ok', '(', ')', ':', '# Printout preview failed', 'return', 'pfrm', '=', 'wx', '.', 'PreviewFrame', '(', 'preview', ',', 'self', '.', 'main_window', ',', '_', '(', '"Print preview"', ')', ')', 'pfrm', '.', 'Initialize', '(', ')', 'pfrm', '.', 'SetPosition', '(', 'self', '.', 'main_window', '.', 'GetPosition', '(', ')', ')', 'pfrm', '.', 'SetSize', '(', 'self', '.', 'main_window', '.', 'GetSize', '(', ')', ')', 'pfrm', '.', 'Show', '(', 'True', ')']
Launch print preview
['Launch', 'print', 'preview']
train
https://github.com/manns/pyspread/blob/0e2fd44c2e0f06605efc3058c20a43a8c1f9e7e0/pyspread/src/actions/_main_window_actions.py#L325-L353
4,357
dslackw/slpkg
slpkg/main.py
ArgParse.bin_remove
def bin_remove(self):
    """Remove Slackware packages
    """
    packages = self.args[1:]
    options = [
        "-r",
        "--removepkg"
    ]
    additional_options = [
        "--deps",
        "--check-deps",
        "--tag",
        "--checklist"
    ]
    flag, extra = "", []
    flags = [
        "-warn",
        "-preserve",
        "-copy",
        "-keep"
    ]
    # merge --check-deps and --deps options
    if (additional_options[1] in self.args and
            additional_options[0] not in self.args):
        self.args.append(additional_options[0])
    if len(self.args) > 1 and self.args[0] in options:
        for additional in additional_options:
            if additional in self.args:
                extra.append(additional)
                self.args.remove(additional)
                packages = self.args[1:]
        for fl in flags:
            if fl in self.args:
                flag = self.args[1]
                packages = self.args[2:]
        PackageManager(packages).remove(flag, extra)
    else:
        usage("")
python
def bin_remove(self):
    """Remove Slackware packages
    """
    packages = self.args[1:]
    options = [
        "-r",
        "--removepkg"
    ]
    additional_options = [
        "--deps",
        "--check-deps",
        "--tag",
        "--checklist"
    ]
    flag, extra = "", []
    flags = [
        "-warn",
        "-preserve",
        "-copy",
        "-keep"
    ]
    # merge --check-deps and --deps options
    if (additional_options[1] in self.args and
            additional_options[0] not in self.args):
        self.args.append(additional_options[0])
    if len(self.args) > 1 and self.args[0] in options:
        for additional in additional_options:
            if additional in self.args:
                extra.append(additional)
                self.args.remove(additional)
                packages = self.args[1:]
        for fl in flags:
            if fl in self.args:
                flag = self.args[1]
                packages = self.args[2:]
        PackageManager(packages).remove(flag, extra)
    else:
        usage("")
['def', 'bin_remove', '(', 'self', ')', ':', 'packages', '=', 'self', '.', 'args', '[', '1', ':', ']', 'options', '=', '[', '"-r"', ',', '"--removepkg"', ']', 'additional_options', '=', '[', '"--deps"', ',', '"--check-deps"', ',', '"--tag"', ',', '"--checklist"', ']', 'flag', ',', 'extra', '=', '""', ',', '[', ']', 'flags', '=', '[', '"-warn"', ',', '"-preserve"', ',', '"-copy"', ',', '"-keep"', ']', '# merge --check-deps and --deps options', 'if', '(', 'additional_options', '[', '1', ']', 'in', 'self', '.', 'args', 'and', 'additional_options', '[', '0', ']', 'not', 'in', 'self', '.', 'args', ')', ':', 'self', '.', 'args', '.', 'append', '(', 'additional_options', '[', '0', ']', ')', 'if', 'len', '(', 'self', '.', 'args', ')', '>', '1', 'and', 'self', '.', 'args', '[', '0', ']', 'in', 'options', ':', 'for', 'additional', 'in', 'additional_options', ':', 'if', 'additional', 'in', 'self', '.', 'args', ':', 'extra', '.', 'append', '(', 'additional', ')', 'self', '.', 'args', '.', 'remove', '(', 'additional', ')', 'packages', '=', 'self', '.', 'args', '[', '1', ':', ']', 'for', 'fl', 'in', 'flags', ':', 'if', 'fl', 'in', 'self', '.', 'args', ':', 'flag', '=', 'self', '.', 'args', '[', '1', ']', 'packages', '=', 'self', '.', 'args', '[', '2', ':', ']', 'PackageManager', '(', 'packages', ')', '.', 'remove', '(', 'flag', ',', 'extra', ')', 'else', ':', 'usage', '(', '""', ')']
Remove Slackware packages
['Remove', 'Slackware', 'packages']
train
https://github.com/dslackw/slpkg/blob/dd2e08a80e944d337d157b992167ba631a4343de/slpkg/main.py#L570-L607
4,358
pjuren/pyokit
src/pyokit/scripts/join.py
get_key_field
def get_key_field(ui, ui_option_name, default_val=0, default_is_number=True):
    """
    parse an option from a UI object as the name of a key field. If the named
    option is not set, return the default values for the tuple.

    :return: a tuple of two items, first is the value of the option, second is
             a boolean value that indicates whether the value is a column name
             or a column number (numbers start at 0).
    """
    key = default_val
    key_is_field_number = default_is_number
    if ui.optionIsSet(ui_option_name):
        key = ui.getValue(ui_option_name)
        try:
            key = int(key) - 1
            key_is_field_number = True
        except ValueError:
            key_is_field_number = False
    return key, key_is_field_number
python
def get_key_field(ui, ui_option_name, default_val=0, default_is_number=True):
    """
    parse an option from a UI object as the name of a key field. If the named
    option is not set, return the default values for the tuple.

    :return: a tuple of two items, first is the value of the option, second is
             a boolean value that indicates whether the value is a column name
             or a column number (numbers start at 0).
    """
    key = default_val
    key_is_field_number = default_is_number
    if ui.optionIsSet(ui_option_name):
        key = ui.getValue(ui_option_name)
        try:
            key = int(key) - 1
            key_is_field_number = True
        except ValueError:
            key_is_field_number = False
    return key, key_is_field_number
['def', 'get_key_field', '(', 'ui', ',', 'ui_option_name', ',', 'default_val', '=', '0', ',', 'default_is_number', '=', 'True', ')', ':', 'key', '=', 'default_val', 'key_is_field_number', '=', 'default_is_number', 'if', 'ui', '.', 'optionIsSet', '(', 'ui_option_name', ')', ':', 'key', '=', 'ui', '.', 'getValue', '(', 'ui_option_name', ')', 'try', ':', 'key', '=', 'int', '(', 'key', ')', '-', '1', 'key_is_field_number', '=', 'True', 'except', 'ValueError', ':', 'key_is_field_number', '=', 'False', 'return', 'key', ',', 'key_is_field_number']
parse an option from a UI object as the name of a key field. If the named
option is not set, return the default values for the tuple.

:return: a tuple of two items, first is the value of the option, second is
         a boolean value that indicates whether the value is a column name
         or a column number (numbers start at 0).
['parse', 'an', 'option', 'from', 'a', 'UI', 'object', 'as', 'the', 'name', 'of', 'a', 'key', 'field', '.']
train
https://github.com/pjuren/pyokit/blob/fddae123b5d817daa39496183f19c000d9c3791f/src/pyokit/scripts/join.py#L511-L530
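The key-or-name convention in the record above (treat the value as a 1-based column number if it parses as an int, otherwise as a column name) can be sketched without the UI object (hypothetical `parse_key` name):

def parse_key(raw, default=0, default_is_number=True):
    """Interpret raw as a 1-based column number, else as a column name."""
    if raw is None:
        return default, default_is_number
    try:
        return int(raw) - 1, True   # convert to a 0-based index
    except ValueError:
        return raw, False

print(parse_key('3'))      # (2, True)
print(parse_key('price'))  # ('price', False)
print(parse_key(None))     # (0, True)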
4,359
mosdef-hub/mbuild
mbuild/compound.py
Compound.to_trajectory
def to_trajectory(self, show_ports=False, chains=None,
                  residues=None, box=None):
    """Convert to an md.Trajectory and flatten the compound.

    Parameters
    ----------
    show_ports : bool, optional, default=False
        Include all port atoms when converting to trajectory.
    chains : mb.Compound or list of mb.Compound
        Chain types to add to the topology
    residues : str of list of str
        Labels of residues in the Compound. Residues are assigned by
        checking against Compound.name.
    box : mb.Box, optional, default=self.boundingbox (with buffer)
        Box information to be used when converting to a `Trajectory`.
        If 'None', a bounding box is used with a 0.5nm buffer in each
        dimension. to avoid overlapping atoms, unless `self.periodicity`
        is not None, in which case those values are used for the box lengths.

    Returns
    -------
    trajectory : md.Trajectory

    See also
    --------
    _to_topology
    """
    atom_list = [particle for particle in self.particles(show_ports)]

    top = self._to_topology(atom_list, chains, residues)

    # Coordinates.
    xyz = np.ndarray(shape=(1, top.n_atoms, 3), dtype='float')
    for idx, atom in enumerate(atom_list):
        xyz[0, idx] = atom.pos

    # Unitcell information.
    unitcell_angles = [90.0, 90.0, 90.0]
    if box is None:
        unitcell_lengths = np.empty(3)
        for dim, val in enumerate(self.periodicity):
            if val:
                unitcell_lengths[dim] = val
            else:
                unitcell_lengths[dim] = self.boundingbox.lengths[dim] + 0.5
    else:
        unitcell_lengths = box.lengths
        unitcell_angles = box.angles

    return md.Trajectory(xyz, top, unitcell_lengths=unitcell_lengths,
                         unitcell_angles=unitcell_angles)
python
def to_trajectory(self, show_ports=False, chains=None,
                  residues=None, box=None):
    """Convert to an md.Trajectory and flatten the compound.

    Parameters
    ----------
    show_ports : bool, optional, default=False
        Include all port atoms when converting to trajectory.
    chains : mb.Compound or list of mb.Compound
        Chain types to add to the topology
    residues : str of list of str
        Labels of residues in the Compound. Residues are assigned by
        checking against Compound.name.
    box : mb.Box, optional, default=self.boundingbox (with buffer)
        Box information to be used when converting to a `Trajectory`.
        If 'None', a bounding box is used with a 0.5nm buffer in each
        dimension. to avoid overlapping atoms, unless `self.periodicity`
        is not None, in which case those values are used for the box lengths.

    Returns
    -------
    trajectory : md.Trajectory

    See also
    --------
    _to_topology
    """
    atom_list = [particle for particle in self.particles(show_ports)]

    top = self._to_topology(atom_list, chains, residues)

    # Coordinates.
    xyz = np.ndarray(shape=(1, top.n_atoms, 3), dtype='float')
    for idx, atom in enumerate(atom_list):
        xyz[0, idx] = atom.pos

    # Unitcell information.
    unitcell_angles = [90.0, 90.0, 90.0]
    if box is None:
        unitcell_lengths = np.empty(3)
        for dim, val in enumerate(self.periodicity):
            if val:
                unitcell_lengths[dim] = val
            else:
                unitcell_lengths[dim] = self.boundingbox.lengths[dim] + 0.5
    else:
        unitcell_lengths = box.lengths
        unitcell_angles = box.angles

    return md.Trajectory(xyz, top, unitcell_lengths=unitcell_lengths,
                         unitcell_angles=unitcell_angles)
['def', 'to_trajectory', '(', 'self', ',', 'show_ports', '=', 'False', ',', 'chains', '=', 'None', ',', 'residues', '=', 'None', ',', 'box', '=', 'None', ')', ':', 'atom_list', '=', '[', 'particle', 'for', 'particle', 'in', 'self', '.', 'particles', '(', 'show_ports', ')', ']', 'top', '=', 'self', '.', '_to_topology', '(', 'atom_list', ',', 'chains', ',', 'residues', ')', '# Coordinates.', 'xyz', '=', 'np', '.', 'ndarray', '(', 'shape', '=', '(', '1', ',', 'top', '.', 'n_atoms', ',', '3', ')', ',', 'dtype', '=', "'float'", ')', 'for', 'idx', ',', 'atom', 'in', 'enumerate', '(', 'atom_list', ')', ':', 'xyz', '[', '0', ',', 'idx', ']', '=', 'atom', '.', 'pos', '# Unitcell information.', 'unitcell_angles', '=', '[', '90.0', ',', '90.0', ',', '90.0', ']', 'if', 'box', 'is', 'None', ':', 'unitcell_lengths', '=', 'np', '.', 'empty', '(', '3', ')', 'for', 'dim', ',', 'val', 'in', 'enumerate', '(', 'self', '.', 'periodicity', ')', ':', 'if', 'val', ':', 'unitcell_lengths', '[', 'dim', ']', '=', 'val', 'else', ':', 'unitcell_lengths', '[', 'dim', ']', '=', 'self', '.', 'boundingbox', '.', 'lengths', '[', 'dim', ']', '+', '0.5', 'else', ':', 'unitcell_lengths', '=', 'box', '.', 'lengths', 'unitcell_angles', '=', 'box', '.', 'angles', 'return', 'md', '.', 'Trajectory', '(', 'xyz', ',', 'top', ',', 'unitcell_lengths', '=', 'unitcell_lengths', ',', 'unitcell_angles', '=', 'unitcell_angles', ')']
Convert to an md.Trajectory and flatten the compound. Parameters ---------- show_ports : bool, optional, default=False Include all port atoms when converting to trajectory. chains : mb.Compound or list of mb.Compound Chain types to add to the topology residues : str or list of str Labels of residues in the Compound. Residues are assigned by checking against Compound.name. box : mb.Box, optional, default=self.boundingbox (with buffer) Box information to be used when converting to a `Trajectory`. If 'None', a bounding box is used with a 0.5nm buffer in each dimension to avoid overlapping atoms, unless `self.periodicity` is not None, in which case those values are used for the box lengths. Returns ------- trajectory : md.Trajectory See also -------- _to_topology
['Convert', 'to', 'an', 'md', '.', 'Trajectory', 'and', 'flatten', 'the', 'compound', '.']
train
https://github.com/mosdef-hub/mbuild/blob/dcb80a2becd5d0e6a7e3e7bcb1b59793c46a2dd3/mbuild/compound.py#L1879-L1931
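A minimal usage sketch for the record above, assuming mbuild and mdtraj are installed; the SMILES-based loader call is an assumption about mbuild's public API, not part of this record.

```python
# Hypothetical usage sketch for Compound.to_trajectory (assumes mbuild + mdtraj).
import mbuild as mb

methane = mb.load("C", smiles=True)  # build a small compound; SMILES loading is an assumption
traj = methane.to_trajectory()       # box=None -> bounding box plus a 0.5 nm buffer per dimension
print(traj.n_frames, traj.n_atoms, traj.unitcell_lengths)
```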
4,360
SatelliteQE/nailgun
nailgun/entity_mixins.py
_get_entity_id
def _get_entity_id(field_name, attrs): """Find the ID for a one to one relationship. The server may return JSON data in the following forms for a :class:`nailgun.entity_fields.OneToOneField`:: 'user': None 'user': {'name': 'Alice Hayes', 'login': 'ahayes', 'id': 1} 'user_id': 1 'user_id': None Search ``attrs`` for a one to one ``field_name`` and return its ID. :param field_name: A string. The name of a field. :param attrs: A dict. A JSON payload as returned from a server. :returns: Either an entity ID or None. """ field_name_id = field_name + '_id' if field_name in attrs: if attrs[field_name] is None: return None elif 'id' in attrs[field_name]: return attrs[field_name]['id'] if field_name_id in attrs: return attrs[field_name_id] else: raise MissingValueError( 'Cannot find a value for the "{0}" field. Searched for keys named ' '{1}, but available keys are {2}.' .format(field_name, (field_name, field_name_id), attrs.keys()) )
python
def _get_entity_id(field_name, attrs): """Find the ID for a one to one relationship. The server may return JSON data in the following forms for a :class:`nailgun.entity_fields.OneToOneField`:: 'user': None 'user': {'name': 'Alice Hayes', 'login': 'ahayes', 'id': 1} 'user_id': 1 'user_id': None Search ``attrs`` for a one to one ``field_name`` and return its ID. :param field_name: A string. The name of a field. :param attrs: A dict. A JSON payload as returned from a server. :returns: Either an entity ID or None. """ field_name_id = field_name + '_id' if field_name in attrs: if attrs[field_name] is None: return None elif 'id' in attrs[field_name]: return attrs[field_name]['id'] if field_name_id in attrs: return attrs[field_name_id] else: raise MissingValueError( 'Cannot find a value for the "{0}" field. Searched for keys named ' '{1}, but available keys are {2}.' .format(field_name, (field_name, field_name_id), attrs.keys()) )
['def', '_get_entity_id', '(', 'field_name', ',', 'attrs', ')', ':', 'field_name_id', '=', 'field_name', '+', "'_id'", 'if', 'field_name', 'in', 'attrs', ':', 'if', 'attrs', '[', 'field_name', ']', 'is', 'None', ':', 'return', 'None', 'elif', "'id'", 'in', 'attrs', '[', 'field_name', ']', ':', 'return', 'attrs', '[', 'field_name', ']', '[', "'id'", ']', 'if', 'field_name_id', 'in', 'attrs', ':', 'return', 'attrs', '[', 'field_name_id', ']', 'else', ':', 'raise', 'MissingValueError', '(', '\'Cannot find a value for the "{0}" field. Searched for keys named \'', "'{1}, but available keys are {2}.'", '.', 'format', '(', 'field_name', ',', '(', 'field_name', ',', 'field_name_id', ')', ',', 'attrs', '.', 'keys', '(', ')', ')', ')']
Find the ID for a one to one relationship. The server may return JSON data in the following forms for a :class:`nailgun.entity_fields.OneToOneField`:: 'user': None 'user': {'name': 'Alice Hayes', 'login': 'ahayes', 'id': 1} 'user_id': 1 'user_id': None Search ``attrs`` for a one to one ``field_name`` and return its ID. :param field_name: A string. The name of a field. :param attrs: A dict. A JSON payload as returned from a server. :returns: Either an entity ID or None.
['Find', 'the', 'ID', 'for', 'a', 'one', 'to', 'one', 'relationship', '.']
train
https://github.com/SatelliteQE/nailgun/blob/c36d8c20862e87bf6975bd48ac1ca40a9e634eaa/nailgun/entity_mixins.py#L226-L257
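Because `_get_entity_id` is a private helper, a self-contained re-sketch of its documented contract is the easiest way to exercise it; the function below mirrors the docstring's lookup order rather than importing nailgun.

```python
# Standalone sketch of the documented lookup order (not nailgun's actual code).
def get_entity_id(field_name, attrs):
    field_name_id = field_name + "_id"
    if field_name in attrs:
        value = attrs[field_name]
        if value is None:
            return None
        if "id" in value:
            return value["id"]
    if field_name_id in attrs:
        return attrs[field_name_id]
    raise KeyError("no value for field %r" % field_name)

assert get_entity_id("user", {"user": None}) is None
assert get_entity_id("user", {"user": {"id": 1, "login": "ahayes"}}) == 1
assert get_entity_id("user", {"user_id": 1}) == 1
```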
4,361
LuqueDaniel/pybooru
pybooru/api_danbooru.py
DanbooruApi_Mixin.favorite_remove
def favorite_remove(self, post_id): """Remove a post from favorites (Requires login). Parameters: post_id (int): Where post_id is the post id. """ return self._get('favorites/{0}.json'.format(post_id), method='DELETE', auth=True)
python
def favorite_remove(self, post_id): """Remove a post from favorites (Requires login). Parameters: post_id (int): Where post_id is the post id. """ return self._get('favorites/{0}.json'.format(post_id), method='DELETE', auth=True)
['def', 'favorite_remove', '(', 'self', ',', 'post_id', ')', ':', 'return', 'self', '.', '_get', '(', "'favorites/{0}.json'", '.', 'format', '(', 'post_id', ')', ',', 'method', '=', "'DELETE'", ',', 'auth', '=', 'True', ')']
Remove a post from favorites (Requires login). Parameters: post_id (int): Where post_id is the post id.
['Remove', 'a', 'post', 'from', 'favorites', '(', 'Requires', 'login', ')', '.']
train
https://github.com/LuqueDaniel/pybooru/blob/60cd5254684d293b308f0b11b8f4ac2dce101479/pybooru/api_danbooru.py#L431-L438
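A hedged usage sketch; `Danbooru` is pybooru's documented client class, while the site alias and credentials below are placeholders.

```python
# Usage sketch (requires login; credentials are placeholders).
from pybooru import Danbooru

client = Danbooru("danbooru", username="my_user", api_key="my_api_key")
client.favorite_remove(post_id=1234)  # issues DELETE on favorites/1234.json
```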
4,362
spyder-ide/spyder
spyder/utils/syntaxhighlighters.py
PygmentsSH.highlightBlock
def highlightBlock(self, text): """ Actually highlight the block""" # Note that an undefined blockstate is equal to -1, so the first block # will have the correct behaviour of starting at 0. if self._allow_highlight: start = self.previousBlockState() + 1 end = start + len(text) for i, (fmt, letter) in enumerate(self._charlist[start:end]): self.setFormat(i, 1, fmt) self.setCurrentBlockState(end) self.highlight_spaces(text)
python
def highlightBlock(self, text): """ Actually highlight the block""" # Note that an undefined blockstate is equal to -1, so the first block # will have the correct behaviour of starting at 0. if self._allow_highlight: start = self.previousBlockState() + 1 end = start + len(text) for i, (fmt, letter) in enumerate(self._charlist[start:end]): self.setFormat(i, 1, fmt) self.setCurrentBlockState(end) self.highlight_spaces(text)
['def', 'highlightBlock', '(', 'self', ',', 'text', ')', ':', '# Note that an undefined blockstate is equal to -1, so the first block\r', '# will have the correct behaviour of starting at 0.\r', 'if', 'self', '.', '_allow_highlight', ':', 'start', '=', 'self', '.', 'previousBlockState', '(', ')', '+', '1', 'end', '=', 'start', '+', 'len', '(', 'text', ')', 'for', 'i', ',', '(', 'fmt', ',', 'letter', ')', 'in', 'enumerate', '(', 'self', '.', '_charlist', '[', 'start', ':', 'end', ']', ')', ':', 'self', '.', 'setFormat', '(', 'i', ',', '1', ',', 'fmt', ')', 'self', '.', 'setCurrentBlockState', '(', 'end', ')', 'self', '.', 'highlight_spaces', '(', 'text', ')']
Actually highlight the block
['Actually', 'highlight', 'the', 'block']
train
https://github.com/spyder-ide/spyder/blob/f76836ce1b924bcc4efd3f74f2960d26a4e528e0/spyder/utils/syntaxhighlighters.py#L1220-L1230
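The interesting detail in this record is the block-state arithmetic: each block stores its running character offset via `setCurrentBlockState`, so the next block knows where its slice of the flat per-character format list begins. A Qt-free sketch of that indexing, assuming `_charlist` holds one entry per character including the newlines between blocks:

```python
# Qt-free sketch of the previousBlockState()/setCurrentBlockState() offset chain.
charlist = list("aaa\nbbbb\ncc")  # stands in for self._charlist (newline entries assumed)
blocks = ["aaa", "bbbb", "cc"]

prev_state = -1                    # an undefined Qt block state is -1
for text in blocks:
    start = prev_state + 1         # skips the newline that ended the previous block
    end = start + len(text)
    assert charlist[start:end] == list(text)
    prev_state = end               # mimics setCurrentBlockState(end)
```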
4,363
incf-nidash/nidmresults
nidmresults/objects/modelfitting.py
Group.export
def export(self, nidm_version, export_dir): """ Create prov entities and activities. """ self.add_attributes(( (PROV['type'], self.type), (NIDM_GROUP_NAME, self.group_name), (NIDM_NUMBER_OF_SUBJECTS, self.num_subjects), (PROV['label'], self.label)))
python
def export(self, nidm_version, export_dir): """ Create prov entities and activities. """ self.add_attributes(( (PROV['type'], self.type), (NIDM_GROUP_NAME, self.group_name), (NIDM_NUMBER_OF_SUBJECTS, self.num_subjects), (PROV['label'], self.label)))
['def', 'export', '(', 'self', ',', 'nidm_version', ',', 'export_dir', ')', ':', 'self', '.', 'add_attributes', '(', '(', '(', 'PROV', '[', "'type'", ']', ',', 'self', '.', 'type', ')', ',', '(', 'NIDM_GROUP_NAME', ',', 'self', '.', 'group_name', ')', ',', '(', 'NIDM_NUMBER_OF_SUBJECTS', ',', 'self', '.', 'num_subjects', ')', ',', '(', 'PROV', '[', "'label'", ']', ',', 'self', '.', 'label', ')', ')', ')']
Create prov entities and activities.
['Create', 'prov', 'entities', 'and', 'activities', '.']
train
https://github.com/incf-nidash/nidmresults/blob/438f7cce6abc4a4379b629bd76f4d427891e033f/nidmresults/objects/modelfitting.py#L160-L168
4,364
senaite/senaite.core
bika/lims/browser/worksheet/views/analyses.py
AnalysesView.folderitems
def folderitems(self): """Returns an array of dictionaries, each dictionary represents an analysis row to be rendered in the list. The array returned is sorted in accordance with the layout positions set for the analyses this worksheet contains when the analyses were added in the worksheet. :returns: list of dicts with the items to be rendered in the list """ items = BaseView.folderitems(self) # Fill empty positions from the layout with fake rows. The worksheet # can be generated by making use of a WorksheetTemplate, so there is # a chance that some slots of this worksheet may be empty. We still # need to render a row, at least to display the slot number (Pos) self.fill_empty_slots(items) # Sort the items in accordance with the layout items = sorted(items, key=itemgetter("pos_sortkey")) # Fill the slot header cells (first cell of each row). Each slot # contains the analyses that belong to the same parent # (AnalysisRequest, ReferenceSample), so the information about the # parent must be displayed in the first cell of each slot. self.fill_slots_headers(items) return items
python
def folderitems(self): """Returns an array of dictionaries, each dictionary represents an analysis row to be rendered in the list. The array returned is sorted in accordance with the layout positions set for the analyses this worksheet contains when the analyses were added in the worksheet. :returns: list of dicts with the items to be rendered in the list """ items = BaseView.folderitems(self) # Fill empty positions from the layout with fake rows. The worksheet # can be generated by making use of a WorksheetTemplate, so there is # a chance that some slots of this worksheet may be empty. We still # need to render a row, at least to display the slot number (Pos) self.fill_empty_slots(items) # Sort the items in accordance with the layout items = sorted(items, key=itemgetter("pos_sortkey")) # Fill the slot header cells (first cell of each row). Each slot # contains the analyses that belong to the same parent # (AnalysisRequest, ReferenceSample), so the information about the # parent must be displayed in the first cell of each slot. self.fill_slots_headers(items) return items
['def', 'folderitems', '(', 'self', ')', ':', 'items', '=', 'BaseView', '.', 'folderitems', '(', 'self', ')', '# Fill empty positions from the layout with fake rows. The worksheet', '# can be generated by making use of a WorksheetTemplate, so there is', '# a chance that some slots of this worksheet may be empty. We still', '# need to render a row, at least to display the slot number (Pos)', 'self', '.', 'fill_empty_slots', '(', 'items', ')', '# Sort the items in accordance with the layout', 'items', '=', 'sorted', '(', 'items', ',', 'key', '=', 'itemgetter', '(', '"pos_sortkey"', ')', ')', '# Fill the slot header cells (first cell of each row). Each slot', '# contains the analyses that belong to the same parent', '# (AnalysisRequest, ReferenceSample), so the information about the', '# parent must be displayed in the first cell of each slot.', 'self', '.', 'fill_slots_headers', '(', 'items', ')', 'return', 'items']
Returns an array of dictionaries, each dictionary represents an analysis row to be rendered in the list. The array returned is sorted in accordance with the layout positions set for the analyses this worksheet contains when the analyses were added in the worksheet. :returns: list of dicts with the items to be rendered in the list
['Returns', 'an', 'array', 'of', 'dictionaries', 'each', 'dictionary', 'represents', 'an', 'analysis', 'row', 'to', 'be', 'rendered', 'in', 'the', 'list', '.', 'The', 'array', 'returned', 'is', 'sorted', 'in', 'accordance', 'with', 'the', 'layout', 'positions', 'set', 'for', 'the', 'analyses', 'this', 'worksheet', 'contains', 'when', 'the', 'analyses', 'were', 'added', 'in', 'the', 'worksheet', '.']
train
https://github.com/senaite/senaite.core/blob/7602ce2ea2f9e81eb34e20ce17b98a3e70713f85/bika/lims/browser/worksheet/views/analyses.py#L209-L234
4,365
materialsproject/pymatgen
pymatgen/core/spectrum.py
Spectrum.get_interpolated_value
def get_interpolated_value(self, x): """ Returns an interpolated y value for a particular x value. Args: x: x value to return the y value for Returns: Value of y at x """ if len(self.ydim) == 1: return get_linear_interpolated_value(self.x, self.y, x) else: return [get_linear_interpolated_value(self.x, self.y[:, k], x) for k in range(self.ydim[1])]
python
def get_interpolated_value(self, x): """ Returns an interpolated y value for a particular x value. Args: x: x value to return the y value for Returns: Value of y at x """ if len(self.ydim) == 1: return get_linear_interpolated_value(self.x, self.y, x) else: return [get_linear_interpolated_value(self.x, self.y[:, k], x) for k in range(self.ydim[1])]
['def', 'get_interpolated_value', '(', 'self', ',', 'x', ')', ':', 'if', 'len', '(', 'self', '.', 'ydim', ')', '==', '1', ':', 'return', 'get_linear_interpolated_value', '(', 'self', '.', 'x', ',', 'self', '.', 'y', ',', 'x', ')', 'else', ':', 'return', '[', 'get_linear_interpolated_value', '(', 'self', '.', 'x', ',', 'self', '.', 'y', '[', ':', ',', 'k', ']', ',', 'x', ')', 'for', 'k', 'in', 'range', '(', 'self', '.', 'ydim', '[', '1', ']', ')', ']']
Returns an interpolated y value for a particular x value. Args: x: x value to return the y value for Returns: Value of y at x
['Returns', 'an', 'interpolated', 'y', 'value', 'for', 'a', 'particular', 'x', 'value', '.']
train
https://github.com/materialsproject/pymatgen/blob/4ca558cf72f8d5f8a1f21dfdfc0181a971c186da/pymatgen/core/spectrum.py#L105-L119
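A self-contained sketch of the same 1-D versus column-wise branching, with `np.interp` standing in for pymatgen's `get_linear_interpolated_value`; note the real helper raises outside the x range, which `np.interp` does not.

```python
import numpy as np

def interpolated_value(x_grid, y, x):
    """1-D y: one value; 2-D y: one value per column (mirrors Spectrum's branching)."""
    y = np.asarray(y)
    if y.ndim == 1:
        return np.interp(x, x_grid, y)
    return [np.interp(x, x_grid, y[:, k]) for k in range(y.shape[1])]

x_grid = np.array([0.0, 1.0, 2.0])
print(interpolated_value(x_grid, np.array([0.0, 10.0, 20.0]), 1.5))             # 15.0
print(interpolated_value(x_grid, np.array([[0, 1], [10, 11], [20, 21]]), 1.5))  # [15.0, 16.0]
```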
4,366
playpauseandstop/rororo
rororo/settings.py
from_env
def from_env(key: str, default: T = None) -> Union[str, Optional[T]]: """Shortcut for safely reading environment variable. :param key: Environment var key. :param default: Return default value if environment var not found by given key. By default: ``None`` """ return os.getenv(key, default)
python
def from_env(key: str, default: T = None) -> Union[str, Optional[T]]: """Shortcut for safely reading environment variable. :param key: Environment var key. :param default: Return default value if environment var not found by given key. By default: ``None`` """ return os.getenv(key, default)
['def', 'from_env', '(', 'key', ':', 'str', ',', 'default', ':', 'T', '=', 'None', ')', '->', 'Union', '[', 'str', ',', 'Optional', '[', 'T', ']', ']', ':', 'return', 'os', '.', 'getenv', '(', 'key', ',', 'default', ')']
Shortcut for safely reading environment variable. :param key: Environment var key. :param default: Return default value if environment var not found by given key. By default: ``None``
['Shortcut', 'for', 'safely', 'reading', 'environment', 'variable', '.']
train
https://github.com/playpauseandstop/rororo/blob/28a04e8028c29647941e727116335e9d6fd64c27/rororo/settings.py#L26-L34
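Usage is straightforward; the environment variable names below are placeholders.

```python
import os
from rororo.settings import from_env

os.environ["API_PORT"] = "8080"
assert from_env("API_PORT") == "8080"   # found: returns the string value
assert from_env("MISSING", 42) == 42    # not found: returns the typed default
assert from_env("MISSING") is None      # not found, no default
```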
4,367
cherrypy/cheroot
cheroot/server.py
HTTPServer.stop
def stop(self): """Gracefully shutdown a server that is serving forever.""" self.ready = False if self._start_time is not None: self._run_time += (time.time() - self._start_time) self._start_time = None sock = getattr(self, 'socket', None) if sock: if not isinstance(self.bind_addr, six.string_types): # Touch our own socket to make accept() return immediately. try: host, port = sock.getsockname()[:2] except socket.error as ex: if ex.args[0] not in errors.socket_errors_to_ignore: # Changed to use error code and not message # See # https://github.com/cherrypy/cherrypy/issues/860. raise else: # Note that we're explicitly NOT using AI_PASSIVE, # here, because we want an actual IP to touch. # localhost won't work if we've bound to a public IP, # but it will if we bound to '0.0.0.0' (INADDR_ANY). for res in socket.getaddrinfo( host, port, socket.AF_UNSPEC, socket.SOCK_STREAM, ): af, socktype, proto, canonname, sa = res s = None try: s = socket.socket(af, socktype, proto) # See # https://groups.google.com/group/cherrypy-users/ # browse_frm/thread/bbfe5eb39c904fe0 s.settimeout(1.0) s.connect((host, port)) s.close() except socket.error: if s: s.close() if hasattr(sock, 'close'): sock.close() self.socket = None self.requests.stop(self.shutdown_timeout)
python
def stop(self): """Gracefully shutdown a server that is serving forever.""" self.ready = False if self._start_time is not None: self._run_time += (time.time() - self._start_time) self._start_time = None sock = getattr(self, 'socket', None) if sock: if not isinstance(self.bind_addr, six.string_types): # Touch our own socket to make accept() return immediately. try: host, port = sock.getsockname()[:2] except socket.error as ex: if ex.args[0] not in errors.socket_errors_to_ignore: # Changed to use error code and not message # See # https://github.com/cherrypy/cherrypy/issues/860. raise else: # Note that we're explicitly NOT using AI_PASSIVE, # here, because we want an actual IP to touch. # localhost won't work if we've bound to a public IP, # but it will if we bound to '0.0.0.0' (INADDR_ANY). for res in socket.getaddrinfo( host, port, socket.AF_UNSPEC, socket.SOCK_STREAM, ): af, socktype, proto, canonname, sa = res s = None try: s = socket.socket(af, socktype, proto) # See # https://groups.google.com/group/cherrypy-users/ # browse_frm/thread/bbfe5eb39c904fe0 s.settimeout(1.0) s.connect((host, port)) s.close() except socket.error: if s: s.close() if hasattr(sock, 'close'): sock.close() self.socket = None self.requests.stop(self.shutdown_timeout)
['def', 'stop', '(', 'self', ')', ':', 'self', '.', 'ready', '=', 'False', 'if', 'self', '.', '_start_time', 'is', 'not', 'None', ':', 'self', '.', '_run_time', '+=', '(', 'time', '.', 'time', '(', ')', '-', 'self', '.', '_start_time', ')', 'self', '.', '_start_time', '=', 'None', 'sock', '=', 'getattr', '(', 'self', ',', "'socket'", ',', 'None', ')', 'if', 'sock', ':', 'if', 'not', 'isinstance', '(', 'self', '.', 'bind_addr', ',', 'six', '.', 'string_types', ')', ':', '# Touch our own socket to make accept() return immediately.', 'try', ':', 'host', ',', 'port', '=', 'sock', '.', 'getsockname', '(', ')', '[', ':', '2', ']', 'except', 'socket', '.', 'error', 'as', 'ex', ':', 'if', 'ex', '.', 'args', '[', '0', ']', 'not', 'in', 'errors', '.', 'socket_errors_to_ignore', ':', '# Changed to use error code and not message', '# See', '# https://github.com/cherrypy/cherrypy/issues/860.', 'raise', 'else', ':', "# Note that we're explicitly NOT using AI_PASSIVE,", '# here, because we want an actual IP to touch.', "# localhost won't work if we've bound to a public IP,", "# but it will if we bound to '0.0.0.0' (INADDR_ANY).", 'for', 'res', 'in', 'socket', '.', 'getaddrinfo', '(', 'host', ',', 'port', ',', 'socket', '.', 'AF_UNSPEC', ',', 'socket', '.', 'SOCK_STREAM', ',', ')', ':', 'af', ',', 'socktype', ',', 'proto', ',', 'canonname', ',', 'sa', '=', 'res', 's', '=', 'None', 'try', ':', 's', '=', 'socket', '.', 'socket', '(', 'af', ',', 'socktype', ',', 'proto', ')', '# See', '# https://groups.google.com/group/cherrypy-users/', '# browse_frm/thread/bbfe5eb39c904fe0', 's', '.', 'settimeout', '(', '1.0', ')', 's', '.', 'connect', '(', '(', 'host', ',', 'port', ')', ')', 's', '.', 'close', '(', ')', 'except', 'socket', '.', 'error', ':', 'if', 's', ':', 's', '.', 'close', '(', ')', 'if', 'hasattr', '(', 'sock', ',', "'close'", ')', ':', 'sock', '.', 'close', '(', ')', 'self', '.', 'socket', '=', 'None', 'self', '.', 'requests', '.', 'stop', '(', 'self', '.', 'shutdown_timeout', ')']
Gracefully shutdown a server that is serving forever.
['Gracefully', 'shutdown', 'a', 'server', 'that', 'is', 'serving', 'forever', '.']
train
https://github.com/cherrypy/cheroot/blob/2af3b1798d66da697957480d3a8b4831a405770b/cheroot/server.py#L2087-L2132
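The core trick in `HTTPServer.stop` is "touching" the listening socket so a thread blocked in `accept()` wakes up and can observe the shutdown flag. A standalone sketch of that pattern, independent of cheroot:

```python
# Standalone sketch of the "touch our own socket" shutdown trick.
import socket
import threading

server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server.bind(("127.0.0.1", 0))
server.listen(1)
ready = True

def serve():
    while ready:
        conn, _ = server.accept()  # blocks until a client (or the toucher) connects
        conn.close()

worker = threading.Thread(target=serve)
worker.start()

ready = False                          # ask the loop to stop...
host, port = server.getsockname()[:2]
toucher = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
toucher.settimeout(1.0)
toucher.connect((host, port))          # ...and unblock accept() so it notices
toucher.close()
worker.join()
server.close()
```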
4,368
rueckstiess/mtools
mtools/mlaunch/mlaunch.py
MLaunchTool._load_parameters
def _load_parameters(self): """ Load the .mlaunch_startup file that exists in each datadir. Handles different protocol versions. """ datapath = self.dir startup_file = os.path.join(datapath, '.mlaunch_startup') if not os.path.exists(startup_file): return False in_dict = json.load(open(startup_file, 'rb')) # handle legacy version without versioned protocol if 'protocol_version' not in in_dict: in_dict['protocol_version'] = 1 self.loaded_args = in_dict self.startup_info = {} # hostname was added recently self.loaded_args['hostname'] = socket.gethostname() elif in_dict['protocol_version'] == 2: self.startup_info = in_dict['startup_info'] self.loaded_unknown_args = in_dict['unknown_args'] self.loaded_args = in_dict['parsed_args'] # changed 'authentication' to 'auth', if present (from old env) rename if 'authentication' in self.loaded_args: self.loaded_args['auth'] = self.loaded_args['authentication'] del self.loaded_args['authentication'] return True
python
def _load_parameters(self): """ Load the .mlaunch_startup file that exists in each datadir. Handles different protocol versions. """ datapath = self.dir startup_file = os.path.join(datapath, '.mlaunch_startup') if not os.path.exists(startup_file): return False in_dict = json.load(open(startup_file, 'rb')) # handle legacy version without versioned protocol if 'protocol_version' not in in_dict: in_dict['protocol_version'] = 1 self.loaded_args = in_dict self.startup_info = {} # hostname was added recently self.loaded_args['hostname'] = socket.gethostname() elif in_dict['protocol_version'] == 2: self.startup_info = in_dict['startup_info'] self.loaded_unknown_args = in_dict['unknown_args'] self.loaded_args = in_dict['parsed_args'] # changed 'authentication' to 'auth', if present (from old env) rename if 'authentication' in self.loaded_args: self.loaded_args['auth'] = self.loaded_args['authentication'] del self.loaded_args['authentication'] return True
['def', '_load_parameters', '(', 'self', ')', ':', 'datapath', '=', 'self', '.', 'dir', 'startup_file', '=', 'os', '.', 'path', '.', 'join', '(', 'datapath', ',', "'.mlaunch_startup'", ')', 'if', 'not', 'os', '.', 'path', '.', 'exists', '(', 'startup_file', ')', ':', 'return', 'False', 'in_dict', '=', 'json', '.', 'load', '(', 'open', '(', 'startup_file', ',', "'rb'", ')', ')', '# handle legacy version without versioned protocol', 'if', "'protocol_version'", 'not', 'in', 'in_dict', ':', 'in_dict', '[', "'protocol_version'", ']', '=', '1', 'self', '.', 'loaded_args', '=', 'in_dict', 'self', '.', 'startup_info', '=', '{', '}', '# hostname was added recently', 'self', '.', 'loaded_args', '[', "'hostname'", ']', '=', 'socket', '.', 'gethostname', '(', ')', 'elif', 'in_dict', '[', "'protocol_version'", ']', '==', '2', ':', 'self', '.', 'startup_info', '=', 'in_dict', '[', "'startup_info'", ']', 'self', '.', 'loaded_unknown_args', '=', 'in_dict', '[', "'unknown_args'", ']', 'self', '.', 'loaded_args', '=', 'in_dict', '[', "'parsed_args'", ']', "# changed 'authentication' to 'auth', if present (from old env) rename", 'if', "'authentication'", 'in', 'self', '.', 'loaded_args', ':', 'self', '.', 'loaded_args', '[', "'auth'", ']', '=', 'self', '.', 'loaded_args', '[', "'authentication'", ']', 'del', 'self', '.', 'loaded_args', '[', "'authentication'", ']', 'return', 'True']
Load the .mlaunch_startup file that exists in each datadir. Handles different protocol versions.
['Load', 'the', '.', 'mlaunch_startup', 'file', 'that', 'exists', 'in', 'each', 'datadir', '.']
train
https://github.com/rueckstiess/mtools/blob/a6a22910c3569c0c8a3908660ca218a4557e4249/mtools/mlaunch/mlaunch.py#L1378-L1410
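The pattern here, stamping a `protocol_version` into the startup file and branching on it at load time, generalizes well. A minimal sketch with hypothetical field names:

```python
import json

def load_startup(path):
    """Load a versioned startup file; legacy files lack 'protocol_version'."""
    with open(path) as handle:
        data = json.load(handle)
    version = data.get("protocol_version", 1)  # legacy files count as version 1
    if version == 1:
        return {"parsed_args": data, "startup_info": {}}
    if version == 2:
        return {"parsed_args": data["parsed_args"],
                "startup_info": data["startup_info"]}
    raise ValueError("unknown protocol_version %r" % version)
```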
4,369
volafiled/python-volapi
volapi/handler.py
Handler._handle_files
def _handle_files(self, data): """Handle new files being uploaded""" initial = data.get("set", False) files = data["files"] for f in files: try: fobj = File( self.room, self.conn, f[0], f[1], type=f[2], size=f[3], expire_time=int(f[4]) / 1000, uploader=f[6].get("nick") or f[6].get("user"), ) self.room.filedict = fobj.fid, fobj if not initial: self.conn.enqueue_data("file", fobj) except Exception: import pprint LOGGER.exception("bad file") pprint.pprint(f) if initial: self.conn.enqueue_data("initial_files", self.room.files)
python
def _handle_files(self, data): """Handle new files being uploaded""" initial = data.get("set", False) files = data["files"] for f in files: try: fobj = File( self.room, self.conn, f[0], f[1], type=f[2], size=f[3], expire_time=int(f[4]) / 1000, uploader=f[6].get("nick") or f[6].get("user"), ) self.room.filedict = fobj.fid, fobj if not initial: self.conn.enqueue_data("file", fobj) except Exception: import pprint LOGGER.exception("bad file") pprint.pprint(f) if initial: self.conn.enqueue_data("initial_files", self.room.files)
['def', '_handle_files', '(', 'self', ',', 'data', ')', ':', 'initial', '=', 'data', '.', 'get', '(', '"set"', ',', 'False', ')', 'files', '=', 'data', '[', '"files"', ']', 'for', 'f', 'in', 'files', ':', 'try', ':', 'fobj', '=', 'File', '(', 'self', '.', 'room', ',', 'self', '.', 'conn', ',', 'f', '[', '0', ']', ',', 'f', '[', '1', ']', ',', 'type', '=', 'f', '[', '2', ']', ',', 'size', '=', 'f', '[', '3', ']', ',', 'expire_time', '=', 'int', '(', 'f', '[', '4', ']', ')', '/', '1000', ',', 'uploader', '=', 'f', '[', '6', ']', '.', 'get', '(', '"nick"', ')', 'or', 'f', '[', '6', ']', '.', 'get', '(', '"user"', ')', ',', ')', 'self', '.', 'room', '.', 'filedict', '=', 'fobj', '.', 'fid', ',', 'fobj', 'if', 'not', 'initial', ':', 'self', '.', 'conn', '.', 'enqueue_data', '(', '"file"', ',', 'fobj', ')', 'except', 'Exception', ':', 'import', 'pprint', 'LOGGER', '.', 'exception', '(', '"bad file"', ')', 'pprint', '.', 'pprint', '(', 'f', ')', 'if', 'initial', ':', 'self', '.', 'conn', '.', 'enqueue_data', '(', '"initial_files"', ',', 'self', '.', 'room', '.', 'files', ')']
Handle new files being uploaded
['Handle', 'new', 'files', 'being', 'uploaded']
train
https://github.com/volafiled/python-volapi/blob/5f0bc03dbde703264ac6ed494e2050761f688a3e/volapi/handler.py#L151-L177
4,370
obriencj/python-javatools
javatools/distinfo.py
DistInfo.get_provides
def get_provides(self, ignored=tuple()): """ a map of provided classes and class members, and what provides them. ignored is an optional list of globbed patterns indicating packages, classes, etc that shouldn't be included in the provides map""" if self._provides is None: self._collect_requires_provides() d = self._provides if ignored: d = dict((k, v) for k, v in d.items() if not fnmatches(k, *ignored)) return d
python
def get_provides(self, ignored=tuple()): """ a map of provided classes and class members, and what provides them. ignored is an optional list of globbed patterns indicating packages, classes, etc that shouldn't be included in the provides map""" if self._provides is None: self._collect_requires_provides() d = self._provides if ignored: d = dict((k, v) for k, v in d.items() if not fnmatches(k, *ignored)) return d
['def', 'get_provides', '(', 'self', ',', 'ignored', '=', 'tuple', '(', ')', ')', ':', 'if', 'self', '.', '_provides', 'is', 'None', ':', 'self', '.', '_collect_requires_provides', '(', ')', 'd', '=', 'self', '.', '_provides', 'if', 'ignored', ':', 'd', '=', 'dict', '(', '(', 'k', ',', 'v', ')', 'for', 'k', ',', 'v', 'in', 'd', '.', 'items', '(', ')', 'if', 'not', 'fnmatches', '(', 'k', ',', '*', 'ignored', ')', ')', 'return', 'd']
a map of provided classes and class members, and what provides them. ignored is an optional list of globbed patterns indicating packages, classes, etc that shouldn't be included in the provides map
['a', 'map', 'of', 'provided', 'classes', 'and', 'class', 'members', 'and', 'what', 'provides', 'them', '.', 'ignored', 'is', 'an', 'optional', 'list', 'of', 'globbed', 'patterns', 'indicating', 'packages', 'classes', 'etc', 'that', 'shouldn', 't', 'be', 'included', 'in', 'the', 'provides', 'map']
train
https://github.com/obriencj/python-javatools/blob/9e2332b452ddc508bed0615937dddcb2cf051557/javatools/distinfo.py#L131-L144
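`fnmatches` is a javatools helper, but the same glob-based filtering can be sketched with the standard library's `fnmatch`:

```python
from fnmatch import fnmatch

def filter_ignored(provides, ignored=()):
    """Drop keys matching any glob in `ignored` (stdlib stand-in for fnmatches)."""
    return {k: v for k, v in provides.items()
            if not any(fnmatch(k, pattern) for pattern in ignored)}

provides = {"com.example.Foo": "a.jar", "org.junit.Test": "junit.jar"}
print(filter_ignored(provides, ignored=("org.junit.*",)))  # {'com.example.Foo': 'a.jar'}
```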
4,371
ARMmbed/mbed-cloud-sdk-python
src/mbed_cloud/_backends/external_ca/apis/certificate_issuers_api.py
CertificateIssuersApi.get_certificate_issuer
def get_certificate_issuer(self, certificate_issuer_id, **kwargs): # noqa: E501 """Get certificate issuer by ID. # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass asynchronous=True >>> thread = api.get_certificate_issuer(certificate_issuer_id, asynchronous=True) >>> result = thread.get() :param asynchronous bool :param str certificate_issuer_id: Certificate issuer ID. The ID of the certificate issuer. (required) :return: CertificateIssuerInfo If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('asynchronous'): return self.get_certificate_issuer_with_http_info(certificate_issuer_id, **kwargs) # noqa: E501 else: (data) = self.get_certificate_issuer_with_http_info(certificate_issuer_id, **kwargs) # noqa: E501 return data
python
def get_certificate_issuer(self, certificate_issuer_id, **kwargs): # noqa: E501 """Get certificate issuer by ID. # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass asynchronous=True >>> thread = api.get_certificate_issuer(certificate_issuer_id, asynchronous=True) >>> result = thread.get() :param asynchronous bool :param str certificate_issuer_id: Certificate issuer ID. The ID of the certificate issuer. (required) :return: CertificateIssuerInfo If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('asynchronous'): return self.get_certificate_issuer_with_http_info(certificate_issuer_id, **kwargs) # noqa: E501 else: (data) = self.get_certificate_issuer_with_http_info(certificate_issuer_id, **kwargs) # noqa: E501 return data
['def', 'get_certificate_issuer', '(', 'self', ',', 'certificate_issuer_id', ',', '*', '*', 'kwargs', ')', ':', '# noqa: E501', 'kwargs', '[', "'_return_http_data_only'", ']', '=', 'True', 'if', 'kwargs', '.', 'get', '(', "'asynchronous'", ')', ':', 'return', 'self', '.', 'get_certificate_issuer_with_http_info', '(', 'certificate_issuer_id', ',', '*', '*', 'kwargs', ')', '# noqa: E501', 'else', ':', '(', 'data', ')', '=', 'self', '.', 'get_certificate_issuer_with_http_info', '(', 'certificate_issuer_id', ',', '*', '*', 'kwargs', ')', '# noqa: E501', 'return', 'data']
Get certificate issuer by ID. # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass asynchronous=True >>> thread = api.get_certificate_issuer(certificate_issuer_id, asynchronous=True) >>> result = thread.get() :param asynchronous bool :param str certificate_issuer_id: Certificate issuer ID. The ID of the certificate issuer. (required) :return: CertificateIssuerInfo If the method is called asynchronously, returns the request thread.
['Get', 'certificate', 'issuer', 'by', 'ID', '.', '#', 'noqa', ':', 'E501']
train
https://github.com/ARMmbed/mbed-cloud-sdk-python/blob/c0af86fb2cdd4dc7ed26f236139241067d293509/src/mbed_cloud/_backends/external_ca/apis/certificate_issuers_api.py#L234-L253
4,372
vicenteneto/python-cartolafc
cartolafc/api.py
Api.times
def times(self, query): """ Returns the result of a Cartola search for a given search term. Args: query (str): Term to use in the search. Returns: A list of cartolafc.TimeInfo instances, one for each team containing the term used in the search. """ url = '{api_url}/times'.format(api_url=self._api_url) data = self._request(url, params=dict(q=query)) return [TimeInfo.from_dict(time_info) for time_info in data]
python
def times(self, query): """ Returns the result of a Cartola search for a given search term. Args: query (str): Term to use in the search. Returns: A list of cartolafc.TimeInfo instances, one for each team containing the term used in the search. """ url = '{api_url}/times'.format(api_url=self._api_url) data = self._request(url, params=dict(q=query)) return [TimeInfo.from_dict(time_info) for time_info in data]
['def', 'times', '(', 'self', ',', 'query', ')', ':', 'url', '=', "'{api_url}/times'", '.', 'format', '(', 'api_url', '=', 'self', '.', '_api_url', ')', 'data', '=', 'self', '.', '_request', '(', 'url', ',', 'params', '=', 'dict', '(', 'q', '=', 'query', ')', ')', 'return', '[', 'TimeInfo', '.', 'from_dict', '(', 'time_info', ')', 'for', 'time_info', 'in', 'data', ']']
Returns the result of a Cartola search for a given search term. Args: query (str): Term to use in the search. Returns: A list of cartolafc.TimeInfo instances, one for each team containing the term used in the search.
['Returns', 'the', 'result', 'of', 'a', 'Cartola', 'search', 'for', 'a', 'given', 'search', 'term', '.']
train
https://github.com/vicenteneto/python-cartolafc/blob/15b2a192d7745f454d69a55ac9b7ef7c7abb53b9/cartolafc/api.py#L293-L304
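A usage sketch; `cartolafc.Api` is the package's entry point, the search term is a placeholder, and the fields available on the returned objects are assumptions.

```python
import cartolafc

api = cartolafc.Api()
for time_info in api.times(query="Flamengo"):  # hits {api_url}/times?q=Flamengo
    print(time_info)  # TimeInfo fields (e.g. a team name attribute) are assumptions
```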
4,373
razorpay/razorpay-python
razorpay/resources/payment.py
Payment.transfer
def transfer(self, payment_id, data={}, **kwargs): """ Create Transfer for given Payment Id Args: payment_id : Id for which payment object has to be transferred Returns: Payment dict after getting transferred """ url = "{}/{}/transfers".format(self.base_url, payment_id) return self.post_url(url, data, **kwargs)
python
def transfer(self, payment_id, data={}, **kwargs): """ Create Transfer for given Payment Id Args: payment_id : Id for which payment object has to be transferred Returns: Payment dict after getting transferred """ url = "{}/{}/transfers".format(self.base_url, payment_id) return self.post_url(url, data, **kwargs)
['def', 'transfer', '(', 'self', ',', 'payment_id', ',', 'data', '=', '{', '}', ',', '*', '*', 'kwargs', ')', ':', 'url', '=', '"{}/{}/transfers"', '.', 'format', '(', 'self', '.', 'base_url', ',', 'payment_id', ')', 'return', 'self', '.', 'post_url', '(', 'url', ',', 'data', ',', '*', '*', 'kwargs', ')']
Create Transfer for given Payment Id Args: payment_id : Id for which payment object has to be transferred Returns: Payment dict after getting transferred
['Create', 'Transfer', 'for', 'given', 'Payment', 'Id']
train
https://github.com/razorpay/razorpay-python/blob/5bc63fd8452165a4b54556888492e555222c8afe/razorpay/resources/payment.py#L67-L78
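A hedged usage sketch; the client construction follows razorpay-python's documented pattern, while the keys, payment id, and transfer payload shape below are placeholders based on Razorpay's Route feature.

```python
import razorpay

client = razorpay.Client(auth=("rzp_test_key", "rzp_test_secret"))  # placeholder keys
client.payment.transfer(
    "pay_29QQoUBi66xm2f",  # placeholder payment id
    {"transfers": [{"account": "acc_CPRsN1LkFccllA",  # assumed payload shape
                    "amount": 100, "currency": "INR"}]},
)
```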
4,374
spacetelescope/drizzlepac
drizzlepac/processInput.py
createImageObjectList
def createImageObjectList(files,instrpars,group=None, undistort=True, inmemory=False): """ Returns a list of imageObject instances, 1 for each input image in the list of input filenames. """ imageObjList = [] mtflag = False mt_refimg = None for img in files: image = _getInputImage(img,group=group) image.setInstrumentParameters(instrpars) image.compute_wcslin(undistort=undistort) if 'MTFLAG' in image._image['PRIMARY'].header: # check to see whether we are dealing with moving target observations... _keyval = image._image['PRIMARY'].header['MTFLAG'] if not util.is_blank(_keyval): if isinstance(_keyval,bool): mtflag = _keyval else: if 'T' in _keyval: mtflag = True else: mtflag = False else: mtflag = False if mtflag: print("#####\nProcessing Moving Target Observations using reference image as WCS for all inputs!\n#####\n") if mt_refimg is None: mt_refimg = image else: image.set_mt_wcs(mt_refimg) image.inmemory = inmemory # set flag for inmemory processing # Now add (possibly updated) image object to list imageObjList.append(image) return imageObjList
python
def createImageObjectList(files,instrpars,group=None, undistort=True, inmemory=False): """ Returns a list of imageObject instances, 1 for each input image in the list of input filenames. """ imageObjList = [] mtflag = False mt_refimg = None for img in files: image = _getInputImage(img,group=group) image.setInstrumentParameters(instrpars) image.compute_wcslin(undistort=undistort) if 'MTFLAG' in image._image['PRIMARY'].header: # check to see whether we are dealing with moving target observations... _keyval = image._image['PRIMARY'].header['MTFLAG'] if not util.is_blank(_keyval): if isinstance(_keyval,bool): mtflag = _keyval else: if 'T' in _keyval: mtflag = True else: mtflag = False else: mtflag = False if mtflag: print("#####\nProcessing Moving Target Observations using reference image as WCS for all inputs!\n#####\n") if mt_refimg is None: mt_refimg = image else: image.set_mt_wcs(mt_refimg) image.inmemory = inmemory # set flag for inmemory processing # Now add (possibly updated) image object to list imageObjList.append(image) return imageObjList
['def', 'createImageObjectList', '(', 'files', ',', 'instrpars', ',', 'group', '=', 'None', ',', 'undistort', '=', 'True', ',', 'inmemory', '=', 'False', ')', ':', 'imageObjList', '=', '[', ']', 'mtflag', '=', 'False', 'mt_refimg', '=', 'None', 'for', 'img', 'in', 'files', ':', 'image', '=', '_getInputImage', '(', 'img', ',', 'group', '=', 'group', ')', 'image', '.', 'setInstrumentParameters', '(', 'instrpars', ')', 'image', '.', 'compute_wcslin', '(', 'undistort', '=', 'undistort', ')', 'if', "'MTFLAG'", 'in', 'image', '.', '_image', '[', "'PRIMARY'", ']', '.', 'header', ':', '# check to see whether we are dealing with moving target observations...', '_keyval', '=', 'image', '.', '_image', '[', "'PRIMARY'", ']', '.', 'header', '[', "'MTFLAG'", ']', 'if', 'not', 'util', '.', 'is_blank', '(', '_keyval', ')', ':', 'if', 'isinstance', '(', '_keyval', ',', 'bool', ')', ':', 'mtflag', '=', '_keyval', 'else', ':', 'if', "'T'", 'in', '_keyval', ':', 'mtflag', '=', 'True', 'else', ':', 'mtflag', '=', 'False', 'else', ':', 'mtflag', '=', 'False', 'if', 'mtflag', ':', 'print', '(', '"#####\\nProcessing Moving Target Observations using reference image as WCS for all inputs!\\n#####\\n"', ')', 'if', 'mt_refimg', 'is', 'None', ':', 'mt_refimg', '=', 'image', 'else', ':', 'image', '.', 'set_mt_wcs', '(', 'mt_refimg', ')', 'image', '.', 'inmemory', '=', 'inmemory', '# set flag for inmemory processing', '# Now add (possibly updated) image object to list', 'imageObjList', '.', 'append', '(', 'image', ')', 'return', 'imageObjList']
Returns a list of imageObject instances, 1 for each input image in the list of input filenames.
['Returns', 'a', 'list', 'of', 'imageObject', 'instances', '1', 'for', 'each', 'input', 'image', 'in', 'the', 'list', 'of', 'input', 'filenames', '.']
train
https://github.com/spacetelescope/drizzlepac/blob/15bec3c929a6a869d9e71b9398ced43ede0620f1/drizzlepac/processInput.py#L328-L362
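The MTFLAG handling above accepts either a boolean or a FITS-style string keyword; a standalone sketch of that normalization, with `util.is_blank` reduced to a simple whitespace check:

```python
def parse_mtflag(keyval):
    """Normalize an MTFLAG header value to bool (sketch; util.is_blank simplified)."""
    if keyval is None or (isinstance(keyval, str) and not keyval.strip()):
        return False              # blank keyword -> not a moving-target observation
    if isinstance(keyval, bool):
        return keyval
    return "T" in keyval          # FITS-style 'T'/'F' strings

assert parse_mtflag(True) is True
assert parse_mtflag("T") is True
assert parse_mtflag("F") is False
assert parse_mtflag("  ") is False
```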
4,375
saltstack/salt
salt/modules/mount.py
set_filesystems
def set_filesystems( name, device, vfstype, opts='-', mount='true', config='/etc/filesystems', test=False, match_on='auto', **kwargs): ''' .. versionadded:: 2018.3.3 Verify that this mount is represented in the filesystems, change the mount to match the data passed, or add the mount if it is not present on AIX Provide information if the path is mounted :param name: The name of the mount point where the device is mounted. :param device: The device that is being mounted. :param vfstype: The file system that is used (AIX has two fstypes, fstype and vfstype - similar to Linux fstype) :param opts: Additional options used when mounting the device. :param mount: Mount if not mounted, default True. :param config: Configuration file, default /etc/filesystems. :param match: File systems type to match on, default auto CLI Example: .. code-block:: bash salt '*' mount.set_filesystems /mnt/foo /dev/sdz1 jfs2 ''' # Fix the opts type if it is a list if isinstance(opts, list): opts = ','.join(opts) # preserve arguments for updating entry_args = { 'name': name, 'dev': device.replace('\\ ', '\\040'), 'vfstype': vfstype, 'opts': opts, 'mount': mount, } view_lines = [] ret = None if 'AIX' not in __grains__['kernel']: return ret # Transform match_on into list--items will be checked later if isinstance(match_on, list): pass elif not isinstance(match_on, six.string_types): raise CommandExecutionError('match_on must be a string or list of strings') elif match_on == 'auto': # Try to guess right criteria for auto.... # added IBM types from sys/vmount.h after btrfs # NOTE: missing some special fstypes here specialFSes = frozenset([ 'none', 'tmpfs', 'sysfs', 'proc', 'fusectl', 'debugfs', 'securityfs', 'devtmpfs', 'cgroup', 'btrfs', 'cdrfs', 'procfs', 'jfs', 'jfs2', 'nfs', 'sfs', 'nfs3', 'cachefs', 'udfs', 'cifs', 'namefs', 'pmemfs', 'ahafs', 'nfs4', 'autofs', 'stnfs']) if vfstype in specialFSes: match_on = ['name'] else: match_on = ['dev'] else: match_on = [match_on] # generate entry and criteria objects, handle invalid keys in match_on entry_ip = _FileSystemsEntry.from_line(entry_args, kwargs) try: criteria = entry_ip.pick(match_on) except KeyError: filterFn = lambda key: key not in _FileSystemsEntry.compatibility_keys invalid_keys = filter(filterFn, match_on) raise CommandExecutionError('Unrecognized keys in match_on: "{0}"'.format(invalid_keys)) # parse file, use ret to cache status if not os.path.isfile(config): raise CommandExecutionError('Bad config file "{0}"'.format(config)) # read in block of filesystem, block starts with '/' till empty line try: fsys_filedict = _filesystems(config, False) for fsys_view in six.viewitems(fsys_filedict): if criteria.match(fsys_view): ret = 'present' if entry_ip.match(fsys_view): view_lines.append(fsys_view) else: ret = 'change' kv = entry_ip['name'] view_lines.append((kv, entry_ip)) else: view_lines.append(fsys_view) except (IOError, OSError) as exc: raise CommandExecutionError('Couldn\'t read from {0}: {1}'.format(config, exc)) # add line if not present or changed if ret is None: for dict_view in six.viewitems(entry_ip.dict_from_entry()): view_lines.append(dict_view) ret = 'new' if ret != 'present': # ret in ['new', 'change']: try: with salt.utils.files.fopen(config, 'wb') as ofile: # The line was changed, commit it! for fsys_view in view_lines: entry = fsys_view[1] mystrg = _FileSystemsEntry.dict_to_lines(entry) ofile.writelines(salt.utils.data.encode(mystrg)) except (IOError, OSError): raise CommandExecutionError('File not writable {0}'.format(config)) return ret
python
def set_filesystems( name, device, vfstype, opts='-', mount='true', config='/etc/filesystems', test=False, match_on='auto', **kwargs): ''' .. versionadded:: 2018.3.3 Verify that this mount is represented in the filesystems, change the mount to match the data passed, or add the mount if it is not present on AIX Provide information if the path is mounted :param name: The name of the mount point where the device is mounted. :param device: The device that is being mounted. :param vfstype: The file system that is used (AIX has two fstypes, fstype and vfstype - similar to Linux fstype) :param opts: Additional options used when mounting the device. :param mount: Mount if not mounted, default True. :param config: Configuration file, default /etc/filesystems. :param match: File systems type to match on, default auto CLI Example: .. code-block:: bash salt '*' mount.set_filesystems /mnt/foo /dev/sdz1 jfs2 ''' # Fix the opts type if it is a list if isinstance(opts, list): opts = ','.join(opts) # preserve arguments for updating entry_args = { 'name': name, 'dev': device.replace('\\ ', '\\040'), 'vfstype': vfstype, 'opts': opts, 'mount': mount, } view_lines = [] ret = None if 'AIX' not in __grains__['kernel']: return ret # Transform match_on into list--items will be checked later if isinstance(match_on, list): pass elif not isinstance(match_on, six.string_types): raise CommandExecutionError('match_on must be a string or list of strings') elif match_on == 'auto': # Try to guess right criteria for auto.... # added IBM types from sys/vmount.h after btrfs # NOTE: missing some special fstypes here specialFSes = frozenset([ 'none', 'tmpfs', 'sysfs', 'proc', 'fusectl', 'debugfs', 'securityfs', 'devtmpfs', 'cgroup', 'btrfs', 'cdrfs', 'procfs', 'jfs', 'jfs2', 'nfs', 'sfs', 'nfs3', 'cachefs', 'udfs', 'cifs', 'namefs', 'pmemfs', 'ahafs', 'nfs4', 'autofs', 'stnfs']) if vfstype in specialFSes: match_on = ['name'] else: match_on = ['dev'] else: match_on = [match_on] # generate entry and criteria objects, handle invalid keys in match_on entry_ip = _FileSystemsEntry.from_line(entry_args, kwargs) try: criteria = entry_ip.pick(match_on) except KeyError: filterFn = lambda key: key not in _FileSystemsEntry.compatibility_keys invalid_keys = filter(filterFn, match_on) raise CommandExecutionError('Unrecognized keys in match_on: "{0}"'.format(invalid_keys)) # parse file, use ret to cache status if not os.path.isfile(config): raise CommandExecutionError('Bad config file "{0}"'.format(config)) # read in block of filesystem, block starts with '/' till empty line try: fsys_filedict = _filesystems(config, False) for fsys_view in six.viewitems(fsys_filedict): if criteria.match(fsys_view): ret = 'present' if entry_ip.match(fsys_view): view_lines.append(fsys_view) else: ret = 'change' kv = entry_ip['name'] view_lines.append((kv, entry_ip)) else: view_lines.append(fsys_view) except (IOError, OSError) as exc: raise CommandExecutionError('Couldn\'t read from {0}: {1}'.format(config, exc)) # add line if not present or changed if ret is None: for dict_view in six.viewitems(entry_ip.dict_from_entry()): view_lines.append(dict_view) ret = 'new' if ret != 'present': # ret in ['new', 'change']: try: with salt.utils.files.fopen(config, 'wb') as ofile: # The line was changed, commit it! for fsys_view in view_lines: entry = fsys_view[1] mystrg = _FileSystemsEntry.dict_to_lines(entry) ofile.writelines(salt.utils.data.encode(mystrg)) except (IOError, OSError): raise CommandExecutionError('File not writable {0}'.format(config)) return ret
['def', 'set_filesystems', '(', 'name', ',', 'device', ',', 'vfstype', ',', 'opts', '=', "'-'", ',', 'mount', '=', "'true'", ',', 'config', '=', "'/etc/filesystems'", ',', 'test', '=', 'False', ',', 'match_on', '=', "'auto'", ',', '*', '*', 'kwargs', ')', ':', '# Fix the opts type if it is a list', 'if', 'isinstance', '(', 'opts', ',', 'list', ')', ':', 'opts', '=', "','", '.', 'join', '(', 'opts', ')', '# preserve arguments for updating', 'entry_args', '=', '{', "'name'", ':', 'name', ',', "'dev'", ':', 'device', '.', 'replace', '(', "'\\\\ '", ',', "'\\\\040'", ')', ',', "'vfstype'", ':', 'vfstype', ',', "'opts'", ':', 'opts', ',', "'mount'", ':', 'mount', ',', '}', 'view_lines', '=', '[', ']', 'ret', '=', 'None', 'if', "'AIX'", 'not', 'in', '__grains__', '[', "'kernel'", ']', ':', 'return', 'ret', '# Transform match_on into list--items will be checked later', 'if', 'isinstance', '(', 'match_on', ',', 'list', ')', ':', 'pass', 'elif', 'not', 'isinstance', '(', 'match_on', ',', 'six', '.', 'string_types', ')', ':', 'raise', 'CommandExecutionError', '(', "'match_on must be a string or list of strings'", ')', 'elif', 'match_on', '==', "'auto'", ':', '# Try to guess right criteria for auto....', '# added IBM types from sys/vmount.h after btrfs', '# NOTE: missing some special fstypes here', 'specialFSes', '=', 'frozenset', '(', '[', "'none'", ',', "'tmpfs'", ',', "'sysfs'", ',', "'proc'", ',', "'fusectl'", ',', "'debugfs'", ',', "'securityfs'", ',', "'devtmpfs'", ',', "'cgroup'", ',', "'btrfs'", ',', "'cdrfs'", ',', "'procfs'", ',', "'jfs'", ',', "'jfs2'", ',', "'nfs'", ',', "'sfs'", ',', "'nfs3'", ',', "'cachefs'", ',', "'udfs'", ',', "'cifs'", ',', "'namefs'", ',', "'pmemfs'", ',', "'ahafs'", ',', "'nfs4'", ',', "'autofs'", ',', "'stnfs'", ']', ')', 'if', 'vfstype', 'in', 'specialFSes', ':', 'match_on', '=', '[', "'name'", ']', 'else', ':', 'match_on', '=', '[', "'dev'", ']', 'else', ':', 'match_on', '=', '[', 'match_on', ']', '# generate entry and criteria objects, handle invalid keys in match_on', 'entry_ip', '=', '_FileSystemsEntry', '.', 'from_line', '(', 'entry_args', ',', 'kwargs', ')', 'try', ':', 'criteria', '=', 'entry_ip', '.', 'pick', '(', 'match_on', ')', 'except', 'KeyError', ':', 'filterFn', '=', 'lambda', 'key', ':', 'key', 'not', 'in', '_FileSystemsEntry', '.', 'compatibility_keys', 'invalid_keys', '=', 'filter', '(', 'filterFn', ',', 'match_on', ')', 'raise', 'CommandExecutionError', '(', '\'Unrecognized keys in match_on: "{0}"\'', '.', 'format', '(', 'invalid_keys', ')', ')', '# parse file, use ret to cache status', 'if', 'not', 'os', '.', 'path', '.', 'isfile', '(', 'config', ')', ':', 'raise', 'CommandExecutionError', '(', '\'Bad config file "{0}"\'', '.', 'format', '(', 'config', ')', ')', "# read in block of filesystem, block starts with '/' till empty line", 'try', ':', 'fsys_filedict', '=', '_filesystems', '(', 'config', ',', 'False', ')', 'for', 'fsys_view', 'in', 'six', '.', 'viewitems', '(', 'fsys_filedict', ')', ':', 'if', 'criteria', '.', 'match', '(', 'fsys_view', ')', ':', 'ret', '=', "'present'", 'if', 'entry_ip', '.', 'match', '(', 'fsys_view', ')', ':', 'view_lines', '.', 'append', '(', 'fsys_view', ')', 'else', ':', 'ret', '=', "'change'", 'kv', '=', 'entry_ip', '[', "'name'", ']', 'view_lines', '.', 'append', '(', '(', 'kv', ',', 'entry_ip', ')', ')', 'else', ':', 'view_lines', '.', 'append', '(', 'fsys_view', ')', 'except', '(', 'IOError', ',', 'OSError', ')', 'as', 'exc', ':', 'raise', 'CommandExecutionError', '(', "'Couldn\\'t read from {0}: {1}'", '.', 'format', '(', 'config', ',', 'exc', ')', ')', '# add line if not present or changed', 'if', 'ret', 'is', 'None', ':', 'for', 'dict_view', 'in', 'six', '.', 'viewitems', '(', 'entry_ip', '.', 'dict_from_entry', '(', ')', ')', ':', 'view_lines', '.', 'append', '(', 'dict_view', ')', 'ret', '=', "'new'", 'if', 'ret', '!=', "'present'", ':', "# ret in ['new', 'change']:", 'try', ':', 'with', 'salt', '.', 'utils', '.', 'files', '.', 'fopen', '(', 'config', ',', "'wb'", ')', 'as', 'ofile', ':', '# The line was changed, commit it!', 'for', 'fsys_view', 'in', 'view_lines', ':', 'entry', '=', 'fsys_view', '[', '1', ']', 'mystrg', '=', '_FileSystemsEntry', '.', 'dict_to_lines', '(', 'entry', ')', 'ofile', '.', 'writelines', '(', 'salt', '.', 'utils', '.', 'data', '.', 'encode', '(', 'mystrg', ')', ')', 'except', '(', 'IOError', ',', 'OSError', ')', ':', 'raise', 'CommandExecutionError', '(', "'File not writable {0}'", '.', 'format', '(', 'config', ')', ')', 'return', 'ret']
.. versionadded:: 2018.3.3 Verify that this mount is represented in the filesystems, change the mount to match the data passed, or add the mount if it is not present on AIX Provide information if the path is mounted :param name: The name of the mount point where the device is mounted. :param device: The device that is being mounted. :param vfstype: The file system that is used (AIX has two fstypes, fstype and vfstype - similar to Linux fstype) :param opts: Additional options used when mounting the device. :param mount: Mount if not mounted, default True. :param config: Configuration file, default /etc/filesystems. :param match_on: File system type to match on, default auto CLI Example: .. code-block:: bash salt '*' mount.set_filesystems /mnt/foo /dev/sdz1 jfs2
['..', 'versionadded', '::', '2018', '.', '3', '.', '3']
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/mount.py#L1666-L1810
4,376
twisted/axiom
axiom/store.py
Store.batchInsert
def batchInsert(self, itemType, itemAttributes, dataRows): """ Create multiple items in the store without loading corresponding Python objects into memory. the items' C{stored} callback will not be called. Example:: myData = [(37, u"Fred", u"Wichita"), (28, u"Jim", u"Fresno"), (43, u"Betty", u"Dubuque")] myStore.batchInsert(FooItem, [FooItem.age, FooItem.name, FooItem.city], myData) @param itemType: an Item subclass to create instances of. @param itemAttributes: an iterable of attributes on the Item subclass. @param dataRows: an iterable of iterables, each the same length as C{itemAttributes} and containing data corresponding to each attribute in it. @return: None. """ class FakeItem: pass _NEEDS_DEFAULT = object() # token for lookup failure fakeOSelf = FakeItem() fakeOSelf.store = self sql = itemType._baseInsertSQL(self) indices = {} schema = [attr for (name, attr) in itemType.getSchema()] for i, attr in enumerate(itemAttributes): indices[attr] = i for row in dataRows: oid = self.store.executeSchemaSQL( _schema.CREATE_OBJECT, [self.store.getTypeID(itemType)]) insertArgs = [oid] for attr in schema: i = indices.get(attr, _NEEDS_DEFAULT) if i is _NEEDS_DEFAULT: pyval = attr.default else: pyval = row[i] dbval = attr._convertPyval(fakeOSelf, pyval) insertArgs.append(dbval) self.executeSQL(sql, insertArgs)
python
def batchInsert(self, itemType, itemAttributes, dataRows): """ Create multiple items in the store without loading corresponding Python objects into memory. the items' C{stored} callback will not be called. Example:: myData = [(37, u"Fred", u"Wichita"), (28, u"Jim", u"Fresno"), (43, u"Betty", u"Dubuque")] myStore.batchInsert(FooItem, [FooItem.age, FooItem.name, FooItem.city], myData) @param itemType: an Item subclass to create instances of. @param itemAttributes: an iterable of attributes on the Item subclass. @param dataRows: an iterable of iterables, each the same length as C{itemAttributes} and containing data corresponding to each attribute in it. @return: None. """ class FakeItem: pass _NEEDS_DEFAULT = object() # token for lookup failure fakeOSelf = FakeItem() fakeOSelf.store = self sql = itemType._baseInsertSQL(self) indices = {} schema = [attr for (name, attr) in itemType.getSchema()] for i, attr in enumerate(itemAttributes): indices[attr] = i for row in dataRows: oid = self.store.executeSchemaSQL( _schema.CREATE_OBJECT, [self.store.getTypeID(itemType)]) insertArgs = [oid] for attr in schema: i = indices.get(attr, _NEEDS_DEFAULT) if i is _NEEDS_DEFAULT: pyval = attr.default else: pyval = row[i] dbval = attr._convertPyval(fakeOSelf, pyval) insertArgs.append(dbval) self.executeSQL(sql, insertArgs)
['def', 'batchInsert', '(', 'self', ',', 'itemType', ',', 'itemAttributes', ',', 'dataRows', ')', ':', 'class', 'FakeItem', ':', 'pass', '_NEEDS_DEFAULT', '=', 'object', '(', ')', '# token for lookup failure', 'fakeOSelf', '=', 'FakeItem', '(', ')', 'fakeOSelf', '.', 'store', '=', 'self', 'sql', '=', 'itemType', '.', '_baseInsertSQL', '(', 'self', ')', 'indices', '=', '{', '}', 'schema', '=', '[', 'attr', 'for', '(', 'name', ',', 'attr', ')', 'in', 'itemType', '.', 'getSchema', '(', ')', ']', 'for', 'i', ',', 'attr', 'in', 'enumerate', '(', 'itemAttributes', ')', ':', 'indices', '[', 'attr', ']', '=', 'i', 'for', 'row', 'in', 'dataRows', ':', 'oid', '=', 'self', '.', 'store', '.', 'executeSchemaSQL', '(', '_schema', '.', 'CREATE_OBJECT', ',', '[', 'self', '.', 'store', '.', 'getTypeID', '(', 'itemType', ')', ']', ')', 'insertArgs', '=', '[', 'oid', ']', 'for', 'attr', 'in', 'schema', ':', 'i', '=', 'indices', '.', 'get', '(', 'attr', ',', '_NEEDS_DEFAULT', ')', 'if', 'i', 'is', '_NEEDS_DEFAULT', ':', 'pyval', '=', 'attr', '.', 'default', 'else', ':', 'pyval', '=', 'row', '[', 'i', ']', 'dbval', '=', 'attr', '.', '_convertPyval', '(', 'fakeOSelf', ',', 'pyval', ')', 'insertArgs', '.', 'append', '(', 'dbval', ')', 'self', '.', 'executeSQL', '(', 'sql', ',', 'insertArgs', ')']
Create multiple items in the store without loading corresponding Python objects into memory. The items' C{stored} callback will not be called. Example:: myData = [(37, u"Fred", u"Wichita"), (28, u"Jim", u"Fresno"), (43, u"Betty", u"Dubuque")] myStore.batchInsert(FooItem, [FooItem.age, FooItem.name, FooItem.city], myData) @param itemType: an Item subclass to create instances of. @param itemAttributes: an iterable of attributes on the Item subclass. @param dataRows: an iterable of iterables, each the same length as C{itemAttributes} and containing data corresponding to each attribute in it. @return: None.
['Create', 'multiple', 'items', 'in', 'the', 'store', 'without', 'loading', 'corresponding', 'Python', 'objects', 'into', 'memory', '.']
train
https://github.com/twisted/axiom/blob/7de70bc8fe1bb81f9c2339fba8daec9eb2e92b68/axiom/store.py#L1731-L1779
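The `_NEEDS_DEFAULT` sentinel above is a standard Python idiom for telling "column not provided" apart from "column provided with value None". A minimal standalone sketch of the same fill-with-defaults logic; all names below are illustrative, not Axiom's API:

_MISSING = object()  # unique token: cannot collide with any real value

def fill_row(schema, provided, row):
    """Build a full row ordered by `schema`, taking values from `row` where
    the column was provided and falling back to per-column defaults."""
    indices = {attr: i for i, attr in enumerate(provided)}
    defaults = {"age": 0, "name": u"", "city": u""}  # assumed defaults
    full = []
    for attr in schema:
        i = indices.get(attr, _MISSING)
        full.append(defaults[attr] if i is _MISSING else row[i])
    return full

print(fill_row(["age", "name", "city"], ["name"], [u"Fred"]))
# -> [0, u'Fred', u'']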
4,377
thespacedoctor/fundamentals
fundamentals/files/recursive_directory_listing.py
recursive_directory_listing
def recursive_directory_listing(
        log,
        baseFolderPath,
        whatToList="all"
):
    """*list directory contents recursively.*

    Options to list only files or only directories.

    **Key Arguments:**
        - ``log`` -- logger
        - ``baseFolderPath`` -- path to the base folder to list contained files and folders recursively
        - ``whatToList`` -- list files only, directories only or all [ "files" | "dirs" | "all" ]

    **Return:**
        - ``matchedPathList`` -- the matched paths

    **Usage:**

        .. code-block:: python

            from fundamentals.files import recursive_directory_listing
            theseFiles = recursive_directory_listing(
                log,
                baseFolderPath="/tmp"
            )
            # OR JUST FILES
            from fundamentals.files import recursive_directory_listing
            theseFiles = recursive_directory_listing(
                log,
                baseFolderPath="/tmp",
                whatToList="files"
            )
            # OR JUST FOLDERS
            from fundamentals.files import recursive_directory_listing
            theseFiles = recursive_directory_listing(
                log,
                baseFolderPath="/tmp",
                whatToList="dirs"
            )
            print(theseFiles)
    """
    log.debug('starting the ``recursive_directory_listing`` function')

    ## VARIABLES ##
    matchedPathList = []
    parentDirectoryList = [baseFolderPath, ]
    count = 0
    while os.listdir(baseFolderPath) and count < 20:
        count += 1
        while len(parentDirectoryList) != 0:
            childDirList = []
            for parentDir in parentDirectoryList:
                try:
                    thisDirList = os.listdir(parentDir)
                except Exception as e:
                    log.error(e)
                    continue
                for d in thisDirList:
                    fullPath = os.path.join(parentDir, d)
                    if whatToList == "all":
                        matched = True
                    elif whatToList == "dirs":
                        matched = os.path.isdir(fullPath)
                    elif whatToList == "files":
                        matched = os.path.isfile(fullPath)
                    else:
                        log.error(
                            'could not list files in %s, `whatToList` variable incorrect: [ "files" | "dirs" | "all" ]' % (baseFolderPath,))
                        sys.exit(0)
                    if matched:
                        matchedPathList.append(fullPath)
                    # UPDATE DIRECTORY LISTING
                    if os.path.isdir(fullPath):
                        childDirList.append(fullPath)
            parentDirectoryList = childDirList

    log.debug('completed the ``recursive_directory_listing`` function')
    return matchedPathList
python
def recursive_directory_listing(
        log,
        baseFolderPath,
        whatToList="all"
):
    """*list directory contents recursively.*

    Options to list only files or only directories.

    **Key Arguments:**
        - ``log`` -- logger
        - ``baseFolderPath`` -- path to the base folder to list contained files and folders recursively
        - ``whatToList`` -- list files only, directories only or all [ "files" | "dirs" | "all" ]

    **Return:**
        - ``matchedPathList`` -- the matched paths

    **Usage:**

        .. code-block:: python

            from fundamentals.files import recursive_directory_listing
            theseFiles = recursive_directory_listing(
                log,
                baseFolderPath="/tmp"
            )
            # OR JUST FILES
            from fundamentals.files import recursive_directory_listing
            theseFiles = recursive_directory_listing(
                log,
                baseFolderPath="/tmp",
                whatToList="files"
            )
            # OR JUST FOLDERS
            from fundamentals.files import recursive_directory_listing
            theseFiles = recursive_directory_listing(
                log,
                baseFolderPath="/tmp",
                whatToList="dirs"
            )
            print(theseFiles)
    """
    log.debug('starting the ``recursive_directory_listing`` function')

    ## VARIABLES ##
    matchedPathList = []
    parentDirectoryList = [baseFolderPath, ]
    count = 0
    while os.listdir(baseFolderPath) and count < 20:
        count += 1
        while len(parentDirectoryList) != 0:
            childDirList = []
            for parentDir in parentDirectoryList:
                try:
                    thisDirList = os.listdir(parentDir)
                except Exception as e:
                    log.error(e)
                    continue
                for d in thisDirList:
                    fullPath = os.path.join(parentDir, d)
                    if whatToList == "all":
                        matched = True
                    elif whatToList == "dirs":
                        matched = os.path.isdir(fullPath)
                    elif whatToList == "files":
                        matched = os.path.isfile(fullPath)
                    else:
                        log.error(
                            'could not list files in %s, `whatToList` variable incorrect: [ "files" | "dirs" | "all" ]' % (baseFolderPath,))
                        sys.exit(0)
                    if matched:
                        matchedPathList.append(fullPath)
                    # UPDATE DIRECTORY LISTING
                    if os.path.isdir(fullPath):
                        childDirList.append(fullPath)
            parentDirectoryList = childDirList

    log.debug('completed the ``recursive_directory_listing`` function')
    return matchedPathList
['def', 'recursive_directory_listing', '(', 'log', ',', 'baseFolderPath', ',', 'whatToList', '=', '"all"', ')', ':', 'log', '.', 'debug', '(', "'starting the ``recursive_directory_listing`` function'", ')', '## VARIABLES ##', 'matchedPathList', '=', '[', ']', 'parentDirectoryList', '=', '[', 'baseFolderPath', ',', ']', 'count', '=', '0', 'while', 'os', '.', 'listdir', '(', 'baseFolderPath', ')', 'and', 'count', '<', '20', ':', 'count', '+=', '1', 'while', 'len', '(', 'parentDirectoryList', ')', '!=', '0', ':', 'childDirList', '=', '[', ']', 'for', 'parentDir', 'in', 'parentDirectoryList', ':', 'try', ':', 'thisDirList', '=', 'os', '.', 'listdir', '(', 'parentDir', ')', 'except', 'Exception', 'as', 'e', ':', 'log', '.', 'error', '(', 'e', ')', 'continue', 'for', 'd', 'in', 'thisDirList', ':', 'fullPath', '=', 'os', '.', 'path', '.', 'join', '(', 'parentDir', ',', 'd', ')', 'if', 'whatToList', '==', '"all"', ':', 'matched', '=', 'True', 'elif', 'whatToList', '==', '"dirs"', ':', 'matched', '=', 'os', '.', 'path', '.', 'isdir', '(', 'fullPath', ')', 'elif', 'whatToList', '==', '"files"', ':', 'matched', '=', 'os', '.', 'path', '.', 'isfile', '(', 'fullPath', ')', 'else', ':', 'log', '.', 'error', '(', '\'could not list files in %s, `whatToList` variable incorrect: [ "files" | "dirs" | "all" ]\'', '%', '(', 'baseFolderPath', ',', ')', ')', 'sys', '.', 'exit', '(', '0', ')', 'if', 'matched', ':', 'matchedPathList', '.', 'append', '(', 'fullPath', ')', '# UPDATE DIRECTORY LISTING', 'if', 'os', '.', 'path', '.', 'isdir', '(', 'fullPath', ')', ':', 'childDirList', '.', 'append', '(', 'fullPath', ')', 'parentDirectoryList', '=', 'childDirList', 'log', '.', 'debug', '(', "'completed the ``recursive_directory_listing`` function'", ')', 'return', 'matchedPathList']
*list directory contents recursively.* Options to list only files or only directories. **Key Arguments:** - ``log`` -- logger - ``baseFolderPath`` -- path to the base folder to list contained files and folders recursively - ``whatToList`` -- list files only, directories only or all [ "files" | "dirs" | "all" ] **Return:** - ``matchedPathList`` -- the matched paths **Usage:** .. code-block:: python from fundamentals.files import recursive_directory_listing theseFiles = recursive_directory_listing( log, baseFolderPath="/tmp" ) # OR JUST FILES from fundamentals.files import recursive_directory_listing theseFiles = recursive_directory_listing( log, baseFolderPath="/tmp", whatToList="files" ) # OR JUST FOLDERS from fundamentals.files import recursive_directory_listing theseFiles = recursive_directory_listing( log, baseFolderPath="/tmp", whatToList="dirs" ) print(theseFiles)
['*', 'list', 'directory', 'contents', 'recursively', '.', '*']
train
https://github.com/thespacedoctor/fundamentals/blob/1d2c007ac74442ec2eabde771cfcacdb9c1ab382/fundamentals/files/recursive_directory_listing.py#L19-L109
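For comparison, the standard library's `os.walk` yields the same recursive listing without the hand-rolled breadth-first bookkeeping; a minimal sketch, not part of the fundamentals package:

import os

def walk_listing(base, what="all"):
    """Recursively list paths under `base`; `what` is "files", "dirs" or "all"."""
    matched = []
    for root, dirs, files in os.walk(base):
        if what in ("dirs", "all"):
            matched.extend(os.path.join(root, d) for d in dirs)
        if what in ("files", "all"):
            matched.extend(os.path.join(root, f) for f in files)
    return matched

print(walk_listing("/tmp", what="files")[:5])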
4,378
core/uricore
uricore/wkz_urls.py
url_decode
def url_decode(s, charset='utf-8', decode_keys=False, include_empty=True,
               errors='replace', separator='&', cls=None):
    """Parse a querystring and return it as :class:`MultiDict`. By default
    only values are decoded into unicode strings. If `decode_keys` is set to
    `True` the same will happen for keys.

    By default a missing value for a key will default to an empty string. If
    you don't want that behavior you can set `include_empty` to `False`.

    By default decoding errors are replaced. If you want a different behavior
    you can set `errors` to ``'ignore'`` or ``'strict'``. In strict mode a
    `HTTPUnicodeError` is raised.

    .. versionchanged:: 0.5
       In previous versions ";" and "&" could be used for url decoding.
       This changed in 0.5 where only "&" is supported. If you want to
       use ";" instead a different `separator` can be provided.

       The `cls` parameter was added.

    :param s: a string with the query string to decode.
    :param charset: the charset of the query string.
    :param decode_keys: set to `True` if you want the keys to be decoded
                        as well.
    :param include_empty: Set to `False` if you don't want empty values to
                          appear in the dict.
    :param errors: the decoding error behavior.
    :param separator: the pair separator to be used, defaults to ``&``
    :param cls: an optional dict class to use. If this is not specified
                or `None` the default :class:`MultiDict` is used.
    """
    if cls is None:
        cls = MultiDict
    return cls(_url_decode_impl(str(s).split(separator), charset, decode_keys,
                                include_empty, errors))
python
def url_decode(s, charset='utf-8', decode_keys=False, include_empty=True,
               errors='replace', separator='&', cls=None):
    """Parse a querystring and return it as :class:`MultiDict`. By default
    only values are decoded into unicode strings. If `decode_keys` is set to
    `True` the same will happen for keys.

    By default a missing value for a key will default to an empty string. If
    you don't want that behavior you can set `include_empty` to `False`.

    By default decoding errors are replaced. If you want a different behavior
    you can set `errors` to ``'ignore'`` or ``'strict'``. In strict mode a
    `HTTPUnicodeError` is raised.

    .. versionchanged:: 0.5
       In previous versions ";" and "&" could be used for url decoding.
       This changed in 0.5 where only "&" is supported. If you want to
       use ";" instead a different `separator` can be provided.

       The `cls` parameter was added.

    :param s: a string with the query string to decode.
    :param charset: the charset of the query string.
    :param decode_keys: set to `True` if you want the keys to be decoded
                        as well.
    :param include_empty: Set to `False` if you don't want empty values to
                          appear in the dict.
    :param errors: the decoding error behavior.
    :param separator: the pair separator to be used, defaults to ``&``
    :param cls: an optional dict class to use. If this is not specified
                or `None` the default :class:`MultiDict` is used.
    """
    if cls is None:
        cls = MultiDict
    return cls(_url_decode_impl(str(s).split(separator), charset, decode_keys,
                                include_empty, errors))
['def', 'url_decode', '(', 's', ',', 'charset', '=', "'utf-8'", ',', 'decode_keys', '=', 'False', ',', 'include_empty', '=', 'True', ',', 'errors', '=', "'replace'", ',', 'separator', '=', "'&'", ',', 'cls', '=', 'None', ')', ':', 'if', 'cls', 'is', 'None', ':', 'cls', '=', 'MultiDict', 'return', 'cls', '(', '_url_decode_impl', '(', 'str', '(', 's', ')', '.', 'split', '(', 'separator', ')', ',', 'charset', ',', 'decode_keys', ',', 'include_empty', ',', 'errors', ')', ')']
Parse a querystring and return it as :class:`MultiDict`. By default only values are decoded into unicode strings. If `decode_keys` is set to `True` the same will happen for keys. By default a missing value for a key will default to an empty string. If you don't want that behavior you can set `include_empty` to `False`. By default decoding errors are replaced. If you want a different behavior you can set `errors` to ``'ignore'`` or ``'strict'``. In strict mode a `HTTPUnicodeError` is raised. .. versionchanged:: 0.5 In previous versions ";" and "&" could be used for url decoding. This changed in 0.5 where only "&" is supported. If you want to use ";" instead a different `separator` can be provided. The `cls` parameter was added. :param s: a string with the query string to decode. :param charset: the charset of the query string. :param decode_keys: set to `True` if you want the keys to be decoded as well. :param include_empty: Set to `False` if you don't want empty values to appear in the dict. :param errors: the decoding error behavior. :param separator: the pair separator to be used, defaults to ``&`` :param cls: an optional dict class to use. If this is not specified or `None` the default :class:`MultiDict` is used.
['Parse', 'a', 'querystring', 'and', 'return', 'it', 'as', ':', 'class', ':', 'MultiDict', '.', 'By', 'default', 'only', 'values', 'are', 'decoded', 'into', 'unicode', 'strings', '.', 'If', 'decode_keys', 'is', 'set', 'to', 'True', 'the', 'same', 'will', 'happen', 'for', 'keys', '.']
train
https://github.com/core/uricore/blob/dc5ef4be7bd93da4c39e5c1cbd1ae4f3ad3f1f2a/uricore/wkz_urls.py#L222-L256
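A quick usage sketch; the exact return type depends on the `cls` argument, and `getlist` is assumed from a Werkzeug-style `MultiDict`:

d = url_decode('a=1&b=2&b=3&flag')
print(d['a'])          # u'1'
print(d.getlist('b'))  # [u'2', u'3']  (getlist assumed on MultiDict)
print(d['flag'])       # u''  -- kept because include_empty=True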
4,379
lrq3000/pyFileFixity
pyFileFixity/header_ecc.py
ecc_correct_intra
def ecc_correct_intra(ecc_manager_intra, ecc_params_intra, field, ecc, enable_erasures=False, erasures_char="\x00", only_erasures=False):
    """ Correct an intra-field with its corresponding intra-ecc if necessary """
    fentry_fields = {"ecc_field": ecc}
    field_correct = []  # will store each block of the corrected (or already correct) filepath
    fcorrupted = False  # check if field was corrupted
    fcorrected = True  # check if field was corrected (if it was corrupted)
    errmsg = ''
    # Decode each block of the filepath
    for e in entry_assemble(fentry_fields, ecc_params_intra, len(field), '', field):
        # Check if this block of the filepath is OK, if yes then we just copy it over
        if ecc_manager_intra.check(e["message"], e["ecc"]):
            field_correct.append(e["message"])
        else:  # Else this block is corrupted, we will try to fix it using the ecc
            fcorrupted = True
            # Repair the message block and the ecc
            try:
                repaired_block, repaired_ecc = ecc_manager_intra.decode(e["message"], e["ecc"], enable_erasures=enable_erasures, erasures_char=erasures_char, only_erasures=only_erasures)
            except (ReedSolomonError, RSCodecError) as exc:  # the reedsolo lib may raise an exception when it can't decode. We ensure that we can still continue to decode the rest of the file, and the other files.
                repaired_block = None
                repaired_ecc = None
                errmsg += "- Error: metadata field: %s\n" % exc
            # Check if the block was successfully repaired: if yes then we copy the repaired block...
            if repaired_block is not None and ecc_manager_intra.check(repaired_block, repaired_ecc):
                field_correct.append(repaired_block)
            else:  # ... else it failed, then we copy the original corrupted block and report an error later
                field_correct.append(e["message"])
                fcorrected = False
    # Join all the blocks into one string to build the final filepath
    if isinstance(field_correct[0], bytearray):
        field_correct = [str(x) for x in field_correct]  # workaround when using --ecc_algo 3 or 4, because we get a list of bytearrays instead of str
    field = ''.join(field_correct)
    # Report errors
    return (field, fcorrupted, fcorrected, errmsg)
python
def ecc_correct_intra(ecc_manager_intra, ecc_params_intra, field, ecc, enable_erasures=False, erasures_char="\x00", only_erasures=False):
    """ Correct an intra-field with its corresponding intra-ecc if necessary """
    fentry_fields = {"ecc_field": ecc}
    field_correct = []  # will store each block of the corrected (or already correct) filepath
    fcorrupted = False  # check if field was corrupted
    fcorrected = True  # check if field was corrected (if it was corrupted)
    errmsg = ''
    # Decode each block of the filepath
    for e in entry_assemble(fentry_fields, ecc_params_intra, len(field), '', field):
        # Check if this block of the filepath is OK, if yes then we just copy it over
        if ecc_manager_intra.check(e["message"], e["ecc"]):
            field_correct.append(e["message"])
        else:  # Else this block is corrupted, we will try to fix it using the ecc
            fcorrupted = True
            # Repair the message block and the ecc
            try:
                repaired_block, repaired_ecc = ecc_manager_intra.decode(e["message"], e["ecc"], enable_erasures=enable_erasures, erasures_char=erasures_char, only_erasures=only_erasures)
            except (ReedSolomonError, RSCodecError) as exc:  # the reedsolo lib may raise an exception when it can't decode. We ensure that we can still continue to decode the rest of the file, and the other files.
                repaired_block = None
                repaired_ecc = None
                errmsg += "- Error: metadata field: %s\n" % exc
            # Check if the block was successfully repaired: if yes then we copy the repaired block...
            if repaired_block is not None and ecc_manager_intra.check(repaired_block, repaired_ecc):
                field_correct.append(repaired_block)
            else:  # ... else it failed, then we copy the original corrupted block and report an error later
                field_correct.append(e["message"])
                fcorrected = False
    # Join all the blocks into one string to build the final filepath
    if isinstance(field_correct[0], bytearray):
        field_correct = [str(x) for x in field_correct]  # workaround when using --ecc_algo 3 or 4, because we get a list of bytearrays instead of str
    field = ''.join(field_correct)
    # Report errors
    return (field, fcorrupted, fcorrected, errmsg)
['def', 'ecc_correct_intra', '(', 'ecc_manager_intra', ',', 'ecc_params_intra', ',', 'field', ',', 'ecc', ',', 'enable_erasures', '=', 'False', ',', 'erasures_char', '=', '"\\x00"', ',', 'only_erasures', '=', 'False', ')', ':', 'fentry_fields', '=', '{', '"ecc_field"', ':', 'ecc', '}', 'field_correct', '=', '[', ']', '# will store each block of the corrected (or already correct) filepath', 'fcorrupted', '=', 'False', '# check if field was corrupted', 'fcorrected', '=', 'True', '# check if field was corrected (if it was corrupted)', 'errmsg', '=', "''", '# Decode each block of the filepath', 'for', 'e', 'in', 'entry_assemble', '(', 'fentry_fields', ',', 'ecc_params_intra', ',', 'len', '(', 'field', ')', ',', "''", ',', 'field', ')', ':', '# Check if this block of the filepath is OK, if yes then we just copy it over', 'if', 'ecc_manager_intra', '.', 'check', '(', 'e', '[', '"message"', ']', ',', 'e', '[', '"ecc"', ']', ')', ':', 'field_correct', '.', 'append', '(', 'e', '[', '"message"', ']', ')', 'else', ':', '# Else this block is corrupted, we will try to fix it using the ecc', 'fcorrupted', '=', 'True', '# Repair the message block and the ecc', 'try', ':', 'repaired_block', ',', 'repaired_ecc', '=', 'ecc_manager_intra', '.', 'decode', '(', 'e', '[', '"message"', ']', ',', 'e', '[', '"ecc"', ']', ',', 'enable_erasures', '=', 'enable_erasures', ',', 'erasures_char', '=', 'erasures_char', ',', 'only_erasures', '=', 'only_erasures', ')', 'except', '(', 'ReedSolomonError', ',', 'RSCodecError', ')', 'as', 'exc', ':', "# the reedsolo lib may raise an exception when it can't decode. We ensure that we can still continue to decode the rest of the file, and the other files.", 'repaired_block', '=', 'None', 'repaired_ecc', '=', 'None', 'errmsg', '+=', '"- Error: metadata field: %s\\n"', '%', 'exc', '# Check if the block was successfully repaired: if yes then we copy the repaired block...', 'if', 'repaired_block', 'is', 'not', 'None', 'and', 'ecc_manager_intra', '.', 'check', '(', 'repaired_block', ',', 'repaired_ecc', ')', ':', 'field_correct', '.', 'append', '(', 'repaired_block', ')', 'else', ':', '# ... else it failed, then we copy the original corrupted block and report an error later', 'field_correct', '.', 'append', '(', 'e', '[', '"message"', ']', ')', 'fcorrected', '=', 'False', '# Join all the blocks into one string to build the final filepath', 'if', 'isinstance', '(', 'field_correct', '[', '0', ']', ',', 'bytearray', ')', ':', 'field_correct', '=', '[', 'str', '(', 'x', ')', 'for', 'x', 'in', 'field_correct', ']', '# workaround when using --ecc_algo 3 or 4, because we get a list of bytearrays instead of str', 'field', '=', "''", '.', 'join', '(', 'field_correct', ')', '# Report errors', 'return', '(', 'field', ',', 'fcorrupted', ',', 'fcorrected', ',', 'errmsg', ')']
Correct an intra-field with its corresponding intra-ecc if necessary
['Correct', 'an', 'intra', '-', 'field', 'with', 'its', 'corresponding', 'intra', '-', 'ecc', 'if', 'necessary']
train
https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/header_ecc.py#L174-L205
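The control flow above is a generic verify-then-repair loop: check each block against its ecc, attempt a decode only when the check fails, and fall back to the corrupted block if the decode fails too. A self-contained sketch of that pattern (the `codec` object with `check`/`decode` methods is a stand-in, not pyFileFixity's API):

def repair_blocks(codec, blocks):
    """`blocks` is an iterable of (message, ecc) byte-string pairs."""
    out, corrupted, all_fixed, errors = [], False, True, []
    for msg, ecc in blocks:
        if codec.check(msg, ecc):      # fast path: block is intact
            out.append(msg)
            continue
        corrupted = True
        fixed = fixed_ecc = None
        try:
            fixed, fixed_ecc = codec.decode(msg, ecc)
        except Exception as exc:       # a failed decode must not abort the file
            errors.append(str(exc))
        if fixed is not None and codec.check(fixed, fixed_ecc):
            out.append(fixed)
        else:                          # keep the corrupted block, report later
            out.append(msg)
            all_fixed = False
    return b"".join(out), corrupted, all_fixed, errors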
4,380
tensorflow/hub
tensorflow_hub/resolver.py
DownloadManager.download_and_uncompress
def download_and_uncompress(self, fileobj, dst_path): """Streams the content for the 'fileobj' and stores the result in dst_path. Args: fileobj: File handle pointing to .tar/.tar.gz content. dst_path: Absolute path where to store uncompressed data from 'fileobj'. Raises: ValueError: Unknown object encountered inside the TAR file. """ try: with tarfile.open(mode="r|*", fileobj=fileobj) as tgz: for tarinfo in tgz: abs_target_path = _merge_relative_path(dst_path, tarinfo.name) if tarinfo.isfile(): self._extract_file(tgz, tarinfo, abs_target_path) elif tarinfo.isdir(): tf_v1.gfile.MakeDirs(abs_target_path) else: # We do not support symlinks and other uncommon objects. raise ValueError( "Unexpected object type in tar archive: %s" % tarinfo.type) total_size_str = tf_utils.bytes_to_readable_str( self._total_bytes_downloaded, True) self._print_download_progress_msg( "Downloaded %s, Total size: %s" % (self._url, total_size_str), flush=True) except tarfile.ReadError: raise IOError("%s does not appear to be a valid module." % self._url)
python
def download_and_uncompress(self, fileobj, dst_path): """Streams the content for the 'fileobj' and stores the result in dst_path. Args: fileobj: File handle pointing to .tar/.tar.gz content. dst_path: Absolute path where to store uncompressed data from 'fileobj'. Raises: ValueError: Unknown object encountered inside the TAR file. """ try: with tarfile.open(mode="r|*", fileobj=fileobj) as tgz: for tarinfo in tgz: abs_target_path = _merge_relative_path(dst_path, tarinfo.name) if tarinfo.isfile(): self._extract_file(tgz, tarinfo, abs_target_path) elif tarinfo.isdir(): tf_v1.gfile.MakeDirs(abs_target_path) else: # We do not support symlinks and other uncommon objects. raise ValueError( "Unexpected object type in tar archive: %s" % tarinfo.type) total_size_str = tf_utils.bytes_to_readable_str( self._total_bytes_downloaded, True) self._print_download_progress_msg( "Downloaded %s, Total size: %s" % (self._url, total_size_str), flush=True) except tarfile.ReadError: raise IOError("%s does not appear to be a valid module." % self._url)
['def', 'download_and_uncompress', '(', 'self', ',', 'fileobj', ',', 'dst_path', ')', ':', 'try', ':', 'with', 'tarfile', '.', 'open', '(', 'mode', '=', '"r|*"', ',', 'fileobj', '=', 'fileobj', ')', 'as', 'tgz', ':', 'for', 'tarinfo', 'in', 'tgz', ':', 'abs_target_path', '=', '_merge_relative_path', '(', 'dst_path', ',', 'tarinfo', '.', 'name', ')', 'if', 'tarinfo', '.', 'isfile', '(', ')', ':', 'self', '.', '_extract_file', '(', 'tgz', ',', 'tarinfo', ',', 'abs_target_path', ')', 'elif', 'tarinfo', '.', 'isdir', '(', ')', ':', 'tf_v1', '.', 'gfile', '.', 'MakeDirs', '(', 'abs_target_path', ')', 'else', ':', '# We do not support symlinks and other uncommon objects.', 'raise', 'ValueError', '(', '"Unexpected object type in tar archive: %s"', '%', 'tarinfo', '.', 'type', ')', 'total_size_str', '=', 'tf_utils', '.', 'bytes_to_readable_str', '(', 'self', '.', '_total_bytes_downloaded', ',', 'True', ')', 'self', '.', '_print_download_progress_msg', '(', '"Downloaded %s, Total size: %s"', '%', '(', 'self', '.', '_url', ',', 'total_size_str', ')', ',', 'flush', '=', 'True', ')', 'except', 'tarfile', '.', 'ReadError', ':', 'raise', 'IOError', '(', '"%s does not appear to be a valid module."', '%', 'self', '.', '_url', ')']
Streams the content for the 'fileobj' and stores the result in dst_path. Args: fileobj: File handle pointing to .tar/.tar.gz content. dst_path: Absolute path where to store uncompressed data from 'fileobj'. Raises: ValueError: Unknown object encountered inside the TAR file.
['Streams', 'the', 'content', 'for', 'the', 'fileobj', 'and', 'stores', 'the', 'result', 'in', 'dst_path', '.']
train
https://github.com/tensorflow/hub/blob/09f45963f6787322967b6fec61459f3ac56fbb27/tensorflow_hub/resolver.py#L159-L189
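The `_merge_relative_path` helper above is what blocks path traversal from hostile archive entries (e.g. names containing `../`). A standalone sketch of the same guard using only the standard library (the function name is mine, not TF Hub's):

import os
import tarfile

def safe_extract(tar_path, dst):
    root = os.path.realpath(dst)
    with tarfile.open(tar_path, "r:*") as tar:
        for member in tar:
            target = os.path.realpath(os.path.join(dst, member.name))
            # Reject entries that would land outside dst, e.g. "../../etc/passwd".
            if not target.startswith(root + os.sep):
                raise ValueError("blocked path traversal: %s" % member.name)
            if member.isdir():
                os.makedirs(target, exist_ok=True)
            elif member.isfile():
                tar.extract(member, dst)
            else:  # symlinks and devices are rejected, as in the code above
                raise ValueError("unsupported member type: %s" % member.name)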
4,381
ActiveState/simplealchemy
simplealchemy.py
_remove_unicode_keys
def _remove_unicode_keys(dictobj):
    """Convert keys from 'unicode' to 'str' type.

    Workaround for <http://bugs.python.org/issue2646>
    """
    if sys.version_info[:2] >= (3, 0):
        return dictobj

    assert isinstance(dictobj, dict)
    newdict = {}
    for key, value in dictobj.items():
        if type(key) is unicode:
            key = key.encode('utf-8')
        newdict[key] = value
    return newdict
python
def _remove_unicode_keys(dictobj):
    """Convert keys from 'unicode' to 'str' type.

    Workaround for <http://bugs.python.org/issue2646>
    """
    if sys.version_info[:2] >= (3, 0):
        return dictobj

    assert isinstance(dictobj, dict)
    newdict = {}
    for key, value in dictobj.items():
        if type(key) is unicode:
            key = key.encode('utf-8')
        newdict[key] = value
    return newdict
['def', '_remove_unicode_keys', '(', 'dictobj', ')', ':', 'if', 'sys', '.', 'version_info', '[', ':', '2', ']', '>=', '(', '3', ',', '0', ')', ':', 'return', 'dictobj', 'assert', 'isinstance', '(', 'dictobj', ',', 'dict', ')', 'newdict', '=', '{', '}', 'for', 'key', ',', 'value', 'in', 'dictobj', '.', 'items', '(', ')', ':', 'if', 'type', '(', 'key', ')', 'is', 'unicode', ':', 'key', '=', 'key', '.', 'encode', '(', "'utf-8'", ')', 'newdict', '[', 'key', ']', '=', 'value', 'return', 'newdict']
Convert keys from 'unicode' to 'str' type. Workaround for <http://bugs.python.org/issue2646>
['Convert', 'keys', 'from', 'unicode', 'to', 'str', 'type', '.']
train
https://github.com/ActiveState/simplealchemy/blob/f745847793f57701776a804ec74791a1f6a66947/simplealchemy.py#L239-L253
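The workaround matters because on Python 2, ``f(**kwargs)`` rejects `unicode` keys, and `json.loads` always produces `unicode` keys. A short illustration (Python 2 semantics):

def greet(name="world"):
    return "hello %s" % name

kwargs = {u"name": u"Bob"}                    # unicode key, e.g. from json.loads
# greet(**kwargs)                             # TypeError on Python 2 (issue2646)
print(greet(**_remove_unicode_keys(kwargs)))  # 'hello Bob'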
4,382
booktype/python-ooxml
ooxml/serialize.py
_get_numbering
def _get_numbering(document, numid, ilvl):
    """Returns type for the list.

    :Returns:
      Returns type for the list. Returns "bullet" by default or in case of an error.
    """
    try:
        abs_num = document.numbering[numid]
        return document.abstruct_numbering[abs_num][ilvl]['numFmt']
    except Exception:
        return 'bullet'
python
def _get_numbering(document, numid, ilvl):
    """Returns type for the list.

    :Returns:
      Returns type for the list. Returns "bullet" by default or in case of an error.
    """
    try:
        abs_num = document.numbering[numid]
        return document.abstruct_numbering[abs_num][ilvl]['numFmt']
    except Exception:
        return 'bullet'
['def', '_get_numbering', '(', 'document', ',', 'numid', ',', 'ilvl', ')', ':', 'try', ':', 'abs_num', '=', 'document', '.', 'numbering', '[', 'numid', ']', 'return', 'document', '.', 'abstruct_numbering', '[', 'abs_num', ']', '[', 'ilvl', ']', '[', "'numFmt'", ']', 'except', 'Exception', ':', 'return', "'bullet'"]
Returns type for the list. :Returns: Returns type for the list. Returns "bullet" by default or in case of an error.
['Returns', 'type', 'for', 'the', 'list', '.']
train
https://github.com/booktype/python-ooxml/blob/b56990a5bee2e1bc46839cec5161ff3726dc4d87/ooxml/serialize.py#L74-L85
4,383
llazzaro/analyzerdam
analyzerdam/excelDAM.py
ExcelDAM.writeTicks
def writeTicks(self, ticks):
    ''' write ticks '''
    self.__writeData(self.targetPath(ExcelDAM.TICK),
                     TICK_FIELDS,
                     [[getattr(tick, field) for field in TICK_FIELDS] for tick in ticks])
python
def writeTicks(self, ticks):
    ''' write ticks '''
    self.__writeData(self.targetPath(ExcelDAM.TICK),
                     TICK_FIELDS,
                     [[getattr(tick, field) for field in TICK_FIELDS] for tick in ticks])
['def', 'writeTicks', '(', 'self', ',', 'ticks', ')', ':', 'self', '.', '__writeData', '(', 'self', '.', 'targetPath', '(', 'ExcelDAM', '.', 'TICK', ')', ',', 'TICK_FIELDS', ',', '[', '[', 'getattr', '(', 'tick', ',', 'field', ')', 'for', 'field', 'in', 'TICK_FIELDS', ']', 'for', 'tick', 'in', 'ticks', ']', ')']
write ticks
['write', 'ticks']
train
https://github.com/llazzaro/analyzerdam/blob/c5bc7483dae23bd2e14bbf36147b7a43a0067bc0/analyzerdam/excelDAM.py#L91-L95
4,384
watson-developer-cloud/python-sdk
ibm_watson/tone_analyzer_v3.py
SentenceAnalysis._to_dict
def _to_dict(self): """Return a json dictionary representing this model.""" _dict = {} if hasattr(self, 'sentence_id') and self.sentence_id is not None: _dict['sentence_id'] = self.sentence_id if hasattr(self, 'text') and self.text is not None: _dict['text'] = self.text if hasattr(self, 'tones') and self.tones is not None: _dict['tones'] = [x._to_dict() for x in self.tones] if hasattr(self, 'tone_categories') and self.tone_categories is not None: _dict['tone_categories'] = [ x._to_dict() for x in self.tone_categories ] if hasattr(self, 'input_from') and self.input_from is not None: _dict['input_from'] = self.input_from if hasattr(self, 'input_to') and self.input_to is not None: _dict['input_to'] = self.input_to return _dict
python
def _to_dict(self): """Return a json dictionary representing this model.""" _dict = {} if hasattr(self, 'sentence_id') and self.sentence_id is not None: _dict['sentence_id'] = self.sentence_id if hasattr(self, 'text') and self.text is not None: _dict['text'] = self.text if hasattr(self, 'tones') and self.tones is not None: _dict['tones'] = [x._to_dict() for x in self.tones] if hasattr(self, 'tone_categories') and self.tone_categories is not None: _dict['tone_categories'] = [ x._to_dict() for x in self.tone_categories ] if hasattr(self, 'input_from') and self.input_from is not None: _dict['input_from'] = self.input_from if hasattr(self, 'input_to') and self.input_to is not None: _dict['input_to'] = self.input_to return _dict
['def', '_to_dict', '(', 'self', ')', ':', '_dict', '=', '{', '}', 'if', 'hasattr', '(', 'self', ',', "'sentence_id'", ')', 'and', 'self', '.', 'sentence_id', 'is', 'not', 'None', ':', '_dict', '[', "'sentence_id'", ']', '=', 'self', '.', 'sentence_id', 'if', 'hasattr', '(', 'self', ',', "'text'", ')', 'and', 'self', '.', 'text', 'is', 'not', 'None', ':', '_dict', '[', "'text'", ']', '=', 'self', '.', 'text', 'if', 'hasattr', '(', 'self', ',', "'tones'", ')', 'and', 'self', '.', 'tones', 'is', 'not', 'None', ':', '_dict', '[', "'tones'", ']', '=', '[', 'x', '.', '_to_dict', '(', ')', 'for', 'x', 'in', 'self', '.', 'tones', ']', 'if', 'hasattr', '(', 'self', ',', "'tone_categories'", ')', 'and', 'self', '.', 'tone_categories', 'is', 'not', 'None', ':', '_dict', '[', "'tone_categories'", ']', '=', '[', 'x', '.', '_to_dict', '(', ')', 'for', 'x', 'in', 'self', '.', 'tone_categories', ']', 'if', 'hasattr', '(', 'self', ',', "'input_from'", ')', 'and', 'self', '.', 'input_from', 'is', 'not', 'None', ':', '_dict', '[', "'input_from'", ']', '=', 'self', '.', 'input_from', 'if', 'hasattr', '(', 'self', ',', "'input_to'", ')', 'and', 'self', '.', 'input_to', 'is', 'not', 'None', ':', '_dict', '[', "'input_to'", ']', '=', 'self', '.', 'input_to', 'return', '_dict']
Return a json dictionary representing this model.
['Return', 'a', 'json', 'dictionary', 'representing', 'this', 'model', '.']
train
https://github.com/watson-developer-cloud/python-sdk/blob/4c2c9df4466fcde88975da9ecd834e6ba95eb353/ibm_watson/tone_analyzer_v3.py#L460-L478
4,385
mitsei/dlkit
dlkit/json_/repository/objects.py
AssetForm.clear_copyright_registration
def clear_copyright_registration(self): """Removes the copyright registration. raise: NoAccess - ``Metadata.isRequired()`` is ``true`` or ``Metadata.isReadOnly()`` is ``true`` *compliance: mandatory -- This method must be implemented.* """ # Implemented from template for osid.repository.AssetContentForm.clear_url_template if (self.get_copyright_registration_metadata().is_read_only() or self.get_copyright_registration_metadata().is_required()): raise errors.NoAccess() self._my_map['copyrightRegistration'] = self._copyright_registration_default
python
def clear_copyright_registration(self): """Removes the copyright registration. raise: NoAccess - ``Metadata.isRequired()`` is ``true`` or ``Metadata.isReadOnly()`` is ``true`` *compliance: mandatory -- This method must be implemented.* """ # Implemented from template for osid.repository.AssetContentForm.clear_url_template if (self.get_copyright_registration_metadata().is_read_only() or self.get_copyright_registration_metadata().is_required()): raise errors.NoAccess() self._my_map['copyrightRegistration'] = self._copyright_registration_default
['def', 'clear_copyright_registration', '(', 'self', ')', ':', '# Implemented from template for osid.repository.AssetContentForm.clear_url_template', 'if', '(', 'self', '.', 'get_copyright_registration_metadata', '(', ')', '.', 'is_read_only', '(', ')', 'or', 'self', '.', 'get_copyright_registration_metadata', '(', ')', '.', 'is_required', '(', ')', ')', ':', 'raise', 'errors', '.', 'NoAccess', '(', ')', 'self', '.', '_my_map', '[', "'copyrightRegistration'", ']', '=', 'self', '.', '_copyright_registration_default']
Removes the copyright registration. raise: NoAccess - ``Metadata.isRequired()`` is ``true`` or ``Metadata.isReadOnly()`` is ``true`` *compliance: mandatory -- This method must be implemented.*
['Removes', 'the', 'copyright', 'registration', '.']
train
https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/json_/repository/objects.py#L797-L809
4,386
BrewBlox/brewblox-service
brewblox_service/events.py
post_subscribe
async def post_subscribe(request): """ --- tags: - Events summary: Subscribe to events. operationId: events.subscribe produces: - text/plain parameters: - in: body name: body description: Event message required: true schema: type: object properties: exchange: type: string routing: type: string """ args = await request.json() get_listener(request.app).subscribe( args['exchange'], args['routing'] ) return web.Response()
python
async def post_subscribe(request): """ --- tags: - Events summary: Subscribe to events. operationId: events.subscribe produces: - text/plain parameters: - in: body name: body description: Event message required: true schema: type: object properties: exchange: type: string routing: type: string """ args = await request.json() get_listener(request.app).subscribe( args['exchange'], args['routing'] ) return web.Response()
['async', 'def', 'post_subscribe', '(', 'request', ')', ':', 'args', '=', 'await', 'request', '.', 'json', '(', ')', 'get_listener', '(', 'request', '.', 'app', ')', '.', 'subscribe', '(', 'args', '[', "'exchange'", ']', ',', 'args', '[', "'routing'", ']', ')', 'return', 'web', '.', 'Response', '(', ')']
--- tags: - Events summary: Subscribe to events. operationId: events.subscribe produces: - text/plain parameters: - in: body name: body description: Event message required: true schema: type: object properties: exchange: type: string routing: type: string
['---', 'tags', ':', '-', 'Events', 'summary', ':', 'Subscribe', 'to', 'events', '.', 'operationId', ':', 'events', '.', 'subscribe', 'produces', ':', '-', 'text', '/', 'plain', 'parameters', ':', '-', 'in', ':', 'body', 'name', ':', 'body', 'description', ':', 'Event', 'message', 'required', ':', 'true', 'schema', ':', 'type', ':', 'object', 'properties', ':', 'exchange', ':', 'type', ':', 'string', 'routing', ':', 'type', ':', 'string']
train
https://github.com/BrewBlox/brewblox-service/blob/f2572fcb5ea337c24aa5a28c2b0b19ebcfc076eb/brewblox_service/events.py#L503-L531
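Given the swagger block above, the handler expects a JSON body with `exchange` and `routing` keys. A hedged client-side sketch with aiohttp (the `/subscribe` route path and port are assumptions; the real route is registered elsewhere in the service):

import asyncio
import aiohttp

async def subscribe(base_url):
    async with aiohttp.ClientSession() as session:
        # Route path '/subscribe' is assumed for illustration.
        async with session.post(base_url + '/subscribe',
                                json={'exchange': 'brewblox', 'routing': '#'}) as resp:
            resp.raise_for_status()

asyncio.run(subscribe('http://localhost:5000'))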
4,387
delfick/harpoon
harpoon/option_spec/task_objs.py
Task.run
def run(self, collector, image, available_actions, tasks, **extras): """Run this task""" task_func = available_actions[self.action] configuration = collector.configuration.wrapped() if self.options: if image: configuration.update({"images": {image: self.options}}) else: configuration.update(self.options) # args like --port and the like should override what's in the options # But themselves be overridden by the overrides configuration.update(configuration["args_dict"].as_dict(), source="<args_dict>") if self.overrides: overrides = {} for key, val in self.overrides.items(): overrides[key] = val if isinstance(val, MergedOptions): overrides[key] = dict(val.items()) configuration.update(overrides) if task_func.needs_image: self.find_image(image, configuration) image = configuration["images"][image] image.find_missing_env() from harpoon.collector import Collector new_collector = Collector() new_collector.configuration = configuration new_collector.configuration_file = collector.configuration_file artifact = configuration["harpoon"].artifact return task_func(new_collector, image=image, tasks=tasks, artifact=artifact, **extras)
python
def run(self, collector, image, available_actions, tasks, **extras): """Run this task""" task_func = available_actions[self.action] configuration = collector.configuration.wrapped() if self.options: if image: configuration.update({"images": {image: self.options}}) else: configuration.update(self.options) # args like --port and the like should override what's in the options # But themselves be overridden by the overrides configuration.update(configuration["args_dict"].as_dict(), source="<args_dict>") if self.overrides: overrides = {} for key, val in self.overrides.items(): overrides[key] = val if isinstance(val, MergedOptions): overrides[key] = dict(val.items()) configuration.update(overrides) if task_func.needs_image: self.find_image(image, configuration) image = configuration["images"][image] image.find_missing_env() from harpoon.collector import Collector new_collector = Collector() new_collector.configuration = configuration new_collector.configuration_file = collector.configuration_file artifact = configuration["harpoon"].artifact return task_func(new_collector, image=image, tasks=tasks, artifact=artifact, **extras)
['def', 'run', '(', 'self', ',', 'collector', ',', 'image', ',', 'available_actions', ',', 'tasks', ',', '*', '*', 'extras', ')', ':', 'task_func', '=', 'available_actions', '[', 'self', '.', 'action', ']', 'configuration', '=', 'collector', '.', 'configuration', '.', 'wrapped', '(', ')', 'if', 'self', '.', 'options', ':', 'if', 'image', ':', 'configuration', '.', 'update', '(', '{', '"images"', ':', '{', 'image', ':', 'self', '.', 'options', '}', '}', ')', 'else', ':', 'configuration', '.', 'update', '(', 'self', '.', 'options', ')', "# args like --port and the like should override what's in the options", '# But themselves be overridden by the overrides', 'configuration', '.', 'update', '(', 'configuration', '[', '"args_dict"', ']', '.', 'as_dict', '(', ')', ',', 'source', '=', '"<args_dict>"', ')', 'if', 'self', '.', 'overrides', ':', 'overrides', '=', '{', '}', 'for', 'key', ',', 'val', 'in', 'self', '.', 'overrides', '.', 'items', '(', ')', ':', 'overrides', '[', 'key', ']', '=', 'val', 'if', 'isinstance', '(', 'val', ',', 'MergedOptions', ')', ':', 'overrides', '[', 'key', ']', '=', 'dict', '(', 'val', '.', 'items', '(', ')', ')', 'configuration', '.', 'update', '(', 'overrides', ')', 'if', 'task_func', '.', 'needs_image', ':', 'self', '.', 'find_image', '(', 'image', ',', 'configuration', ')', 'image', '=', 'configuration', '[', '"images"', ']', '[', 'image', ']', 'image', '.', 'find_missing_env', '(', ')', 'from', 'harpoon', '.', 'collector', 'import', 'Collector', 'new_collector', '=', 'Collector', '(', ')', 'new_collector', '.', 'configuration', '=', 'configuration', 'new_collector', '.', 'configuration_file', '=', 'collector', '.', 'configuration_file', 'artifact', '=', 'configuration', '[', '"harpoon"', ']', '.', 'artifact', 'return', 'task_func', '(', 'new_collector', ',', 'image', '=', 'image', ',', 'tasks', '=', 'tasks', ',', 'artifact', '=', 'artifact', ',', '*', '*', 'extras', ')']
Run this task
['Run', 'this', 'task']
train
https://github.com/delfick/harpoon/blob/a2d39311d6127b7da2e15f40468bf320d598e461/harpoon/option_spec/task_objs.py#L42-L75
4,388
DLR-RM/RAFCON
source/rafcon/gui/controllers/utils/extended_controller.py
ExtendedController.destroy
def destroy(self):
    """Recursively destroy all Controllers

    The method removes all controllers, which calls the destroy method of the child controllers. Then, all
    registered models are relieved and the widget handed over by the initial view argument is destroyed.
    """
    self.disconnect_all_signals()
    controller_names = [key for key in self.__child_controllers]
    for controller_name in controller_names:
        self.remove_controller(controller_name)
    self.relieve_all_models()
    if self.parent:
        self.__parent = None
    if self._view_initialized:
        # print(self.__class__.__name__, "destroy view", self.view, self)
        self.view.get_top_widget().destroy()
        self.view = None
        self._Observer__PROP_TO_METHS.clear()  # prop name --> set of observing methods
        self._Observer__METH_TO_PROPS.clear()  # method --> set of observed properties
        self._Observer__PAT_TO_METHS.clear()  # like __PROP_TO_METHS but only for pattern names (to optimize search)
        self._Observer__METH_TO_PAT.clear()  # method --> pattern
        self._Observer__PAT_METH_TO_KWARGS.clear()  # (pattern, method) --> info
        self.observe = None
    else:
        logger.warning("The controller {0} seems to be destroyed before the view was fully initialized. {1} "
                       "Check if you maybe do not call {2} or there exist most likely threading problems."
                       "".format(self.__class__.__name__, self.model, ExtendedController.register_view))
python
def destroy(self):
    """Recursively destroy all Controllers

    The method removes all controllers, which calls the destroy method of the child controllers. Then, all
    registered models are relieved and the widget handed over by the initial view argument is destroyed.
    """
    self.disconnect_all_signals()
    controller_names = [key for key in self.__child_controllers]
    for controller_name in controller_names:
        self.remove_controller(controller_name)
    self.relieve_all_models()
    if self.parent:
        self.__parent = None
    if self._view_initialized:
        # print(self.__class__.__name__, "destroy view", self.view, self)
        self.view.get_top_widget().destroy()
        self.view = None
        self._Observer__PROP_TO_METHS.clear()  # prop name --> set of observing methods
        self._Observer__METH_TO_PROPS.clear()  # method --> set of observed properties
        self._Observer__PAT_TO_METHS.clear()  # like __PROP_TO_METHS but only for pattern names (to optimize search)
        self._Observer__METH_TO_PAT.clear()  # method --> pattern
        self._Observer__PAT_METH_TO_KWARGS.clear()  # (pattern, method) --> info
        self.observe = None
    else:
        logger.warning("The controller {0} seems to be destroyed before the view was fully initialized. {1} "
                       "Check if you maybe do not call {2} or there exist most likely threading problems."
                       "".format(self.__class__.__name__, self.model, ExtendedController.register_view))
['def', 'destroy', '(', 'self', ')', ':', 'self', '.', 'disconnect_all_signals', '(', ')', 'controller_names', '=', '[', 'key', 'for', 'key', 'in', 'self', '.', '__child_controllers', ']', 'for', 'controller_name', 'in', 'controller_names', ':', 'self', '.', 'remove_controller', '(', 'controller_name', ')', 'self', '.', 'relieve_all_models', '(', ')', 'if', 'self', '.', 'parent', ':', 'self', '.', '__parent', '=', 'None', 'if', 'self', '.', '_view_initialized', ':', '# print(self.__class__.__name__, "destroy view", self.view, self)', 'self', '.', 'view', '.', 'get_top_widget', '(', ')', '.', 'destroy', '(', ')', 'self', '.', 'view', '=', 'None', 'self', '.', '_Observer__PROP_TO_METHS', '.', 'clear', '(', ')', '# prop name --> set of observing methods', 'self', '.', '_Observer__METH_TO_PROPS', '.', 'clear', '(', ')', '# method --> set of observed properties', 'self', '.', '_Observer__PAT_TO_METHS', '.', 'clear', '(', ')', '# like __PROP_TO_METHS but only for pattern names (to optimize search)', 'self', '.', '_Observer__METH_TO_PAT', '.', 'clear', '(', ')', '# method --> pattern', 'self', '.', '_Observer__PAT_METH_TO_KWARGS', '.', 'clear', '(', ')', '# (pattern, method) --> info', 'self', '.', 'observe', '=', 'None', 'else', ':', 'logger', '.', 'warning', '(', '"The controller {0} seems to be destroyed before the view was fully initialized. {1} "', '"Check if you maybe do not call {2} or there exist most likely threading problems."', '""', '.', 'format', '(', 'self', '.', '__class__', '.', '__name__', ',', 'self', '.', 'model', ',', 'ExtendedController', '.', 'register_view', ')', ')']
Recursively destroy all Controllers The method removes all controllers, which calls the destroy method of the child controllers. Then, all registered models are relieved and the widget handed over by the initial view argument is destroyed.
['Recursively', 'destroy', 'all', 'Controllers']
train
https://github.com/DLR-RM/RAFCON/blob/24942ef1a904531f49ab8830a1dbb604441be498/source/rafcon/gui/controllers/utils/extended_controller.py#L186-L212
4,389
sethmlarson/virtualbox-python
virtualbox/library.py
IGuestSession.symlink_exists
def symlink_exists(self, symlink): """Checks whether a symbolic link exists in the guest. in symlink of type str Path to the alleged symbolic link. Guest path style. return exists of type bool Returns @c true if the symbolic link exists. Returns @c false if it does not exist, if the file system object identified by the path is not a symbolic link, or if the object type is inaccessible to the user, or if the @a symlink argument is empty. raises :class:`OleErrorNotimpl` The method is not implemented yet. """ if not isinstance(symlink, basestring): raise TypeError("symlink can only be an instance of type basestring") exists = self._call("symlinkExists", in_p=[symlink]) return exists
python
def symlink_exists(self, symlink): """Checks whether a symbolic link exists in the guest. in symlink of type str Path to the alleged symbolic link. Guest path style. return exists of type bool Returns @c true if the symbolic link exists. Returns @c false if it does not exist, if the file system object identified by the path is not a symbolic link, or if the object type is inaccessible to the user, or if the @a symlink argument is empty. raises :class:`OleErrorNotimpl` The method is not implemented yet. """ if not isinstance(symlink, basestring): raise TypeError("symlink can only be an instance of type basestring") exists = self._call("symlinkExists", in_p=[symlink]) return exists
['def', 'symlink_exists', '(', 'self', ',', 'symlink', ')', ':', 'if', 'not', 'isinstance', '(', 'symlink', ',', 'basestring', ')', ':', 'raise', 'TypeError', '(', '"symlink can only be an instance of type basestring"', ')', 'exists', '=', 'self', '.', '_call', '(', '"symlinkExists"', ',', 'in_p', '=', '[', 'symlink', ']', ')', 'return', 'exists']
Checks whether a symbolic link exists in the guest. in symlink of type str Path to the alleged symbolic link. Guest path style. return exists of type bool Returns @c true if the symbolic link exists. Returns @c false if it does not exist, if the file system object identified by the path is not a symbolic link, or if the object type is inaccessible to the user, or if the @a symlink argument is empty. raises :class:`OleErrorNotimpl` The method is not implemented yet.
['Checks', 'whether', 'a', 'symbolic', 'link', 'exists', 'in', 'the', 'guest', '.']
train
https://github.com/sethmlarson/virtualbox-python/blob/706c8e3f6e3aee17eb06458e73cbb4bc2d37878b/virtualbox/library.py#L20325-L20345
4,390
sernst/cauldron
cauldron/environ/logger.py
add_to_message
def add_to_message(data, indent_level=0) -> list: """Adds data to the message object""" message = [] if isinstance(data, str): message.append(indent( dedent(data.strip('\n')).strip(), indent_level * ' ' )) return message for line in data: offset = 0 if isinstance(line, str) else 1 message += add_to_message(line, indent_level + offset) return message
python
def add_to_message(data, indent_level=0) -> list: """Adds data to the message object""" message = [] if isinstance(data, str): message.append(indent( dedent(data.strip('\n')).strip(), indent_level * ' ' )) return message for line in data: offset = 0 if isinstance(line, str) else 1 message += add_to_message(line, indent_level + offset) return message
['def', 'add_to_message', '(', 'data', ',', 'indent_level', '=', '0', ')', '->', 'list', ':', 'message', '=', '[', ']', 'if', 'isinstance', '(', 'data', ',', 'str', ')', ':', 'message', '.', 'append', '(', 'indent', '(', 'dedent', '(', 'data', '.', 'strip', '(', "'\\n'", ')', ')', '.', 'strip', '(', ')', ',', 'indent_level', '*', "' '", ')', ')', 'return', 'message', 'for', 'line', 'in', 'data', ':', 'offset', '=', '0', 'if', 'isinstance', '(', 'line', ',', 'str', ')', 'else', '1', 'message', '+=', 'add_to_message', '(', 'line', ',', 'indent_level', '+', 'offset', ')', 'return', 'message']
Adds data to the message object
['Adds', 'data', 'to', 'the', 'message', 'object']
train
https://github.com/sernst/cauldron/blob/4086aec9c038c402ea212c79fe8bd0d27104f9cf/cauldron/environ/logger.py#L226-L240
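Each nested sequence bumps the indent level by one, while sibling strings stay at the same depth. A standalone, runnable copy of the idea with an example (two-space indent unit, as in the source):

from textwrap import dedent, indent

def add_to_message(data, indent_level=0):
    """Flatten nested lists of strings into indented lines."""
    message = []
    if isinstance(data, str):
        message.append(indent(
            dedent(data.strip('\n')).strip(),
            indent_level * '  '
        ))
        return message
    for line in data:
        offset = 0 if isinstance(line, str) else 1
        message += add_to_message(line, indent_level + offset)
    return message

print('\n'.join(add_to_message(['Top level', ['nested', ['deeper']]])))
# Top level
#   nested
#     deeper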
4,391
AtteqCom/zsl
src/zsl/utils/rss.py
rss_create
def rss_create(channel, articles):
    """Create RSS xml feed.

    :param channel: channel info [title, link, description, language]
    :type channel: dict(str, str)
    :param articles: list of articles, an article is a dictionary with some \
    required fields [title, description, link] and any optional, which will \
    result to `<dict_key>dict_value</dict_key>`
    :type articles: list(dict(str,str))
    :return: root element
    :rtype: ElementTree.Element
    """
    channel = channel.copy()  # TODO use deepcopy
    # list will not clone the dictionaries in the list and `element_from_dict`
    # pops items from them
    articles = list(articles)

    rss = ET.Element('rss')
    rss.set('version', '2.0')

    channel_node = ET.SubElement(rss, 'channel')

    element_from_dict(channel_node, channel, 'title')
    element_from_dict(channel_node, channel, 'link')
    element_from_dict(channel_node, channel, 'description')
    element_from_dict(channel_node, channel, 'language')

    for article in articles:
        item = ET.SubElement(channel_node, 'item')
        element_from_dict(item, article, 'title')
        element_from_dict(item, article, 'description')
        element_from_dict(item, article, 'link')

        for key in article:
            complex_el_from_dict(item, article, key)

    return ET.ElementTree(rss)
python
def rss_create(channel, articles):
    """Create RSS xml feed.

    :param channel: channel info [title, link, description, language]
    :type channel: dict(str, str)
    :param articles: list of articles, an article is a dictionary with some \
    required fields [title, description, link] and any optional, which will \
    result to `<dict_key>dict_value</dict_key>`
    :type articles: list(dict(str,str))
    :return: root element
    :rtype: ElementTree.Element
    """
    channel = channel.copy()  # TODO use deepcopy
    # list will not clone the dictionaries in the list and `element_from_dict`
    # pops items from them
    articles = list(articles)

    rss = ET.Element('rss')
    rss.set('version', '2.0')

    channel_node = ET.SubElement(rss, 'channel')

    element_from_dict(channel_node, channel, 'title')
    element_from_dict(channel_node, channel, 'link')
    element_from_dict(channel_node, channel, 'description')
    element_from_dict(channel_node, channel, 'language')

    for article in articles:
        item = ET.SubElement(channel_node, 'item')
        element_from_dict(item, article, 'title')
        element_from_dict(item, article, 'description')
        element_from_dict(item, article, 'link')

        for key in article:
            complex_el_from_dict(item, article, key)

    return ET.ElementTree(rss)
['def', 'rss_create', '(', 'channel', ',', 'articles', ')', ':', 'channel', '=', 'channel', '.', 'copy', '(', ')', '# TODO use deepcopy', '# list will not clone the dictionaries in the list and `element_from_dict`', '# pops items from them', 'articles', '=', 'list', '(', 'articles', ')', 'rss', '=', 'ET', '.', 'Element', '(', "'rss'", ')', 'rss', '.', 'set', '(', "'version'", ',', "'2.0'", ')', 'channel_node', '=', 'ET', '.', 'SubElement', '(', 'rss', ',', "'channel'", ')', 'element_from_dict', '(', 'channel_node', ',', 'channel', ',', "'title'", ')', 'element_from_dict', '(', 'channel_node', ',', 'channel', ',', "'link'", ')', 'element_from_dict', '(', 'channel_node', ',', 'channel', ',', "'description'", ')', 'element_from_dict', '(', 'channel_node', ',', 'channel', ',', "'language'", ')', 'for', 'article', 'in', 'articles', ':', 'item', '=', 'ET', '.', 'SubElement', '(', 'channel_node', ',', "'item'", ')', 'element_from_dict', '(', 'item', ',', 'article', ',', "'title'", ')', 'element_from_dict', '(', 'item', ',', 'article', ',', "'description'", ')', 'element_from_dict', '(', 'item', ',', 'article', ',', "'link'", ')', 'for', 'key', 'in', 'article', ':', 'complex_el_from_dict', '(', 'item', ',', 'article', ',', 'key', ')', 'return', 'ET', '.', 'ElementTree', '(', 'rss', ')']
Create RSS xml feed. :param channel: channel info [title, link, description, language] :type channel: dict(str, str) :param articles: list of articles, an article is a dictionary with some \ required fields [title, description, link] and any optional, which will \ result to `<dict_key>dict_value</dict_key>` :type articles: list(dict(str,str)) :return: root element :rtype: ElementTree.Element
['Create', 'RSS', 'xml', 'feed', '.']
train
https://github.com/AtteqCom/zsl/blob/ab51a96da1780ff642912396d4b85bdcb72560c1/src/zsl/utils/rss.py#L62-L101
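The helpers boil down to the usual `xml.etree.ElementTree` build pattern; a minimal standalone sketch without zsl's helper functions:

import xml.etree.ElementTree as ET

rss = ET.Element('rss', version='2.0')
channel = ET.SubElement(rss, 'channel')
ET.SubElement(channel, 'title').text = 'Example feed'
ET.SubElement(channel, 'link').text = 'http://example.com'
ET.SubElement(channel, 'description').text = 'Demo'

item = ET.SubElement(channel, 'item')
ET.SubElement(item, 'title').text = 'First article'
ET.SubElement(item, 'link').text = 'http://example.com/1'

print(ET.tostring(rss, encoding='unicode'))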
4,392
bububa/pyTOP
pyTOP/logistics.py
LogisticsOnline.confirm
def confirm(self, tid, out_sid, session):
    '''taobao.logistics.online.confirm - Confirm shipment notification API. Confirming shipment keeps the transaction flow moving: once shipment is confirmed, the trade status changes from "buyer has paid" to "seller has shipped", after which the buyer can confirm receipt and the payment is transferred to the seller's account. Cash-on-delivery orders are excluded.'''
    request = TOPRequest('taobao.logistics.online.confirm')
    request['tid'] = tid
    request['out_sid'] = out_sid
    self.create(self.execute(request, session), fields = ['shipping',], models = {'shipping':Shipping})
    return self.shipping
python
def confirm(self, tid, out_sid, session):
    '''taobao.logistics.online.confirm - Confirm shipment notification API. Confirming shipment keeps the transaction flow moving: once shipment is confirmed, the trade status changes from "buyer has paid" to "seller has shipped", after which the buyer can confirm receipt and the payment is transferred to the seller's account. Cash-on-delivery orders are excluded.'''
    request = TOPRequest('taobao.logistics.online.confirm')
    request['tid'] = tid
    request['out_sid'] = out_sid
    self.create(self.execute(request, session), fields = ['shipping',], models = {'shipping':Shipping})
    return self.shipping
['def', 'confirm', '(', 'self', ',', 'tid', ',', 'out_sid', ',', 'session', ')', ':', 'request', '=', 'TOPRequest', '(', "'taobao.logistics.online.confirm'", ')', 'request', '[', "'tid'", ']', '=', 'tid', 'request', '[', "'out_sid'", ']', '=', 'out_sid', 'self', '.', 'create', '(', 'self', '.', 'execute', '(', 'request', ',', 'session', ')', ',', 'fields', '=', '[', "'shipping'", ',', ']', ',', 'models', '=', '{', "'shipping'", ':', 'Shipping', '}', ')', 'return', 'self', '.', 'shipping']
taobao.logistics.online.confirm - Confirm shipment notification API. Confirming shipment keeps the transaction flow moving: once shipment is confirmed, the trade status changes from "buyer has paid" to "seller has shipped", after which the buyer can confirm receipt and the payment is transferred to the seller's account. Cash-on-delivery orders are excluded.
['taobao', '.', 'logistics', '.', 'online', '.', 'confirm', '-', 'Confirm', 'shipment', 'notification', 'API', '.', 'Confirming', 'shipment', 'keeps', 'the', 'transaction', 'flow', 'moving', ':', 'once', 'shipment', 'is', 'confirmed', ',', 'the', 'trade', 'status', 'changes', 'from', 'buyer', 'has', 'paid', 'to', 'seller', 'has', 'shipped', ',', 'after', 'which', 'the', 'buyer', 'can', 'confirm', 'receipt', 'and', 'the', 'payment', 'is', 'transferred', 'to', 'the', "seller's", 'account', '.', 'Cash-on-delivery', 'orders', 'are', 'excluded', '.']
train
https://github.com/bububa/pyTOP/blob/1e48009bcfe886be392628244b370e6374e1f2b2/pyTOP/logistics.py#L412-L420
4,393
Gandi/gandi.cli
gandi/cli/commands/vm.py
datacenters
def datacenters(gandi, id): """List available datacenters.""" output_keys = ['iso', 'name', 'country', 'dc_code', 'status'] if id: output_keys.append('id') result = gandi.datacenter.list() for num, dc in enumerate(result): if num: gandi.separator_line() output_datacenter(gandi, dc, output_keys, justify=10) return result
python
def datacenters(gandi, id): """List available datacenters.""" output_keys = ['iso', 'name', 'country', 'dc_code', 'status'] if id: output_keys.append('id') result = gandi.datacenter.list() for num, dc in enumerate(result): if num: gandi.separator_line() output_datacenter(gandi, dc, output_keys, justify=10) return result
['def', 'datacenters', '(', 'gandi', ',', 'id', ')', ':', 'output_keys', '=', '[', "'iso'", ',', "'name'", ',', "'country'", ',', "'dc_code'", ',', "'status'", ']', 'if', 'id', ':', 'output_keys', '.', 'append', '(', "'id'", ')', 'result', '=', 'gandi', '.', 'datacenter', '.', 'list', '(', ')', 'for', 'num', ',', 'dc', 'in', 'enumerate', '(', 'result', ')', ':', 'if', 'num', ':', 'gandi', '.', 'separator_line', '(', ')', 'output_datacenter', '(', 'gandi', ',', 'dc', ',', 'output_keys', ',', 'justify', '=', '10', ')', 'return', 'result']
List available datacenters.
['List', 'available', 'datacenters', '.']
train
https://github.com/Gandi/gandi.cli/blob/6ee5b8fc8ec44b0a6c232043ca610606ad8f693d/gandi/cli/commands/vm.py#L500-L512
4,394
pyQode/pyqode.core
pyqode/core/cache.py
Cache.set_file_encoding
def set_file_encoding(self, path, encoding): """ Cache encoding for the specified file path. :param path: path of the file to cache :param encoding: encoding to cache """ try: map = json.loads(self._settings.value('cachedFileEncodings')) except TypeError: map = {} map[path] = encoding self._settings.setValue('cachedFileEncodings', json.dumps(map))
python
def set_file_encoding(self, path, encoding): """ Cache encoding for the specified file path. :param path: path of the file to cache :param encoding: encoding to cache """ try: map = json.loads(self._settings.value('cachedFileEncodings')) except TypeError: map = {} map[path] = encoding self._settings.setValue('cachedFileEncodings', json.dumps(map))
['def', 'set_file_encoding', '(', 'self', ',', 'path', ',', 'encoding', ')', ':', 'try', ':', 'map', '=', 'json', '.', 'loads', '(', 'self', '.', '_settings', '.', 'value', '(', "'cachedFileEncodings'", ')', ')', 'except', 'TypeError', ':', 'map', '=', '{', '}', 'map', '[', 'path', ']', '=', 'encoding', 'self', '.', '_settings', '.', 'setValue', '(', "'cachedFileEncodings'", ',', 'json', '.', 'dumps', '(', 'map', ')', ')']
Cache encoding for the specified file path. :param path: path of the file to cache :param encoding: encoding to cache
['Cache', 'encoding', 'for', 'the', 'specified', 'file', 'path', '.']
train
https://github.com/pyQode/pyqode.core/blob/a99ec6cd22d519394f613309412f8329dc4e90cb/pyqode/core/cache.py#L104-L116
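A hypothetical companion getter, sketched here for symmetry with the setter above; it is not necessarily the library's actual API, and the `default` fallback is an assumption:

def get_file_encoding(self, path, default='utf-8'):
    # Same guard as the setter: QSettings.value() returns None for a
    # never-written key, and json.loads(None) raises TypeError.
    try:
        mapping = json.loads(self._settings.value('cachedFileEncodings'))
    except TypeError:
        mapping = {}
    return mapping.get(path, default)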
4,395
atztogo/phonopy
phonopy/harmonic/force_constants.py
set_permutation_symmetry
def set_permutation_symmetry(force_constants):
    """Enforce permutation symmetry to force constants by

        Phi_ij_ab = Phi_ji_ba

    i, j: atom index
    a, b: Cartesian axis index

    This is not necessary for harmonic phonon calculation because this
    condition is imposed when the dynamical matrix is made Hermitian in
    dynamical_matrix.py.
    """
    fc_copy = force_constants.copy()
    for i in range(force_constants.shape[0]):
        for j in range(force_constants.shape[1]):
            force_constants[i, j] = (force_constants[i, j] + fc_copy[j, i].T) / 2
python
def set_permutation_symmetry(force_constants):
    """Enforce permutation symmetry to force constants by

        Phi_ij_ab = Phi_ji_ba

    i, j: atom index
    a, b: Cartesian axis index

    This is not necessary for harmonic phonon calculation because this
    condition is imposed when the dynamical matrix is made Hermitian in
    dynamical_matrix.py.
    """
    fc_copy = force_constants.copy()
    for i in range(force_constants.shape[0]):
        for j in range(force_constants.shape[1]):
            force_constants[i, j] = (force_constants[i, j] + fc_copy[j, i].T) / 2
['def', 'set_permutation_symmetry', '(', 'force_constants', ')', ':', 'fc_copy', '=', 'force_constants', '.', 'copy', '(', ')', 'for', 'i', 'in', 'range', '(', 'force_constants', '.', 'shape', '[', '0', ']', ')', ':', 'for', 'j', 'in', 'range', '(', 'force_constants', '.', 'shape', '[', '1', ']', ')', ':', 'force_constants', '[', 'i', ',', 'j', ']', '=', '(', 'force_constants', '[', 'i', ',', 'j', ']', '+', 'fc_copy', '[', 'j', ',', 'i', ']', '.', 'T', ')', '/', '2']
Enforce permutation symmetry to force constants by Phi_ij_ab = Phi_ji_ba i, j: atom index a, b: Cartesian axis index This is not necessary for harmonic phonon calculation because this condition is imposed when the dynamical matrix is made Hermitian in dynamical_matrix.py.
['Enforce', 'permutation', 'symmetry', 'to', 'force', 'constants', 'by']
train
https://github.com/atztogo/phonopy/blob/869cc2ba9e7d495d5f4cf6942415ab3fc9e2a10f/phonopy/harmonic/force_constants.py#L462-L480
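In index notation, the double loop averages each 3x3 block with the transposed mirror block: Phi_ij_ab <- (Phi_ij_ab + Phi_ji_ba) / 2. For a force-constant array of shape (natom, natom, 3, 3), the same operation can be written as a single transpose; a NumPy sketch, not phonopy's implementation:

import numpy as np

def symmetrize_force_constants(fc):
    # Swap the atom axes (0 <-> 1) and the Cartesian axes (2 <-> 3),
    # then average with the original array.
    return (fc + fc.transpose(1, 0, 3, 2)) / 2

fc = np.random.rand(4, 4, 3, 3)
sym = symmetrize_force_constants(fc)
assert np.allclose(sym, sym.transpose(1, 0, 3, 2))  # now permutation-symmetric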
4,396
saltstack/salt
salt/states/pcs.py
_file_read
def _file_read(path):
    '''
    Read a file and return content
    '''
    content = False
    if os.path.exists(path):
        with salt.utils.files.fopen(path, 'r+') as fp_:
            content = salt.utils.stringutils.to_unicode(fp_.read())
            fp_.close()  # redundant: the with-block already closes the handle
    return content
python
def _file_read(path):
    '''
    Read a file and return content
    '''
    content = False
    if os.path.exists(path):
        with salt.utils.files.fopen(path, 'r+') as fp_:
            content = salt.utils.stringutils.to_unicode(fp_.read())
            fp_.close()  # redundant: the with-block already closes the handle
    return content
['def', '_file_read', '(', 'path', ')', ':', 'content', '=', 'False', 'if', 'os', '.', 'path', '.', 'exists', '(', 'path', ')', ':', 'with', 'salt', '.', 'utils', '.', 'files', '.', 'fopen', '(', 'path', ',', "'r+'", ')', 'as', 'fp_', ':', 'content', '=', 'salt', '.', 'utils', '.', 'stringutils', '.', 'to_unicode', '(', 'fp_', '.', 'read', '(', ')', ')', 'fp_', '.', 'close', '(', ')', 'return', 'content']
Read a file and return content
['Read', 'a', 'file', 'and', 'return', 'content']
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/pcs.py#L191-L200
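Two quirks stand out above: the explicit close is unnecessary inside the with-block, and 'r+' requests write access although the function only reads. A leaner standalone sketch with the same intent (not Salt's code; the UTF-8 decoding and the False sentinel are assumptions carried over from the original):

import os

def file_read(path):
    # Return the decoded content, or False when the path does not exist.
    if not os.path.exists(path):
        return False
    with open(path, encoding='utf-8') as fp:
        return fp.read()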
4,397
Kane610/axis
axis/port_cgi.py
Ports.process_raw
def process_raw(self, raw: dict) -> None:
    """Pre-process raw dict.

    Prepare parameters to work with APIItems.
    """
    raw_ports = {}
    for param in raw:
        port_index = REGEX_PORT_INDEX.search(param).group(0)
        if port_index not in raw_ports:
            raw_ports[port_index] = {}
        name = param.replace(IOPORT + '.I' + port_index + '.', '')
        raw_ports[port_index][name] = raw[param]
    super().process_raw(raw_ports)
python
def process_raw(self, raw: dict) -> None:
    """Pre-process raw dict.

    Prepare parameters to work with APIItems.
    """
    raw_ports = {}
    for param in raw:
        port_index = REGEX_PORT_INDEX.search(param).group(0)
        if port_index not in raw_ports:
            raw_ports[port_index] = {}
        name = param.replace(IOPORT + '.I' + port_index + '.', '')
        raw_ports[port_index][name] = raw[param]
    super().process_raw(raw_ports)
['def', 'process_raw', '(', 'self', ',', 'raw', ':', 'dict', ')', '->', 'None', ':', 'raw_ports', '=', '{', '}', 'for', 'param', 'in', 'raw', ':', 'port_index', '=', 'REGEX_PORT_INDEX', '.', 'search', '(', 'param', ')', '.', 'group', '(', '0', ')', 'if', 'port_index', 'not', 'in', 'raw_ports', ':', 'raw_ports', '[', 'port_index', ']', '=', '{', '}', 'name', '=', 'param', '.', 'replace', '(', 'IOPORT', '+', "'.I'", '+', 'port_index', '+', "'.'", ',', "''", ')', 'raw_ports', '[', 'port_index', ']', '[', 'name', ']', '=', 'raw', '[', 'param', ']', 'super', '(', ')', '.', 'process_raw', '(', 'raw_ports', ')']
Pre-process raw dict. Prepare parameters to work with APIItems.
['Pre', '-', 'process', 'raw', 'dict', '.']
train
https://github.com/Kane610/axis/blob/b2b44ce595c7b722b5e13eabcab7b91f048e1808/axis/port_cgi.py#L42-L58
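To make the grouping concrete, here is a standalone sketch of what process_raw does. The IOPORT prefix and the index regex are assumptions for illustration; only the reshaping logic is taken from the record:

import re

IOPORT = 'root.IOPort'                 # assumed parameter prefix
REGEX_PORT_INDEX = re.compile(r'\d+')  # assumed index pattern

raw = {
    'root.IOPort.I0.Configurable': 'yes',
    'root.IOPort.I0.Direction': 'input',
    'root.IOPort.I1.Direction': 'output',
}

raw_ports = {}
for param in raw:
    port_index = REGEX_PORT_INDEX.search(param).group(0)
    raw_ports.setdefault(port_index, {})
    name = param.replace(IOPORT + '.I' + port_index + '.', '')
    raw_ports[port_index][name] = raw[param]

# raw_ports == {'0': {'Configurable': 'yes', 'Direction': 'input'},
#               '1': {'Direction': 'output'}}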
4,398
Trax-air/swagger-parser
swagger_parser/swagger_parser.py
SwaggerParser._definition_from_example
def _definition_from_example(example):
    """Generates a swagger definition json from a given example
    Works only for simple types in the dict

    Args:
        example: The example for which we want a definition
                 Type is DICT

    Returns:
        A dict that is the swagger definition json
    """
    assert isinstance(example, dict)

    def _has_simple_type(value):
        accepted = (str, int, float, bool)
        return isinstance(value, accepted)

    definition = {
        'type': 'object',
        'properties': {},
    }

    for key, value in example.items():
        if not _has_simple_type(value):
            raise Exception("Not implemented yet")

        # bool must be tested before int: bool is a subclass of int, so
        # isinstance(True, int) is True and booleans would otherwise be
        # typed as integers.
        if isinstance(value, bool):
            ret_value = {'type': 'boolean'}
        elif isinstance(value, str):
            ret_value = {'type': 'string'}
        elif isinstance(value, int):
            ret_value = {'type': 'integer', 'format': 'int64'}
        elif isinstance(value, float):
            ret_value = {'type': 'number', 'format': 'double'}
        else:
            raise Exception("Not implemented yet")

        definition['properties'][key] = ret_value

    return definition
python
def _definition_from_example(example):
    """Generates a swagger definition json from a given example
    Works only for simple types in the dict

    Args:
        example: The example for which we want a definition
                 Type is DICT

    Returns:
        A dict that is the swagger definition json
    """
    assert isinstance(example, dict)

    def _has_simple_type(value):
        accepted = (str, int, float, bool)
        return isinstance(value, accepted)

    definition = {
        'type': 'object',
        'properties': {},
    }

    for key, value in example.items():
        if not _has_simple_type(value):
            raise Exception("Not implemented yet")

        # bool must be tested before int: bool is a subclass of int, so
        # isinstance(True, int) is True and booleans would otherwise be
        # typed as integers.
        if isinstance(value, bool):
            ret_value = {'type': 'boolean'}
        elif isinstance(value, str):
            ret_value = {'type': 'string'}
        elif isinstance(value, int):
            ret_value = {'type': 'integer', 'format': 'int64'}
        elif isinstance(value, float):
            ret_value = {'type': 'number', 'format': 'double'}
        else:
            raise Exception("Not implemented yet")

        definition['properties'][key] = ret_value

    return definition
['def', '_definition_from_example', '(', 'example', ')', ':', 'assert', 'isinstance', '(', 'example', ',', 'dict', ')', 'def', '_has_simple_type', '(', 'value', ')', ':', 'accepted', '=', '(', 'str', ',', 'int', ',', 'float', ',', 'bool', ')', 'return', 'isinstance', '(', 'value', ',', 'accepted', ')', 'definition', '=', '{', "'type'", ':', "'object'", ',', "'properties'", ':', '{', '}', ',', '}', 'for', 'key', ',', 'value', 'in', 'example', '.', 'items', '(', ')', ':', 'if', 'not', '_has_simple_type', '(', 'value', ')', ':', 'raise', 'Exception', '(', '"Not implemented yet"', ')', 'ret_value', '=', 'None', 'if', 'isinstance', '(', 'value', ',', 'str', ')', ':', 'ret_value', '=', '{', "'type'", ':', "'string'", '}', 'elif', 'isinstance', '(', 'value', ',', 'int', ')', ':', 'ret_value', '=', '{', "'type'", ':', "'integer'", ',', "'format'", ':', "'int64'", '}', 'elif', 'isinstance', '(', 'value', ',', 'float', ')', ':', 'ret_value', '=', '{', "'type'", ':', "'number'", ',', "'format'", ':', "'double'", '}', 'elif', 'isinstance', '(', 'value', ',', 'bool', ')', ':', 'ret_value', '=', '{', "'type'", ':', "'boolean'", '}', 'else', ':', 'raise', 'Exception', '(', '"Not implemented yet"', ')', 'definition', '[', "'properties'", ']', '[', 'key', ']', '=', 'ret_value', 'return', 'definition']
Generates a swagger definition json from a given example Works only for simple types in the dict Args: example: The example for which we want a definition Type is DICT Returns: A dict that is the swagger definition json
['Generates', 'a', 'swagger', 'definition', 'json', 'from', 'a', 'given', 'example', 'Works', 'only', 'for', 'simple', 'types', 'in', 'the', 'dict']
train
https://github.com/Trax-air/swagger-parser/blob/d97f962a417e76320c59c33dcb223e4373e516d5/swagger_parser/swagger_parser.py#L278-L315
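A usage sketch showing the type mapping in practice. The example dict and the expected output are illustrative, and the method is assumed callable as a static helper since it takes no self:

example = {'name': 'widget', 'count': 3, 'price': 9.99, 'in_stock': True}
definition = SwaggerParser._definition_from_example(example)
# definition == {
#     'type': 'object',
#     'properties': {
#         'name': {'type': 'string'},
#         'count': {'type': 'integer', 'format': 'int64'},
#         'price': {'type': 'number', 'format': 'double'},
#         'in_stock': {'type': 'boolean'},
#     },
# }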
4,399
Opentrons/opentrons
api/src/opentrons/drivers/smoothie_drivers/driver_3_0.py
SmoothieDriver_3_0_0._save_current
def _save_current(self, settings, axes_active=True):
    '''
    Sets the current in Amperes (A) by axis. Currents are limited to
    0.0-2.0 amps per axis motor.

    Note: this method does not send gcode commands, but instead stores the
    desired current setting. A separate call to _generate_current_command()
    will return a gcode command that can be used to set Smoothie's current
    settings.

    settings:
        Dict with axes as keys (e.g.: 'X', 'Y', 'Z', 'A', 'B', or 'C') and
        a floating point number for current (generally between 0.1 and 2)
    '''
    self._active_axes.update({
        ax: axes_active
        for ax in settings.keys()
    })
    self._current_settings['now'].update(settings)
    log.debug("_save_current: {}".format(self.current))
python
def _save_current(self, settings, axes_active=True):
    '''
    Sets the current in Amperes (A) by axis. Currents are limited to
    0.0-2.0 amps per axis motor.

    Note: this method does not send gcode commands, but instead stores the
    desired current setting. A separate call to _generate_current_command()
    will return a gcode command that can be used to set Smoothie's current
    settings.

    settings:
        Dict with axes as keys (e.g.: 'X', 'Y', 'Z', 'A', 'B', or 'C') and
        a floating point number for current (generally between 0.1 and 2)
    '''
    self._active_axes.update({
        ax: axes_active
        for ax in settings.keys()
    })
    self._current_settings['now'].update(settings)
    log.debug("_save_current: {}".format(self.current))
['def', '_save_current', '(', 'self', ',', 'settings', ',', 'axes_active', '=', 'True', ')', ':', 'self', '.', '_active_axes', '.', 'update', '(', '{', 'ax', ':', 'axes_active', 'for', 'ax', 'in', 'settings', '.', 'keys', '(', ')', '}', ')', 'self', '.', '_current_settings', '[', "'now'", ']', '.', 'update', '(', 'settings', ')', 'log', '.', 'debug', '(', '"_save_current: {}"', '.', 'format', '(', 'self', '.', 'current', ')', ')']
Sets the current in Amperes (A) by axis. Currents are limited to 0.0-2.0 amps per axis motor. Note: this method does not send gcode commands, but instead stores the desired current setting. A separate call to _generate_current_command() will return a gcode command that can be used to set Smoothie's current settings. settings: Dict with axes as keys (e.g.: 'X', 'Y', 'Z', 'A', 'B', or 'C') and a floating point number for current (generally between 0.1 and 2)
['Sets', 'the', 'current', 'in', 'Amperes', '(', 'A', ')', 'by', 'axis', '.', 'Currents', 'are', 'limited', 'to', 'be', 'between', '0', '.', '0', '-', '2', '.', '0', 'amps', 'per', 'axis', 'motor', '.']
train
https://github.com/Opentrons/opentrons/blob/a7c15cc2636ecb64ab56c7edc1d8a57163aaeadf/api/src/opentrons/drivers/smoothie_drivers/driver_3_0.py#L696-L714
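A hypothetical call illustrating the bookkeeping; the driver instance, axis letters, and amperages are placeholders:

driver._save_current({'X': 1.25, 'Y': 1.25}, axes_active=True)
# Marks X and Y as active and records 1.25 A for each in
# self._current_settings['now']; no G-code is sent until a separate
# _generate_current_command() call produces the actual command.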