| Column | Type / length |
| --- | --- |
| Unnamed: 0 | int64, 0 – 10k |
| repository_name | stringlengths 7 – 54 |
| func_path_in_repository | stringlengths 5 – 223 |
| func_name | stringlengths 1 – 134 |
| whole_func_string | stringlengths 100 – 30.3k |
| language | stringclasses 1 value |
| func_code_string | stringlengths 100 – 30.3k |
| func_code_tokens | stringlengths 138 – 33.2k |
| func_documentation_string | stringlengths 1 – 15k |
| func_documentation_tokens | stringlengths 5 – 5.14k |
| split_name | stringclasses 1 value |
| func_code_url | stringlengths 91 – 315 |
2,400 | astraw/stdeb | stdeb/util.py | apply_patch | python | train | https://github.com/astraw/stdeb/blob/493ab88e8a60be053b1baef81fb39b45e17ceef5/stdeb/util.py#L544-L593

def apply_patch(patchfile,cwd=None,posix=False,level=0):
    """call 'patch -p[level] [--posix] < arg1'
    posix mode is sometimes necessary. It keeps empty files so that
    dpkg-source removes their contents.
    """
    if not os.path.exists(patchfile):
        raise RuntimeError('patchfile "%s" does not exist'%patchfile)
    fd = open(patchfile,mode='r')
    level_str = '-p%d'%level
    args = ['/usr/bin/patch',level_str]
    if posix:
        args.append('--posix')
    log.info('PATCH COMMAND: %s < %s', ' '.join(args), patchfile)
    log.info(' PATCHING in dir: %s', cwd)
    # print >> sys.stderr, 'PATCH COMMAND:',' '.join(args),'<',patchfile
    # print >> sys.stderr, ' PATCHING in dir:',cwd
    res = subprocess.Popen(
        args, cwd=cwd,
        stdin=fd,
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
        universal_newlines=True
    )
    returncode = None
    while returncode is None:
        returncode = res.poll()
        ready = select.select([res.stdout, res.stderr], [], [], 0.1)
        # XXX figure out how to do this without reading byte-by-byte
        if res.stdout in ready[0]:
            sys.stdout.write(res.stdout.read(1))
            sys.stdout.flush()
        if res.stderr in ready[0]:
            sys.stderr.write(res.stderr.read(1))
            sys.stderr.flush()
    # finish outputting file
    sys.stdout.write(res.stdout.read())
    sys.stdout.flush()
    sys.stderr.write(res.stderr.read())
    sys.stderr.flush()
    if returncode:
        log.error('ERROR running: %s', ' '.join(args))
        log.error('ERROR in %s', cwd)
        # print >> sys.stderr, 'ERROR running: %s'%(' '.join(args),)
        # print >> sys.stderr, 'ERROR in',cwd
        raise RuntimeError('returncode %d'%returncode)
"""call 'patch -p[level] [--posix] < arg1'
posix mode is sometimes necessary. It keeps empty files so that
dpkg-source removes their contents.
"""
if not os.path.exists(patchfile):
raise RuntimeError('patchfile "%s" does not exist'%patchfile)
fd = open(patchfile,mode='r')
level_str = '-p%d'%level
args = ['/usr/bin/patch',level_str]
if posix:
args.append('--posix')
log.info('PATCH COMMAND: %s < %s', ' '.join(args), patchfile)
log.info(' PATCHING in dir: %s', cwd)
# print >> sys.stderr, 'PATCH COMMAND:',' '.join(args),'<',patchfile
# print >> sys.stderr, ' PATCHING in dir:',cwd
res = subprocess.Popen(
args, cwd=cwd,
stdin=fd,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
universal_newlines=True
)
returncode=None
while returncode is None:
returncode = res.poll()
ready = select.select( [res.stdout,res.stderr],[],[],0.1)
# XXX figure out how to do this without reading byte-by-byte
if res.stdout in ready[0]:
sys.stdout.write(res.stdout.read(1))
sys.stdout.flush()
if res.stderr in ready[0]:
sys.stderr.write(res.stderr.read(1))
sys.stderr.flush()
# finish outputting file
sys.stdout.write(res.stdout.read())
sys.stdout.flush()
sys.stderr.write(res.stderr.read())
sys.stderr.flush()
if returncode:
log.error('ERROR running: %s', ' '.join(args))
log.error('ERROR in %s', cwd)
# print >> sys.stderr, 'ERROR running: %s'%(' '.join(args),)
# print >> sys.stderr, 'ERROR in',cwd
raise RuntimeError('returncode %d'%returncode) | ['def', 'apply_patch', '(', 'patchfile', ',', 'cwd', '=', 'None', ',', 'posix', '=', 'False', ',', 'level', '=', '0', ')', ':', 'if', 'not', 'os', '.', 'path', '.', 'exists', '(', 'patchfile', ')', ':', 'raise', 'RuntimeError', '(', '\'patchfile "%s" does not exist\'', '%', 'patchfile', ')', 'fd', '=', 'open', '(', 'patchfile', ',', 'mode', '=', "'r'", ')', 'level_str', '=', "'-p%d'", '%', 'level', 'args', '=', '[', "'/usr/bin/patch'", ',', 'level_str', ']', 'if', 'posix', ':', 'args', '.', 'append', '(', "'--posix'", ')', 'log', '.', 'info', '(', "'PATCH COMMAND: %s < %s'", ',', "' '", '.', 'join', '(', 'args', ')', ',', 'patchfile', ')', 'log', '.', 'info', '(', "' PATCHING in dir: %s'", ',', 'cwd', ')', "# print >> sys.stderr, 'PATCH COMMAND:',' '.join(args),'<',patchfile", "# print >> sys.stderr, ' PATCHING in dir:',cwd", 'res', '=', 'subprocess', '.', 'Popen', '(', 'args', ',', 'cwd', '=', 'cwd', ',', 'stdin', '=', 'fd', ',', 'stdout', '=', 'subprocess', '.', 'PIPE', ',', 'stderr', '=', 'subprocess', '.', 'PIPE', ',', 'universal_newlines', '=', 'True', ')', 'returncode', '=', 'None', 'while', 'returncode', 'is', 'None', ':', 'returncode', '=', 'res', '.', 'poll', '(', ')', 'ready', '=', 'select', '.', 'select', '(', '[', 'res', '.', 'stdout', ',', 'res', '.', 'stderr', ']', ',', '[', ']', ',', '[', ']', ',', '0.1', ')', '# XXX figure out how to do this without reading byte-by-byte', 'if', 'res', '.', 'stdout', 'in', 'ready', '[', '0', ']', ':', 'sys', '.', 'stdout', '.', 'write', '(', 'res', '.', 'stdout', '.', 'read', '(', '1', ')', ')', 'sys', '.', 'stdout', '.', 'flush', '(', ')', 'if', 'res', '.', 'stderr', 'in', 'ready', '[', '0', ']', ':', 'sys', '.', 'stderr', '.', 'write', '(', 'res', '.', 'stderr', '.', 'read', '(', '1', ')', ')', 'sys', '.', 'stderr', '.', 'flush', '(', ')', '# finish outputting file', 'sys', '.', 'stdout', '.', 'write', '(', 'res', '.', 'stdout', '.', 'read', '(', ')', ')', 'sys', '.', 'stdout', '.', 'flush', '(', ')', 'sys', '.', 'stderr', '.', 'write', '(', 'res', '.', 'stderr', '.', 'read', '(', ')', ')', 'sys', '.', 'stderr', '.', 'flush', '(', ')', 'if', 'returncode', ':', 'log', '.', 'error', '(', "'ERROR running: %s'", ',', "' '", '.', 'join', '(', 'args', ')', ')', 'log', '.', 'error', '(', "'ERROR in %s'", ',', 'cwd', ')', "# print >> sys.stderr, 'ERROR running: %s'%(' '.join(args),)", "# print >> sys.stderr, 'ERROR in',cwd", 'raise', 'RuntimeError', '(', "'returncode %d'", '%', 'returncode', ')'] | call 'patch -p[level] [--posix] < arg1'
posix mode is sometimes necessary. It keeps empty files so that
dpkg-source removes their contents. | ['call', 'patch', '-', 'p', '[', 'level', ']', '[', '--', 'posix', ']', '<', 'arg1'] | train | https://github.com/astraw/stdeb/blob/493ab88e8a60be053b1baef81fb39b45e17ceef5/stdeb/util.py#L544-L593 |
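A minimal usage sketch for the apply_patch() function above, assuming stdeb is importable; the patch file name, working directory and strip level are hypothetical examples.

from stdeb.util import apply_patch

# Apply a -p1 patch inside an unpacked source tree; a non-zero exit status
# from /usr/bin/patch raises RuntimeError, as in the function above.
apply_patch('debian-fixes.patch', cwd='build/mypkg-1.0', posix=False, level=1)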
2,401 | jkitzes/macroeco | macroeco/models/_distributions.py | plnorm_gen.rank | python | train | https://github.com/jkitzes/macroeco/blob/ee5fac5560a2d64de3a64738b5bc6833e2d7ff2e/macroeco/models/_distributions.py#L1430-L1440

def rank(self, n, mu, sigma, crit=.5, upper=10000, xtol=1):
    """%(super)s
    Additional Parameters
    ----------------------
    {0}
    """
    return _make_rank(self, n, mu, sigma, crit=crit, upper=upper,
                      xtol=xtol)
"""%(super)s
Additional Parameters
----------------------
{0}
"""
return _make_rank(self, n, mu, sigma, crit=crit, upper=upper,
xtol=xtol) | ['def', 'rank', '(', 'self', ',', 'n', ',', 'mu', ',', 'sigma', ',', 'crit', '=', '.5', ',', 'upper', '=', '10000', ',', 'xtol', '=', '1', ')', ':', 'return', '_make_rank', '(', 'self', ',', 'n', ',', 'mu', ',', 'sigma', ',', 'crit', '=', 'crit', ',', 'upper', '=', 'upper', ',', 'xtol', '=', 'xtol', ')'] | %(super)s
Additional Parameters
----------------------
{0} | ['%', '(', 'super', ')', 's'] | train | https://github.com/jkitzes/macroeco/blob/ee5fac5560a2d64de3a64738b5bc6833e2d7ff2e/macroeco/models/_distributions.py#L1430-L1440 |
2,402 | vertexproject/synapse | synapse/lib/compat.py | cellAuthToHive | python | train | https://github.com/vertexproject/synapse/blob/22e67c5a8f6d7caddbcf34b39ab1bd2d6c4a6e0b/synapse/lib/compat.py#L13-L91

async def cellAuthToHive(dirn, auth):
    '''
    Migrate old cell Auth() data into a HiveAuth().
    '''
    logger.warning('migrating old cell auth to hive')
    path = os.path.join(dirn, 'auth.lmdb')
    lenv = lmdb.open(path, max_dbs=128)
    userdb = lenv.open_db(b'users')
    roledb = lenv.open_db(b'roles')
    migrated_roles = False
    migrated_users = False
    with lenv.begin() as xact:
        with xact.cursor(db=roledb) as curs:
            for lkey, lval in curs.iternext():
                name = lkey.decode('utf8')
                info = s_msgpack.un(lval)
                logger.info(f'Migrating role: {name}')
                role = auth.getRoleByName(name)
                if role is None:
                    logger.info(f'Creating role: {name}')
                    role = await auth.addRole(name)
                rules = info.get('rules', ())
                await role.setRules(rules)
                migrated_roles = True
        if not migrated_roles:  # pragma: no cover
            logger.info('No roles were migrated.')
        with xact.cursor(db=userdb) as curs:
            for lkey, lval in curs.iternext():
                name = lkey.decode('utf8')
                info = s_msgpack.un(lval)
                logger.info(f'Migrating user: {name}')
                user = auth.getUserByName(name)
                if user is None:
                    logger.info(f'Creating user: {name}')
                    user = await auth.addUser(name)
                if info.get('admin', False):
                    await user.setAdmin(True)
                if info.get('locked', False):
                    await user.setLocked(True)
                # set this directly since we only have the shadow
                shadow = info.get('shadow')
                if shadow is not None:
                    await user.info.set('passwd', shadow)
                rules = info.get('rules', ())
                await user.setRules(rules)
                for name in info.get('roles', ()):
                    await user.grant(name)
                migrated_users = True
        if not migrated_users:  # pragma: no cover
            logger.info('No users were migrated.')
    lenv.sync()
    lenv.close()
2,403 | praekelt/django-ultracache | ultracache/decorators.py | cached_get | python | train | https://github.com/praekelt/django-ultracache/blob/8898f10e50fc8f8d0a4cb7d3fe4d945bf257bd9f/ultracache/decorators.py#L16-L101

def cached_get(timeout, *params):
    """Decorator applied specifically to a view's get method"""
    def decorator(view_func):
        @wraps(view_func, assigned=available_attrs(view_func))
        def _wrapped_view(view_or_request, *args, **kwargs):
            # The type of the request gets muddled when using a function based
            # decorator. We must use a function based decorator so it can be
            # used in urls.py.
            request = getattr(view_or_request, "request", view_or_request)
            if not hasattr(_thread_locals, "ultracache_request"):
                setattr(_thread_locals, "ultracache_request", request)
            # If request not GET or HEAD never cache
            if request.method.lower() not in ("get", "head"):
                return view_func(view_or_request, *args, **kwargs)
            # If request contains messages never cache
            l = 0
            try:
                l = len(request._messages)
            except (AttributeError, TypeError):
                pass
            if l:
                return view_func(view_or_request, *args, **kwargs)
            # Compute a cache key
            li = [str(view_or_request.__class__), view_func.__name__]
            # request.get_full_path is implicitly added it no other request
            # path is provided. get_full_path includes the querystring and is
            # the more conservative approach but makes it trivially easy for a
            # request to bust through the cache.
            if not set(params).intersection(set((
                "request.get_full_path()", "request.path", "request.path_info"
            ))):
                li.append(request.get_full_path())
            if "django.contrib.sites" in settings.INSTALLED_APPS:
                li.append(get_current_site_pk(request))
            # Pre-sort kwargs
            keys = list(kwargs.keys())
            keys.sort()
            for key in keys:
                li.append("%s,%s" % (key, kwargs[key]))
            # Extend cache key with custom variables
            for param in params:
                if not isinstance(param, str):
                    param = str(param)
                li.append(eval(param))
            s = ":".join([str(l) for l in li])
            hashed = hashlib.md5(s.encode("utf-8")).hexdigest()
            cache_key = "ucache-get-%s" % hashed
            cached = cache.get(cache_key, None)
            if cached is None:
                # The get view as outermost caller may bluntly set _ultracache
                request._ultracache = []
                response = view_func(view_or_request, *args, **kwargs)
                content = None
                if isinstance(response, TemplateResponse):
                    content = response.render().rendered_content
                elif isinstance(response, HttpResponse):
                    content = response.content
                if content is not None:
                    headers = getattr(response, "_headers", {})
                    cache.set(
                        cache_key,
                        {"content": content, "headers": headers},
                        timeout
                    )
                    cache_meta(request, cache_key)
            else:
                response = HttpResponse(cached["content"])
                # Headers has a non-obvious format
                for k, v in cached["headers"].items():
                    response[v[0]] = v[1]
            return response
        return _wrapped_view
    return decorator
"""Decorator applied specifically to a view's get method"""
def decorator(view_func):
@wraps(view_func, assigned=available_attrs(view_func))
def _wrapped_view(view_or_request, *args, **kwargs):
# The type of the request gets muddled when using a function based
# decorator. We must use a function based decorator so it can be
# used in urls.py.
request = getattr(view_or_request, "request", view_or_request)
if not hasattr(_thread_locals, "ultracache_request"):
setattr(_thread_locals, "ultracache_request", request)
# If request not GET or HEAD never cache
if request.method.lower() not in ("get", "head"):
return view_func(view_or_request, *args, **kwargs)
# If request contains messages never cache
l = 0
try:
l = len(request._messages)
except (AttributeError, TypeError):
pass
if l:
return view_func(view_or_request, *args, **kwargs)
# Compute a cache key
li = [str(view_or_request.__class__), view_func.__name__]
# request.get_full_path is implicitly added it no other request
# path is provided. get_full_path includes the querystring and is
# the more conservative approach but makes it trivially easy for a
# request to bust through the cache.
if not set(params).intersection(set((
"request.get_full_path()", "request.path", "request.path_info"
))):
li.append(request.get_full_path())
if "django.contrib.sites" in settings.INSTALLED_APPS:
li.append(get_current_site_pk(request))
# Pre-sort kwargs
keys = list(kwargs.keys())
keys.sort()
for key in keys:
li.append("%s,%s" % (key, kwargs[key]))
# Extend cache key with custom variables
for param in params:
if not isinstance(param, str):
param = str(param)
li.append(eval(param))
s = ":".join([str(l) for l in li])
hashed = hashlib.md5(s.encode("utf-8")).hexdigest()
cache_key = "ucache-get-%s" % hashed
cached = cache.get(cache_key, None)
if cached is None:
# The get view as outermost caller may bluntly set _ultracache
request._ultracache = []
response = view_func(view_or_request, *args, **kwargs)
content = None
if isinstance(response, TemplateResponse):
content = response.render().rendered_content
elif isinstance(response, HttpResponse):
content = response.content
if content is not None:
headers = getattr(response, "_headers", {})
cache.set(
cache_key,
{"content": content, "headers": headers},
timeout
)
cache_meta(request, cache_key)
else:
response = HttpResponse(cached["content"])
# Headers has a non-obvious format
for k, v in cached["headers"].items():
response[v[0]] = v[1]
return response
return _wrapped_view
return decorator | ['def', 'cached_get', '(', 'timeout', ',', '*', 'params', ')', ':', 'def', 'decorator', '(', 'view_func', ')', ':', '@', 'wraps', '(', 'view_func', ',', 'assigned', '=', 'available_attrs', '(', 'view_func', ')', ')', 'def', '_wrapped_view', '(', 'view_or_request', ',', '*', 'args', ',', '*', '*', 'kwargs', ')', ':', '# The type of the request gets muddled when using a function based', '# decorator. We must use a function based decorator so it can be', '# used in urls.py.', 'request', '=', 'getattr', '(', 'view_or_request', ',', '"request"', ',', 'view_or_request', ')', 'if', 'not', 'hasattr', '(', '_thread_locals', ',', '"ultracache_request"', ')', ':', 'setattr', '(', '_thread_locals', ',', '"ultracache_request"', ',', 'request', ')', '# If request not GET or HEAD never cache', 'if', 'request', '.', 'method', '.', 'lower', '(', ')', 'not', 'in', '(', '"get"', ',', '"head"', ')', ':', 'return', 'view_func', '(', 'view_or_request', ',', '*', 'args', ',', '*', '*', 'kwargs', ')', '# If request contains messages never cache', 'l', '=', '0', 'try', ':', 'l', '=', 'len', '(', 'request', '.', '_messages', ')', 'except', '(', 'AttributeError', ',', 'TypeError', ')', ':', 'pass', 'if', 'l', ':', 'return', 'view_func', '(', 'view_or_request', ',', '*', 'args', ',', '*', '*', 'kwargs', ')', '# Compute a cache key', 'li', '=', '[', 'str', '(', 'view_or_request', '.', '__class__', ')', ',', 'view_func', '.', '__name__', ']', '# request.get_full_path is implicitly added it no other request', '# path is provided. get_full_path includes the querystring and is', '# the more conservative approach but makes it trivially easy for a', '# request to bust through the cache.', 'if', 'not', 'set', '(', 'params', ')', '.', 'intersection', '(', 'set', '(', '(', '"request.get_full_path()"', ',', '"request.path"', ',', '"request.path_info"', ')', ')', ')', ':', 'li', '.', 'append', '(', 'request', '.', 'get_full_path', '(', ')', ')', 'if', '"django.contrib.sites"', 'in', 'settings', '.', 'INSTALLED_APPS', ':', 'li', '.', 'append', '(', 'get_current_site_pk', '(', 'request', ')', ')', '# Pre-sort kwargs', 'keys', '=', 'list', '(', 'kwargs', '.', 'keys', '(', ')', ')', 'keys', '.', 'sort', '(', ')', 'for', 'key', 'in', 'keys', ':', 'li', '.', 'append', '(', '"%s,%s"', '%', '(', 'key', ',', 'kwargs', '[', 'key', ']', ')', ')', '# Extend cache key with custom variables', 'for', 'param', 'in', 'params', ':', 'if', 'not', 'isinstance', '(', 'param', ',', 'str', ')', ':', 'param', '=', 'str', '(', 'param', ')', 'li', '.', 'append', '(', 'eval', '(', 'param', ')', ')', 's', '=', '":"', '.', 'join', '(', '[', 'str', '(', 'l', ')', 'for', 'l', 'in', 'li', ']', ')', 'hashed', '=', 'hashlib', '.', 'md5', '(', 's', '.', 'encode', '(', '"utf-8"', ')', ')', '.', 'hexdigest', '(', ')', 'cache_key', '=', '"ucache-get-%s"', '%', 'hashed', 'cached', '=', 'cache', '.', 'get', '(', 'cache_key', ',', 'None', ')', 'if', 'cached', 'is', 'None', ':', '# The get view as outermost caller may bluntly set _ultracache', 'request', '.', '_ultracache', '=', '[', ']', 'response', '=', 'view_func', '(', 'view_or_request', ',', '*', 'args', ',', '*', '*', 'kwargs', ')', 'content', '=', 'None', 'if', 'isinstance', '(', 'response', ',', 'TemplateResponse', ')', ':', 'content', '=', 'response', '.', 'render', '(', ')', '.', 'rendered_content', 'elif', 'isinstance', '(', 'response', ',', 'HttpResponse', ')', ':', 'content', '=', 'response', '.', 'content', 'if', 'content', 'is', 'not', 'None', ':', 'headers', '=', 'getattr', '(', 
'response', ',', '"_headers"', ',', '{', '}', ')', 'cache', '.', 'set', '(', 'cache_key', ',', '{', '"content"', ':', 'content', ',', '"headers"', ':', 'headers', '}', ',', 'timeout', ')', 'cache_meta', '(', 'request', ',', 'cache_key', ')', 'else', ':', 'response', '=', 'HttpResponse', '(', 'cached', '[', '"content"', ']', ')', '# Headers has a non-obvious format', 'for', 'k', ',', 'v', 'in', 'cached', '[', '"headers"', ']', '.', 'items', '(', ')', ':', 'response', '[', 'v', '[', '0', ']', ']', '=', 'v', '[', '1', ']', 'return', 'response', 'return', '_wrapped_view', 'return', 'decorator'] | Decorator applied specifically to a view's get method | ['Decorator', 'applied', 'specifically', 'to', 'a', 'view', 's', 'get', 'method'] | train | https://github.com/praekelt/django-ultracache/blob/8898f10e50fc8f8d0a4cb7d3fe4d945bf257bd9f/ultracache/decorators.py#L16-L101 |
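A hedged sketch of how the cached_get decorator above is typically applied to a class-based Django view; the view class, template name, timeout and extra key parameter are illustrative only.

from django.views.generic import TemplateView

from ultracache.decorators import cached_get


class HomeView(TemplateView):
    template_name = "home.html"

    # Cache the rendered GET response for 300 seconds; the extra string is
    # eval'd by the decorator and folded into the cache key.
    @cached_get(300, "request.is_secure()")
    def get(self, request, *args, **kwargs):
        return super(HomeView, self).get(request, *args, **kwargs)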
2,404 | inasafe/inasafe | safe/plugin.py | Plugin._create_metadata_converter_action | python | train | https://github.com/inasafe/inasafe/blob/831d60abba919f6d481dc94a8d988cc205130724/safe/plugin.py#L381-L395

def _create_metadata_converter_action(self):
    """Create action for showing metadata converter dialog."""
    icon = resources_path('img', 'icons', 'show-metadata-converter.svg')
    self.action_metadata_converter = QAction(
        QIcon(icon),
        self.tr('InaSAFE Metadata Converter'),
        self.iface.mainWindow())
    self.action_metadata_converter.setStatusTip(self.tr(
        'Convert metadata from version 4.3 to version 3.5.'))
    self.action_metadata_converter.setWhatsThis(self.tr(
        'Use this tool to convert metadata 4.3 to version 3.5'))
    self.action_metadata_converter.triggered.connect(
        self.show_metadata_converter)
    self.add_action(
        self.action_metadata_converter, add_to_toolbar=self.full_toolbar)
"""Create action for showing metadata converter dialog."""
icon = resources_path('img', 'icons', 'show-metadata-converter.svg')
self.action_metadata_converter = QAction(
QIcon(icon),
self.tr('InaSAFE Metadata Converter'),
self.iface.mainWindow())
self.action_metadata_converter.setStatusTip(self.tr(
'Convert metadata from version 4.3 to version 3.5.'))
self.action_metadata_converter.setWhatsThis(self.tr(
'Use this tool to convert metadata 4.3 to version 3.5'))
self.action_metadata_converter.triggered.connect(
self.show_metadata_converter)
self.add_action(
self.action_metadata_converter, add_to_toolbar=self.full_toolbar) | ['def', '_create_metadata_converter_action', '(', 'self', ')', ':', 'icon', '=', 'resources_path', '(', "'img'", ',', "'icons'", ',', "'show-metadata-converter.svg'", ')', 'self', '.', 'action_metadata_converter', '=', 'QAction', '(', 'QIcon', '(', 'icon', ')', ',', 'self', '.', 'tr', '(', "'InaSAFE Metadata Converter'", ')', ',', 'self', '.', 'iface', '.', 'mainWindow', '(', ')', ')', 'self', '.', 'action_metadata_converter', '.', 'setStatusTip', '(', 'self', '.', 'tr', '(', "'Convert metadata from version 4.3 to version 3.5.'", ')', ')', 'self', '.', 'action_metadata_converter', '.', 'setWhatsThis', '(', 'self', '.', 'tr', '(', "'Use this tool to convert metadata 4.3 to version 3.5'", ')', ')', 'self', '.', 'action_metadata_converter', '.', 'triggered', '.', 'connect', '(', 'self', '.', 'show_metadata_converter', ')', 'self', '.', 'add_action', '(', 'self', '.', 'action_metadata_converter', ',', 'add_to_toolbar', '=', 'self', '.', 'full_toolbar', ')'] | Create action for showing metadata converter dialog. | ['Create', 'action', 'for', 'showing', 'metadata', 'converter', 'dialog', '.'] | train | https://github.com/inasafe/inasafe/blob/831d60abba919f6d481dc94a8d988cc205130724/safe/plugin.py#L381-L395 |
2,405 | sanger-pathogens/circlator | circlator/merge.py | Merger._orientation_ok_to_bridge_contigs | python | train | https://github.com/sanger-pathogens/circlator/blob/a4befb8c9dbbcd4b3ad1899a95aa3e689d58b638/circlator/merge.py#L451-L473

def _orientation_ok_to_bridge_contigs(self, start_hit, end_hit):
    '''Returns True iff the orientation of the hits means that the query contig of both hits can bridge the reference contigs of the hits'''
    assert start_hit.qry_name == end_hit.qry_name
    if start_hit.ref_name == end_hit.ref_name:
        return False
    if (
        (self._is_at_ref_end(start_hit) and start_hit.on_same_strand())
        or (self._is_at_ref_start(start_hit) and not start_hit.on_same_strand())
    ):
        start_hit_ok = True
    else:
        start_hit_ok = False
    if (
        (self._is_at_ref_start(end_hit) and end_hit.on_same_strand())
        or (self._is_at_ref_end(end_hit) and not end_hit.on_same_strand())
    ):
        end_hit_ok = True
    else:
        end_hit_ok = False
    return start_hit_ok and end_hit_ok
2,406 | inasafe/inasafe | safe/utilities/pivot_table.py | FlatTable.get_value | python | train | https://github.com/inasafe/inasafe/blob/831d60abba919f6d481dc94a8d988cc205130724/safe/utilities/pivot_table.py#L52-L57

def get_value(self, **kwargs):
    """Return the value for a specific key."""
    key = tuple(kwargs[group] for group in self.groups)
    if key not in self.data:
        self.data[key] = 0
    return self.data[key]
"""Return the value for a specific key."""
key = tuple(kwargs[group] for group in self.groups)
if key not in self.data:
self.data[key] = 0
return self.data[key] | ['def', 'get_value', '(', 'self', ',', '*', '*', 'kwargs', ')', ':', 'key', '=', 'tuple', '(', 'kwargs', '[', 'group', ']', 'for', 'group', 'in', 'self', '.', 'groups', ')', 'if', 'key', 'not', 'in', 'self', '.', 'data', ':', 'self', '.', 'data', '[', 'key', ']', '=', '0', 'return', 'self', '.', 'data', '[', 'key', ']'] | Return the value for a specific key. | ['Return', 'the', 'value', 'for', 'a', 'specific', 'key', '.'] | train | https://github.com/inasafe/inasafe/blob/831d60abba919f6d481dc94a8d988cc205130724/safe/utilities/pivot_table.py#L52-L57 |
2,407 | DeepHorizons/iarm | iarm/arm_instructions/_meta.py | _Meta.get_one_parameter | python | train | https://github.com/DeepHorizons/iarm/blob/b913c9fd577b793a6bbced78b78a5d8d7cd88de4/iarm/arm_instructions/_meta.py#L259-L271

def get_one_parameter(self, regex_exp, parameters):
    """
    Get one parameter from a given regex expression
    Raise an exception if more than one was found
    :param regex_exp:
    :param parameters:
    :return:
    """
    Rx, other = self.get_parameters(regex_exp, parameters)
    if other is not None and other.strip():
        raise iarm.exceptions.ParsingError("Extra arguments found: {}".format(other))
    return Rx.upper()
"""
Get three parameters from a given regex expression
Raise an exception if more than three were found
:param regex_exp:
:param parameters:
:return:
"""
Rx, other = self.get_parameters(regex_exp, parameters)
if other is not None and other.strip():
raise iarm.exceptions.ParsingError("Extra arguments found: {}".format(other))
return Rx.upper() | ['def', 'get_one_parameter', '(', 'self', ',', 'regex_exp', ',', 'parameters', ')', ':', 'Rx', ',', 'other', '=', 'self', '.', 'get_parameters', '(', 'regex_exp', ',', 'parameters', ')', 'if', 'other', 'is', 'not', 'None', 'and', 'other', '.', 'strip', '(', ')', ':', 'raise', 'iarm', '.', 'exceptions', '.', 'ParsingError', '(', '"Extra arguments found: {}"', '.', 'format', '(', 'other', ')', ')', 'return', 'Rx', '.', 'upper', '(', ')'] | Get three parameters from a given regex expression
Raise an exception if more than three were found
:param regex_exp:
:param parameters:
:return: | ['Get', 'three', 'parameters', 'from', 'a', 'given', 'regex', 'expression'] | train | https://github.com/DeepHorizons/iarm/blob/b913c9fd577b793a6bbced78b78a5d8d7cd88de4/iarm/arm_instructions/_meta.py#L259-L271 |
2,408 | spyder-ide/spyder | spyder/plugins/ipythonconsole/widgets/shell.py | ShellWidget.silent_execute | python | train | https://github.com/spyder-ide/spyder/blob/f76836ce1b924bcc4efd3f74f2960d26a4e528e0/spyder/plugins/ipythonconsole/widgets/shell.py#L326-L331

def silent_execute(self, code):
    """Execute code in the kernel without increasing the prompt"""
    try:
        self.kernel_client.execute(to_text_string(code), silent=True)
    except AttributeError:
        pass
"""Execute code in the kernel without increasing the prompt"""
try:
self.kernel_client.execute(to_text_string(code), silent=True)
except AttributeError:
pass | ['def', 'silent_execute', '(', 'self', ',', 'code', ')', ':', 'try', ':', 'self', '.', 'kernel_client', '.', 'execute', '(', 'to_text_string', '(', 'code', ')', ',', 'silent', '=', 'True', ')', 'except', 'AttributeError', ':', 'pass'] | Execute code in the kernel without increasing the prompt | ['Execute', 'code', 'in', 'the', 'kernel', 'without', 'increasing', 'the', 'prompt'] | train | https://github.com/spyder-ide/spyder/blob/f76836ce1b924bcc4efd3f74f2960d26a4e528e0/spyder/plugins/ipythonconsole/widgets/shell.py#L326-L331 |
2,409 | diffeo/rejester | rejester/_queue.py | RejesterQueue.check_out_item | python | train | https://github.com/diffeo/rejester/blob/5438a4a18be2801d7826c46e2079ba9639d2ecb4/rejester/_queue.py#L170-L196

def check_out_item(self, expiration):
    """Get the highest-priority item out of this queue.
    Returns the item, or None if no items are available. The item
    must be either ``return_item()`` or ``renew_item()`` before
    ``expiration`` seconds pass, or it will become available to
    future callers. The item will be marked as being owned by
    ``worker_id``.
    """
    conn = redis.StrictRedis(connection_pool=self.pool)
    self._run_expiration(conn)
    expiration += time.time()
    script = conn.register_script("""
    local item = redis.call("zrevrange", KEYS[1], 0, 0)
    if #item == 0 then return nil end
    item = item[1]
    redis.call("zrem", KEYS[1], item)
    redis.call("zadd", KEYS[2], ARGV[1], item)
    redis.call("hset", KEYS[3], "i" .. item, "w" .. ARGV[2])
    redis.call("hset", KEYS[3], "w" .. ARGV[2], "i" .. item)
    return item
    """)
    result = script(keys=[self._key_available(), self._key_expiration(),
                          self._key_workers()],
                    args=[expiration, self._get_worker_id(conn)])
    return result
"""Get the highest-priority item out of this queue.
Returns the item, or None if no items are available. The item
must be either ``return_item()`` or ``renew_item()`` before
``expiration`` seconds pass, or it will become available to
future callers. The item will be marked as being owned by
``worker_id``.
"""
conn = redis.StrictRedis(connection_pool=self.pool)
self._run_expiration(conn)
expiration += time.time()
script = conn.register_script("""
local item = redis.call("zrevrange", KEYS[1], 0, 0)
if #item == 0 then return nil end
item = item[1]
redis.call("zrem", KEYS[1], item)
redis.call("zadd", KEYS[2], ARGV[1], item)
redis.call("hset", KEYS[3], "i" .. item, "w" .. ARGV[2])
redis.call("hset", KEYS[3], "w" .. ARGV[2], "i" .. item)
return item
""")
result = script(keys=[self._key_available(), self._key_expiration(),
self._key_workers()],
args=[expiration, self._get_worker_id(conn)])
return result | ['def', 'check_out_item', '(', 'self', ',', 'expiration', ')', ':', 'conn', '=', 'redis', '.', 'StrictRedis', '(', 'connection_pool', '=', 'self', '.', 'pool', ')', 'self', '.', '_run_expiration', '(', 'conn', ')', 'expiration', '+=', 'time', '.', 'time', '(', ')', 'script', '=', 'conn', '.', 'register_script', '(', '"""\n local item = redis.call("zrevrange", KEYS[1], 0, 0)\n if #item == 0 then return nil end\n item = item[1]\n redis.call("zrem", KEYS[1], item)\n redis.call("zadd", KEYS[2], ARGV[1], item)\n redis.call("hset", KEYS[3], "i" .. item, "w" .. ARGV[2])\n redis.call("hset", KEYS[3], "w" .. ARGV[2], "i" .. item)\n return item\n """', ')', 'result', '=', 'script', '(', 'keys', '=', '[', 'self', '.', '_key_available', '(', ')', ',', 'self', '.', '_key_expiration', '(', ')', ',', 'self', '.', '_key_workers', '(', ')', ']', ',', 'args', '=', '[', 'expiration', ',', 'self', '.', '_get_worker_id', '(', 'conn', ')', ']', ')', 'return', 'result'] | Get the highest-priority item out of this queue.
Returns the item, or None if no items are available. The item
must be either ``return_item()`` or ``renew_item()`` before
``expiration`` seconds pass, or it will become available to
future callers. The item will be marked as being owned by
``worker_id``. | ['Get', 'the', 'highest', '-', 'priority', 'item', 'out', 'of', 'this', 'queue', '.'] | train | https://github.com/diffeo/rejester/blob/5438a4a18be2801d7826c46e2079ba9639d2ecb4/rejester/_queue.py#L170-L196 |
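A hedged sketch of a worker loop built around check_out_item(); the queue is assumed to be an already-constructed RejesterQueue, process() is a made-up callable, and return_item() is only referenced by name in the docstring above, so its exact signature is an assumption here.

import time

def work_one(queue, process, lease_seconds=300):
    # None means the queue is currently empty.
    item = queue.check_out_item(lease_seconds)
    if item is None:
        time.sleep(1)
        return False
    try:
        process(item)
    finally:
        # Per the docstring, the item must be returned (or renewed) before
        # the lease expires or it becomes available to other workers.
        queue.return_item(item)
    return True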
2,410 | OnroerendErfgoed/pyramid_urireferencer | pyramid_urireferencer/renderers.py | application_adapter | python | train | https://github.com/OnroerendErfgoed/pyramid_urireferencer/blob/c6ee4ba863e32ced304b9cf00f3f5b450757a29a/pyramid_urireferencer/renderers.py#L41-L59

def application_adapter(obj, request):
    """
    Adapter for rendering a :class:`pyramid_urireferencer.models.ApplicationResponse` to json.
    :param pyramid_urireferencer.models.ApplicationResponse obj: The response to be rendered.
    :rtype: :class:`dict`
    """
    return {
        'title': obj.title,
        'uri': obj.uri,
        'service_url': obj.service_url,
        'success': obj.success,
        'has_references': obj.has_references,
        'count': obj.count,
        'items': [{
            'uri': i.uri,
            'title': i.title
        } for i in obj.items] if obj.items is not None else None
    }
"""
Adapter for rendering a :class:`pyramid_urireferencer.models.ApplicationResponse` to json.
:param pyramid_urireferencer.models.ApplicationResponse obj: The response to be rendered.
:rtype: :class:`dict`
"""
return {
'title': obj.title,
'uri': obj.uri,
'service_url': obj.service_url,
'success': obj.success,
'has_references': obj.has_references,
'count': obj.count,
'items': [{
'uri': i.uri,
'title': i.title
} for i in obj.items] if obj.items is not None else None
} | ['def', 'application_adapter', '(', 'obj', ',', 'request', ')', ':', 'return', '{', "'title'", ':', 'obj', '.', 'title', ',', "'uri'", ':', 'obj', '.', 'uri', ',', "'service_url'", ':', 'obj', '.', 'service_url', ',', "'success'", ':', 'obj', '.', 'success', ',', "'has_references'", ':', 'obj', '.', 'has_references', ',', "'count'", ':', 'obj', '.', 'count', ',', "'items'", ':', '[', '{', "'uri'", ':', 'i', '.', 'uri', ',', "'title'", ':', 'i', '.', 'title', '}', 'for', 'i', 'in', 'obj', '.', 'items', ']', 'if', 'obj', '.', 'items', 'is', 'not', 'None', 'else', 'None', '}'] | Adapter for rendering a :class:`pyramid_urireferencer.models.ApplicationResponse` to json.
:param pyramid_urireferencer.models.ApplicationResponse obj: The response to be rendered.
:rtype: :class:`dict` | ['Adapter', 'for', 'rendering', 'a', ':', 'class', ':', 'pyramid_urireferencer', '.', 'models', '.', 'ApplicationResponse', 'to', 'json', '.'] | train | https://github.com/OnroerendErfgoed/pyramid_urireferencer/blob/c6ee4ba863e32ced304b9cf00f3f5b450757a29a/pyramid_urireferencer/renderers.py#L41-L59 |
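A sketch of wiring application_adapter() into a Pyramid JSON renderer, assuming ApplicationResponse lives in pyramid_urireferencer.models as the docstring above indicates.

from pyramid.renderers import JSON

from pyramid_urireferencer.models import ApplicationResponse
from pyramid_urireferencer.renderers import application_adapter

json_renderer = JSON()
# Views returning an ApplicationResponse are serialised via the adapter.
json_renderer.add_adapter(ApplicationResponse, application_adapter)
# config.add_renderer('json', json_renderer)  # inside a Pyramid setup function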
2,411 | ValvePython/steam | steam/client/__init__.py | SteamClient.store_sentry | python | train | https://github.com/ValvePython/steam/blob/2de1364c47598410b572114e6129eab8fff71d5b/steam/client/__init__.py#L386-L404

def store_sentry(self, username, sentry_bytes):
    """
    Store sentry bytes under a username
    :param username: username
    :type username: :class:`str`
    :return: Whenver the operation succeed
    :rtype: :class:`bool`
    """
    filepath = self._get_sentry_path(username)
    if filepath:
        try:
            with open(filepath, 'wb') as f:
                f.write(sentry_bytes)
            return True
        except IOError as e:
            self._LOG.error("store_sentry: %s" % str(e))
    return False
"""
Store sentry bytes under a username
:param username: username
:type username: :class:`str`
:return: Whenver the operation succeed
:rtype: :class:`bool`
"""
filepath = self._get_sentry_path(username)
if filepath:
try:
with open(filepath, 'wb') as f:
f.write(sentry_bytes)
return True
except IOError as e:
self._LOG.error("store_sentry: %s" % str(e))
return False | ['def', 'store_sentry', '(', 'self', ',', 'username', ',', 'sentry_bytes', ')', ':', 'filepath', '=', 'self', '.', '_get_sentry_path', '(', 'username', ')', 'if', 'filepath', ':', 'try', ':', 'with', 'open', '(', 'filepath', ',', "'wb'", ')', 'as', 'f', ':', 'f', '.', 'write', '(', 'sentry_bytes', ')', 'return', 'True', 'except', 'IOError', 'as', 'e', ':', 'self', '.', '_LOG', '.', 'error', '(', '"store_sentry: %s"', '%', 'str', '(', 'e', ')', ')', 'return', 'False'] | Store sentry bytes under a username
:param username: username
:type username: :class:`str`
:return: Whenver the operation succeed
:rtype: :class:`bool` | ['Store', 'sentry', 'bytes', 'under', 'a', 'username'] | train | https://github.com/ValvePython/steam/blob/2de1364c47598410b572114e6129eab8fff71d5b/steam/client/__init__.py#L386-L404 |
2,412 | jobovy/galpy | galpy/actionAngle/actionAngleTorus.py | actionAngleTorus.Freqs | python | train | https://github.com/jobovy/galpy/blob/9c5b9fe65d58835624dffe432be282060918ee08/galpy/actionAngle/actionAngleTorus.py#L159-L195

def Freqs(self,jr,jphi,jz,**kwargs):
    """
    NAME:
       Freqs
    PURPOSE:
       return the frequencies corresponding to a torus
    INPUT:
       jr - radial action (scalar)
       jphi - azimuthal action (scalar)
       jz - vertical action (scalar)
       tol= (object-wide value) goal for |dJ|/|J| along the torus
    OUTPUT:
       (OmegaR,Omegaphi,Omegaz)
    HISTORY:
       2015-08-07 - Written - Bovy (UofT)
    """
    out= actionAngleTorus_c.actionAngleTorus_Freqs_c(\
        self._pot,
        jr,jphi,jz,
        tol=kwargs.get('tol',self._tol))
    if out[3] != 0:
        warnings.warn("actionAngleTorus' AutoFit exited with non-zero return status %i: %s" % (out[3],_autofit_errvals[out[3]]),
                      galpyWarning)
    return out
"""
NAME:
Freqs
PURPOSE:
return the frequencies corresponding to a torus
INPUT:
jr - radial action (scalar)
jphi - azimuthal action (scalar)
jz - vertical action (scalar)
tol= (object-wide value) goal for |dJ|/|J| along the torus
OUTPUT:
(OmegaR,Omegaphi,Omegaz)
HISTORY:
2015-08-07 - Written - Bovy (UofT)
"""
out= actionAngleTorus_c.actionAngleTorus_Freqs_c(\
self._pot,
jr,jphi,jz,
tol=kwargs.get('tol',self._tol))
if out[3] != 0:
warnings.warn("actionAngleTorus' AutoFit exited with non-zero return status %i: %s" % (out[3],_autofit_errvals[out[3]]),
galpyWarning)
return out | ['def', 'Freqs', '(', 'self', ',', 'jr', ',', 'jphi', ',', 'jz', ',', '*', '*', 'kwargs', ')', ':', 'out', '=', 'actionAngleTorus_c', '.', 'actionAngleTorus_Freqs_c', '(', 'self', '.', '_pot', ',', 'jr', ',', 'jphi', ',', 'jz', ',', 'tol', '=', 'kwargs', '.', 'get', '(', "'tol'", ',', 'self', '.', '_tol', ')', ')', 'if', 'out', '[', '3', ']', '!=', '0', ':', 'warnings', '.', 'warn', '(', '"actionAngleTorus\' AutoFit exited with non-zero return status %i: %s"', '%', '(', 'out', '[', '3', ']', ',', '_autofit_errvals', '[', 'out', '[', '3', ']', ']', ')', ',', 'galpyWarning', ')', 'return', 'out'] | NAME:
Freqs
PURPOSE:
return the frequencies corresponding to a torus
INPUT:
jr - radial action (scalar)
jphi - azimuthal action (scalar)
jz - vertical action (scalar)
tol= (object-wide value) goal for |dJ|/|J| along the torus
OUTPUT:
(OmegaR,Omegaphi,Omegaz)
HISTORY:
2015-08-07 - Written - Bovy (UofT) | ['NAME', ':'] | train | https://github.com/jobovy/galpy/blob/9c5b9fe65d58835624dffe432be282060918ee08/galpy/actionAngle/actionAngleTorus.py#L159-L195 |
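A minimal sketch of calling Freqs() above, assuming a galpy installation with the compiled actionAngleTorus extension; the action values are illustrative.

from galpy.actionAngle import actionAngleTorus
from galpy.potential import MWPotential2014

aAT = actionAngleTorus(pot=MWPotential2014)
# Actions (Jr, Jphi, Jz) in galpy's internal units; returns the frequencies
# (OmegaR, Omegaphi, Omegaz) described in the docstring above.
freqs = aAT.Freqs(0.1, 1.1, 0.05)
print(freqs)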
2,413 | GeospatialPython/pyshp | shapefile.py | Writer.__dbfRecord | python | train | https://github.com/GeospatialPython/pyshp/blob/71231ddc5aa54f155d4f0563c56006fffbfc84e7/shapefile.py#L1535-L1600

def __dbfRecord(self, record):
    """Writes the dbf records."""
    f = self.__getFileObj(self.dbf)
    if self.recNum == 0:
        # first records, so all fields should be set
        # allowing us to write the dbf header
        # cannot change the fields after this point
        self.__dbfHeader()
    # begin
    self.recNum += 1
    if not self.fields[0][0].startswith("Deletion"):
        f.write(b' ') # deletion flag
    for (fieldName, fieldType, size, deci), value in zip(self.fields, record):
        fieldType = fieldType.upper()
        size = int(size)
        if fieldType in ("N","F"):
            # numeric or float: number stored as a string, right justified, and padded with blanks to the width of the field.
            if value in MISSING:
                value = b"*"*size # QGIS NULL
            elif not deci:
                # force to int
                try:
                    # first try to force directly to int.
                    # forcing a large int to float and back to int
                    # will lose information and result in wrong nr.
                    value = int(value)
                except ValueError:
                    # forcing directly to int failed, so was probably a float.
                    value = int(float(value))
                value = format(value, "d")[:size].rjust(size) # caps the size if exceeds the field size
            else:
                value = float(value)
                value = format(value, ".%sf"%deci)[:size].rjust(size) # caps the size if exceeds the field size
        elif fieldType == "D":
            # date: 8 bytes - date stored as a string in the format YYYYMMDD.
            if isinstance(value, date):
                value = '{:04d}{:02d}{:02d}'.format(value.year, value.month, value.day)
            elif isinstance(value, list) and len(value) == 3:
                value = '{:04d}{:02d}{:02d}'.format(*value)
            elif value in MISSING:
                value = b'0' * 8 # QGIS NULL for date type
            elif is_string(value) and len(value) == 8:
                pass # value is already a date string
            else:
                raise ShapefileException("Date values must be either a datetime.date object, a list, a YYYYMMDD string, or a missing value.")
        elif fieldType == 'L':
            # logical: 1 byte - initialized to 0x20 (space) otherwise T or F.
            if value in MISSING:
                value = b' ' # missing is set to space
            elif value in [True,1]:
                value = b'T'
            elif value in [False,0]:
                value = b'F'
            else:
                value = b' ' # unknown is set to space
        else:
            # anything else is forced to string, truncated to the length of the field
            value = b(value, self.encoding, self.encodingErrors)[:size].ljust(size)
        if not isinstance(value, bytes):
            # just in case some of the numeric format() and date strftime() results are still in unicode (Python 3 only)
            value = b(value, 'ascii', self.encodingErrors) # should be default ascii encoding
        if len(value) != size:
            raise ShapefileException(
                "Shapefile Writer unable to pack incorrect sized value"
                " (size %d) into field '%s' (size %d)." % (len(value), fieldName, size))
        f.write(value)
"""Writes the dbf records."""
f = self.__getFileObj(self.dbf)
if self.recNum == 0:
# first records, so all fields should be set
# allowing us to write the dbf header
# cannot change the fields after this point
self.__dbfHeader()
# begin
self.recNum += 1
if not self.fields[0][0].startswith("Deletion"):
f.write(b' ') # deletion flag
for (fieldName, fieldType, size, deci), value in zip(self.fields, record):
fieldType = fieldType.upper()
size = int(size)
if fieldType in ("N","F"):
# numeric or float: number stored as a string, right justified, and padded with blanks to the width of the field.
if value in MISSING:
value = b"*"*size # QGIS NULL
elif not deci:
# force to int
try:
# first try to force directly to int.
# forcing a large int to float and back to int
# will lose information and result in wrong nr.
value = int(value)
except ValueError:
# forcing directly to int failed, so was probably a float.
value = int(float(value))
value = format(value, "d")[:size].rjust(size) # caps the size if exceeds the field size
else:
value = float(value)
value = format(value, ".%sf"%deci)[:size].rjust(size) # caps the size if exceeds the field size
elif fieldType == "D":
# date: 8 bytes - date stored as a string in the format YYYYMMDD.
if isinstance(value, date):
value = '{:04d}{:02d}{:02d}'.format(value.year, value.month, value.day)
elif isinstance(value, list) and len(value) == 3:
value = '{:04d}{:02d}{:02d}'.format(*value)
elif value in MISSING:
value = b'0' * 8 # QGIS NULL for date type
elif is_string(value) and len(value) == 8:
pass # value is already a date string
else:
raise ShapefileException("Date values must be either a datetime.date object, a list, a YYYYMMDD string, or a missing value.")
elif fieldType == 'L':
# logical: 1 byte - initialized to 0x20 (space) otherwise T or F.
if value in MISSING:
value = b' ' # missing is set to space
elif value in [True,1]:
value = b'T'
elif value in [False,0]:
value = b'F'
else:
value = b' ' # unknown is set to space
else:
# anything else is forced to string, truncated to the length of the field
value = b(value, self.encoding, self.encodingErrors)[:size].ljust(size)
if not isinstance(value, bytes):
# just in case some of the numeric format() and date strftime() results are still in unicode (Python 3 only)
value = b(value, 'ascii', self.encodingErrors) # should be default ascii encoding
if len(value) != size:
raise ShapefileException(
"Shapefile Writer unable to pack incorrect sized value"
" (size %d) into field '%s' (size %d)." % (len(value), fieldName, size))
f.write(value) | ['def', '__dbfRecord', '(', 'self', ',', 'record', ')', ':', 'f', '=', 'self', '.', '__getFileObj', '(', 'self', '.', 'dbf', ')', 'if', 'self', '.', 'recNum', '==', '0', ':', '# first records, so all fields should be set\r', '# allowing us to write the dbf header\r', '# cannot change the fields after this point\r', 'self', '.', '__dbfHeader', '(', ')', '# begin\r', 'self', '.', 'recNum', '+=', '1', 'if', 'not', 'self', '.', 'fields', '[', '0', ']', '[', '0', ']', '.', 'startswith', '(', '"Deletion"', ')', ':', 'f', '.', 'write', '(', "b' '", ')', '# deletion flag\r', 'for', '(', 'fieldName', ',', 'fieldType', ',', 'size', ',', 'deci', ')', ',', 'value', 'in', 'zip', '(', 'self', '.', 'fields', ',', 'record', ')', ':', 'fieldType', '=', 'fieldType', '.', 'upper', '(', ')', 'size', '=', 'int', '(', 'size', ')', 'if', 'fieldType', 'in', '(', '"N"', ',', '"F"', ')', ':', '# numeric or float: number stored as a string, right justified, and padded with blanks to the width of the field.\r', 'if', 'value', 'in', 'MISSING', ':', 'value', '=', 'b"*"', '*', 'size', '# QGIS NULL\r', 'elif', 'not', 'deci', ':', '# force to int\r', 'try', ':', '# first try to force directly to int.\r', '# forcing a large int to float and back to int\r', '# will lose information and result in wrong nr.\r', 'value', '=', 'int', '(', 'value', ')', 'except', 'ValueError', ':', '# forcing directly to int failed, so was probably a float.\r', 'value', '=', 'int', '(', 'float', '(', 'value', ')', ')', 'value', '=', 'format', '(', 'value', ',', '"d"', ')', '[', ':', 'size', ']', '.', 'rjust', '(', 'size', ')', '# caps the size if exceeds the field size\r', 'else', ':', 'value', '=', 'float', '(', 'value', ')', 'value', '=', 'format', '(', 'value', ',', '".%sf"', '%', 'deci', ')', '[', ':', 'size', ']', '.', 'rjust', '(', 'size', ')', '# caps the size if exceeds the field size\r', 'elif', 'fieldType', '==', '"D"', ':', '# date: 8 bytes - date stored as a string in the format YYYYMMDD.\r', 'if', 'isinstance', '(', 'value', ',', 'date', ')', ':', 'value', '=', "'{:04d}{:02d}{:02d}'", '.', 'format', '(', 'value', '.', 'year', ',', 'value', '.', 'month', ',', 'value', '.', 'day', ')', 'elif', 'isinstance', '(', 'value', ',', 'list', ')', 'and', 'len', '(', 'value', ')', '==', '3', ':', 'value', '=', "'{:04d}{:02d}{:02d}'", '.', 'format', '(', '*', 'value', ')', 'elif', 'value', 'in', 'MISSING', ':', 'value', '=', "b'0'", '*', '8', '# QGIS NULL for date type\r', 'elif', 'is_string', '(', 'value', ')', 'and', 'len', '(', 'value', ')', '==', '8', ':', 'pass', '# value is already a date string\r', 'else', ':', 'raise', 'ShapefileException', '(', '"Date values must be either a datetime.date object, a list, a YYYYMMDD string, or a missing value."', ')', 'elif', 'fieldType', '==', "'L'", ':', '# logical: 1 byte - initialized to 0x20 (space) otherwise T or F.\r', 'if', 'value', 'in', 'MISSING', ':', 'value', '=', "b' '", '# missing is set to space\r', 'elif', 'value', 'in', '[', 'True', ',', '1', ']', ':', 'value', '=', "b'T'", 'elif', 'value', 'in', '[', 'False', ',', '0', ']', ':', 'value', '=', "b'F'", 'else', ':', 'value', '=', "b' '", '# unknown is set to space\r', 'else', ':', '# anything else is forced to string, truncated to the length of the field\r', 'value', '=', 'b', '(', 'value', ',', 'self', '.', 'encoding', ',', 'self', '.', 'encodingErrors', ')', '[', ':', 'size', ']', '.', 'ljust', '(', 'size', ')', 'if', 'not', 'isinstance', '(', 'value', ',', 'bytes', ')', ':', '# just in case some of the numeric format() 
and date strftime() results are still in unicode (Python 3 only)\r', 'value', '=', 'b', '(', 'value', ',', "'ascii'", ',', 'self', '.', 'encodingErrors', ')', '# should be default ascii encoding\r', 'if', 'len', '(', 'value', ')', '!=', 'size', ':', 'raise', 'ShapefileException', '(', '"Shapefile Writer unable to pack incorrect sized value"', '" (size %d) into field \'%s\' (size %d)."', '%', '(', 'len', '(', 'value', ')', ',', 'fieldName', ',', 'size', ')', ')', 'f', '.', 'write', '(', 'value', ')'] | Writes the dbf records. | ['Writes', 'the', 'dbf', 'records', '.'] | train | https://github.com/GeospatialPython/pyshp/blob/71231ddc5aa54f155d4f0563c56006fffbfc84e7/shapefile.py#L1535-L1600 |
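A sketch of the public pyshp Writer API that ends up calling the private __dbfRecord() above; the file name, fields and record values are made-up examples.

import shapefile

w = shapefile.Writer("towns")
w.field("NAME", "C", size=40)
w.field("POP", "N", size=10, decimal=0)
w.field("FOUNDED", "D")
w.null()                                    # null geometry for this record
w.record("Springfield", 52000, "19030214")  # written via __dbfRecord()
w.close()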
2,414 | RudolfCardinal/pythonlib | cardinal_pythonlib/django/fields/jsonclassfield.py | JsonClassField.to_python | python | train | https://github.com/RudolfCardinal/pythonlib/blob/0b84cb35f38bd7d8723958dae51b480a829b7227/cardinal_pythonlib/django/fields/jsonclassfield.py#L161-L178

def to_python(self, value):
    """
    "Called during deserialization and during the clean() method used
    from forms.... [s]hould deal gracefully with... (*) an instance of
    the correct type; (*) a string; (*) None (if the field allows
    null=True)."
    "For ``to_python()``, if anything goes wrong during value conversion,
    you should raise a ``ValidationError`` exception."
    """
    if value is None:
        return value
    if not isinstance(value, str):
        return value
    try:
        return json_decode(value)
    except Exception as err:
        raise ValidationError(repr(err))
"""
"Called during deserialization and during the clean() method used
from forms.... [s]hould deal gracefully with... (*) an instance of
the correct type; (*) a string; (*) None (if the field allows
null=True)."
"For ``to_python()``, if anything goes wrong during value conversion,
you should raise a ``ValidationError`` exception."
"""
if value is None:
return value
if not isinstance(value, str):
return value
try:
return json_decode(value)
except Exception as err:
raise ValidationError(repr(err)) | ['def', 'to_python', '(', 'self', ',', 'value', ')', ':', 'if', 'value', 'is', 'None', ':', 'return', 'value', 'if', 'not', 'isinstance', '(', 'value', ',', 'str', ')', ':', 'return', 'value', 'try', ':', 'return', 'json_decode', '(', 'value', ')', 'except', 'Exception', 'as', 'err', ':', 'raise', 'ValidationError', '(', 'repr', '(', 'err', ')', ')'] | "Called during deserialization and during the clean() method used
from forms.... [s]hould deal gracefully with... (*) an instance of
the correct type; (*) a string; (*) None (if the field allows
null=True)."
"For ``to_python()``, if anything goes wrong during value conversion,
you should raise a ``ValidationError`` exception." | ['Called', 'during', 'deserialization', 'and', 'during', 'the', 'clean', '()', 'method', 'used', 'from', 'forms', '....', '[', 's', ']', 'hould', 'deal', 'gracefully', 'with', '...', '(', '*', ')', 'an', 'instance', 'of', 'the', 'correct', 'type', ';', '(', '*', ')', 'a', 'string', ';', '(', '*', ')', 'None', '(', 'if', 'the', 'field', 'allows', 'null', '=', 'True', ')', '.'] | train | https://github.com/RudolfCardinal/pythonlib/blob/0b84cb35f38bd7d8723958dae51b480a829b7227/cardinal_pythonlib/django/fields/jsonclassfield.py#L161-L178 |
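A hypothetical Django model using the field whose to_python() is shown above; the model is illustrative only and assumes the field class is importable from the module path in this row.

from django.db import models

from cardinal_pythonlib.django.fields.jsonclassfield import JsonClassField


class Measurement(models.Model):
    # Stores an arbitrary JSON-serialisable Python object; to_python()
    # decodes the stored JSON string back into that object on load.
    payload = JsonClassField(null=True)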
2,415 | Esri/ArcREST | src/arcrest/common/symbology.py | SimpleMarkerSymbol.value | python | train | https://github.com/Esri/ArcREST/blob/ab240fde2b0200f61d4a5f6df033516e53f2f416/src/arcrest/common/symbology.py#L157-L182

def value(self):
    """returns the object as dictionary"""
    if self._outline is None:
        return {
            "type" : "esriSMS",
            "style" : self._style,
            "color" : self._color.value,
            "size" : self._size,
            "angle" : self._angle,
            "xoffset" : self._xoffset,
            "yoffset" : self._yoffset
        }
    else:
        return {
            "type" : "esriSMS",
            "style" : self._style,
            "color" : self._color.value,
            "size" : self._size,
            "angle" : self._angle,
            "xoffset" : self._xoffset,
            "yoffset" : self._yoffset,
            "outline" : {
                "width" : self._outline['width'],
                "color" : self._color.value
            }
        }
"""returns the object as dictionary"""
if self._outline is None:
return {
"type" : "esriSMS",
"style" : self._style,
"color" : self._color.value,
"size" : self._size,
"angle" : self._angle,
"xoffset" : self._xoffset,
"yoffset" : self._yoffset
}
else:
return {
"type" : "esriSMS",
"style" : self._style,
"color" : self._color.value,
"size" : self._size,
"angle" : self._angle,
"xoffset" : self._xoffset,
"yoffset" : self._yoffset,
"outline" : {
"width" : self._outline['width'],
"color" : self._color.value
}
} | ['def', 'value', '(', 'self', ')', ':', 'if', 'self', '.', '_outline', 'is', 'None', ':', 'return', '{', '"type"', ':', '"esriSMS"', ',', '"style"', ':', 'self', '.', '_style', ',', '"color"', ':', 'self', '.', '_color', '.', 'value', ',', '"size"', ':', 'self', '.', '_size', ',', '"angle"', ':', 'self', '.', '_angle', ',', '"xoffset"', ':', 'self', '.', '_xoffset', ',', '"yoffset"', ':', 'self', '.', '_yoffset', '}', 'else', ':', 'return', '{', '"type"', ':', '"esriSMS"', ',', '"style"', ':', 'self', '.', '_style', ',', '"color"', ':', 'self', '.', '_color', '.', 'value', ',', '"size"', ':', 'self', '.', '_size', ',', '"angle"', ':', 'self', '.', '_angle', ',', '"xoffset"', ':', 'self', '.', '_xoffset', ',', '"yoffset"', ':', 'self', '.', '_yoffset', ',', '"outline"', ':', '{', '"width"', ':', 'self', '.', '_outline', '[', "'width'", ']', ',', '"color"', ':', 'self', '.', '_color', '.', 'value', '}', '}'] | returns the object as dictionary | ['returns', 'the', 'object', 'as', 'dictionary'] | train | https://github.com/Esri/ArcREST/blob/ab240fde2b0200f61d4a5f6df033516e53f2f416/src/arcrest/common/symbology.py#L157-L182 |
2,416 | DataONEorg/d1_python | lib_common/src/d1_common/resource_map.py | ResourceMap.getAggregation | python | train | https://github.com/DataONEorg/d1_python/blob/3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d/lib_common/src/d1_common/resource_map.py#L307-L316

def getAggregation(self):
    """Returns:
    str : URIRef of the Aggregation entity
    """
    self._check_initialized()
    return [
        o for o in self.subjects(predicate=rdflib.RDF.type, object=ORE.Aggregation)
    ][0]
"""Returns:
str : URIRef of the Aggregation entity
"""
self._check_initialized()
return [
o for o in self.subjects(predicate=rdflib.RDF.type, object=ORE.Aggregation)
][0] | ['def', 'getAggregation', '(', 'self', ')', ':', 'self', '.', '_check_initialized', '(', ')', 'return', '[', 'o', 'for', 'o', 'in', 'self', '.', 'subjects', '(', 'predicate', '=', 'rdflib', '.', 'RDF', '.', 'type', ',', 'object', '=', 'ORE', '.', 'Aggregation', ')', ']', '[', '0', ']'] | Returns:
str : URIRef of the Aggregation entity | ['Returns', ':'] | train | https://github.com/DataONEorg/d1_python/blob/3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d/lib_common/src/d1_common/resource_map.py#L307-L316 |
2,417 | jermnelson/flask-fedora-commons | flask_fedora_commons/__init__.py | Repository.connect | def connect(self,
fedora_url,
data=None,
method='Get'):
"""Method attempts to connect to REST servers of the Fedora
Commons repository using optional data parameter.
Args:
fedora_url(string): Fedora URL
            data(dict): Data to pass through to the REST endpoint
method(str): REST Method, defaults to GET
Returns:
result(string): Response string from Fedora
"""
if data is None:
data = {}
if not fedora_url.startswith("http"):
fedora_url = urllib.parse.urljoin(self.base_url, fedora_url)
request = urllib.request.Request(fedora_url,
method=method)
request.add_header('Accept', 'text/turtle')
request.add_header('Content-Type', 'text/turtle')
if len(data) > 0:
request.data = data
try:
response = urllib.request.urlopen(request)
except urllib.error.URLError as err:
if hasattr(err, 'reason'):
print("failed to reach server at {} with {} method".format(
fedora_url,
request.method))
print("Reason: ", err.reason)
print("Data: ", data)
elif hasattr(err, 'code'):
print("Server error {}".format(err.code))
raise err
return response | python | def connect(self,
fedora_url,
data=None,
method='Get'):
"""Method attempts to connect to REST servers of the Fedora
Commons repository using optional data parameter.
Args:
fedora_url(string): Fedora URL
            data(dict): Data to pass through to the REST endpoint
method(str): REST Method, defaults to GET
Returns:
result(string): Response string from Fedora
"""
if data is None:
data = {}
if not fedora_url.startswith("http"):
fedora_url = urllib.parse.urljoin(self.base_url, fedora_url)
request = urllib.request.Request(fedora_url,
method=method)
request.add_header('Accept', 'text/turtle')
request.add_header('Content-Type', 'text/turtle')
if len(data) > 0:
request.data = data
try:
response = urllib.request.urlopen(request)
except urllib.error.URLError as err:
if hasattr(err, 'reason'):
print("failed to reach server at {} with {} method".format(
fedora_url,
request.method))
print("Reason: ", err.reason)
print("Data: ", data)
elif hasattr(err, 'code'):
print("Server error {}".format(err.code))
raise err
return response | ['def', 'connect', '(', 'self', ',', 'fedora_url', ',', 'data', '=', 'None', ',', 'method', '=', "'Get'", ')', ':', 'if', 'data', 'is', 'None', ':', 'data', '=', '{', '}', 'if', 'not', 'fedora_url', '.', 'startswith', '(', '"http"', ')', ':', 'fedora_url', '=', 'urllib', '.', 'parse', '.', 'urljoin', '(', 'self', '.', 'base_url', ',', 'fedora_url', ')', 'request', '=', 'urllib', '.', 'request', '.', 'Request', '(', 'fedora_url', ',', 'method', '=', 'method', ')', 'request', '.', 'add_header', '(', "'Accept'", ',', "'text/turtle'", ')', 'request', '.', 'add_header', '(', "'Content-Type'", ',', "'text/turtle'", ')', 'if', 'len', '(', 'data', ')', '>', '0', ':', 'request', '.', 'data', '=', 'data', 'try', ':', 'response', '=', 'urllib', '.', 'request', '.', 'urlopen', '(', 'request', ')', 'except', 'urllib', '.', 'error', '.', 'URLError', 'as', 'err', ':', 'if', 'hasattr', '(', 'err', ',', "'reason'", ')', ':', 'print', '(', '"failed to reach server at {} with {} method"', '.', 'format', '(', 'fedora_url', ',', 'request', '.', 'method', ')', ')', 'print', '(', '"Reason: "', ',', 'err', '.', 'reason', ')', 'print', '(', '"Data: "', ',', 'data', ')', 'elif', 'hasattr', '(', 'err', ',', "'code'", ')', ':', 'print', '(', '"Server error {}"', '.', 'format', '(', 'err', '.', 'code', ')', ')', 'raise', 'err', 'return', 'response'] | Method attempts to connect to REST servers of the Fedora
Commons repository using optional data parameter.
Args:
fedora_url(string): Fedora URL
            data(dict): Data to pass through to the REST endpoint
method(str): REST Method, defaults to GET
Returns:
result(string): Response string from Fedora | ['Method', 'attempts', 'to', 'connect', 'to', 'REST', 'servers', 'of', 'the', 'Fedora', 'Commons', 'repository', 'using', 'optional', 'data', 'parameter', '.'] | train | https://github.com/jermnelson/flask-fedora-commons/blob/81cee0d8c9e79fa2bdd1a101facb9e8c0f307af4/flask_fedora_commons/__init__.py#L223-L261 |
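A minimal usage sketch for the connect() entry above; the no-argument Repository construction and the object path are assumptions for illustration, not taken from this row.

from flask_fedora_commons import Repository

repo = Repository()  # assumed default construction; base_url should point at a Fedora REST endpoint
response = repo.connect('/rest/example-object', method='GET')
print(response.read().decode('utf-8'))  # connect() returns the raw urllib response object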
2,418 | deepmind/sonnet | sonnet/python/modules/conv.py | _ConvND.padding | def padding(self):
"""Returns the padding algorithm used, if this is the same for all dims.
Use `.paddings` if you want a tuple with the padding algorithm used for each
dimension.
Returns:
The padding algorithm used, if this is the same for all dimensions.
Raises:
ValueError: If different padding algorithms are used for different
dimensions.
"""
# This is for backwards compatibility -- previously only a single
# padding setting was supported across all dimensions.
if all(p == self._padding[0] for p in self._padding):
return self._padding[0]
else:
raise ValueError("This layer uses different paddings for different "
"dimensions. Use .paddings if you want a tuple of "
"per-dimension padding settings.") | python | def padding(self):
"""Returns the padding algorithm used, if this is the same for all dims.
Use `.paddings` if you want a tuple with the padding algorithm used for each
dimension.
Returns:
The padding algorithm used, if this is the same for all dimensions.
Raises:
ValueError: If different padding algorithms are used for different
dimensions.
"""
# This is for backwards compatibility -- previously only a single
# padding setting was supported across all dimensions.
if all(p == self._padding[0] for p in self._padding):
return self._padding[0]
else:
raise ValueError("This layer uses different paddings for different "
"dimensions. Use .paddings if you want a tuple of "
"per-dimension padding settings.") | ['def', 'padding', '(', 'self', ')', ':', '# This is for backwards compatibility -- previously only a single', '# padding setting was supported across all dimensions.', 'if', 'all', '(', 'p', '==', 'self', '.', '_padding', '[', '0', ']', 'for', 'p', 'in', 'self', '.', '_padding', ')', ':', 'return', 'self', '.', '_padding', '[', '0', ']', 'else', ':', 'raise', 'ValueError', '(', '"This layer uses different paddings for different "', '"dimensions. Use .paddings if you want a tuple of "', '"per-dimension padding settings."', ')'] | Returns the padding algorithm used, if this is the same for all dims.
Use `.paddings` if you want a tuple with the padding algorithm used for each
dimension.
Returns:
The padding algorithm used, if this is the same for all dimensions.
Raises:
ValueError: If different padding algorithms are used for different
dimensions. | ['Returns', 'the', 'padding', 'algorithm', 'used', 'if', 'this', 'is', 'the', 'same', 'for', 'all', 'dims', '.'] | train | https://github.com/deepmind/sonnet/blob/00612ca3178964d86b556e062694d808ff81fcca/sonnet/python/modules/conv.py#L739-L759 |
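A short sketch of reading the property above from a Sonnet v1 convolution module; the constructor arguments follow the usual snt.Conv2D signature and are assumptions rather than something shown in this row.

import sonnet as snt

conv = snt.Conv2D(output_channels=16, kernel_shape=3, padding='SAME')
print(conv.padding)  # 'SAME' -- the same algorithm on every dimension, so no ValueError is raised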
2,419 | iotile/coretools | iotilebuild/iotile/build/config/scons-local-3.0.1/SCons/Tool/docbook/__init__.py | __detect_cl_tool | def __detect_cl_tool(env, chainkey, cdict, cpriority=None):
"""
Helper function, picks a command line tool from the list
and initializes its environment variables.
"""
if env.get(chainkey,'') == '':
clpath = ''
if cpriority is None:
cpriority = cdict.keys()
for cltool in cpriority:
if __debug_tool_location:
print("DocBook: Looking for %s"%cltool)
clpath = env.WhereIs(cltool)
if clpath:
if __debug_tool_location:
print("DocBook: Found:%s"%cltool)
env[chainkey] = clpath
if not env[chainkey + 'COM']:
env[chainkey + 'COM'] = cdict[cltool]
break | python | def __detect_cl_tool(env, chainkey, cdict, cpriority=None):
"""
Helper function, picks a command line tool from the list
and initializes its environment variables.
"""
if env.get(chainkey,'') == '':
clpath = ''
if cpriority is None:
cpriority = cdict.keys()
for cltool in cpriority:
if __debug_tool_location:
print("DocBook: Looking for %s"%cltool)
clpath = env.WhereIs(cltool)
if clpath:
if __debug_tool_location:
print("DocBook: Found:%s"%cltool)
env[chainkey] = clpath
if not env[chainkey + 'COM']:
env[chainkey + 'COM'] = cdict[cltool]
break | ['def', '__detect_cl_tool', '(', 'env', ',', 'chainkey', ',', 'cdict', ',', 'cpriority', '=', 'None', ')', ':', 'if', 'env', '.', 'get', '(', 'chainkey', ',', "''", ')', '==', "''", ':', 'clpath', '=', "''", 'if', 'cpriority', 'is', 'None', ':', 'cpriority', '=', 'cdict', '.', 'keys', '(', ')', 'for', 'cltool', 'in', 'cpriority', ':', 'if', '__debug_tool_location', ':', 'print', '(', '"DocBook: Looking for %s"', '%', 'cltool', ')', 'clpath', '=', 'env', '.', 'WhereIs', '(', 'cltool', ')', 'if', 'clpath', ':', 'if', '__debug_tool_location', ':', 'print', '(', '"DocBook: Found:%s"', '%', 'cltool', ')', 'env', '[', 'chainkey', ']', '=', 'clpath', 'if', 'not', 'env', '[', 'chainkey', '+', "'COM'", ']', ':', 'env', '[', 'chainkey', '+', "'COM'", ']', '=', 'cdict', '[', 'cltool', ']', 'break'] | Helper function, picks a command line tool from the list
and initializes its environment variables. | ['Helper', 'function', 'picks', 'a', 'command', 'line', 'tool', 'from', 'the', 'list', 'and', 'initializes', 'its', 'environment', 'variables', '.'] | train | https://github.com/iotile/coretools/blob/2d794f5f1346b841b0dcd16c9d284e9bf2f3c6ec/iotilebuild/iotile/build/config/scons-local-3.0.1/SCons/Tool/docbook/__init__.py#L176-L196 |
2,420 | pypa/pipenv | pipenv/vendor/requirementslib/models/utils.py | clean_requires_python | def clean_requires_python(candidates):
"""Get a cleaned list of all the candidates with valid specifiers in the `requires_python` attributes."""
all_candidates = []
sys_version = ".".join(map(str, sys.version_info[:3]))
from packaging.version import parse as parse_version
py_version = parse_version(os.environ.get("PIP_PYTHON_VERSION", sys_version))
for c in candidates:
from_location = attrgetter("location.requires_python")
requires_python = getattr(c, "requires_python", from_location(c))
if requires_python:
# Old specifications had people setting this to single digits
# which is effectively the same as '>=digit,<digit+1'
if requires_python.isdigit():
requires_python = ">={0},<{1}".format(
requires_python, int(requires_python) + 1
)
try:
specifierset = SpecifierSet(requires_python)
except InvalidSpecifier:
continue
else:
if not specifierset.contains(py_version):
continue
all_candidates.append(c)
return all_candidates | python | def clean_requires_python(candidates):
"""Get a cleaned list of all the candidates with valid specifiers in the `requires_python` attributes."""
all_candidates = []
sys_version = ".".join(map(str, sys.version_info[:3]))
from packaging.version import parse as parse_version
py_version = parse_version(os.environ.get("PIP_PYTHON_VERSION", sys_version))
for c in candidates:
from_location = attrgetter("location.requires_python")
requires_python = getattr(c, "requires_python", from_location(c))
if requires_python:
# Old specifications had people setting this to single digits
# which is effectively the same as '>=digit,<digit+1'
if requires_python.isdigit():
requires_python = ">={0},<{1}".format(
requires_python, int(requires_python) + 1
)
try:
specifierset = SpecifierSet(requires_python)
except InvalidSpecifier:
continue
else:
if not specifierset.contains(py_version):
continue
all_candidates.append(c)
return all_candidates | ['def', 'clean_requires_python', '(', 'candidates', ')', ':', 'all_candidates', '=', '[', ']', 'sys_version', '=', '"."', '.', 'join', '(', 'map', '(', 'str', ',', 'sys', '.', 'version_info', '[', ':', '3', ']', ')', ')', 'from', 'packaging', '.', 'version', 'import', 'parse', 'as', 'parse_version', 'py_version', '=', 'parse_version', '(', 'os', '.', 'environ', '.', 'get', '(', '"PIP_PYTHON_VERSION"', ',', 'sys_version', ')', ')', 'for', 'c', 'in', 'candidates', ':', 'from_location', '=', 'attrgetter', '(', '"location.requires_python"', ')', 'requires_python', '=', 'getattr', '(', 'c', ',', '"requires_python"', ',', 'from_location', '(', 'c', ')', ')', 'if', 'requires_python', ':', '# Old specifications had people setting this to single digits', "# which is effectively the same as '>=digit,<digit+1'", 'if', 'requires_python', '.', 'isdigit', '(', ')', ':', 'requires_python', '=', '">={0},<{1}"', '.', 'format', '(', 'requires_python', ',', 'int', '(', 'requires_python', ')', '+', '1', ')', 'try', ':', 'specifierset', '=', 'SpecifierSet', '(', 'requires_python', ')', 'except', 'InvalidSpecifier', ':', 'continue', 'else', ':', 'if', 'not', 'specifierset', '.', 'contains', '(', 'py_version', ')', ':', 'continue', 'all_candidates', '.', 'append', '(', 'c', ')', 'return', 'all_candidates'] | Get a cleaned list of all the candidates with valid specifiers in the `requires_python` attributes. | ['Get', 'a', 'cleaned', 'list', 'of', 'all', 'the', 'candidates', 'with', 'valid', 'specifiers', 'in', 'the', 'requires_python', 'attributes', '.'] | train | https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/vendor/requirementslib/models/utils.py#L803-L828 |
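The filtering rule above can be exercised on its own with the packaging library; the helper below is a stand-alone sketch of the same check, not code taken from pipenv.

from packaging.specifiers import SpecifierSet, InvalidSpecifier
from packaging.version import parse as parse_version

def supports_python(requires_python, running='3.7.3'):
    # Mirrors the logic above: bare digits widen to '>=N,<N+1'; unparsable
    # specifiers and non-matching interpreter versions both exclude a candidate.
    if not requires_python:
        return True
    if requires_python.isdigit():
        requires_python = '>={0},<{1}'.format(requires_python, int(requires_python) + 1)
    try:
        return SpecifierSet(requires_python).contains(parse_version(running))
    except InvalidSpecifier:
        return False

print(supports_python('3'))            # True for a 3.x interpreter
print(supports_python('>=2.7,<3.0'))   # False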
2,421 | ArchiveTeam/wpull | wpull/network/pool.py | ConnectionPool.acquire | def acquire(self, host: str, port: int, use_ssl: bool=False,
host_key: Optional[Any]=None) \
-> Union[Connection, SSLConnection]:
'''Return an available connection.
Args:
host: A hostname or IP address.
port: Port number.
use_ssl: Whether to return a SSL connection.
host_key: If provided, it overrides the key used for per-host
connection pooling. This is useful for proxies for example.
Coroutine.
'''
assert isinstance(port, int), 'Expect int. Got {}'.format(type(port))
assert not self._closed
yield from self._process_no_wait_releases()
if use_ssl:
connection_factory = functools.partial(
self._ssl_connection_factory, hostname=host)
else:
connection_factory = functools.partial(
self._connection_factory, hostname=host)
connection_factory = functools.partial(
HappyEyeballsConnection, (host, port), connection_factory,
self._resolver, self._happy_eyeballs_table,
is_ssl=use_ssl
)
key = host_key or (host, port, use_ssl)
with (yield from self._host_pools_lock):
if key not in self._host_pools:
host_pool = self._host_pools[key] = HostPool(
connection_factory,
max_connections=self._max_host_count
)
self._host_pool_waiters[key] = 1
else:
host_pool = self._host_pools[key]
self._host_pool_waiters[key] += 1
_logger.debug('Check out %s', key)
connection = yield from host_pool.acquire()
connection.key = key
# TODO: Verify this assert is always true
# assert host_pool.count() <= host_pool.max_connections
# assert key in self._host_pools
# assert self._host_pools[key] == host_pool
with (yield from self._host_pools_lock):
self._host_pool_waiters[key] -= 1
return connection | python | def acquire(self, host: str, port: int, use_ssl: bool=False,
host_key: Optional[Any]=None) \
-> Union[Connection, SSLConnection]:
'''Return an available connection.
Args:
host: A hostname or IP address.
port: Port number.
use_ssl: Whether to return a SSL connection.
host_key: If provided, it overrides the key used for per-host
connection pooling. This is useful for proxies for example.
Coroutine.
'''
assert isinstance(port, int), 'Expect int. Got {}'.format(type(port))
assert not self._closed
yield from self._process_no_wait_releases()
if use_ssl:
connection_factory = functools.partial(
self._ssl_connection_factory, hostname=host)
else:
connection_factory = functools.partial(
self._connection_factory, hostname=host)
connection_factory = functools.partial(
HappyEyeballsConnection, (host, port), connection_factory,
self._resolver, self._happy_eyeballs_table,
is_ssl=use_ssl
)
key = host_key or (host, port, use_ssl)
with (yield from self._host_pools_lock):
if key not in self._host_pools:
host_pool = self._host_pools[key] = HostPool(
connection_factory,
max_connections=self._max_host_count
)
self._host_pool_waiters[key] = 1
else:
host_pool = self._host_pools[key]
self._host_pool_waiters[key] += 1
_logger.debug('Check out %s', key)
connection = yield from host_pool.acquire()
connection.key = key
# TODO: Verify this assert is always true
# assert host_pool.count() <= host_pool.max_connections
# assert key in self._host_pools
# assert self._host_pools[key] == host_pool
with (yield from self._host_pools_lock):
self._host_pool_waiters[key] -= 1
return connection | ['def', 'acquire', '(', 'self', ',', 'host', ':', 'str', ',', 'port', ':', 'int', ',', 'use_ssl', ':', 'bool', '=', 'False', ',', 'host_key', ':', 'Optional', '[', 'Any', ']', '=', 'None', ')', '->', 'Union', '[', 'Connection', ',', 'SSLConnection', ']', ':', 'assert', 'isinstance', '(', 'port', ',', 'int', ')', ',', "'Expect int. Got {}'", '.', 'format', '(', 'type', '(', 'port', ')', ')', 'assert', 'not', 'self', '.', '_closed', 'yield', 'from', 'self', '.', '_process_no_wait_releases', '(', ')', 'if', 'use_ssl', ':', 'connection_factory', '=', 'functools', '.', 'partial', '(', 'self', '.', '_ssl_connection_factory', ',', 'hostname', '=', 'host', ')', 'else', ':', 'connection_factory', '=', 'functools', '.', 'partial', '(', 'self', '.', '_connection_factory', ',', 'hostname', '=', 'host', ')', 'connection_factory', '=', 'functools', '.', 'partial', '(', 'HappyEyeballsConnection', ',', '(', 'host', ',', 'port', ')', ',', 'connection_factory', ',', 'self', '.', '_resolver', ',', 'self', '.', '_happy_eyeballs_table', ',', 'is_ssl', '=', 'use_ssl', ')', 'key', '=', 'host_key', 'or', '(', 'host', ',', 'port', ',', 'use_ssl', ')', 'with', '(', 'yield', 'from', 'self', '.', '_host_pools_lock', ')', ':', 'if', 'key', 'not', 'in', 'self', '.', '_host_pools', ':', 'host_pool', '=', 'self', '.', '_host_pools', '[', 'key', ']', '=', 'HostPool', '(', 'connection_factory', ',', 'max_connections', '=', 'self', '.', '_max_host_count', ')', 'self', '.', '_host_pool_waiters', '[', 'key', ']', '=', '1', 'else', ':', 'host_pool', '=', 'self', '.', '_host_pools', '[', 'key', ']', 'self', '.', '_host_pool_waiters', '[', 'key', ']', '+=', '1', '_logger', '.', 'debug', '(', "'Check out %s'", ',', 'key', ')', 'connection', '=', 'yield', 'from', 'host_pool', '.', 'acquire', '(', ')', 'connection', '.', 'key', '=', 'key', '# TODO: Verify this assert is always true', '# assert host_pool.count() <= host_pool.max_connections', '# assert key in self._host_pools', '# assert self._host_pools[key] == host_pool', 'with', '(', 'yield', 'from', 'self', '.', '_host_pools_lock', ')', ':', 'self', '.', '_host_pool_waiters', '[', 'key', ']', '-=', '1', 'return', 'connection'] | Return an available connection.
Args:
host: A hostname or IP address.
port: Port number.
use_ssl: Whether to return a SSL connection.
host_key: If provided, it overrides the key used for per-host
connection pooling. This is useful for proxies for example.
Coroutine. | ['Return', 'an', 'available', 'connection', '.'] | train | https://github.com/ArchiveTeam/wpull/blob/ddf051aa3322479325ba20aa778cb2cb97606bf5/wpull/network/pool.py#L153-L211 |
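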
2,422 | googleapis/google-cloud-python | pubsub/google/cloud/pubsub_v1/_gapic.py | add_methods | def add_methods(source_class, blacklist=()):
"""Add wrapped versions of the `api` member's methods to the class.
Any methods passed in `blacklist` are not added.
Additionally, any methods explicitly defined on the wrapped class are
not added.
"""
def wrap(wrapped_fx):
"""Wrap a GAPIC method; preserve its name and docstring."""
# If this is a static or class method, then we need to *not*
# send self as the first argument.
#
# Similarly, for instance methods, we need to send self.api rather
# than self, since that is where the actual methods were declared.
instance_method = True
# If this is a bound method it's a classmethod.
self = getattr(wrapped_fx, "__self__", None)
if issubclass(type(self), type):
instance_method = False
# Okay, we have figured out what kind of method this is; send
# down the correct wrapper function.
if instance_method:
fx = lambda self, *a, **kw: wrapped_fx(self.api, *a, **kw) # noqa
return functools.wraps(wrapped_fx)(fx)
fx = lambda *a, **kw: wrapped_fx(*a, **kw) # noqa
return staticmethod(functools.wraps(wrapped_fx)(fx))
def actual_decorator(cls):
# Reflectively iterate over most of the methods on the source class
# (the GAPIC) and make wrapped versions available on this client.
for name in dir(source_class):
# Ignore all private and magic methods.
if name.startswith("_"):
continue
# Ignore anything on our blacklist.
if name in blacklist:
continue
# Retrieve the attribute, and ignore it if it is not callable.
attr = getattr(source_class, name)
if not callable(attr):
continue
# Add a wrapper method to this object.
fx = wrap(getattr(source_class, name))
setattr(cls, name, fx)
# Return the augmented class.
return cls
# Simply return the actual decorator; this is returned from this method
# and actually used to decorate the class.
return actual_decorator | python | def add_methods(source_class, blacklist=()):
"""Add wrapped versions of the `api` member's methods to the class.
Any methods passed in `blacklist` are not added.
Additionally, any methods explicitly defined on the wrapped class are
not added.
"""
def wrap(wrapped_fx):
"""Wrap a GAPIC method; preserve its name and docstring."""
# If this is a static or class method, then we need to *not*
# send self as the first argument.
#
# Similarly, for instance methods, we need to send self.api rather
# than self, since that is where the actual methods were declared.
instance_method = True
# If this is a bound method it's a classmethod.
self = getattr(wrapped_fx, "__self__", None)
if issubclass(type(self), type):
instance_method = False
# Okay, we have figured out what kind of method this is; send
# down the correct wrapper function.
if instance_method:
fx = lambda self, *a, **kw: wrapped_fx(self.api, *a, **kw) # noqa
return functools.wraps(wrapped_fx)(fx)
fx = lambda *a, **kw: wrapped_fx(*a, **kw) # noqa
return staticmethod(functools.wraps(wrapped_fx)(fx))
def actual_decorator(cls):
# Reflectively iterate over most of the methods on the source class
# (the GAPIC) and make wrapped versions available on this client.
for name in dir(source_class):
# Ignore all private and magic methods.
if name.startswith("_"):
continue
# Ignore anything on our blacklist.
if name in blacklist:
continue
# Retrieve the attribute, and ignore it if it is not callable.
attr = getattr(source_class, name)
if not callable(attr):
continue
# Add a wrapper method to this object.
fx = wrap(getattr(source_class, name))
setattr(cls, name, fx)
# Return the augmented class.
return cls
# Simply return the actual decorator; this is returned from this method
# and actually used to decorate the class.
return actual_decorator | ['def', 'add_methods', '(', 'source_class', ',', 'blacklist', '=', '(', ')', ')', ':', 'def', 'wrap', '(', 'wrapped_fx', ')', ':', '"""Wrap a GAPIC method; preserve its name and docstring."""', '# If this is a static or class method, then we need to *not*', '# send self as the first argument.', '#', '# Similarly, for instance methods, we need to send self.api rather', '# than self, since that is where the actual methods were declared.', 'instance_method', '=', 'True', "# If this is a bound method it's a classmethod.", 'self', '=', 'getattr', '(', 'wrapped_fx', ',', '"__self__"', ',', 'None', ')', 'if', 'issubclass', '(', 'type', '(', 'self', ')', ',', 'type', ')', ':', 'instance_method', '=', 'False', '# Okay, we have figured out what kind of method this is; send', '# down the correct wrapper function.', 'if', 'instance_method', ':', 'fx', '=', 'lambda', 'self', ',', '*', 'a', ',', '*', '*', 'kw', ':', 'wrapped_fx', '(', 'self', '.', 'api', ',', '*', 'a', ',', '*', '*', 'kw', ')', '# noqa', 'return', 'functools', '.', 'wraps', '(', 'wrapped_fx', ')', '(', 'fx', ')', 'fx', '=', 'lambda', '*', 'a', ',', '*', '*', 'kw', ':', 'wrapped_fx', '(', '*', 'a', ',', '*', '*', 'kw', ')', '# noqa', 'return', 'staticmethod', '(', 'functools', '.', 'wraps', '(', 'wrapped_fx', ')', '(', 'fx', ')', ')', 'def', 'actual_decorator', '(', 'cls', ')', ':', '# Reflectively iterate over most of the methods on the source class', '# (the GAPIC) and make wrapped versions available on this client.', 'for', 'name', 'in', 'dir', '(', 'source_class', ')', ':', '# Ignore all private and magic methods.', 'if', 'name', '.', 'startswith', '(', '"_"', ')', ':', 'continue', '# Ignore anything on our blacklist.', 'if', 'name', 'in', 'blacklist', ':', 'continue', '# Retrieve the attribute, and ignore it if it is not callable.', 'attr', '=', 'getattr', '(', 'source_class', ',', 'name', ')', 'if', 'not', 'callable', '(', 'attr', ')', ':', 'continue', '# Add a wrapper method to this object.', 'fx', '=', 'wrap', '(', 'getattr', '(', 'source_class', ',', 'name', ')', ')', 'setattr', '(', 'cls', ',', 'name', ',', 'fx', ')', '# Return the augmented class.', 'return', 'cls', '# Simply return the actual decorator; this is returned from this method', '# and actually used to decorate the class.', 'return', 'actual_decorator'] | Add wrapped versions of the `api` member's methods to the class.
Any methods passed in `blacklist` are not added.
Additionally, any methods explicitly defined on the wrapped class are
not added. | ['Add', 'wrapped', 'versions', 'of', 'the', 'api', 'member', 's', 'methods', 'to', 'the', 'class', '.'] | train | https://github.com/googleapis/google-cloud-python/blob/85e80125a59cb10f8cb105f25ecc099e4b940b50/pubsub/google/cloud/pubsub_v1/_gapic.py#L20-L77 |
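A self-contained illustration of the decorator above; SampleGapic is a made-up stand-in for a generated GAPIC class, not a real google-cloud type.

from google.cloud.pubsub_v1._gapic import add_methods

class SampleGapic(object):
    def create_topic(self, name):
        return 'created {}'.format(name)

@add_methods(SampleGapic, blacklist=('close',))
class PublisherClient(object):
    def __init__(self):
        self.api = SampleGapic()

client = PublisherClient()
print(client.create_topic('projects/p/topics/t'))  # delegates through client.api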
2,423 | AmanoTeam/amanobot | amanobot/__init__.py | Bot.sendDocument | def sendDocument(self, chat_id, document,
thumb=None,
caption=None,
parse_mode=None,
disable_notification=None,
reply_to_message_id=None,
reply_markup=None):
"""
See: https://core.telegram.org/bots/api#senddocument
:param document: Same as ``photo`` in :meth:`amanobot.Bot.sendPhoto`
"""
p = _strip(locals(), more=['document'])
return self._api_request_with_file('sendDocument', _rectify(p), 'document', document) | python | def sendDocument(self, chat_id, document,
thumb=None,
caption=None,
parse_mode=None,
disable_notification=None,
reply_to_message_id=None,
reply_markup=None):
"""
See: https://core.telegram.org/bots/api#senddocument
:param document: Same as ``photo`` in :meth:`amanobot.Bot.sendPhoto`
"""
p = _strip(locals(), more=['document'])
return self._api_request_with_file('sendDocument', _rectify(p), 'document', document) | ['def', 'sendDocument', '(', 'self', ',', 'chat_id', ',', 'document', ',', 'thumb', '=', 'None', ',', 'caption', '=', 'None', ',', 'parse_mode', '=', 'None', ',', 'disable_notification', '=', 'None', ',', 'reply_to_message_id', '=', 'None', ',', 'reply_markup', '=', 'None', ')', ':', 'p', '=', '_strip', '(', 'locals', '(', ')', ',', 'more', '=', '[', "'document'", ']', ')', 'return', 'self', '.', '_api_request_with_file', '(', "'sendDocument'", ',', '_rectify', '(', 'p', ')', ',', "'document'", ',', 'document', ')'] | See: https://core.telegram.org/bots/api#senddocument
:param document: Same as ``photo`` in :meth:`amanobot.Bot.sendPhoto` | ['See', ':', 'https', ':', '//', 'core', '.', 'telegram', '.', 'org', '/', 'bots', '/', 'api#senddocument'] | train | https://github.com/AmanoTeam/amanobot/blob/fe546e2e294eec88e637da0b2567c7e7e8662437/amanobot/__init__.py#L564-L577 |
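A quick call sketch for the method above; the bot token, chat id, and file name are placeholders to fill in yourself.

import amanobot

bot = amanobot.Bot('123456:BOT-TOKEN-PLACEHOLDER')
with open('report.pdf', 'rb') as document:
    bot.sendDocument(chat_id=123456789, document=document, caption='Monthly report')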
2,424 | inveniosoftware/invenio-userprofiles | invenio_userprofiles/views.py | handle_verification_form | def handle_verification_form(form):
"""Handle email sending verification form."""
form.process(formdata=request.form)
if form.validate_on_submit():
send_confirmation_instructions(current_user)
# NOTE: Flash message.
flash(_("Verification email sent."), category="success") | python | def handle_verification_form(form):
"""Handle email sending verification form."""
form.process(formdata=request.form)
if form.validate_on_submit():
send_confirmation_instructions(current_user)
# NOTE: Flash message.
flash(_("Verification email sent."), category="success") | ['def', 'handle_verification_form', '(', 'form', ')', ':', 'form', '.', 'process', '(', 'formdata', '=', 'request', '.', 'form', ')', 'if', 'form', '.', 'validate_on_submit', '(', ')', ':', 'send_confirmation_instructions', '(', 'current_user', ')', '# NOTE: Flash message.', 'flash', '(', '_', '(', '"Verification email sent."', ')', ',', 'category', '=', '"success"', ')'] | Handle email sending verification form. | ['Handle', 'email', 'sending', 'verification', 'form', '.'] | train | https://github.com/inveniosoftware/invenio-userprofiles/blob/4c682e7d67a4cab8dc38472a31fa1c34cbba03dd/invenio_userprofiles/views.py#L123-L130 |
2,425 | heronotears/lazyxml | lazyxml/builder.py | Builder.build_tree | def build_tree(self, data, tagname, attrs=None, depth=0):
r"""Build xml tree.
        :param data: data used to build the xml.
:param tagname: element tag name.
:param attrs: element attributes. Default:``None``.
:type attrs: dict or None
:param depth: element depth of the hierarchy. Default:``0``.
:type depth: int
"""
if data is None:
data = ''
indent = ('\n%s' % (self.__options['indent'] * depth)) if self.__options['indent'] else ''
if isinstance(data, utils.DictTypes):
if self.__options['hasattr'] and self.check_structure(data.keys()):
attrs, values = self.pickdata(data)
self.build_tree(values, tagname, attrs, depth)
else:
self.__tree.append('%s%s' % (indent, self.tag_start(tagname, attrs)))
iter = data.iteritems()
if self.__options['ksort']:
iter = sorted(iter, key=lambda x:x[0], reverse=self.__options['reverse'])
for k, v in iter:
attrs = {}
if self.__options['hasattr'] and isinstance(v, utils.DictTypes) and self.check_structure(v.keys()):
attrs, v = self.pickdata(v)
self.build_tree(v, k, attrs, depth+1)
self.__tree.append('%s%s' % (indent, self.tag_end(tagname)))
elif utils.is_iterable(data):
for v in data:
self.build_tree(v, tagname, attrs, depth)
else:
self.__tree.append(indent)
data = self.safedata(data, self.__options['cdata'])
self.__tree.append(self.build_tag(tagname, data, attrs)) | python | def build_tree(self, data, tagname, attrs=None, depth=0):
r"""Build xml tree.
        :param data: data used to build the xml.
:param tagname: element tag name.
:param attrs: element attributes. Default:``None``.
:type attrs: dict or None
:param depth: element depth of the hierarchy. Default:``0``.
:type depth: int
"""
if data is None:
data = ''
indent = ('\n%s' % (self.__options['indent'] * depth)) if self.__options['indent'] else ''
if isinstance(data, utils.DictTypes):
if self.__options['hasattr'] and self.check_structure(data.keys()):
attrs, values = self.pickdata(data)
self.build_tree(values, tagname, attrs, depth)
else:
self.__tree.append('%s%s' % (indent, self.tag_start(tagname, attrs)))
iter = data.iteritems()
if self.__options['ksort']:
iter = sorted(iter, key=lambda x:x[0], reverse=self.__options['reverse'])
for k, v in iter:
attrs = {}
if self.__options['hasattr'] and isinstance(v, utils.DictTypes) and self.check_structure(v.keys()):
attrs, v = self.pickdata(v)
self.build_tree(v, k, attrs, depth+1)
self.__tree.append('%s%s' % (indent, self.tag_end(tagname)))
elif utils.is_iterable(data):
for v in data:
self.build_tree(v, tagname, attrs, depth)
else:
self.__tree.append(indent)
data = self.safedata(data, self.__options['cdata'])
self.__tree.append(self.build_tag(tagname, data, attrs)) | ['def', 'build_tree', '(', 'self', ',', 'data', ',', 'tagname', ',', 'attrs', '=', 'None', ',', 'depth', '=', '0', ')', ':', 'if', 'data', 'is', 'None', ':', 'data', '=', "''", 'indent', '=', '(', "'\\n%s'", '%', '(', 'self', '.', '__options', '[', "'indent'", ']', '*', 'depth', ')', ')', 'if', 'self', '.', '__options', '[', "'indent'", ']', 'else', "''", 'if', 'isinstance', '(', 'data', ',', 'utils', '.', 'DictTypes', ')', ':', 'if', 'self', '.', '__options', '[', "'hasattr'", ']', 'and', 'self', '.', 'check_structure', '(', 'data', '.', 'keys', '(', ')', ')', ':', 'attrs', ',', 'values', '=', 'self', '.', 'pickdata', '(', 'data', ')', 'self', '.', 'build_tree', '(', 'values', ',', 'tagname', ',', 'attrs', ',', 'depth', ')', 'else', ':', 'self', '.', '__tree', '.', 'append', '(', "'%s%s'", '%', '(', 'indent', ',', 'self', '.', 'tag_start', '(', 'tagname', ',', 'attrs', ')', ')', ')', 'iter', '=', 'data', '.', 'iteritems', '(', ')', 'if', 'self', '.', '__options', '[', "'ksort'", ']', ':', 'iter', '=', 'sorted', '(', 'iter', ',', 'key', '=', 'lambda', 'x', ':', 'x', '[', '0', ']', ',', 'reverse', '=', 'self', '.', '__options', '[', "'reverse'", ']', ')', 'for', 'k', ',', 'v', 'in', 'iter', ':', 'attrs', '=', '{', '}', 'if', 'self', '.', '__options', '[', "'hasattr'", ']', 'and', 'isinstance', '(', 'v', ',', 'utils', '.', 'DictTypes', ')', 'and', 'self', '.', 'check_structure', '(', 'v', '.', 'keys', '(', ')', ')', ':', 'attrs', ',', 'v', '=', 'self', '.', 'pickdata', '(', 'v', ')', 'self', '.', 'build_tree', '(', 'v', ',', 'k', ',', 'attrs', ',', 'depth', '+', '1', ')', 'self', '.', '__tree', '.', 'append', '(', "'%s%s'", '%', '(', 'indent', ',', 'self', '.', 'tag_end', '(', 'tagname', ')', ')', ')', 'elif', 'utils', '.', 'is_iterable', '(', 'data', ')', ':', 'for', 'v', 'in', 'data', ':', 'self', '.', 'build_tree', '(', 'v', ',', 'tagname', ',', 'attrs', ',', 'depth', ')', 'else', ':', 'self', '.', '__tree', '.', 'append', '(', 'indent', ')', 'data', '=', 'self', '.', 'safedata', '(', 'data', ',', 'self', '.', '__options', '[', "'cdata'", ']', ')', 'self', '.', '__tree', '.', 'append', '(', 'self', '.', 'build_tag', '(', 'tagname', ',', 'data', ',', 'attrs', ')', ')'] | r"""Build xml tree.
        :param data: data used to build the xml.
:param tagname: element tag name.
:param attrs: element attributes. Default:``None``.
:type attrs: dict or None
:param depth: element depth of the hierarchy. Default:``0``.
:type depth: int | ['r', 'Build', 'xml', 'tree', '.'] | train | https://github.com/heronotears/lazyxml/blob/e3f1ebd3f34cfa03d022ddec90e17d60c1c81953/lazyxml/builder.py#L94-L128 |
2,426 | lago-project/lago | lago/utils.py | _run_command | def _run_command(
command,
input_data=None,
stdin=None,
out_pipe=subprocess.PIPE,
err_pipe=subprocess.PIPE,
env=None,
uuid=None,
**kwargs
):
"""
Runs a command
Args:
command(list of str): args of the command to execute, including the
            command itself as command[0], e.g. `['ls', '-l']`
input_data(str): If passed, will feed that data to the subprocess
through stdin
out_pipe(int or file): File descriptor as passed to
:ref:subprocess.Popen to use as stdout
stdin(int or file): File descriptor as passed to
:ref:subprocess.Popen to use as stdin
err_pipe(int or file): File descriptor as passed to
:ref:subprocess.Popen to use as stderr
env(dict of str:str): If set, will use the given dict as env for the
subprocess
uuid(uuid): If set the command will be logged with the given uuid
converted to string, otherwise, a uuid v4 will be generated.
**kwargs: Any other keyword args passed will be passed to the
:ref:subprocess.Popen call
Returns:
lago.utils.CommandStatus: result of the interactive execution
"""
# add libexec to PATH if needed
if uuid is None:
uuid = uuid_m.uuid4()
if constants.LIBEXEC_DIR not in os.environ['PATH'].split(':'):
os.environ['PATH'
] = '%s:%s' % (constants.LIBEXEC_DIR, os.environ['PATH'])
if input_data and not stdin:
kwargs['stdin'] = subprocess.PIPE
elif stdin:
kwargs['stdin'] = stdin
if env is None:
env = os.environ.copy()
else:
env['PATH'] = ':'.join(
list(
set(
env.get('PATH', '').split(':') + os.environ['PATH']
.split(':')
),
),
)
popen = subprocess.Popen(
' '.join('"%s"' % arg for arg in command),
stdout=out_pipe,
stderr=err_pipe,
shell=True,
env=env,
**kwargs
)
out, err = popen.communicate(input_data)
LOGGER.debug(
'%s: command exit with return code: %d', str(uuid), popen.returncode
)
if out:
LOGGER.debug('%s: command stdout: %s', str(uuid), out)
if err:
LOGGER.debug('%s: command stderr: %s', str(uuid), err)
return CommandStatus(popen.returncode, out, err) | python | def _run_command(
command,
input_data=None,
stdin=None,
out_pipe=subprocess.PIPE,
err_pipe=subprocess.PIPE,
env=None,
uuid=None,
**kwargs
):
"""
Runs a command
Args:
command(list of str): args of the command to execute, including the
            command itself as command[0], e.g. `['ls', '-l']`
input_data(str): If passed, will feed that data to the subprocess
through stdin
out_pipe(int or file): File descriptor as passed to
:ref:subprocess.Popen to use as stdout
stdin(int or file): File descriptor as passed to
:ref:subprocess.Popen to use as stdin
err_pipe(int or file): File descriptor as passed to
:ref:subprocess.Popen to use as stderr
env(dict of str:str): If set, will use the given dict as env for the
subprocess
uuid(uuid): If set the command will be logged with the given uuid
converted to string, otherwise, a uuid v4 will be generated.
**kwargs: Any other keyword args passed will be passed to the
:ref:subprocess.Popen call
Returns:
lago.utils.CommandStatus: result of the interactive execution
"""
# add libexec to PATH if needed
if uuid is None:
uuid = uuid_m.uuid4()
if constants.LIBEXEC_DIR not in os.environ['PATH'].split(':'):
os.environ['PATH'
] = '%s:%s' % (constants.LIBEXEC_DIR, os.environ['PATH'])
if input_data and not stdin:
kwargs['stdin'] = subprocess.PIPE
elif stdin:
kwargs['stdin'] = stdin
if env is None:
env = os.environ.copy()
else:
env['PATH'] = ':'.join(
list(
set(
env.get('PATH', '').split(':') + os.environ['PATH']
.split(':')
),
),
)
popen = subprocess.Popen(
' '.join('"%s"' % arg for arg in command),
stdout=out_pipe,
stderr=err_pipe,
shell=True,
env=env,
**kwargs
)
out, err = popen.communicate(input_data)
LOGGER.debug(
'%s: command exit with return code: %d', str(uuid), popen.returncode
)
if out:
LOGGER.debug('%s: command stdout: %s', str(uuid), out)
if err:
LOGGER.debug('%s: command stderr: %s', str(uuid), err)
return CommandStatus(popen.returncode, out, err) | ['def', '_run_command', '(', 'command', ',', 'input_data', '=', 'None', ',', 'stdin', '=', 'None', ',', 'out_pipe', '=', 'subprocess', '.', 'PIPE', ',', 'err_pipe', '=', 'subprocess', '.', 'PIPE', ',', 'env', '=', 'None', ',', 'uuid', '=', 'None', ',', '*', '*', 'kwargs', ')', ':', '# add libexec to PATH if needed', 'if', 'uuid', 'is', 'None', ':', 'uuid', '=', 'uuid_m', '.', 'uuid4', '(', ')', 'if', 'constants', '.', 'LIBEXEC_DIR', 'not', 'in', 'os', '.', 'environ', '[', "'PATH'", ']', '.', 'split', '(', "':'", ')', ':', 'os', '.', 'environ', '[', "'PATH'", ']', '=', "'%s:%s'", '%', '(', 'constants', '.', 'LIBEXEC_DIR', ',', 'os', '.', 'environ', '[', "'PATH'", ']', ')', 'if', 'input_data', 'and', 'not', 'stdin', ':', 'kwargs', '[', "'stdin'", ']', '=', 'subprocess', '.', 'PIPE', 'elif', 'stdin', ':', 'kwargs', '[', "'stdin'", ']', '=', 'stdin', 'if', 'env', 'is', 'None', ':', 'env', '=', 'os', '.', 'environ', '.', 'copy', '(', ')', 'else', ':', 'env', '[', "'PATH'", ']', '=', "':'", '.', 'join', '(', 'list', '(', 'set', '(', 'env', '.', 'get', '(', "'PATH'", ',', "''", ')', '.', 'split', '(', "':'", ')', '+', 'os', '.', 'environ', '[', "'PATH'", ']', '.', 'split', '(', "':'", ')', ')', ',', ')', ',', ')', 'popen', '=', 'subprocess', '.', 'Popen', '(', "' '", '.', 'join', '(', '\'"%s"\'', '%', 'arg', 'for', 'arg', 'in', 'command', ')', ',', 'stdout', '=', 'out_pipe', ',', 'stderr', '=', 'err_pipe', ',', 'shell', '=', 'True', ',', 'env', '=', 'env', ',', '*', '*', 'kwargs', ')', 'out', ',', 'err', '=', 'popen', '.', 'communicate', '(', 'input_data', ')', 'LOGGER', '.', 'debug', '(', "'%s: command exit with return code: %d'", ',', 'str', '(', 'uuid', ')', ',', 'popen', '.', 'returncode', ')', 'if', 'out', ':', 'LOGGER', '.', 'debug', '(', "'%s: command stdout: %s'", ',', 'str', '(', 'uuid', ')', ',', 'out', ')', 'if', 'err', ':', 'LOGGER', '.', 'debug', '(', "'%s: command stderr: %s'", ',', 'str', '(', 'uuid', ')', ',', 'err', ')', 'return', 'CommandStatus', '(', 'popen', '.', 'returncode', ',', 'out', ',', 'err', ')'] | Runs a command
Args:
command(list of str): args of the command to execute, including the
            command itself as command[0], e.g. `['ls', '-l']`
input_data(str): If passed, will feed that data to the subprocess
through stdin
out_pipe(int or file): File descriptor as passed to
:ref:subprocess.Popen to use as stdout
stdin(int or file): File descriptor as passed to
:ref:subprocess.Popen to use as stdin
err_pipe(int or file): File descriptor as passed to
:ref:subprocess.Popen to use as stderr
env(dict of str:str): If set, will use the given dict as env for the
subprocess
uuid(uuid): If set the command will be logged with the given uuid
converted to string, otherwise, a uuid v4 will be generated.
**kwargs: Any other keyword args passed will be passed to the
:ref:subprocess.Popen call
Returns:
lago.utils.CommandStatus: result of the interactive execution | ['Runs', 'a', 'command'] | train | https://github.com/lago-project/lago/blob/5b8970f7687e063e4619066d5b8093ca997678c9/lago/utils.py#L123-L199 |
2,427 | wummel/linkchecker | linkcheck/updater.py | check_update | def check_update ():
"""Return the following values:
(False, errmsg) - online version could not be determined
(True, None) - user has newest version
(True, (version, url string)) - update available
(True, (version, None)) - current version is newer than online version
"""
version, value = get_online_version()
if version is None:
# value is an error message
return False, value
if version == CurrentVersion:
# user has newest version
return True, None
if is_newer_version(version):
# value is an URL linking to the update package
return True, (version, value)
# user is running a local or development version
return True, (version, None) | python | def check_update ():
"""Return the following values:
(False, errmsg) - online version could not be determined
(True, None) - user has newest version
(True, (version, url string)) - update available
(True, (version, None)) - current version is newer than online version
"""
version, value = get_online_version()
if version is None:
# value is an error message
return False, value
if version == CurrentVersion:
# user has newest version
return True, None
if is_newer_version(version):
# value is an URL linking to the update package
return True, (version, value)
# user is running a local or development version
return True, (version, None) | ['def', 'check_update', '(', ')', ':', 'version', ',', 'value', '=', 'get_online_version', '(', ')', 'if', 'version', 'is', 'None', ':', '# value is an error message', 'return', 'False', ',', 'value', 'if', 'version', '==', 'CurrentVersion', ':', '# user has newest version', 'return', 'True', ',', 'None', 'if', 'is_newer_version', '(', 'version', ')', ':', '# value is an URL linking to the update package', 'return', 'True', ',', '(', 'version', ',', 'value', ')', '# user is running a local or development version', 'return', 'True', ',', '(', 'version', ',', 'None', ')'] | Return the following values:
(False, errmsg) - online version could not be determined
(True, None) - user has newest version
(True, (version, url string)) - update available
(True, (version, None)) - current version is newer than online version | ['Return', 'the', 'following', 'values', ':', '(', 'False', 'errmsg', ')', '-', 'online', 'version', 'could', 'not', 'be', 'determined', '(', 'True', 'None', ')', '-', 'user', 'has', 'newest', 'version', '(', 'True', '(', 'version', 'url', 'string', '))', '-', 'update', 'available', '(', 'True', '(', 'version', 'None', '))', '-', 'current', 'version', 'is', 'newer', 'than', 'online', 'version'] | train | https://github.com/wummel/linkchecker/blob/c2ce810c3fb00b895a841a7be6b2e78c64e7b042/linkcheck/updater.py#L36-L54 |
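A sketch of consuming the return contract documented above.

from linkcheck.updater import check_update

ok, info = check_update()
if not ok:
    print('could not determine the online version: %s' % info)
elif info is None:
    print('already running the newest release')
else:
    version, url = info
    if url is not None:
        print('update %s available at %s' % (version, url))
    else:
        print('running a local/development version newer than %s' % version)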
2,428 | gabrielfalcao/sure | sure/__init__.py | chainproperty | def chainproperty(func):
"""Extend sure with a custom chain property."""
func = assertionproperty(func)
setattr(AssertionBuilder, func.fget.__name__, func)
return func | python | def chainproperty(func):
"""Extend sure with a custom chain property."""
func = assertionproperty(func)
setattr(AssertionBuilder, func.fget.__name__, func)
return func | ['def', 'chainproperty', '(', 'func', ')', ':', 'func', '=', 'assertionproperty', '(', 'func', ')', 'setattr', '(', 'AssertionBuilder', ',', 'func', '.', 'fget', '.', '__name__', ',', 'func', ')', 'return', 'func'] | Extend sure with a custom chain property. | ['Extend', 'sure', 'with', 'a', 'custom', 'chain', 'property', '.'] | train | https://github.com/gabrielfalcao/sure/blob/ac23b6b87306ec502b8719534ab23965d97a95f9/sure/__init__.py#L941-L945 |
2,429 | uber/doubles | doubles/method_double.py | MethodDouble.add_allowance | def add_allowance(self, caller):
"""Adds a new allowance for the method.
:param: tuple caller: A tuple indicating where the method was called
:return: The new ``Allowance``.
:rtype: Allowance
"""
allowance = Allowance(self._target, self._method_name, caller)
self._allowances.insert(0, allowance)
return allowance | python | def add_allowance(self, caller):
"""Adds a new allowance for the method.
:param: tuple caller: A tuple indicating where the method was called
:return: The new ``Allowance``.
:rtype: Allowance
"""
allowance = Allowance(self._target, self._method_name, caller)
self._allowances.insert(0, allowance)
return allowance | ['def', 'add_allowance', '(', 'self', ',', 'caller', ')', ':', 'allowance', '=', 'Allowance', '(', 'self', '.', '_target', ',', 'self', '.', '_method_name', ',', 'caller', ')', 'self', '.', '_allowances', '.', 'insert', '(', '0', ',', 'allowance', ')', 'return', 'allowance'] | Adds a new allowance for the method.
:param: tuple caller: A tuple indicating where the method was called
:return: The new ``Allowance``.
:rtype: Allowance | ['Adds', 'a', 'new', 'allowance', 'for', 'the', 'method', '.'] | train | https://github.com/uber/doubles/blob/15e68dcf98f709b19a581915fa6af5ef49ebdd8a/doubles/method_double.py#L30-L40 |
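add_allowance() is what the public allow() helper records behind the scenes; a small sketch of that public entry point, with Dealer as a made-up class:

from doubles import allow

class Dealer(object):
    def deal(self):
        return 'a real card'

dealer = Dealer()
allow(dealer).deal.and_return('a stubbed card')
print(dealer.deal())  # 'a stubbed card'
# (in a real test suite, doubles' teardown hooks would restore deal afterwards)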
2,430 | rstoneback/pysat | pysat/instruments/supermag_magnetometer.py | load | def load(fnames, tag='', sat_id=None):
""" Load the SuperMAG files
Parameters
-----------
fnames : (list)
List of filenames
tag : (str or NoneType)
Denotes type of file to load. Accepted types are 'indices', 'all',
'stations', and '' (for just magnetometer measurements). (default='')
sat_id : (str or NoneType)
Satellite ID for constellations, not used. (default=None)
Returns
--------
data : (pandas.DataFrame)
Object containing satellite data
meta : (pysat.Meta)
Object containing metadata such as column names and units
"""
# Ensure that there are files to load
if len(fnames) <= 0 :
return pysat.DataFrame(None), pysat.Meta(None)
# Ensure that the files are in a list
if isinstance(fnames, str):
fnames = [fnames]
# Initialise the output data
data = pds.DataFrame()
baseline = list()
# Cycle through the files
for fname in fnames:
fname = fname[:-11] # Remove date index from end of filename
file_type = path.splitext(fname)[1].lower()
# Open and load the files for each file type
if file_type == ".csv":
if tag != "indices":
temp = load_csv_data(fname, tag)
else:
temp, bline = load_ascii_data(fname, tag)
if bline is not None:
baseline.append(bline)
# Save the loaded data in the output data structure
if len(temp.columns) > 0:
data = pds.concat([data, temp], axis=0)
del temp
# If data was loaded, update the meta data
if len(data.columns) > 0:
meta = pysat.Meta()
for cc in data.columns:
meta[cc] = update_smag_metadata(cc)
meta.info = {'baseline':format_baseline_list(baseline)}
else:
meta = pysat.Meta(None)
return data, meta | python | def load(fnames, tag='', sat_id=None):
""" Load the SuperMAG files
Parameters
-----------
fnames : (list)
List of filenames
tag : (str or NoneType)
Denotes type of file to load. Accepted types are 'indices', 'all',
'stations', and '' (for just magnetometer measurements). (default='')
sat_id : (str or NoneType)
Satellite ID for constellations, not used. (default=None)
Returns
--------
data : (pandas.DataFrame)
Object containing satellite data
meta : (pysat.Meta)
Object containing metadata such as column names and units
"""
# Ensure that there are files to load
if len(fnames) <= 0 :
return pysat.DataFrame(None), pysat.Meta(None)
# Ensure that the files are in a list
if isinstance(fnames, str):
fnames = [fnames]
# Initialise the output data
data = pds.DataFrame()
baseline = list()
# Cycle through the files
for fname in fnames:
fname = fname[:-11] # Remove date index from end of filename
file_type = path.splitext(fname)[1].lower()
# Open and load the files for each file type
if file_type == ".csv":
if tag != "indices":
temp = load_csv_data(fname, tag)
else:
temp, bline = load_ascii_data(fname, tag)
if bline is not None:
baseline.append(bline)
# Save the loaded data in the output data structure
if len(temp.columns) > 0:
data = pds.concat([data, temp], axis=0)
del temp
# If data was loaded, update the meta data
if len(data.columns) > 0:
meta = pysat.Meta()
for cc in data.columns:
meta[cc] = update_smag_metadata(cc)
meta.info = {'baseline':format_baseline_list(baseline)}
else:
meta = pysat.Meta(None)
return data, meta | ['def', 'load', '(', 'fnames', ',', 'tag', '=', "''", ',', 'sat_id', '=', 'None', ')', ':', '# Ensure that there are files to load', 'if', 'len', '(', 'fnames', ')', '<=', '0', ':', 'return', 'pysat', '.', 'DataFrame', '(', 'None', ')', ',', 'pysat', '.', 'Meta', '(', 'None', ')', '# Ensure that the files are in a list', 'if', 'isinstance', '(', 'fnames', ',', 'str', ')', ':', 'fnames', '=', '[', 'fnames', ']', '# Initialise the output data', 'data', '=', 'pds', '.', 'DataFrame', '(', ')', 'baseline', '=', 'list', '(', ')', '# Cycle through the files', 'for', 'fname', 'in', 'fnames', ':', 'fname', '=', 'fname', '[', ':', '-', '11', ']', '# Remove date index from end of filename', 'file_type', '=', 'path', '.', 'splitext', '(', 'fname', ')', '[', '1', ']', '.', 'lower', '(', ')', '# Open and load the files for each file type', 'if', 'file_type', '==', '".csv"', ':', 'if', 'tag', '!=', '"indices"', ':', 'temp', '=', 'load_csv_data', '(', 'fname', ',', 'tag', ')', 'else', ':', 'temp', ',', 'bline', '=', 'load_ascii_data', '(', 'fname', ',', 'tag', ')', 'if', 'bline', 'is', 'not', 'None', ':', 'baseline', '.', 'append', '(', 'bline', ')', '# Save the loaded data in the output data structure', 'if', 'len', '(', 'temp', '.', 'columns', ')', '>', '0', ':', 'data', '=', 'pds', '.', 'concat', '(', '[', 'data', ',', 'temp', ']', ',', 'axis', '=', '0', ')', 'del', 'temp', '# If data was loaded, update the meta data', 'if', 'len', '(', 'data', '.', 'columns', ')', '>', '0', ':', 'meta', '=', 'pysat', '.', 'Meta', '(', ')', 'for', 'cc', 'in', 'data', '.', 'columns', ':', 'meta', '[', 'cc', ']', '=', 'update_smag_metadata', '(', 'cc', ')', 'meta', '.', 'info', '=', '{', "'baseline'", ':', 'format_baseline_list', '(', 'baseline', ')', '}', 'else', ':', 'meta', '=', 'pysat', '.', 'Meta', '(', 'None', ')', 'return', 'data', ',', 'meta'] | Load the SuperMAG files
Parameters
-----------
fnames : (list)
List of filenames
tag : (str or NoneType)
Denotes type of file to load. Accepted types are 'indices', 'all',
'stations', and '' (for just magnetometer measurements). (default='')
sat_id : (str or NoneType)
Satellite ID for constellations, not used. (default=None)
Returns
--------
data : (pandas.DataFrame)
Object containing satellite data
meta : (pysat.Meta)
Object containing metadata such as column names and units | ['Load', 'the', 'SuperMAG', 'files'] | train | https://github.com/rstoneback/pysat/blob/4ae1afd80e15e4449397d39dce8c3e969c32c422/pysat/instruments/supermag_magnetometer.py#L131-L195 |
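A usage sketch for loading this instrument through pysat; it assumes the SuperMAG files were already downloaded into the pysat data directory, and the platform/name pair follows the module's naming convention.

import datetime as dt
import pysat

smag = pysat.Instrument(platform='supermag', name='magnetometer', tag='')
smag.load(date=dt.datetime(2009, 1, 1))
print(smag.data.columns)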
2,431 | TkTech/Jawa | jawa/fields.py | FieldTable.create | def create(self, name: str, descriptor: str, value: Constant=None) -> Field:
"""
Creates a new field from `name` and `descriptor`. For example::
>>> from jawa.cf import ClassFile
>>> cf = ClassFile.create('BeerCounter')
>>> field = cf.fields.create('BeerCount', 'I')
To automatically create a static field, pass a value::
>>> from jawa.cf import ClassFile
>>> cf = ClassFile.create('BeerCounter')
>>> field = cf.fields.create(
... 'MaxBeer',
... 'I',
... cf.constants.create_integer(99)
... )
:param name: Name of the new field.
:param descriptor: Type descriptor of the new field.
:param value: Optional static value for the field.
"""
field = Field(self._cf)
name = self._cf.constants.create_utf8(name)
descriptor = self._cf.constants.create_utf8(descriptor)
field._name_index = name.index
field._descriptor_index = descriptor.index
field.access_flags.acc_public = True
if value is not None:
field.attributes.create(ConstantValueAttribute, value)
field.access_flags.acc_static = True
self.append(field)
return field | python | def create(self, name: str, descriptor: str, value: Constant=None) -> Field:
"""
Creates a new field from `name` and `descriptor`. For example::
>>> from jawa.cf import ClassFile
>>> cf = ClassFile.create('BeerCounter')
>>> field = cf.fields.create('BeerCount', 'I')
To automatically create a static field, pass a value::
>>> from jawa.cf import ClassFile
>>> cf = ClassFile.create('BeerCounter')
>>> field = cf.fields.create(
... 'MaxBeer',
... 'I',
... cf.constants.create_integer(99)
... )
:param name: Name of the new field.
:param descriptor: Type descriptor of the new field.
:param value: Optional static value for the field.
"""
field = Field(self._cf)
name = self._cf.constants.create_utf8(name)
descriptor = self._cf.constants.create_utf8(descriptor)
field._name_index = name.index
field._descriptor_index = descriptor.index
field.access_flags.acc_public = True
if value is not None:
field.attributes.create(ConstantValueAttribute, value)
field.access_flags.acc_static = True
self.append(field)
return field | ['def', 'create', '(', 'self', ',', 'name', ':', 'str', ',', 'descriptor', ':', 'str', ',', 'value', ':', 'Constant', '=', 'None', ')', '->', 'Field', ':', 'field', '=', 'Field', '(', 'self', '.', '_cf', ')', 'name', '=', 'self', '.', '_cf', '.', 'constants', '.', 'create_utf8', '(', 'name', ')', 'descriptor', '=', 'self', '.', '_cf', '.', 'constants', '.', 'create_utf8', '(', 'descriptor', ')', 'field', '.', '_name_index', '=', 'name', '.', 'index', 'field', '.', '_descriptor_index', '=', 'descriptor', '.', 'index', 'field', '.', 'access_flags', '.', 'acc_public', '=', 'True', 'if', 'value', 'is', 'not', 'None', ':', 'field', '.', 'attributes', '.', 'create', '(', 'ConstantValueAttribute', ',', 'value', ')', 'field', '.', 'access_flags', '.', 'acc_static', '=', 'True', 'self', '.', 'append', '(', 'field', ')', 'return', 'field'] | Creates a new field from `name` and `descriptor`. For example::
>>> from jawa.cf import ClassFile
>>> cf = ClassFile.create('BeerCounter')
>>> field = cf.fields.create('BeerCount', 'I')
To automatically create a static field, pass a value::
>>> from jawa.cf import ClassFile
>>> cf = ClassFile.create('BeerCounter')
>>> field = cf.fields.create(
... 'MaxBeer',
... 'I',
... cf.constants.create_integer(99)
... )
:param name: Name of the new field.
:param descriptor: Type descriptor of the new field.
:param value: Optional static value for the field. | ['Creates', 'a', 'new', 'field', 'from', 'name', 'and', 'descriptor', '.', 'For', 'example', '::'] | train | https://github.com/TkTech/Jawa/blob/94c8424e699029ac33fbc0e866fff0ecb2742289/jawa/fields.py#L110-L144 |
2,432 | hydraplatform/hydra-base | hydra_base/lib/data.py | get_dataset | def get_dataset(dataset_id,**kwargs):
"""
Get a single dataset, by ID
"""
user_id = int(kwargs.get('user_id'))
if dataset_id is None:
return None
try:
dataset_rs = db.DBSession.query(Dataset.id,
Dataset.type,
Dataset.unit_id,
Dataset.name,
Dataset.hidden,
Dataset.cr_date,
Dataset.created_by,
DatasetOwner.user_id,
null().label('metadata'),
case([(and_(Dataset.hidden=='Y', DatasetOwner.user_id is not None), None)],
else_=Dataset.value).label('value')).filter(
Dataset.id==dataset_id).outerjoin(DatasetOwner,
and_(DatasetOwner.dataset_id==Dataset.id,
DatasetOwner.user_id==user_id)).one()
rs_dict = dataset_rs._asdict()
#convert the value row into a string as it is returned as a binary
if dataset_rs.value is not None:
rs_dict['value'] = str(dataset_rs.value)
if dataset_rs.hidden == 'N' or (dataset_rs.hidden == 'Y' and dataset_rs.user_id is not None):
metadata = db.DBSession.query(Metadata).filter(Metadata.dataset_id==dataset_id).all()
rs_dict['metadata'] = metadata
else:
rs_dict['metadata'] = []
except NoResultFound:
raise HydraError("Dataset %s does not exist."%(dataset_id))
dataset = namedtuple('Dataset', rs_dict.keys())(**rs_dict)
return dataset | python | def get_dataset(dataset_id,**kwargs):
"""
Get a single dataset, by ID
"""
user_id = int(kwargs.get('user_id'))
if dataset_id is None:
return None
try:
dataset_rs = db.DBSession.query(Dataset.id,
Dataset.type,
Dataset.unit_id,
Dataset.name,
Dataset.hidden,
Dataset.cr_date,
Dataset.created_by,
DatasetOwner.user_id,
null().label('metadata'),
case([(and_(Dataset.hidden=='Y', DatasetOwner.user_id is not None), None)],
else_=Dataset.value).label('value')).filter(
Dataset.id==dataset_id).outerjoin(DatasetOwner,
and_(DatasetOwner.dataset_id==Dataset.id,
DatasetOwner.user_id==user_id)).one()
rs_dict = dataset_rs._asdict()
#convert the value row into a string as it is returned as a binary
if dataset_rs.value is not None:
rs_dict['value'] = str(dataset_rs.value)
if dataset_rs.hidden == 'N' or (dataset_rs.hidden == 'Y' and dataset_rs.user_id is not None):
metadata = db.DBSession.query(Metadata).filter(Metadata.dataset_id==dataset_id).all()
rs_dict['metadata'] = metadata
else:
rs_dict['metadata'] = []
except NoResultFound:
raise HydraError("Dataset %s does not exist."%(dataset_id))
dataset = namedtuple('Dataset', rs_dict.keys())(**rs_dict)
return dataset | ['def', 'get_dataset', '(', 'dataset_id', ',', '*', '*', 'kwargs', ')', ':', 'user_id', '=', 'int', '(', 'kwargs', '.', 'get', '(', "'user_id'", ')', ')', 'if', 'dataset_id', 'is', 'None', ':', 'return', 'None', 'try', ':', 'dataset_rs', '=', 'db', '.', 'DBSession', '.', 'query', '(', 'Dataset', '.', 'id', ',', 'Dataset', '.', 'type', ',', 'Dataset', '.', 'unit_id', ',', 'Dataset', '.', 'name', ',', 'Dataset', '.', 'hidden', ',', 'Dataset', '.', 'cr_date', ',', 'Dataset', '.', 'created_by', ',', 'DatasetOwner', '.', 'user_id', ',', 'null', '(', ')', '.', 'label', '(', "'metadata'", ')', ',', 'case', '(', '[', '(', 'and_', '(', 'Dataset', '.', 'hidden', '==', "'Y'", ',', 'DatasetOwner', '.', 'user_id', 'is', 'not', 'None', ')', ',', 'None', ')', ']', ',', 'else_', '=', 'Dataset', '.', 'value', ')', '.', 'label', '(', "'value'", ')', ')', '.', 'filter', '(', 'Dataset', '.', 'id', '==', 'dataset_id', ')', '.', 'outerjoin', '(', 'DatasetOwner', ',', 'and_', '(', 'DatasetOwner', '.', 'dataset_id', '==', 'Dataset', '.', 'id', ',', 'DatasetOwner', '.', 'user_id', '==', 'user_id', ')', ')', '.', 'one', '(', ')', 'rs_dict', '=', 'dataset_rs', '.', '_asdict', '(', ')', '#convert the value row into a string as it is returned as a binary', 'if', 'dataset_rs', '.', 'value', 'is', 'not', 'None', ':', 'rs_dict', '[', "'value'", ']', '=', 'str', '(', 'dataset_rs', '.', 'value', ')', 'if', 'dataset_rs', '.', 'hidden', '==', "'N'", 'or', '(', 'dataset_rs', '.', 'hidden', '==', "'Y'", 'and', 'dataset_rs', '.', 'user_id', 'is', 'not', 'None', ')', ':', 'metadata', '=', 'db', '.', 'DBSession', '.', 'query', '(', 'Metadata', ')', '.', 'filter', '(', 'Metadata', '.', 'dataset_id', '==', 'dataset_id', ')', '.', 'all', '(', ')', 'rs_dict', '[', "'metadata'", ']', '=', 'metadata', 'else', ':', 'rs_dict', '[', "'metadata'", ']', '=', '[', ']', 'except', 'NoResultFound', ':', 'raise', 'HydraError', '(', '"Dataset %s does not exist."', '%', '(', 'dataset_id', ')', ')', 'dataset', '=', 'namedtuple', '(', "'Dataset'", ',', 'rs_dict', '.', 'keys', '(', ')', ')', '(', '*', '*', 'rs_dict', ')', 'return', 'dataset'] | Get a single dataset, by ID | ['Get', 'a', 'single', 'dataset', 'by', 'ID'] | train | https://github.com/hydraplatform/hydra-base/blob/9251ff7946505f7a272c87837390acd1c435bc6e/hydra_base/lib/data.py#L63-L106 |
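A minimal usage sketch for get_dataset; the dataset id and the user_id keyword below are illustrative values, with user_id assumed to arrive through **kwargs as the code above expects.
dataset = get_dataset(42, user_id=7)   # hypothetical ids
if dataset is not None:
    print(dataset.name, dataset.type)  # namedtuple fields mirror the queried columns
    print(dataset.metadata)            # [] when the dataset is hidden from this user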
2,433 | cltk/cltk | cltk/corpus/readers.py | TesseraeCorpusReader.lines | def lines(self: object, fileids: str, plaintext: bool = True):
"""
Tokenizes documents in the corpus by line
"""
for text in self.texts(fileids, plaintext):
text = re.sub(r'\n\s*\n', '\n', text, flags=re.MULTILINE) # Remove blank lines
for line in text.split('\n'):
yield line | python | def lines(self: object, fileids: str, plaintext: bool = True):
"""
Tokenizes documents in the corpus by line
"""
for text in self.texts(fileids, plaintext):
text = re.sub(r'\n\s*\n', '\n', text, flags=re.MULTILINE) # Remove blank lines
for line in text.split('\n'):
yield line | ['def', 'lines', '(', 'self', ':', 'object', ',', 'fileids', ':', 'str', ',', 'plaintext', ':', 'bool', '=', 'True', ')', ':', 'for', 'text', 'in', 'self', '.', 'texts', '(', 'fileids', ',', 'plaintext', ')', ':', 'text', '=', 're', '.', 'sub', '(', "r'\\n\\s*\\n'", ',', "'\\n'", ',', 'text', ',', 're', '.', 'MULTILINE', ')', '# Remove blank lines', 'for', 'line', 'in', 'text', '.', 'split', '(', "'\\n'", ')', ':', 'yield', 'line'] | Tokenizes documents in the corpus by line | ['Tokenizes', 'documents', 'in', 'the', 'corpus', 'by', 'line'] | train | https://github.com/cltk/cltk/blob/ed9c025b7ec43c949481173251b70e05e4dffd27/cltk/corpus/readers.py#L444-L452 |
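A usage sketch, assuming a TesseraeCorpusReader has already been constructed; the corpus root and file id are illustrative, not taken from the source.
reader = TesseraeCorpusReader('~/cltk_data/latin/text/latin_text_tesserae/texts', r'.*\.tess')
for line in reader.lines(fileids='vergil.aeneid.part.1.tess'):
    print(line)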
2,434 | jbeluch/xbmcswift2 | xbmcswift2/xbmcmixin.py | XBMCMixin._add_subtitles | def _add_subtitles(self, subtitles):
'''Adds subtitles to playing video.
:param subtitles: A URL to a remote subtitles file or a local filename
for a subtitles file.
.. warning:: You must start playing a video before calling this method
or it will loop for an indefinite length.
'''
# This method is named with an underscore to suggest that callers pass
# the subtitles argument to set_resolved_url instead of calling this
# method directly. This is to ensure a video is played before calling
# this method.
player = xbmc.Player()
for _ in xrange(30):
if player.isPlaying():
break
time.sleep(1)
else:
raise Exception('No video playing. Aborted after 30 seconds.')
player.setSubtitles(subtitles) | python | def _add_subtitles(self, subtitles):
'''Adds subtitles to playing video.
:param subtitles: A URL to a remote subtitles file or a local filename
for a subtitles file.
.. warning:: You must start playing a video before calling this method
or it will loop for an indefinite length.
'''
# This method is named with an underscore to suggest that callers pass
# the subtitles argument to set_resolved_url instead of calling this
# method directly. This is to ensure a video is played before calling
# this method.
player = xbmc.Player()
for _ in xrange(30):
if player.isPlaying():
break
time.sleep(1)
else:
raise Exception('No video playing. Aborted after 30 seconds.')
player.setSubtitles(subtitles) | ['def', '_add_subtitles', '(', 'self', ',', 'subtitles', ')', ':', '# This method is named with an underscore to suggest that callers pass', '# the subtitles argument to set_resolved_url instead of calling this', '# method directly. This is to ensure a video is played before calling', '# this method.', 'player', '=', 'xbmc', '.', 'Player', '(', ')', 'for', '_', 'in', 'xrange', '(', '30', ')', ':', 'if', 'player', '.', 'isPlaying', '(', ')', ':', 'break', 'time', '.', 'sleep', '(', '1', ')', 'else', ':', 'raise', 'Exception', '(', "'No video playing. Aborted after 30 seconds.'", ')', 'player', '.', 'setSubtitles', '(', 'subtitles', ')'] | Adds subtitles to playing video.
:param subtitles: A URL to a remote subtitles file or a local filename
for a subtitles file.
.. warning:: You must start playing a video before calling this method
or it will loop for an indefinite length. | ['Adds', 'subtitles', 'to', 'playing', 'video', '.'] | train | https://github.com/jbeluch/xbmcswift2/blob/0e7a3642499554edc8265fdf1ba6c5ee567daa78/xbmcswift2/xbmcmixin.py#L317-L338 |
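Per the comment above, callers are expected to route subtitles through set_resolved_url rather than call this helper directly; a sketch of that pattern, where plugin, stream_url and subs_url are assumed to exist in the add-on code:
# inside a plugin route that resolves a playable URL
return plugin.set_resolved_url(stream_url, subtitles=subs_url)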
2,435 | numberoverzero/bloop | bloop/conditions.py | iter_columns | def iter_columns(condition):
"""
Yield all columns in the condition or its inner conditions.
Unwraps proxies when the condition's column (or any of its values) include paths.
"""
# Like iter_conditions, this can't live in each condition without going possibly infinite on the
# recursion, or passing the visited set through every call. That makes the signature ugly, so we
# take care of it here. Luckily, it's pretty easy to leverage iter_conditions and just unpack the
# actual columns.
visited = set()
for condition in iter_conditions(condition):
if condition.operation in ("and", "or", "not"):
continue
# Non-meta conditions always have a column, and each of values has the potential to be a column.
# Comparison will only have a list of len 1, but it's simpler to just iterate values and check each
# unwrap proxies created for paths
column = proxied(condition.column)
# special case for None
# this could also have skipped on isinstance(condition, Condition)
# but this is slightly more flexible for users to create their own None-sentinel Conditions
if column is None:
continue
if column not in visited:
visited.add(column)
yield column
for value in condition.values:
if isinstance(value, ComparisonMixin):
if value not in visited:
visited.add(value)
yield value | python | def iter_columns(condition):
"""
Yield all columns in the condition or its inner conditions.
Unwraps proxies when the condition's column (or any of its values) include paths.
"""
# Like iter_conditions, this can't live in each condition without going possibly infinite on the
# recursion, or passing the visited set through every call. That makes the signature ugly, so we
# take care of it here. Luckily, it's pretty easy to leverage iter_conditions and just unpack the
# actual columns.
visited = set()
for condition in iter_conditions(condition):
if condition.operation in ("and", "or", "not"):
continue
# Non-meta conditions always have a column, and each of values has the potential to be a column.
# Comparison will only have a list of len 1, but it's simpler to just iterate values and check each
# unwrap proxies created for paths
column = proxied(condition.column)
# special case for None
# this could also have skipped on isinstance(condition, Condition)
# but this is slightly more flexible for users to create their own None-sentinel Conditions
if column is None:
continue
if column not in visited:
visited.add(column)
yield column
for value in condition.values:
if isinstance(value, ComparisonMixin):
if value not in visited:
visited.add(value)
yield value | ['def', 'iter_columns', '(', 'condition', ')', ':', "# Like iter_conditions, this can't live in each condition without going possibly infinite on the", '# recursion, or passing the visited set through every call. That makes the signature ugly, so we', "# take care of it here. Luckily, it's pretty easy to leverage iter_conditions and just unpack the", '# actual columns.', 'visited', '=', 'set', '(', ')', 'for', 'condition', 'in', 'iter_conditions', '(', 'condition', ')', ':', 'if', 'condition', '.', 'operation', 'in', '(', '"and"', ',', '"or"', ',', '"not"', ')', ':', 'continue', '# Non-meta conditions always have a column, and each of values has the potential to be a column.', "# Comparison will only have a list of len 1, but it's simpler to just iterate values and check each", '# unwrap proxies created for paths', 'column', '=', 'proxied', '(', 'condition', '.', 'column', ')', '# special case for None', '# this could also have skipped on isinstance(condition, Condition)', '# but this is slightly more flexible for users to create their own None-sentinel Conditions', 'if', 'column', 'is', 'None', ':', 'continue', 'if', 'column', 'not', 'in', 'visited', ':', 'visited', '.', 'add', '(', 'column', ')', 'yield', 'column', 'for', 'value', 'in', 'condition', '.', 'values', ':', 'if', 'isinstance', '(', 'value', ',', 'ComparisonMixin', ')', ':', 'if', 'value', 'not', 'in', 'visited', ':', 'visited', '.', 'add', '(', 'value', ')', 'yield', 'value'] | Yield all columns in the condition or its inner conditions.
Unwraps proxies when the condition's column (or any of its values) include paths. | ['Yield', 'all', 'columns', 'in', 'the', 'condition', 'or', 'its', 'inner', 'conditions', '.'] | train | https://github.com/numberoverzero/bloop/blob/4c95f5a0ff0802443a1c258bfaccecd1758363e7/bloop/conditions.py#L938-L970 |
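A small sketch of walking the distinct columns referenced by a condition; the User model and its columns are illustrative assumptions.
condition = (User.name == 'n/o') & (User.email == 'n@example.com')
for column in iter_columns(condition):
    print(column)   # each distinct column is yielded once, with path proxies unwrapped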
2,436 | jbaiter/gphoto2-cffi | gphoto2cffi/backend.py | _logging_callback | def _logging_callback(level, domain, message, data):
""" Callback that outputs libgphoto2's logging message via
Python's standard logging facilities.
:param level: libgphoto2 logging level
:param domain: component the message originates from
:param message: logging message
:param data: Other data in the logging record (unused)
"""
domain = ffi.string(domain).decode()
message = ffi.string(message).decode()
logger = LOGGER.getChild(domain)
if level not in LOG_LEVELS:
return
logger.log(LOG_LEVELS[level], message) | python | def _logging_callback(level, domain, message, data):
""" Callback that outputs libgphoto2's logging message via
Python's standard logging facilities.
:param level: libgphoto2 logging level
:param domain: component the message originates from
:param message: logging message
:param data: Other data in the logging record (unused)
"""
domain = ffi.string(domain).decode()
message = ffi.string(message).decode()
logger = LOGGER.getChild(domain)
if level not in LOG_LEVELS:
return
logger.log(LOG_LEVELS[level], message) | ['def', '_logging_callback', '(', 'level', ',', 'domain', ',', 'message', ',', 'data', ')', ':', 'domain', '=', 'ffi', '.', 'string', '(', 'domain', ')', '.', 'decode', '(', ')', 'message', '=', 'ffi', '.', 'string', '(', 'message', ')', '.', 'decode', '(', ')', 'logger', '=', 'LOGGER', '.', 'getChild', '(', 'domain', ')', 'if', 'level', 'not', 'in', 'LOG_LEVELS', ':', 'return', 'logger', '.', 'log', '(', 'LOG_LEVELS', '[', 'level', ']', ',', 'message', ')'] | Callback that outputs libgphoto2's logging message via
Python's standard logging facilities.
:param level: libgphoto2 logging level
:param domain: component the message originates from
:param message: logging message
:param data: Other data in the logging record (unused) | ['Callback', 'that', 'outputs', 'libgphoto2', 's', 'logging', 'message', 'via', 'Python', 's', 'standard', 'logging', 'facilities', '.'] | train | https://github.com/jbaiter/gphoto2-cffi/blob/2876d15a58174bd24613cd4106a3ef0cefd48050/gphoto2cffi/backend.py#L75-L90 |
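The callback itself is registered with libgphoto2 elsewhere in the backend; the snippet below only illustrates the level-mapping behaviour, with an assumed LOG_LEVELS table (the real one lives in this module).
import logging
LOG_LEVELS = {0: logging.ERROR, 1: logging.INFO, 2: logging.DEBUG}  # illustrative mapping
LOGGER = logging.getLogger('gphoto2')
LOGGER.getChild('context').log(LOG_LEVELS.get(2, logging.DEBUG), 'camera initialised')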
2,437 | google/grr | grr/core/grr_response_core/lib/rdfvalues/crypto.py | SignedBlob.Sign | def Sign(self, data, signing_key, verify_key=None):
"""Use the data to sign this blob.
Args:
data: String containing the blob data.
signing_key: The key to sign with.
verify_key: Key to verify with. If None we assume the signing key also
contains the public key.
Returns:
self for call chaining.
"""
if signing_key.KeyLen() < 2048:
logging.warning("signing key is too short.")
self.signature = signing_key.Sign(data)
self.signature_type = self.SignatureType.RSA_PKCS1v15
self.digest = hashlib.sha256(data).digest()
self.digest_type = self.HashType.SHA256
self.data = data
# Test we can verify before we send it off.
if verify_key is None:
verify_key = signing_key.GetPublicKey()
# Verify our own data.
self.Verify(verify_key)
return self | python | def Sign(self, data, signing_key, verify_key=None):
"""Use the data to sign this blob.
Args:
data: String containing the blob data.
signing_key: The key to sign with.
verify_key: Key to verify with. If None we assume the signing key also
contains the public key.
Returns:
self for call chaining.
"""
if signing_key.KeyLen() < 2048:
logging.warning("signing key is too short.")
self.signature = signing_key.Sign(data)
self.signature_type = self.SignatureType.RSA_PKCS1v15
self.digest = hashlib.sha256(data).digest()
self.digest_type = self.HashType.SHA256
self.data = data
# Test we can verify before we send it off.
if verify_key is None:
verify_key = signing_key.GetPublicKey()
# Verify our own data.
self.Verify(verify_key)
return self | ['def', 'Sign', '(', 'self', ',', 'data', ',', 'signing_key', ',', 'verify_key', '=', 'None', ')', ':', 'if', 'signing_key', '.', 'KeyLen', '(', ')', '<', '2048', ':', 'logging', '.', 'warning', '(', '"signing key is too short."', ')', 'self', '.', 'signature', '=', 'signing_key', '.', 'Sign', '(', 'data', ')', 'self', '.', 'signature_type', '=', 'self', '.', 'SignatureType', '.', 'RSA_PKCS1v15', 'self', '.', 'digest', '=', 'hashlib', '.', 'sha256', '(', 'data', ')', '.', 'digest', '(', ')', 'self', '.', 'digest_type', '=', 'self', '.', 'HashType', '.', 'SHA256', 'self', '.', 'data', '=', 'data', '# Test we can verify before we send it off.', 'if', 'verify_key', 'is', 'None', ':', 'verify_key', '=', 'signing_key', '.', 'GetPublicKey', '(', ')', '# Verify our own data.', 'self', '.', 'Verify', '(', 'verify_key', ')', 'return', 'self'] | Use the data to sign this blob.
Args:
data: String containing the blob data.
signing_key: The key to sign with.
verify_key: Key to verify with. If None we assume the signing key also
contains the public key.
Returns:
self for call chaining. | ['Use', 'the', 'data', 'to', 'sign', 'this', 'blob', '.'] | train | https://github.com/google/grr/blob/5cef4e8e2f0d5df43ea4877e9c798e0bf60bfe74/grr/core/grr_response_core/lib/rdfvalues/crypto.py#L561-L591 |
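A sketch of signing and verifying a blob; the key-generation helper is an assumption about what else the crypto module provides, not a documented call.
signing_key = RSAPrivateKey().GenerateKey(bits=2048)   # hypothetical helper
blob = SignedBlob()
blob.Sign(b'payload bytes', signing_key)               # signs and immediately self-verifies
blob.Verify(signing_key.GetPublicKey())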
2,438 | caseyjlaw/activegit | activegit/activegit.py | ActiveGit.initializerepo | def initializerepo(self):
""" Fill empty directory with products and make first commit """
try:
os.mkdir(self.repopath)
except OSError:
pass
cmd = self.repo.init(bare=self.bare, shared=self.shared)
if not self.bare:
self.write_testing_data([], [])
self.write_training_data([], [])
self.write_classifier(None)
cmd = self.repo.add('training.pkl')
cmd = self.repo.add('testing.pkl')
cmd = self.repo.add('classifier.pkl')
cmd = self.repo.commit(m='initial commit')
cmd = self.repo.tag('initial')
cmd = self.set_version('initial') | python | def initializerepo(self):
""" Fill empty directory with products and make first commit """
try:
os.mkdir(self.repopath)
except OSError:
pass
cmd = self.repo.init(bare=self.bare, shared=self.shared)
if not self.bare:
self.write_testing_data([], [])
self.write_training_data([], [])
self.write_classifier(None)
cmd = self.repo.add('training.pkl')
cmd = self.repo.add('testing.pkl')
cmd = self.repo.add('classifier.pkl')
cmd = self.repo.commit(m='initial commit')
cmd = self.repo.tag('initial')
cmd = self.set_version('initial') | ['def', 'initializerepo', '(', 'self', ')', ':', 'try', ':', 'os', '.', 'mkdir', '(', 'self', '.', 'repopath', ')', 'except', 'OSError', ':', 'pass', 'cmd', '=', 'self', '.', 'repo', '.', 'init', '(', 'bare', '=', 'self', '.', 'bare', ',', 'shared', '=', 'self', '.', 'shared', ')', 'if', 'not', 'self', '.', 'bare', ':', 'self', '.', 'write_testing_data', '(', '[', ']', ',', '[', ']', ')', 'self', '.', 'write_training_data', '(', '[', ']', ',', '[', ']', ')', 'self', '.', 'write_classifier', '(', 'None', ')', 'cmd', '=', 'self', '.', 'repo', '.', 'add', '(', "'training.pkl'", ')', 'cmd', '=', 'self', '.', 'repo', '.', 'add', '(', "'testing.pkl'", ')', 'cmd', '=', 'self', '.', 'repo', '.', 'add', '(', "'classifier.pkl'", ')', 'cmd', '=', 'self', '.', 'repo', '.', 'commit', '(', 'm', '=', "'initial commit'", ')', 'cmd', '=', 'self', '.', 'repo', '.', 'tag', '(', "'initial'", ')', 'cmd', '=', 'self', '.', 'set_version', '(', "'initial'", ')'] | Fill empty directory with products and make first commit | ['Fill', 'empty', 'directory', 'with', 'products', 'and', 'make', 'first', 'commit'] | train | https://github.com/caseyjlaw/activegit/blob/2b4a0ee0fecf13345b5257130ba98b48f46e1098/activegit/activegit.py#L54-L75 |
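A sketch of the end state; constructing ActiveGit against an empty directory is assumed to trigger initializerepo(), and the path is illustrative.
ag = ActiveGit('/tmp/classifier_repo')   # hypothetical path
# after the first commit the working tree holds training.pkl, testing.pkl and
# classifier.pkl, and the commit is tagged 'initial'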
2,439 | gem/oq-engine | openquake/hmtk/seismicity/utils.py | decimal_year | def decimal_year(year, month, day):
"""
Allows to calculate the decimal year for a vector of dates
(TODO this is legacy code kept to maintain comparability with previous
declustering algorithms!)
:param year: year column from catalogue matrix
:type year: numpy.ndarray
:param month: month column from catalogue matrix
:type month: numpy.ndarray
:param day: day column from catalogue matrix
:type day: numpy.ndarray
:returns: decimal year column
:rtype: numpy.ndarray
"""
marker = np.array([0., 31., 59., 90., 120., 151., 181.,
212., 243., 273., 304., 334.])
tmonth = (month - 1).astype(int)
day_count = marker[tmonth] + day - 1.
dec_year = year + (day_count / 365.)
return dec_year | python | def decimal_year(year, month, day):
"""
Allows to calculate the decimal year for a vector of dates
(TODO this is legacy code kept to maintain comparability with previous
declustering algorithms!)
:param year: year column from catalogue matrix
:type year: numpy.ndarray
:param month: month column from catalogue matrix
:type month: numpy.ndarray
:param day: day column from catalogue matrix
:type day: numpy.ndarray
:returns: decimal year column
:rtype: numpy.ndarray
"""
marker = np.array([0., 31., 59., 90., 120., 151., 181.,
212., 243., 273., 304., 334.])
tmonth = (month - 1).astype(int)
day_count = marker[tmonth] + day - 1.
dec_year = year + (day_count / 365.)
return dec_year | ['def', 'decimal_year', '(', 'year', ',', 'month', ',', 'day', ')', ':', 'marker', '=', 'np', '.', 'array', '(', '[', '0.', ',', '31.', ',', '59.', ',', '90.', ',', '120.', ',', '151.', ',', '181.', ',', '212.', ',', '243.', ',', '273.', ',', '304.', ',', '334.', ']', ')', 'tmonth', '=', '(', 'month', '-', '1', ')', '.', 'astype', '(', 'int', ')', 'day_count', '=', 'marker', '[', 'tmonth', ']', '+', 'day', '-', '1.', 'dec_year', '=', 'year', '+', '(', 'day_count', '/', '365.', ')', 'return', 'dec_year'] | Allows to calculate the decimal year for a vector of dates
(TODO this is legacy code kept to maintain comparability with previous
declustering algorithms!)
:param year: year column from catalogue matrix
:type year: numpy.ndarray
:param month: month column from catalogue matrix
:type month: numpy.ndarray
:param day: day column from catalogue matrix
:type day: numpy.ndarray
:returns: decimal year column
:rtype: numpy.ndarray | ['Allows', 'to', 'calculate', 'the', 'decimal', 'year', 'for', 'a', 'vector', 'of', 'dates', '(', 'TODO', 'this', 'is', 'legacy', 'code', 'kept', 'to', 'maintain', 'comparability', 'with', 'previous', 'declustering', 'algorithms!', ')'] | train | https://github.com/gem/oq-engine/blob/8294553a0b8aba33fd96437a35065d03547d0040/openquake/hmtk/seismicity/utils.py#L105-L126 |
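A worked example following the arithmetic above:
import numpy as np
decimal_year(np.array([2000]), np.array([10]), np.array([1]))
# tmonth = 9, marker[9] = 273, day_count = 273 + 1 - 1 = 273,
# so the result is 2000 + 273/365 = array([2000.74794521])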
2,440 | robinandeer/puzzle | puzzle/plugins/gemini/mixins/case.py | CaseMixin.case | def case(self, case_id=None):
"""Return a Case object
If no case_id is given return one case
Args:
case_id (str): A case id
Returns:
case(Case): A Case object
"""
cases = self.cases()
if case_id:
for case in cases:
if case.case_id == case_id:
return case
else:
if cases:
return cases[0]
return None | python | def case(self, case_id=None):
"""Return a Case object
If no case_id is given return one case
Args:
case_id (str): A case id
Returns:
case(Case): A Case object
"""
cases = self.cases()
if case_id:
for case in cases:
if case.case_id == case_id:
return case
else:
if cases:
return cases[0]
return None | ['def', 'case', '(', 'self', ',', 'case_id', '=', 'None', ')', ':', 'cases', '=', 'self', '.', 'cases', '(', ')', 'if', 'case_id', ':', 'for', 'case', 'in', 'cases', ':', 'if', 'case', '.', 'case_id', '==', 'case_id', ':', 'return', 'case', 'else', ':', 'if', 'cases', ':', 'return', 'cases', '[', '0', ']', 'return', 'None'] | Return a Case object
If no case_id is given return one case
Args:
case_id (str): A case id
Returns:
case(Case): A Case object | ['Return', 'a', 'Case', 'object'] | train | https://github.com/robinandeer/puzzle/blob/9476f05b416d3a5135d25492cb31411fdf831c58/puzzle/plugins/gemini/mixins/case.py#L48-L68 |
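Usage sketch; `adapter` stands in for a plugin instance whose cases() are already loaded, and the case id is illustrative.
case_obj = adapter.case(case_id='643594')   # exact-id lookup
first_case = adapter.case()                 # no id given: first case, or None when empty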
2,441 | Bystroushaak/pyDHTMLParser | src/dhtmlparser/__init__.py | _parseDOM | def _parseDOM(istack):
"""
Recursively go through element array and create DOM.
Args:
istack (list): List of :class:`.HTMLElement` objects.
Returns:
list: DOM tree as list.
"""
ostack = []
end_tag_index = 0
def neither_nonpair_or_end_or_comment(el):
return not (el.isNonPairTag() or el.isEndTag() or el.isComment())
index = 0
while index < len(istack):
el = istack[index]
# check if this is pair tag
end_tag_index = _indexOfEndTag(istack[index:])
if end_tag_index == 0 and neither_nonpair_or_end_or_comment(el):
el.isNonPairTag(True)
if end_tag_index == 0:
if not el.isEndTag():
ostack.append(el)
else:
el.childs = _parseDOM(istack[index + 1: end_tag_index + index])
el.endtag = istack[end_tag_index + index] # reference to endtag
el.endtag.openertag = el
ostack.append(el)
ostack.append(el.endtag)
index = end_tag_index + index
index += 1
return ostack | python | def _parseDOM(istack):
"""
Recursively go through element array and create DOM.
Args:
istack (list): List of :class:`.HTMLElement` objects.
Returns:
list: DOM tree as list.
"""
ostack = []
end_tag_index = 0
def neither_nonpair_or_end_or_comment(el):
return not (el.isNonPairTag() or el.isEndTag() or el.isComment())
index = 0
while index < len(istack):
el = istack[index]
# check if this is pair tag
end_tag_index = _indexOfEndTag(istack[index:])
if end_tag_index == 0 and neither_nonpair_or_end_or_comment(el):
el.isNonPairTag(True)
if end_tag_index == 0:
if not el.isEndTag():
ostack.append(el)
else:
el.childs = _parseDOM(istack[index + 1: end_tag_index + index])
el.endtag = istack[end_tag_index + index] # reference to endtag
el.endtag.openertag = el
ostack.append(el)
ostack.append(el.endtag)
index = end_tag_index + index
index += 1
return ostack | ['def', '_parseDOM', '(', 'istack', ')', ':', 'ostack', '=', '[', ']', 'end_tag_index', '=', '0', 'def', 'neither_nonpair_or_end_or_comment', '(', 'el', ')', ':', 'return', 'not', '(', 'el', '.', 'isNonPairTag', '(', ')', 'or', 'el', '.', 'isEndTag', '(', ')', 'or', 'el', '.', 'isComment', '(', ')', ')', 'index', '=', '0', 'while', 'index', '<', 'len', '(', 'istack', ')', ':', 'el', '=', 'istack', '[', 'index', ']', '# check if this is pair tag', 'end_tag_index', '=', '_indexOfEndTag', '(', 'istack', '[', 'index', ':', ']', ')', 'if', 'end_tag_index', '==', '0', 'and', 'neither_nonpair_or_end_or_comment', '(', 'el', ')', ':', 'el', '.', 'isNonPairTag', '(', 'True', ')', 'if', 'end_tag_index', '==', '0', ':', 'if', 'not', 'el', '.', 'isEndTag', '(', ')', ':', 'ostack', '.', 'append', '(', 'el', ')', 'else', ':', 'el', '.', 'childs', '=', '_parseDOM', '(', 'istack', '[', 'index', '+', '1', ':', 'end_tag_index', '+', 'index', ']', ')', 'el', '.', 'endtag', '=', 'istack', '[', 'end_tag_index', '+', 'index', ']', '# reference to endtag', 'el', '.', 'endtag', '.', 'openertag', '=', 'el', 'ostack', '.', 'append', '(', 'el', ')', 'ostack', '.', 'append', '(', 'el', '.', 'endtag', ')', 'index', '=', 'end_tag_index', '+', 'index', 'index', '+=', '1', 'return', 'ostack'] | Recursively go through element array and create DOM.
Args:
istack (list): List of :class:`.HTMLElement` objects.
Returns:
list: DOM tree as list. | ['Recursively', 'go', 'through', 'element', 'array', 'and', 'create', 'DOM', '.'] | train | https://github.com/Bystroushaak/pyDHTMLParser/blob/4756f93dd048500b038ece2323fe26e46b6bfdea/src/dhtmlparser/__init__.py#L186-L227 |
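A sketch of how the helper slots into parsing; using _raw_split as the tokenizer and passing each token to HTMLElement are assumptions based on how this module usually builds the element stack.
istack = [HTMLElement(tok) for tok in _raw_split('<div><p>hi</p></div>')]
dom = _parseDOM(istack)
# dom[0] is the <div>; its .childs hold the paired <p> element and its end tag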
2,442 | ghukill/pyfc4 | pyfc4/models.py | Repository.create_resource | def create_resource(self, resource_type=None, uri=None):
'''
Convenience method for creating a new resource
Note: A Resource is instantiated, but is not yet created. Still requires resource.create().
Args:
uri (rdflib.term.URIRef, str): uri of resource to create
resource_type (NonRDFSource (Binary), BasicContainer, DirectContainer, IndirectContainer): resource type to create
Returns:
(NonRDFSource (Binary), BasicContainer, DirectContainer, IndirectContainer): instance of appropriate type
'''
if resource_type in [NonRDFSource, Binary, BasicContainer, DirectContainer, IndirectContainer]:
return resource_type(self, uri)
else:
raise TypeError("expecting Resource type, such as BasicContainer or NonRDFSource") | python | def create_resource(self, resource_type=None, uri=None):
'''
Convenience method for creating a new resource
Note: A Resource is instantiated, but is not yet created. Still requires resource.create().
Args:
uri (rdflib.term.URIRef, str): uri of resource to create
resource_type (NonRDFSource (Binary), BasicContainer, DirectContainer, IndirectContainer): resource type to create
Returns:
(NonRDFSource (Binary), BasicContainer, DirectContainer, IndirectContainer): instance of appropriate type
'''
if resource_type in [NonRDFSource, Binary, BasicContainer, DirectContainer, IndirectContainer]:
return resource_type(self, uri)
else:
raise TypeError("expecting Resource type, such as BasicContainer or NonRDFSource") | ['def', 'create_resource', '(', 'self', ',', 'resource_type', '=', 'None', ',', 'uri', '=', 'None', ')', ':', 'if', 'resource_type', 'in', '[', 'NonRDFSource', ',', 'Binary', ',', 'BasicContainer', ',', 'DirectContainer', ',', 'IndirectContainer', ']', ':', 'return', 'resource_type', '(', 'self', ',', 'uri', ')', 'else', ':', 'raise', 'TypeError', '(', '"expecting Resource type, such as BasicContainer or NonRDFSource"', ')'] | Convenience method for creating a new resource
Note: A Resource is instantiated, but is not yet created. Still requires resource.create().
Args:
uri (rdflib.term.URIRef, str): uri of resource to create
resource_type (NonRDFSource (Binary), BasicContainer, DirectContainer, IndirectContainer): resource type to create
Returns:
(NonRDFSource (Binary), BasicContainer, DirectContainer, IndirectContainer): instance of appropriate type | ['Convenience', 'method', 'for', 'creating', 'a', 'new', 'resource'] | train | https://github.com/ghukill/pyfc4/blob/59011df592f08978c4a901a908862d112a5dcf02/pyfc4/models.py#L141-L159 |
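Usage sketch; `repo` is assumed to be a connected Repository, the target URI is illustrative, and the specify_uri flag is an assumption about .create()'s signature.
bc = repo.create_resource(resource_type=BasicContainer, uri='foo/bar')
bc.create(specify_uri=True)   # the resource only exists server-side after .create()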
2,443 | saltstack/salt | salt/modules/minion.py | list_ | def list_():
'''
Return a list of accepted, denied, unaccepted and rejected keys.
This is the same output as `salt-key -L`
CLI Example:
.. code-block:: bash
salt 'master' minion.list
'''
pki_dir = __salt__['config.get']('pki_dir', '')
# We have to replace the minion/master directories
pki_dir = pki_dir.replace('minion', 'master')
# The source code below is (nearly) a copy of salt.key.Key.list_keys
key_dirs = _check_minions_directories(pki_dir)
ret = {}
for dir_ in key_dirs:
ret[os.path.basename(dir_)] = []
try:
for fn_ in salt.utils.data.sorted_ignorecase(os.listdir(dir_)):
if not fn_.startswith('.'):
if os.path.isfile(os.path.join(dir_, fn_)):
ret[os.path.basename(dir_)].append(fn_)
except (OSError, IOError):
# key dir kind is not created yet, just skip
continue
return ret | python | def list_():
'''
Return a list of accepted, denied, unaccepted and rejected keys.
This is the same output as `salt-key -L`
CLI Example:
.. code-block:: bash
salt 'master' minion.list
'''
pki_dir = __salt__['config.get']('pki_dir', '')
# We have to replace the minion/master directories
pki_dir = pki_dir.replace('minion', 'master')
# The source code below is (nearly) a copy of salt.key.Key.list_keys
key_dirs = _check_minions_directories(pki_dir)
ret = {}
for dir_ in key_dirs:
ret[os.path.basename(dir_)] = []
try:
for fn_ in salt.utils.data.sorted_ignorecase(os.listdir(dir_)):
if not fn_.startswith('.'):
if os.path.isfile(os.path.join(dir_, fn_)):
ret[os.path.basename(dir_)].append(fn_)
except (OSError, IOError):
# key dir kind is not created yet, just skip
continue
return ret | ['def', 'list_', '(', ')', ':', 'pki_dir', '=', '__salt__', '[', "'config.get'", ']', '(', "'pki_dir'", ',', "''", ')', '# We have to replace the minion/master directories', 'pki_dir', '=', 'pki_dir', '.', 'replace', '(', "'minion'", ',', "'master'", ')', '# The source code below is (nearly) a copy of salt.key.Key.list_keys', 'key_dirs', '=', '_check_minions_directories', '(', 'pki_dir', ')', 'ret', '=', '{', '}', 'for', 'dir_', 'in', 'key_dirs', ':', 'ret', '[', 'os', '.', 'path', '.', 'basename', '(', 'dir_', ')', ']', '=', '[', ']', 'try', ':', 'for', 'fn_', 'in', 'salt', '.', 'utils', '.', 'data', '.', 'sorted_ignorecase', '(', 'os', '.', 'listdir', '(', 'dir_', ')', ')', ':', 'if', 'not', 'fn_', '.', 'startswith', '(', "'.'", ')', ':', 'if', 'os', '.', 'path', '.', 'isfile', '(', 'os', '.', 'path', '.', 'join', '(', 'dir_', ',', 'fn_', ')', ')', ':', 'ret', '[', 'os', '.', 'path', '.', 'basename', '(', 'dir_', ')', ']', '.', 'append', '(', 'fn_', ')', 'except', '(', 'OSError', ',', 'IOError', ')', ':', '# key dir kind is not created yet, just skip', 'continue', 'return', 'ret'] | Return a list of accepted, denied, unaccepted and rejected keys.
This is the same output as `salt-key -L`
CLI Example:
.. code-block:: bash
salt 'master' minion.list | ['Return', 'a', 'list', 'of', 'accepted', 'denied', 'unaccepted', 'and', 'rejected', 'keys', '.', 'This', 'is', 'the', 'same', 'output', 'as', 'salt', '-', 'key', '-', 'L'] | train | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/minion.py#L26-L58 |
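An illustrative return value; the exact directory names come from _check_minions_directories, so treat them as assumptions here.
{'minions': ['web01', 'web02'],      # accepted
 'minions_pre': ['db01'],            # unaccepted
 'minions_denied': [],
 'minions_rejected': []}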
2,444 | poppy-project/pypot | pypot/dynamixel/__init__.py | get_port_vendor_info | def get_port_vendor_info(port=None):
""" Return vendor informations of a usb2serial device.
It may depend on the Operating System.
:param string port: port of the usb2serial device
:Example:
Result with a USB2Dynamixel on Linux:
In [1]: import pypot.dynamixel
In [2]: pypot.dynamixel.get_port_vendor_info('/dev/ttyUSB0')
Out[2]: 'USB VID:PID=0403:6001 SNR=A7005LKE' """
port_info_dict = dict((x[0], x[2]) for x in serial.tools.list_ports.comports())
return port_info_dict[port] if port is not None else port_info_dict | python | def get_port_vendor_info(port=None):
""" Return vendor informations of a usb2serial device.
It may depend on the Operating System.
:param string port: port of the usb2serial device
:Example:
Result with a USB2Dynamixel on Linux:
In [1]: import pypot.dynamixel
In [2]: pypot.dynamixel.get_port_vendor_info('/dev/ttyUSB0')
Out[2]: 'USB VID:PID=0403:6001 SNR=A7005LKE' """
port_info_dict = dict((x[0], x[2]) for x in serial.tools.list_ports.comports())
return port_info_dict[port] if port is not None else port_info_dict | ['def', 'get_port_vendor_info', '(', 'port', '=', 'None', ')', ':', 'port_info_dict', '=', 'dict', '(', '(', 'x', '[', '0', ']', ',', 'x', '[', '2', ']', ')', 'for', 'x', 'in', 'serial', '.', 'tools', '.', 'list_ports', '.', 'comports', '(', ')', ')', 'return', 'port_info_dict', '[', 'port', ']', 'if', 'port', 'is', 'not', 'None', 'else', 'port_info_dict'] | Return vendor informations of a usb2serial device.
It may depend on the Operating System.
:param string port: port of the usb2serial device
:Example:
Result with a USB2Dynamixel on Linux:
In [1]: import pypot.dynamixel
In [2]: pypot.dynamixel.get_port_vendor_info('/dev/ttyUSB0')
Out[2]: 'USB VID:PID=0403:6001 SNR=A7005LKE' | ['Return', 'vendor', 'informations', 'of', 'a', 'usb2serial', 'device', '.', 'It', 'may', 'depends', 'on', 'the', 'Operating', 'System', '.', ':', 'param', 'string', 'port', ':', 'port', 'of', 'the', 'usb2serial', 'device'] | train | https://github.com/poppy-project/pypot/blob/d9c6551bbc87d45d9d1f0bc15e35b616d0002afd/pypot/dynamixel/__init__.py#L58-L71 |
2,445 | andrewsnowden/dota2py | dota2py/summary.py | debug_dump | def debug_dump(message, file_prefix="dump"):
"""
Utility while developing to dump message data to play with in the
interpreter
"""
global index
index += 1
with open("%s_%s.dump" % (file_prefix, index), 'w') as f:
f.write(message.SerializeToString())
f.close() | python | def debug_dump(message, file_prefix="dump"):
"""
Utility while developing to dump message data to play with in the
interpreter
"""
global index
index += 1
with open("%s_%s.dump" % (file_prefix, index), 'w') as f:
f.write(message.SerializeToString())
f.close() | ['def', 'debug_dump', '(', 'message', ',', 'file_prefix', '=', '"dump"', ')', ':', 'global', 'index', 'index', '+=', '1', 'with', 'open', '(', '"%s_%s.dump"', '%', '(', 'file_prefix', ',', 'index', ')', ',', "'w'", ')', 'as', 'f', ':', 'f', '.', 'write', '(', 'message', '.', 'SerializeToString', '(', ')', ')', 'f', '.', 'close', '(', ')'] | Utility while developing to dump message data to play with in the
interpreter | ['Utility', 'while', 'developing', 'to', 'dump', 'message', 'data', 'to', 'play', 'with', 'in', 'the', 'interpreter'] | train | https://github.com/andrewsnowden/dota2py/blob/67637f4b9c160ea90c11b7e81545baf350affa7a/dota2py/summary.py#L14-L25 |
2,446 | spookylukey/django-paypal | paypal/pro/models.py | PayPalNVP.process | def process(self, request, item):
"""Do a direct payment."""
warn_untested()
from paypal.pro.helpers import PayPalWPP
wpp = PayPalWPP(request)
# Change the model information into a dict that PayPal can understand.
params = model_to_dict(self, exclude=self.ADMIN_FIELDS)
params['acct'] = self.acct
params['creditcardtype'] = self.creditcardtype
params['expdate'] = self.expdate
params['cvv2'] = self.cvv2
params.update(item)
# Create recurring payment:
if 'billingperiod' in params:
return wpp.createRecurringPaymentsProfile(params, direct=True)
# Create single payment:
else:
return wpp.doDirectPayment(params) | python | def process(self, request, item):
"""Do a direct payment."""
warn_untested()
from paypal.pro.helpers import PayPalWPP
wpp = PayPalWPP(request)
# Change the model information into a dict that PayPal can understand.
params = model_to_dict(self, exclude=self.ADMIN_FIELDS)
params['acct'] = self.acct
params['creditcardtype'] = self.creditcardtype
params['expdate'] = self.expdate
params['cvv2'] = self.cvv2
params.update(item)
# Create recurring payment:
if 'billingperiod' in params:
return wpp.createRecurringPaymentsProfile(params, direct=True)
# Create single payment:
else:
return wpp.doDirectPayment(params) | ['def', 'process', '(', 'self', ',', 'request', ',', 'item', ')', ':', 'warn_untested', '(', ')', 'from', 'paypal', '.', 'pro', '.', 'helpers', 'import', 'PayPalWPP', 'wpp', '=', 'PayPalWPP', '(', 'request', ')', '# Change the model information into a dict that PayPal can understand.', 'params', '=', 'model_to_dict', '(', 'self', ',', 'exclude', '=', 'self', '.', 'ADMIN_FIELDS', ')', 'params', '[', "'acct'", ']', '=', 'self', '.', 'acct', 'params', '[', "'creditcardtype'", ']', '=', 'self', '.', 'creditcardtype', 'params', '[', "'expdate'", ']', '=', 'self', '.', 'expdate', 'params', '[', "'cvv2'", ']', '=', 'self', '.', 'cvv2', 'params', '.', 'update', '(', 'item', ')', '# Create recurring payment:', 'if', "'billingperiod'", 'in', 'params', ':', 'return', 'wpp', '.', 'createRecurringPaymentsProfile', '(', 'params', ',', 'direct', '=', 'True', ')', '# Create single payment:', 'else', ':', 'return', 'wpp', '.', 'doDirectPayment', '(', 'params', ')'] | Do a direct payment. | ['Do', 'a', 'direct', 'payment', '.'] | train | https://github.com/spookylukey/django-paypal/blob/b07d0a3ad91b5c5fe7bb27be3e5d70aabcdef76f/paypal/pro/models.py#L130-L150 |
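A sketch of the two branches; the NVP field names in the item dicts are illustrative PayPal parameters, and nvp_obj/request are assumed to exist.
single = {'amt': '10.00', 'paymentaction': 'Sale'}
recurring = {'amt': '10.00', 'billingperiod': 'Month', 'billingfrequency': '1'}
nvp_obj.process(request, single)      # -> wpp.doDirectPayment(params)
nvp_obj.process(request, recurring)   # -> wpp.createRecurringPaymentsProfile(params, direct=True)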
2,447 | payu-org/payu | payu/models/um.py | date_to_um_dump_date | def date_to_um_dump_date(date):
"""
Convert a time date object to a um dump format date which is
<decade><year><month><day>0
To accommodate two digit months and days the UM uses letters. e.g. 1st oct
is writing 01a10.
"""
assert(date.month <= 12)
decade = date.year // 10
# UM can only handle 36 decades then goes back to the beginning.
decade = decade % 36
year = date.year % 10
month = date.month
day = date.day
um_d = string.digits + string.ascii_letters[:26]
um_dump_date = (
'{decade}{year}{month}{day}0'.format(
decade=um_d[decade],
year=um_d[year],
month=um_d[month],
day=um_d[day]
)
)
return um_dump_date | python | def date_to_um_dump_date(date):
"""
Convert a time date object to a um dump format date which is
<decade><year><month><day>0
To accommodate two digit months and days the UM uses letters. e.g. 1st oct
is writing 01a10.
"""
assert(date.month <= 12)
decade = date.year // 10
# UM can only handle 36 decades then goes back to the beginning.
decade = decade % 36
year = date.year % 10
month = date.month
day = date.day
um_d = string.digits + string.ascii_letters[:26]
um_dump_date = (
'{decade}{year}{month}{day}0'.format(
decade=um_d[decade],
year=um_d[year],
month=um_d[month],
day=um_d[day]
)
)
return um_dump_date | ['def', 'date_to_um_dump_date', '(', 'date', ')', ':', 'assert', '(', 'date', '.', 'month', '<=', '12', ')', 'decade', '=', 'date', '.', 'year', '//', '10', '# UM can only handle 36 decades then goes back to the beginning.', 'decade', '=', 'decade', '%', '36', 'year', '=', 'date', '.', 'year', '%', '10', 'month', '=', 'date', '.', 'month', 'day', '=', 'date', '.', 'day', 'um_d', '=', 'string', '.', 'digits', '+', 'string', '.', 'ascii_letters', '[', ':', '26', ']', 'um_dump_date', '=', '(', "'{decade}{year}{month}{day}0'", '.', 'format', '(', 'decade', '=', 'um_d', '[', 'decade', ']', ',', 'year', '=', 'um_d', '[', 'year', ']', ',', 'month', '=', 'um_d', '[', 'month', ']', ',', 'day', '=', 'um_d', '[', 'day', ']', ')', ')', 'return', 'um_dump_date'] | Convert a time date object to a um dump format date which is
<decade><year><month><day>0
To accommodate two digit months and days the UM uses letters. e.g. 1st oct
is writing 01a10. | ['Convert', 'a', 'time', 'date', 'object', 'to', 'a', 'um', 'dump', 'format', 'date', 'which', 'is', '<decade', '>', '<year', '>', '<month', '>', '<day', '>', '0'] | train | https://github.com/payu-org/payu/blob/1442a9a226012eff248b8097cc1eaabc3e224867/payu/models/um.py#L181-L209 |
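A worked example of the encoding described above:
import datetime
date_to_um_dump_date(datetime.date(2001, 10, 1))
# decade = 200 % 36 = 20 -> 'k', year = 1 -> '1', month = 10 -> 'a', day = 1 -> '1'
# result: 'k1a10'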
2,448 | cltk/cltk | cltk/prosody/latin/clausulae_analysis.py | Clausulae.clausulae_analysis | def clausulae_analysis(prosody):
"""
Return dictionary in which the key is a type of clausula and the value is its frequency.
:param prosody: the prosody of a prose text (must be in the format of the scansion produced by the scanner classes.
:return: dictionary of prosody
"""
prosody = ''.join(prosody)
return {
'cretic + trochee': prosody.count('¯˘¯¯x'),
'4th paeon + trochee': prosody.count('˘˘˘¯¯x'),
'1st paeon + trochee': prosody.count('¯˘˘˘¯x'),
'substituted cretic + trochee': prosody.count('˘˘˘˘˘¯x'),
'1st paeon + anapest': prosody.count('¯˘˘˘˘˘x'),
'double cretic': prosody.count('¯˘¯¯˘x'),
'4th paeon + cretic': prosody.count('˘˘˘¯¯˘x'),
'molossus + cretic': prosody.count('¯¯¯¯˘x'),
'double trochee': prosody.count('¯˘¯x'),
'molossus + double trochee': prosody.count('¯¯¯¯˘¯x'),
'cretic + double trochee': prosody.count('¯˘¯¯˘¯x'),
'dactyl + double trochee': prosody.count('¯˘˘¯˘¯x'),
'choriamb + double trochee': prosody.count('¯˘˘¯¯˘¯x'),
'cretic + iamb': prosody.count('¯˘¯˘x'),
'molossus + iamb': prosody.count('¯¯¯˘x'),
'double spondee': prosody.count('¯¯¯x'),
'cretic + double spondee': prosody.count('¯˘¯¯¯¯x'),
'heroic': prosody.count('¯˘˘¯x')
} | python | def clausulae_analysis(prosody):
"""
Return dictionary in which the key is a type of clausula and the value is its frequency.
:param prosody: the prosody of a prose text (must be in the format of the scansion produced by the scanner classes).
:return: dictionary of prosody
"""
prosody = ''.join(prosody)
return {
'cretic + trochee': prosody.count('¯˘¯¯x'),
'4th paeon + trochee': prosody.count('˘˘˘¯¯x'),
'1st paeon + trochee': prosody.count('¯˘˘˘¯x'),
'substituted cretic + trochee': prosody.count('˘˘˘˘˘¯x'),
'1st paeon + anapest': prosody.count('¯˘˘˘˘˘x'),
'double cretic': prosody.count('¯˘¯¯˘x'),
'4th paeon + cretic': prosody.count('˘˘˘¯¯˘x'),
'molossus + cretic': prosody.count('¯¯¯¯˘x'),
'double trochee': prosody.count('¯˘¯x'),
'molossus + double trochee': prosody.count('¯¯¯¯˘¯x'),
'cretic + double trochee': prosody.count('¯˘¯¯˘¯x'),
'dactyl + double trochee': prosody.count('¯˘˘¯˘¯x'),
'choriamb + double trochee': prosody.count('¯˘˘¯¯˘¯x'),
'cretic + iamb': prosody.count('¯˘¯˘x'),
'molossus + iamb': prosody.count('¯¯¯˘x'),
'double spondee': prosody.count('¯¯¯x'),
'cretic + double spondee': prosody.count('¯˘¯¯¯¯x'),
'heroic': prosody.count('¯˘˘¯x')
} | ['def', 'clausulae_analysis', '(', 'prosody', ')', ':', 'prosody', '=', "''", '.', 'join', '(', 'prosody', ')', 'return', '{', "'cretic + trochee'", ':', 'prosody', '.', 'count', '(', "'¯˘¯¯x'),", '', '', "'4th paeon + trochee'", ':', 'prosody', '.', 'count', '(', "'˘˘˘¯¯x'),", '', '', "'1st paeon + trochee'", ':', 'prosody', '.', 'count', '(', "'¯˘˘˘¯x'),", '', '', "'substituted cretic + trochee'", ':', 'prosody', '.', 'count', '(', "'˘˘˘˘˘¯x'),", '', '', "'1st paeon + anapest'", ':', 'prosody', '.', 'count', '(', "'¯˘˘˘˘˘x'),", '', '', "'double cretic'", ':', 'prosody', '.', 'count', '(', "'¯˘¯¯˘x'),", '', '', "'4th paeon + cretic'", ':', 'prosody', '.', 'count', '(', "'˘˘˘¯¯˘x'),", '', '', "'molossus + cretic'", ':', 'prosody', '.', 'count', '(', "'¯¯¯¯˘x'),", '', '', "'double trochee'", ':', 'prosody', '.', 'count', '(', "'¯˘¯x'),", '', '', "'molossus + double trochee'", ':', 'prosody', '.', 'count', '(', "'¯¯¯¯˘¯x'),", '', '', "'cretic + double trochee'", ':', 'prosody', '.', 'count', '(', "'¯˘¯¯˘¯x'),", '', '', "'dactyl + double trochee'", ':', 'prosody', '.', 'count', '(', "'¯˘˘¯˘¯x'),", '', '', "'choriamb + double trochee'", ':', 'prosody', '.', 'count', '(', "'¯˘˘¯¯˘¯x'),", '', '', "'cretic + iamb'", ':', 'prosody', '.', 'count', '(', "'¯˘¯˘x'),", '', '', "'molossus + iamb'", ':', 'prosody', '.', 'count', '(', "'¯¯¯˘x'),", '', '', "'double spondee'", ':', 'prosody', '.', 'count', '(', "'¯¯¯x'),", '', '', "'cretic + double spondee'", ':', 'prosody', '.', 'count', '(', "'¯˘¯¯¯¯x'),", '', '', "'heroic'", ':', 'prosody', '.', 'count', '(', "'¯˘˘¯x')", '', '}'] | Return dictionary in which the key is a type of clausula and the value is its frequency.
:param prosody: the prosody of a prose text (must be in the format of the scansion produced by the scanner classes).
:return: dictionary of prosody | ['Return', 'dictionary', 'in', 'which', 'the', 'key', 'is', 'a', 'type', 'of', 'clausula', 'and', 'the', 'value', 'is', 'its', 'frequency', '.', ':', 'param', 'prosody', ':', 'the', 'prosody', 'of', 'a', 'prose', 'text', '(', 'must', 'be', 'in', 'the', 'format', 'of', 'the', 'scansion', 'produced', 'by', 'the', 'scanner', 'classes', '.', ':', 'return', ':', 'dictionary', 'of', 'prosody'] | train | https://github.com/cltk/cltk/blob/ed9c025b7ec43c949481173251b70e05e4dffd27/cltk/prosody/latin/clausulae_analysis.py#L21-L49 |
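A small sketch; since the signature takes only `prosody`, it is called here through the class rather than an instance, and the two scansion strings are illustrative.
Clausulae.clausulae_analysis(['¯˘¯¯x', '¯˘˘¯x'])
# -> counts 1 'cretic + trochee' and 1 'heroic'; every other clausula is 0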
2,449 | awslabs/aws-shell | awsshell/app.py | AWSShell.load_config | def load_config(self):
"""Load the config from the config file or template."""
config = Config()
self.config_obj = config.load('awsshellrc')
self.config_section = self.config_obj['aws-shell']
self.model_completer.match_fuzzy = self.config_section.as_bool(
'match_fuzzy')
self.enable_vi_bindings = self.config_section.as_bool(
'enable_vi_bindings')
self.show_completion_columns = self.config_section.as_bool(
'show_completion_columns')
self.show_help = self.config_section.as_bool('show_help')
self.theme = self.config_section['theme'] | python | def load_config(self):
"""Load the config from the config file or template."""
config = Config()
self.config_obj = config.load('awsshellrc')
self.config_section = self.config_obj['aws-shell']
self.model_completer.match_fuzzy = self.config_section.as_bool(
'match_fuzzy')
self.enable_vi_bindings = self.config_section.as_bool(
'enable_vi_bindings')
self.show_completion_columns = self.config_section.as_bool(
'show_completion_columns')
self.show_help = self.config_section.as_bool('show_help')
self.theme = self.config_section['theme'] | ['def', 'load_config', '(', 'self', ')', ':', 'config', '=', 'Config', '(', ')', 'self', '.', 'config_obj', '=', 'config', '.', 'load', '(', "'awsshellrc'", ')', 'self', '.', 'config_section', '=', 'self', '.', 'config_obj', '[', "'aws-shell'", ']', 'self', '.', 'model_completer', '.', 'match_fuzzy', '=', 'self', '.', 'config_section', '.', 'as_bool', '(', "'match_fuzzy'", ')', 'self', '.', 'enable_vi_bindings', '=', 'self', '.', 'config_section', '.', 'as_bool', '(', "'enable_vi_bindings'", ')', 'self', '.', 'show_completion_columns', '=', 'self', '.', 'config_section', '.', 'as_bool', '(', "'show_completion_columns'", ')', 'self', '.', 'show_help', '=', 'self', '.', 'config_section', '.', 'as_bool', '(', "'show_help'", ')', 'self', '.', 'theme', '=', 'self', '.', 'config_section', '[', "'theme'", ']'] | Load the config from the config file or template. | ['Load', 'the', 'config', 'from', 'the', 'config', 'file', 'or', 'template', '.'] | train | https://github.com/awslabs/aws-shell/blob/8950f03d9d720879890af6c11537b8f9789ce5a9/awsshell/app.py#L258-L270 |
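An illustrative awsshellrc [aws-shell] section covering the options read above (values are examples, not defaults):
# [aws-shell]
# match_fuzzy = True
# enable_vi_bindings = False
# show_completion_columns = True
# show_help = True
# theme = vim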
2,450 | saltstack/salt | salt/states/esxdatacenter.py | datacenter_configured | def datacenter_configured(name):
'''
Makes sure a datacenter exists.
If the state is run by an ``esxdatacenter`` minion, the name of the
datacenter is retrieved from the proxy details, otherwise the datacenter
has the same name as the state.
Supported proxies: esxdatacenter
name:
Datacenter name. Ignored if the proxytype is ``esxdatacenter``.
'''
proxy_type = __salt__['vsphere.get_proxy_type']()
if proxy_type == 'esxdatacenter':
dc_name = __salt__['esxdatacenter.get_details']()['datacenter']
else:
dc_name = name
log.info('Running datacenter_configured for datacenter \'%s\'', dc_name)
ret = {'name': name,
'changes': {},
'result': None,
'comment': 'Default'}
comments = []
si = None
try:
si = __salt__['vsphere.get_service_instance_via_proxy']()
dcs = __salt__['vsphere.list_datacenters_via_proxy'](
datacenter_names=[dc_name], service_instance=si)
if not dcs:
if __opts__['test']:
comments.append('State will create '
'datacenter \'{0}\'.'.format(dc_name))
else:
log.debug('Creating datacenter \'%s\'', dc_name)
__salt__['vsphere.create_datacenter'](dc_name, si)
comments.append('Created datacenter \'{0}\'.'.format(dc_name))
log.info(comments[-1])
ret['changes'].update({'new': {'name': dc_name}})
else:
comments.append('Datacenter \'{0}\' already exists. Nothing to be '
'done.'.format(dc_name))
log.info(comments[-1])
__salt__['vsphere.disconnect'](si)
ret['comment'] = '\n'.join(comments)
ret['result'] = None if __opts__['test'] and ret['changes'] else True
return ret
except salt.exceptions.CommandExecutionError as exc:
log.error('Error: %s', exc)
if si:
__salt__['vsphere.disconnect'](si)
ret.update({
'result': False if not __opts__['test'] else None,
'comment': six.text_type(exc)})
return ret | python | def datacenter_configured(name):
'''
Makes sure a datacenter exists.
If the state is run by an ``esxdatacenter`` minion, the name of the
datacenter is retrieved from the proxy details, otherwise the datacenter
has the same name as the state.
Supported proxies: esxdatacenter
name:
Datacenter name. Ignored if the proxytype is ``esxdatacenter``.
'''
proxy_type = __salt__['vsphere.get_proxy_type']()
if proxy_type == 'esxdatacenter':
dc_name = __salt__['esxdatacenter.get_details']()['datacenter']
else:
dc_name = name
log.info('Running datacenter_configured for datacenter \'%s\'', dc_name)
ret = {'name': name,
'changes': {},
'result': None,
'comment': 'Default'}
comments = []
si = None
try:
si = __salt__['vsphere.get_service_instance_via_proxy']()
dcs = __salt__['vsphere.list_datacenters_via_proxy'](
datacenter_names=[dc_name], service_instance=si)
if not dcs:
if __opts__['test']:
comments.append('State will create '
'datacenter \'{0}\'.'.format(dc_name))
else:
log.debug('Creating datacenter \'%s\'', dc_name)
__salt__['vsphere.create_datacenter'](dc_name, si)
comments.append('Created datacenter \'{0}\'.'.format(dc_name))
log.info(comments[-1])
ret['changes'].update({'new': {'name': dc_name}})
else:
comments.append('Datacenter \'{0}\' already exists. Nothing to be '
'done.'.format(dc_name))
log.info(comments[-1])
__salt__['vsphere.disconnect'](si)
ret['comment'] = '\n'.join(comments)
ret['result'] = None if __opts__['test'] and ret['changes'] else True
return ret
except salt.exceptions.CommandExecutionError as exc:
log.error('Error: %s', exc)
if si:
__salt__['vsphere.disconnect'](si)
ret.update({
'result': False if not __opts__['test'] else None,
'comment': six.text_type(exc)})
return ret | ['def', 'datacenter_configured', '(', 'name', ')', ':', 'proxy_type', '=', '__salt__', '[', "'vsphere.get_proxy_type'", ']', '(', ')', 'if', 'proxy_type', '==', "'esxdatacenter'", ':', 'dc_name', '=', '__salt__', '[', "'esxdatacenter.get_details'", ']', '(', ')', '[', "'datacenter'", ']', 'else', ':', 'dc_name', '=', 'name', 'log', '.', 'info', '(', "'Running datacenter_configured for datacenter \\'%s\\''", ',', 'dc_name', ')', 'ret', '=', '{', "'name'", ':', 'name', ',', "'changes'", ':', '{', '}', ',', "'result'", ':', 'None', ',', "'comment'", ':', "'Default'", '}', 'comments', '=', '[', ']', 'si', '=', 'None', 'try', ':', 'si', '=', '__salt__', '[', "'vsphere.get_service_instance_via_proxy'", ']', '(', ')', 'dcs', '=', '__salt__', '[', "'vsphere.list_datacenters_via_proxy'", ']', '(', 'datacenter_names', '=', '[', 'dc_name', ']', ',', 'service_instance', '=', 'si', ')', 'if', 'not', 'dcs', ':', 'if', '__opts__', '[', "'test'", ']', ':', 'comments', '.', 'append', '(', "'State will create '", "'datacenter \\'{0}\\'.'", '.', 'format', '(', 'dc_name', ')', ')', 'else', ':', 'log', '.', 'debug', '(', "'Creating datacenter \\'%s\\''", ',', 'dc_name', ')', '__salt__', '[', "'vsphere.create_datacenter'", ']', '(', 'dc_name', ',', 'si', ')', 'comments', '.', 'append', '(', "'Created datacenter \\'{0}\\'.'", '.', 'format', '(', 'dc_name', ')', ')', 'log', '.', 'info', '(', 'comments', '[', '-', '1', ']', ')', 'ret', '[', "'changes'", ']', '.', 'update', '(', '{', "'new'", ':', '{', "'name'", ':', 'dc_name', '}', '}', ')', 'else', ':', 'comments', '.', 'append', '(', "'Datacenter \\'{0}\\' already exists. Nothing to be '", "'done.'", '.', 'format', '(', 'dc_name', ')', ')', 'log', '.', 'info', '(', 'comments', '[', '-', '1', ']', ')', '__salt__', '[', "'vsphere.disconnect'", ']', '(', 'si', ')', 'ret', '[', "'comment'", ']', '=', "'\\n'", '.', 'join', '(', 'comments', ')', 'ret', '[', "'result'", ']', '=', 'None', 'if', '__opts__', '[', "'test'", ']', 'and', 'ret', '[', "'changes'", ']', 'else', 'True', 'return', 'ret', 'except', 'salt', '.', 'exceptions', '.', 'CommandExecutionError', 'as', 'exc', ':', 'log', '.', 'error', '(', "'Error: %s'", ',', 'exc', ')', 'if', 'si', ':', '__salt__', '[', "'vsphere.disconnect'", ']', '(', 'si', ')', 'ret', '.', 'update', '(', '{', "'result'", ':', 'False', 'if', 'not', '__opts__', '[', "'test'", ']', 'else', 'None', ',', "'comment'", ':', 'six', '.', 'text_type', '(', 'exc', ')', '}', ')', 'return', 'ret'] | Makes sure a datacenter exists.
If the state is run by an ``esxdatacenter`` minion, the name of the
datacenter is retrieved from the proxy details, otherwise the datacenter
has the same name as the state.
Supported proxies: esxdatacenter
name:
Datacenter name. Ignored if the proxytype is ``esxdatacenter``. | ['Makes', 'sure', 'a', 'datacenter', 'exists', '.'] | train | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/esxdatacenter.py#L72-L126 |
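A sketch of the return shape when a datacenter has to be created; the datacenter name and the values shown are illustrative.
ret = datacenter_configured('dc01')
# {'name': 'dc01',
#  'changes': {'new': {'name': 'dc01'}},
#  'result': True,
#  'comment': "Created datacenter 'dc01'."}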
2,451 | saltstack/salt | salt/modules/infoblox.py | get_host_ipv4 | def get_host_ipv4(name=None, mac=None, allow_array=False, **api_opts):
'''
Get ipv4 address from host record.
Use `allow_array` to return possible multiple values.
CLI Examples:
.. code-block:: bash
salt-call infoblox.get_host_ipv4 host=localhost.domain.com
salt-call infoblox.get_host_ipv4 mac=00:50:56:84:6e:ae
'''
data = get_host(name=name, mac=mac, **api_opts)
if data and 'ipv4addrs' in data:
l = []
for a in data['ipv4addrs']:
if 'ipv4addr' in a:
l.append(a['ipv4addr'])
if allow_array:
return l
if l:
return l[0]
return None | python | def get_host_ipv4(name=None, mac=None, allow_array=False, **api_opts):
'''
Get ipv4 address from host record.
Use `allow_array` to return possible multiple values.
CLI Examples:
.. code-block:: bash
salt-call infoblox.get_host_ipv4 host=localhost.domain.com
salt-call infoblox.get_host_ipv4 mac=00:50:56:84:6e:ae
'''
data = get_host(name=name, mac=mac, **api_opts)
if data and 'ipv4addrs' in data:
l = []
for a in data['ipv4addrs']:
if 'ipv4addr' in a:
l.append(a['ipv4addr'])
if allow_array:
return l
if l:
return l[0]
return None | ['def', 'get_host_ipv4', '(', 'name', '=', 'None', ',', 'mac', '=', 'None', ',', 'allow_array', '=', 'False', ',', '*', '*', 'api_opts', ')', ':', 'data', '=', 'get_host', '(', 'name', '=', 'name', ',', 'mac', '=', 'mac', ',', '*', '*', 'api_opts', ')', 'if', 'data', 'and', "'ipv4addrs'", 'in', 'data', ':', 'l', '=', '[', ']', 'for', 'a', 'in', 'data', '[', "'ipv4addrs'", ']', ':', 'if', "'ipv4addr'", 'in', 'a', ':', 'l', '.', 'append', '(', 'a', '[', "'ipv4addr'", ']', ')', 'if', 'allow_array', ':', 'return', 'l', 'if', 'l', ':', 'return', 'l', '[', '0', ']', 'return', 'None'] | Get ipv4 address from host record.
Use `allow_array` to return possible multiple values.
CLI Examples:
.. code-block:: bash
salt-call infoblox.get_host_ipv4 host=localhost.domain.com
salt-call infoblox.get_host_ipv4 mac=00:50:56:84:6e:ae | ['Get', 'ipv4', 'address', 'from', 'host', 'record', '.'] | train | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/infoblox.py#L410-L433 |
2,452 | awslabs/serverless-application-model | samtranslator/swagger/swagger.py | SwaggerEditor.has_integration | def has_integration(self, path, method):
"""
Checks if an API Gateway integration is already present at the given path/method
:param string path: Path name
:param string method: HTTP method
:return: True, if an API Gateway integration is already present
"""
method = self._normalize_method_name(method)
path_dict = self.get_path(path)
return self.has_path(path, method) and \
isinstance(path_dict[method], dict) and \
self.method_has_integration(path_dict[method]) | python | def has_integration(self, path, method):
"""
Checks if an API Gateway integration is already present at the given path/method
:param string path: Path name
:param string method: HTTP method
:return: True, if an API Gateway integration is already present
"""
method = self._normalize_method_name(method)
path_dict = self.get_path(path)
return self.has_path(path, method) and \
isinstance(path_dict[method], dict) and \
self.method_has_integration(path_dict[method]) | ['def', 'has_integration', '(', 'self', ',', 'path', ',', 'method', ')', ':', 'method', '=', 'self', '.', '_normalize_method_name', '(', 'method', ')', 'path_dict', '=', 'self', '.', 'get_path', '(', 'path', ')', 'return', 'self', '.', 'has_path', '(', 'path', ',', 'method', ')', 'and', 'isinstance', '(', 'path_dict', '[', 'method', ']', ',', 'dict', ')', 'and', 'self', '.', 'method_has_integration', '(', 'path_dict', '[', 'method', ']', ')'] | Checks if an API Gateway integration is already present at the given path/method
:param string path: Path name
:param string method: HTTP method
:return: True, if an API Gateway integration is already present | ['Checks', 'if', 'an', 'API', 'Gateway', 'integration', 'is', 'already', 'present', 'at', 'the', 'given', 'path', '/', 'method'] | train | https://github.com/awslabs/serverless-application-model/blob/cccb0c96b5c91e53355ebc07e542467303a5eedd/samtranslator/swagger/swagger.py#L99-L112 |
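Illustrative sketch, not taken from the repository above: what "an integration is present" means for a Swagger path/method, shown on a hand-written fragment rather than through the SwaggerEditor API; the x-amazon-apigateway-integration key is assumed to be what method_has_integration (not shown in this row) looks for.
    path_dict = {'get': {'x-amazon-apigateway-integration': {'type': 'aws_proxy'}}}
    method = 'get'
    print(isinstance(path_dict.get(method), dict)
          and 'x-amazon-apigateway-integration' in path_dict[method])  # True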
2,453 | DataDog/integrations-core | vsphere/datadog_checks/vsphere/objects_queue.py | ObjectsQueue.pop | def pop(self, key, resource_type):
"""
Extract an object from the list.
If the key is not in the cache, this will raise a KeyError.
If the list is empty, method will return None
"""
with self._objects_queue_lock:
objects = self._objects_queue[key].get(resource_type, [])
return objects.pop() if objects else None | python | def pop(self, key, resource_type):
"""
Extract an object from the list.
If the key is not in the cache, this will raise a KeyError.
If the list is empty, method will return None
"""
with self._objects_queue_lock:
objects = self._objects_queue[key].get(resource_type, [])
return objects.pop() if objects else None | ['def', 'pop', '(', 'self', ',', 'key', ',', 'resource_type', ')', ':', 'with', 'self', '.', '_objects_queue_lock', ':', 'objects', '=', 'self', '.', '_objects_queue', '[', 'key', ']', '.', 'get', '(', 'resource_type', ',', '[', ']', ')', 'return', 'objects', '.', 'pop', '(', ')', 'if', 'objects', 'else', 'None'] | Extract an object from the list.
If the key is not in the cache, this will raise a KeyError.
If the list is empty, method will return None | ['Extract', 'an', 'object', 'from', 'the', 'list', '.', 'If', 'the', 'key', 'is', 'not', 'in', 'the', 'cache', 'this', 'will', 'raise', 'a', 'KeyError', '.', 'If', 'the', 'list', 'is', 'empty', 'method', 'will', 'return', 'None'] | train | https://github.com/DataDog/integrations-core/blob/ebd41c873cf9f97a8c51bf9459bc6a7536af8acd/vsphere/datadog_checks/vsphere/objects_queue.py#L37-L45 |
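Illustrative sketch, not taken from the repository above: the pop-or-None idiom used by ObjectsQueue.pop, demonstrated on a plain dict standing in for the per-instance queue; the key and resource names are invented, and the locking and KeyError behaviour are omitted.
    objects_queue = {'vcenter1': {'vm': ['vm-12', 'vm-7'], 'host': []}}
    objects = objects_queue['vcenter1'].get('vm', [])
    print(objects.pop() if objects else None)  # 'vm-7'
    objects = objects_queue['vcenter1'].get('datastore', [])
    print(objects.pop() if objects else None)  # None (missing or empty resource type)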
2,454 | gwastro/pycbc | pycbc/workflow/segment.py | get_sci_segs_for_ifo | def get_sci_segs_for_ifo(ifo, cp, start_time, end_time, out_dir, tags=None):
"""
Obtain science segments for the selected ifo
Parameters
-----------
ifo : string
The string describing the ifo to obtain science times for.
start_time : gps time (either int/LIGOTimeGPS)
The time at which to begin searching for segments.
end_time : gps time (either int/LIGOTimeGPS)
The time at which to stop searching for segments.
out_dir : path
The directory in which output will be stored.
tag : string, optional (default=None)
Use this to specify a tag. This can be used if this module is being
called more than once to give call specific configuration (by setting
options in [workflow-datafind-${TAG}] rather than [workflow-datafind]).
This is also used to tag the Files returned by the class to uniqueify
the Files and uniqueify the actual filename.
Returns
--------
sci_segs : ligo.segments.segmentlist
The segmentlist generated by this call
sci_xml_file : pycbc.workflow.core.SegFile
The workflow File object corresponding to this science segments file.
out_sci_seg_name : string
The name of the output segment list in the output XML file.
"""
if tags is None:
tags = []
seg_valid_seg = segments.segment([start_time,end_time])
sci_seg_name = cp.get_opt_tags(
"workflow-segments", "segments-%s-science-name" %(ifo.lower()), tags)
sci_seg_url = cp.get_opt_tags(
"workflow-segments", "segments-database-url", tags)
# NOTE: ligolw_segment_query returns slightly strange output. The output
# segment list is put in with name "RESULT". So this is hardcoded here
out_sci_seg_name = "RESULT"
if tags:
sci_xml_file_path = os.path.join(
out_dir, "%s-SCIENCE_SEGMENTS_%s.xml" \
%(ifo.upper(), '_'.join(tags)))
tag_list=tags + ['SCIENCE']
else:
sci_xml_file_path = os.path.join(
out_dir, "%s-SCIENCE_SEGMENTS.xml" %(ifo.upper()) )
tag_list = ['SCIENCE']
if file_needs_generating(sci_xml_file_path, cp, tags=tags):
seg_find_call = [ resolve_url(cp.get("executables","segment_query"),
permissions=stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR),
"--query-segments",
"--segment-url", sci_seg_url,
"--gps-start-time", str(start_time),
"--gps-end-time", str(end_time),
"--include-segments", sci_seg_name,
"--output-file", sci_xml_file_path ]
make_external_call(seg_find_call, out_dir=os.path.join(out_dir,'logs'),
out_basename='%s-science-call' %(ifo.lower()) )
# Yes its yucky to generate a file and then read it back in.
sci_xml_file_path = os.path.abspath(sci_xml_file_path)
sci_xml_file = SegFile.from_segment_xml(sci_xml_file_path, tags=tag_list,
valid_segment=seg_valid_seg)
# NOTE: ligolw_segment_query returns slightly strange output. The output
# segment_summary output does not use RESULT. Therefore move the
# segment_summary across.
sci_xml_file.seg_summ_dict[ifo.upper() + ":" + out_sci_seg_name] = \
sci_xml_file.seg_summ_dict[':'.join(sci_seg_name.split(':')[0:2])]
sci_segs = sci_xml_file.return_union_seglist()
return sci_segs, sci_xml_file, out_sci_seg_name | python | def get_sci_segs_for_ifo(ifo, cp, start_time, end_time, out_dir, tags=None):
"""
Obtain science segments for the selected ifo
Parameters
-----------
ifo : string
The string describing the ifo to obtain science times for.
start_time : gps time (either int/LIGOTimeGPS)
The time at which to begin searching for segments.
end_time : gps time (either int/LIGOTimeGPS)
The time at which to stop searching for segments.
out_dir : path
The directory in which output will be stored.
tag : string, optional (default=None)
Use this to specify a tag. This can be used if this module is being
called more than once to give call specific configuration (by setting
options in [workflow-datafind-${TAG}] rather than [workflow-datafind]).
This is also used to tag the Files returned by the class to uniqueify
the Files and uniqueify the actual filename.
Returns
--------
sci_segs : ligo.segments.segmentlist
The segmentlist generated by this call
sci_xml_file : pycbc.workflow.core.SegFile
The workflow File object corresponding to this science segments file.
out_sci_seg_name : string
The name of the output segment list in the output XML file.
"""
if tags is None:
tags = []
seg_valid_seg = segments.segment([start_time,end_time])
sci_seg_name = cp.get_opt_tags(
"workflow-segments", "segments-%s-science-name" %(ifo.lower()), tags)
sci_seg_url = cp.get_opt_tags(
"workflow-segments", "segments-database-url", tags)
# NOTE: ligolw_segment_query returns slightly strange output. The output
# segment list is put in with name "RESULT". So this is hardcoded here
out_sci_seg_name = "RESULT"
if tags:
sci_xml_file_path = os.path.join(
out_dir, "%s-SCIENCE_SEGMENTS_%s.xml" \
%(ifo.upper(), '_'.join(tags)))
tag_list=tags + ['SCIENCE']
else:
sci_xml_file_path = os.path.join(
out_dir, "%s-SCIENCE_SEGMENTS.xml" %(ifo.upper()) )
tag_list = ['SCIENCE']
if file_needs_generating(sci_xml_file_path, cp, tags=tags):
seg_find_call = [ resolve_url(cp.get("executables","segment_query"),
permissions=stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR),
"--query-segments",
"--segment-url", sci_seg_url,
"--gps-start-time", str(start_time),
"--gps-end-time", str(end_time),
"--include-segments", sci_seg_name,
"--output-file", sci_xml_file_path ]
make_external_call(seg_find_call, out_dir=os.path.join(out_dir,'logs'),
out_basename='%s-science-call' %(ifo.lower()) )
# Yes its yucky to generate a file and then read it back in.
sci_xml_file_path = os.path.abspath(sci_xml_file_path)
sci_xml_file = SegFile.from_segment_xml(sci_xml_file_path, tags=tag_list,
valid_segment=seg_valid_seg)
# NOTE: ligolw_segment_query returns slightly strange output. The output
# segment_summary output does not use RESULT. Therefore move the
# segment_summary across.
sci_xml_file.seg_summ_dict[ifo.upper() + ":" + out_sci_seg_name] = \
sci_xml_file.seg_summ_dict[':'.join(sci_seg_name.split(':')[0:2])]
sci_segs = sci_xml_file.return_union_seglist()
return sci_segs, sci_xml_file, out_sci_seg_name | ['def', 'get_sci_segs_for_ifo', '(', 'ifo', ',', 'cp', ',', 'start_time', ',', 'end_time', ',', 'out_dir', ',', 'tags', '=', 'None', ')', ':', 'if', 'tags', 'is', 'None', ':', 'tags', '=', '[', ']', 'seg_valid_seg', '=', 'segments', '.', 'segment', '(', '[', 'start_time', ',', 'end_time', ']', ')', 'sci_seg_name', '=', 'cp', '.', 'get_opt_tags', '(', '"workflow-segments"', ',', '"segments-%s-science-name"', '%', '(', 'ifo', '.', 'lower', '(', ')', ')', ',', 'tags', ')', 'sci_seg_url', '=', 'cp', '.', 'get_opt_tags', '(', '"workflow-segments"', ',', '"segments-database-url"', ',', 'tags', ')', '# NOTE: ligolw_segment_query returns slightly strange output. The output', '# segment list is put in with name "RESULT". So this is hardcoded here', 'out_sci_seg_name', '=', '"RESULT"', 'if', 'tags', ':', 'sci_xml_file_path', '=', 'os', '.', 'path', '.', 'join', '(', 'out_dir', ',', '"%s-SCIENCE_SEGMENTS_%s.xml"', '%', '(', 'ifo', '.', 'upper', '(', ')', ',', "'_'", '.', 'join', '(', 'tags', ')', ')', ')', 'tag_list', '=', 'tags', '+', '[', "'SCIENCE'", ']', 'else', ':', 'sci_xml_file_path', '=', 'os', '.', 'path', '.', 'join', '(', 'out_dir', ',', '"%s-SCIENCE_SEGMENTS.xml"', '%', '(', 'ifo', '.', 'upper', '(', ')', ')', ')', 'tag_list', '=', '[', "'SCIENCE'", ']', 'if', 'file_needs_generating', '(', 'sci_xml_file_path', ',', 'cp', ',', 'tags', '=', 'tags', ')', ':', 'seg_find_call', '=', '[', 'resolve_url', '(', 'cp', '.', 'get', '(', '"executables"', ',', '"segment_query"', ')', ',', 'permissions', '=', 'stat', '.', 'S_IRUSR', '|', 'stat', '.', 'S_IWUSR', '|', 'stat', '.', 'S_IXUSR', ')', ',', '"--query-segments"', ',', '"--segment-url"', ',', 'sci_seg_url', ',', '"--gps-start-time"', ',', 'str', '(', 'start_time', ')', ',', '"--gps-end-time"', ',', 'str', '(', 'end_time', ')', ',', '"--include-segments"', ',', 'sci_seg_name', ',', '"--output-file"', ',', 'sci_xml_file_path', ']', 'make_external_call', '(', 'seg_find_call', ',', 'out_dir', '=', 'os', '.', 'path', '.', 'join', '(', 'out_dir', ',', "'logs'", ')', ',', 'out_basename', '=', "'%s-science-call'", '%', '(', 'ifo', '.', 'lower', '(', ')', ')', ')', '# Yes its yucky to generate a file and then read it back in.', 'sci_xml_file_path', '=', 'os', '.', 'path', '.', 'abspath', '(', 'sci_xml_file_path', ')', 'sci_xml_file', '=', 'SegFile', '.', 'from_segment_xml', '(', 'sci_xml_file_path', ',', 'tags', '=', 'tag_list', ',', 'valid_segment', '=', 'seg_valid_seg', ')', '# NOTE: ligolw_segment_query returns slightly strange output. The output', '# segment_summary output does not use RESULT. Therefore move the', '# segment_summary across.', 'sci_xml_file', '.', 'seg_summ_dict', '[', 'ifo', '.', 'upper', '(', ')', '+', '":"', '+', 'out_sci_seg_name', ']', '=', 'sci_xml_file', '.', 'seg_summ_dict', '[', "':'", '.', 'join', '(', 'sci_seg_name', '.', 'split', '(', "':'", ')', '[', '0', ':', '2', ']', ')', ']', 'sci_segs', '=', 'sci_xml_file', '.', 'return_union_seglist', '(', ')', 'return', 'sci_segs', ',', 'sci_xml_file', ',', 'out_sci_seg_name'] | Obtain science segments for the selected ifo
Parameters
-----------
ifo : string
The string describing the ifo to obtain science times for.
start_time : gps time (either int/LIGOTimeGPS)
The time at which to begin searching for segments.
end_time : gps time (either int/LIGOTimeGPS)
The time at which to stop searching for segments.
out_dir : path
The directory in which output will be stored.
tag : string, optional (default=None)
Use this to specify a tag. This can be used if this module is being
called more than once to give call specific configuration (by setting
options in [workflow-datafind-${TAG}] rather than [workflow-datafind]).
This is also used to tag the Files returned by the class to uniqueify
the Files and uniqueify the actual filename.
Returns
--------
sci_segs : ligo.segments.segmentlist
The segmentlist generated by this call
sci_xml_file : pycbc.workflow.core.SegFile
The workflow File object corresponding to this science segments file.
out_sci_seg_name : string
The name of the output segment list in the output XML file. | ['Obtain', 'science', 'segments', 'for', 'the', 'selected', 'ifo'] | train | https://github.com/gwastro/pycbc/blob/7a64cdd104d263f1b6ea0b01e6841837d05a4cb3/pycbc/workflow/segment.py#L627-L702 |
2,455 | jsommers/switchyard | switchyard/lib/topo/topobuild.py | Topology.unserialize | def unserialize(jsonstr):
'''
Unserialize a JSON string representation of a topology
'''
topod = json.loads(jsonstr)
G = json_graph.node_link_graph(topod)
for n,ndict in G.nodes(data=True):
if 'nodeobj' not in ndict or 'type' not in ndict:
raise Exception("Required type information is not present in serialized node {} :{}".format(n, ndict))
nobj = ndict['nodeobj']
cls = eval(ndict['type'])
ndict['nodeobj'] = cls(**dict(nobj))
t = Topology(nxgraph=G)
return t | python | def unserialize(jsonstr):
'''
Unserialize a JSON string representation of a topology
'''
topod = json.loads(jsonstr)
G = json_graph.node_link_graph(topod)
for n,ndict in G.nodes(data=True):
if 'nodeobj' not in ndict or 'type' not in ndict:
raise Exception("Required type information is not present in serialized node {} :{}".format(n, ndict))
nobj = ndict['nodeobj']
cls = eval(ndict['type'])
ndict['nodeobj'] = cls(**dict(nobj))
t = Topology(nxgraph=G)
return t | ['def', 'unserialize', '(', 'jsonstr', ')', ':', 'topod', '=', 'json', '.', 'loads', '(', 'jsonstr', ')', 'G', '=', 'json_graph', '.', 'node_link_graph', '(', 'topod', ')', 'for', 'n', ',', 'ndict', 'in', 'G', '.', 'nodes', '(', 'data', '=', 'True', ')', ':', 'if', "'nodeobj'", 'not', 'in', 'ndict', 'or', "'type'", 'not', 'in', 'ndict', ':', 'raise', 'Exception', '(', '"Required type information is not present in serialized node {} :{}"', '.', 'format', '(', 'n', ',', 'ndict', ')', ')', 'nobj', '=', 'ndict', '[', "'nodeobj'", ']', 'cls', '=', 'eval', '(', 'ndict', '[', "'type'", ']', ')', 'ndict', '[', "'nodeobj'", ']', '=', 'cls', '(', '*', '*', 'dict', '(', 'nobj', ')', ')', 't', '=', 'Topology', '(', 'nxgraph', '=', 'G', ')', 'return', 't'] | Unserialize a JSON string representation of a topology | ['Unserialize', 'a', 'JSON', 'string', 'representation', 'of', 'a', 'topology'] | train | https://github.com/jsommers/switchyard/blob/fdcb3869c937dcedbd6ea7a7822ebd412bf1e2b0/switchyard/lib/topo/topobuild.py#L273-L286 |
2,456 | inasafe/inasafe | safe/common/utilities.py | temp_dir | def temp_dir(sub_dir='work'):
"""Obtain the temporary working directory for the operating system.
An inasafe subdirectory will automatically be created under this and
if specified, a user subdirectory under that.
.. note:: You can use this together with unique_filename to create
a file in a temporary directory under the inasafe workspace. e.g.
tmpdir = temp_dir('testing')
tmpfile = unique_filename(dir=tmpdir)
print tmpfile
/tmp/inasafe/23-08-2012/timlinux/testing/tmpMRpF_C
If you specify INASAFE_WORK_DIR as an environment var, it will be
used in preference to the system temp directory.
:param sub_dir: Optional argument which will cause an additional
subdirectory to be created e.g. /tmp/inasafe/foo/
:type sub_dir: str
:return: Path to the temp dir that is created.
:rtype: str
:raises: Any errors from the underlying system calls.
"""
user = getpass.getuser().replace(' ', '_')
current_date = date.today()
date_string = current_date.isoformat()
if 'INASAFE_WORK_DIR' in os.environ:
new_directory = os.environ['INASAFE_WORK_DIR']
else:
# Following 4 lines are a workaround for tempfile.tempdir()
# unreliabilty
handle, filename = mkstemp()
os.close(handle)
new_directory = os.path.dirname(filename)
os.remove(filename)
path = os.path.join(new_directory, 'inasafe', date_string, user, sub_dir)
if not os.path.exists(path):
# Ensure that the dir is world writable
# Umask sets the new mask and returns the old
old_mask = os.umask(0000)
os.makedirs(path, 0o777)
# Reinstate the old mask for tmp
os.umask(old_mask)
return path | python | def temp_dir(sub_dir='work'):
"""Obtain the temporary working directory for the operating system.
An inasafe subdirectory will automatically be created under this and
if specified, a user subdirectory under that.
.. note:: You can use this together with unique_filename to create
a file in a temporary directory under the inasafe workspace. e.g.
tmpdir = temp_dir('testing')
tmpfile = unique_filename(dir=tmpdir)
print tmpfile
/tmp/inasafe/23-08-2012/timlinux/testing/tmpMRpF_C
If you specify INASAFE_WORK_DIR as an environment var, it will be
used in preference to the system temp directory.
:param sub_dir: Optional argument which will cause an additional
subdirectory to be created e.g. /tmp/inasafe/foo/
:type sub_dir: str
:return: Path to the temp dir that is created.
:rtype: str
:raises: Any errors from the underlying system calls.
"""
user = getpass.getuser().replace(' ', '_')
current_date = date.today()
date_string = current_date.isoformat()
if 'INASAFE_WORK_DIR' in os.environ:
new_directory = os.environ['INASAFE_WORK_DIR']
else:
# Following 4 lines are a workaround for tempfile.tempdir()
# unreliabilty
handle, filename = mkstemp()
os.close(handle)
new_directory = os.path.dirname(filename)
os.remove(filename)
path = os.path.join(new_directory, 'inasafe', date_string, user, sub_dir)
if not os.path.exists(path):
# Ensure that the dir is world writable
# Umask sets the new mask and returns the old
old_mask = os.umask(0000)
os.makedirs(path, 0o777)
# Reinstate the old mask for tmp
os.umask(old_mask)
return path | ['def', 'temp_dir', '(', 'sub_dir', '=', "'work'", ')', ':', 'user', '=', 'getpass', '.', 'getuser', '(', ')', '.', 'replace', '(', "' '", ',', "'_'", ')', 'current_date', '=', 'date', '.', 'today', '(', ')', 'date_string', '=', 'current_date', '.', 'isoformat', '(', ')', 'if', "'INASAFE_WORK_DIR'", 'in', 'os', '.', 'environ', ':', 'new_directory', '=', 'os', '.', 'environ', '[', "'INASAFE_WORK_DIR'", ']', 'else', ':', '# Following 4 lines are a workaround for tempfile.tempdir()', '# unreliabilty', 'handle', ',', 'filename', '=', 'mkstemp', '(', ')', 'os', '.', 'close', '(', 'handle', ')', 'new_directory', '=', 'os', '.', 'path', '.', 'dirname', '(', 'filename', ')', 'os', '.', 'remove', '(', 'filename', ')', 'path', '=', 'os', '.', 'path', '.', 'join', '(', 'new_directory', ',', "'inasafe'", ',', 'date_string', ',', 'user', ',', 'sub_dir', ')', 'if', 'not', 'os', '.', 'path', '.', 'exists', '(', 'path', ')', ':', '# Ensure that the dir is world writable', '# Umask sets the new mask and returns the old', 'old_mask', '=', 'os', '.', 'umask', '(', '0000', ')', 'os', '.', 'makedirs', '(', 'path', ',', '0o777', ')', '# Reinstate the old mask for tmp', 'os', '.', 'umask', '(', 'old_mask', ')', 'return', 'path'] | Obtain the temporary working directory for the operating system.
An inasafe subdirectory will automatically be created under this and
if specified, a user subdirectory under that.
.. note:: You can use this together with unique_filename to create
a file in a temporary directory under the inasafe workspace. e.g.
tmpdir = temp_dir('testing')
tmpfile = unique_filename(dir=tmpdir)
print tmpfile
/tmp/inasafe/23-08-2012/timlinux/testing/tmpMRpF_C
If you specify INASAFE_WORK_DIR as an environment var, it will be
used in preference to the system temp directory.
:param sub_dir: Optional argument which will cause an additional
subdirectory to be created e.g. /tmp/inasafe/foo/
:type sub_dir: str
:return: Path to the temp dir that is created.
:rtype: str
:raises: Any errors from the underlying system calls. | ['Obtain', 'the', 'temporary', 'working', 'directory', 'for', 'the', 'operating', 'system', '.'] | train | https://github.com/inasafe/inasafe/blob/831d60abba919f6d481dc94a8d988cc205130724/safe/common/utilities.py#L106-L154 |
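Illustrative sketch, not taken from the repository above: a short usage example for temp_dir; the import path is inferred from the row's file path, and the printed path is only indicative since it depends on date, user and platform.
    from safe.common.utilities import temp_dir
    workspace = temp_dir(sub_dir='testing')
    print(workspace)  # e.g. /tmp/inasafe/2012-08-23/timlinux/testing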
2,457 | MIT-LCP/wfdb-python | wfdb/io/record.py | is_monotonic | def is_monotonic(full_list):
"""
Determine whether elements in a list are monotonic. ie. unique
elements are clustered together.
ie. [5,5,3,4] is, [5,3,5] is not.
"""
prev_elements = set({full_list[0]})
prev_item = full_list[0]
for item in full_list:
if item != prev_item:
if item in prev_elements:
return False
prev_item = item
prev_elements.add(item)
return True | python | def is_monotonic(full_list):
"""
Determine whether elements in a list are monotonic. ie. unique
elements are clustered together.
ie. [5,5,3,4] is, [5,3,5] is not.
"""
prev_elements = set({full_list[0]})
prev_item = full_list[0]
for item in full_list:
if item != prev_item:
if item in prev_elements:
return False
prev_item = item
prev_elements.add(item)
return True | ['def', 'is_monotonic', '(', 'full_list', ')', ':', 'prev_elements', '=', 'set', '(', '{', 'full_list', '[', '0', ']', '}', ')', 'prev_item', '=', 'full_list', '[', '0', ']', 'for', 'item', 'in', 'full_list', ':', 'if', 'item', '!=', 'prev_item', ':', 'if', 'item', 'in', 'prev_elements', ':', 'return', 'False', 'prev_item', '=', 'item', 'prev_elements', '.', 'add', '(', 'item', ')', 'return', 'True'] | Determine whether elements in a list are monotonic. ie. unique
elements are clustered together.
ie. [5,5,3,4] is, [5,3,5] is not. | ['Determine', 'whether', 'elements', 'in', 'a', 'list', 'are', 'monotonic', '.', 'ie', '.', 'unique', 'elements', 'are', 'clustered', 'together', '.'] | train | https://github.com/MIT-LCP/wfdb-python/blob/cc8c9e9e44f10af961b7a9d8ae03708b31ac8a8c/wfdb/io/record.py#L1542-L1559 |
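Illustrative sketch, not taken from the repository above: usage example reproducing the docstring's own cases; the import path is inferred from the row's file path.
    from wfdb.io.record import is_monotonic
    print(is_monotonic([5, 5, 3, 4]))  # True  (each value appears in one contiguous run)
    print(is_monotonic([5, 3, 5]))     # False (5 reappears after a different value)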
2,458 | googleapis/google-cloud-python | firestore/google/cloud/firestore_v1beta1/query.py | Query._to_protobuf | def _to_protobuf(self):
"""Convert the current query into the equivalent protobuf.
Returns:
google.cloud.firestore_v1beta1.types.StructuredQuery: The
query protobuf.
"""
projection = self._normalize_projection(self._projection)
orders = self._normalize_orders()
start_at = self._normalize_cursor(self._start_at, orders)
end_at = self._normalize_cursor(self._end_at, orders)
query_kwargs = {
"select": projection,
"from": [
query_pb2.StructuredQuery.CollectionSelector(
collection_id=self._parent.id
)
],
"where": self._filters_pb(),
"order_by": orders,
"start_at": _cursor_pb(start_at),
"end_at": _cursor_pb(end_at),
}
if self._offset is not None:
query_kwargs["offset"] = self._offset
if self._limit is not None:
query_kwargs["limit"] = wrappers_pb2.Int32Value(value=self._limit)
return query_pb2.StructuredQuery(**query_kwargs) | python | def _to_protobuf(self):
"""Convert the current query into the equivalent protobuf.
Returns:
google.cloud.firestore_v1beta1.types.StructuredQuery: The
query protobuf.
"""
projection = self._normalize_projection(self._projection)
orders = self._normalize_orders()
start_at = self._normalize_cursor(self._start_at, orders)
end_at = self._normalize_cursor(self._end_at, orders)
query_kwargs = {
"select": projection,
"from": [
query_pb2.StructuredQuery.CollectionSelector(
collection_id=self._parent.id
)
],
"where": self._filters_pb(),
"order_by": orders,
"start_at": _cursor_pb(start_at),
"end_at": _cursor_pb(end_at),
}
if self._offset is not None:
query_kwargs["offset"] = self._offset
if self._limit is not None:
query_kwargs["limit"] = wrappers_pb2.Int32Value(value=self._limit)
return query_pb2.StructuredQuery(**query_kwargs) | ['def', '_to_protobuf', '(', 'self', ')', ':', 'projection', '=', 'self', '.', '_normalize_projection', '(', 'self', '.', '_projection', ')', 'orders', '=', 'self', '.', '_normalize_orders', '(', ')', 'start_at', '=', 'self', '.', '_normalize_cursor', '(', 'self', '.', '_start_at', ',', 'orders', ')', 'end_at', '=', 'self', '.', '_normalize_cursor', '(', 'self', '.', '_end_at', ',', 'orders', ')', 'query_kwargs', '=', '{', '"select"', ':', 'projection', ',', '"from"', ':', '[', 'query_pb2', '.', 'StructuredQuery', '.', 'CollectionSelector', '(', 'collection_id', '=', 'self', '.', '_parent', '.', 'id', ')', ']', ',', '"where"', ':', 'self', '.', '_filters_pb', '(', ')', ',', '"order_by"', ':', 'orders', ',', '"start_at"', ':', '_cursor_pb', '(', 'start_at', ')', ',', '"end_at"', ':', '_cursor_pb', '(', 'end_at', ')', ',', '}', 'if', 'self', '.', '_offset', 'is', 'not', 'None', ':', 'query_kwargs', '[', '"offset"', ']', '=', 'self', '.', '_offset', 'if', 'self', '.', '_limit', 'is', 'not', 'None', ':', 'query_kwargs', '[', '"limit"', ']', '=', 'wrappers_pb2', '.', 'Int32Value', '(', 'value', '=', 'self', '.', '_limit', ')', 'return', 'query_pb2', '.', 'StructuredQuery', '(', '*', '*', 'query_kwargs', ')'] | Convert the current query into the equivalent protobuf.
Returns:
google.cloud.firestore_v1beta1.types.StructuredQuery: The
query protobuf. | ['Convert', 'the', 'current', 'query', 'into', 'the', 'equivalent', 'protobuf', '.'] | train | https://github.com/googleapis/google-cloud-python/blob/85e80125a59cb10f8cb105f25ecc099e4b940b50/firestore/google/cloud/firestore_v1beta1/query.py#L666-L695 |
2,459 | satellogic/telluric | telluric/georaster.py | GeoRaster2.center | def center(self):
"""Return footprint center in world coordinates, as GeoVector."""
image_center = Point(self.width / 2, self.height / 2)
return self.to_world(image_center) | python | def center(self):
"""Return footprint center in world coordinates, as GeoVector."""
image_center = Point(self.width / 2, self.height / 2)
return self.to_world(image_center) | ['def', 'center', '(', 'self', ')', ':', 'image_center', '=', 'Point', '(', 'self', '.', 'width', '/', '2', ',', 'self', '.', 'height', '/', '2', ')', 'return', 'self', '.', 'to_world', '(', 'image_center', ')'] | Return footprint center in world coordinates, as GeoVector. | ['Return', 'footprint', 'center', 'in', 'world', 'coordinates', 'as', 'GeoVector', '.'] | train | https://github.com/satellogic/telluric/blob/e752cd3ee71e339f79717e526fde362e80055d9e/telluric/georaster.py#L1576-L1579 |
2,460 | saltstack/salt | salt/modules/boto_elb.py | apply_security_groups | def apply_security_groups(name, security_groups, region=None, key=None,
keyid=None, profile=None):
'''
Apply security groups to ELB.
CLI example:
.. code-block:: bash
salt myminion boto_elb.apply_security_groups myelb '["mysecgroup1"]'
'''
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
if isinstance(security_groups, six.string_types):
security_groups = salt.utils.json.loads(security_groups)
try:
conn.apply_security_groups_to_lb(name, security_groups)
log.info('Applied security_groups on ELB %s', name)
return True
except boto.exception.BotoServerError as e:
log.debug(e)
log.error('Failed to appply security_groups on ELB %s: %s',
name, e.message)
return False | python | def apply_security_groups(name, security_groups, region=None, key=None,
keyid=None, profile=None):
'''
Apply security groups to ELB.
CLI example:
.. code-block:: bash
salt myminion boto_elb.apply_security_groups myelb '["mysecgroup1"]'
'''
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
if isinstance(security_groups, six.string_types):
security_groups = salt.utils.json.loads(security_groups)
try:
conn.apply_security_groups_to_lb(name, security_groups)
log.info('Applied security_groups on ELB %s', name)
return True
except boto.exception.BotoServerError as e:
log.debug(e)
log.error('Failed to appply security_groups on ELB %s: %s',
name, e.message)
return False | ['def', 'apply_security_groups', '(', 'name', ',', 'security_groups', ',', 'region', '=', 'None', ',', 'key', '=', 'None', ',', 'keyid', '=', 'None', ',', 'profile', '=', 'None', ')', ':', 'conn', '=', '_get_conn', '(', 'region', '=', 'region', ',', 'key', '=', 'key', ',', 'keyid', '=', 'keyid', ',', 'profile', '=', 'profile', ')', 'if', 'isinstance', '(', 'security_groups', ',', 'six', '.', 'string_types', ')', ':', 'security_groups', '=', 'salt', '.', 'utils', '.', 'json', '.', 'loads', '(', 'security_groups', ')', 'try', ':', 'conn', '.', 'apply_security_groups_to_lb', '(', 'name', ',', 'security_groups', ')', 'log', '.', 'info', '(', "'Applied security_groups on ELB %s'", ',', 'name', ')', 'return', 'True', 'except', 'boto', '.', 'exception', '.', 'BotoServerError', 'as', 'e', ':', 'log', '.', 'debug', '(', 'e', ')', 'log', '.', 'error', '(', "'Failed to appply security_groups on ELB %s: %s'", ',', 'name', ',', 'e', '.', 'message', ')', 'return', 'False'] | Apply security groups to ELB.
CLI example:
.. code-block:: bash
salt myminion boto_elb.apply_security_groups myelb '["mysecgroup1"]' | ['Apply', 'security', 'groups', 'to', 'ELB', '.'] | train | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/boto_elb.py#L357-L380 |
2,461 | aws/sagemaker-python-sdk | src/sagemaker/tuner.py | HyperparameterTuner.transfer_learning_tuner | def transfer_learning_tuner(self, additional_parents=None, estimator=None):
"""Creates a new ``HyperparameterTuner`` by copying the request fields from the provided parent to the new
instance of ``HyperparameterTuner``. Followed by addition of warm start configuration with the type as
"TransferLearning" and parents as the union of provided list of ``additional_parents`` and the ``self``.
Also, training image in the new tuner's estimator is updated with the provided ``training_image``.
Args:
additional_parents (set{str}): Set of additional parents along with the self to be used in warm starting
the transfer learning tuner.
estimator (sagemaker.estimator.EstimatorBase): An estimator object that has been initialized with
the desired configuration. There does not need to be a training job associated with this instance.
Returns:
sagemaker.tuner.HyperparameterTuner: ``HyperparameterTuner`` instance which can be used to launch transfer
learning tuning job.
Examples:
>>> parent_tuner = HyperparameterTuner.attach(tuning_job_name="parent-job-1")
>>> transfer_learning_tuner = parent_tuner.transfer_learning_tuner(additional_parents={"parent-job-2"})
Later On:
>>> transfer_learning_tuner.fit(inputs={})
"""
return self._create_warm_start_tuner(additional_parents=additional_parents,
warm_start_type=WarmStartTypes.TRANSFER_LEARNING,
estimator=estimator) | python | def transfer_learning_tuner(self, additional_parents=None, estimator=None):
"""Creates a new ``HyperparameterTuner`` by copying the request fields from the provided parent to the new
instance of ``HyperparameterTuner``. Followed by addition of warm start configuration with the type as
"TransferLearning" and parents as the union of provided list of ``additional_parents`` and the ``self``.
Also, training image in the new tuner's estimator is updated with the provided ``training_image``.
Args:
additional_parents (set{str}): Set of additional parents along with the self to be used in warm starting
the transfer learning tuner.
estimator (sagemaker.estimator.EstimatorBase): An estimator object that has been initialized with
the desired configuration. There does not need to be a training job associated with this instance.
Returns:
sagemaker.tuner.HyperparameterTuner: ``HyperparameterTuner`` instance which can be used to launch transfer
learning tuning job.
Examples:
>>> parent_tuner = HyperparameterTuner.attach(tuning_job_name="parent-job-1")
>>> transfer_learning_tuner = parent_tuner.transfer_learning_tuner(additional_parents={"parent-job-2"})
Later On:
>>> transfer_learning_tuner.fit(inputs={})
"""
return self._create_warm_start_tuner(additional_parents=additional_parents,
warm_start_type=WarmStartTypes.TRANSFER_LEARNING,
estimator=estimator) | ['def', 'transfer_learning_tuner', '(', 'self', ',', 'additional_parents', '=', 'None', ',', 'estimator', '=', 'None', ')', ':', 'return', 'self', '.', '_create_warm_start_tuner', '(', 'additional_parents', '=', 'additional_parents', ',', 'warm_start_type', '=', 'WarmStartTypes', '.', 'TRANSFER_LEARNING', ',', 'estimator', '=', 'estimator', ')'] | Creates a new ``HyperparameterTuner`` by copying the request fields from the provided parent to the new
instance of ``HyperparameterTuner``. Followed by addition of warm start configuration with the type as
"TransferLearning" and parents as the union of provided list of ``additional_parents`` and the ``self``.
Also, training image in the new tuner's estimator is updated with the provided ``training_image``.
Args:
additional_parents (set{str}): Set of additional parents along with the self to be used in warm starting
the transfer learning tuner.
estimator (sagemaker.estimator.EstimatorBase): An estimator object that has been initialized with
the desired configuration. There does not need to be a training job associated with this instance.
Returns:
sagemaker.tuner.HyperparameterTuner: ``HyperparameterTuner`` instance which can be used to launch transfer
learning tuning job.
Examples:
>>> parent_tuner = HyperparameterTuner.attach(tuning_job_name="parent-job-1")
>>> transfer_learning_tuner = parent_tuner.transfer_learning_tuner(additional_parents={"parent-job-2"})
Later On:
>>> transfer_learning_tuner.fit(inputs={}) | ['Creates', 'a', 'new', 'HyperparameterTuner', 'by', 'copying', 'the', 'request', 'fields', 'from', 'the', 'provided', 'parent', 'to', 'the', 'new', 'instance', 'of', 'HyperparameterTuner', '.', 'Followed', 'by', 'addition', 'of', 'warm', 'start', 'configuration', 'with', 'the', 'type', 'as', 'TransferLearning', 'and', 'parents', 'as', 'the', 'union', 'of', 'provided', 'list', 'of', 'additional_parents', 'and', 'the', 'self', '.', 'Also', 'training', 'image', 'in', 'the', 'new', 'tuner', 's', 'estimator', 'is', 'updated', 'with', 'the', 'provided', 'training_image', '.'] | train | https://github.com/aws/sagemaker-python-sdk/blob/a9e724c7d3f5572b68c3903548c792a59d99799a/src/sagemaker/tuner.py#L532-L557 |
2,462 | basho/riak-python-client | riak/transports/transport.py | Transport.delete | def delete(self, robj, rw=None, r=None, w=None, dw=None, pr=None,
pw=None, timeout=None):
"""
Deletes an object.
"""
raise NotImplementedError | python | def delete(self, robj, rw=None, r=None, w=None, dw=None, pr=None,
pw=None, timeout=None):
"""
Deletes an object.
"""
raise NotImplementedError | ['def', 'delete', '(', 'self', ',', 'robj', ',', 'rw', '=', 'None', ',', 'r', '=', 'None', ',', 'w', '=', 'None', ',', 'dw', '=', 'None', ',', 'pr', '=', 'None', ',', 'pw', '=', 'None', ',', 'timeout', '=', 'None', ')', ':', 'raise', 'NotImplementedError'] | Deletes an object. | ['Deletes', 'an', 'object', '.'] | train | https://github.com/basho/riak-python-client/blob/91de13a16607cdf553d1a194e762734e3bec4231/riak/transports/transport.py#L84-L89 |
2,463 | dhermes/bezier | src/bezier/_surface_helpers.py | quadratic_jacobian_polynomial | def quadratic_jacobian_polynomial(nodes):
r"""Compute the Jacobian determinant of a quadratic surface.
.. note::
This is used **only** by :meth:`Surface._compute_valid` (which is
in turn used to compute / cache the :attr:`Surface.is_valid`
property).
Converts :math:`\det(J(s, t))` to a polynomial on the reference
triangle and represents it as a surface object.
.. note::
This assumes that ``nodes`` is ``2 x 6`` but doesn't verify this.
(However, the right multiplication by ``_QUADRATIC_JACOBIAN_HELPER``
would fail if ``nodes`` wasn't ``R x 6`` and then the ensuing
determinants would fail if there weren't 2 rows.)
Args:
nodes (numpy.ndarray): A 2 x 6 array of nodes in a surface.
Returns:
numpy.ndarray: 1 x 6 array, coefficients in Bernstein basis.
"""
# First evaluate the Jacobian at each of the 6 nodes.
jac_parts = _helpers.matrix_product(nodes, _QUADRATIC_JACOBIAN_HELPER)
jac_at_nodes = np.empty((1, 6), order="F")
jac_at_nodes[0, 0] = two_by_two_det(jac_parts[:, :2])
jac_at_nodes[0, 1] = two_by_two_det(jac_parts[:, 2:4])
jac_at_nodes[0, 2] = two_by_two_det(jac_parts[:, 4:6])
jac_at_nodes[0, 3] = two_by_two_det(jac_parts[:, 6:8])
jac_at_nodes[0, 4] = two_by_two_det(jac_parts[:, 8:10])
jac_at_nodes[0, 5] = two_by_two_det(jac_parts[:, 10:])
# Convert the nodal values to the Bernstein basis...
bernstein = _helpers.matrix_product(jac_at_nodes, _QUADRATIC_TO_BERNSTEIN)
return bernstein | python | def quadratic_jacobian_polynomial(nodes):
r"""Compute the Jacobian determinant of a quadratic surface.
.. note::
This is used **only** by :meth:`Surface._compute_valid` (which is
in turn used to compute / cache the :attr:`Surface.is_valid`
property).
Converts :math:`\det(J(s, t))` to a polynomial on the reference
triangle and represents it as a surface object.
.. note::
This assumes that ``nodes`` is ``2 x 6`` but doesn't verify this.
(However, the right multiplication by ``_QUADRATIC_JACOBIAN_HELPER``
would fail if ``nodes`` wasn't ``R x 6`` and then the ensuing
determinants would fail if there weren't 2 rows.)
Args:
nodes (numpy.ndarray): A 2 x 6 array of nodes in a surface.
Returns:
numpy.ndarray: 1 x 6 array, coefficients in Bernstein basis.
"""
# First evaluate the Jacobian at each of the 6 nodes.
jac_parts = _helpers.matrix_product(nodes, _QUADRATIC_JACOBIAN_HELPER)
jac_at_nodes = np.empty((1, 6), order="F")
jac_at_nodes[0, 0] = two_by_two_det(jac_parts[:, :2])
jac_at_nodes[0, 1] = two_by_two_det(jac_parts[:, 2:4])
jac_at_nodes[0, 2] = two_by_two_det(jac_parts[:, 4:6])
jac_at_nodes[0, 3] = two_by_two_det(jac_parts[:, 6:8])
jac_at_nodes[0, 4] = two_by_two_det(jac_parts[:, 8:10])
jac_at_nodes[0, 5] = two_by_two_det(jac_parts[:, 10:])
# Convert the nodal values to the Bernstein basis...
bernstein = _helpers.matrix_product(jac_at_nodes, _QUADRATIC_TO_BERNSTEIN)
return bernstein | ['def', 'quadratic_jacobian_polynomial', '(', 'nodes', ')', ':', '# First evaluate the Jacobian at each of the 6 nodes.', 'jac_parts', '=', '_helpers', '.', 'matrix_product', '(', 'nodes', ',', '_QUADRATIC_JACOBIAN_HELPER', ')', 'jac_at_nodes', '=', 'np', '.', 'empty', '(', '(', '1', ',', '6', ')', ',', 'order', '=', '"F"', ')', 'jac_at_nodes', '[', '0', ',', '0', ']', '=', 'two_by_two_det', '(', 'jac_parts', '[', ':', ',', ':', '2', ']', ')', 'jac_at_nodes', '[', '0', ',', '1', ']', '=', 'two_by_two_det', '(', 'jac_parts', '[', ':', ',', '2', ':', '4', ']', ')', 'jac_at_nodes', '[', '0', ',', '2', ']', '=', 'two_by_two_det', '(', 'jac_parts', '[', ':', ',', '4', ':', '6', ']', ')', 'jac_at_nodes', '[', '0', ',', '3', ']', '=', 'two_by_two_det', '(', 'jac_parts', '[', ':', ',', '6', ':', '8', ']', ')', 'jac_at_nodes', '[', '0', ',', '4', ']', '=', 'two_by_two_det', '(', 'jac_parts', '[', ':', ',', '8', ':', '10', ']', ')', 'jac_at_nodes', '[', '0', ',', '5', ']', '=', 'two_by_two_det', '(', 'jac_parts', '[', ':', ',', '10', ':', ']', ')', '# Convert the nodal values to the Bernstein basis...', 'bernstein', '=', '_helpers', '.', 'matrix_product', '(', 'jac_at_nodes', ',', '_QUADRATIC_TO_BERNSTEIN', ')', 'return', 'bernstein'] | r"""Compute the Jacobian determinant of a quadratic surface.
.. note::
This is used **only** by :meth:`Surface._compute_valid` (which is
in turn used to compute / cache the :attr:`Surface.is_valid`
property).
Converts :math:`\det(J(s, t))` to a polynomial on the reference
triangle and represents it as a surface object.
.. note::
This assumes that ``nodes`` is ``2 x 6`` but doesn't verify this.
(However, the right multiplication by ``_QUADRATIC_JACOBIAN_HELPER``
would fail if ``nodes`` wasn't ``R x 6`` and then the ensuing
determinants would fail if there weren't 2 rows.)
Args:
nodes (numpy.ndarray): A 2 x 6 array of nodes in a surface.
Returns:
numpy.ndarray: 1 x 6 array, coefficients in Bernstein basis. | ['r', 'Compute', 'the', 'Jacobian', 'determinant', 'of', 'a', 'quadratic', 'surface', '.'] | train | https://github.com/dhermes/bezier/blob/4f941f82637a8e70a5b159a9203132192e23406b/src/bezier/_surface_helpers.py#L815-L851 |
2,464 | thriftrw/thriftrw-python | thriftrw/compile/compiler.py | ModuleSpec.link | def link(self):
"""Link all the types in this module and all included modules."""
if self.linked:
return self
self.linked = True
included_modules = []
# Link includes
for include in self.includes.values():
included_modules.append(include.link().surface)
self.scope.add_surface('__includes__', tuple(included_modules))
self.scope.add_surface('__thrift_source__', self.thrift_source)
# Link self
for linker in LINKERS:
linker(self.scope).link()
self.scope.add_surface('loads', Deserializer(self.protocol))
self.scope.add_surface('dumps', Serializer(self.protocol))
return self | python | def link(self):
"""Link all the types in this module and all included modules."""
if self.linked:
return self
self.linked = True
included_modules = []
# Link includes
for include in self.includes.values():
included_modules.append(include.link().surface)
self.scope.add_surface('__includes__', tuple(included_modules))
self.scope.add_surface('__thrift_source__', self.thrift_source)
# Link self
for linker in LINKERS:
linker(self.scope).link()
self.scope.add_surface('loads', Deserializer(self.protocol))
self.scope.add_surface('dumps', Serializer(self.protocol))
return self | ['def', 'link', '(', 'self', ')', ':', 'if', 'self', '.', 'linked', ':', 'return', 'self', 'self', '.', 'linked', '=', 'True', 'included_modules', '=', '[', ']', '# Link includes', 'for', 'include', 'in', 'self', '.', 'includes', '.', 'values', '(', ')', ':', 'included_modules', '.', 'append', '(', 'include', '.', 'link', '(', ')', '.', 'surface', ')', 'self', '.', 'scope', '.', 'add_surface', '(', "'__includes__'", ',', 'tuple', '(', 'included_modules', ')', ')', 'self', '.', 'scope', '.', 'add_surface', '(', "'__thrift_source__'", ',', 'self', '.', 'thrift_source', ')', '# Link self', 'for', 'linker', 'in', 'LINKERS', ':', 'linker', '(', 'self', '.', 'scope', ')', '.', 'link', '(', ')', 'self', '.', 'scope', '.', 'add_surface', '(', "'loads'", ',', 'Deserializer', '(', 'self', '.', 'protocol', ')', ')', 'self', '.', 'scope', '.', 'add_surface', '(', "'dumps'", ',', 'Serializer', '(', 'self', '.', 'protocol', ')', ')', 'return', 'self'] | Link all the types in this module and all included modules. | ['Link', 'all', 'the', 'types', 'in', 'this', 'module', 'and', 'all', 'included', 'modules', '.'] | train | https://github.com/thriftrw/thriftrw-python/blob/4f2f71acd7a0ac716c9ea5cdcea2162aa561304a/thriftrw/compile/compiler.py#L112-L135 |
2,465 | architv/soccer-cli | soccer/main.py | main | def main(league, time, standings, team, live, use12hour, players,
output_format, output_file, upcoming, lookup, listcodes, apikey):
"""
A CLI for live and past football scores from various football leagues.
League codes:
\b
- WC: World Cup
- EC: European Championship
- CL: Champions League
- PL: English Premier League
- ELC: English Championship
- FL1: French Ligue 1
- BL: German Bundesliga
- SA: Serie A
- DED: Eredivisie
- PPL: Primeira Liga
- PD: Primera Division
- BSA: Brazil Serie A
"""
headers = {'X-Auth-Token': apikey}
try:
if output_format == 'stdout' and output_file:
raise IncorrectParametersException('Printing output to stdout and '
'saving to a file are mutually exclusive')
writer = get_writer(output_format, output_file)
rh = RequestHandler(headers, LEAGUE_IDS, TEAM_NAMES, writer)
if listcodes:
list_team_codes()
return
if live:
rh.get_live_scores(use12hour)
return
if standings:
if not league:
raise IncorrectParametersException('Please specify a league. '
'Example --standings --league=PL')
if league == 'CL':
raise IncorrectParametersException('Standings for CL - '
'Champions League not supported')
rh.get_standings(league)
return
if team:
if lookup:
map_team_id(team)
return
if players:
rh.get_team_players(team)
return
else:
rh.get_team_scores(team, time, upcoming, use12hour)
return
rh.get_league_scores(league, time, upcoming, use12hour)
except IncorrectParametersException as e:
click.secho(str(e), fg="red", bold=True) | python | def main(league, time, standings, team, live, use12hour, players,
output_format, output_file, upcoming, lookup, listcodes, apikey):
"""
A CLI for live and past football scores from various football leagues.
League codes:
\b
- WC: World Cup
- EC: European Championship
- CL: Champions League
- PL: English Premier League
- ELC: English Championship
- FL1: French Ligue 1
- BL: German Bundesliga
- SA: Serie A
- DED: Eredivisie
- PPL: Primeira Liga
- PD: Primera Division
- BSA: Brazil Serie A
"""
headers = {'X-Auth-Token': apikey}
try:
if output_format == 'stdout' and output_file:
raise IncorrectParametersException('Printing output to stdout and '
'saving to a file are mutually exclusive')
writer = get_writer(output_format, output_file)
rh = RequestHandler(headers, LEAGUE_IDS, TEAM_NAMES, writer)
if listcodes:
list_team_codes()
return
if live:
rh.get_live_scores(use12hour)
return
if standings:
if not league:
raise IncorrectParametersException('Please specify a league. '
'Example --standings --league=PL')
if league == 'CL':
raise IncorrectParametersException('Standings for CL - '
'Champions League not supported')
rh.get_standings(league)
return
if team:
if lookup:
map_team_id(team)
return
if players:
rh.get_team_players(team)
return
else:
rh.get_team_scores(team, time, upcoming, use12hour)
return
rh.get_league_scores(league, time, upcoming, use12hour)
except IncorrectParametersException as e:
click.secho(str(e), fg="red", bold=True) | ['def', 'main', '(', 'league', ',', 'time', ',', 'standings', ',', 'team', ',', 'live', ',', 'use12hour', ',', 'players', ',', 'output_format', ',', 'output_file', ',', 'upcoming', ',', 'lookup', ',', 'listcodes', ',', 'apikey', ')', ':', 'headers', '=', '{', "'X-Auth-Token'", ':', 'apikey', '}', 'try', ':', 'if', 'output_format', '==', "'stdout'", 'and', 'output_file', ':', 'raise', 'IncorrectParametersException', '(', "'Printing output to stdout and '", "'saving to a file are mutually exclusive'", ')', 'writer', '=', 'get_writer', '(', 'output_format', ',', 'output_file', ')', 'rh', '=', 'RequestHandler', '(', 'headers', ',', 'LEAGUE_IDS', ',', 'TEAM_NAMES', ',', 'writer', ')', 'if', 'listcodes', ':', 'list_team_codes', '(', ')', 'return', 'if', 'live', ':', 'rh', '.', 'get_live_scores', '(', 'use12hour', ')', 'return', 'if', 'standings', ':', 'if', 'not', 'league', ':', 'raise', 'IncorrectParametersException', '(', "'Please specify a league. '", "'Example --standings --league=PL'", ')', 'if', 'league', '==', "'CL'", ':', 'raise', 'IncorrectParametersException', '(', "'Standings for CL - '", "'Champions League not supported'", ')', 'rh', '.', 'get_standings', '(', 'league', ')', 'return', 'if', 'team', ':', 'if', 'lookup', ':', 'map_team_id', '(', 'team', ')', 'return', 'if', 'players', ':', 'rh', '.', 'get_team_players', '(', 'team', ')', 'return', 'else', ':', 'rh', '.', 'get_team_scores', '(', 'team', ',', 'time', ',', 'upcoming', ',', 'use12hour', ')', 'return', 'rh', '.', 'get_league_scores', '(', 'league', ',', 'time', ',', 'upcoming', ',', 'use12hour', ')', 'except', 'IncorrectParametersException', 'as', 'e', ':', 'click', '.', 'secho', '(', 'str', '(', 'e', ')', ',', 'fg', '=', '"red"', ',', 'bold', '=', 'True', ')'] | A CLI for live and past football scores from various football leagues.
League codes:
\b
- WC: World Cup
- EC: European Championship
- CL: Champions League
- PL: English Premier League
- ELC: English Championship
- FL1: French Ligue 1
- BL: German Bundesliga
- SA: Serie A
- DED: Eredivisie
- PPL: Primeira Liga
- PD: Primera Division
- BSA: Brazil Serie A | ['A', 'CLI', 'for', 'live', 'and', 'past', 'football', 'scores', 'from', 'various', 'football', 'leagues', '.'] | train | https://github.com/architv/soccer-cli/blob/472e9f492f7633a8e9739e228a6c31de454da88b/soccer/main.py#L133-L194 |
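Illustrative sketch, not taken from the repository above: main appears to be a click command (its decorators are not shown in this row), so one way to exercise it from Python is click's test runner; the import path, the click-command assumption, and the need for a valid API key are all assumptions.
    from click.testing import CliRunner
    from soccer.main import main
    runner = CliRunner()
    result = runner.invoke(main, ['--league', 'PL', '--standings'])
    print(result.output)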
2,466 | glyphobet/fragments | fragments/commands.py | status | def status(*args):
"""
Get the current status of the fragments repository, limited to FILENAME(s) if specified.
Limit output to files with status STATUS, if present.
"""
parser = argparse.ArgumentParser(prog="%s %s" % (__package__, status.__name__), description=status.__doc__)
parser.add_argument('FILENAME', help="files to show status for", nargs="*", default=['.'])
parser.add_argument('-l', '--limit', type=str, dest="STATUS", default='MDAE ', action="store", help="limit to files in STATUS")
args = parser.parse_args(args)
config = FragmentsConfig()
yield "%s configuration version %s.%s.%s" % ((__package__,) + config['version'])
yield "stored in %s" % config.directory
for s, curr_path in _iterate_over_files(args.FILENAME, config, statuses=args.STATUS):
yield _status_to_color.get(s, str)('%s\t%s' % (s, os.path.relpath(curr_path))) | python | def status(*args):
"""
Get the current status of the fragments repository, limited to FILENAME(s) if specified.
Limit output to files with status STATUS, if present.
"""
parser = argparse.ArgumentParser(prog="%s %s" % (__package__, status.__name__), description=status.__doc__)
parser.add_argument('FILENAME', help="files to show status for", nargs="*", default=['.'])
parser.add_argument('-l', '--limit', type=str, dest="STATUS", default='MDAE ', action="store", help="limit to files in STATUS")
args = parser.parse_args(args)
config = FragmentsConfig()
yield "%s configuration version %s.%s.%s" % ((__package__,) + config['version'])
yield "stored in %s" % config.directory
for s, curr_path in _iterate_over_files(args.FILENAME, config, statuses=args.STATUS):
yield _status_to_color.get(s, str)('%s\t%s' % (s, os.path.relpath(curr_path))) | ['def', 'status', '(', '*', 'args', ')', ':', 'parser', '=', 'argparse', '.', 'ArgumentParser', '(', 'prog', '=', '"%s %s"', '%', '(', '__package__', ',', 'status', '.', '__name__', ')', ',', 'description', '=', 'status', '.', '__doc__', ')', 'parser', '.', 'add_argument', '(', "'FILENAME'", ',', 'help', '=', '"files to show status for"', ',', 'nargs', '=', '"*"', ',', 'default', '=', '[', "'.'", ']', ')', 'parser', '.', 'add_argument', '(', "'-l'", ',', "'--limit'", ',', 'type', '=', 'str', ',', 'dest', '=', '"STATUS"', ',', 'default', '=', "'MDAE '", ',', 'action', '=', '"store"', ',', 'help', '=', '"limit to files in STATUS"', ')', 'args', '=', 'parser', '.', 'parse_args', '(', 'args', ')', 'config', '=', 'FragmentsConfig', '(', ')', 'yield', '"%s configuration version %s.%s.%s"', '%', '(', '(', '__package__', ',', ')', '+', 'config', '[', "'version'", ']', ')', 'yield', '"stored in %s"', '%', 'config', '.', 'directory', 'for', 's', ',', 'curr_path', 'in', '_iterate_over_files', '(', 'args', '.', 'FILENAME', ',', 'config', ',', 'statuses', '=', 'args', '.', 'STATUS', ')', ':', 'yield', '_status_to_color', '.', 'get', '(', 's', ',', 'str', ')', '(', "'%s\\t%s'", '%', '(', 's', ',', 'os', '.', 'path', '.', 'relpath', '(', 'curr_path', ')', ')', ')'] | Get the current status of the fragments repository, limited to FILENAME(s) if specified.
Limit output to files with status STATUS, if present. | ['Get', 'the', 'current', 'status', 'of', 'the', 'fragments', 'repository', 'limited', 'to', 'FILENAME', '(', 's', ')', 'if', 'specified', '.', 'Limit', 'output', 'to', 'files', 'with', 'status', 'STATUS', 'if', 'present', '.'] | train | https://github.com/glyphobet/fragments/blob/b58473604e2db47b98703260b8ee8605264247e3/fragments/commands.py#L80-L94 |
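Illustrative sketch, not taken from the repository above: status is a generator of output lines, so it can be driven directly from Python as well as from the CLI; this assumes a fragments repository already exists in the working directory, and the file name and import path are illustrative and inferred.
    from fragments.commands import status
    for line in status('-l', 'MD', 'notes.txt'):
        print(line)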
2,467 | konstantint/matplotlib-venn | matplotlib_venn/_region.py | VennArcgonRegion.verify | def verify(self):
'''
Verify the correctness of the region arcs. Throws an VennRegionException if verification fails
(or any other exception if it happens during verification).
'''
# Verify size of arcs list
if (len(self.arcs) < 2):
raise VennRegionException("At least two arcs needed in a poly-arc region")
if (len(self.arcs) > 4):
raise VennRegionException("At most 4 arcs are supported currently for poly-arc regions")
TRIG_TOL = 100*tol # We need to use looser tolerance level here because conversion to angles and back is prone to large errors.
# Verify connectedness of arcs
for i in range(len(self.arcs)):
if not np.all(self.arcs[i-1].end_point() - self.arcs[i].start_point() < TRIG_TOL):
raise VennRegionException("Arcs of an poly-arc-gon must be connected via endpoints")
# Verify that arcs do not cross-intersect except at endpoints
for i in range(len(self.arcs)-1):
for j in range(i+1, len(self.arcs)):
ips = self.arcs[i].intersect_arc(self.arcs[j])
for ip in ips:
if not (np.all(abs(ip - self.arcs[i].start_point()) < TRIG_TOL) or np.all(abs(ip - self.arcs[i].end_point()) < TRIG_TOL)):
raise VennRegionException("Arcs of a poly-arc-gon may only intersect at endpoints")
if len(ips) != 0 and (i - j) % len(self.arcs) > 1 and (j - i) % len(self.arcs) > 1:
# Two non-consecutive arcs intersect. This is in general not good, but
# may occasionally happen when all arcs inbetween have length 0.
pass # raise VennRegionException("Non-consecutive arcs of a poly-arc-gon may not intersect")
# Verify that vertices are ordered so that at each point the direction along the polyarc changes towards the left.
# Note that this test only makes sense for polyarcs obtained using circle intersections & subtractions.
# A "flower-like" polyarc may have its vertices ordered counter-clockwise yet the direction would turn to the right at each of them.
for i in range(len(self.arcs)):
prev_arc = self.arcs[i-1]
cur_arc = self.arcs[i]
if box_product(prev_arc.direction_vector(prev_arc.to_angle), cur_arc.direction_vector(cur_arc.from_angle)) < -tol:
raise VennRegionException("Arcs must be ordered so that the direction at each vertex changes counter-clockwise") | python | def verify(self):
'''
Verify the correctness of the region arcs. Throws an VennRegionException if verification fails
(or any other exception if it happens during verification).
'''
# Verify size of arcs list
if (len(self.arcs) < 2):
raise VennRegionException("At least two arcs needed in a poly-arc region")
if (len(self.arcs) > 4):
raise VennRegionException("At most 4 arcs are supported currently for poly-arc regions")
TRIG_TOL = 100*tol # We need to use looser tolerance level here because conversion to angles and back is prone to large errors.
# Verify connectedness of arcs
for i in range(len(self.arcs)):
if not np.all(self.arcs[i-1].end_point() - self.arcs[i].start_point() < TRIG_TOL):
raise VennRegionException("Arcs of an poly-arc-gon must be connected via endpoints")
# Verify that arcs do not cross-intersect except at endpoints
for i in range(len(self.arcs)-1):
for j in range(i+1, len(self.arcs)):
ips = self.arcs[i].intersect_arc(self.arcs[j])
for ip in ips:
if not (np.all(abs(ip - self.arcs[i].start_point()) < TRIG_TOL) or np.all(abs(ip - self.arcs[i].end_point()) < TRIG_TOL)):
raise VennRegionException("Arcs of a poly-arc-gon may only intersect at endpoints")
if len(ips) != 0 and (i - j) % len(self.arcs) > 1 and (j - i) % len(self.arcs) > 1:
# Two non-consecutive arcs intersect. This is in general not good, but
# may occasionally happen when all arcs inbetween have length 0.
pass # raise VennRegionException("Non-consecutive arcs of a poly-arc-gon may not intersect")
# Verify that vertices are ordered so that at each point the direction along the polyarc changes towards the left.
# Note that this test only makes sense for polyarcs obtained using circle intersections & subtractions.
# A "flower-like" polyarc may have its vertices ordered counter-clockwise yet the direction would turn to the right at each of them.
for i in range(len(self.arcs)):
prev_arc = self.arcs[i-1]
cur_arc = self.arcs[i]
if box_product(prev_arc.direction_vector(prev_arc.to_angle), cur_arc.direction_vector(cur_arc.from_angle)) < -tol:
raise VennRegionException("Arcs must be ordered so that the direction at each vertex changes counter-clockwise") | ['def', 'verify', '(', 'self', ')', ':', '# Verify size of arcs list', 'if', '(', 'len', '(', 'self', '.', 'arcs', ')', '<', '2', ')', ':', 'raise', 'VennRegionException', '(', '"At least two arcs needed in a poly-arc region"', ')', 'if', '(', 'len', '(', 'self', '.', 'arcs', ')', '>', '4', ')', ':', 'raise', 'VennRegionException', '(', '"At most 4 arcs are supported currently for poly-arc regions"', ')', 'TRIG_TOL', '=', '100', '*', 'tol', '# We need to use looser tolerance level here because conversion to angles and back is prone to large errors.', '# Verify connectedness of arcs', 'for', 'i', 'in', 'range', '(', 'len', '(', 'self', '.', 'arcs', ')', ')', ':', 'if', 'not', 'np', '.', 'all', '(', 'self', '.', 'arcs', '[', 'i', '-', '1', ']', '.', 'end_point', '(', ')', '-', 'self', '.', 'arcs', '[', 'i', ']', '.', 'start_point', '(', ')', '<', 'TRIG_TOL', ')', ':', 'raise', 'VennRegionException', '(', '"Arcs of an poly-arc-gon must be connected via endpoints"', ')', '# Verify that arcs do not cross-intersect except at endpoints', 'for', 'i', 'in', 'range', '(', 'len', '(', 'self', '.', 'arcs', ')', '-', '1', ')', ':', 'for', 'j', 'in', 'range', '(', 'i', '+', '1', ',', 'len', '(', 'self', '.', 'arcs', ')', ')', ':', 'ips', '=', 'self', '.', 'arcs', '[', 'i', ']', '.', 'intersect_arc', '(', 'self', '.', 'arcs', '[', 'j', ']', ')', 'for', 'ip', 'in', 'ips', ':', 'if', 'not', '(', 'np', '.', 'all', '(', 'abs', '(', 'ip', '-', 'self', '.', 'arcs', '[', 'i', ']', '.', 'start_point', '(', ')', ')', '<', 'TRIG_TOL', ')', 'or', 'np', '.', 'all', '(', 'abs', '(', 'ip', '-', 'self', '.', 'arcs', '[', 'i', ']', '.', 'end_point', '(', ')', ')', '<', 'TRIG_TOL', ')', ')', ':', 'raise', 'VennRegionException', '(', '"Arcs of a poly-arc-gon may only intersect at endpoints"', ')', 'if', 'len', '(', 'ips', ')', '!=', '0', 'and', '(', 'i', '-', 'j', ')', '%', 'len', '(', 'self', '.', 'arcs', ')', '>', '1', 'and', '(', 'j', '-', 'i', ')', '%', 'len', '(', 'self', '.', 'arcs', ')', '>', '1', ':', '# Two non-consecutive arcs intersect. This is in general not good, but', '# may occasionally happen when all arcs inbetween have length 0.', 'pass', '# raise VennRegionException("Non-consecutive arcs of a poly-arc-gon may not intersect")', '# Verify that vertices are ordered so that at each point the direction along the polyarc changes towards the left.', '# Note that this test only makes sense for polyarcs obtained using circle intersections & subtractions.', '# A "flower-like" polyarc may have its vertices ordered counter-clockwise yet the direction would turn to the right at each of them.', 'for', 'i', 'in', 'range', '(', 'len', '(', 'self', '.', 'arcs', ')', ')', ':', 'prev_arc', '=', 'self', '.', 'arcs', '[', 'i', '-', '1', ']', 'cur_arc', '=', 'self', '.', 'arcs', '[', 'i', ']', 'if', 'box_product', '(', 'prev_arc', '.', 'direction_vector', '(', 'prev_arc', '.', 'to_angle', ')', ',', 'cur_arc', '.', 'direction_vector', '(', 'cur_arc', '.', 'from_angle', ')', ')', '<', '-', 'tol', ':', 'raise', 'VennRegionException', '(', '"Arcs must be ordered so that the direction at each vertex changes counter-clockwise"', ')'] | Verify the correctness of the region arcs. Throws an VennRegionException if verification fails
(or any other exception if it happens during verification). | ['Verify', 'the', 'correctness', 'of', 'the', 'region', 'arcs', '.', 'Throws', 'an', 'VennRegionException', 'if', 'verification', 'fails', '(', 'or', 'any', 'other', 'exception', 'if', 'it', 'happens', 'during', 'verification', ')', '.'] | train | https://github.com/konstantint/matplotlib-venn/blob/c26796c9925bdac512edf48387452fbd1848c791/matplotlib_venn/_region.py#L242-L279 |
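The verify() record above ends with an orientation check: at each shared vertex the tangent direction must turn counter-clockwise, tested with a 2-D cross product. A minimal standalone sketch of that test follows; box_product and tol here are stand-ins for matplotlib_venn's internal helper and tolerance, not the library's actual objects.

    tol = 1e-10  # illustrative tolerance, analogous to the module-level tol

    def box_product(v1, v2):
        # z-component of the cross product of two 2-D vectors
        return v1[0] * v2[1] - v1[1] * v2[0]

    prev_dir = (1.0, 0.0)  # direction leaving the previous arc at the shared vertex
    cur_dir = (0.0, 1.0)   # direction entering the current arc at the same vertex
    assert box_product(prev_dir, cur_dir) >= -tol  # turn is counter-clockwise (or straight)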
2,468 | sarugaku/pythonfinder | tasks/vendoring/__init__.py | rewrite_file_imports | def rewrite_file_imports(item, vendored_libs):
"""Rewrite 'import xxx' and 'from xxx import' for vendored_libs"""
text = item.read_text(encoding='utf-8')
for lib in vendored_libs:
text = re.sub(
r'(\n\s*)import %s(\n\s*)' % lib,
r'\1from pythonfinder._vendor import %s\2' % lib,
text,
)
text = re.sub(
r'(\n\s*)from %s' % lib,
r'\1from pythonfinder._vendor.%s' % lib,
text,
)
item.write_text(text, encoding='utf-8') | python | def rewrite_file_imports(item, vendored_libs):
"""Rewrite 'import xxx' and 'from xxx import' for vendored_libs"""
text = item.read_text(encoding='utf-8')
for lib in vendored_libs:
text = re.sub(
r'(\n\s*)import %s(\n\s*)' % lib,
r'\1from pythonfinder._vendor import %s\2' % lib,
text,
)
text = re.sub(
r'(\n\s*)from %s' % lib,
r'\1from pythonfinder._vendor.%s' % lib,
text,
)
item.write_text(text, encoding='utf-8') | ['def', 'rewrite_file_imports', '(', 'item', ',', 'vendored_libs', ')', ':', 'text', '=', 'item', '.', 'read_text', '(', 'encoding', '=', "'utf-8'", ')', 'for', 'lib', 'in', 'vendored_libs', ':', 'text', '=', 're', '.', 'sub', '(', "r'(\\n\\s*)import %s(\\n\\s*)'", '%', 'lib', ',', "r'\\1from pythonfinder._vendor import %s\\2'", '%', 'lib', ',', 'text', ',', ')', 'text', '=', 're', '.', 'sub', '(', "r'(\\n\\s*)from %s'", '%', 'lib', ',', "r'\\1from pythonfinder._vendor.%s'", '%', 'lib', ',', 'text', ',', ')', 'item', '.', 'write_text', '(', 'text', ',', 'encoding', '=', "'utf-8'", ')'] | Rewrite 'import xxx' and 'from xxx import' for vendored_libs | ['Rewrite', 'import', 'xxx', 'and', 'from', 'xxx', 'import', 'for', 'vendored_libs'] | train | https://github.com/sarugaku/pythonfinder/blob/6f5a4143ff6b52093b4de54650bc09c92d239bf4/tasks/vendoring/__init__.py#L78-L92 |
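The rewrite_file_imports record above rewrites plain imports into vendored ones. A small demonstration of the same two substitutions applied to an in-memory string instead of a file; the library name "six" is illustrative.

    import re

    lib = "six"  # illustrative vendored library name
    text = "import os\nimport six\nfrom six import moves\n"

    # same substitutions as in the record: bare imports -> _vendor imports
    text = re.sub(r'(\n\s*)import %s(\n\s*)' % lib,
                  r'\1from pythonfinder._vendor import %s\2' % lib, text)
    text = re.sub(r'(\n\s*)from %s' % lib,
                  r'\1from pythonfinder._vendor.%s' % lib, text)

    print(text)
    # import os
    # from pythonfinder._vendor import six
    # from pythonfinder._vendor.six import moves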
2,469 | arne-cl/discoursegraphs | src/discoursegraphs/readwrite/exmaralda.py | ExmaraldaDocumentGraph.is_token_annotation_tier | def is_token_annotation_tier(self, tier):
"""
returns True, iff all events in the given tier annotate exactly one
token.
"""
for i, event in enumerate(tier.iter('event')):
if self.indexdelta(event.attrib['end'], event.attrib['start']) != 1:
return False
return True | python | def is_token_annotation_tier(self, tier):
"""
returns True, iff all events in the given tier annotate exactly one
token.
"""
for i, event in enumerate(tier.iter('event')):
if self.indexdelta(event.attrib['end'], event.attrib['start']) != 1:
return False
return True | ['def', 'is_token_annotation_tier', '(', 'self', ',', 'tier', ')', ':', 'for', 'i', ',', 'event', 'in', 'enumerate', '(', 'tier', '.', 'iter', '(', "'event'", ')', ')', ':', 'if', 'self', '.', 'indexdelta', '(', 'event', '.', 'attrib', '[', "'end'", ']', ',', 'event', '.', 'attrib', '[', "'start'", ']', ')', '!=', '1', ':', 'return', 'False', 'return', 'True'] | returns True, iff all events in the given tier annotate exactly one
token. | ['returns', 'True', 'iff', 'all', 'events', 'in', 'the', 'given', 'tier', 'annotate', 'exactly', 'one', 'token', '.'] | train | https://github.com/arne-cl/discoursegraphs/blob/842f0068a3190be2c75905754521b176b25a54fb/src/discoursegraphs/readwrite/exmaralda.py#L356-L364 |
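The is_token_annotation_tier record above walks the <event> elements of an EXMARaLDA tier and requires each event to span exactly one timeline index. A self-contained sketch of the same check; the indexdelta helper below simply assumes timeline IDs of the form T0, T1, ..., whereas the real graph class derives the delta from the document's timeline.

    import xml.etree.ElementTree as ET

    def indexdelta(end, start):
        # illustrative stand-in: numeric distance between "T<n>" timeline IDs
        return int(end.lstrip("T")) - int(start.lstrip("T"))

    tier = ET.fromstring(
        '<tier><event start="T0" end="T1"/><event start="T1" end="T2"/></tier>')

    is_token_tier = all(
        indexdelta(ev.attrib["end"], ev.attrib["start"]) == 1
        for ev in tier.iter("event"))
    print(is_token_tier)  # True: every event annotates exactly one token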
2,470 | iwanbk/nyamuk | nyamuk/mqtt_pkt.py | MqttPkt.alloc | def alloc(self):
"""from _mosquitto_packet_alloc."""
byte = 0
remaining_bytes = bytearray(5)
i = 0
remaining_length = self.remaining_length
self.payload = None
self.remaining_count = 0
loop_flag = True
#self.dump()
while loop_flag:
byte = remaining_length % 128
remaining_length = remaining_length / 128
if remaining_length > 0:
byte = byte | 0x80
remaining_bytes[self.remaining_count] = byte
self.remaining_count += 1
if not (remaining_length > 0 and self.remaining_count < 5):
loop_flag = False
if self.remaining_count == 5:
return NC.ERR_PAYLOAD_SIZE
self.packet_length = self.remaining_length + 1 + self.remaining_count
self.payload = bytearray(self.packet_length)
self.payload[0] = self.command
i = 0
while i < self.remaining_count:
self.payload[i+1] = remaining_bytes[i]
i += 1
self.pos = 1 + self.remaining_count
return NC.ERR_SUCCESS | python | def alloc(self):
"""from _mosquitto_packet_alloc."""
byte = 0
remaining_bytes = bytearray(5)
i = 0
remaining_length = self.remaining_length
self.payload = None
self.remaining_count = 0
loop_flag = True
#self.dump()
while loop_flag:
byte = remaining_length % 128
remaining_length = remaining_length / 128
if remaining_length > 0:
byte = byte | 0x80
remaining_bytes[self.remaining_count] = byte
self.remaining_count += 1
if not (remaining_length > 0 and self.remaining_count < 5):
loop_flag = False
if self.remaining_count == 5:
return NC.ERR_PAYLOAD_SIZE
self.packet_length = self.remaining_length + 1 + self.remaining_count
self.payload = bytearray(self.packet_length)
self.payload[0] = self.command
i = 0
while i < self.remaining_count:
self.payload[i+1] = remaining_bytes[i]
i += 1
self.pos = 1 + self.remaining_count
return NC.ERR_SUCCESS | ['def', 'alloc', '(', 'self', ')', ':', 'byte', '=', '0', 'remaining_bytes', '=', 'bytearray', '(', '5', ')', 'i', '=', '0', 'remaining_length', '=', 'self', '.', 'remaining_length', 'self', '.', 'payload', '=', 'None', 'self', '.', 'remaining_count', '=', '0', 'loop_flag', '=', 'True', '#self.dump()', 'while', 'loop_flag', ':', 'byte', '=', 'remaining_length', '%', '128', 'remaining_length', '=', 'remaining_length', '/', '128', 'if', 'remaining_length', '>', '0', ':', 'byte', '=', 'byte', '|', '0x80', 'remaining_bytes', '[', 'self', '.', 'remaining_count', ']', '=', 'byte', 'self', '.', 'remaining_count', '+=', '1', 'if', 'not', '(', 'remaining_length', '>', '0', 'and', 'self', '.', 'remaining_count', '<', '5', ')', ':', 'loop_flag', '=', 'False', 'if', 'self', '.', 'remaining_count', '==', '5', ':', 'return', 'NC', '.', 'ERR_PAYLOAD_SIZE', 'self', '.', 'packet_length', '=', 'self', '.', 'remaining_length', '+', '1', '+', 'self', '.', 'remaining_count', 'self', '.', 'payload', '=', 'bytearray', '(', 'self', '.', 'packet_length', ')', 'self', '.', 'payload', '[', '0', ']', '=', 'self', '.', 'command', 'i', '=', '0', 'while', 'i', '<', 'self', '.', 'remaining_count', ':', 'self', '.', 'payload', '[', 'i', '+', '1', ']', '=', 'remaining_bytes', '[', 'i', ']', 'i', '+=', '1', 'self', '.', 'pos', '=', '1', '+', 'self', '.', 'remaining_count', 'return', 'NC', '.', 'ERR_SUCCESS'] | from _mosquitto_packet_alloc. | ['from', '_mosquitto_packet_alloc', '.'] | train | https://github.com/iwanbk/nyamuk/blob/ac4c6028de288a4c8e0b332ae16eae889deb643d/nyamuk/mqtt_pkt.py#L47-L88 |
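MqttPkt.alloc above builds the fixed header by encoding the MQTT "remaining length" as a variable-length quantity: 7 bits per byte, with 0x80 set on continuation bytes. A standalone sketch of that encoding; note that under Python 3 the division must be integer division (//), since the record's `/ 128` would turn remaining_length into a float, and that the MQTT spec caps the field at 4 bytes (the record returns ERR_PAYLOAD_SIZE when a fifth byte would be needed).

    def encode_remaining_length(remaining_length):
        # variable-length encoding used by the MQTT fixed header
        encoded = bytearray()
        while True:
            byte = remaining_length % 128
            remaining_length //= 128      # integer division (the record uses '/')
            if remaining_length > 0:
                byte |= 0x80              # more bytes follow
            encoded.append(byte)
            if remaining_length == 0:
                break
        return encoded                    # real code rejects results longer than 4 bytes

    print(list(encode_remaining_length(321)))  # [193, 2]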
2,471 | bachya/pyflunearyou | pyflunearyou/cdc.py | adjust_status | def adjust_status(info: dict) -> dict:
"""Apply status mapping to a raw API result."""
modified_info = deepcopy(info)
modified_info.update({
'level':
get_nearest_by_numeric_key(STATUS_MAP, int(info['level'])),
'level2':
STATUS_MAP[99] if info['level2'] is None else
get_nearest_by_numeric_key(STATUS_MAP, int(info['level2']))
})
return modified_info | python | def adjust_status(info: dict) -> dict:
"""Apply status mapping to a raw API result."""
modified_info = deepcopy(info)
modified_info.update({
'level':
get_nearest_by_numeric_key(STATUS_MAP, int(info['level'])),
'level2':
STATUS_MAP[99] if info['level2'] is None else
get_nearest_by_numeric_key(STATUS_MAP, int(info['level2']))
})
return modified_info | ['def', 'adjust_status', '(', 'info', ':', 'dict', ')', '->', 'dict', ':', 'modified_info', '=', 'deepcopy', '(', 'info', ')', 'modified_info', '.', 'update', '(', '{', "'level'", ':', 'get_nearest_by_numeric_key', '(', 'STATUS_MAP', ',', 'int', '(', 'info', '[', "'level'", ']', ')', ')', ',', "'level2'", ':', 'STATUS_MAP', '[', '99', ']', 'if', 'info', '[', "'level2'", ']', 'is', 'None', 'else', 'get_nearest_by_numeric_key', '(', 'STATUS_MAP', ',', 'int', '(', 'info', '[', "'level2'", ']', ')', ')', '}', ')', 'return', 'modified_info'] | Apply status mapping to a raw API result. | ['Apply', 'status', 'mapping', 'to', 'a', 'raw', 'API', 'result', '.'] | train | https://github.com/bachya/pyflunearyou/blob/16a2f839c8df851e925e010a6b5c5708386febac/pyflunearyou/cdc.py#L23-L34 |
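adjust_status above snaps the numeric "level" fields onto the nearest entry of a status map. A sketch of the same logic with an illustrative STATUS_MAP and a minimal get_nearest_by_numeric_key; the real map and helper live in the pyflunearyou package itself.

    from copy import deepcopy

    STATUS_MAP = {1: "Minimal", 5: "High", 99: "None"}  # illustrative values only

    def get_nearest_by_numeric_key(data, key):
        # pick the entry whose numeric key is closest to `key`
        return data[min(data, key=lambda k: abs(k - key))]

    info = {"level": "4", "level2": None}
    adjusted = deepcopy(info)
    adjusted["level"] = get_nearest_by_numeric_key(STATUS_MAP, int(info["level"]))
    adjusted["level2"] = (STATUS_MAP[99] if info["level2"] is None
                          else get_nearest_by_numeric_key(STATUS_MAP, int(info["level2"])))
    print(adjusted)  # {'level': 'High', 'level2': 'None'}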
2,472 | saltstack/salt | salt/modules/boto_vpc.py | describe_subnet | def describe_subnet(subnet_id=None, subnet_name=None, region=None,
key=None, keyid=None, profile=None):
'''
Given a subnet id or name, describe its properties.
Returns a dictionary of interesting properties.
.. versionadded:: 2015.8.0
CLI Examples:
.. code-block:: bash
salt myminion boto_vpc.describe_subnet subnet_id=subnet-123456
salt myminion boto_vpc.describe_subnet subnet_name=mysubnet
'''
try:
subnet = _get_resource('subnet', name=subnet_name, resource_id=subnet_id,
region=region, key=key, keyid=keyid, profile=profile)
except BotoServerError as e:
return {'error': __utils__['boto.get_error'](e)}
if not subnet:
return {'subnet': None}
log.debug('Found subnet: %s', subnet.id)
keys = ('id', 'cidr_block', 'availability_zone', 'tags', 'vpc_id')
ret = {'subnet': dict((k, getattr(subnet, k)) for k in keys)}
explicit_route_table_assoc = _get_subnet_explicit_route_table(ret['subnet']['id'],
ret['subnet']['vpc_id'],
conn=None, region=region,
key=key, keyid=keyid, profile=profile)
if explicit_route_table_assoc:
ret['subnet']['explicit_route_table_association_id'] = explicit_route_table_assoc
return ret | python | def describe_subnet(subnet_id=None, subnet_name=None, region=None,
key=None, keyid=None, profile=None):
'''
Given a subnet id or name, describe its properties.
Returns a dictionary of interesting properties.
.. versionadded:: 2015.8.0
CLI Examples:
.. code-block:: bash
salt myminion boto_vpc.describe_subnet subnet_id=subnet-123456
salt myminion boto_vpc.describe_subnet subnet_name=mysubnet
'''
try:
subnet = _get_resource('subnet', name=subnet_name, resource_id=subnet_id,
region=region, key=key, keyid=keyid, profile=profile)
except BotoServerError as e:
return {'error': __utils__['boto.get_error'](e)}
if not subnet:
return {'subnet': None}
log.debug('Found subnet: %s', subnet.id)
keys = ('id', 'cidr_block', 'availability_zone', 'tags', 'vpc_id')
ret = {'subnet': dict((k, getattr(subnet, k)) for k in keys)}
explicit_route_table_assoc = _get_subnet_explicit_route_table(ret['subnet']['id'],
ret['subnet']['vpc_id'],
conn=None, region=region,
key=key, keyid=keyid, profile=profile)
if explicit_route_table_assoc:
ret['subnet']['explicit_route_table_association_id'] = explicit_route_table_assoc
return ret | ['def', 'describe_subnet', '(', 'subnet_id', '=', 'None', ',', 'subnet_name', '=', 'None', ',', 'region', '=', 'None', ',', 'key', '=', 'None', ',', 'keyid', '=', 'None', ',', 'profile', '=', 'None', ')', ':', 'try', ':', 'subnet', '=', '_get_resource', '(', "'subnet'", ',', 'name', '=', 'subnet_name', ',', 'resource_id', '=', 'subnet_id', ',', 'region', '=', 'region', ',', 'key', '=', 'key', ',', 'keyid', '=', 'keyid', ',', 'profile', '=', 'profile', ')', 'except', 'BotoServerError', 'as', 'e', ':', 'return', '{', "'error'", ':', '__utils__', '[', "'boto.get_error'", ']', '(', 'e', ')', '}', 'if', 'not', 'subnet', ':', 'return', '{', "'subnet'", ':', 'None', '}', 'log', '.', 'debug', '(', "'Found subnet: %s'", ',', 'subnet', '.', 'id', ')', 'keys', '=', '(', "'id'", ',', "'cidr_block'", ',', "'availability_zone'", ',', "'tags'", ',', "'vpc_id'", ')', 'ret', '=', '{', "'subnet'", ':', 'dict', '(', '(', 'k', ',', 'getattr', '(', 'subnet', ',', 'k', ')', ')', 'for', 'k', 'in', 'keys', ')', '}', 'explicit_route_table_assoc', '=', '_get_subnet_explicit_route_table', '(', 'ret', '[', "'subnet'", ']', '[', "'id'", ']', ',', 'ret', '[', "'subnet'", ']', '[', "'vpc_id'", ']', ',', 'conn', '=', 'None', ',', 'region', '=', 'region', ',', 'key', '=', 'key', ',', 'keyid', '=', 'keyid', ',', 'profile', '=', 'profile', ')', 'if', 'explicit_route_table_assoc', ':', 'ret', '[', "'subnet'", ']', '[', "'explicit_route_table_association_id'", ']', '=', 'explicit_route_table_assoc', 'return', 'ret'] | Given a subnet id or name, describe its properties.
Returns a dictionary of interesting properties.
.. versionadded:: 2015.8.0
CLI Examples:
.. code-block:: bash
salt myminion boto_vpc.describe_subnet subnet_id=subnet-123456
salt myminion boto_vpc.describe_subnet subnet_name=mysubnet | ['Given', 'a', 'subnet', 'id', 'or', 'name', 'describe', 'its', 'properties', '.'] | train | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/boto_vpc.py#L1018-L1053 |
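As the describe_subnet code above shows, failures come back as {'error': ...} and a missing subnet as {'subnet': None}; a successful lookup returns the keys collected from the boto subnet object, shaped roughly as in the sketch below. All field values here are illustrative, not real AWS identifiers.

    ret = {
        "subnet": {
            "id": "subnet-123456",
            "cidr_block": "10.0.0.0/24",
            "availability_zone": "us-east-1a",
            "tags": {"Name": "mysubnet"},
            "vpc_id": "vpc-abcdef01",
            # only present when the subnet has an explicit route table association
            "explicit_route_table_association_id": "rtbassoc-0123",
        }
    }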
2,473 | raamana/hiwenet | hiwenet/pairwise_dist.py | run_cli | def run_cli():
"Command line interface to hiwenet."
features_path, groups_path, weight_method, num_bins, edge_range, \
trim_outliers, trim_percentile, return_networkx_graph, out_weights_path = parse_args()
# TODO add the possibility to process multiple combinations of parameters: diff subjects, diff metrics
# for features_path to be a file containing multiple subjects (one/line)
# -w could take multiple values kldiv,histint,
# each line: input_features_path,out_weights_path
features, groups = read_features_and_groups(features_path, groups_path)
extract(features, groups, weight_method=weight_method, num_bins=num_bins,
edge_range=edge_range, trim_outliers=trim_outliers, trim_percentile=trim_percentile,
return_networkx_graph=return_networkx_graph, out_weights_path=out_weights_path) | python | def run_cli():
"Command line interface to hiwenet."
features_path, groups_path, weight_method, num_bins, edge_range, \
trim_outliers, trim_percentile, return_networkx_graph, out_weights_path = parse_args()
# TODO add the possibility to process multiple combinations of parameters: diff subjects, diff metrics
# for features_path to be a file containing multiple subjects (one/line)
# -w could take multiple values kldiv,histint,
# each line: input_features_path,out_weights_path
features, groups = read_features_and_groups(features_path, groups_path)
extract(features, groups, weight_method=weight_method, num_bins=num_bins,
edge_range=edge_range, trim_outliers=trim_outliers, trim_percentile=trim_percentile,
return_networkx_graph=return_networkx_graph, out_weights_path=out_weights_path) | ['def', 'run_cli', '(', ')', ':', 'features_path', ',', 'groups_path', ',', 'weight_method', ',', 'num_bins', ',', 'edge_range', ',', 'trim_outliers', ',', 'trim_percentile', ',', 'return_networkx_graph', ',', 'out_weights_path', '=', 'parse_args', '(', ')', '# TODO add the possibility to process multiple combinations of parameters: diff subjects, diff metrics', '# for features_path to be a file containing multiple subjects (one/line)', '# -w could take multiple values kldiv,histint,', '# each line: input_features_path,out_weights_path', 'features', ',', 'groups', '=', 'read_features_and_groups', '(', 'features_path', ',', 'groups_path', ')', 'extract', '(', 'features', ',', 'groups', ',', 'weight_method', '=', 'weight_method', ',', 'num_bins', '=', 'num_bins', ',', 'edge_range', '=', 'edge_range', ',', 'trim_outliers', '=', 'trim_outliers', ',', 'trim_percentile', '=', 'trim_percentile', ',', 'return_networkx_graph', '=', 'return_networkx_graph', ',', 'out_weights_path', '=', 'out_weights_path', ')'] | Command line interface to hiwenet. | ['Command', 'line', 'interface', 'to', 'hiwenet', '.'] | train | https://github.com/raamana/hiwenet/blob/b12699b3722fd0a6a835e7d7ca4baf58fb181809/hiwenet/pairwise_dist.py#L577-L592 |
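run_cli above is a thin wrapper that parses command-line arguments and hands everything to extract(). Calling it from Python directly looks roughly like the sketch below, assuming the same extract function is importable from the hiwenet package; the array sizes and the chosen weight method are illustrative.

    import numpy as np
    from hiwenet import extract  # assumed public entry point wrapped by run_cli

    num_vertices, num_groups = 1000, 5
    features = np.random.random(num_vertices)                     # one value per vertex
    groups = np.random.randint(1, num_groups + 1, num_vertices)   # group label per vertex

    # histogram-distance weights between every pair of groups
    edge_weights = extract(features, groups, weight_method='manhattan')
    print(edge_weights.shape)  # one weight per pair of groups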
2,474 | jelmer/python-fastimport | fastimport/parser.py | ImportParser.iter_commands | def iter_commands(self):
"""Iterator returning ImportCommand objects."""
while True:
line = self.next_line()
if line is None:
if b'done' in self.features:
raise errors.PrematureEndOfStream(self.lineno)
break
elif len(line) == 0 or line.startswith(b'#'):
continue
# Search for commands in order of likelihood
elif line.startswith(b'commit '):
yield self._parse_commit(line[len(b'commit '):])
elif line.startswith(b'blob'):
yield self._parse_blob()
elif line.startswith(b'done'):
break
elif line.startswith(b'progress '):
yield commands.ProgressCommand(line[len(b'progress '):])
elif line.startswith(b'reset '):
yield self._parse_reset(line[len(b'reset '):])
elif line.startswith(b'tag '):
yield self._parse_tag(line[len(b'tag '):])
elif line.startswith(b'checkpoint'):
yield commands.CheckpointCommand()
elif line.startswith(b'feature'):
yield self._parse_feature(line[len(b'feature '):])
else:
self.abort(errors.InvalidCommand, line) | python | def iter_commands(self):
"""Iterator returning ImportCommand objects."""
while True:
line = self.next_line()
if line is None:
if b'done' in self.features:
raise errors.PrematureEndOfStream(self.lineno)
break
elif len(line) == 0 or line.startswith(b'#'):
continue
# Search for commands in order of likelihood
elif line.startswith(b'commit '):
yield self._parse_commit(line[len(b'commit '):])
elif line.startswith(b'blob'):
yield self._parse_blob()
elif line.startswith(b'done'):
break
elif line.startswith(b'progress '):
yield commands.ProgressCommand(line[len(b'progress '):])
elif line.startswith(b'reset '):
yield self._parse_reset(line[len(b'reset '):])
elif line.startswith(b'tag '):
yield self._parse_tag(line[len(b'tag '):])
elif line.startswith(b'checkpoint'):
yield commands.CheckpointCommand()
elif line.startswith(b'feature'):
yield self._parse_feature(line[len(b'feature '):])
else:
self.abort(errors.InvalidCommand, line) | ['def', 'iter_commands', '(', 'self', ')', ':', 'while', 'True', ':', 'line', '=', 'self', '.', 'next_line', '(', ')', 'if', 'line', 'is', 'None', ':', 'if', "b'done'", 'in', 'self', '.', 'features', ':', 'raise', 'errors', '.', 'PrematureEndOfStream', '(', 'self', '.', 'lineno', ')', 'break', 'elif', 'len', '(', 'line', ')', '==', '0', 'or', 'line', '.', 'startswith', '(', "b'#'", ')', ':', 'continue', '# Search for commands in order of likelihood', 'elif', 'line', '.', 'startswith', '(', "b'commit '", ')', ':', 'yield', 'self', '.', '_parse_commit', '(', 'line', '[', 'len', '(', "b'commit '", ')', ':', ']', ')', 'elif', 'line', '.', 'startswith', '(', "b'blob'", ')', ':', 'yield', 'self', '.', '_parse_blob', '(', ')', 'elif', 'line', '.', 'startswith', '(', "b'done'", ')', ':', 'break', 'elif', 'line', '.', 'startswith', '(', "b'progress '", ')', ':', 'yield', 'commands', '.', 'ProgressCommand', '(', 'line', '[', 'len', '(', "b'progress '", ')', ':', ']', ')', 'elif', 'line', '.', 'startswith', '(', "b'reset '", ')', ':', 'yield', 'self', '.', '_parse_reset', '(', 'line', '[', 'len', '(', "b'reset '", ')', ':', ']', ')', 'elif', 'line', '.', 'startswith', '(', "b'tag '", ')', ':', 'yield', 'self', '.', '_parse_tag', '(', 'line', '[', 'len', '(', "b'tag '", ')', ':', ']', ')', 'elif', 'line', '.', 'startswith', '(', "b'checkpoint'", ')', ':', 'yield', 'commands', '.', 'CheckpointCommand', '(', ')', 'elif', 'line', '.', 'startswith', '(', "b'feature'", ')', ':', 'yield', 'self', '.', '_parse_feature', '(', 'line', '[', 'len', '(', "b'feature '", ')', ':', ']', ')', 'else', ':', 'self', '.', 'abort', '(', 'errors', '.', 'InvalidCommand', ',', 'line', ')'] | Iterator returning ImportCommand objects. | ['Iterator', 'returning', 'ImportCommand', 'objects', '.'] | train | https://github.com/jelmer/python-fastimport/blob/5cef9e037b7d7b37f58f522ac9ea4e343e6a1dff/fastimport/parser.py#L290-L318 |
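iter_commands above drives the whole fast-import parse loop. A minimal usage sketch, assuming fastimport's ImportParser accepts a readable byte stream as shown; the stream content itself is illustrative.

    from io import BytesIO
    from fastimport import parser

    stream = BytesIO(
        b"blob\n"
        b"mark :1\n"
        b"data 5\n"
        b"hello\n"
        b"done\n"
    )

    p = parser.ImportParser(stream)
    for cmd in p.iter_commands():
        print(cmd.__class__.__name__)  # e.g. BlobCommand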
2,475 | PmagPy/PmagPy | pmagpy/ipmag.py | pmag_results_extract | def pmag_results_extract(res_file="pmag_results.txt", crit_file="", spec_file="",
age_file="", latex=False, grade=False, WD="."):
"""
Generate tab delimited output file(s) with result data.
Save output files and return True if successful.
Possible output files: Directions, Intensities, SiteNfo, Criteria,
Specimens
Optional Parameters (defaults are used if not specified)
----------
res_file : name of pmag_results file (default is "pmag_results.txt")
crit_file : name of criteria file (default is "pmag_criteria.txt")
spec_file : name of specimen file (default is "pmag_specimens.txt")
age_file : name of age file (default is "er_ages.txt")
latex : boolean argument to output in LaTeX (default is False)
WD : path to directory that contains input files and takes output (default is current directory, '.')
"""
# format outfiles
if latex:
latex = 1
file_type = '.tex'
else:
latex = 0
file_type = '.txt'
dir_path = os.path.realpath(WD)
outfile = os.path.join(dir_path, 'Directions' + file_type)
Ioutfile = os.path.join(dir_path, 'Intensities' + file_type)
Soutfile = os.path.join(dir_path, 'SiteNfo' + file_type)
Specout = os.path.join(dir_path, 'Specimens' + file_type)
Critout = os.path.join(dir_path, 'Criteria' + file_type)
# format infiles
res_file = os.path.join(dir_path, res_file)
if crit_file:
crit_file = os.path.join(dir_path, crit_file)
if spec_file:
spec_file = os.path.join(dir_path, spec_file)
else:
grade = False
# open output files
f = open(outfile, 'w')
sf = open(Soutfile, 'w')
fI = open(Ioutfile, 'w')
if crit_file:
cr = open(Critout, 'w')
# set up column headers
Sites, file_type = pmag.magic_read(res_file)
if crit_file:
Crits, file_type = pmag.magic_read(crit_file)
else:
Crits = []
SiteCols = ["Site", "Location",
"Lat. (N)", "Long. (E)", "Age ", "Age sigma", "Units"]
SiteKeys = ["er_site_names", "average_lat", "average_lon", "average_age",
"average_age_sigma", "average_age_unit"]
DirCols = ["Site", 'Comp.', "perc TC", "Dec.", "Inc.", "Nl", "Np", "k ", "R", "a95",
"PLat", "PLong"]
DirKeys = ["er_site_names", "pole_comp_name", "tilt_correction", "average_dec", "average_inc",
"average_n_lines", "average_n_planes", "average_k", "average_r", "average_alpha95",
"vgp_lat", "vgp_lon"]
IntCols = ["Site", "N", "B (uT)", "sigma",
"sigma perc", "VADM", "VADM sigma"]
IntKeys = ["er_site_names", "average_int_n", "average_int", "average_int_sigma",
'average_int_sigma_perc', "vadm", "vadm_sigma"]
AllowedKeys = ['specimen_frac', 'specimen_scat', 'specimen_gap_max', 'measurement_step_min',
'measurement_step_max', 'measurement_step_unit', 'specimen_polarity',
'specimen_nrm', 'specimen_direction_type', 'specimen_comp_nmb', 'specimen_mad',
'specimen_alpha95', 'specimen_n', 'specimen_int_sigma',
'specimen_int_sigma_perc', 'specimen_int_rel_sigma',
'specimen_int_rel_sigma_perc', 'specimen_int_mad', 'specimen_int_n',
'specimen_w', 'specimen_q', 'specimen_f', 'specimen_fvds', 'specimen_b_sigma',
'specimen_b_beta', 'specimen_g', 'specimen_dang', 'specimen_md',
'specimen_ptrm', 'specimen_drat', 'specimen_drats', 'specimen_rsc',
'specimen_viscosity_index', 'specimen_magn_moment', 'specimen_magn_volume',
'specimen_magn_mass', 'specimen_int_ptrm_n', 'specimen_delta', 'specimen_theta',
'specimen_gamma', 'sample_polarity', 'sample_nrm', 'sample_direction_type',
'sample_comp_nmb', 'sample_sigma', 'sample_alpha95', 'sample_n',
'sample_n_lines', 'sample_n_planes', 'sample_k', 'sample_r',
'sample_tilt_correction', 'sample_int_sigma', 'sample_int_sigma_perc',
'sample_int_rel_sigma', 'sample_int_rel_sigma_perc', 'sample_int_n',
'sample_magn_moment', 'sample_magn_volume', 'sample_magn_mass', 'site_polarity',
'site_nrm', 'site_direction_type', 'site_comp_nmb', 'site_sigma',
'site_alpha95', 'site_n', 'site_n_lines', 'site_n_planes', 'site_k', 'site_r',
'site_tilt_correction', 'site_int_sigma', 'site_int_sigma_perc',
'site_int_rel_sigma', 'site_int_rel_sigma_perc', 'site_int_n',
'site_magn_moment', 'site_magn_volume', 'site_magn_mass', 'average_age_min',
'average_age_max', 'average_age_sigma', 'average_age_unit', 'average_sigma',
'average_alpha95', 'average_n', 'average_nn', 'average_k', 'average_r',
'average_int_sigma', 'average_int_rel_sigma', 'average_int_rel_sigma_perc',
'average_int_n', 'average_int_nn', 'vgp_dp', 'vgp_dm', 'vgp_sigma',
'vgp_alpha95', 'vgp_n', 'vdm_sigma', 'vdm_n', 'vadm_sigma', 'vadm_n']
if crit_file:
crit = Crits[0] # get a list of useful keys
for key in list(crit.keys()):
if key not in AllowedKeys:
del(crit[key])
for key in list(crit.keys()):
if (not crit[key]) or (eval(crit[key]) > 1000) or (eval(crit[key]) == 0):
# get rid of all blank or too big ones or too little ones
del(crit[key])
CritKeys = list(crit.keys())
if spec_file:
Specs, file_type = pmag.magic_read(spec_file)
fsp = open(Specout, 'w') # including specimen intensities if desired
SpecCols = ["Site", "Specimen", "B (uT)", "MAD", "Beta", "N", "Q", "DANG", "f-vds",
"DRATS", "T (C)"]
SpecKeys = ['er_site_name', 'er_specimen_name', 'specimen_int', 'specimen_int_mad',
'specimen_b_beta', 'specimen_int_n', 'specimen_q', 'specimen_dang',
'specimen_fvds', 'specimen_drats', 'trange']
Xtra = ['specimen_frac', 'specimen_scat', 'specimen_gmax']
if grade:
SpecCols.append('Grade')
SpecKeys.append('specimen_grade')
for x in Xtra: # put in the new intensity keys if present
if x in list(Specs[0].keys()):
SpecKeys.append(x)
newkey = ""
for k in x.split('_')[1:]:
newkey = newkey + k + '_'
SpecCols.append(newkey.strip('_'))
SpecCols.append('Corrections')
SpecKeys.append('corrections')
# these should be multiplied by 1e6
Micro = ['specimen_int', 'average_int', 'average_int_sigma']
Zeta = ['vadm', 'vadm_sigma'] # these should be multiplied by 1e21
# write out the header information for each output file
if latex: # write out the latex header stuff
sep = ' & '
end = '\\\\'
f.write('\\documentclass{article}\n')
f.write('\\usepackage[margin=1in]{geometry}\n')
f.write('\\usepackage{longtable}\n')
f.write('\\begin{document}\n')
sf.write('\\documentclass{article}\n')
sf.write('\\usepackage[margin=1in]{geometry}\n')
sf.write('\\usepackage{longtable}\n')
sf.write('\\begin{document}\n')
fI.write('\\documentclass{article}\n')
fI.write('\\usepackage[margin=1in]{geometry}\n')
fI.write('\\usepackage{longtable}\n')
fI.write('\\begin{document}\n')
if crit_file:
cr.write('\\documentclass{article}\n')
cr.write('\\usepackage[margin=1in]{geometry}\n')
cr.write('\\usepackage{longtable}\n')
cr.write('\\begin{document}\n')
if spec_file:
fsp.write('\\documentclass{article}\n')
fsp.write('\\usepackage[margin=1in]{geometry}\n')
fsp.write('\\usepackage{longtable}\n')
fsp.write('\\begin{document}\n')
tabstring = '\\begin{longtable}{'
fstring = tabstring
for k in range(len(SiteCols)):
fstring = fstring + 'r'
sf.write(fstring + '}\n')
sf.write('\hline\n')
fstring = tabstring
for k in range(len(DirCols)):
fstring = fstring + 'r'
f.write(fstring + '}\n')
f.write('\hline\n')
fstring = tabstring
for k in range(len(IntCols)):
fstring = fstring + 'r'
fI.write(fstring + '}\n')
fI.write('\hline\n')
fstring = tabstring
if crit_file:
for k in range(len(CritKeys)):
fstring = fstring + 'r'
cr.write(fstring + '}\n')
cr.write('\hline\n')
if spec_file:
fstring = tabstring
for k in range(len(SpecCols)):
fstring = fstring + 'r'
fsp.write(fstring + '}\n')
fsp.write('\hline\n')
else: # just set the tab and line endings for tab delimited
sep = ' \t '
end = ''
# now write out the actual column headers
Soutstring, Doutstring, Ioutstring, Spoutstring, Croutstring = "", "", "", "", ""
for k in range(len(SiteCols)):
Soutstring = Soutstring + SiteCols[k] + sep
Soutstring = Soutstring.strip(sep)
Soutstring = Soutstring + end + '\n'
sf.write(Soutstring)
for k in range(len(DirCols)):
Doutstring = Doutstring + DirCols[k] + sep
Doutstring = Doutstring.strip(sep)
Doutstring = Doutstring + end + '\n'
f.write(Doutstring)
for k in range(len(IntCols)):
Ioutstring = Ioutstring + IntCols[k] + sep
Ioutstring = Ioutstring.strip(sep)
Ioutstring = Ioutstring + end + '\n'
fI.write(Ioutstring)
if crit_file:
for k in range(len(CritKeys)):
Croutstring = Croutstring + CritKeys[k] + sep
Croutstring = Croutstring.strip(sep)
Croutstring = Croutstring + end + '\n'
cr.write(Croutstring)
if spec_file:
for k in range(len(SpecCols)):
Spoutstring = Spoutstring + SpecCols[k] + sep
Spoutstring = Spoutstring.strip(sep)
Spoutstring = Spoutstring + end + "\n"
fsp.write(Spoutstring)
if latex: # put in a horizontal line in latex file
f.write('\hline\n')
sf.write('\hline\n')
fI.write('\hline\n')
if crit_file:
cr.write('\hline\n')
if spec_file:
fsp.write('\hline\n')
# do criteria
if crit_file:
for crit in Crits:
Croutstring = ""
for key in CritKeys:
Croutstring = Croutstring + crit[key] + sep
Croutstring = Croutstring.strip(sep) + end
cr.write(Croutstring + '\n')
# do directions
# get all results with VGPs
VGPs = pmag.get_dictitem(Sites, 'vgp_lat', '', 'F')
VGPs = pmag.get_dictitem(VGPs, 'data_type', 'i',
'T') # get site level stuff
for site in VGPs:
if len(site['er_site_names'].split(":")) == 1:
if 'er_sample_names' not in list(site.keys()):
site['er_sample_names'] = ''
if 'pole_comp_name' not in list(site.keys()):
site['pole_comp_name'] = "A"
if 'average_nn' not in list(site.keys()) and 'average_n' in list(site.keys()):
site['average_nn'] = site['average_n']
if 'average_n_lines' not in list(site.keys()):
site['average_n_lines'] = site['average_nn']
if 'average_n_planes' not in list(site.keys()):
site['average_n_planes'] = ""
Soutstring, Doutstring = "", ""
for key in SiteKeys:
if key in list(site.keys()):
Soutstring = Soutstring + site[key] + sep
Soutstring = Soutstring.strip(sep) + end
sf.write(Soutstring + '\n')
for key in DirKeys:
if key in list(site.keys()):
Doutstring = Doutstring + site[key] + sep
Doutstring = Doutstring.strip(sep) + end
f.write(Doutstring + '\n')
# now do intensities
VADMs = pmag.get_dictitem(Sites, 'vadm', '', 'F')
VADMs = pmag.get_dictitem(VADMs, 'data_type', 'i', 'T')
for site in VADMs: # do results level stuff
if site not in VGPs:
Soutstring = ""
for key in SiteKeys:
if key in list(site.keys()):
Soutstring = Soutstring + site[key] + sep
else:
Soutstring = Soutstring + " " + sep
Soutstring = Soutstring.strip(sep) + end
sf.write(Soutstring + '\n')
if len(site['er_site_names'].split(":")) == 1 and site['data_type'] == 'i':
if 'average_int_sigma_perc' not in list(site.keys()):
site['average_int_sigma_perc'] = "0"
if site["average_int_sigma"] == "":
site["average_int_sigma"] = "0"
if site["average_int_sigma_perc"] == "":
site["average_int_sigma_perc"] = "0"
if site["vadm"] == "":
site["vadm"] = "0"
if site["vadm_sigma"] == "":
site["vadm_sigma"] = "0"
for key in list(site.keys()): # reformat vadms, intensities
if key in Micro:
site[key] = '%7.1f' % (float(site[key]) * 1e6)
if key in Zeta:
site[key] = '%7.1f' % (float(site[key]) * 1e-21)
outstring = ""
for key in IntKeys:
if key not in list(site.keys()):
site[key] = ""
outstring = outstring + site[key] + sep
outstring = outstring.strip(sep) + end + '\n'
fI.write(outstring)
# VDMs=pmag.get_dictitem(Sites,'vdm','','F') # get non-blank VDMs
# for site in VDMs: # do results level stuff
# if len(site['er_site_names'].split(":"))==1:
# if 'average_int_sigma_perc' not in site.keys():site['average_int_sigma_perc']="0"
# if site["average_int_sigma"]=="":site["average_int_sigma"]="0"
# if site["average_int_sigma_perc"]=="":site["average_int_sigma_perc"]="0"
# if site["vadm"]=="":site["vadm"]="0"
# if site["vadm_sigma"]=="":site["vadm_sigma"]="0"
# for key in site.keys(): # reformat vadms, intensities
# if key in Micro: site[key]='%7.1f'%(float(site[key])*1e6)
# if key in Zeta: site[key]='%7.1f'%(float(site[key])*1e-21)
# outstring=""
# for key in IntKeys:
# outstring=outstring+site[key]+sep
# fI.write(outstring.strip(sep)+'\n')
if spec_file:
SpecsInts = pmag.get_dictitem(Specs, 'specimen_int', '', 'F')
for spec in SpecsInts:
spec['trange'] = '%i' % (int(float(spec['measurement_step_min']) - 273)) + \
'-' + '%i' % (int(float(spec['measurement_step_max']) - 273))
meths = spec['magic_method_codes'].split(':')
corrections = ''
for meth in meths:
if 'DA' in meth:
corrections = corrections + meth[3:] + ':'
corrections = corrections.strip(':')
if corrections.strip() == "":
corrections = "None"
spec['corrections'] = corrections
outstring = ""
for key in SpecKeys:
if key in Micro:
spec[key] = '%7.1f' % (float(spec[key]) * 1e6)
if key in Zeta:
spec[key] = '%7.1f' % (float(spec[key]) * 1e-21)
outstring = outstring + spec[key] + sep
fsp.write(outstring.strip(sep) + end + '\n')
#
if latex: # write out the tail stuff
f.write('\hline\n')
sf.write('\hline\n')
fI.write('\hline\n')
f.write('\end{longtable}\n')
sf.write('\end{longtable}\n')
fI.write('\end{longtable}\n')
f.write('\end{document}\n')
sf.write('\end{document}\n')
fI.write('\end{document}\n')
if spec_file:
fsp.write('\hline\n')
fsp.write('\end{longtable}\n')
fsp.write('\end{document}\n')
if crit_file:
cr.write('\hline\n')
cr.write('\end{longtable}\n')
cr.write('\end{document}\n')
f.close()
sf.close()
fI.close()
print('data saved in: ', outfile, Ioutfile, Soutfile)
outfiles = [outfile, Ioutfile, Soutfile]
if spec_file:
fsp.close()
print('specimen data saved in: ', Specout)
outfiles.append(Specout)
if crit_file:
cr.close()
print('Selection criteria saved in: ', Critout)
outfiles.append(Critout)
return True, outfiles | python | def pmag_results_extract(res_file="pmag_results.txt", crit_file="", spec_file="",
age_file="", latex=False, grade=False, WD="."):
"""
Generate tab delimited output file(s) with result data.
Save output files and return True if successful.
Possible output files: Directions, Intensities, SiteNfo, Criteria,
Specimens
Optional Parameters (defaults are used if not specified)
----------
res_file : name of pmag_results file (default is "pmag_results.txt")
crit_file : name of criteria file (default is "pmag_criteria.txt")
spec_file : name of specimen file (default is "pmag_specimens.txt")
age_file : name of age file (default is "er_ages.txt")
latex : boolean argument to output in LaTeX (default is False)
WD : path to directory that contains input files and takes output (default is current directory, '.')
"""
# format outfiles
if latex:
latex = 1
file_type = '.tex'
else:
latex = 0
file_type = '.txt'
dir_path = os.path.realpath(WD)
outfile = os.path.join(dir_path, 'Directions' + file_type)
Ioutfile = os.path.join(dir_path, 'Intensities' + file_type)
Soutfile = os.path.join(dir_path, 'SiteNfo' + file_type)
Specout = os.path.join(dir_path, 'Specimens' + file_type)
Critout = os.path.join(dir_path, 'Criteria' + file_type)
# format infiles
res_file = os.path.join(dir_path, res_file)
if crit_file:
crit_file = os.path.join(dir_path, crit_file)
if spec_file:
spec_file = os.path.join(dir_path, spec_file)
else:
grade = False
# open output files
f = open(outfile, 'w')
sf = open(Soutfile, 'w')
fI = open(Ioutfile, 'w')
if crit_file:
cr = open(Critout, 'w')
# set up column headers
Sites, file_type = pmag.magic_read(res_file)
if crit_file:
Crits, file_type = pmag.magic_read(crit_file)
else:
Crits = []
SiteCols = ["Site", "Location",
"Lat. (N)", "Long. (E)", "Age ", "Age sigma", "Units"]
SiteKeys = ["er_site_names", "average_lat", "average_lon", "average_age",
"average_age_sigma", "average_age_unit"]
DirCols = ["Site", 'Comp.', "perc TC", "Dec.", "Inc.", "Nl", "Np", "k ", "R", "a95",
"PLat", "PLong"]
DirKeys = ["er_site_names", "pole_comp_name", "tilt_correction", "average_dec", "average_inc",
"average_n_lines", "average_n_planes", "average_k", "average_r", "average_alpha95",
"vgp_lat", "vgp_lon"]
IntCols = ["Site", "N", "B (uT)", "sigma",
"sigma perc", "VADM", "VADM sigma"]
IntKeys = ["er_site_names", "average_int_n", "average_int", "average_int_sigma",
'average_int_sigma_perc', "vadm", "vadm_sigma"]
AllowedKeys = ['specimen_frac', 'specimen_scat', 'specimen_gap_max', 'measurement_step_min',
'measurement_step_max', 'measurement_step_unit', 'specimen_polarity',
'specimen_nrm', 'specimen_direction_type', 'specimen_comp_nmb', 'specimen_mad',
'specimen_alpha95', 'specimen_n', 'specimen_int_sigma',
'specimen_int_sigma_perc', 'specimen_int_rel_sigma',
'specimen_int_rel_sigma_perc', 'specimen_int_mad', 'specimen_int_n',
'specimen_w', 'specimen_q', 'specimen_f', 'specimen_fvds', 'specimen_b_sigma',
'specimen_b_beta', 'specimen_g', 'specimen_dang', 'specimen_md',
'specimen_ptrm', 'specimen_drat', 'specimen_drats', 'specimen_rsc',
'specimen_viscosity_index', 'specimen_magn_moment', 'specimen_magn_volume',
'specimen_magn_mass', 'specimen_int_ptrm_n', 'specimen_delta', 'specimen_theta',
'specimen_gamma', 'sample_polarity', 'sample_nrm', 'sample_direction_type',
'sample_comp_nmb', 'sample_sigma', 'sample_alpha95', 'sample_n',
'sample_n_lines', 'sample_n_planes', 'sample_k', 'sample_r',
'sample_tilt_correction', 'sample_int_sigma', 'sample_int_sigma_perc',
'sample_int_rel_sigma', 'sample_int_rel_sigma_perc', 'sample_int_n',
'sample_magn_moment', 'sample_magn_volume', 'sample_magn_mass', 'site_polarity',
'site_nrm', 'site_direction_type', 'site_comp_nmb', 'site_sigma',
'site_alpha95', 'site_n', 'site_n_lines', 'site_n_planes', 'site_k', 'site_r',
'site_tilt_correction', 'site_int_sigma', 'site_int_sigma_perc',
'site_int_rel_sigma', 'site_int_rel_sigma_perc', 'site_int_n',
'site_magn_moment', 'site_magn_volume', 'site_magn_mass', 'average_age_min',
'average_age_max', 'average_age_sigma', 'average_age_unit', 'average_sigma',
'average_alpha95', 'average_n', 'average_nn', 'average_k', 'average_r',
'average_int_sigma', 'average_int_rel_sigma', 'average_int_rel_sigma_perc',
'average_int_n', 'average_int_nn', 'vgp_dp', 'vgp_dm', 'vgp_sigma',
'vgp_alpha95', 'vgp_n', 'vdm_sigma', 'vdm_n', 'vadm_sigma', 'vadm_n']
if crit_file:
crit = Crits[0] # get a list of useful keys
for key in list(crit.keys()):
if key not in AllowedKeys:
del(crit[key])
for key in list(crit.keys()):
if (not crit[key]) or (eval(crit[key]) > 1000) or (eval(crit[key]) == 0):
# get rid of all blank or too big ones or too little ones
del(crit[key])
CritKeys = list(crit.keys())
if spec_file:
Specs, file_type = pmag.magic_read(spec_file)
fsp = open(Specout, 'w') # including specimen intensities if desired
SpecCols = ["Site", "Specimen", "B (uT)", "MAD", "Beta", "N", "Q", "DANG", "f-vds",
"DRATS", "T (C)"]
SpecKeys = ['er_site_name', 'er_specimen_name', 'specimen_int', 'specimen_int_mad',
'specimen_b_beta', 'specimen_int_n', 'specimen_q', 'specimen_dang',
'specimen_fvds', 'specimen_drats', 'trange']
Xtra = ['specimen_frac', 'specimen_scat', 'specimen_gmax']
if grade:
SpecCols.append('Grade')
SpecKeys.append('specimen_grade')
for x in Xtra: # put in the new intensity keys if present
if x in list(Specs[0].keys()):
SpecKeys.append(x)
newkey = ""
for k in x.split('_')[1:]:
newkey = newkey + k + '_'
SpecCols.append(newkey.strip('_'))
SpecCols.append('Corrections')
SpecKeys.append('corrections')
# these should be multiplied by 1e6
Micro = ['specimen_int', 'average_int', 'average_int_sigma']
Zeta = ['vadm', 'vadm_sigma'] # these should be multiplied by 1e21
# write out the header information for each output file
if latex: # write out the latex header stuff
sep = ' & '
end = '\\\\'
f.write('\\documentclass{article}\n')
f.write('\\usepackage[margin=1in]{geometry}\n')
f.write('\\usepackage{longtable}\n')
f.write('\\begin{document}\n')
sf.write('\\documentclass{article}\n')
sf.write('\\usepackage[margin=1in]{geometry}\n')
sf.write('\\usepackage{longtable}\n')
sf.write('\\begin{document}\n')
fI.write('\\documentclass{article}\n')
fI.write('\\usepackage[margin=1in]{geometry}\n')
fI.write('\\usepackage{longtable}\n')
fI.write('\\begin{document}\n')
if crit_file:
cr.write('\\documentclass{article}\n')
cr.write('\\usepackage[margin=1in]{geometry}\n')
cr.write('\\usepackage{longtable}\n')
cr.write('\\begin{document}\n')
if spec_file:
fsp.write('\\documentclass{article}\n')
fsp.write('\\usepackage[margin=1in]{geometry}\n')
fsp.write('\\usepackage{longtable}\n')
fsp.write('\\begin{document}\n')
tabstring = '\\begin{longtable}{'
fstring = tabstring
for k in range(len(SiteCols)):
fstring = fstring + 'r'
sf.write(fstring + '}\n')
sf.write('\hline\n')
fstring = tabstring
for k in range(len(DirCols)):
fstring = fstring + 'r'
f.write(fstring + '}\n')
f.write('\hline\n')
fstring = tabstring
for k in range(len(IntCols)):
fstring = fstring + 'r'
fI.write(fstring + '}\n')
fI.write('\hline\n')
fstring = tabstring
if crit_file:
for k in range(len(CritKeys)):
fstring = fstring + 'r'
cr.write(fstring + '}\n')
cr.write('\hline\n')
if spec_file:
fstring = tabstring
for k in range(len(SpecCols)):
fstring = fstring + 'r'
fsp.write(fstring + '}\n')
fsp.write('\hline\n')
else: # just set the tab and line endings for tab delimited
sep = ' \t '
end = ''
# now write out the actual column headers
Soutstring, Doutstring, Ioutstring, Spoutstring, Croutstring = "", "", "", "", ""
for k in range(len(SiteCols)):
Soutstring = Soutstring + SiteCols[k] + sep
Soutstring = Soutstring.strip(sep)
Soutstring = Soutstring + end + '\n'
sf.write(Soutstring)
for k in range(len(DirCols)):
Doutstring = Doutstring + DirCols[k] + sep
Doutstring = Doutstring.strip(sep)
Doutstring = Doutstring + end + '\n'
f.write(Doutstring)
for k in range(len(IntCols)):
Ioutstring = Ioutstring + IntCols[k] + sep
Ioutstring = Ioutstring.strip(sep)
Ioutstring = Ioutstring + end + '\n'
fI.write(Ioutstring)
if crit_file:
for k in range(len(CritKeys)):
Croutstring = Croutstring + CritKeys[k] + sep
Croutstring = Croutstring.strip(sep)
Croutstring = Croutstring + end + '\n'
cr.write(Croutstring)
if spec_file:
for k in range(len(SpecCols)):
Spoutstring = Spoutstring + SpecCols[k] + sep
Spoutstring = Spoutstring.strip(sep)
Spoutstring = Spoutstring + end + "\n"
fsp.write(Spoutstring)
if latex: # put in a horizontal line in latex file
f.write('\hline\n')
sf.write('\hline\n')
fI.write('\hline\n')
if crit_file:
cr.write('\hline\n')
if spec_file:
fsp.write('\hline\n')
# do criteria
if crit_file:
for crit in Crits:
Croutstring = ""
for key in CritKeys:
Croutstring = Croutstring + crit[key] + sep
Croutstring = Croutstring.strip(sep) + end
cr.write(Croutstring + '\n')
# do directions
# get all results with VGPs
VGPs = pmag.get_dictitem(Sites, 'vgp_lat', '', 'F')
VGPs = pmag.get_dictitem(VGPs, 'data_type', 'i',
'T') # get site level stuff
for site in VGPs:
if len(site['er_site_names'].split(":")) == 1:
if 'er_sample_names' not in list(site.keys()):
site['er_sample_names'] = ''
if 'pole_comp_name' not in list(site.keys()):
site['pole_comp_name'] = "A"
if 'average_nn' not in list(site.keys()) and 'average_n' in list(site.keys()):
site['average_nn'] = site['average_n']
if 'average_n_lines' not in list(site.keys()):
site['average_n_lines'] = site['average_nn']
if 'average_n_planes' not in list(site.keys()):
site['average_n_planes'] = ""
Soutstring, Doutstring = "", ""
for key in SiteKeys:
if key in list(site.keys()):
Soutstring = Soutstring + site[key] + sep
Soutstring = Soutstring.strip(sep) + end
sf.write(Soutstring + '\n')
for key in DirKeys:
if key in list(site.keys()):
Doutstring = Doutstring + site[key] + sep
Doutstring = Doutstring.strip(sep) + end
f.write(Doutstring + '\n')
# now do intensities
VADMs = pmag.get_dictitem(Sites, 'vadm', '', 'F')
VADMs = pmag.get_dictitem(VADMs, 'data_type', 'i', 'T')
for site in VADMs: # do results level stuff
if site not in VGPs:
Soutstring = ""
for key in SiteKeys:
if key in list(site.keys()):
Soutstring = Soutstring + site[key] + sep
else:
Soutstring = Soutstring + " " + sep
Soutstring = Soutstring.strip(sep) + end
sf.write(Soutstring + '\n')
if len(site['er_site_names'].split(":")) == 1 and site['data_type'] == 'i':
if 'average_int_sigma_perc' not in list(site.keys()):
site['average_int_sigma_perc'] = "0"
if site["average_int_sigma"] == "":
site["average_int_sigma"] = "0"
if site["average_int_sigma_perc"] == "":
site["average_int_sigma_perc"] = "0"
if site["vadm"] == "":
site["vadm"] = "0"
if site["vadm_sigma"] == "":
site["vadm_sigma"] = "0"
for key in list(site.keys()): # reformat vadms, intensities
if key in Micro:
site[key] = '%7.1f' % (float(site[key]) * 1e6)
if key in Zeta:
site[key] = '%7.1f' % (float(site[key]) * 1e-21)
outstring = ""
for key in IntKeys:
if key not in list(site.keys()):
site[key] = ""
outstring = outstring + site[key] + sep
outstring = outstring.strip(sep) + end + '\n'
fI.write(outstring)
# VDMs=pmag.get_dictitem(Sites,'vdm','','F') # get non-blank VDMs
# for site in VDMs: # do results level stuff
# if len(site['er_site_names'].split(":"))==1:
# if 'average_int_sigma_perc' not in site.keys():site['average_int_sigma_perc']="0"
# if site["average_int_sigma"]=="":site["average_int_sigma"]="0"
# if site["average_int_sigma_perc"]=="":site["average_int_sigma_perc"]="0"
# if site["vadm"]=="":site["vadm"]="0"
# if site["vadm_sigma"]=="":site["vadm_sigma"]="0"
# for key in site.keys(): # reformat vadms, intensities
# if key in Micro: site[key]='%7.1f'%(float(site[key])*1e6)
# if key in Zeta: site[key]='%7.1f'%(float(site[key])*1e-21)
# outstring=""
# for key in IntKeys:
# outstring=outstring+site[key]+sep
# fI.write(outstring.strip(sep)+'\n')
if spec_file:
SpecsInts = pmag.get_dictitem(Specs, 'specimen_int', '', 'F')
for spec in SpecsInts:
spec['trange'] = '%i' % (int(float(spec['measurement_step_min']) - 273)) + \
'-' + '%i' % (int(float(spec['measurement_step_max']) - 273))
meths = spec['magic_method_codes'].split(':')
corrections = ''
for meth in meths:
if 'DA' in meth:
corrections = corrections + meth[3:] + ':'
corrections = corrections.strip(':')
if corrections.strip() == "":
corrections = "None"
spec['corrections'] = corrections
outstring = ""
for key in SpecKeys:
if key in Micro:
spec[key] = '%7.1f' % (float(spec[key]) * 1e6)
if key in Zeta:
spec[key] = '%7.1f' % (float(spec[key]) * 1e-21)
outstring = outstring + spec[key] + sep
fsp.write(outstring.strip(sep) + end + '\n')
#
if latex: # write out the tail stuff
f.write('\hline\n')
sf.write('\hline\n')
fI.write('\hline\n')
f.write('\end{longtable}\n')
sf.write('\end{longtable}\n')
fI.write('\end{longtable}\n')
f.write('\end{document}\n')
sf.write('\end{document}\n')
fI.write('\end{document}\n')
if spec_file:
fsp.write('\hline\n')
fsp.write('\end{longtable}\n')
fsp.write('\end{document}\n')
if crit_file:
cr.write('\hline\n')
cr.write('\end{longtable}\n')
cr.write('\end{document}\n')
f.close()
sf.close()
fI.close()
print('data saved in: ', outfile, Ioutfile, Soutfile)
outfiles = [outfile, Ioutfile, Soutfile]
if spec_file:
fsp.close()
print('specimen data saved in: ', Specout)
outfiles.append(Specout)
if crit_file:
cr.close()
print('Selection criteria saved in: ', Critout)
outfiles.append(Critout)
return True, outfiles | ['def', 'pmag_results_extract', '(', 'res_file', '=', '"pmag_results.txt"', ',', 'crit_file', '=', '""', ',', 'spec_file', '=', '""', ',', 'age_file', '=', '""', ',', 'latex', '=', 'False', ',', 'grade', '=', 'False', ',', 'WD', '=', '"."', ')', ':', '# format outfiles', 'if', 'latex', ':', 'latex', '=', '1', 'file_type', '=', "'.tex'", 'else', ':', 'latex', '=', '0', 'file_type', '=', "'.txt'", 'dir_path', '=', 'os', '.', 'path', '.', 'realpath', '(', 'WD', ')', 'outfile', '=', 'os', '.', 'path', '.', 'join', '(', 'dir_path', ',', "'Directions'", '+', 'file_type', ')', 'Ioutfile', '=', 'os', '.', 'path', '.', 'join', '(', 'dir_path', ',', "'Intensities'", '+', 'file_type', ')', 'Soutfile', '=', 'os', '.', 'path', '.', 'join', '(', 'dir_path', ',', "'SiteNfo'", '+', 'file_type', ')', 'Specout', '=', 'os', '.', 'path', '.', 'join', '(', 'dir_path', ',', "'Specimens'", '+', 'file_type', ')', 'Critout', '=', 'os', '.', 'path', '.', 'join', '(', 'dir_path', ',', "'Criteria'", '+', 'file_type', ')', '# format infiles', 'res_file', '=', 'os', '.', 'path', '.', 'join', '(', 'dir_path', ',', 'res_file', ')', 'if', 'crit_file', ':', 'crit_file', '=', 'os', '.', 'path', '.', 'join', '(', 'dir_path', ',', 'crit_file', ')', 'if', 'spec_file', ':', 'spec_file', '=', 'os', '.', 'path', '.', 'join', '(', 'dir_path', ',', 'spec_file', ')', 'else', ':', 'grade', '=', 'False', '# open output files', 'f', '=', 'open', '(', 'outfile', ',', "'w'", ')', 'sf', '=', 'open', '(', 'Soutfile', ',', "'w'", ')', 'fI', '=', 'open', '(', 'Ioutfile', ',', "'w'", ')', 'if', 'crit_file', ':', 'cr', '=', 'open', '(', 'Critout', ',', "'w'", ')', '# set up column headers', 'Sites', ',', 'file_type', '=', 'pmag', '.', 'magic_read', '(', 'res_file', ')', 'if', 'crit_file', ':', 'Crits', ',', 'file_type', '=', 'pmag', '.', 'magic_read', '(', 'crit_file', ')', 'else', ':', 'Crits', '=', '[', ']', 'SiteCols', '=', '[', '"Site"', ',', '"Location"', ',', '"Lat. (N)"', ',', '"Long. 
(E)"', ',', '"Age "', ',', '"Age sigma"', ',', '"Units"', ']', 'SiteKeys', '=', '[', '"er_site_names"', ',', '"average_lat"', ',', '"average_lon"', ',', '"average_age"', ',', '"average_age_sigma"', ',', '"average_age_unit"', ']', 'DirCols', '=', '[', '"Site"', ',', "'Comp.'", ',', '"perc TC"', ',', '"Dec."', ',', '"Inc."', ',', '"Nl"', ',', '"Np"', ',', '"k "', ',', '"R"', ',', '"a95"', ',', '"PLat"', ',', '"PLong"', ']', 'DirKeys', '=', '[', '"er_site_names"', ',', '"pole_comp_name"', ',', '"tilt_correction"', ',', '"average_dec"', ',', '"average_inc"', ',', '"average_n_lines"', ',', '"average_n_planes"', ',', '"average_k"', ',', '"average_r"', ',', '"average_alpha95"', ',', '"vgp_lat"', ',', '"vgp_lon"', ']', 'IntCols', '=', '[', '"Site"', ',', '"N"', ',', '"B (uT)"', ',', '"sigma"', ',', '"sigma perc"', ',', '"VADM"', ',', '"VADM sigma"', ']', 'IntKeys', '=', '[', '"er_site_names"', ',', '"average_int_n"', ',', '"average_int"', ',', '"average_int_sigma"', ',', "'average_int_sigma_perc'", ',', '"vadm"', ',', '"vadm_sigma"', ']', 'AllowedKeys', '=', '[', "'specimen_frac'", ',', "'specimen_scat'", ',', "'specimen_gap_max'", ',', "'measurement_step_min'", ',', "'measurement_step_max'", ',', "'measurement_step_unit'", ',', "'specimen_polarity'", ',', "'specimen_nrm'", ',', "'specimen_direction_type'", ',', "'specimen_comp_nmb'", ',', "'specimen_mad'", ',', "'specimen_alpha95'", ',', "'specimen_n'", ',', "'specimen_int_sigma'", ',', "'specimen_int_sigma_perc'", ',', "'specimen_int_rel_sigma'", ',', "'specimen_int_rel_sigma_perc'", ',', "'specimen_int_mad'", ',', "'specimen_int_n'", ',', "'specimen_w'", ',', "'specimen_q'", ',', "'specimen_f'", ',', "'specimen_fvds'", ',', "'specimen_b_sigma'", ',', "'specimen_b_beta'", ',', "'specimen_g'", ',', "'specimen_dang'", ',', "'specimen_md'", ',', "'specimen_ptrm'", ',', "'specimen_drat'", ',', "'specimen_drats'", ',', "'specimen_rsc'", ',', "'specimen_viscosity_index'", ',', "'specimen_magn_moment'", ',', "'specimen_magn_volume'", ',', "'specimen_magn_mass'", ',', "'specimen_int_ptrm_n'", ',', "'specimen_delta'", ',', "'specimen_theta'", ',', "'specimen_gamma'", ',', "'sample_polarity'", ',', "'sample_nrm'", ',', "'sample_direction_type'", ',', "'sample_comp_nmb'", ',', "'sample_sigma'", ',', "'sample_alpha95'", ',', "'sample_n'", ',', "'sample_n_lines'", ',', "'sample_n_planes'", ',', "'sample_k'", ',', "'sample_r'", ',', "'sample_tilt_correction'", ',', "'sample_int_sigma'", ',', "'sample_int_sigma_perc'", ',', "'sample_int_rel_sigma'", ',', "'sample_int_rel_sigma_perc'", ',', "'sample_int_n'", ',', "'sample_magn_moment'", ',', "'sample_magn_volume'", ',', "'sample_magn_mass'", ',', "'site_polarity'", ',', "'site_nrm'", ',', "'site_direction_type'", ',', "'site_comp_nmb'", ',', "'site_sigma'", ',', "'site_alpha95'", ',', "'site_n'", ',', "'site_n_lines'", ',', "'site_n_planes'", ',', "'site_k'", ',', "'site_r'", ',', "'site_tilt_correction'", ',', "'site_int_sigma'", ',', "'site_int_sigma_perc'", ',', "'site_int_rel_sigma'", ',', "'site_int_rel_sigma_perc'", ',', "'site_int_n'", ',', "'site_magn_moment'", ',', "'site_magn_volume'", ',', "'site_magn_mass'", ',', "'average_age_min'", ',', "'average_age_max'", ',', "'average_age_sigma'", ',', "'average_age_unit'", ',', "'average_sigma'", ',', "'average_alpha95'", ',', "'average_n'", ',', "'average_nn'", ',', "'average_k'", ',', "'average_r'", ',', "'average_int_sigma'", ',', "'average_int_rel_sigma'", ',', "'average_int_rel_sigma_perc'", ',', "'average_int_n'", ',', "'average_int_nn'", ',', 
"'vgp_dp'", ',', "'vgp_dm'", ',', "'vgp_sigma'", ',', "'vgp_alpha95'", ',', "'vgp_n'", ',', "'vdm_sigma'", ',', "'vdm_n'", ',', "'vadm_sigma'", ',', "'vadm_n'", ']', 'if', 'crit_file', ':', 'crit', '=', 'Crits', '[', '0', ']', '# get a list of useful keys', 'for', 'key', 'in', 'list', '(', 'crit', '.', 'keys', '(', ')', ')', ':', 'if', 'key', 'not', 'in', 'AllowedKeys', ':', 'del', '(', 'crit', '[', 'key', ']', ')', 'for', 'key', 'in', 'list', '(', 'crit', '.', 'keys', '(', ')', ')', ':', 'if', '(', 'not', 'crit', '[', 'key', ']', ')', 'or', '(', 'eval', '(', 'crit', '[', 'key', ']', ')', '>', '1000', ')', 'or', '(', 'eval', '(', 'crit', '[', 'key', ']', ')', '==', '0', ')', ':', '# get rid of all blank or too big ones or too little ones', 'del', '(', 'crit', '[', 'key', ']', ')', 'CritKeys', '=', 'list', '(', 'crit', '.', 'keys', '(', ')', ')', 'if', 'spec_file', ':', 'Specs', ',', 'file_type', '=', 'pmag', '.', 'magic_read', '(', 'spec_file', ')', 'fsp', '=', 'open', '(', 'Specout', ',', "'w'", ')', '# including specimen intensities if desired', 'SpecCols', '=', '[', '"Site"', ',', '"Specimen"', ',', '"B (uT)"', ',', '"MAD"', ',', '"Beta"', ',', '"N"', ',', '"Q"', ',', '"DANG"', ',', '"f-vds"', ',', '"DRATS"', ',', '"T (C)"', ']', 'SpecKeys', '=', '[', "'er_site_name'", ',', "'er_specimen_name'", ',', "'specimen_int'", ',', "'specimen_int_mad'", ',', "'specimen_b_beta'", ',', "'specimen_int_n'", ',', "'specimen_q'", ',', "'specimen_dang'", ',', "'specimen_fvds'", ',', "'specimen_drats'", ',', "'trange'", ']', 'Xtra', '=', '[', "'specimen_frac'", ',', "'specimen_scat'", ',', "'specimen_gmax'", ']', 'if', 'grade', ':', 'SpecCols', '.', 'append', '(', "'Grade'", ')', 'SpecKeys', '.', 'append', '(', "'specimen_grade'", ')', 'for', 'x', 'in', 'Xtra', ':', '# put in the new intensity keys if present', 'if', 'x', 'in', 'list', '(', 'Specs', '[', '0', ']', '.', 'keys', '(', ')', ')', ':', 'SpecKeys', '.', 'append', '(', 'x', ')', 'newkey', '=', '""', 'for', 'k', 'in', 'x', '.', 'split', '(', "'_'", ')', '[', '1', ':', ']', ':', 'newkey', '=', 'newkey', '+', 'k', '+', "'_'", 'SpecCols', '.', 'append', '(', 'newkey', '.', 'strip', '(', "'_'", ')', ')', 'SpecCols', '.', 'append', '(', "'Corrections'", ')', 'SpecKeys', '.', 'append', '(', "'corrections'", ')', '# these should be multiplied by 1e6', 'Micro', '=', '[', "'specimen_int'", ',', "'average_int'", ',', "'average_int_sigma'", ']', 'Zeta', '=', '[', "'vadm'", ',', "'vadm_sigma'", ']', '# these should be multiplied by 1e21', '# write out the header information for each output file', 'if', 'latex', ':', '# write out the latex header stuff', 'sep', '=', "' & '", 'end', '=', "'\\\\\\\\'", 'f', '.', 'write', '(', "'\\\\documentclass{article}\\n'", ')', 'f', '.', 'write', '(', "'\\\\usepackage[margin=1in]{geometry}\\n'", ')', 'f', '.', 'write', '(', "'\\\\usepackage{longtable}\\n'", ')', 'f', '.', 'write', '(', "'\\\\begin{document}\\n'", ')', 'sf', '.', 'write', '(', "'\\\\documentclass{article}\\n'", ')', 'sf', '.', 'write', '(', "'\\\\usepackage[margin=1in]{geometry}\\n'", ')', 'sf', '.', 'write', '(', "'\\\\usepackage{longtable}\\n'", ')', 'sf', '.', 'write', '(', "'\\\\begin{document}\\n'", ')', 'fI', '.', 'write', '(', "'\\\\documentclass{article}\\n'", ')', 'fI', '.', 'write', '(', "'\\\\usepackage[margin=1in]{geometry}\\n'", ')', 'fI', '.', 'write', '(', "'\\\\usepackage{longtable}\\n'", ')', 'fI', '.', 'write', '(', "'\\\\begin{document}\\n'", ')', 'if', 'crit_file', ':', 'cr', '.', 'write', '(', "'\\\\documentclass{article}\\n'", ')', 
'cr', '.', 'write', '(', "'\\\\usepackage[margin=1in]{geometry}\\n'", ')', 'cr', '.', 'write', '(', "'\\\\usepackage{longtable}\\n'", ')', 'cr', '.', 'write', '(', "'\\\\begin{document}\\n'", ')', 'if', 'spec_file', ':', 'fsp', '.', 'write', '(', "'\\\\documentclass{article}\\n'", ')', 'fsp', '.', 'write', '(', "'\\\\usepackage[margin=1in]{geometry}\\n'", ')', 'fsp', '.', 'write', '(', "'\\\\usepackage{longtable}\\n'", ')', 'fsp', '.', 'write', '(', "'\\\\begin{document}\\n'", ')', 'tabstring', '=', "'\\\\begin{longtable}{'", 'fstring', '=', 'tabstring', 'for', 'k', 'in', 'range', '(', 'len', '(', 'SiteCols', ')', ')', ':', 'fstring', '=', 'fstring', '+', "'r'", 'sf', '.', 'write', '(', 'fstring', '+', "'}\\n'", ')', 'sf', '.', 'write', '(', "'\\hline\\n'", ')', 'fstring', '=', 'tabstring', 'for', 'k', 'in', 'range', '(', 'len', '(', 'DirCols', ')', ')', ':', 'fstring', '=', 'fstring', '+', "'r'", 'f', '.', 'write', '(', 'fstring', '+', "'}\\n'", ')', 'f', '.', 'write', '(', "'\\hline\\n'", ')', 'fstring', '=', 'tabstring', 'for', 'k', 'in', 'range', '(', 'len', '(', 'IntCols', ')', ')', ':', 'fstring', '=', 'fstring', '+', "'r'", 'fI', '.', 'write', '(', 'fstring', '+', "'}\\n'", ')', 'fI', '.', 'write', '(', "'\\hline\\n'", ')', 'fstring', '=', 'tabstring', 'if', 'crit_file', ':', 'for', 'k', 'in', 'range', '(', 'len', '(', 'CritKeys', ')', ')', ':', 'fstring', '=', 'fstring', '+', "'r'", 'cr', '.', 'write', '(', 'fstring', '+', "'}\\n'", ')', 'cr', '.', 'write', '(', "'\\hline\\n'", ')', 'if', 'spec_file', ':', 'fstring', '=', 'tabstring', 'for', 'k', 'in', 'range', '(', 'len', '(', 'SpecCols', ')', ')', ':', 'fstring', '=', 'fstring', '+', "'r'", 'fsp', '.', 'write', '(', 'fstring', '+', "'}\\n'", ')', 'fsp', '.', 'write', '(', "'\\hline\\n'", ')', 'else', ':', '# just set the tab and line endings for tab delimited', 'sep', '=', "' \\t '", 'end', '=', "''", '# now write out the actual column headers', 'Soutstring', ',', 'Doutstring', ',', 'Ioutstring', ',', 'Spoutstring', ',', 'Croutstring', '=', '""', ',', '""', ',', '""', ',', '""', ',', '""', 'for', 'k', 'in', 'range', '(', 'len', '(', 'SiteCols', ')', ')', ':', 'Soutstring', '=', 'Soutstring', '+', 'SiteCols', '[', 'k', ']', '+', 'sep', 'Soutstring', '=', 'Soutstring', '.', 'strip', '(', 'sep', ')', 'Soutstring', '=', 'Soutstring', '+', 'end', '+', "'\\n'", 'sf', '.', 'write', '(', 'Soutstring', ')', 'for', 'k', 'in', 'range', '(', 'len', '(', 'DirCols', ')', ')', ':', 'Doutstring', '=', 'Doutstring', '+', 'DirCols', '[', 'k', ']', '+', 'sep', 'Doutstring', '=', 'Doutstring', '.', 'strip', '(', 'sep', ')', 'Doutstring', '=', 'Doutstring', '+', 'end', '+', "'\\n'", 'f', '.', 'write', '(', 'Doutstring', ')', 'for', 'k', 'in', 'range', '(', 'len', '(', 'IntCols', ')', ')', ':', 'Ioutstring', '=', 'Ioutstring', '+', 'IntCols', '[', 'k', ']', '+', 'sep', 'Ioutstring', '=', 'Ioutstring', '.', 'strip', '(', 'sep', ')', 'Ioutstring', '=', 'Ioutstring', '+', 'end', '+', "'\\n'", 'fI', '.', 'write', '(', 'Ioutstring', ')', 'if', 'crit_file', ':', 'for', 'k', 'in', 'range', '(', 'len', '(', 'CritKeys', ')', ')', ':', 'Croutstring', '=', 'Croutstring', '+', 'CritKeys', '[', 'k', ']', '+', 'sep', 'Croutstring', '=', 'Croutstring', '.', 'strip', '(', 'sep', ')', 'Croutstring', '=', 'Croutstring', '+', 'end', '+', "'\\n'", 'cr', '.', 'write', '(', 'Croutstring', ')', 'if', 'spec_file', ':', 'for', 'k', 'in', 'range', '(', 'len', '(', 'SpecCols', ')', ')', ':', 'Spoutstring', '=', 'Spoutstring', '+', 'SpecCols', '[', 'k', ']', '+', 'sep', 
'Spoutstring', '=', 'Spoutstring', '.', 'strip', '(', 'sep', ')', 'Spoutstring', '=', 'Spoutstring', '+', 'end', '+', '"\\n"', 'fsp', '.', 'write', '(', 'Spoutstring', ')', 'if', 'latex', ':', '# put in a horizontal line in latex file', 'f', '.', 'write', '(', "'\\hline\\n'", ')', 'sf', '.', 'write', '(', "'\\hline\\n'", ')', 'fI', '.', 'write', '(', "'\\hline\\n'", ')', 'if', 'crit_file', ':', 'cr', '.', 'write', '(', "'\\hline\\n'", ')', 'if', 'spec_file', ':', 'fsp', '.', 'write', '(', "'\\hline\\n'", ')', '# do criteria', 'if', 'crit_file', ':', 'for', 'crit', 'in', 'Crits', ':', 'Croutstring', '=', '""', 'for', 'key', 'in', 'CritKeys', ':', 'Croutstring', '=', 'Croutstring', '+', 'crit', '[', 'key', ']', '+', 'sep', 'Croutstring', '=', 'Croutstring', '.', 'strip', '(', 'sep', ')', '+', 'end', 'cr', '.', 'write', '(', 'Croutstring', '+', "'\\n'", ')', '# do directions', '# get all results with VGPs', 'VGPs', '=', 'pmag', '.', 'get_dictitem', '(', 'Sites', ',', "'vgp_lat'", ',', "''", ',', "'F'", ')', 'VGPs', '=', 'pmag', '.', 'get_dictitem', '(', 'VGPs', ',', "'data_type'", ',', "'i'", ',', "'T'", ')', '# get site level stuff', 'for', 'site', 'in', 'VGPs', ':', 'if', 'len', '(', 'site', '[', "'er_site_names'", ']', '.', 'split', '(', '":"', ')', ')', '==', '1', ':', 'if', "'er_sample_names'", 'not', 'in', 'list', '(', 'site', '.', 'keys', '(', ')', ')', ':', 'site', '[', "'er_sample_names'", ']', '=', "''", 'if', "'pole_comp_name'", 'not', 'in', 'list', '(', 'site', '.', 'keys', '(', ')', ')', ':', 'site', '[', "'pole_comp_name'", ']', '=', '"A"', 'if', "'average_nn'", 'not', 'in', 'list', '(', 'site', '.', 'keys', '(', ')', ')', 'and', "'average_n'", 'in', 'list', '(', 'site', '.', 'keys', '(', ')', ')', ':', 'site', '[', "'average_nn'", ']', '=', 'site', '[', "'average_n'", ']', 'if', "'average_n_lines'", 'not', 'in', 'list', '(', 'site', '.', 'keys', '(', ')', ')', ':', 'site', '[', "'average_n_lines'", ']', '=', 'site', '[', "'average_nn'", ']', 'if', "'average_n_planes'", 'not', 'in', 'list', '(', 'site', '.', 'keys', '(', ')', ')', ':', 'site', '[', "'average_n_planes'", ']', '=', '""', 'Soutstring', ',', 'Doutstring', '=', '""', ',', '""', 'for', 'key', 'in', 'SiteKeys', ':', 'if', 'key', 'in', 'list', '(', 'site', '.', 'keys', '(', ')', ')', ':', 'Soutstring', '=', 'Soutstring', '+', 'site', '[', 'key', ']', '+', 'sep', 'Soutstring', '=', 'Soutstring', '.', 'strip', '(', 'sep', ')', '+', 'end', 'sf', '.', 'write', '(', 'Soutstring', '+', "'\\n'", ')', 'for', 'key', 'in', 'DirKeys', ':', 'if', 'key', 'in', 'list', '(', 'site', '.', 'keys', '(', ')', ')', ':', 'Doutstring', '=', 'Doutstring', '+', 'site', '[', 'key', ']', '+', 'sep', 'Doutstring', '=', 'Doutstring', '.', 'strip', '(', 'sep', ')', '+', 'end', 'f', '.', 'write', '(', 'Doutstring', '+', "'\\n'", ')', '# now do intensities', 'VADMs', '=', 'pmag', '.', 'get_dictitem', '(', 'Sites', ',', "'vadm'", ',', "''", ',', "'F'", ')', 'VADMs', '=', 'pmag', '.', 'get_dictitem', '(', 'VADMs', ',', "'data_type'", ',', "'i'", ',', "'T'", ')', 'for', 'site', 'in', 'VADMs', ':', '# do results level stuff', 'if', 'site', 'not', 'in', 'VGPs', ':', 'Soutstring', '=', '""', 'for', 'key', 'in', 'SiteKeys', ':', 'if', 'key', 'in', 'list', '(', 'site', '.', 'keys', '(', ')', ')', ':', 'Soutstring', '=', 'Soutstring', '+', 'site', '[', 'key', ']', '+', 'sep', 'else', ':', 'Soutstring', '=', 'Soutstring', '+', '" "', '+', 'sep', 'Soutstring', '=', 'Soutstring', '.', 'strip', '(', 'sep', ')', '+', 'end', 'sf', '.', 'write', '(', 'Soutstring', 
'+', "'\\n'", ')', 'if', 'len', '(', 'site', '[', "'er_site_names'", ']', '.', 'split', '(', '":"', ')', ')', '==', '1', 'and', 'site', '[', "'data_type'", ']', '==', "'i'", ':', 'if', "'average_int_sigma_perc'", 'not', 'in', 'list', '(', 'site', '.', 'keys', '(', ')', ')', ':', 'site', '[', "'average_int_sigma_perc'", ']', '=', '"0"', 'if', 'site', '[', '"average_int_sigma"', ']', '==', '""', ':', 'site', '[', '"average_int_sigma"', ']', '=', '"0"', 'if', 'site', '[', '"average_int_sigma_perc"', ']', '==', '""', ':', 'site', '[', '"average_int_sigma_perc"', ']', '=', '"0"', 'if', 'site', '[', '"vadm"', ']', '==', '""', ':', 'site', '[', '"vadm"', ']', '=', '"0"', 'if', 'site', '[', '"vadm_sigma"', ']', '==', '""', ':', 'site', '[', '"vadm_sigma"', ']', '=', '"0"', 'for', 'key', 'in', 'list', '(', 'site', '.', 'keys', '(', ')', ')', ':', '# reformat vadms, intensities', 'if', 'key', 'in', 'Micro', ':', 'site', '[', 'key', ']', '=', "'%7.1f'", '%', '(', 'float', '(', 'site', '[', 'key', ']', ')', '*', '1e6', ')', 'if', 'key', 'in', 'Zeta', ':', 'site', '[', 'key', ']', '=', "'%7.1f'", '%', '(', 'float', '(', 'site', '[', 'key', ']', ')', '*', '1e-21', ')', 'outstring', '=', '""', 'for', 'key', 'in', 'IntKeys', ':', 'if', 'key', 'not', 'in', 'list', '(', 'site', '.', 'keys', '(', ')', ')', ':', 'site', '[', 'key', ']', '=', '""', 'outstring', '=', 'outstring', '+', 'site', '[', 'key', ']', '+', 'sep', 'outstring', '=', 'outstring', '.', 'strip', '(', 'sep', ')', '+', 'end', '+', "'\\n'", 'fI', '.', 'write', '(', 'outstring', ')', "# VDMs=pmag.get_dictitem(Sites,'vdm','','F') # get non-blank VDMs", '# for site in VDMs: # do results level stuff', '# if len(site[\'er_site_names\'].split(":"))==1:', '# if \'average_int_sigma_perc\' not in site.keys():site[\'average_int_sigma_perc\']="0"', '# if site["average_int_sigma"]=="":site["average_int_sigma"]="0"', '# if site["average_int_sigma_perc"]=="":site["average_int_sigma_perc"]="0"', '# if site["vadm"]=="":site["vadm"]="0"', '# if site["vadm_sigma"]=="":site["vadm_sigma"]="0"', '# for key in site.keys(): # reformat vadms, intensities', "# if key in Micro: site[key]='%7.1f'%(float(site[key])*1e6)", "# if key in Zeta: site[key]='%7.1f'%(float(site[key])*1e-21)", '# outstring=""', '# for key in IntKeys:', '# outstring=outstring+site[key]+sep', "# fI.write(outstring.strip(sep)+'\\n')", 'if', 'spec_file', ':', 'SpecsInts', '=', 'pmag', '.', 'get_dictitem', '(', 'Specs', ',', "'specimen_int'", ',', "''", ',', "'F'", ')', 'for', 'spec', 'in', 'SpecsInts', ':', 'spec', '[', "'trange'", ']', '=', "'%i'", '%', '(', 'int', '(', 'float', '(', 'spec', '[', "'measurement_step_min'", ']', ')', '-', '273', ')', ')', '+', "'-'", '+', "'%i'", '%', '(', 'int', '(', 'float', '(', 'spec', '[', "'measurement_step_max'", ']', ')', '-', '273', ')', ')', 'meths', '=', 'spec', '[', "'magic_method_codes'", ']', '.', 'split', '(', "':'", ')', 'corrections', '=', "''", 'for', 'meth', 'in', 'meths', ':', 'if', "'DA'", 'in', 'meth', ':', 'corrections', '=', 'corrections', '+', 'meth', '[', '3', ':', ']', '+', "':'", 'corrections', '=', 'corrections', '.', 'strip', '(', "':'", ')', 'if', 'corrections', '.', 'strip', '(', ')', '==', '""', ':', 'corrections', '=', '"None"', 'spec', '[', "'corrections'", ']', '=', 'corrections', 'outstring', '=', '""', 'for', 'key', 'in', 'SpecKeys', ':', 'if', 'key', 'in', 'Micro', ':', 'spec', '[', 'key', ']', '=', "'%7.1f'", '%', '(', 'float', '(', 'spec', '[', 'key', ']', ')', '*', '1e6', ')', 'if', 'key', 'in', 'Zeta', ':', 'spec', '[', 'key', 
']', '=', "'%7.1f'", '%', '(', 'float', '(', 'spec', '[', 'key', ']', ')', '*', '1e-21', ')', 'outstring', '=', 'outstring', '+', 'spec', '[', 'key', ']', '+', 'sep', 'fsp', '.', 'write', '(', 'outstring', '.', 'strip', '(', 'sep', ')', '+', 'end', '+', "'\\n'", ')', '#', 'if', 'latex', ':', '# write out the tail stuff', 'f', '.', 'write', '(', "'\\hline\\n'", ')', 'sf', '.', 'write', '(', "'\\hline\\n'", ')', 'fI', '.', 'write', '(', "'\\hline\\n'", ')', 'f', '.', 'write', '(', "'\\end{longtable}\\n'", ')', 'sf', '.', 'write', '(', "'\\end{longtable}\\n'", ')', 'fI', '.', 'write', '(', "'\\end{longtable}\\n'", ')', 'f', '.', 'write', '(', "'\\end{document}\\n'", ')', 'sf', '.', 'write', '(', "'\\end{document}\\n'", ')', 'fI', '.', 'write', '(', "'\\end{document}\\n'", ')', 'if', 'spec_file', ':', 'fsp', '.', 'write', '(', "'\\hline\\n'", ')', 'fsp', '.', 'write', '(', "'\\end{longtable}\\n'", ')', 'fsp', '.', 'write', '(', "'\\end{document}\\n'", ')', 'if', 'crit_file', ':', 'cr', '.', 'write', '(', "'\\hline\\n'", ')', 'cr', '.', 'write', '(', "'\\end{longtable}\\n'", ')', 'cr', '.', 'write', '(', "'\\end{document}\\n'", ')', 'f', '.', 'close', '(', ')', 'sf', '.', 'close', '(', ')', 'fI', '.', 'close', '(', ')', 'print', '(', "'data saved in: '", ',', 'outfile', ',', 'Ioutfile', ',', 'Soutfile', ')', 'outfiles', '=', '[', 'outfile', ',', 'Ioutfile', ',', 'Soutfile', ']', 'if', 'spec_file', ':', 'fsp', '.', 'close', '(', ')', 'print', '(', "'specimen data saved in: '", ',', 'Specout', ')', 'outfiles', '.', 'append', '(', 'Specout', ')', 'if', 'crit_file', ':', 'cr', '.', 'close', '(', ')', 'print', '(', "'Selection criteria saved in: '", ',', 'Critout', ')', 'outfiles', '.', 'append', '(', 'Critout', ')', 'return', 'True', ',', 'outfiles'] | Generate tab delimited output file(s) with result data.
Save output files and return True if successful.
Possible output files: Directions, Intensities, SiteNfo, Criteria,
Specimens
Optional Parameters (defaults are used if not specified)
----------
res_file : name of pmag_results file (default is "pmag_results.txt")
crit_file : name of criteria file (default is "pmag_criteria.txt")
spec_file : name of specimen file (default is "pmag_specimens.txt")
age_file : name of age file (default is "er_ages.txt")
latex : boolean argument to output in LaTeX (default is False)
WD : path to directory that contains input files and takes output (default is current directory, '.') | ['Generate', 'tab', 'delimited', 'output', 'file', '(', 's', ')', 'with', 'result', 'data', '.', 'Save', 'output', 'files', 'and', 'return', 'True', 'if', 'successful', '.', 'Possible', 'output', 'files', ':', 'Directions', 'Intensities', 'SiteNfo', 'Criteria', 'Specimens'] | train | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/pmagpy/ipmag.py#L7097-L7456 |
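A minimal sketch of one detail visible in the token stream above: the separator and line-ending switch between LaTeX and tab-delimited output. The function name and sample values below are hypothetical; only the sep/end choice mirrors the record.

def write_row(values, latex=False):
    # Mirrors the record's sep/end choice: LaTeX rows join cells with ' & ' and
    # end with a literal '\\'; tab-delimited rows join with tabs and no terminator.
    sep = ' & ' if latex else ' \t '
    end = '\\\\' if latex else ''
    return sep.join(str(v) for v in values) + end + '\n'

print(write_row(['site1', '12.3', '45.6'], latex=True))   # site1 & 12.3 & 45.6\\
print(write_row(['site1', '12.3', '45.6'], latex=False))  # tab-delimited line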
2,476 | wright-group/WrightTools | WrightTools/data/_data.py | Data.source | def source(self):
"""Source."""
if "source" not in self.attrs.keys():
self.attrs["source"] = "None"
value = self.attrs["source"]
return value if not value == "None" else None | python | def source(self):
"""Source."""
if "source" not in self.attrs.keys():
self.attrs["source"] = "None"
value = self.attrs["source"]
return value if not value == "None" else None | ['def', 'source', '(', 'self', ')', ':', 'if', '"source"', 'not', 'in', 'self', '.', 'attrs', '.', 'keys', '(', ')', ':', 'self', '.', 'attrs', '[', '"source"', ']', '=', '"None"', 'value', '=', 'self', '.', 'attrs', '[', '"source"', ']', 'return', 'value', 'if', 'not', 'value', '==', '"None"', 'else', 'None'] | Source. | ['Source', '.'] | train | https://github.com/wright-group/WrightTools/blob/80d3ddd5074d8d5c1bc03fd5a0e0f10d4b424aeb/WrightTools/data/_data.py#L168-L173 |
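A minimal sketch of the pattern in record 2,476: an attrs-backed property that stores the string "None" as a sentinel and returns Python None for it. The class below is a hypothetical stand-in, not the real WrightTools Data.

class AttrsBacked:
    # Stand-in for the h5py-backed Data object; only the attrs dict matters here.
    def __init__(self):
        self.attrs = {}

    @property
    def source(self):
        if "source" not in self.attrs.keys():
            self.attrs["source"] = "None"   # sentinel written back on first read
        value = self.attrs["source"]
        return value if not value == "None" else None

d = AttrsBacked()
print(d.source)                        # None -- sentinel translated to Python None
d.attrs["source"] = "experiment.wt5"   # hypothetical source string
print(d.source)                        # 'experiment.wt5'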
2,477 | manns/pyspread | pyspread/src/gui/_grid.py | GridEventHandlers.OnRowSize | def OnRowSize(self, event):
"""Row size event handler"""
row = event.GetRowOrCol()
tab = self.grid.current_table
rowsize = self.grid.GetRowSize(row) / self.grid.grid_renderer.zoom
# Detect for resizing group of rows
rows = self.grid.GetSelectedRows()
if len(rows) == 0:
rows = [row, ]
# Detect for selection of rows spanning all columns
selection = self.grid.selection
num_cols = self.grid.code_array.shape[1]-1
for box in zip(selection.block_tl, selection.block_br):
leftmost_col = box[0][1]
rightmost_col = box[1][1]
if leftmost_col == 0 and rightmost_col == num_cols:
rows += range(box[0][0], box[1][0]+1)
# All row resizing is undone in one click
with undo.group(_("Resize Rows")):
for row in rows:
self.grid.code_array.set_row_height(row, tab, rowsize)
zoomed_rowsize = rowsize * self.grid.grid_renderer.zoom
self.grid.SetRowSize(row, zoomed_rowsize)
# Mark content as changed
post_command_event(self.grid.main_window, self.grid.ContentChangedMsg)
event.Skip()
self.grid.ForceRefresh() | python | def OnRowSize(self, event):
"""Row size event handler"""
row = event.GetRowOrCol()
tab = self.grid.current_table
rowsize = self.grid.GetRowSize(row) / self.grid.grid_renderer.zoom
# Detect for resizing group of rows
rows = self.grid.GetSelectedRows()
if len(rows) == 0:
rows = [row, ]
# Detect for selection of rows spanning all columns
selection = self.grid.selection
num_cols = self.grid.code_array.shape[1]-1
for box in zip(selection.block_tl, selection.block_br):
leftmost_col = box[0][1]
rightmost_col = box[1][1]
if leftmost_col == 0 and rightmost_col == num_cols:
rows += range(box[0][0], box[1][0]+1)
# All row resizing is undone in one click
with undo.group(_("Resize Rows")):
for row in rows:
self.grid.code_array.set_row_height(row, tab, rowsize)
zoomed_rowsize = rowsize * self.grid.grid_renderer.zoom
self.grid.SetRowSize(row, zoomed_rowsize)
# Mark content as changed
post_command_event(self.grid.main_window, self.grid.ContentChangedMsg)
event.Skip()
self.grid.ForceRefresh() | ['def', 'OnRowSize', '(', 'self', ',', 'event', ')', ':', 'row', '=', 'event', '.', 'GetRowOrCol', '(', ')', 'tab', '=', 'self', '.', 'grid', '.', 'current_table', 'rowsize', '=', 'self', '.', 'grid', '.', 'GetRowSize', '(', 'row', ')', '/', 'self', '.', 'grid', '.', 'grid_renderer', '.', 'zoom', '# Detect for resizing group of rows', 'rows', '=', 'self', '.', 'grid', '.', 'GetSelectedRows', '(', ')', 'if', 'len', '(', 'rows', ')', '==', '0', ':', 'rows', '=', '[', 'row', ',', ']', '# Detect for selection of rows spanning all columns', 'selection', '=', 'self', '.', 'grid', '.', 'selection', 'num_cols', '=', 'self', '.', 'grid', '.', 'code_array', '.', 'shape', '[', '1', ']', '-', '1', 'for', 'box', 'in', 'zip', '(', 'selection', '.', 'block_tl', ',', 'selection', '.', 'block_br', ')', ':', 'leftmost_col', '=', 'box', '[', '0', ']', '[', '1', ']', 'rightmost_col', '=', 'box', '[', '1', ']', '[', '1', ']', 'if', 'leftmost_col', '==', '0', 'and', 'rightmost_col', '==', 'num_cols', ':', 'rows', '+=', 'range', '(', 'box', '[', '0', ']', '[', '0', ']', ',', 'box', '[', '1', ']', '[', '0', ']', '+', '1', ')', '# All row resizing is undone in one click', 'with', 'undo', '.', 'group', '(', '_', '(', '"Resize Rows"', ')', ')', ':', 'for', 'row', 'in', 'rows', ':', 'self', '.', 'grid', '.', 'code_array', '.', 'set_row_height', '(', 'row', ',', 'tab', ',', 'rowsize', ')', 'zoomed_rowsize', '=', 'rowsize', '*', 'self', '.', 'grid', '.', 'grid_renderer', '.', 'zoom', 'self', '.', 'grid', '.', 'SetRowSize', '(', 'row', ',', 'zoomed_rowsize', ')', '# Mark content as changed', 'post_command_event', '(', 'self', '.', 'grid', '.', 'main_window', ',', 'self', '.', 'grid', '.', 'ContentChangedMsg', ')', 'event', '.', 'Skip', '(', ')', 'self', '.', 'grid', '.', 'ForceRefresh', '(', ')'] | Row size event handler | ['Row', 'size', 'event', 'handler'] | train | https://github.com/manns/pyspread/blob/0e2fd44c2e0f06605efc3058c20a43a8c1f9e7e0/pyspread/src/gui/_grid.py#L1486-L1518 |
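A minimal sketch of the selection test in record 2,477: a block counts as a whole-row selection only when it spans from column 0 to the last column index (shape[1]-1). The grid size and block corners below are hypothetical.

num_cols = 9                  # index of the last column in a 10-column grid
block_tl = [(2, 0), (5, 3)]   # top-left (row, col) of two selection blocks
block_br = [(4, 9), (6, 7)]   # bottom-right (row, col) of the same blocks
rows = []
for (top, left), (bottom, right) in zip(block_tl, block_br):
    if left == 0 and right == num_cols:      # full-width block -> whole rows selected
        rows += range(top, bottom + 1)
print(rows)  # [2, 3, 4] -- the second block does not reach the last column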
2,478 | kpdyer/regex2dfa | third_party/re2/lib/codereview/codereview.py | VersionControlSystem.GetBaseFiles | def GetBaseFiles(self, diff):
"""Helper that calls GetBase file for each file in the patch.
Returns:
A dictionary that maps from filename to GetBaseFile's tuple. Filenames
are retrieved based on lines that start with "Index:" or
"Property changes on:".
"""
files = {}
for line in diff.splitlines(True):
if line.startswith('Index:') or line.startswith('Property changes on:'):
unused, filename = line.split(':', 1)
# On Windows if a file has property changes its filename uses '\'
# instead of '/'.
filename = to_slash(filename.strip())
files[filename] = self.GetBaseFile(filename)
return files | python | def GetBaseFiles(self, diff):
"""Helper that calls GetBase file for each file in the patch.
Returns:
A dictionary that maps from filename to GetBaseFile's tuple. Filenames
are retrieved based on lines that start with "Index:" or
"Property changes on:".
"""
files = {}
for line in diff.splitlines(True):
if line.startswith('Index:') or line.startswith('Property changes on:'):
unused, filename = line.split(':', 1)
# On Windows if a file has property changes its filename uses '\'
# instead of '/'.
filename = to_slash(filename.strip())
files[filename] = self.GetBaseFile(filename)
return files | ['def', 'GetBaseFiles', '(', 'self', ',', 'diff', ')', ':', 'files', '=', '{', '}', 'for', 'line', 'in', 'diff', '.', 'splitlines', '(', 'True', ')', ':', 'if', 'line', '.', 'startswith', '(', "'Index:'", ')', 'or', 'line', '.', 'startswith', '(', "'Property changes on:'", ')', ':', 'unused', ',', 'filename', '=', 'line', '.', 'split', '(', "':'", ',', '1', ')', "# On Windows if a file has property changes its filename uses '\\'", "# instead of '/'.", 'filename', '=', 'to_slash', '(', 'filename', '.', 'strip', '(', ')', ')', 'files', '[', 'filename', ']', '=', 'self', '.', 'GetBaseFile', '(', 'filename', ')', 'return', 'files'] | Helper that calls GetBase file for each file in the patch.
Returns:
A dictionary that maps from filename to GetBaseFile's tuple. Filenames
are retrieved based on lines that start with "Index:" or
"Property changes on:". | ['Helper', 'that', 'calls', 'GetBase', 'file', 'for', 'each', 'file', 'in', 'the', 'patch', '.'] | train | https://github.com/kpdyer/regex2dfa/blob/109f877e60ef0dfcb430f11516d215930b7b9936/third_party/re2/lib/codereview/codereview.py#L3241-L3257 |
2,479 | SBRG/ssbio | ssbio/protein/structure/properties/residues.py | hse_output | def hse_output(pdb_file, file_type):
"""
The solvent exposure of an amino acid residue is important for analyzing,
understanding and predicting aspects of protein structure and function [73].
A residue's solvent exposure can be classified as four categories: exposed, partly exposed,
buried and deeply buried residues. Hamelryck et al. [73] established a new 2D measure that provides a
different view of solvent exposure, i.e. half-sphere exposure (HSE). By conceptually dividing the sphere
of a residue into two halves- HSE-up and HSE-down, HSE provides a more detailed description of an amino
acid residue's spatial neighborhood. HSE is calculated by the hsexpo module implemented in the BioPython
package [74] from a PDB file.
http://onlinelibrary.wiley.com/doi/10.1002/prot.20379/abstract
Args:
pdb_file:
Returns:
"""
# Get the first model
my_structure = StructureIO(pdb_file)
model = my_structure.first_model
# Calculate HSEalpha
exp_ca = HSExposureCA(model)
# Calculate HSEbeta
exp_cb = HSExposureCB(model)
# Calculate classical coordination number
exp_fs = ExposureCN(model)
return | python | def hse_output(pdb_file, file_type):
"""
The solvent exposure of an amino acid residue is important for analyzing,
understanding and predicting aspects of protein structure and function [73].
A residue's solvent exposure can be classified as four categories: exposed, partly exposed,
buried and deeply buried residues. Hamelryck et al. [73] established a new 2D measure that provides a
different view of solvent exposure, i.e. half-sphere exposure (HSE). By conceptually dividing the sphere
of a residue into two halves- HSE-up and HSE-down, HSE provides a more detailed description of an amino
acid residue's spatial neighborhood. HSE is calculated by the hsexpo module implemented in the BioPython
package [74] from a PDB file.
http://onlinelibrary.wiley.com/doi/10.1002/prot.20379/abstract
Args:
pdb_file:
Returns:
"""
# Get the first model
my_structure = StructureIO(pdb_file)
model = my_structure.first_model
# Calculate HSEalpha
exp_ca = HSExposureCA(model)
# Calculate HSEbeta
exp_cb = HSExposureCB(model)
# Calculate classical coordination number
exp_fs = ExposureCN(model)
return | ['def', 'hse_output', '(', 'pdb_file', ',', 'file_type', ')', ':', '# Get the first model', 'my_structure', '=', 'StructureIO', '(', 'pdb_file', ')', 'model', '=', 'my_structure', '.', 'first_model', '# Calculate HSEalpha', 'exp_ca', '=', 'HSExposureCA', '(', 'model', ')', '# Calculate HSEbeta', 'exp_cb', '=', 'HSExposureCB', '(', 'model', ')', '# Calculate classical coordination number', 'exp_fs', '=', 'ExposureCN', '(', 'model', ')', 'return'] | The solvent exposure of an amino acid residue is important for analyzing,
understanding and predicting aspects of protein structure and function [73].
A residue's solvent exposure can be classified as four categories: exposed, partly exposed,
buried and deeply buried residues. Hamelryck et al. [73] established a new 2D measure that provides a
different view of solvent exposure, i.e. half-sphere exposure (HSE). By conceptually dividing the sphere
of a residue into two halves- HSE-up and HSE-down, HSE provides a more detailed description of an amino
acid residue's spatial neighborhood. HSE is calculated by the hsexpo module implemented in the BioPython
package [74] from a PDB file.
http://onlinelibrary.wiley.com/doi/10.1002/prot.20379/abstract
Args:
pdb_file:
Returns: | ['The', 'solvent', 'exposure', 'of', 'an', 'amino', 'acid', 'residue', 'is', 'important', 'for', 'analyzing', 'understanding', 'and', 'predicting', 'aspects', 'of', 'protein', 'structure', 'and', 'function', '[', '73', ']', '.', 'A', 'residue', 's', 'solvent', 'exposure', 'can', 'be', 'classified', 'as', 'four', 'categories', ':', 'exposed', 'partly', 'exposed', 'buried', 'and', 'deeply', 'buried', 'residues', '.', 'Hamelryck', 'et', 'al', '.', '[', '73', ']', 'established', 'a', 'new', '2D', 'measure', 'that', 'provides', 'a', 'different', 'view', 'of', 'solvent', 'exposure', 'i', '.', 'e', '.', 'half', '-', 'sphere', 'exposure', '(', 'HSE', ')', '.', 'By', 'conceptually', 'dividing', 'the', 'sphere', 'of', 'a', 'residue', 'into', 'two', 'halves', '-', 'HSE', '-', 'up', 'and', 'HSE', '-', 'down', 'HSE', 'provides', 'a', 'more', 'detailed', 'description', 'of', 'an', 'amino', 'acid', 'residue', 's', 'spatial', 'neighborhood', '.', 'HSE', 'is', 'calculated', 'by', 'the', 'hsexpo', 'module', 'implemented', 'in', 'the', 'BioPython', 'package', '[', '74', ']', 'from', 'a', 'PDB', 'file', '.'] | train | https://github.com/SBRG/ssbio/blob/e9449e64ffc1a1f5ad07e5849aa12a650095f8a2/ssbio/protein/structure/properties/residues.py#L391-L421 |
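A hedged usage sketch for record 2,479, assuming Biopython is installed and a PDB file is available (the filename below is hypothetical). The exposure classes attach their results to each residue of the model rather than returning values.

from Bio.PDB import PDBParser
from Bio.PDB.HSExposure import HSExposureCA, HSExposureCB, ExposureCN

parser = PDBParser(QUIET=True)
structure = parser.get_structure('protein', 'example.pdb')  # hypothetical file
model = structure[0]          # first model, as in the record
HSExposureCA(model)           # HSE-alpha
HSExposureCB(model)           # HSE-beta
ExposureCN(model)             # classical coordination number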
2,480 | creare-com/pydem | pydem/dem_processing.py | TileEdge.fix_shapes | def fix_shapes(self):
"""
Fixes the shape of the data fields on edges. Left edges should be
column vectors, and top edges should be row vectors, for example.
"""
for i in xrange(self.n_chunks):
for side in ['left', 'right', 'top', 'bottom']:
edge = getattr(self, side).ravel()[i]
if side in ['left', 'right']:
shp = [edge.todo.size, 1]
else:
shp = [1, edge.todo.size]
edge.done = edge.done.reshape(shp)
edge.data = edge.data.reshape(shp)
edge.todo = edge.todo.reshape(shp) | python | def fix_shapes(self):
"""
Fixes the shape of the data fields on edges. Left edges should be
column vectors, and top edges should be row vectors, for example.
"""
for i in xrange(self.n_chunks):
for side in ['left', 'right', 'top', 'bottom']:
edge = getattr(self, side).ravel()[i]
if side in ['left', 'right']:
shp = [edge.todo.size, 1]
else:
shp = [1, edge.todo.size]
edge.done = edge.done.reshape(shp)
edge.data = edge.data.reshape(shp)
edge.todo = edge.todo.reshape(shp) | ['def', 'fix_shapes', '(', 'self', ')', ':', 'for', 'i', 'in', 'xrange', '(', 'self', '.', 'n_chunks', ')', ':', 'for', 'side', 'in', '[', "'left'", ',', "'right'", ',', "'top'", ',', "'bottom'", ']', ':', 'edge', '=', 'getattr', '(', 'self', ',', 'side', ')', '.', 'ravel', '(', ')', '[', 'i', ']', 'if', 'side', 'in', '[', "'left'", ',', "'right'", ']', ':', 'shp', '=', '[', 'edge', '.', 'todo', '.', 'size', ',', '1', ']', 'else', ':', 'shp', '=', '[', '1', ',', 'edge', '.', 'todo', '.', 'size', ']', 'edge', '.', 'done', '=', 'edge', '.', 'done', '.', 'reshape', '(', 'shp', ')', 'edge', '.', 'data', '=', 'edge', '.', 'data', '.', 'reshape', '(', 'shp', ')', 'edge', '.', 'todo', '=', 'edge', '.', 'todo', '.', 'reshape', '(', 'shp', ')'] | Fixes the shape of the data fields on edges. Left edges should be
column vectors, and top edges should be row vectors, for example. | ['Fixes', 'the', 'shape', 'of', 'the', 'data', 'fields', 'on', 'edges', '.', 'Left', 'edges', 'should', 'be', 'column', 'vectors', 'and', 'top', 'edges', 'should', 'be', 'row', 'vectors', 'for', 'example', '.'] | train | https://github.com/creare-com/pydem/blob/c2fc8d84cfb411df84f71a6dec9edc4b544f710a/pydem/dem_processing.py#L400-L414 |
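A minimal numpy sketch of the reshape convention in record 2,480: left/right edge data become column vectors, top/bottom edge data become row vectors. The edge length is hypothetical.

import numpy as np

edge_values = np.arange(5.0)                  # hypothetical edge data, length 5
left_right_shape = [edge_values.size, 1]      # column vector for left/right edges
top_bottom_shape = [1, edge_values.size]      # row vector for top/bottom edges
print(edge_values.reshape(left_right_shape).shape)  # (5, 1)
print(edge_values.reshape(top_bottom_shape).shape)  # (1, 5)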
2,481 | lingthio/Flask-User | flask_user/email_manager.py | EmailManager.send_registered_email | def send_registered_email(self, user, user_email, request_email_confirmation):
"""Send the 'user has registered' notification email."""
# Verify config settings
if not self.user_manager.USER_ENABLE_EMAIL: return
if not self.user_manager.USER_SEND_REGISTERED_EMAIL: return
# The registered email is sent to a specific user_email.email or user.email
email = user_email.email if user_email else user.email
# Add a request to confirm email if needed
if request_email_confirmation:
# Generate a confirm_email_link
token = self.user_manager.generate_token(user_email.id if user_email else user.id)
confirm_email_link = url_for('user.confirm_email', token=token, _external=True)
else:
confirm_email_link = None
# Render email from templates and send it via the configured EmailAdapter
self._render_and_send_email(
email,
user,
self.user_manager.USER_REGISTERED_EMAIL_TEMPLATE,
confirm_email_link=confirm_email_link,
) | python | def send_registered_email(self, user, user_email, request_email_confirmation):
"""Send the 'user has registered' notification email."""
# Verify config settings
if not self.user_manager.USER_ENABLE_EMAIL: return
if not self.user_manager.USER_SEND_REGISTERED_EMAIL: return
# The registered email is sent to a specific user_email.email or user.email
email = user_email.email if user_email else user.email
# Add a request to confirm email if needed
if request_email_confirmation:
# Generate a confirm_email_link
token = self.user_manager.generate_token(user_email.id if user_email else user.id)
confirm_email_link = url_for('user.confirm_email', token=token, _external=True)
else:
confirm_email_link = None
# Render email from templates and send it via the configured EmailAdapter
self._render_and_send_email(
email,
user,
self.user_manager.USER_REGISTERED_EMAIL_TEMPLATE,
confirm_email_link=confirm_email_link,
) | ['def', 'send_registered_email', '(', 'self', ',', 'user', ',', 'user_email', ',', 'request_email_confirmation', ')', ':', '# Verify config settings', 'if', 'not', 'self', '.', 'user_manager', '.', 'USER_ENABLE_EMAIL', ':', 'return', 'if', 'not', 'self', '.', 'user_manager', '.', 'USER_SEND_REGISTERED_EMAIL', ':', 'return', '# The registered email is sent to a specific user_email.email or user.email', 'email', '=', 'user_email', '.', 'email', 'if', 'user_email', 'else', 'user', '.', 'email', '# Add a request to confirm email if needed', 'if', 'request_email_confirmation', ':', '# Generate a confirm_email_link', 'token', '=', 'self', '.', 'user_manager', '.', 'generate_token', '(', 'user_email', '.', 'id', 'if', 'user_email', 'else', 'user', '.', 'id', ')', 'confirm_email_link', '=', 'url_for', '(', "'user.confirm_email'", ',', 'token', '=', 'token', ',', '_external', '=', 'True', ')', 'else', ':', 'confirm_email_link', '=', 'None', '# Render email from templates and send it via the configured EmailAdapter', 'self', '.', '_render_and_send_email', '(', 'email', ',', 'user', ',', 'self', '.', 'user_manager', '.', 'USER_REGISTERED_EMAIL_TEMPLATE', ',', 'confirm_email_link', '=', 'confirm_email_link', ',', ')'] | Send the 'user has registered' notification email. | ['Send', 'the', 'user', 'has', 'registered', 'notification', 'email', '.'] | train | https://github.com/lingthio/Flask-User/blob/a379fa0a281789618c484b459cb41236779b95b1/flask_user/email_manager.py#L130-L154 |
2,482 | ThreatConnect-Inc/tcex | tcex/tcex_ti/tcex_ti_tc_request.py | TiTcRequest._iterate | def _iterate(self, url, params, api_entity):
"""
Args:
url:
params:
api_entity:
Return:
"""
params['resultLimit'] = self.result_limit
should_iterate = True
result_start = 0
while should_iterate:
# params['resultOffset'] = result_offset
params['resultStart'] = result_start
r = self.tcex.session.get(url, params=params)
if not self.success(r):
err = r.text or r.reason
self.tcex.handle_error(950, [r.status_code, err, r.url])
data = r.json().get('data').get(api_entity)
if len(data) < self.result_limit:
should_iterate = False
result_start += self.result_limit
for result in data:
yield result | python | def _iterate(self, url, params, api_entity):
"""
Args:
url:
params:
api_entity:
Return:
"""
params['resultLimit'] = self.result_limit
should_iterate = True
result_start = 0
while should_iterate:
# params['resultOffset'] = result_offset
params['resultStart'] = result_start
r = self.tcex.session.get(url, params=params)
if not self.success(r):
err = r.text or r.reason
self.tcex.handle_error(950, [r.status_code, err, r.url])
data = r.json().get('data').get(api_entity)
if len(data) < self.result_limit:
should_iterate = False
result_start += self.result_limit
for result in data:
yield result | ['def', '_iterate', '(', 'self', ',', 'url', ',', 'params', ',', 'api_entity', ')', ':', 'params', '[', "'resultLimit'", ']', '=', 'self', '.', 'result_limit', 'should_iterate', '=', 'True', 'result_start', '=', '0', 'while', 'should_iterate', ':', "# params['resultOffset'] = result_offset", 'params', '[', "'resultStart'", ']', '=', 'result_start', 'r', '=', 'self', '.', 'tcex', '.', 'session', '.', 'get', '(', 'url', ',', 'params', '=', 'params', ')', 'if', 'not', 'self', '.', 'success', '(', 'r', ')', ':', 'err', '=', 'r', '.', 'text', 'or', 'r', '.', 'reason', 'self', '.', 'tcex', '.', 'handle_error', '(', '950', ',', '[', 'r', '.', 'status_code', ',', 'err', ',', 'r', '.', 'url', ']', ')', 'data', '=', 'r', '.', 'json', '(', ')', '.', 'get', '(', "'data'", ')', '.', 'get', '(', 'api_entity', ')', 'if', 'len', '(', 'data', ')', '<', 'self', '.', 'result_limit', ':', 'should_iterate', '=', 'False', 'result_start', '+=', 'self', '.', 'result_limit', 'for', 'result', 'in', 'data', ':', 'yield', 'result'] | Args:
url:
params:
api_entity:
Return: | ['Args', ':', 'url', ':', 'params', ':', 'api_entity', ':'] | train | https://github.com/ThreatConnect-Inc/tcex/blob/dd4d7a1ef723af1561687120191886b9a2fd4b47/tcex/tcex_ti/tcex_ti_tc_request.py#L149-L176 |
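A minimal sketch of the pagination loop in record 2,482: advance resultStart by resultLimit and stop after a page comes back shorter than the limit. The HTTP session is replaced by a plain callable here; the control flow otherwise follows the record.

def iterate(fetch_page, result_limit=3):
    # fetch_page(start, limit) stands in for the ThreatConnect GET + JSON unwrap.
    result_start = 0
    should_iterate = True
    while should_iterate:
        page = fetch_page(result_start, result_limit)
        if len(page) < result_limit:       # short page -> this is the last one
            should_iterate = False
        result_start += result_limit
        for result in page:
            yield result

data = list(range(8))                      # pretend api_entity payload
fake_fetch = lambda start, limit: data[start:start + limit]
print(list(iterate(fake_fetch)))           # [0, 1, 2, 3, 4, 5, 6, 7]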
2,483 | joferkington/mpldatacursor | mpldatacursor/datacursor.py | DataCursor._show_annotation_box | def _show_annotation_box(self, event):
"""Update an existing box or create an annotation box for an event."""
ax = event.artist.axes
# Get the pre-created annotation box for the axes or create a new one.
if self.display != 'multiple':
annotation = self.annotations[ax]
elif event.mouseevent in self.annotations:
# Avoid creating multiple datacursors for the same click event
# when several artists are selected.
annotation = self.annotations[event.mouseevent]
else:
annotation = self.annotate(ax, **self._annotation_kwargs)
self.annotations[event.mouseevent] = annotation
if self.display == 'single':
# Hide any other annotation boxes...
for ann in self.annotations.values():
ann.set_visible(False)
self.update(event, annotation) | python | def _show_annotation_box(self, event):
"""Update an existing box or create an annotation box for an event."""
ax = event.artist.axes
# Get the pre-created annotation box for the axes or create a new one.
if self.display != 'multiple':
annotation = self.annotations[ax]
elif event.mouseevent in self.annotations:
# Avoid creating multiple datacursors for the same click event
# when several artists are selected.
annotation = self.annotations[event.mouseevent]
else:
annotation = self.annotate(ax, **self._annotation_kwargs)
self.annotations[event.mouseevent] = annotation
if self.display == 'single':
# Hide any other annotation boxes...
for ann in self.annotations.values():
ann.set_visible(False)
self.update(event, annotation) | ['def', '_show_annotation_box', '(', 'self', ',', 'event', ')', ':', 'ax', '=', 'event', '.', 'artist', '.', 'axes', '# Get the pre-created annotation box for the axes or create a new one.', 'if', 'self', '.', 'display', '!=', "'multiple'", ':', 'annotation', '=', 'self', '.', 'annotations', '[', 'ax', ']', 'elif', 'event', '.', 'mouseevent', 'in', 'self', '.', 'annotations', ':', '# Avoid creating multiple datacursors for the same click event', '# when several artists are selected.', 'annotation', '=', 'self', '.', 'annotations', '[', 'event', '.', 'mouseevent', ']', 'else', ':', 'annotation', '=', 'self', '.', 'annotate', '(', 'ax', ',', '*', '*', 'self', '.', '_annotation_kwargs', ')', 'self', '.', 'annotations', '[', 'event', '.', 'mouseevent', ']', '=', 'annotation', 'if', 'self', '.', 'display', '==', "'single'", ':', '# Hide any other annotation boxes...', 'for', 'ann', 'in', 'self', '.', 'annotations', '.', 'values', '(', ')', ':', 'ann', '.', 'set_visible', '(', 'False', ')', 'self', '.', 'update', '(', 'event', ',', 'annotation', ')'] | Update an existing box or create an annotation box for an event. | ['Update', 'an', 'existing', 'box', 'or', 'create', 'an', 'annotation', 'box', 'for', 'an', 'event', '.'] | train | https://github.com/joferkington/mpldatacursor/blob/7dabc589ed02c35ac5d89de5931f91e0323aa795/mpldatacursor/datacursor.py#L256-L275 |
2,484 | hendrix/hendrix | hendrix/contrib/concurrency/messaging.py | send_json_message | def send_json_message(address, message, **kwargs):
"""
a shortcut for message sending
"""
data = {
'message': message,
}
if not kwargs.get('subject_id'):
data['subject_id'] = address
data.update(kwargs)
hxdispatcher.send(address, data) | python | def send_json_message(address, message, **kwargs):
"""
a shortcut for message sending
"""
data = {
'message': message,
}
if not kwargs.get('subject_id'):
data['subject_id'] = address
data.update(kwargs)
hxdispatcher.send(address, data) | ['def', 'send_json_message', '(', 'address', ',', 'message', ',', '*', '*', 'kwargs', ')', ':', 'data', '=', '{', "'message'", ':', 'message', ',', '}', 'if', 'not', 'kwargs', '.', 'get', '(', "'subject_id'", ')', ':', 'data', '[', "'subject_id'", ']', '=', 'address', 'data', '.', 'update', '(', 'kwargs', ')', 'hxdispatcher', '.', 'send', '(', 'address', ',', 'data', ')'] | a shortcut for message sending | ['a', 'shortcut', 'for', 'message', 'sending'] | train | https://github.com/hendrix/hendrix/blob/175af011a7e5822b772bfec0e11a46466bb8688d/hendrix/contrib/concurrency/messaging.py#L125-L139 |
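A minimal sketch of the payload built in record 2,484: the address doubles as subject_id unless the caller supplies one. The dispatcher call is omitted and the addresses below are hypothetical.

def build_payload(address, message, **kwargs):
    data = {'message': message}
    if not kwargs.get('subject_id'):
        data['subject_id'] = address   # fall back to the address itself
    data.update(kwargs)
    return data

print(build_payload('chat-room-1', 'hello'))                        # subject_id == address
print(build_payload('chat-room-1', 'hello', subject_id='user-42'))  # explicit subject_id wins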
2,485 | apache/spark | python/pyspark/sql/streaming.py | DataStreamReader.orc | def orc(self, path):
"""Loads a ORC file stream, returning the result as a :class:`DataFrame`.
.. note:: Evolving.
>>> orc_sdf = spark.readStream.schema(sdf_schema).orc(tempfile.mkdtemp())
>>> orc_sdf.isStreaming
True
>>> orc_sdf.schema == sdf_schema
True
"""
if isinstance(path, basestring):
return self._df(self._jreader.orc(path))
else:
raise TypeError("path can be only a single string") | python | def orc(self, path):
"""Loads a ORC file stream, returning the result as a :class:`DataFrame`.
.. note:: Evolving.
>>> orc_sdf = spark.readStream.schema(sdf_schema).orc(tempfile.mkdtemp())
>>> orc_sdf.isStreaming
True
>>> orc_sdf.schema == sdf_schema
True
"""
if isinstance(path, basestring):
return self._df(self._jreader.orc(path))
else:
raise TypeError("path can be only a single string") | ['def', 'orc', '(', 'self', ',', 'path', ')', ':', 'if', 'isinstance', '(', 'path', ',', 'basestring', ')', ':', 'return', 'self', '.', '_df', '(', 'self', '.', '_jreader', '.', 'orc', '(', 'path', ')', ')', 'else', ':', 'raise', 'TypeError', '(', '"path can be only a single string"', ')'] | Loads a ORC file stream, returning the result as a :class:`DataFrame`.
.. note:: Evolving.
>>> orc_sdf = spark.readStream.schema(sdf_schema).orc(tempfile.mkdtemp())
>>> orc_sdf.isStreaming
True
>>> orc_sdf.schema == sdf_schema
True | ['Loads', 'a', 'ORC', 'file', 'stream', 'returning', 'the', 'result', 'as', 'a', ':', 'class', ':', 'DataFrame', '.'] | train | https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/sql/streaming.py#L506-L520 |
2,486 | HazyResearch/fonduer | src/fonduer/parser/spacy_parser.py | Spacy.model_installed | def model_installed(name):
"""Check if spaCy language model is installed.
From https://github.com/explosion/spaCy/blob/master/spacy/util.py
:param name:
:return:
"""
data_path = util.get_data_path()
if not data_path or not data_path.exists():
raise IOError(f"Can't find spaCy data path: {data_path}")
if name in {d.name for d in data_path.iterdir()}:
return True
if Spacy.is_package(name): # installed as package
return True
if Path(name).exists(): # path to model data directory
return True
return False | python | def model_installed(name):
"""Check if spaCy language model is installed.
From https://github.com/explosion/spaCy/blob/master/spacy/util.py
:param name:
:return:
"""
data_path = util.get_data_path()
if not data_path or not data_path.exists():
raise IOError(f"Can't find spaCy data path: {data_path}")
if name in {d.name for d in data_path.iterdir()}:
return True
if Spacy.is_package(name): # installed as package
return True
if Path(name).exists(): # path to model data directory
return True
return False | ['def', 'model_installed', '(', 'name', ')', ':', 'data_path', '=', 'util', '.', 'get_data_path', '(', ')', 'if', 'not', 'data_path', 'or', 'not', 'data_path', '.', 'exists', '(', ')', ':', 'raise', 'IOError', '(', 'f"Can\'t find spaCy data path: {data_path}"', ')', 'if', 'name', 'in', '{', 'd', '.', 'name', 'for', 'd', 'in', 'data_path', '.', 'iterdir', '(', ')', '}', ':', 'return', 'True', 'if', 'Spacy', '.', 'is_package', '(', 'name', ')', ':', '# installed as package', 'return', 'True', 'if', 'Path', '(', 'name', ')', '.', 'exists', '(', ')', ':', '# path to model data directory', 'return', 'True', 'return', 'False'] | Check if spaCy language model is installed.
From https://github.com/explosion/spaCy/blob/master/spacy/util.py
:param name:
:return: | ['Check', 'if', 'spaCy', 'language', 'model', 'is', 'installed', '.'] | train | https://github.com/HazyResearch/fonduer/blob/4520f86a716f03dcca458a9f4bddac75b4e7068f/src/fonduer/parser/spacy_parser.py#L87-L104 |
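A minimal sketch of the three checks in record 2,486, with the spaCy data directory and installed-package registry replaced by plain collections so the logic runs standalone. Model names are hypothetical.

from pathlib import Path

def model_installed(name, data_dir_models, packaged_models):
    if name in data_dir_models:      # model folder inside the spaCy data path
        return True
    if name in packaged_models:      # model installed as a pip package
        return True
    if Path(name).exists():          # explicit path to a model data directory
        return True
    return False

print(model_installed('en_core_web_sm', [], ['en_core_web_sm']))  # True
print(model_installed('xx_missing_model', [], []))                # False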
2,487 | jbloomlab/phydms | phydmslib/models.py | ExpCM_fitprefs._update_dPrxy | def _update_dPrxy(self):
"""Update `dPrxy`."""
super(ExpCM_fitprefs, self)._update_dPrxy()
if 'zeta' in self.freeparams:
tildeFrxyQxy = self.tildeFrxy * self.Qxy
j = 0
zetaxterm = scipy.ndarray((self.nsites, N_CODON, N_CODON), dtype='float')
zetayterm = scipy.ndarray((self.nsites, N_CODON, N_CODON), dtype='float')
for r in range(self.nsites):
for i in range(N_AA - 1):
zetari = self.zeta[j]
zetaxterm.fill(0)
zetayterm.fill(0)
zetaxterm[r][self._aa_for_x > i] = -1.0 / zetari
zetaxterm[r][self._aa_for_x == i] = -1.0 / (zetari - 1.0)
zetayterm[r][self._aa_for_y > i] = 1.0 / zetari
zetayterm[r][self._aa_for_y == i] = 1.0 / (zetari - 1.0)
self.dPrxy['zeta'][j] = tildeFrxyQxy * (zetayterm + zetaxterm)
_fill_diagonals(self.dPrxy['zeta'][j], self._diag_indices)
j += 1 | python | def _update_dPrxy(self):
"""Update `dPrxy`."""
super(ExpCM_fitprefs, self)._update_dPrxy()
if 'zeta' in self.freeparams:
tildeFrxyQxy = self.tildeFrxy * self.Qxy
j = 0
zetaxterm = scipy.ndarray((self.nsites, N_CODON, N_CODON), dtype='float')
zetayterm = scipy.ndarray((self.nsites, N_CODON, N_CODON), dtype='float')
for r in range(self.nsites):
for i in range(N_AA - 1):
zetari = self.zeta[j]
zetaxterm.fill(0)
zetayterm.fill(0)
zetaxterm[r][self._aa_for_x > i] = -1.0 / zetari
zetaxterm[r][self._aa_for_x == i] = -1.0 / (zetari - 1.0)
zetayterm[r][self._aa_for_y > i] = 1.0 / zetari
zetayterm[r][self._aa_for_y == i] = 1.0 / (zetari - 1.0)
self.dPrxy['zeta'][j] = tildeFrxyQxy * (zetayterm + zetaxterm)
_fill_diagonals(self.dPrxy['zeta'][j], self._diag_indices)
j += 1 | ['def', '_update_dPrxy', '(', 'self', ')', ':', 'super', '(', 'ExpCM_fitprefs', ',', 'self', ')', '.', '_update_dPrxy', '(', ')', 'if', "'zeta'", 'in', 'self', '.', 'freeparams', ':', 'tildeFrxyQxy', '=', 'self', '.', 'tildeFrxy', '*', 'self', '.', 'Qxy', 'j', '=', '0', 'zetaxterm', '=', 'scipy', '.', 'ndarray', '(', '(', 'self', '.', 'nsites', ',', 'N_CODON', ',', 'N_CODON', ')', ',', 'dtype', '=', "'float'", ')', 'zetayterm', '=', 'scipy', '.', 'ndarray', '(', '(', 'self', '.', 'nsites', ',', 'N_CODON', ',', 'N_CODON', ')', ',', 'dtype', '=', "'float'", ')', 'for', 'r', 'in', 'range', '(', 'self', '.', 'nsites', ')', ':', 'for', 'i', 'in', 'range', '(', 'N_AA', '-', '1', ')', ':', 'zetari', '=', 'self', '.', 'zeta', '[', 'j', ']', 'zetaxterm', '.', 'fill', '(', '0', ')', 'zetayterm', '.', 'fill', '(', '0', ')', 'zetaxterm', '[', 'r', ']', '[', 'self', '.', '_aa_for_x', '>', 'i', ']', '=', '-', '1.0', '/', 'zetari', 'zetaxterm', '[', 'r', ']', '[', 'self', '.', '_aa_for_x', '==', 'i', ']', '=', '-', '1.0', '/', '(', 'zetari', '-', '1.0', ')', 'zetayterm', '[', 'r', ']', '[', 'self', '.', '_aa_for_y', '>', 'i', ']', '=', '1.0', '/', 'zetari', 'zetayterm', '[', 'r', ']', '[', 'self', '.', '_aa_for_y', '==', 'i', ']', '=', '1.0', '/', '(', 'zetari', '-', '1.0', ')', 'self', '.', 'dPrxy', '[', "'zeta'", ']', '[', 'j', ']', '=', 'tildeFrxyQxy', '*', '(', 'zetayterm', '+', 'zetaxterm', ')', '_fill_diagonals', '(', 'self', '.', 'dPrxy', '[', "'zeta'", ']', '[', 'j', ']', ',', 'self', '.', '_diag_indices', ')', 'j', '+=', '1'] | Update `dPrxy`. | ['Update', 'dPrxy', '.'] | train | https://github.com/jbloomlab/phydms/blob/9cdebc10bafbe543c552d79486c7f950780ed3c0/phydmslib/models.py#L1068-L1088 |
2,488 | libtcod/python-tcod | tcod/libtcodpy.py | noise_get_fbm | def noise_get_fbm(
n: tcod.noise.Noise,
f: Sequence[float],
oc: float,
typ: int = NOISE_DEFAULT,
) -> float:
"""Return the fractal Brownian motion sampled from the ``f`` coordinate.
Args:
n (Noise): A Noise instance.
f (Sequence[float]): The point to sample the noise from.
typ (int): The noise algorithm to use.
octaves (float): The level of level. Should be more than 1.
Returns:
float: The sampled noise value.
"""
return float(
lib.TCOD_noise_get_fbm_ex(n.noise_c, ffi.new("float[4]", f), oc, typ)
) | python | def noise_get_fbm(
n: tcod.noise.Noise,
f: Sequence[float],
oc: float,
typ: int = NOISE_DEFAULT,
) -> float:
"""Return the fractal Brownian motion sampled from the ``f`` coordinate.
Args:
n (Noise): A Noise instance.
f (Sequence[float]): The point to sample the noise from.
typ (int): The noise algorithm to use.
octaves (float): The level of level. Should be more than 1.
Returns:
float: The sampled noise value.
"""
return float(
lib.TCOD_noise_get_fbm_ex(n.noise_c, ffi.new("float[4]", f), oc, typ)
) | ['def', 'noise_get_fbm', '(', 'n', ':', 'tcod', '.', 'noise', '.', 'Noise', ',', 'f', ':', 'Sequence', '[', 'float', ']', ',', 'oc', ':', 'float', ',', 'typ', ':', 'int', '=', 'NOISE_DEFAULT', ',', ')', '->', 'float', ':', 'return', 'float', '(', 'lib', '.', 'TCOD_noise_get_fbm_ex', '(', 'n', '.', 'noise_c', ',', 'ffi', '.', 'new', '(', '"float[4]"', ',', 'f', ')', ',', 'oc', ',', 'typ', ')', ')'] | Return the fractal Brownian motion sampled from the ``f`` coordinate.
Args:
n (Noise): A Noise instance.
f (Sequence[float]): The point to sample the noise from.
typ (int): The noise algorithm to use.
octaves (float): The level of level. Should be more than 1.
Returns:
float: The sampled noise value. | ['Return', 'the', 'fractal', 'Brownian', 'motion', 'sampled', 'from', 'the', 'f', 'coordinate', '.'] | train | https://github.com/libtcod/python-tcod/blob/8ba10c5cfb813eaf3e834de971ba2d6acb7838e4/tcod/libtcodpy.py#L3409-L3428 |
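A hedged usage sketch for record 2,488, assuming the python-tcod package is installed; the coordinates and octave count below are illustrative only.

import tcod

noise = tcod.noise.Noise(dimensions=2)
value = tcod.libtcodpy.noise_get_fbm(noise, [0.5, 0.5], 4.0)
print(value)   # fractal Brownian motion sampled at (0.5, 0.5) with 4 octaves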
2,489 | datajoint/datajoint-python | datajoint/utils.py | safe_write | def safe_write(filename, blob):
"""
A two-step write.
:param filename: full path
:param blob: binary data
:return: None
"""
temp_file = filename + '.saving'
with open(temp_file, 'bw') as f:
f.write(blob)
os.rename(temp_file, filename) | python | def safe_write(filename, blob):
"""
A two-step write.
:param filename: full path
:param blob: binary data
:return: None
"""
temp_file = filename + '.saving'
with open(temp_file, 'bw') as f:
f.write(blob)
os.rename(temp_file, filename) | ['def', 'safe_write', '(', 'filename', ',', 'blob', ')', ':', 'temp_file', '=', 'filename', '+', "'.saving'", 'with', 'open', '(', 'temp_file', ',', "'bw'", ')', 'as', 'f', ':', 'f', '.', 'write', '(', 'blob', ')', 'os', '.', 'rename', '(', 'temp_file', ',', 'filename', ')'] | A two-step write.
:param filename: full path
:param blob: binary data
:return: None | ['A', 'two', '-', 'step', 'write', '.', ':', 'param', 'filename', ':', 'full', 'path', ':', 'param', 'blob', ':', 'binary', 'data', ':', 'return', ':', 'None'] | train | https://github.com/datajoint/datajoint-python/blob/4f29bb154a7ed2b8b64b4d3a9c8be4c16b39621c/datajoint/utils.py#L73-L83 |
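A minimal sketch of the two-step write in record 2,490: write to a temporary '.saving' name, then rename over the target so readers never see a half-written file. The target path is created in a throwaway temp directory.

import os
import tempfile

def safe_write(filename, blob):
    temp_file = filename + '.saving'
    with open(temp_file, 'bw') as f:
        f.write(blob)
    os.rename(temp_file, filename)   # rename is the "commit" step

target = os.path.join(tempfile.mkdtemp(), 'payload.bin')
safe_write(target, b'\x00\x01\x02')
print(os.path.exists(target), os.path.exists(target + '.saving'))   # True False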
2,490 | phareous/insteonlocal | insteonlocal/Dimmer.py | Dimmer.stop_change | def stop_change(self):
"""Stop changing light level manually"""
self.logger.info("Dimmer %s stop_change", self.device_id)
self.hub.direct_command(self.device_id, '18', '00')
success = self.hub.check_success(self.device_id, '18', '00')
if success:
self.logger.info("Dimmer %s stop_change: Light stopped changing successfully",
self.device_id)
self.hub.clear_device_command_cache(self.device_id)
else:
self.logger.error("Dimmer %s stop_change: Light did not stop",
self.device_id)
return success | python | def stop_change(self):
"""Stop changing light level manually"""
self.logger.info("Dimmer %s stop_change", self.device_id)
self.hub.direct_command(self.device_id, '18', '00')
success = self.hub.check_success(self.device_id, '18', '00')
if success:
self.logger.info("Dimmer %s stop_change: Light stopped changing successfully",
self.device_id)
self.hub.clear_device_command_cache(self.device_id)
else:
self.logger.error("Dimmer %s stop_change: Light did not stop",
self.device_id)
return success | ['def', 'stop_change', '(', 'self', ')', ':', 'self', '.', 'logger', '.', 'info', '(', '"Dimmer %s stop_change"', ',', 'self', '.', 'device_id', ')', 'self', '.', 'hub', '.', 'direct_command', '(', 'self', '.', 'device_id', ',', "'18'", ',', "'00'", ')', 'success', '=', 'self', '.', 'hub', '.', 'check_success', '(', 'self', '.', 'device_id', ',', "'18'", ',', "'00'", ')', 'if', 'success', ':', 'self', '.', 'logger', '.', 'info', '(', '"Dimmer %s stop_change: Light stopped changing successfully"', ',', 'self', '.', 'device_id', ')', 'self', '.', 'hub', '.', 'clear_device_command_cache', '(', 'self', '.', 'device_id', ')', 'else', ':', 'self', '.', 'logger', '.', 'error', '(', '"Dimmer %s stop_change: Light did not stop"', ',', 'self', '.', 'device_id', ')', 'return', 'success'] | Stop changing light level manually | ['Stop', 'changing', 'light', 'level', 'manually'] | train | https://github.com/phareous/insteonlocal/blob/a4544a17d143fb285852cb873e862c270d55dd00/insteonlocal/Dimmer.py#L183-L197 |
2,491 | avihad/twistes | twistes/parser.py | EsParser._parse_string_host | def _parse_string_host(host_str):
"""
Parse host string into a dictionary host
:param host_str:
:return:
"""
host_str = EsParser._fix_host_prefix(host_str)
parsed_url = urlparse(host_str)
host = {HostParsing.HOST: parsed_url.hostname}
if parsed_url.port:
host[HostParsing.PORT] = parsed_url.port
if parsed_url.scheme == HostParsing.HTTPS:
host[HostParsing.PORT] = parsed_url.port or EsParser.SSL_DEFAULT_PORT
host[HostParsing.USE_SSL] = True
host[HostParsing.SCHEME] = HostParsing.HTTPS
elif parsed_url.scheme:
host[HostParsing.SCHEME] = parsed_url.scheme
if parsed_url.username or parsed_url.password:
host[HostParsing.HTTP_AUTH] = '%s:%s' % (parsed_url.username, parsed_url.password)
if parsed_url.path and parsed_url.path != '/':
host[HostParsing.URL_PREFIX] = parsed_url.path
return host | python | def _parse_string_host(host_str):
"""
Parse host string into a dictionary host
:param host_str:
:return:
"""
host_str = EsParser._fix_host_prefix(host_str)
parsed_url = urlparse(host_str)
host = {HostParsing.HOST: parsed_url.hostname}
if parsed_url.port:
host[HostParsing.PORT] = parsed_url.port
if parsed_url.scheme == HostParsing.HTTPS:
host[HostParsing.PORT] = parsed_url.port or EsParser.SSL_DEFAULT_PORT
host[HostParsing.USE_SSL] = True
host[HostParsing.SCHEME] = HostParsing.HTTPS
elif parsed_url.scheme:
host[HostParsing.SCHEME] = parsed_url.scheme
if parsed_url.username or parsed_url.password:
host[HostParsing.HTTP_AUTH] = '%s:%s' % (parsed_url.username, parsed_url.password)
if parsed_url.path and parsed_url.path != '/':
host[HostParsing.URL_PREFIX] = parsed_url.path
return host | ['def', '_parse_string_host', '(', 'host_str', ')', ':', 'host_str', '=', 'EsParser', '.', '_fix_host_prefix', '(', 'host_str', ')', 'parsed_url', '=', 'urlparse', '(', 'host_str', ')', 'host', '=', '{', 'HostParsing', '.', 'HOST', ':', 'parsed_url', '.', 'hostname', '}', 'if', 'parsed_url', '.', 'port', ':', 'host', '[', 'HostParsing', '.', 'PORT', ']', '=', 'parsed_url', '.', 'port', 'if', 'parsed_url', '.', 'scheme', '==', 'HostParsing', '.', 'HTTPS', ':', 'host', '[', 'HostParsing', '.', 'PORT', ']', '=', 'parsed_url', '.', 'port', 'or', 'EsParser', '.', 'SSL_DEFAULT_PORT', 'host', '[', 'HostParsing', '.', 'USE_SSL', ']', '=', 'True', 'host', '[', 'HostParsing', '.', 'SCHEME', ']', '=', 'HostParsing', '.', 'HTTPS', 'elif', 'parsed_url', '.', 'scheme', ':', 'host', '[', 'HostParsing', '.', 'SCHEME', ']', '=', 'parsed_url', '.', 'scheme', 'if', 'parsed_url', '.', 'username', 'or', 'parsed_url', '.', 'password', ':', 'host', '[', 'HostParsing', '.', 'HTTP_AUTH', ']', '=', "'%s:%s'", '%', '(', 'parsed_url', '.', 'username', ',', 'parsed_url', '.', 'password', ')', 'if', 'parsed_url', '.', 'path', 'and', 'parsed_url', '.', 'path', '!=', "'/'", ':', 'host', '[', 'HostParsing', '.', 'URL_PREFIX', ']', '=', 'parsed_url', '.', 'path', 'return', 'host'] | Parse host string into a dictionary host
:param host_str:
:return: | ['Parse', 'host', 'string', 'into', 'a', 'dictionary', 'host', ':', 'param', 'host_str', ':', ':', 'return', ':'] | train | https://github.com/avihad/twistes/blob/9ab8f5aa088b8886aefe3dec85a400e5035e034a/twistes/parser.py#L70-L91 |
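A minimal sketch of the host parsing in record 2,491, with plain string keys standing in for the HostParsing constants and 443 standing in for SSL_DEFAULT_PORT; the example URL is hypothetical.

from urllib.parse import urlparse

def parse_host(host_str):
    parsed = urlparse(host_str)
    host = {'host': parsed.hostname}
    if parsed.port:
        host['port'] = parsed.port
    if parsed.scheme == 'https':
        host['port'] = parsed.port or 443      # default SSL port
        host['use_ssl'] = True
        host['scheme'] = 'https'
    elif parsed.scheme:
        host['scheme'] = parsed.scheme
    if parsed.username or parsed.password:
        host['http_auth'] = '%s:%s' % (parsed.username, parsed.password)
    if parsed.path and parsed.path != '/':
        host['url_prefix'] = parsed.path
    return host

print(parse_host('https://elastic:secret@es.example.com:9243/prefix'))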
2,492 | karan/TPB | tpb/tpb.py | List.items | def items(self):
"""
Request URL and parse response. Yield a ``Torrent`` for every torrent
on page.
"""
request = get(str(self.url), headers={'User-Agent' : "Magic Browser","origin_req_host" : "thepiratebay.se"})
root = html.fromstring(request.text)
items = [self._build_torrent(row) for row in
self._get_torrent_rows(root)]
for item in items:
yield item | python | def items(self):
"""
Request URL and parse response. Yield a ``Torrent`` for every torrent
on page.
"""
request = get(str(self.url), headers={'User-Agent' : "Magic Browser","origin_req_host" : "thepiratebay.se"})
root = html.fromstring(request.text)
items = [self._build_torrent(row) for row in
self._get_torrent_rows(root)]
for item in items:
yield item | ['def', 'items', '(', 'self', ')', ':', 'request', '=', 'get', '(', 'str', '(', 'self', '.', 'url', ')', ',', 'headers', '=', '{', "'User-Agent'", ':', '"Magic Browser"', ',', '"origin_req_host"', ':', '"thepiratebay.se"', '}', ')', 'root', '=', 'html', '.', 'fromstring', '(', 'request', '.', 'text', ')', 'items', '=', '[', 'self', '.', '_build_torrent', '(', 'row', ')', 'for', 'row', 'in', 'self', '.', '_get_torrent_rows', '(', 'root', ')', ']', 'for', 'item', 'in', 'items', ':', 'yield', 'item'] | Request URL and parse response. Yield a ``Torrent`` for every torrent
on page. | ['Request', 'URL', 'and', 'parse', 'response', '.', 'Yield', 'a', 'Torrent', 'for', 'every', 'torrent', 'on', 'page', '.'] | train | https://github.com/karan/TPB/blob/f424a73a10d4bcf4e363d7e7e8cb915a3a057671/tpb/tpb.py#L54-L64 |
2,493 | KelSolaar/Umbra | umbra/ui/widgets/delayed_QSplashScreen.py | Delayed_QSplashScreen.text_color | def text_color(self, value):
"""
Setter for **self.__text_color** attribute.
:param value: Attribute value.
:type value: int or QColor
"""
if value is not None:
assert type(value) in (Qt.GlobalColor, QColor), \
"'{0}' attribute: '{1}' type is not 'int' or 'QColor'!".format("text_color", value)
self.__text_color = value | python | def text_color(self, value):
"""
Setter for **self.__text_color** attribute.
:param value: Attribute value.
:type value: int or QColor
"""
if value is not None:
assert type(value) in (Qt.GlobalColor, QColor), \
"'{0}' attribute: '{1}' type is not 'int' or 'QColor'!".format("text_color", value)
self.__text_color = value | ['def', 'text_color', '(', 'self', ',', 'value', ')', ':', 'if', 'value', 'is', 'not', 'None', ':', 'assert', 'type', '(', 'value', ')', 'in', '(', 'Qt', '.', 'GlobalColor', ',', 'QColor', ')', ',', '"\'{0}\' attribute: \'{1}\' type is not \'int\' or \'QColor\'!"', '.', 'format', '(', '"text_color"', ',', 'value', ')', 'self', '.', '__text_color', '=', 'value'] | Setter for **self.__text_color** attribute.
:param value: Attribute value.
:type value: int or QColor | ['Setter', 'for', '**', 'self', '.', '__text_color', '**', 'attribute', '.'] | train | https://github.com/KelSolaar/Umbra/blob/66f45f08d9d723787f1191989f8b0dda84b412ce/umbra/ui/widgets/delayed_QSplashScreen.py#L122-L133 |
2,494 | mozilla-releng/scriptworker | scriptworker/ed25519.py | ed25519_public_key_to_string | def ed25519_public_key_to_string(key):
"""Convert an ed25519 public key to a base64-encoded string.
Args:
key (Ed25519PublicKey): the key to write to the file.
Returns:
str: the key representation as a str
"""
return base64.b64encode(key.public_bytes(
encoding=serialization.Encoding.Raw,
format=serialization.PublicFormat.Raw,
), None).decode('utf-8') | python | def ed25519_public_key_to_string(key):
"""Convert an ed25519 public key to a base64-encoded string.
Args:
key (Ed25519PublicKey): the key to write to the file.
Returns:
str: the key representation as a str
"""
return base64.b64encode(key.public_bytes(
encoding=serialization.Encoding.Raw,
format=serialization.PublicFormat.Raw,
), None).decode('utf-8') | ['def', 'ed25519_public_key_to_string', '(', 'key', ')', ':', 'return', 'base64', '.', 'b64encode', '(', 'key', '.', 'public_bytes', '(', 'encoding', '=', 'serialization', '.', 'Encoding', '.', 'Raw', ',', 'format', '=', 'serialization', '.', 'PublicFormat', '.', 'Raw', ',', ')', ',', 'None', ')', '.', 'decode', '(', "'utf-8'", ')'] | Convert an ed25519 public key to a base64-encoded string.
Args:
key (Ed25519PublicKey): the key to write to the file.
Returns:
str: the key representation as a str | ['Convert', 'an', 'ed25519', 'public', 'key', 'to', 'a', 'base64', '-', 'encoded', 'string', '.'] | train | https://github.com/mozilla-releng/scriptworker/blob/8e97bbd83b9b578565ec57904c966dd6ae4ef0ae/scriptworker/ed25519.py#L103-L116 |
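A hedged sketch for record 2,494, assuming the cryptography package is installed: generate an ed25519 key pair and serialize the public half the same way the record does (raw bytes, then base64).

import base64
from cryptography.hazmat.primitives import serialization
from cryptography.hazmat.primitives.asymmetric.ed25519 import Ed25519PrivateKey

private_key = Ed25519PrivateKey.generate()
public_key = private_key.public_key()
raw = public_key.public_bytes(
    encoding=serialization.Encoding.Raw,
    format=serialization.PublicFormat.Raw,
)
print(base64.b64encode(raw).decode('utf-8'))   # 44-character base64 string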
2,495 | kensho-technologies/graphql-compiler | graphql_compiler/compiler/blocks.py | QueryRoot.to_gremlin | def to_gremlin(self):
"""Return a unicode object with the Gremlin representation of this block."""
self.validate()
if len(self.start_class) == 1:
# The official Gremlin documentation claims that this approach
# is generally faster than the one below, since it makes using indexes easier.
# http://gremlindocs.spmallette.documentup.com/#filter/has
start_class = list(self.start_class)[0]
return u'g.V({}, {})'.format('\'@class\'', safe_quoted_string(start_class))
else:
start_classes_list = ','.join(safe_quoted_string(x) for x in self.start_class)
return u'g.V.has(\'@class\', T.in, [{}])'.format(start_classes_list) | python | def to_gremlin(self):
"""Return a unicode object with the Gremlin representation of this block."""
self.validate()
if len(self.start_class) == 1:
# The official Gremlin documentation claims that this approach
# is generally faster than the one below, since it makes using indexes easier.
# http://gremlindocs.spmallette.documentup.com/#filter/has
start_class = list(self.start_class)[0]
return u'g.V({}, {})'.format('\'@class\'', safe_quoted_string(start_class))
else:
start_classes_list = ','.join(safe_quoted_string(x) for x in self.start_class)
return u'g.V.has(\'@class\', T.in, [{}])'.format(start_classes_list) | ['def', 'to_gremlin', '(', 'self', ')', ':', 'self', '.', 'validate', '(', ')', 'if', 'len', '(', 'self', '.', 'start_class', ')', '==', '1', ':', '# The official Gremlin documentation claims that this approach', '# is generally faster than the one below, since it makes using indexes easier.', '# http://gremlindocs.spmallette.documentup.com/#filter/has', 'start_class', '=', 'list', '(', 'self', '.', 'start_class', ')', '[', '0', ']', 'return', "u'g.V({}, {})'", '.', 'format', '(', "'\\'@class\\''", ',', 'safe_quoted_string', '(', 'start_class', ')', ')', 'else', ':', 'start_classes_list', '=', "','", '.', 'join', '(', 'safe_quoted_string', '(', 'x', ')', 'for', 'x', 'in', 'self', '.', 'start_class', ')', 'return', "u'g.V.has(\\'@class\\', T.in, [{}])'", '.', 'format', '(', 'start_classes_list', ')'] | Return a unicode object with the Gremlin representation of this block. | ['Return', 'a', 'unicode', 'object', 'with', 'the', 'Gremlin', 'representation', 'of', 'this', 'block', '.'] | train | https://github.com/kensho-technologies/graphql-compiler/blob/f6079c6d10f64932f6b3af309b79bcea2123ca8f/graphql_compiler/compiler/blocks.py#L44-L55 |
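A minimal sketch of the two Gremlin string forms in record 2,495, with safe_quoted_string reduced to naive quoting; the class names below are hypothetical.

def to_gremlin(start_classes):
    quoted = ["'%s'" % c for c in sorted(start_classes)]
    if len(quoted) == 1:
        return "g.V('@class', %s)" % quoted[0]               # single class, index-friendly
    return "g.V.has('@class', T.in, [%s])" % ','.join(quoted)

print(to_gremlin({'Animal'}))             # g.V('@class', 'Animal')
print(to_gremlin({'Animal', 'Person'}))   # g.V.has('@class', T.in, ['Animal','Person'])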
2,496 | ergoithz/browsepy | browsepy/file.py | check_forbidden_filename | def check_forbidden_filename(filename,
destiny_os=os.name,
restricted_names=restricted_names):
'''
Get if given filename is forbidden for current OS or filesystem.
:param filename:
:param destiny_os: destination operative system
:param fs_encoding: destination filesystem filename encoding
:return: whether it is forbidden on the given OS (or filesystem) or not
:rtype: bool
'''
return (
filename in restricted_names or
destiny_os == 'nt' and
filename.split('.', 1)[0].upper() in nt_device_names
) | python | def check_forbidden_filename(filename,
destiny_os=os.name,
restricted_names=restricted_names):
'''
Get if given filename is forbidden for current OS or filesystem.
:param filename:
:param destiny_os: destination operative system
:param fs_encoding: destination filesystem filename encoding
:return: whether it is forbidden on the given OS (or filesystem) or not
:rtype: bool
'''
return (
filename in restricted_names or
destiny_os == 'nt' and
filename.split('.', 1)[0].upper() in nt_device_names
) | ['def', 'check_forbidden_filename', '(', 'filename', ',', 'destiny_os', '=', 'os', '.', 'name', ',', 'restricted_names', '=', 'restricted_names', ')', ':', 'return', '(', 'filename', 'in', 'restricted_names', 'or', 'destiny_os', '==', "'nt'", 'and', 'filename', '.', 'split', '(', "'.'", ',', '1', ')', '[', '0', ']', '.', 'upper', '(', ')', 'in', 'nt_device_names', ')'] | Get if given filename is forbidden for current OS or filesystem.
:param filename:
:param destiny_os: destination operative system
:param fs_encoding: destination filesystem filename encoding
:return: whether it is forbidden on the given OS (or filesystem) or not
:rtype: bool | ['Get', 'if', 'given', 'filename', 'is', 'forbidden', 'for', 'current', 'OS', 'or', 'filesystem', '.'] | train | https://github.com/ergoithz/browsepy/blob/1612a930ef220fae507e1b152c531707e555bd92/browsepy/file.py#L828-L844 |
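A self-contained variant of the check above. The restricted-name and NT device-name sets here are illustrative assumptions, not browsepy's actual module-level constants.

import os

restricted_names = {'.', '..', '::', os.sep}  # assumed defaults; browsepy builds its own set
nt_device_names = {'CON', 'PRN', 'AUX', 'NUL', 'COM1', 'LPT1'}  # partial list of reserved NT names

def check_forbidden_filename(filename, destiny_os=os.name):
    # Forbidden if the name itself is restricted, or (on Windows) its stem is a device name.
    return (
        filename in restricted_names or
        destiny_os == 'nt' and
        filename.split('.', 1)[0].upper() in nt_device_names
    )

print(check_forbidden_filename('notes.txt'))      # False
print(check_forbidden_filename('CON.txt', 'nt'))  # True: CON is reserved on Windows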
2,497 | zetaops/zengine | zengine/views/task_manager_actions.py | TaskManagerActionsView.assign_yourself | def assign_yourself(self):
"""
Assigning the workflow to itself.
The selected job is checked to see if there is an assigned role.
If it does not have a role assigned to it, it takes the job to itself
and displays a message that the process is successful.
If there is a role assigned to it, it does not do any operation
and the message is displayed on the screen.
.. code-block:: python
# request:
{
'task_inv_key': string,
}
"""
task_invitation = TaskInvitation.objects.get(self.task_invitation_key)
wfi = task_invitation.instance
if not wfi.current_actor.exist:
wfi.current_actor = self.current.role
wfi.save()
[inv.delete() for inv in TaskInvitation.objects.filter(instance=wfi) if
not inv == task_invitation]
title = _(u"Successful")
msg = _(u"You have successfully assigned the job to yourself.")
else:
title = _(u"Unsuccessful")
msg = _(u"Unfortunately, this job is already taken by someone else.")
self.current.msg_box(title=title, msg=msg) | python | def assign_yourself(self):
"""
Assigning the workflow to itself.
The selected job is checked to see if there is an assigned role.
If it does not have a role assigned to it, it takes the job to itself
and displays a message that the process is successful.
If there is a role assigned to it, it does not do any operation
and the message is displayed on the screen.
.. code-block:: python
# request:
{
'task_inv_key': string,
}
"""
task_invitation = TaskInvitation.objects.get(self.task_invitation_key)
wfi = task_invitation.instance
if not wfi.current_actor.exist:
wfi.current_actor = self.current.role
wfi.save()
[inv.delete() for inv in TaskInvitation.objects.filter(instance=wfi) if
not inv == task_invitation]
title = _(u"Successful")
msg = _(u"You have successfully assigned the job to yourself.")
else:
title = _(u"Unsuccessful")
msg = _(u"Unfortunately, this job is already taken by someone else.")
self.current.msg_box(title=title, msg=msg) | ['def', 'assign_yourself', '(', 'self', ')', ':', 'task_invitation', '=', 'TaskInvitation', '.', 'objects', '.', 'get', '(', 'self', '.', 'task_invitation_key', ')', 'wfi', '=', 'task_invitation', '.', 'instance', 'if', 'not', 'wfi', '.', 'current_actor', '.', 'exist', ':', 'wfi', '.', 'current_actor', '=', 'self', '.', 'current', '.', 'role', 'wfi', '.', 'save', '(', ')', '[', 'inv', '.', 'delete', '(', ')', 'for', 'inv', 'in', 'TaskInvitation', '.', 'objects', '.', 'filter', '(', 'instance', '=', 'wfi', ')', 'if', 'not', 'inv', '==', 'task_invitation', ']', 'title', '=', '_', '(', 'u"Successful"', ')', 'msg', '=', '_', '(', 'u"You have successfully assigned the job to yourself."', ')', 'else', ':', 'title', '=', '_', '(', 'u"Unsuccessful"', ')', 'msg', '=', '_', '(', 'u"Unfortunately, this job is already taken by someone else."', ')', 'self', '.', 'current', '.', 'msg_box', '(', 'title', '=', 'title', ',', 'msg', '=', 'msg', ')'] | Assigning the workflow to itself.
The selected job is checked to see if there is an assigned role.
If it does not have a role assigned to it, it takes the job to itself
and displays a message that the process is successful.
If there is a role assigned to it, it does not do any operation
and the message is displayed on the screen.
.. code-block:: python
# request:
{
'task_inv_key': string,
} | ['Assigning', 'the', 'workflow', 'to', 'itself', '.', 'The', 'selected', 'job', 'is', 'checked', 'to', 'see', 'if', 'there', 'is', 'an', 'assigned', 'role', '.', 'If', 'it', 'does', 'not', 'have', 'a', 'role', 'assigned', 'to', 'it', 'it', 'takes', 'the', 'job', 'to', 'itself', 'and', 'displays', 'a', 'message', 'that', 'the', 'process', 'is', 'successful', '.', 'If', 'there', 'is', 'a', 'role', 'assigned', 'to', 'it', 'it', 'does', 'not', 'do', 'any', 'operation', 'and', 'the', 'message', 'is', 'displayed', 'on', 'the', 'screen', '.'] | train | https://github.com/zetaops/zengine/blob/b5bc32d3b37bca799f8985be916f04528ac79e4a/zengine/views/task_manager_actions.py#L29-L60 |
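A small mock of the branch above without zengine's ORM; the CurrentActor stand-in and the task_inv_key value are invented placeholders meant only to show which message each path produces.

class CurrentActor:
    def __init__(self, exist):
        self.exist = exist  # mirrors the wfi.current_actor.exist check in the view

def assign_yourself(current_actor):
    if not current_actor.exist:
        # Real view: set wfi.current_actor to self.current.role and delete the other invitations.
        return 'Successful', 'You have successfully assigned the job to yourself.'
    return 'Unsuccessful', 'Unfortunately, this job is already taken by someone else.'

request = {'task_inv_key': '3fa2c1d0a9b84c56'}     # made-up invitation key, per the docstring
print(assign_yourself(CurrentActor(exist=False)))  # job was free, now assigned
print(assign_yourself(CurrentActor(exist=True)))   # someone already took it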
2,498 | tanghaibao/jcvi | jcvi/algorithms/ec.py | eaSimpleConverge | def eaSimpleConverge(population, toolbox, cxpb, mutpb, ngen, stats=None,
halloffame=None, callback=None, verbose=True):
"""This algorithm reproduce the simplest evolutionary algorithm as
presented in chapter 7 of [Back2000]_.
Modified to allow checking if there is no change for ngen, as a simple
rule for convergence. Interface is similar to eaSimple(). However, in
eaSimple, ngen is total number of iterations; in eaSimpleConverge, we
terminate only when the best is NOT updated for ngen iterations.
"""
# Evaluate the individuals with an invalid fitness
invalid_ind = [ind for ind in population if not ind.fitness.valid]
fitnesses = toolbox.map(toolbox.evaluate, invalid_ind)
for ind, fit in zip(invalid_ind, fitnesses):
ind.fitness.values = fit
if halloffame is not None:
halloffame.update(population)
record = stats.compile(population) if stats else {}
# Begin the generational process
gen = 1
best = (0,)
while True:
# Select the next generation individuals
offspring = toolbox.select(population, len(population))
# Vary the pool of individuals
offspring = varAnd(offspring, toolbox, cxpb, mutpb)
# Evaluate the individuals with an invalid fitness
invalid_ind = [ind for ind in offspring if not ind.fitness.valid]
fitnesses = toolbox.map(toolbox.evaluate, invalid_ind)
for ind, fit in zip(invalid_ind, fitnesses):
ind.fitness.values = fit
# Update the hall of fame with the generated individuals
if halloffame is not None:
halloffame.update(offspring)
if callback is not None:
callback(halloffame[0], gen)
# Replace the current population by the offspring
population[:] = offspring
# Append the current generation statistics to the logbook
record = stats.compile(population) if stats else {}
current_best = record['max']
if gen % 20 == 0 and verbose:
print("Current iteration {0}: max_score={1}".
format(gen, current_best), file=sys.stderr)
if current_best > best:
best = current_best
updated = gen
gen += 1
if gen - updated > ngen:
break
return population | python | def eaSimpleConverge(population, toolbox, cxpb, mutpb, ngen, stats=None,
halloffame=None, callback=None, verbose=True):
"""This algorithm reproduce the simplest evolutionary algorithm as
presented in chapter 7 of [Back2000]_.
Modified to allow checking if there is no change for ngen, as a simple
rule for convergence. Interface is similar to eaSimple(). However, in
eaSimple, ngen is total number of iterations; in eaSimpleConverge, we
terminate only when the best is NOT updated for ngen iterations.
"""
# Evaluate the individuals with an invalid fitness
invalid_ind = [ind for ind in population if not ind.fitness.valid]
fitnesses = toolbox.map(toolbox.evaluate, invalid_ind)
for ind, fit in zip(invalid_ind, fitnesses):
ind.fitness.values = fit
if halloffame is not None:
halloffame.update(population)
record = stats.compile(population) if stats else {}
# Begin the generational process
gen = 1
best = (0,)
while True:
# Select the next generation individuals
offspring = toolbox.select(population, len(population))
# Vary the pool of individuals
offspring = varAnd(offspring, toolbox, cxpb, mutpb)
# Evaluate the individuals with an invalid fitness
invalid_ind = [ind for ind in offspring if not ind.fitness.valid]
fitnesses = toolbox.map(toolbox.evaluate, invalid_ind)
for ind, fit in zip(invalid_ind, fitnesses):
ind.fitness.values = fit
# Update the hall of fame with the generated individuals
if halloffame is not None:
halloffame.update(offspring)
if callback is not None:
callback(halloffame[0], gen)
# Replace the current population by the offspring
population[:] = offspring
# Append the current generation statistics to the logbook
record = stats.compile(population) if stats else {}
current_best = record['max']
if gen % 20 == 0 and verbose:
print("Current iteration {0}: max_score={1}".
format(gen, current_best), file=sys.stderr)
if current_best > best:
best = current_best
updated = gen
gen += 1
if gen - updated > ngen:
break
return population | ['def', 'eaSimpleConverge', '(', 'population', ',', 'toolbox', ',', 'cxpb', ',', 'mutpb', ',', 'ngen', ',', 'stats', '=', 'None', ',', 'halloffame', '=', 'None', ',', 'callback', '=', 'None', ',', 'verbose', '=', 'True', ')', ':', '# Evaluate the individuals with an invalid fitness', 'invalid_ind', '=', '[', 'ind', 'for', 'ind', 'in', 'population', 'if', 'not', 'ind', '.', 'fitness', '.', 'valid', ']', 'fitnesses', '=', 'toolbox', '.', 'map', '(', 'toolbox', '.', 'evaluate', ',', 'invalid_ind', ')', 'for', 'ind', ',', 'fit', 'in', 'zip', '(', 'invalid_ind', ',', 'fitnesses', ')', ':', 'ind', '.', 'fitness', '.', 'values', '=', 'fit', 'if', 'halloffame', 'is', 'not', 'None', ':', 'halloffame', '.', 'update', '(', 'population', ')', 'record', '=', 'stats', '.', 'compile', '(', 'population', ')', 'if', 'stats', 'else', '{', '}', '# Begin the generational process', 'gen', '=', '1', 'best', '=', '(', '0', ',', ')', 'while', 'True', ':', '# Select the next generation individuals', 'offspring', '=', 'toolbox', '.', 'select', '(', 'population', ',', 'len', '(', 'population', ')', ')', '# Vary the pool of individuals', 'offspring', '=', 'varAnd', '(', 'offspring', ',', 'toolbox', ',', 'cxpb', ',', 'mutpb', ')', '# Evaluate the individuals with an invalid fitness', 'invalid_ind', '=', '[', 'ind', 'for', 'ind', 'in', 'offspring', 'if', 'not', 'ind', '.', 'fitness', '.', 'valid', ']', 'fitnesses', '=', 'toolbox', '.', 'map', '(', 'toolbox', '.', 'evaluate', ',', 'invalid_ind', ')', 'for', 'ind', ',', 'fit', 'in', 'zip', '(', 'invalid_ind', ',', 'fitnesses', ')', ':', 'ind', '.', 'fitness', '.', 'values', '=', 'fit', '# Update the hall of fame with the generated individuals', 'if', 'halloffame', 'is', 'not', 'None', ':', 'halloffame', '.', 'update', '(', 'offspring', ')', 'if', 'callback', 'is', 'not', 'None', ':', 'callback', '(', 'halloffame', '[', '0', ']', ',', 'gen', ')', '# Replace the current population by the offspring', 'population', '[', ':', ']', '=', 'offspring', '# Append the current generation statistics to the logbook', 'record', '=', 'stats', '.', 'compile', '(', 'population', ')', 'if', 'stats', 'else', '{', '}', 'current_best', '=', 'record', '[', "'max'", ']', 'if', 'gen', '%', '20', '==', '0', 'and', 'verbose', ':', 'print', '(', '"Current iteration {0}: max_score={1}"', '.', 'format', '(', 'gen', ',', 'current_best', ')', ',', 'file', '=', 'sys', '.', 'stderr', ')', 'if', 'current_best', '>', 'best', ':', 'best', '=', 'current_best', 'updated', '=', 'gen', 'gen', '+=', '1', 'if', 'gen', '-', 'updated', '>', 'ngen', ':', 'break', 'return', 'population'] | This algorithm reproduce the simplest evolutionary algorithm as
presented in chapter 7 of [Back2000]_.
Modified to allow checking if there is no change for ngen, as a simple
rule for convergence. Interface is similar to eaSimple(). However, in
eaSimple, ngen is total number of iterations; in eaSimpleConverge, we
terminate only when the best is NOT updated for ngen iterations. | ['This', 'algorithm', 'reproduce', 'the', 'simplest', 'evolutionary', 'algorithm', 'as', 'presented', 'in', 'chapter', '7', 'of', '[', 'Back2000', ']', '_', '.'] | train | https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/algorithms/ec.py#L99-L161 |
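A usage sketch that plugs a OneMax-style DEAP toolbox into eaSimpleConverge. It assumes both deap and jcvi are importable, and registers a 'max' statistic because the loop above reads record['max'] to decide whether the best score improved.

import random
from deap import base, creator, tools
from jcvi.algorithms.ec import eaSimpleConverge

creator.create("FitnessMax", base.Fitness, weights=(1.0,))
creator.create("Individual", list, fitness=creator.FitnessMax)

toolbox = base.Toolbox()
toolbox.register("attr_bool", random.randint, 0, 1)
toolbox.register("individual", tools.initRepeat, creator.Individual, toolbox.attr_bool, 50)
toolbox.register("population", tools.initRepeat, list, toolbox.individual)
toolbox.register("evaluate", lambda ind: (sum(ind),))  # OneMax: maximize the number of ones
toolbox.register("mate", tools.cxTwoPoint)
toolbox.register("mutate", tools.mutFlipBit, indpb=0.05)
toolbox.register("select", tools.selTournament, tournsize=3)

stats = tools.Statistics(lambda ind: ind.fitness.values)
stats.register("max", max)  # compiled into record['max'] each generation

pop = toolbox.population(n=100)
# ngen is the convergence window here: stop after 25 generations without improvement.
pop = eaSimpleConverge(pop, toolbox, cxpb=0.5, mutpb=0.2, ngen=25, stats=stats)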
2,499 | ThreatConnect-Inc/tcex | tcex/tcex_ti/tcex_ti.py | TcExTi.signature | def signature(self, name, file_name, file_type, file_content, owner=None, **kwargs):
"""
Create the Signature TI object.
Args:
owner:
file_content:
file_name:
file_type:
name:
**kwargs:
Return:
"""
return Signature(self.tcex, name, file_name, file_type, file_content, owner=owner, **kwargs) | python | def signature(self, name, file_name, file_type, file_content, owner=None, **kwargs):
"""
Create the Signature TI object.
Args:
owner:
file_content:
file_name:
file_type:
name:
**kwargs:
Return:
"""
return Signature(self.tcex, name, file_name, file_type, file_content, owner=owner, **kwargs) | ['def', 'signature', '(', 'self', ',', 'name', ',', 'file_name', ',', 'file_type', ',', 'file_content', ',', 'owner', '=', 'None', ',', '*', '*', 'kwargs', ')', ':', 'return', 'Signature', '(', 'self', '.', 'tcex', ',', 'name', ',', 'file_name', ',', 'file_type', ',', 'file_content', ',', 'owner', '=', 'owner', ',', '*', '*', 'kwargs', ')'] | Create the Signature TI object.
Args:
owner:
file_content:
file_name:
file_type:
name:
**kwargs:
Return: | ['Create', 'the', 'Signature', 'TI', 'object', '.'] | train | https://github.com/ThreatConnect-Inc/tcex/blob/dd4d7a1ef723af1561687120191886b9a2fd4b47/tcex/tcex_ti/tcex_ti.py#L356-L371 |
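A hedged usage sketch for the signature helper above; the owner, rule body, and the trailing create() call are assumptions about a typical TcEx app rather than verified API details.

from tcex import TcEx

tcex = TcEx()  # assumes the app was launched with valid ThreatConnect connection args
ti = tcex.ti   # threat-intelligence helper that exposes TcExTi.signature
signature = ti.signature(
    name='Example YARA Rule',
    file_name='example.yara',
    file_type='YARA',
    file_content='rule example { condition: true }',
    owner='Example Org',
)
response = signature.create()  # assumed call that pushes the Signature group to ThreatConnect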