body | body_hash | docstring | path | name | repository_name | lang | body_without_docstring |
---|---|---|---|---|---|---|---|
def kill_workers(*args):
'Syntax: [storm kill_workers]\n\n Kill the workers running on this supervisor. This command should be run\n on a supervisor node. If the cluster is running in secure mode, then user needs\n to have admin rights on the node to be able to successfully kill all workers.\n '
exec_storm_class('org.apache.storm.command.KillWorkers', args=args, jvmtype='-client', extrajars=[USER_CONF_DIR, os.path.join(STORM_DIR, 'bin')]) | 180,778,641,335,337,180 | Syntax: [storm kill_workers]
Kill the workers running on this supervisor. This command should be run
on a supervisor node. If the cluster is running in secure mode, then user needs
to have admin rights on the node to be able to successfully kill all workers. | bin/storm.py | kill_workers | JamiesZhang/Storm | python | def kill_workers(*args):
'Syntax: [storm kill_workers]\n\n Kill the workers running on this supervisor. This command should be run\n on a supervisor node. If the cluster is running in secure mode, then user needs\n to have admin rights on the node to be able to successfully kill all workers.\n '
exec_storm_class('org.apache.storm.command.KillWorkers', args=args, jvmtype='-client', extrajars=[USER_CONF_DIR, os.path.join(STORM_DIR, 'bin')]) |
def admin(*args):
"Syntax: [storm admin cmd [options]]\n\n The storm admin command provides access to several operations that can help\n an administrator debug or fix a cluster.\n\n remove_corrupt_topologies - This command should be run on a nimbus node as\n the same user nimbus runs as. It will go directly to zookeeper + blobstore\n and find topologies that appear to be corrupted because of missing blobs.\n It will kill those topologies.\n\n zk_cli [options] - This command will launch a zookeeper cli pointing to the\n storm zookeeper instance logged in as the nimbus user. It should be run on\n a nimbus server as the user nimbus runs as.\n -s --server <connection string>: Set the connection string to use,\n defaults to storm connection string.\n -t --time-out <timeout>: Set the timeout to use, defaults to storm\n zookeeper timeout.\n -w --write: Allow for writes, defaults to read only, we don't want to\n cause problems.\n -n --no-root: Don't include the storm root on the default connection string.\n -j --jaas <jaas_file>: Include a jaas file that should be used when\n authenticating with ZK defaults to the\n java.security.auth.login.config conf.\n\n creds topology_id - Print the credential keys for a topology.\n "
exec_storm_class('org.apache.storm.command.AdminCommands', args=args, jvmtype='-client', extrajars=[USER_CONF_DIR, os.path.join(STORM_DIR, 'bin')]) | 3,164,070,418,118,184,000 | Syntax: [storm admin cmd [options]]
The storm admin command provides access to several operations that can help
an administrator debug or fix a cluster.
remove_corrupt_topologies - This command should be run on a nimbus node as
the same user nimbus runs as. It will go directly to zookeeper + blobstore
and find topologies that appear to be corrupted because of missing blobs.
It will kill those topologies.
zk_cli [options] - This command will launch a zookeeper cli pointing to the
storm zookeeper instance logged in as the nimbus user. It should be run on
a nimbus server as the user nimbus runs as.
-s --server <connection string>: Set the connection string to use,
defaults to storm connection string.
-t --time-out <timeout>: Set the timeout to use, defaults to storm
zookeeper timeout.
-w --write: Allow for writes, defaults to read only, we don't want to
cause problems.
-n --no-root: Don't include the storm root on the default connection string.
-j --jaas <jaas_file>: Include a jaas file that should be used when
authenticating with ZK defaults to the
java.security.auth.login.config conf.
creds topology_id - Print the credential keys for a topology. | bin/storm.py | admin | JamiesZhang/Storm | python | def admin(*args):
"Syntax: [storm admin cmd [options]]\n\n The storm admin command provides access to several operations that can help\n an administrator debug or fix a cluster.\n\n remove_corrupt_topologies - This command should be run on a nimbus node as\n the same user nimbus runs as. It will go directly to zookeeper + blobstore\n and find topologies that appear to be corrupted because of missing blobs.\n It will kill those topologies.\n\n zk_cli [options] - This command will launch a zookeeper cli pointing to the\n storm zookeeper instance logged in as the nimbus user. It should be run on\n a nimbus server as the user nimbus runs as.\n -s --server <connection string>: Set the connection string to use,\n defaults to storm connection string.\n -t --time-out <timeout>: Set the timeout to use, defaults to storm\n zookeeper timeout.\n -w --write: Allow for writes, defaults to read only, we don't want to\n cause problems.\n -n --no-root: Don't include the storm root on the default connection string.\n -j --jaas <jaas_file>: Include a jaas file that should be used when\n authenticating with ZK defaults to the\n java.security.auth.login.config conf.\n\n creds topology_id - Print the credential keys for a topology.\n "
exec_storm_class('org.apache.storm.command.AdminCommands', args=args, jvmtype='-client', extrajars=[USER_CONF_DIR, os.path.join(STORM_DIR, 'bin')]) |
def shell(resourcesdir, command, *args):
'Syntax: [storm shell resourcesdir command args]\n\n Archives resources to jar and uploads jar to Nimbus, and executes following arguments on "local". Useful for non JVM languages.\n eg: `storm shell resources/ python topology.py arg1 arg2`\n '
tmpjarpath = (('stormshell' + str(random.randint(0, 10000000))) + '.jar')
os.system(('jar cf %s %s' % (tmpjarpath, resourcesdir)))
runnerargs = [tmpjarpath, command]
runnerargs.extend(args)
exec_storm_class('org.apache.storm.command.shell_submission', args=runnerargs, jvmtype='-client', extrajars=[USER_CONF_DIR], fork=True)
os.system(('rm ' + tmpjarpath)) | -4,195,633,902,917,097,500 | Syntax: [storm shell resourcesdir command args]
Archives resources to jar and uploads jar to Nimbus, and executes following arguments on "local". Useful for non JVM languages.
eg: `storm shell resources/ python topology.py arg1 arg2` | bin/storm.py | shell | JamiesZhang/Storm | python | def shell(resourcesdir, command, *args):
'Syntax: [storm shell resourcesdir command args]\n\n Archives resources to jar and uploads jar to Nimbus, and executes following arguments on "local". Useful for non JVM languages.\n eg: `storm shell resources/ python topology.py arg1 arg2`\n '
tmpjarpath = (('stormshell' + str(random.randint(0, 10000000))) + '.jar')
os.system(('jar cf %s %s' % (tmpjarpath, resourcesdir)))
runnerargs = [tmpjarpath, command]
runnerargs.extend(args)
exec_storm_class('org.apache.storm.command.shell_submission', args=runnerargs, jvmtype='-client', extrajars=[USER_CONF_DIR], fork=True)
os.system(('rm ' + tmpjarpath)) |
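For context, the jar-packaging step above shells out via `os.system` with string concatenation. A minimal sketch of the same step using `subprocess` (this helper is illustrative only and not part of storm.py; it assumes the JDK `jar` tool is on the PATH):

```python
import random
import subprocess

def package_resources(resourcesdir: str) -> str:
    """Archive a resources directory into a throwaway jar, as `storm shell` does."""
    tmpjarpath = 'stormshell%d.jar' % random.randint(0, 10000000)
    # Equivalent to `jar cf <jar> <dir>`; raises CalledProcessError on failure.
    subprocess.run(['jar', 'cf', tmpjarpath, resourcesdir], check=True)
    return tmpjarpath
```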
def repl():
'Syntax: [storm repl]\n\n Opens up a Clojure REPL with the storm jars and configuration\n on the classpath. Useful for debugging.\n '
cppaths = [CLUSTER_CONF_DIR]
exec_storm_class('clojure.main', jvmtype='-client', extrajars=cppaths) | -630,971,226,495,617,300 | Syntax: [storm repl]
Opens up a Clojure REPL with the storm jars and configuration
on the classpath. Useful for debugging. | bin/storm.py | repl | JamiesZhang/Storm | python | def repl():
'Syntax: [storm repl]\n\n Opens up a Clojure REPL with the storm jars and configuration\n on the classpath. Useful for debugging.\n '
cppaths = [CLUSTER_CONF_DIR]
exec_storm_class('clojure.main', jvmtype='-client', extrajars=cppaths) |
def nimbus(klass='org.apache.storm.daemon.nimbus.Nimbus'):
'Syntax: [storm nimbus]\n\n Launches the nimbus daemon. This command should be run under\n supervision with a tool like daemontools or monit.\n\n See Setting up a Storm cluster for more information.\n (http://storm.apache.org/documentation/Setting-up-a-Storm-cluster)\n '
cppaths = [CLUSTER_CONF_DIR]
jvmopts = (parse_args(confvalue('nimbus.childopts', cppaths)) + ['-Djava.deserialization.disabled=true', '-Dlogfile.name=nimbus.log', ('-Dlog4j.configurationFile=' + os.path.join(get_log4j2_conf_dir(), 'cluster.xml'))])
exec_storm_class(klass, jvmtype='-server', daemonName='nimbus', extrajars=cppaths, jvmopts=jvmopts) | -5,802,446,814,074,783,000 | Syntax: [storm nimbus]
Launches the nimbus daemon. This command should be run under
supervision with a tool like daemontools or monit.
See Setting up a Storm cluster for more information.
(http://storm.apache.org/documentation/Setting-up-a-Storm-cluster) | bin/storm.py | nimbus | JamiesZhang/Storm | python | def nimbus(klass='org.apache.storm.daemon.nimbus.Nimbus'):
'Syntax: [storm nimbus]\n\n Launches the nimbus daemon. This command should be run under\n supervision with a tool like daemontools or monit.\n\n See Setting up a Storm cluster for more information.\n (http://storm.apache.org/documentation/Setting-up-a-Storm-cluster)\n '
cppaths = [CLUSTER_CONF_DIR]
jvmopts = (parse_args(confvalue('nimbus.childopts', cppaths)) + ['-Djava.deserialization.disabled=true', '-Dlogfile.name=nimbus.log', ('-Dlog4j.configurationFile=' + os.path.join(get_log4j2_conf_dir(), 'cluster.xml'))])
exec_storm_class(klass, jvmtype='-server', daemonName='nimbus', extrajars=cppaths, jvmopts=jvmopts) |
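The daemon launchers in this table (nimbus, pacemaker, supervisor, ui, logviewer, drpc) all follow the same pattern: read a `*.childopts` config value, split it into JVM flags with `parse_args`, and append their own `-D` options. A hedged sketch of the splitting step, assuming `parse_args` behaves like shell-style tokenization (the real helper in storm.py may differ in edge cases):

```python
import shlex

def parse_args(childopts):
    """Split a childopts string such as '-Xmx1024m -XX:+UseG1GC' into a flag list."""
    return shlex.split(childopts or '')

# Flags that nimbus() would extend with -Dlogfile.name=nimbus.log, etc.
print(parse_args('-Xmx1024m -XX:+UseG1GC'))  # ['-Xmx1024m', '-XX:+UseG1GC']
```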
def pacemaker(klass='org.apache.storm.pacemaker.Pacemaker'):
'Syntax: [storm pacemaker]\n\n Launches the Pacemaker daemon. This command should be run under\n supervision with a tool like daemontools or monit.\n\n See Setting up a Storm cluster for more information.\n (http://storm.apache.org/documentation/Setting-up-a-Storm-cluster)\n '
cppaths = [CLUSTER_CONF_DIR]
jvmopts = (parse_args(confvalue('pacemaker.childopts', cppaths)) + ['-Djava.deserialization.disabled=true', '-Dlogfile.name=pacemaker.log', ('-Dlog4j.configurationFile=' + os.path.join(get_log4j2_conf_dir(), 'cluster.xml'))])
exec_storm_class(klass, jvmtype='-server', daemonName='pacemaker', extrajars=cppaths, jvmopts=jvmopts) | -8,574,779,595,315,885,000 | Syntax: [storm pacemaker]
Launches the Pacemaker daemon. This command should be run under
supervision with a tool like daemontools or monit.
See Setting up a Storm cluster for more information.
(http://storm.apache.org/documentation/Setting-up-a-Storm-cluster) | bin/storm.py | pacemaker | JamiesZhang/Storm | python | def pacemaker(klass='org.apache.storm.pacemaker.Pacemaker'):
'Syntax: [storm pacemaker]\n\n Launches the Pacemaker daemon. This command should be run under\n supervision with a tool like daemontools or monit.\n\n See Setting up a Storm cluster for more information.\n (http://storm.apache.org/documentation/Setting-up-a-Storm-cluster)\n '
cppaths = [CLUSTER_CONF_DIR]
jvmopts = (parse_args(confvalue('pacemaker.childopts', cppaths)) + ['-Djava.deserialization.disabled=true', '-Dlogfile.name=pacemaker.log', ('-Dlog4j.configurationFile=' + os.path.join(get_log4j2_conf_dir(), 'cluster.xml'))])
exec_storm_class(klass, jvmtype='-server', daemonName='pacemaker', extrajars=cppaths, jvmopts=jvmopts) |
def supervisor(klass='org.apache.storm.daemon.supervisor.Supervisor'):
'Syntax: [storm supervisor]\n\n Launches the supervisor daemon. This command should be run\n under supervision with a tool like daemontools or monit.\n\n See Setting up a Storm cluster for more information.\n (http://storm.apache.org/documentation/Setting-up-a-Storm-cluster)\n '
cppaths = [CLUSTER_CONF_DIR]
jvmopts = (parse_args(confvalue('supervisor.childopts', cppaths)) + ['-Djava.deserialization.disabled=true', ('-Dlogfile.name=' + STORM_SUPERVISOR_LOG_FILE), ('-Dlog4j.configurationFile=' + os.path.join(get_log4j2_conf_dir(), 'cluster.xml'))])
exec_storm_class(klass, jvmtype='-server', daemonName='supervisor', extrajars=cppaths, jvmopts=jvmopts) | -6,424,705,986,325,982,000 | Syntax: [storm supervisor]
Launches the supervisor daemon. This command should be run
under supervision with a tool like daemontools or monit.
See Setting up a Storm cluster for more information.
(http://storm.apache.org/documentation/Setting-up-a-Storm-cluster) | bin/storm.py | supervisor | JamiesZhang/Storm | python | def supervisor(klass='org.apache.storm.daemon.supervisor.Supervisor'):
'Syntax: [storm supervisor]\n\n Launches the supervisor daemon. This command should be run\n under supervision with a tool like daemontools or monit.\n\n See Setting up a Storm cluster for more information.\n (http://storm.apache.org/documentation/Setting-up-a-Storm-cluster)\n '
cppaths = [CLUSTER_CONF_DIR]
jvmopts = (parse_args(confvalue('supervisor.childopts', cppaths)) + ['-Djava.deserialization.disabled=true', ('-Dlogfile.name=' + STORM_SUPERVISOR_LOG_FILE), ('-Dlog4j.configurationFile=' + os.path.join(get_log4j2_conf_dir(), 'cluster.xml'))])
exec_storm_class(klass, jvmtype='-server', daemonName='supervisor', extrajars=cppaths, jvmopts=jvmopts) |
def ui():
'Syntax: [storm ui]\n\n Launches the UI daemon. The UI provides a web interface for a Storm\n cluster and shows detailed stats about running topologies. This command\n should be run under supervision with a tool like daemontools or monit.\n\n See Setting up a Storm cluster for more information.\n (http://storm.apache.org/documentation/Setting-up-a-Storm-cluster)\n '
cppaths = [CLUSTER_CONF_DIR]
jvmopts = (parse_args(confvalue('ui.childopts', cppaths)) + ['-Djava.deserialization.disabled=true', '-Dlogfile.name=ui.log', ('-Dlog4j.configurationFile=' + os.path.join(get_log4j2_conf_dir(), 'cluster.xml'))])
allextrajars = get_wildcard_dir(STORM_WEBAPP_LIB_DIR)
allextrajars.append(CLUSTER_CONF_DIR)
exec_storm_class('org.apache.storm.daemon.ui.UIServer', jvmtype='-server', daemonName='ui', jvmopts=jvmopts, extrajars=allextrajars) | 4,587,990,193,543,811,600 | Syntax: [storm ui]
Launches the UI daemon. The UI provides a web interface for a Storm
cluster and shows detailed stats about running topologies. This command
should be run under supervision with a tool like daemontools or monit.
See Setting up a Storm cluster for more information.
(http://storm.apache.org/documentation/Setting-up-a-Storm-cluster) | bin/storm.py | ui | JamiesZhang/Storm | python | def ui():
'Syntax: [storm ui]\n\n Launches the UI daemon. The UI provides a web interface for a Storm\n cluster and shows detailed stats about running topologies. This command\n should be run under supervision with a tool like daemontools or monit.\n\n See Setting up a Storm cluster for more information.\n (http://storm.apache.org/documentation/Setting-up-a-Storm-cluster)\n '
cppaths = [CLUSTER_CONF_DIR]
jvmopts = (parse_args(confvalue('ui.childopts', cppaths)) + ['-Djava.deserialization.disabled=true', '-Dlogfile.name=ui.log', ('-Dlog4j.configurationFile=' + os.path.join(get_log4j2_conf_dir(), 'cluster.xml'))])
allextrajars = get_wildcard_dir(STORM_WEBAPP_LIB_DIR)
allextrajars.append(CLUSTER_CONF_DIR)
exec_storm_class('org.apache.storm.daemon.ui.UIServer', jvmtype='-server', daemonName='ui', jvmopts=jvmopts, extrajars=allextrajars) |
def logviewer():
'Syntax: [storm logviewer]\n\n Launches the log viewer daemon. It provides a web interface for viewing\n storm log files. This command should be run under supervision with a\n tool like daemontools or monit.\n\n See Setting up a Storm cluster for more information.\n (http://storm.apache.org/documentation/Setting-up-a-Storm-cluster)\n '
cppaths = [CLUSTER_CONF_DIR]
jvmopts = (parse_args(confvalue('logviewer.childopts', cppaths)) + ['-Djava.deserialization.disabled=true', '-Dlogfile.name=logviewer.log', ('-Dlog4j.configurationFile=' + os.path.join(get_log4j2_conf_dir(), 'cluster.xml'))])
allextrajars = get_wildcard_dir(STORM_WEBAPP_LIB_DIR)
allextrajars.append(CLUSTER_CONF_DIR)
exec_storm_class('org.apache.storm.daemon.logviewer.LogviewerServer', jvmtype='-server', daemonName='logviewer', jvmopts=jvmopts, extrajars=allextrajars) | -3,782,745,241,320,201,000 | Syntax: [storm logviewer]
Launches the log viewer daemon. It provides a web interface for viewing
storm log files. This command should be run under supervision with a
tool like daemontools or monit.
See Setting up a Storm cluster for more information.
(http://storm.apache.org/documentation/Setting-up-a-Storm-cluster) | bin/storm.py | logviewer | JamiesZhang/Storm | python | def logviewer():
'Syntax: [storm logviewer]\n\n Launches the log viewer daemon. It provides a web interface for viewing\n storm log files. This command should be run under supervision with a\n tool like daemontools or monit.\n\n See Setting up a Storm cluster for more information.\n (http://storm.apache.org/documentation/Setting-up-a-Storm-cluster)\n '
cppaths = [CLUSTER_CONF_DIR]
jvmopts = (parse_args(confvalue('logviewer.childopts', cppaths)) + ['-Djava.deserialization.disabled=true', '-Dlogfile.name=logviewer.log', ('-Dlog4j.configurationFile=' + os.path.join(get_log4j2_conf_dir(), 'cluster.xml'))])
allextrajars = get_wildcard_dir(STORM_WEBAPP_LIB_DIR)
allextrajars.append(CLUSTER_CONF_DIR)
exec_storm_class('org.apache.storm.daemon.logviewer.LogviewerServer', jvmtype='-server', daemonName='logviewer', jvmopts=jvmopts, extrajars=allextrajars) |
def drpcclient(*args):
'Syntax: [storm drpc-client [options] ([function argument]*)|(argument*)]\n\n Provides a very simple way to send DRPC requests.\n If a -f argument is supplied to set the function name all of the arguments are treated\n as arguments to the function. If no function is given the arguments must\n be pairs of function argument.\n\n The server and port are picked from the configs.\n '
if (not args):
print_usage(command='drpc-client')
sys.exit(2)
exec_storm_class('org.apache.storm.command.BasicDrpcClient', args=args, jvmtype='-client', extrajars=[USER_CONF_DIR, STORM_BIN_DIR]) | 4,256,926,995,565,434,400 | Syntax: [storm drpc-client [options] ([function argument]*)|(argument*)]
Provides a very simple way to send DRPC requests.
If a -f argument is supplied to set the function name all of the arguments are treated
as arguments to the function. If no function is given the arguments must
be pairs of function argument.
The server and port are picked from the configs. | bin/storm.py | drpcclient | JamiesZhang/Storm | python | def drpcclient(*args):
'Syntax: [storm drpc-client [options] ([function argument]*)|(argument*)]\n\n Provides a very simple way to send DRPC requests.\n If a -f argument is supplied to set the function name all of the arguments are treated\n as arguments to the function. If no function is given the arguments must\n be pairs of function argument.\n\n The server and port are picked from the configs.\n '
if (not args):
print_usage(command='drpc-client')
sys.exit(2)
exec_storm_class('org.apache.storm.command.BasicDrpcClient', args=args, jvmtype='-client', extrajars=[USER_CONF_DIR, STORM_BIN_DIR]) |
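The drpc-client help text above says that without `-f` the arguments must come in function/argument pairs. A small illustrative helper (not part of storm.py) showing that pairing convention:

```python
def pair_drpc_args(args):
    """Group a flat argument list into (function, argument) pairs."""
    if len(args) % 2 != 0:
        raise ValueError('arguments must be pairs of: function argument')
    return list(zip(args[0::2], args[1::2]))

# pair_drpc_args(['exclaim', 'hello', 'exclaim', 'world'])
# -> [('exclaim', 'hello'), ('exclaim', 'world')]
```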
def drpc():
'Syntax: [storm drpc]\n\n Launches a DRPC daemon. This command should be run under supervision\n with a tool like daemontools or monit.\n\n See Distributed RPC for more information.\n (http://storm.apache.org/documentation/Distributed-RPC)\n '
cppaths = [CLUSTER_CONF_DIR]
jvmopts = (parse_args(confvalue('drpc.childopts', cppaths)) + ['-Djava.deserialization.disabled=true', '-Dlogfile.name=drpc.log', ('-Dlog4j.configurationFile=' + os.path.join(get_log4j2_conf_dir(), 'cluster.xml'))])
allextrajars = get_wildcard_dir(STORM_WEBAPP_LIB_DIR)
allextrajars.append(CLUSTER_CONF_DIR)
exec_storm_class('org.apache.storm.daemon.drpc.DRPCServer', jvmtype='-server', daemonName='drpc', jvmopts=jvmopts, extrajars=allextrajars) | 709,845,887,769,927,800 | Syntax: [storm drpc]
Launches a DRPC daemon. This command should be run under supervision
with a tool like daemontools or monit.
See Distributed RPC for more information.
(http://storm.apache.org/documentation/Distributed-RPC) | bin/storm.py | drpc | JamiesZhang/Storm | python | def drpc():
'Syntax: [storm drpc]\n\n Launches a DRPC daemon. This command should be run under supervision\n with a tool like daemontools or monit.\n\n See Distributed RPC for more information.\n (http://storm.apache.org/documentation/Distributed-RPC)\n '
cppaths = [CLUSTER_CONF_DIR]
jvmopts = (parse_args(confvalue('drpc.childopts', cppaths)) + ['-Djava.deserialization.disabled=true', '-Dlogfile.name=drpc.log', ('-Dlog4j.configurationFile=' + os.path.join(get_log4j2_conf_dir(), 'cluster.xml'))])
allextrajars = get_wildcard_dir(STORM_WEBAPP_LIB_DIR)
allextrajars.append(CLUSTER_CONF_DIR)
exec_storm_class('org.apache.storm.daemon.drpc.DRPCServer', jvmtype='-server', daemonName='drpc', jvmopts=jvmopts, extrajars=allextrajars) |
def dev_zookeeper():
'Syntax: [storm dev-zookeeper]\n\n Launches a fresh Zookeeper server using "dev.zookeeper.path" as its local dir and\n "storm.zookeeper.port" as its port. This is only intended for development/testing, the\n Zookeeper instance launched is not configured to be used in production.\n '
jvmopts = ['-Dlogfile.name=dev-zookeeper.log', ('-Dlog4j.configurationFile=' + os.path.join(get_log4j2_conf_dir(), 'cluster.xml'))]
cppaths = [CLUSTER_CONF_DIR]
exec_storm_class('org.apache.storm.command.DevZookeeper', jvmtype='-server', daemonName='dev_zookeeper', jvmopts=jvmopts, extrajars=[CLUSTER_CONF_DIR]) | 3,622,109,075,034,233,000 | Syntax: [storm dev-zookeeper]
Launches a fresh Zookeeper server using "dev.zookeeper.path" as its local dir and
"storm.zookeeper.port" as its port. This is only intended for development/testing, the
Zookeeper instance launched is not configured to be used in production. | bin/storm.py | dev_zookeeper | JamiesZhang/Storm | python | def dev_zookeeper():
'Syntax: [storm dev-zookeeper]\n\n Launches a fresh Zookeeper server using "dev.zookeeper.path" as its local dir and\n "storm.zookeeper.port" as its port. This is only intended for development/testing, the\n Zookeeper instance launched is not configured to be used in production.\n '
jvmopts = ['-Dlogfile.name=dev-zookeeper.log', ('-Dlog4j.configurationFile=' + os.path.join(get_log4j2_conf_dir(), 'cluster.xml'))]
cppaths = [CLUSTER_CONF_DIR]
exec_storm_class('org.apache.storm.command.DevZookeeper', jvmtype='-server', daemonName='dev_zookeeper', jvmopts=jvmopts, extrajars=[CLUSTER_CONF_DIR]) |
def version():
'Syntax: [storm version]\n\n Prints the version number of this Storm release.\n '
cppaths = [CLUSTER_CONF_DIR]
exec_storm_class('org.apache.storm.utils.VersionInfo', jvmtype='-client', extrajars=[CLUSTER_CONF_DIR]) | 6,929,928,848,461,484,000 | Syntax: [storm version]
Prints the version number of this Storm release. | bin/storm.py | version | JamiesZhang/Storm | python | def version():
'Syntax: [storm version]\n\n Prints the version number of this Storm release.\n '
cppaths = [CLUSTER_CONF_DIR]
exec_storm_class('org.apache.storm.utils.VersionInfo', jvmtype='-client', extrajars=[CLUSTER_CONF_DIR]) |
def print_classpath():
'Syntax: [storm classpath]\n\n Prints the classpath used by the storm client when running commands.\n '
print(get_classpath([], client=True)) | -1,740,646,617,593,392,600 | Syntax: [storm classpath]
Prints the classpath used by the storm client when running commands. | bin/storm.py | print_classpath | JamiesZhang/Storm | python | def print_classpath():
'Syntax: [storm classpath]\n\n Prints the classpath used by the storm client when running commands.\n '
print(get_classpath([], client=True)) |
def print_server_classpath():
'Syntax: [storm server_classpath]\n\n Prints the classpath used by the storm servers when running commands.\n '
print(get_classpath([], daemon=True)) | -5,675,609,904,092,449,000 | Syntax: [storm server_classpath]
Prints the classpath used by the storm servers when running commands. | bin/storm.py | print_server_classpath | JamiesZhang/Storm | python | def print_server_classpath():
'Syntax: [storm server_classpath]\n\n Prints the classpath used by the storm servers when running commands.\n '
print(get_classpath([], daemon=True)) |
def monitor(*args):
"Syntax: [storm monitor topology-name [-i interval-secs] [-m component-id] [-s stream-id] [-w [emitted | transferred]]]\n\n Monitor given topology's throughput interactively.\n One can specify poll-interval, component-id, stream-id, watch-item[emitted | transferred]\n By default,\n poll-interval is 4 seconds;\n all component-ids will be list;\n stream-id is 'default';\n watch-item is 'emitted';\n "
exec_storm_class('org.apache.storm.command.Monitor', args=args, jvmtype='-client', extrajars=[USER_CONF_DIR, STORM_BIN_DIR]) | -4,058,287,528,590,285,300 | Syntax: [storm monitor topology-name [-i interval-secs] [-m component-id] [-s stream-id] [-w [emitted | transferred]]]
Monitor given topology's throughput interactively.
One can specify poll-interval, component-id, stream-id, watch-item[emitted | transferred]
By default,
poll-interval is 4 seconds;
all component-ids will be list;
stream-id is 'default';
watch-item is 'emitted'; | bin/storm.py | monitor | JamiesZhang/Storm | python | def monitor(*args):
"Syntax: [storm monitor topology-name [-i interval-secs] [-m component-id] [-s stream-id] [-w [emitted | transferred]]]\n\n Monitor given topology's throughput interactively.\n One can specify poll-interval, component-id, stream-id, watch-item[emitted | transferred]\n By default,\n poll-interval is 4 seconds;\n all component-ids will be list;\n stream-id is 'default';\n watch-item is 'emitted';\n "
exec_storm_class('org.apache.storm.command.Monitor', args=args, jvmtype='-client', extrajars=[USER_CONF_DIR, STORM_BIN_DIR]) |
def print_commands():
'Print all client commands and link to documentation'
print(('Commands:\n\t' + '\n\t'.join(sorted(COMMANDS.keys()))))
print('\nHelp: \n\thelp \n\thelp <command>')
print('\nDocumentation for the storm client can be found at http://storm.apache.org/documentation/Command-line-client.html\n')
print('Configs can be overridden using one or more -c flags, e.g. "storm list -c nimbus.host=nimbus.mycompany.com"\n') | 6,484,770,233,767,060,000 | Print all client commands and link to documentation | bin/storm.py | print_commands | JamiesZhang/Storm | python | def print_commands():
print(('Commands:\n\t' + '\n\t'.join(sorted(COMMANDS.keys()))))
print('\nHelp: \n\thelp \n\thelp <command>')
print('\nDocumentation for the storm client can be found at http://storm.apache.org/documentation/Command-line-client.html\n')
print('Configs can be overridden using one or more -c flags, e.g. "storm list -c nimbus.host=nimbus.mycompany.com"\n') |
def print_usage(command=None):
'Print one help message or list of available commands'
if (command != None):
if (command in COMMANDS):
print((COMMANDS[command].__doc__ or ('No documentation provided for <%s>' % command)))
else:
print(('<%s> is not a valid command' % command))
else:
print_commands() | 7,656,778,314,449,597,000 | Print one help message or list of available commands | bin/storm.py | print_usage | JamiesZhang/Storm | python | def print_usage(command=None):
if (command != None):
if (command in COMMANDS):
print((COMMANDS[command].__doc__ or ('No documentation provided for <%s>' % command)))
else:
print(('<%s> is not a valid command' % command))
else:
print_commands() |
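Both `print_commands` and `print_usage` assume a module-level `COMMANDS` dict mapping command names to handler functions. A trimmed-down, hypothetical sketch of that dispatch table and how a main entry point could use it (the real table and argument handling in storm.py are more involved):

```python
# Hypothetical, trimmed-down dispatch table of the kind print_commands()
# and print_usage() iterate over; the real table in storm.py is larger.
COMMANDS = {
    'kill_workers': kill_workers,
    'admin': admin,
    'shell': shell,
    'nimbus': nimbus,
    'supervisor': supervisor,
    'ui': ui,
    'version': version,
    'help': print_usage,
}

def main(args):
    """Dispatch `storm <command> [args...]` to the matching handler."""
    if not args or args[0] not in COMMANDS:
        print_usage(args[0] if args else None)
        return 2
    COMMANDS[args[0]](*args[1:])
    return 0
```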
def __init__(self, latitude, longitude):
'Init BOM data collector.'
self.observations_data = None
self.daily_forecasts_data = None
self.geohash = self.geohash_encode(latitude, longitude)
_LOGGER.debug(f'geohash: {self.geohash}') | 4,990,285,546,407,237,000 | Init BOM data collector. | custom_components/bureau_of_meteorology/PyBoM/collector.py | __init__ | QziP22/HomeAssistantConfig | python | def __init__(self, latitude, longitude):
self.observations_data = None
self.daily_forecasts_data = None
self.geohash = self.geohash_encode(latitude, longitude)
_LOGGER.debug(f'geohash: {self.geohash}') |
async def get_location_name(self):
'Get JSON location name from BOM API endpoint.'
url = (BASE_URL + LOCATIONS_URL.format(self.geohash))
async with aiohttp.ClientSession() as session:
response = (await session.get(url))
if ((response is not None) and (response.status == 200)):
locations_data = (await response.json())
self.location_name = locations_data['data']['name']
return True | -6,402,569,652,663,228,000 | Get JSON location name from BOM API endpoint. | custom_components/bureau_of_meteorology/PyBoM/collector.py | get_location_name | QziP22/HomeAssistantConfig | python | async def get_location_name(self):
url = (BASE_URL + LOCATIONS_URL.format(self.geohash))
async with aiohttp.ClientSession() as session:
response = (await session.get(url))
if ((response is not None) and (response.status == 200)):
locations_data = (await response.json())
self.location_name = locations_data['data']['name']
return True |
async def get_observations_data(self):
'Get JSON observations data from BOM API endpoint.'
url = OBSERVATIONS_URL.format(self.geohash)
async with aiohttp.ClientSession() as session:
response = (await session.get(url))
if ((response is not None) and (response.status == 200)):
self.observations_data = (await response.json())
(await self.format_observations_data()) | -483,857,492,838,944,900 | Get JSON observations data from BOM API endpoint. | custom_components/bureau_of_meteorology/PyBoM/collector.py | get_observations_data | QziP22/HomeAssistantConfig | python | async def get_observations_data(self):
url = OBSERVATIONS_URL.format(self.geohash)
async with aiohttp.ClientSession() as session:
response = (await session.get(url))
if ((response is not None) and (response.status == 200)):
self.observations_data = (await response.json())
(await self.format_observations_data()) |
async def format_observations_data(self):
'Flatten out wind and gust data.'
flattened = {}
wind = self.observations_data['data']['wind']
flattened['wind_speed_kilometre'] = wind['speed_kilometre']
flattened['wind_speed_knot'] = wind['speed_knot']
flattened['wind_direction'] = wind['direction']
if (self.observations_data['data']['gust'] is not None):
gust = self.observations_data['data']['gust']
flattened['gust_speed_kilometre'] = gust['speed_kilometre']
flattened['gust_speed_knot'] = gust['speed_knot']
else:
flattened['gust_speed_kilometre'] = None
flattened['gust_speed_knot'] = None
self.observations_data['data'].update(flattened) | -3,654,381,207,656,843,300 | Flatten out wind and gust data. | custom_components/bureau_of_meteorology/PyBoM/collector.py | format_observations_data | QziP22/HomeAssistantConfig | python | async def format_observations_data(self):
flattened = {}
wind = self.observations_data['data']['wind']
flattened['wind_speed_kilometre'] = wind['speed_kilometre']
flattened['wind_speed_knot'] = wind['speed_knot']
flattened['wind_direction'] = wind['direction']
if (self.observations_data['data']['gust'] is not None):
gust = self.observations_data['data']['gust']
flattened['gust_speed_kilometre'] = gust['speed_kilometre']
flattened['gust_speed_knot'] = gust['speed_knot']
else:
flattened['gust_speed_kilometre'] = None
flattened['gust_speed_knot'] = None
self.observations_data['data'].update(flattened) |
async def get_daily_forecasts_data(self):
'Get JSON daily forecasts data from BOM API endpoint.'
url = (BASE_URL + DAILY_FORECASTS_URL.format(self.geohash))
async with aiohttp.ClientSession() as session:
response = (await session.get(url))
if ((response is not None) and (response.status == 200)):
self.daily_forecasts_data = (await response.json())
(await self.format_forecast_data()) | 8,678,075,092,967,983,000 | Get JSON daily forecasts data from BOM API endpoint. | custom_components/bureau_of_meteorology/PyBoM/collector.py | get_daily_forecasts_data | QziP22/HomeAssistantConfig | python | async def get_daily_forecasts_data(self):
url = (BASE_URL + DAILY_FORECASTS_URL.format(self.geohash))
async with aiohttp.ClientSession() as session:
response = (await session.get(url))
if ((response is not None) and (response.status == 200)):
self.daily_forecasts_data = (await response.json())
(await self.format_forecast_data()) |
async def format_forecast_data(self):
'Flatten out forecast data.'
flattened = {}
days = len(self.daily_forecasts_data['data'])
for day in range(0, days):
icon = self.daily_forecasts_data['data'][day]['icon_descriptor']
flattened['mdi_icon'] = MDI_ICON_MAP[icon]
uv = self.daily_forecasts_data['data'][day]['uv']
flattened['uv_category'] = UV_MAP[uv['category']]
flattened['uv_max_index'] = uv['max_index']
flattened['uv_start_time'] = uv['start_time']
flattened['uv_end_time'] = uv['end_time']
rain = self.daily_forecasts_data['data'][day]['rain']
flattened['rain_chance'] = rain['chance']
flattened['rain_amount_min'] = rain['amount']['min']
if (rain['amount']['max'] is None):
flattened['rain_amount_max'] = flattened['rain_amount_min']
flattened['rain_amount_range'] = rain['amount']['min']
else:
flattened['rain_amount_max'] = rain['amount']['max']
flattened['rain_amount_range'] = '{} to {}'.format(rain['amount']['min'], rain['amount']['max'])
self.daily_forecasts_data['data'][day].update(flattened) | 5,247,102,597,364,240,000 | Flatten out forecast data. | custom_components/bureau_of_meteorology/PyBoM/collector.py | format_forecast_data | QziP22/HomeAssistantConfig | python | async def format_forecast_data(self):
flattened = {}
days = len(self.daily_forecasts_data['data'])
for day in range(0, days):
icon = self.daily_forecasts_data['data'][day]['icon_descriptor']
flattened['mdi_icon'] = MDI_ICON_MAP[icon]
uv = self.daily_forecasts_data['data'][day]['uv']
flattened['uv_category'] = UV_MAP[uv['category']]
flattened['uv_max_index'] = uv['max_index']
flattened['uv_start_time'] = uv['start_time']
flattened['uv_end_time'] = uv['end_time']
rain = self.daily_forecasts_data['data'][day]['rain']
flattened['rain_chance'] = rain['chance']
flattened['rain_amount_min'] = rain['amount']['min']
if (rain['amount']['max'] is None):
flattened['rain_amount_max'] = flattened['rain_amount_min']
flattened['rain_amount_range'] = rain['amount']['min']
else:
flattened['rain_amount_max'] = rain['amount']['max']
flattened['rain_amount_range'] = '{} to {}'.format(rain['amount']['min'], rain['amount']['max'])
self.daily_forecasts_data['data'][day].update(flattened) |
@Throttle(MIN_TIME_BETWEEN_UPDATES)
async def async_update(self):
'Refresh the data on the collector object.'
(await self.get_observations_data())
(await self.get_daily_forecasts_data()) | 581,590,499,631,114,500 | Refresh the data on the collector object. | custom_components/bureau_of_meteorology/PyBoM/collector.py | async_update | QziP22/HomeAssistantConfig | python | @Throttle(MIN_TIME_BETWEEN_UPDATES)
async def async_update(self):
(await self.get_observations_data())
(await self.get_daily_forecasts_data()) |
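A hedged usage sketch of driving the collector above from a plain asyncio loop. The enclosing class is assumed here to be named `Collector`; the attribute and method names follow the code shown, and everything else (coordinates, entry point) is illustrative:

```python
import asyncio

async def demo(latitude, longitude):
    collector = Collector(latitude, longitude)   # assumed class name
    await collector.get_location_name()
    await collector.async_update()               # observations + daily forecasts
    print(collector.location_name)
    print(collector.observations_data['data']['wind_speed_kilometre'])

# Example coordinates (Sydney); run inside an asyncio-capable context.
# asyncio.run(demo(-33.87, 151.21))
```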
def basic_auth(username, password) -> bool:
' HTTP basic authorization '
query_result = Application.query.join(User, (User.id == Application.userIntID)).with_entities(Application, User).filter((Application.appStatus == 1), (User.enable == 1), (Application.appID == username)).first()
if (not query_result):
raise AuthFailed(field='appID')
(application, user) = query_result
date_now = arrow.now().naive
if (application.expiredAt and (date_now > application.expiredAt)):
raise AuthFailed(field='expiredAt')
if (application.appToken != password):
raise AuthFailed(field='appToken')
g.user_id: int = user.id
g.tenant_uid: str = user.tenantID
g.role_id: int = application.roleIntID
g.app_uid: str = application.appID
user.lastRequestTime = date_now
user.update()
return True | 351,060,006,020,399,360 | HTTP basic authorization | server/actor_libs/auth/base.py | basic_auth | Mateus-dang/ActorCloud | python | def basic_auth(username, password) -> bool:
' '
query_result = Application.query.join(User, (User.id == Application.userIntID)).with_entities(Application, User).filter((Application.appStatus == 1), (User.enable == 1), (Application.appID == username)).first()
if (not query_result):
raise AuthFailed(field='appID')
(application, user) = query_result
date_now = arrow.now().naive
if (application.expiredAt and (date_now > application.expiredAt)):
raise AuthFailed(field='expiredAt')
if (application.appToken != password):
raise AuthFailed(field='appToken')
g.user_id: int = user.id
g.tenant_uid: str = user.tenantID
g.role_id: int = application.roleIntID
g.app_uid: str = application.appID
user.lastRequestTime = date_now
user.update()
return True |
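basic_auth above maps the HTTP Basic username to an application's appID and the password to its appToken. A minimal sketch of the header a client would send (values are placeholders, not real credentials):

```python
import base64

def basic_auth_header(app_id, app_token):
    """Build the Authorization header that basic_auth() validates."""
    creds = base64.b64encode(f'{app_id}:{app_token}'.encode()).decode()
    return {'Authorization': f'Basic {creds}'}

# basic_auth_header('my-app-id', 'my-app-token')
# -> {'Authorization': 'Basic <base64 of "my-app-id:my-app-token">'}
```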
def token_auth(token) -> bool:
' HTTP bearer token authorization '
jwt = JWT(current_app.config['SECRET_KEY'])
try:
data = jwt.loads(token)
except Exception:
raise AuthFailed(field='token')
if data.get('consumer_id'):
...
else:
if (('user_id' not in data) or ('role_id' not in data)):
raise AuthFailed(field='token')
if ((data['role_id'] != 1) and (not data.get('tenant_uid'))):
raise AuthFailed(field='token')
user = User.query.filter((User.roleIntID == data['role_id']), (User.id == data['user_id']), (User.tenantID == data['tenant_uid'])).first()
if (not user):
raise AuthFailed(field='token')
g.user_id: int = user.id
g.tenant_uid: str = user.tenantID
g.role_id: int = user.roleIntID
g.app_uid: str = None
g.user_auth_type: int = user.userAuthType
user.lastRequestTime = arrow.now().naive
user.update()
return True | 7,266,384,097,824,410,000 | HTTP bearer token authorization | server/actor_libs/auth/base.py | token_auth | Mateus-dang/ActorCloud | python | def token_auth(token) -> bool:
' '
jwt = JWT(current_app.config['SECRET_KEY'])
try:
data = jwt.loads(token)
except Exception:
raise AuthFailed(field='token')
if data.get('consumer_id'):
...
else:
if (('user_id' not in data) or ('role_id' not in data)):
raise AuthFailed(field='token')
if ((data['role_id'] != 1) and (not data.get('tenant_uid'))):
raise AuthFailed(field='token')
user = User.query.filter((User.roleIntID == data['role_id']), (User.id == data['user_id']), (User.tenantID == data['tenant_uid'])).first()
if (not user):
raise AuthFailed(field='token')
g.user_id: int = user.id
g.tenant_uid: str = user.tenantID
g.role_id: int = user.roleIntID
g.app_uid: str = None
g.user_auth_type: int = user.userAuthType
user.lastRequestTime = arrow.now().naive
user.update()
return True |
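token_auth above expects a bearer token carrying user_id, role_id and tenant_uid claims signed with the app's SECRET_KEY. A sketch of minting a token with those claims; PyJWT and the HS256 algorithm are assumptions for illustration, the server issues real tokens through its own JWT helper:

```python
import jwt  # PyJWT, used here only to show the expected claim structure

def make_demo_token(secret):
    """Mint a token carrying the claims token_auth() checks for."""
    claims = {'user_id': 1, 'role_id': 1, 'tenant_uid': 'tenant-001'}
    return jwt.encode(claims, secret, algorithm='HS256')
```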
def execute_gremlin(client: NeptuneClient, query: str) -> pd.DataFrame:
'Return results of a Gremlin traversal as pandas dataframe.\n\n Parameters\n ----------\n client : neptune.Client\n instance of the neptune client to use\n traversal : str\n The gremlin traversal to execute\n\n Returns\n -------\n Union[pandas.DataFrame, Iterator[pandas.DataFrame]]\n Results as Pandas DataFrame\n\n Examples\n --------\n Run a Gremlin Query\n\n >>> import awswrangler as wr\n >>> client = wr.neptune.connect(neptune_endpoint, neptune_port, iam_enabled=False)\n >>> df = wr.neptune.execute_gremlin(client, "g.V().limit(1)")\n '
results = client.read_gremlin(query)
df = pd.DataFrame.from_records(results)
return df | -4,956,243,994,760,486,000 | Return results of a Gremlin traversal as pandas dataframe.
Parameters
----------
client : neptune.Client
instance of the neptune client to use
traversal : str
The gremlin traversal to execute
Returns
-------
Union[pandas.DataFrame, Iterator[pandas.DataFrame]]
Results as Pandas DataFrame
Examples
--------
Run a Gremlin Query
>>> import awswrangler as wr
>>> client = wr.neptune.connect(neptune_endpoint, neptune_port, iam_enabled=False)
>>> df = wr.neptune.execute_gremlin(client, "g.V().limit(1)") | awswrangler/neptune/neptune.py | execute_gremlin | minwook-shin/aws-data-wrangler | python | def execute_gremlin(client: NeptuneClient, query: str) -> pd.DataFrame:
'Return results of a Gremlin traversal as pandas dataframe.\n\n Parameters\n ----------\n client : neptune.Client\n instance of the neptune client to use\n traversal : str\n The gremlin traversal to execute\n\n Returns\n -------\n Union[pandas.DataFrame, Iterator[pandas.DataFrame]]\n Results as Pandas DataFrame\n\n Examples\n --------\n Run a Gremlin Query\n\n >>> import awswrangler as wr\n >>> client = wr.neptune.connect(neptune_endpoint, neptune_port, iam_enabled=False)\n >>> df = wr.neptune.execute_gremlin(client, "g.V().limit(1)")\n '
results = client.read_gremlin(query)
df = pd.DataFrame.from_records(results)
return df |
def execute_opencypher(client: NeptuneClient, query: str) -> pd.DataFrame:
'Return results of a openCypher traversal as pandas dataframe.\n\n Parameters\n ----------\n client : NeptuneClient\n instance of the neptune client to use\n query : str\n The openCypher query to execute\n\n Returns\n -------\n Union[pandas.DataFrame, Iterator[pandas.DataFrame]]\n Results as Pandas DataFrame\n\n Examples\n --------\n Run an openCypher query\n\n >>> import awswrangler as wr\n >>> client = wr.neptune.connect(neptune_endpoint, neptune_port, iam_enabled=False)\n >>> resp = wr.neptune.execute_opencypher(client, "MATCH (n) RETURN n LIMIT 1")\n '
resp = client.read_opencypher(query)
df = pd.DataFrame.from_dict(resp)
return df | -6,708,623,386,071,468,000 | Return results of a openCypher traversal as pandas dataframe.
Parameters
----------
client : NeptuneClient
instance of the neptune client to use
query : str
The openCypher query to execute
Returns
-------
Union[pandas.DataFrame, Iterator[pandas.DataFrame]]
Results as Pandas DataFrame
Examples
--------
Run an openCypher query
>>> import awswrangler as wr
>>> client = wr.neptune.connect(neptune_endpoint, neptune_port, iam_enabled=False)
>>> resp = wr.neptune.execute_opencypher(client, "MATCH (n) RETURN n LIMIT 1") | awswrangler/neptune/neptune.py | execute_opencypher | minwook-shin/aws-data-wrangler | python | def execute_opencypher(client: NeptuneClient, query: str) -> pd.DataFrame:
'Return results of a openCypher traversal as pandas dataframe.\n\n Parameters\n ----------\n client : NeptuneClient\n instance of the neptune client to use\n query : str\n The openCypher query to execute\n\n Returns\n -------\n Union[pandas.DataFrame, Iterator[pandas.DataFrame]]\n Results as Pandas DataFrame\n\n Examples\n --------\n Run an openCypher query\n\n >>> import awswrangler as wr\n >>> client = wr.neptune.connect(neptune_endpoint, neptune_port, iam_enabled=False)\n >>> resp = wr.neptune.execute_opencypher(client, "MATCH (n) RETURN n LIMIT 1")\n '
resp = client.read_opencypher(query)
df = pd.DataFrame.from_dict(resp)
return df |
def execute_sparql(client: NeptuneClient, query: str) -> pd.DataFrame:
'Return results of a SPARQL query as pandas dataframe.\n\n Parameters\n ----------\n client : NeptuneClient\n instance of the neptune client to use\n query : str\n The SPARQL traversal to execute\n\n Returns\n -------\n Union[pandas.DataFrame, Iterator[pandas.DataFrame]]\n Results as Pandas DataFrame\n\n Examples\n --------\n Run a SPARQL query\n\n >>> import awswrangler as wr\n >>> client = wr.neptune.connect(neptune_endpoint, neptune_port, iam_enabled=False)\n >>> df = wr.neptune.execute_sparql(client, "PREFIX foaf: <http://xmlns.com/foaf/0.1/>\n SELECT ?name\n WHERE {\n ?person foaf:name ?name .\n '
data = client.read_sparql(query)
df = None
if (('results' in data) and ('bindings' in data['results'])):
df = pd.DataFrame(data['results']['bindings'])
df = df.applymap((lambda x: x['value']))
else:
df = pd.DataFrame(data)
return df | -8,020,320,469,512,584,000 | Return results of a SPARQL query as pandas dataframe.
Parameters
----------
client : NeptuneClient
instance of the neptune client to use
query : str
The SPARQL traversal to execute
Returns
-------
Union[pandas.DataFrame, Iterator[pandas.DataFrame]]
Results as Pandas DataFrame
Examples
--------
Run a SPARQL query
>>> import awswrangler as wr
>>> client = wr.neptune.connect(neptune_endpoint, neptune_port, iam_enabled=False)
>>> df = wr.neptune.execute_sparql(client, "PREFIX foaf: <http://xmlns.com/foaf/0.1/>
SELECT ?name
WHERE {
?person foaf:name ?name . | awswrangler/neptune/neptune.py | execute_sparql | minwook-shin/aws-data-wrangler | python | def execute_sparql(client: NeptuneClient, query: str) -> pd.DataFrame:
'Return results of a SPARQL query as pandas dataframe.\n\n Parameters\n ----------\n client : NeptuneClient\n instance of the neptune client to use\n query : str\n The SPARQL traversal to execute\n\n Returns\n -------\n Union[pandas.DataFrame, Iterator[pandas.DataFrame]]\n Results as Pandas DataFrame\n\n Examples\n --------\n Run a SPARQL query\n\n >>> import awswrangler as wr\n >>> client = wr.neptune.connect(neptune_endpoint, neptune_port, iam_enabled=False)\n >>> df = wr.neptune.execute_sparql(client, "PREFIX foaf: <http://xmlns.com/foaf/0.1/>\n SELECT ?name\n WHERE {\n ?person foaf:name ?name .\n '
data = client.read_sparql(query)
df = None
if (('results' in data) and ('bindings' in data['results'])):
df = pd.DataFrame(data['results']['bindings'])
df = df.applymap((lambda x: x['value']))
else:
df = pd.DataFrame(data)
return df |
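For reference, the SPARQL JSON results format that execute_sparql flattens looks like the sketch below (values made up); each binding maps a variable name to a dict whose 'value' key holds the bare value:

```python
data = {
    'head': {'vars': ['name']},
    'results': {'bindings': [
        {'name': {'type': 'literal', 'value': 'alice'}},
        {'name': {'type': 'literal', 'value': 'bob'}},
    ]},
}
# pd.DataFrame(data['results']['bindings']).applymap(lambda x: x['value'])
# yields a one-column frame: name -> ['alice', 'bob']
```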
def to_property_graph(client: NeptuneClient, df: pd.DataFrame, batch_size: int=50, use_header_cardinality: bool=True) -> bool:
'Write records stored in a DataFrame into Amazon Neptune.\n\n If writing to a property graph then DataFrames for vertices and edges must be written separately.\n DataFrames for vertices must have a ~label column with the label and a ~id column for the vertex id.\n If the ~id column does not exist, the specified id does not exists, or is empty then a new vertex will be added.\n If no ~label column exists an exception will be thrown.\n DataFrames for edges must have a ~id, ~label, ~to, and ~from column. If the ~id column does not exist\n the specified id does not exists, or is empty then a new edge will be added. If no ~label, ~to, or ~from column\n exists an exception will be thrown.\n\n If you would like to save data using `single` cardinality then you can postfix (single) to the column header and\n set use_header_cardinality=True (default). e.g. A column named `name(single)` will save the `name` property\n as single\n cardinality. You can disable this by setting by setting `use_header_cardinality=False`.\n\n Parameters\n ----------\n client : NeptuneClient\n instance of the neptune client to use\n df : pandas.DataFrame\n Pandas DataFrame https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.html\n batch_size: int\n The number of rows to save at a time. Default 50\n use_header_cardinality: bool\n If True, then the header cardinality will be used to save the data. Default True\n\n Returns\n -------\n bool\n True if records were written\n\n Examples\n --------\n Writing to Amazon Neptune\n\n >>> import awswrangler as wr\n >>> client = wr.neptune.connect(neptune_endpoint, neptune_port, iam_enabled=False)\n >>> wr.neptune.gremlin.to_property_graph(\n ... df=df\n ... )\n '
g = Graph().traversal()
is_edge_df = False
is_update_df = True
if ('~id' in df.columns):
if ('~label' in df.columns):
is_update_df = False
if (('~to' in df.columns) and ('~from' in df.columns)):
is_edge_df = True
else:
raise exceptions.InvalidArgumentValue('Dataframe must contain at least a ~id and a ~label column to be saved to Amazon Neptune')
for (index, row) in df.iterrows():
if is_update_df:
g = _build_gremlin_update(g, row, use_header_cardinality)
elif is_edge_df:
g = _build_gremlin_insert_edges(g, row.to_dict(), use_header_cardinality)
else:
g = _build_gremlin_insert_vertices(g, row.to_dict(), use_header_cardinality)
if ((index > 0) and ((index % batch_size) == 0)):
res = _run_gremlin_insert(client, g)
if res:
g = Graph().traversal()
return _run_gremlin_insert(client, g) | 2,575,334,211,846,941,700 | Write records stored in a DataFrame into Amazon Neptune.
If writing to a property graph then DataFrames for vertices and edges must be written separately.
DataFrames for vertices must have a ~label column with the label and a ~id column for the vertex id.
If the ~id column does not exist, the specified id does not exists, or is empty then a new vertex will be added.
If no ~label column exists an exception will be thrown.
DataFrames for edges must have a ~id, ~label, ~to, and ~from column. If the ~id column does not exist
the specified id does not exists, or is empty then a new edge will be added. If no ~label, ~to, or ~from column
exists an exception will be thrown.
If you would like to save data using `single` cardinality then you can postfix (single) to the column header and
set use_header_cardinality=True (default). e.g. A column named `name(single)` will save the `name` property
as single
cardinality. You can disable this by setting by setting `use_header_cardinality=False`.
Parameters
----------
client : NeptuneClient
instance of the neptune client to use
df : pandas.DataFrame
Pandas DataFrame https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.html
batch_size: int
The number of rows to save at a time. Default 50
use_header_cardinality: bool
If True, then the header cardinality will be used to save the data. Default True
Returns
-------
bool
True if records were written
Examples
--------
Writing to Amazon Neptune
>>> import awswrangler as wr
>>> client = wr.neptune.connect(neptune_endpoint, neptune_port, iam_enabled=False)
>>> wr.neptune.gremlin.to_property_graph(
... df=df
... ) | awswrangler/neptune/neptune.py | to_property_graph | minwook-shin/aws-data-wrangler | python | def to_property_graph(client: NeptuneClient, df: pd.DataFrame, batch_size: int=50, use_header_cardinality: bool=True) -> bool:
'Write records stored in a DataFrame into Amazon Neptune.\n\n If writing to a property graph then DataFrames for vertices and edges must be written separately.\n DataFrames for vertices must have a ~label column with the label and a ~id column for the vertex id.\n If the ~id column does not exist, the specified id does not exists, or is empty then a new vertex will be added.\n If no ~label column exists an exception will be thrown.\n DataFrames for edges must have a ~id, ~label, ~to, and ~from column. If the ~id column does not exist\n the specified id does not exists, or is empty then a new edge will be added. If no ~label, ~to, or ~from column\n exists an exception will be thrown.\n\n If you would like to save data using `single` cardinality then you can postfix (single) to the column header and\n set use_header_cardinality=True (default). e.g. A column named `name(single)` will save the `name` property\n as single\n cardinality. You can disable this by setting by setting `use_header_cardinality=False`.\n\n Parameters\n ----------\n client : NeptuneClient\n instance of the neptune client to use\n df : pandas.DataFrame\n Pandas DataFrame https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.html\n batch_size: int\n The number of rows to save at a time. Default 50\n use_header_cardinality: bool\n If True, then the header cardinality will be used to save the data. Default True\n\n Returns\n -------\n bool\n True if records were written\n\n Examples\n --------\n Writing to Amazon Neptune\n\n >>> import awswrangler as wr\n >>> client = wr.neptune.connect(neptune_endpoint, neptune_port, iam_enabled=False)\n >>> wr.neptune.gremlin.to_property_graph(\n ... df=df\n ... )\n '
g = Graph().traversal()
is_edge_df = False
is_update_df = True
if ('~id' in df.columns):
if ('~label' in df.columns):
is_update_df = False
if (('~to' in df.columns) and ('~from' in df.columns)):
is_edge_df = True
else:
raise exceptions.InvalidArgumentValue('Dataframe must contain at least a ~id and a ~label column to be saved to Amazon Neptune')
for (index, row) in df.iterrows():
if is_update_df:
g = _build_gremlin_update(g, row, use_header_cardinality)
elif is_edge_df:
g = _build_gremlin_insert_edges(g, row.to_dict(), use_header_cardinality)
else:
g = _build_gremlin_insert_vertices(g, row.to_dict(), use_header_cardinality)
if ((index > 0) and ((index % batch_size) == 0)):
res = _run_gremlin_insert(client, g)
if res:
g = Graph().traversal()
return _run_gremlin_insert(client, g) |
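To make the column conventions above concrete, here is a hypothetical vertex DataFrame in the shape to_property_graph expects: ~id and ~label are required, and a '(single)' suffix on a header requests single-cardinality writes when use_header_cardinality=True:

```python
import pandas as pd

vertices = pd.DataFrame([
    {'~id': 'p-1', '~label': 'person', 'name(single)': 'alice', 'age': 34},
    {'~id': 'p-2', '~label': 'person', 'name(single)': 'bob', 'age': 41},
])
# client = wr.neptune.connect(neptune_endpoint, neptune_port, iam_enabled=False)
# wr.neptune.to_property_graph(client, vertices)
```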
def to_rdf_graph(client: NeptuneClient, df: pd.DataFrame, batch_size: int=50, subject_column: str='s', predicate_column: str='p', object_column: str='o', graph_column: str='g') -> bool:
"Write records stored in a DataFrame into Amazon Neptune.\n\n The DataFrame must consist of triples with column names for the subject, predicate, and object specified.\n If you want to add data into a named graph then you will also need the graph column.\n\n Parameters\n ----------\n client (NeptuneClient) :\n instance of the neptune client to use\n df (pandas.DataFrame) :\n Pandas DataFrame https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.html\n subject_column (str, optional) :\n The column name in the dataframe for the subject. Defaults to 's'\n predicate_column (str, optional) :\n The column name in the dataframe for the predicate. Defaults to 'p'\n object_column (str, optional) :\n The column name in the dataframe for the object. Defaults to 'o'\n graph_column (str, optional) :\n The column name in the dataframe for the graph if sending across quads. Defaults to 'g'\n\n Returns\n -------\n bool\n True if records were written\n\n Examples\n --------\n Writing to Amazon Neptune\n\n >>> import awswrangler as wr\n >>> client = wr.neptune.connect(neptune_endpoint, neptune_port, iam_enabled=False)\n >>> wr.neptune.gremlin.to_rdf_graph(\n ... df=df\n ... )\n "
is_quads = False
if pd.Series([subject_column, object_column, predicate_column]).isin(df.columns).all():
if (graph_column in df.columns):
is_quads = True
else:
raise exceptions.InvalidArgumentValue('Dataframe must contain at least the subject, predicate, and object columns defined or the defaults\n (s, p, o) to be saved to Amazon Neptune')
query = ''
for (index, row) in df.iterrows():
if is_quads:
insert = f'''INSERT DATA {{ GRAPH <{row[graph_column]}> {{<{row[subject_column]}>
<{str(row[predicate_column])}> <{row[object_column]}> . }} }}; '''
query = (query + insert)
else:
insert = f'''INSERT DATA {{ <{row[subject_column]}> <{str(row[predicate_column])}>
<{row[object_column]}> . }}; '''
query = (query + insert)
if ((index > 0) and ((index % batch_size) == 0)):
res = client.write_sparql(query)
if res:
query = ''
return client.write_sparql(query) | 3,725,097,353,831,719,000 | Write records stored in a DataFrame into Amazon Neptune.
The DataFrame must consist of triples with column names for the subject, predicate, and object specified.
If you want to add data into a named graph then you will also need the graph column.
Parameters
----------
client (NeptuneClient) :
instance of the neptune client to use
df (pandas.DataFrame) :
Pandas DataFrame https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.html
subject_column (str, optional) :
The column name in the dataframe for the subject. Defaults to 's'
predicate_column (str, optional) :
The column name in the dataframe for the predicate. Defaults to 'p'
object_column (str, optional) :
The column name in the dataframe for the object. Defaults to 'o'
graph_column (str, optional) :
The column name in the dataframe for the graph if sending across quads. Defaults to 'g'
Returns
-------
bool
True if records were written
Examples
--------
Writing to Amazon Neptune
>>> import awswrangler as wr
>>> client = wr.neptune.connect(neptune_endpoint, neptune_port, iam_enabled=False)
>>> wr.neptune.gremlin.to_rdf_graph(
... df=df
... ) | awswrangler/neptune/neptune.py | to_rdf_graph | minwook-shin/aws-data-wrangler | python | def to_rdf_graph(client: NeptuneClient, df: pd.DataFrame, batch_size: int=50, subject_column: str='s', predicate_column: str='p', object_column: str='o', graph_column: str='g') -> bool:
"Write records stored in a DataFrame into Amazon Neptune.\n\n The DataFrame must consist of triples with column names for the subject, predicate, and object specified.\n If you want to add data into a named graph then you will also need the graph column.\n\n Parameters\n ----------\n client (NeptuneClient) :\n instance of the neptune client to use\n df (pandas.DataFrame) :\n Pandas DataFrame https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.html\n subject_column (str, optional) :\n The column name in the dataframe for the subject. Defaults to 's'\n predicate_column (str, optional) :\n The column name in the dataframe for the predicate. Defaults to 'p'\n object_column (str, optional) :\n The column name in the dataframe for the object. Defaults to 'o'\n graph_column (str, optional) :\n The column name in the dataframe for the graph if sending across quads. Defaults to 'g'\n\n Returns\n -------\n bool\n True if records were written\n\n Examples\n --------\n Writing to Amazon Neptune\n\n >>> import awswrangler as wr\n >>> client = wr.neptune.connect(neptune_endpoint, neptune_port, iam_enabled=False)\n >>> wr.neptune.gremlin.to_rdf_graph(\n ... df=df\n ... )\n "
is_quads = False
if pd.Series([subject_column, object_column, predicate_column]).isin(df.columns).all():
if (graph_column in df.columns):
is_quads = True
else:
raise exceptions.InvalidArgumentValue('Dataframe must contain at least the subject, predicate, and object columns defined or the defaults\n (s, p, o) to be saved to Amazon Neptune')
query = ''
for (index, row) in df.iterrows():
if is_quads:
insert = f'''INSERT DATA {{ GRAPH <{row[graph_column]}> {{<{row[subject_column]}>
                        <{str(row[predicate_column])}> <{row[object_column]}> . }} }}; '''
query = (query + insert)
else:
insert = f'''INSERT DATA {{ <{row[subject_column]}> <{str(row[predicate_column])}>
                        <{row[object_column]}> . }}; '''
query = (query + insert)
if ((index > 0) and ((index % batch_size) == 0)):
res = client.write_sparql(query)
if res:
query = ''
return client.write_sparql(query) |
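As a side note on the record above, here is a minimal sketch of the SPARQL text its row loop produces for a plain triple with the default column names; the IRIs are invented and nothing is sent to Neptune, and note that the object value is also wrapped in angle brackets, i.e. written as an IRI.
import pandas as pd

df = pd.DataFrame([{
    "s": "http://example.com/person/1",
    "p": "http://example.com/name",
    "o": "http://example.com/person/1/name",
}])
row = df.iloc[0]
# Mirrors the non-quad branch of the loop above (a triple-quoted f-string spanning two lines).
insert = f"""INSERT DATA {{ <{row['s']}> <{str(row['p'])}>
            <{row['o']}> . }}; """
print(insert)
# With the default batch_size=50, roughly every 50 such statements are concatenated into one
# query string and flushed with a single client.write_sparql(query) call; the remainder is
# written by the final call at the end of the loop.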
def connect(host: str, port: int, iam_enabled: bool=False, **kwargs: Any) -> NeptuneClient:
'Create a connection to a Neptune cluster.\n\n Parameters\n ----------\n host : str\n The host endpoint to connect to\n port : int\n The port endpoint to connect to\n iam_enabled : bool, optional\n True if IAM is enabled on the cluster. Defaults to False.\n\n Returns\n -------\n NeptuneClient\n [description]\n '
return NeptuneClient(host, port, iam_enabled, **kwargs) | -8,125,250,883,127,492,000 | Create a connection to a Neptune cluster.
Parameters
----------
host : str
The host endpoint to connect to
port : int
The port endpoint to connect to
iam_enabled : bool, optional
True if IAM is enabled on the cluster. Defaults to False.
Returns
-------
NeptuneClient
[description] | awswrangler/neptune/neptune.py | connect | minwook-shin/aws-data-wrangler | python | def connect(host: str, port: int, iam_enabled: bool=False, **kwargs: Any) -> NeptuneClient:
'Create a connection to a Neptune cluster.\n\n Parameters\n ----------\n host : str\n The host endpoint to connect to\n port : int\n The port endpoint to connect to\n iam_enabled : bool, optional\n True if IAM is enabled on the cluster. Defaults to False.\n\n Returns\n -------\n NeptuneClient\n [description]\n '
return NeptuneClient(host, port, iam_enabled, **kwargs) |
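A small usage sketch for the record above; the endpoint string is a placeholder and 8182 is just the usual Neptune port, so adjust both for a real cluster.
# Placeholder endpoint; extra kwargs are forwarded to NeptuneClient unchanged.
client = connect("my-cluster.cluster-abc123.us-east-1.neptune.amazonaws.com",
                 8182, iam_enabled=True)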
def flatten_nested_df(df: pd.DataFrame, include_prefix: bool=True, seperator: str='_', recursive: bool=True) -> pd.DataFrame:
'Flatten the lists and dictionaries of the input data frame.\n\n Parameters\n ----------\n df : pd.DataFrame\n The input data frame\n include_prefix : bool, optional\n If True, then it will prefix the new column name with the original column name.\n Defaults to True.\n seperator : str, optional\n The seperator to use between field names when a dictionary is exploded.\n Defaults to "_".\n recursive : bool, optional\n If True, then this will recurse the fields in the data frame. Defaults to True.\n\n Returns\n -------\n pd.DataFrame: The flattened data frame\n '
if (seperator is None):
seperator = '_'
df = df.reset_index()
s = (df.applymap(type) == list).all()
list_columns = s[s].index.tolist()
s = (df.applymap(type) == dict).all()
dict_columns = s[s].index.tolist()
if ((len(list_columns) > 0) or (len(dict_columns) > 0)):
new_columns = []
for col in dict_columns:
expanded = None
if include_prefix:
expanded = pd.json_normalize(df[col], sep=seperator).add_prefix(f'{col}{seperator}')
else:
expanded = pd.json_normalize(df[col], sep=seperator).add_prefix(f'{seperator}')
expanded.index = df.index
df = pd.concat([df, expanded], axis=1).drop(columns=[col])
new_columns.extend(expanded.columns)
for col in list_columns:
df = df.drop(columns=[col]).join(df[col].explode().to_frame())
new_columns.append(col)
s = (df[new_columns].applymap(type) == list).all()
list_columns = s[s].index.tolist()
s = (df[new_columns].applymap(type) == dict).all()
dict_columns = s[s].index.tolist()
if (recursive and ((len(list_columns) > 0) or (len(dict_columns) > 0))):
df = flatten_nested_df(df, include_prefix=include_prefix, seperator=seperator, recursive=recursive)
return df | -7,279,316,436,020,046,000 | Flatten the lists and dictionaries of the input data frame.
Parameters
----------
df : pd.DataFrame
The input data frame
include_prefix : bool, optional
If True, then it will prefix the new column name with the original column name.
Defaults to True.
seperator : str, optional
The seperator to use between field names when a dictionary is exploded.
Defaults to "_".
recursive : bool, optional
If True, then this will recurse the fields in the data frame. Defaults to True.
Returns
-------
pd.DataFrame: The flattened data frame | awswrangler/neptune/neptune.py | flatten_nested_df | minwook-shin/aws-data-wrangler | python | def flatten_nested_df(df: pd.DataFrame, include_prefix: bool=True, seperator: str='_', recursive: bool=True) -> pd.DataFrame:
'Flatten the lists and dictionaries of the input data frame.\n\n Parameters\n ----------\n df : pd.DataFrame\n The input data frame\n include_prefix : bool, optional\n If True, then it will prefix the new column name with the original column name.\n Defaults to True.\n seperator : str, optional\n The seperator to use between field names when a dictionary is exploded.\n Defaults to "_".\n recursive : bool, optional\n If True, then this will recurse the fields in the data frame. Defaults to True.\n\n Returns\n -------\n pd.DataFrame: The flattened data frame\n '
if (seperator is None):
seperator = '_'
df = df.reset_index()
s = (df.applymap(type) == list).all()
list_columns = s[s].index.tolist()
s = (df.applymap(type) == dict).all()
dict_columns = s[s].index.tolist()
if ((len(list_columns) > 0) or (len(dict_columns) > 0)):
new_columns = []
for col in dict_columns:
expanded = None
if include_prefix:
expanded = pd.json_normalize(df[col], sep=seperator).add_prefix(f'{col}{seperator}')
else:
expanded = pd.json_normalize(df[col], sep=seperator).add_prefix(f'{seperator}')
expanded.index = df.index
df = pd.concat([df, expanded], axis=1).drop(columns=[col])
new_columns.extend(expanded.columns)
for col in list_columns:
df = df.drop(columns=[col]).join(df[col].explode().to_frame())
new_columns.append(col)
s = (df[new_columns].applymap(type) == list).all()
list_columns = s[s].index.tolist()
s = (df[new_columns].applymap(type) == dict).all()
dict_columns = s[s].index.tolist()
if (recursive and ((len(list_columns) > 0) or (len(dict_columns) > 0))):
df = flatten_nested_df(df, include_prefix=include_prefix, seperator=seperator, recursive=recursive)
return df |
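A self-contained illustration of the helper above on an invented nested frame; only one level of dict/list nesting is exercised, so the recursive branch runs just once.
import pandas as pd

df = pd.DataFrame({
    "id": [1, 2],
    "meta": [{"a": 1, "b": {"c": 2}}, {"a": 3, "b": {"c": 4}}],
    "tags": [["x", "y"], ["z"]],
})
flat = flatten_nested_df(df)   # the helper defined in the record above
print(sorted(flat.columns))
# ['id', 'index', 'meta_a', 'meta_b_c', 'tags']
#  - 'meta' was expanded by pd.json_normalize and prefixed with 'meta_'
#  - 'tags' was exploded, so the first row now appears twice ('x' and 'y')
#  - 'index' is an artifact of the reset_index() call at the top of the helper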
def calc_bbox_overlap_union_iou(pred: (np.ndarray or None), teacher: np.ndarray) -> Tuple[(float, float, float)]:
'\n :param pred: ndarray (4, )\n :param teacher: ndarray (4, )\n :return: overlap, union, iou\n '
teacher_area = ((teacher[2] - teacher[0]) * (teacher[3] - teacher[1]))
if (pred is None):
return (0.0, teacher_area, 0.0)
pred_area = ((pred[2] - pred[0]) * (pred[3] - pred[1]))
intersection_width = np.maximum((np.minimum(pred[2], teacher[2]) - np.maximum(pred[0], teacher[0])), 0)
intersection_height = np.maximum((np.minimum(pred[3], teacher[3]) - np.maximum(pred[1], teacher[1])), 0)
overlap = (intersection_width * intersection_height)
union = ((teacher_area + pred_area) - overlap)
iou = (overlap / union)
return (overlap, union, iou) | 1,256,112,090,592,343,300 | :param pred: ndarray (4, )
:param teacher: ndarray (4, )
:return: overlap, union, iou | deepext_with_lightning/metrics/object_detection.py | calc_bbox_overlap_union_iou | pei223/deepext-with-lightning | python | def calc_bbox_overlap_union_iou(pred: (np.ndarray or None), teacher: np.ndarray) -> Tuple[(float, float, float)]:
'\n :param pred: ndarray (4, )\n :param teacher: ndarray (4, )\n :return: overlap, union, iou\n '
teacher_area = ((teacher[2] - teacher[0]) * (teacher[3] - teacher[1]))
if (pred is None):
return (0.0, teacher_area, 0.0)
pred_area = ((pred[2] - pred[0]) * (pred[3] - pred[1]))
intersection_width = np.maximum((np.minimum(pred[2], teacher[2]) - np.maximum(pred[0], teacher[0])), 0)
intersection_height = np.maximum((np.minimum(pred[3], teacher[3]) - np.maximum(pred[1], teacher[1])), 0)
overlap = (intersection_width * intersection_height)
union = ((teacher_area + pred_area) - overlap)
iou = (overlap / union)
return (overlap, union, iou) |
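A worked example for the record above with two hand-picked boxes, to make the overlap/union/IoU arithmetic concrete.
import numpy as np

pred = np.array([0.0, 0.0, 10.0, 10.0])     # 10 x 10 box, area 100
teacher = np.array([5.0, 5.0, 15.0, 15.0])  # 10 x 10 box shifted by (5, 5), area 100

overlap, union, iou = calc_bbox_overlap_union_iou(pred, teacher)
# intersection is the 5 x 5 square [5, 5, 10, 10]:
#   overlap = 25, union = 100 + 100 - 25 = 175, iou = 25 / 175 ≈ 0.143
print(overlap, union, iou)

# With no prediction, only the teacher area is reported:
print(calc_bbox_overlap_union_iou(None, teacher))  # (0.0, 100.0, 0.0)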
def update(self, preds: List[np.ndarray], targets: Union[(np.ndarray, torch.Tensor)]) -> None:
'\n :param preds: Sorted by score. (Batch size, bounding boxes by batch, 5(x_min, y_min, x_max, y_max, label))\n :param targets: (batch size, bounding box count, 5(x_min, y_min, x_max, y_max, label))\n :return:\n '
targets = (targets.cpu().detach().numpy() if isinstance(targets, torch.Tensor) else targets)
preds_by_class = []
for pred_bboxes in preds:
pred_by_class = [[] for _ in range(self._n_classes)]
for pred_bbox in pred_bboxes:
pred_by_class[int(pred_bbox[4])].append(pred_bbox)
preds_by_class.append(pred_by_class)
for i in range(targets.shape[0]):
bbox_annotations = targets[i, :, :]
bbox_annotations = bbox_annotations[(bbox_annotations[:, 4] >= 0)]
pred_by_class = preds_by_class[i]
'\n Compute per label within a single image.\n Accumulate total area / overlap per label.\n Compute IoU per image, then average over images for the final value.\n '
total_area_by_classes = [0 for _ in range(self._n_classes)]
total_overlap_by_classes = [0 for _ in range(self._n_classes)]
is_label_appeared = [False for _ in range(self._n_classes)]
for bbox_annotation in bbox_annotations:
label = int(bbox_annotation[4])
total_area_by_classes[label] += calc_area(bbox_annotation)
pred_bboxes = pred_by_class[label]
if ((pred_bboxes is None) or (len(pred_bboxes) == 0)):
continue
for pred_bbox in pred_bboxes:
(overlap, _, _) = calc_bbox_overlap_union_iou(pred_bbox, bbox_annotation)
total_overlap_by_classes[label] += overlap
if is_label_appeared[label]:
continue
total_area_by_classes[label] += calc_area(pred_bbox)
is_label_appeared[label] = True
for label in range(self._n_classes):
if (total_area_by_classes[label] <= 0):
continue
self.total_iou_by_classes[label] += (total_overlap_by_classes[label] / (total_area_by_classes[label] - total_overlap_by_classes[label]))
self.image_count_by_classes[label] += 1 | -6,679,091,060,328,572,000 | :param preds: Sorted by score. (Batch size, bounding boxes by batch, 5(x_min, y_min, x_max, y_max, label))
:param targets: (batch size, bounding box count, 5(x_min, y_min, x_max, y_max, label))
:return: | deepext_with_lightning/metrics/object_detection.py | update | pei223/deepext-with-lightning | python | def update(self, preds: List[np.ndarray], targets: Union[(np.ndarray, torch.Tensor)]) -> None:
'\n :param preds: Sorted by score. (Batch size, bounding boxes by batch, 5(x_min, y_min, x_max, y_max, label))\n :param targets: (batch size, bounding box count, 5(x_min, y_min, x_max, y_max, label))\n :return:\n '
targets = (targets.cpu().detach().numpy() if isinstance(targets, torch.Tensor) else targets)
preds_by_class = []
for pred_bboxes in preds:
pred_by_class = [[] for _ in range(self._n_classes)]
for pred_bbox in pred_bboxes:
pred_by_class[int(pred_bbox[4])].append(pred_bbox)
preds_by_class.append(pred_by_class)
for i in range(targets.shape[0]):
bbox_annotations = targets[i, :, :]
bbox_annotations = bbox_annotations[(bbox_annotations[:, 4] >= 0)]
pred_by_class = preds_by_class[i]
'\n Compute per label within a single image.\n Accumulate total area / overlap per label.\n Compute IoU per image, then average over images for the final value.\n '
total_area_by_classes = [0 for _ in range(self._n_classes)]
total_overlap_by_classes = [0 for _ in range(self._n_classes)]
is_label_appeared = [False for _ in range(self._n_classes)]
for bbox_annotation in bbox_annotations:
label = int(bbox_annotation[4])
total_area_by_classes[label] += calc_area(bbox_annotation)
pred_bboxes = pred_by_class[label]
if ((pred_bboxes is None) or (len(pred_bboxes) == 0)):
continue
for pred_bbox in pred_bboxes:
(overlap, _, _) = calc_bbox_overlap_union_iou(pred_bbox, bbox_annotation)
total_overlap_by_classes[label] += overlap
if is_label_appeared[label]:
continue
total_area_by_classes[label] += calc_area(pred_bbox)
is_label_appeared[label] = True
for label in range(self._n_classes):
if (total_area_by_classes[label] <= 0):
continue
self.total_iou_by_classes[label] += (total_overlap_by_classes[label] / (total_area_by_classes[label] - total_overlap_by_classes[label]))
self.image_count_by_classes[label] += 1 |
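To make the accumulation in the record above concrete, here is the per-image, per-class arithmetic for one ground-truth box and one same-label prediction; the numbers are hand-picked, not data from the metric itself.
# ground truth [0, 0, 10, 10] -> area 100; prediction [0, 0, 10, 8] -> area 80
# overlap between the two boxes is 10 * 8 = 80
total_area = 100 + 80                                # GT area + predicted area for the label
total_overlap = 80
iou = total_overlap / (total_area - total_overlap)   # 80 / 100 = 0.8
# This per-image value is added to total_iou_by_classes[label] and the image counter is
# bumped; the final metric is presumably the mean of these values per class.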
def update(self, preds: List[np.ndarray], targets: Union[(np.ndarray, torch.Tensor)]) -> None:
'\n :param preds: Sorted by score. (Batch size, bounding boxes by batch, 5(x_min, y_min, x_max, y_max, label))\n :param targets: (batch size, bounding box count, 5(x_min, y_min, x_max, y_max, label))\n :return:\n '
targets = (targets.cpu().detach().numpy() if isinstance(targets, torch.Tensor) else targets)
preds_by_class = []
for pred_bboxes in preds:
pred_by_class = [[] for _ in range(self._n_classes)]
for pred_bbox in pred_bboxes:
pred_by_class[int(pred_bbox[4])].append(pred_bbox)
preds_by_class.append(pred_by_class)
for i in range(targets.shape[0]):
bbox_annotations = targets[i, :, :]
bbox_annotations = bbox_annotations[(bbox_annotations[:, 4] >= 0)]
pred_by_class = preds_by_class[i]
applied_bbox_count_by_classes = [0 for _ in range(self._n_classes)]
for bbox_annotation in bbox_annotations:
label = int(bbox_annotation[4])
pred_bboxes = pred_by_class[label]
if ((pred_bboxes is None) or (len(pred_bboxes) == 0)):
self.fn_by_classes[label] += 1
continue
is_matched = False
for pred_bbox in pred_bboxes:
(overlap, union, iou) = calc_bbox_overlap_union_iou(pred_bbox, bbox_annotation)
if (iou >= 0.5):
applied_bbox_count_by_classes[label] += 1
self.tp_by_classes[label] += 1
is_matched = True
break
if (not is_matched):
self.fn_by_classes[label] += 1
for label in range(self._n_classes):
self.fp_by_classes[label] += (len(pred_by_class[label]) - applied_bbox_count_by_classes[label]) | 446,419,670,556,090,200 | :param preds: Sorted by score. (Batch size, bounding boxes by batch, 5(x_min, y_min, x_max, y_max, label))
:param targets: (batch size, bounding box count, 5(x_min, y_min, x_max, y_max, label))
:return: | deepext_with_lightning/metrics/object_detection.py | update | pei223/deepext-with-lightning | python | def update(self, preds: List[np.ndarray], targets: Union[(np.ndarray, torch.Tensor)]) -> None:
'\n :param preds: Sorted by score. (Batch size, bounding boxes by batch, 5(x_min, y_min, x_max, y_max, label))\n :param targets: (batch size, bounding box count, 5(x_min, y_min, x_max, y_max, label))\n :return:\n '
targets = (targets.cpu().detach().numpy() if isinstance(targets, torch.Tensor) else targets)
preds_by_class = []
for pred_bboxes in preds:
pred_by_class = [[] for _ in range(self._n_classes)]
for pred_bbox in pred_bboxes:
pred_by_class[int(pred_bbox[4])].append(pred_bbox)
preds_by_class.append(pred_by_class)
for i in range(targets.shape[0]):
bbox_annotations = targets[i, :, :]
bbox_annotations = bbox_annotations[(bbox_annotations[:, 4] >= 0)]
pred_by_class = preds_by_class[i]
applied_bbox_count_by_classes = [0 for _ in range(self._n_classes)]
for bbox_annotation in bbox_annotations:
label = int(bbox_annotation[4])
pred_bboxes = pred_by_class[label]
if ((pred_bboxes is None) or (len(pred_bboxes) == 0)):
self.fn_by_classes[label] += 1
continue
is_matched = False
for pred_bbox in pred_bboxes:
(overlap, union, iou) = calc_bbox_overlap_union_iou(pred_bbox, bbox_annotation)
if (iou >= 0.5):
applied_bbox_count_by_classes[label] += 1
self.tp_by_classes[label] += 1
is_matched = True
break
if (not is_matched):
self.fn_by_classes[label] += 1
for label in range(self._n_classes):
self.fp_by_classes[label] += (len(pred_by_class[label]) - applied_bbox_count_by_classes[label]) |
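The record above only accumulates tp/fp/fn counters at an IoU threshold of 0.5; its compute step is not part of this record, but the per-class numbers such counters conventionally yield are the usual ones:
import numpy as np

tp = np.array([8, 3])   # made-up counters for a two-class problem
fp = np.array([2, 1])
fn = np.array([4, 0])

recall = tp / np.maximum(tp + fn, 1)      # fraction of ground-truth boxes that were found
precision = tp / np.maximum(tp + fp, 1)   # fraction of predictions that matched a box
print(recall)     # [0.66666667 1.        ]
print(precision)  # [0.8        0.75      ]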
def update(self, preds: List[np.ndarray], targets: Union[(np.ndarray, torch.Tensor)]) -> None:
'\n :param preds: Sorted by score. (Batch size, bounding boxes by batch, 5(x_min, y_min, x_max, y_max, label))\n :param targets: (batch size, bounding box count, 5(x_min, y_min, x_max, y_max, label))\n :return:\n '
targets = (targets.cpu().detach().numpy() if isinstance(targets, torch.Tensor) else targets)
for i in range(len(preds)):
(pred_bboxes, target_bboxes) = (preds[i], targets[i])
target_bboxes = target_bboxes[(target_bboxes[:, 4] >= 0)]
self._update_num_annotations(target_bboxes)
self._update_tp_fp_score(pred_bboxes, target_bboxes) | 5,742,389,805,774,645,000 | :param preds: Sorted by score. (Batch size, bounding boxes by batch, 5(x_min, y_min, x_max, y_max, label))
:param targets: (batch size, bounding box count, 5(x_min, y_min, x_max, y_max, label))
:return: | deepext_with_lightning/metrics/object_detection.py | update | pei223/deepext-with-lightning | python | def update(self, preds: List[np.ndarray], targets: Union[(np.ndarray, torch.Tensor)]) -> None:
'\n :param preds: Sorted by score. (Batch size, bounding boxes by batch, 5(x_min, y_min, x_max, y_max, label))\n :param targets: (batch size, bounding box count, 5(x_min, y_min, x_max, y_max, label))\n :return:\n '
targets = (targets.cpu().detach().numpy() if isinstance(targets, torch.Tensor) else targets)
for i in range(len(preds)):
(pred_bboxes, target_bboxes) = (preds[i], targets[i])
target_bboxes = target_bboxes[(target_bboxes[:, 4] >= 0)]
self._update_num_annotations(target_bboxes)
self._update_tp_fp_score(pred_bboxes, target_bboxes) |
def _update_tp_fp_score(self, pred_bboxes: np.ndarray, target_bboxes: np.ndarray):
'\n :param pred_bboxes: (N, 6(xmin, ymin, xmax, ymax, class, score))\n :param target_bboxes: (N, 5(xmin, ymin, xmax, ymax, class))\n '
detected_indices = []
for i in range(pred_bboxes.shape[0]):
(pred_label, pred_score) = (int(pred_bboxes[i][4]), pred_bboxes[i][5])
matched = False
for j in filter((lambda k: ((int(target_bboxes[k][4]) == pred_label) and (k not in detected_indices))), range(target_bboxes.shape[0])):
(overlap, union, iou) = calc_bbox_overlap_union_iou(pred_bboxes[i], target_bboxes[j])
if (iou >= 0.5):
detected_indices.append(j)
self.fp_list_by_classes[pred_label].append(0)
self.tp_list_by_classes[pred_label].append(1)
matched = True
break
if (not matched):
self.fp_list_by_classes[pred_label].append(1)
self.tp_list_by_classes[pred_label].append(0)
self.score_list_by_classes[pred_label].append(pred_score) | 6,164,836,586,989,498,000 | :param pred_bboxes: (N, 6(xmin, ymin, xmax, ymax, class, score))
:param target_bboxes: (N, 5(xmin, ymin, xmax, ymax, class)) | deepext_with_lightning/metrics/object_detection.py | _update_tp_fp_score | pei223/deepext-with-lightning | python | def _update_tp_fp_score(self, pred_bboxes: np.ndarray, target_bboxes: np.ndarray):
'\n :param pred_bboxes: (N, 6(xmin, ymin, xmax, ymax, class, score))\n :param target_bboxes: (N, 5(xmin, ymin, xmax, ymax, class))\n '
detected_indices = []
for i in range(pred_bboxes.shape[0]):
(pred_label, pred_score) = (int(pred_bboxes[i][4]), pred_bboxes[i][5])
matched = False
for j in filter((lambda k: ((int(target_bboxes[k][4]) == pred_label) and (k not in detected_indices))), range(target_bboxes.shape[0])):
(overlap, union, iou) = calc_bbox_overlap_union_iou(pred_bboxes[i], target_bboxes[j])
if (iou >= 0.5):
detected_indices.append(j)
self.fp_list_by_classes[pred_label].append(0)
self.tp_list_by_classes[pred_label].append(1)
matched = True
break
if (not matched):
self.fp_list_by_classes[pred_label].append(1)
self.tp_list_by_classes[pred_label].append(0)
self.score_list_by_classes[pred_label].append(pred_score) |
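The tp/fp/score lists accumulated above are the standard ingredients of a per-class average precision. The metric's own compute step is not included in this record, so the following is only a conventional Pascal-VOC style sketch; the function name and details are illustrative, not the library's API.
import numpy as np

def average_precision(tp_list, fp_list, score_list, num_annotations):
    # Sort detections by confidence, accumulate TP/FP, then integrate the
    # precision-recall curve (every-point interpolation).
    if num_annotations == 0:
        return 0.0
    order = np.argsort(score_list)[::-1]
    tp = np.cumsum(np.asarray(tp_list)[order])
    fp = np.cumsum(np.asarray(fp_list)[order])
    recall = tp / num_annotations
    precision = tp / np.maximum(tp + fp, np.finfo(np.float64).eps)
    mrec = np.concatenate(([0.0], recall, [1.0]))
    mpre = np.concatenate(([0.0], precision, [0.0]))
    for i in range(mpre.size - 1, 0, -1):   # enforce a monotonically decreasing envelope
        mpre[i - 1] = max(mpre[i - 1], mpre[i])
    idx = np.where(mrec[1:] != mrec[:-1])[0]
    return float(np.sum((mrec[idx + 1] - mrec[idx]) * mpre[idx + 1]))

# Three detections for one class, two of which match one of the two ground-truth boxes:
print(average_precision([1, 0, 1], [0, 1, 0], [0.9, 0.8, 0.7], num_annotations=2))  # ~0.833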
def _update_num_annotations(self, target_bboxes: np.ndarray):
'\n :param target_bboxes: (N, 5(xmin, ymin, xmax, ymax, class))\n '
counts = list(map((lambda i: np.count_nonzero((target_bboxes[:, 4] == i))), range(self._n_classes)))
self.num_annotations_by_classes = list(map((lambda i: (counts[i] + self.num_annotations_by_classes[i])), range(self._n_classes))) | -2,288,792,466,958,422,300 | :param target_bboxes: (N, 5(xmin, ymin, xmax, ymax, class)) | deepext_with_lightning/metrics/object_detection.py | _update_num_annotations | pei223/deepext-with-lightning | python | def _update_num_annotations(self, target_bboxes: np.ndarray):
'\n \n '
counts = list(map((lambda i: np.count_nonzero((target_bboxes[:, 4] == i))), range(self._n_classes)))
self.num_annotations_by_classes = list(map((lambda i: (counts[i] + self.num_annotations_by_classes[i])), range(self._n_classes))) |
@click.group()
def cli():
'This script showcases different terminal UI helpers in Click.'
pass | -6,101,637,174,138,122,000 | This script showcases different terminal UI helpers in Click. | examples/termui/termui.py | cli | D4N/asyncclick | python | @click.group()
def cli():
pass |
@cli.command()
def colordemo():
'Demonstrates ANSI color support.'
for color in ('red', 'green', 'blue'):
click.echo(click.style('I am colored {}'.format(color), fg=color))
click.echo(click.style('I am background colored {}'.format(color), bg=color)) | -6,081,257,435,468,193,000 | Demonstrates ANSI color support. | examples/termui/termui.py | colordemo | D4N/asyncclick | python | @cli.command()
def colordemo():
for color in ('red', 'green', 'blue'):
click.echo(click.style('I am colored {}'.format(color), fg=color))
click.echo(click.style('I am background colored {}'.format(color), bg=color)) |
@cli.command()
def pager():
'Demonstrates using the pager.'
lines = []
for x in range(200):
lines.append('{}. Hello World!'.format(click.style(str(x), fg='green')))
click.echo_via_pager('\n'.join(lines)) | -7,169,205,609,182,572,000 | Demonstrates using the pager. | examples/termui/termui.py | pager | D4N/asyncclick | python | @cli.command()
def pager():
lines = []
for x in range(200):
lines.append('{}. Hello World!'.format(click.style(str(x), fg='green')))
click.echo_via_pager('\n'.join(lines)) |
@cli.command()
@click.option('--count', default=8000, type=click.IntRange(1, 100000), help='The number of items to process.')
def progress(count):
'Demonstrates the progress bar.'
items = range(count)
def process_slowly(item):
time.sleep((0.002 * random.random()))
def filter(items):
for item in items:
if (random.random() > 0.3):
(yield item)
with click.progressbar(items, label='Processing accounts', fill_char=click.style('#', fg='green')) as bar:
for item in bar:
process_slowly(item)
def show_item(item):
if (item is not None):
return 'Item #{}'.format(item)
with click.progressbar(filter(items), label='Committing transaction', fill_char=click.style('#', fg='yellow'), item_show_func=show_item) as bar:
for item in bar:
process_slowly(item)
with click.progressbar(length=count, label='Counting', bar_template='%(label)s %(bar)s | %(info)s', fill_char=click.style(u'█', fg='cyan'), empty_char=' ') as bar:
for item in bar:
process_slowly(item)
with click.progressbar(length=count, width=0, show_percent=False, show_eta=False, fill_char=click.style('#', fg='magenta')) as bar:
for item in bar:
process_slowly(item)
steps = [(math.exp(((x * 1.0) / 20)) - 1) for x in range(20)]
count = int(sum(steps))
with click.progressbar(length=count, show_percent=False, label='Slowing progress bar', fill_char=click.style(u'█', fg='green')) as bar:
for item in steps:
time.sleep(item)
bar.update(item) | 6,746,375,855,419,562,000 | Demonstrates the progress bar. | examples/termui/termui.py | progress | D4N/asyncclick | python | @cli.command()
@click.option('--count', default=8000, type=click.IntRange(1, 100000), help='The number of items to process.')
def progress(count):
items = range(count)
def process_slowly(item):
time.sleep((0.002 * random.random()))
def filter(items):
for item in items:
if (random.random() > 0.3):
(yield item)
with click.progressbar(items, label='Processing accounts', fill_char=click.style('#', fg='green')) as bar:
for item in bar:
process_slowly(item)
def show_item(item):
if (item is not None):
return 'Item #{}'.format(item)
with click.progressbar(filter(items), label='Committing transaction', fill_char=click.style('#', fg='yellow'), item_show_func=show_item) as bar:
for item in bar:
process_slowly(item)
with click.progressbar(length=count, label='Counting', bar_template='%(label)s %(bar)s | %(info)s', fill_char=click.style(u'█', fg='cyan'), empty_char=' ') as bar:
for item in bar:
process_slowly(item)
with click.progressbar(length=count, width=0, show_percent=False, show_eta=False, fill_char=click.style('#', fg='magenta')) as bar:
for item in bar:
process_slowly(item)
steps = [(math.exp(((x * 1.0) / 20)) - 1) for x in range(20)]
count = int(sum(steps))
with click.progressbar(length=count, show_percent=False, label='Slowing progress bar', fill_char=click.style(u'█', fg='green')) as bar:
for item in steps:
time.sleep(item)
bar.update(item) |
@cli.command()
@click.argument('url')
def open(url):
'Opens a file or URL in the default application.'
click.launch(url) | -104,038,030,430,769,630 | Opens a file or URL in the default application. | examples/termui/termui.py | open | D4N/asyncclick | python | @cli.command()
@click.argument('url')
def open(url):
click.launch(url) |
@cli.command()
@click.argument('url')
def locate(url):
'Opens the location of a file or URL in the default file manager.'
click.launch(url, locate=True) | 1,854,477,687,427,131,400 | Opens the location of a file or URL in the default file manager. | examples/termui/termui.py | locate | D4N/asyncclick | python | @cli.command()
@click.argument('url')
def locate(url):
click.launch(url, locate=True) |
@cli.command()
def edit():
'Opens an editor with some text in it.'
MARKER = '# Everything below is ignored\n'
message = click.edit('\n\n{}'.format(MARKER))
if (message is not None):
msg = message.split(MARKER, 1)[0].rstrip('\n')
if (not msg):
click.echo('Empty message!')
else:
click.echo('Message:\n{}'.format(msg))
else:
click.echo('You did not enter anything!') | -2,586,215,052,840,120,000 | Opens an editor with some text in it. | examples/termui/termui.py | edit | D4N/asyncclick | python | @cli.command()
def edit():
MARKER = '# Everything below is ignored\n'
message = click.edit('\n\n{}'.format(MARKER))
if (message is not None):
msg = message.split(MARKER, 1)[0].rstrip('\n')
if (not msg):
click.echo('Empty message!')
else:
click.echo('Message:\n{}'.format(msg))
else:
click.echo('You did not enter anything!') |
@cli.command()
def clear():
'Clears the entire screen.'
click.clear() | -3,175,494,085,147,564,500 | Clears the entire screen. | examples/termui/termui.py | clear | D4N/asyncclick | python | @cli.command()
def clear():
click.clear() |
@cli.command()
def pause():
'Waits for the user to press a button.'
click.pause() | 2,847,341,040,750,745,000 | Waits for the user to press a button. | examples/termui/termui.py | pause | D4N/asyncclick | python | @cli.command()
def pause():
click.pause() |
@cli.command()
def menu():
'Shows a simple menu.'
menu = 'main'
while 1:
if (menu == 'main'):
click.echo('Main menu:')
click.echo(' d: debug menu')
click.echo(' q: quit')
char = click.getchar()
if (char == 'd'):
menu = 'debug'
elif (char == 'q'):
menu = 'quit'
else:
click.echo('Invalid input')
elif (menu == 'debug'):
click.echo('Debug menu')
click.echo(' b: back')
char = click.getchar()
if (char == 'b'):
menu = 'main'
else:
click.echo('Invalid input')
elif (menu == 'quit'):
return | 5,626,892,119,203,902,000 | Shows a simple menu. | examples/termui/termui.py | menu | D4N/asyncclick | python | @cli.command()
def menu():
menu = 'main'
while 1:
if (menu == 'main'):
click.echo('Main menu:')
click.echo(' d: debug menu')
click.echo(' q: quit')
char = click.getchar()
if (char == 'd'):
menu = 'debug'
elif (char == 'q'):
menu = 'quit'
else:
click.echo('Invalid input')
elif (menu == 'debug'):
click.echo('Debug menu')
click.echo(' b: back')
char = click.getchar()
if (char == 'b'):
menu = 'main'
else:
click.echo('Invalid input')
elif (menu == 'quit'):
return |
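None of the demo records above show how the `cli` group is invoked; if no packaging entry point is used, a minimal way to run them is the standard Python guard below (an assumption, not part of these records).
if __name__ == "__main__":
    cli()
# e.g.  python termui.py progress --count 1000
#       python termui.py menu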
def sort(a, axis=(- 1), kind=None, order=None):
"\n Return a sorted copy of an array.\n\n Parameters\n ----------\n a : array_like\n Array to be sorted.\n axis : int or None, optional\n Axis along which to sort. If None, the array is flattened before\n sorting. The default is -1, which sorts along the last axis.\n kind : {'quicksort', 'mergesort', 'heapsort', 'stable'}, optional\n Sorting algorithm. The default is 'quicksort'. Note that both 'stable'\n and 'mergesort' use timsort or radix sort under the covers and, in general,\n the actual implementation will vary with data type. The 'mergesort' option\n is retained for backwards compatibility.\n\n .. versionchanged:: 1.15.0.\n The 'stable' option was added.\n\n order : str or list of str, optional\n When `a` is an array with fields defined, this argument specifies\n which fields to compare first, second, etc. A single field can\n be specified as a string, and not all fields need be specified,\n but unspecified fields will still be used, in the order in which\n they come up in the dtype, to break ties.\n\n Returns\n -------\n sorted_array : ndarray\n Array of the same type and shape as `a`.\n\n Threading\n ---------\n Up to 8 threads\n\n See Also\n --------\n ndarray.sort : Method to sort an array in-place.\n argsort : Indirect sort.\n lexsort : Indirect stable sort on multiple keys.\n searchsorted : Find elements in a sorted array.\n partition : Partial sort.\n\n Notes\n -----\n The various sorting algorithms are characterized by their average speed,\n worst case performance, work space size, and whether they are stable. A\n stable sort keeps items with the same key in the same relative\n order. The four algorithms implemented in NumPy have the following\n properties:\n\n =========== ======= ============= ============ ========\n kind speed worst case work space stable\n =========== ======= ============= ============ ========\n 'quicksort' 1 O(n^2) 0 no\n 'heapsort' 3 O(n*log(n)) 0 no\n 'mergesort' 2 O(n*log(n)) ~n/2 yes\n 'timsort' 2 O(n*log(n)) ~n/2 yes\n =========== ======= ============= ============ ========\n\n .. note:: The datatype determines which of 'mergesort' or 'timsort'\n is actually used, even if 'mergesort' is specified. User selection\n at a finer scale is not currently available.\n\n All the sort algorithms make temporary copies of the data when\n sorting along any but the last axis. Consequently, sorting along\n the last axis is faster and uses less space than sorting along\n any other axis.\n\n The sort order for complex numbers is lexicographic. If both the real\n and imaginary parts are non-nan then the order is determined by the\n real parts except when they are equal, in which case the order is\n determined by the imaginary parts.\n\n Previous to numpy 1.4.0 sorting real and complex arrays containing nan\n values led to undefined behaviour. In numpy versions >= 1.4.0 nan\n values are sorted to the end. The extended sort order is:\n\n * Real: [R, nan]\n * Complex: [R + Rj, R + nanj, nan + Rj, nan + nanj]\n\n where R is a non-nan real value. Complex values with the same nan\n placements are sorted according to the non-nan part if it exists.\n Non-nan values are sorted as before.\n\n .. 
versionadded:: 1.12.0\n\n quicksort has been changed to `introsort <https://en.wikipedia.org/wiki/Introsort>`_.\n When sorting does not make enough progress it switches to\n `heapsort <https://en.wikipedia.org/wiki/Heapsort>`_.\n This implementation makes quicksort O(n*log(n)) in the worst case.\n\n 'stable' automatically chooses the best stable sorting algorithm\n for the data type being sorted.\n It, along with 'mergesort' is currently mapped to\n `timsort <https://en.wikipedia.org/wiki/Timsort>`_\n or `radix sort <https://en.wikipedia.org/wiki/Radix_sort>`_\n depending on the data type.\n API forward compatibility currently limits the\n ability to select the implementation and it is hardwired for the different\n data types.\n\n .. versionadded:: 1.17.0\n\n Timsort is added for better performance on already or nearly\n sorted data. On random data timsort is almost identical to\n mergesort. It is now used for stable sort while quicksort is still the\n default sort if none is chosen. For timsort details, refer to\n `CPython listsort.txt <https://github.com/python/cpython/blob/3.7/Objects/listsort.txt>`_.\n 'mergesort' and 'stable' are mapped to radix sort for integer data types. Radix sort is an\n O(n) sort instead of O(n log n).\n\n .. versionchanged:: 1.18.0\n\n NaT now sorts to the end of arrays for consistency with NaN.\n\n Examples\n --------\n >>> a = np.array([[1,4],[3,1]])\n >>> np.sort(a) # sort along the last axis\n array([[1, 4],\n [1, 3]])\n >>> np.sort(a, axis=None) # sort the flattened array\n array([1, 1, 3, 4])\n >>> np.sort(a, axis=0) # sort along the first axis\n array([[1, 1],\n [3, 4]])\n\n Use the `order` keyword to specify a field to use when sorting a\n structured array:\n\n >>> dtype = [('name', 'S10'), ('height', float), ('age', int)]\n >>> values = [('Arthur', 1.8, 41), ('Lancelot', 1.9, 38),\n ... ('Galahad', 1.7, 38)]\n >>> a = np.array(values, dtype=dtype) # create a structured array\n >>> np.sort(a, order='height') # doctest: +SKIP\n array([('Galahad', 1.7, 38), ('Arthur', 1.8, 41),\n ('Lancelot', 1.8999999999999999, 38)],\n dtype=[('name', '|S10'), ('height', '<f8'), ('age', '<i4')])\n\n Sort by age, then height if ages are equal:\n\n >>> np.sort(a, order=['age', 'height']) # doctest: +SKIP\n array([('Galahad', 1.7, 38), ('Lancelot', 1.8999999999999999, 38),\n ('Arthur', 1.8, 41)],\n dtype=[('name', '|S10'), ('height', '<f8'), ('age', '<i4')])\n\n "
if (axis is None):
a = asanyarray(a).flatten()
axis = (- 1)
try:
sort(a, kind=kind)
return a
except Exception:
pass
else:
a = asanyarray(a).copy(order='K')
a.sort(axis=axis, kind=kind, order=order)
return a | -6,831,761,510,882,574,000 | Return a sorted copy of an array.
Parameters
----------
a : array_like
Array to be sorted.
axis : int or None, optional
Axis along which to sort. If None, the array is flattened before
sorting. The default is -1, which sorts along the last axis.
kind : {'quicksort', 'mergesort', 'heapsort', 'stable'}, optional
Sorting algorithm. The default is 'quicksort'. Note that both 'stable'
and 'mergesort' use timsort or radix sort under the covers and, in general,
the actual implementation will vary with data type. The 'mergesort' option
is retained for backwards compatibility.
.. versionchanged:: 1.15.0.
The 'stable' option was added.
order : str or list of str, optional
When `a` is an array with fields defined, this argument specifies
which fields to compare first, second, etc. A single field can
be specified as a string, and not all fields need be specified,
but unspecified fields will still be used, in the order in which
they come up in the dtype, to break ties.
Returns
-------
sorted_array : ndarray
Array of the same type and shape as `a`.
Threading
---------
Up to 8 threads
See Also
--------
ndarray.sort : Method to sort an array in-place.
argsort : Indirect sort.
lexsort : Indirect stable sort on multiple keys.
searchsorted : Find elements in a sorted array.
partition : Partial sort.
Notes
-----
The various sorting algorithms are characterized by their average speed,
worst case performance, work space size, and whether they are stable. A
stable sort keeps items with the same key in the same relative
order. The four algorithms implemented in NumPy have the following
properties:
=========== ======= ============= ============ ========
kind speed worst case work space stable
=========== ======= ============= ============ ========
'quicksort' 1 O(n^2) 0 no
'heapsort' 3 O(n*log(n)) 0 no
'mergesort' 2 O(n*log(n)) ~n/2 yes
'timsort' 2 O(n*log(n)) ~n/2 yes
=========== ======= ============= ============ ========
.. note:: The datatype determines which of 'mergesort' or 'timsort'
is actually used, even if 'mergesort' is specified. User selection
at a finer scale is not currently available.
All the sort algorithms make temporary copies of the data when
sorting along any but the last axis. Consequently, sorting along
the last axis is faster and uses less space than sorting along
any other axis.
The sort order for complex numbers is lexicographic. If both the real
and imaginary parts are non-nan then the order is determined by the
real parts except when they are equal, in which case the order is
determined by the imaginary parts.
Previous to numpy 1.4.0 sorting real and complex arrays containing nan
values led to undefined behaviour. In numpy versions >= 1.4.0 nan
values are sorted to the end. The extended sort order is:
* Real: [R, nan]
* Complex: [R + Rj, R + nanj, nan + Rj, nan + nanj]
where R is a non-nan real value. Complex values with the same nan
placements are sorted according to the non-nan part if it exists.
Non-nan values are sorted as before.
.. versionadded:: 1.12.0
quicksort has been changed to `introsort <https://en.wikipedia.org/wiki/Introsort>`_.
When sorting does not make enough progress it switches to
`heapsort <https://en.wikipedia.org/wiki/Heapsort>`_.
This implementation makes quicksort O(n*log(n)) in the worst case.
'stable' automatically chooses the best stable sorting algorithm
for the data type being sorted.
It, along with 'mergesort' is currently mapped to
`timsort <https://en.wikipedia.org/wiki/Timsort>`_
or `radix sort <https://en.wikipedia.org/wiki/Radix_sort>`_
depending on the data type.
API forward compatibility currently limits the
ability to select the implementation and it is hardwired for the different
data types.
.. versionadded:: 1.17.0
Timsort is added for better performance on already or nearly
sorted data. On random data timsort is almost identical to
mergesort. It is now used for stable sort while quicksort is still the
default sort if none is chosen. For timsort details, refer to
`CPython listsort.txt <https://github.com/python/cpython/blob/3.7/Objects/listsort.txt>`_.
'mergesort' and 'stable' are mapped to radix sort for integer data types. Radix sort is an
O(n) sort instead of O(n log n).
.. versionchanged:: 1.18.0
NaT now sorts to the end of arrays for consistency with NaN.
Examples
--------
>>> a = np.array([[1,4],[3,1]])
>>> np.sort(a) # sort along the last axis
array([[1, 4],
[1, 3]])
>>> np.sort(a, axis=None) # sort the flattened array
array([1, 1, 3, 4])
>>> np.sort(a, axis=0) # sort along the first axis
array([[1, 1],
[3, 4]])
Use the `order` keyword to specify a field to use when sorting a
structured array:
>>> dtype = [('name', 'S10'), ('height', float), ('age', int)]
>>> values = [('Arthur', 1.8, 41), ('Lancelot', 1.9, 38),
... ('Galahad', 1.7, 38)]
>>> a = np.array(values, dtype=dtype) # create a structured array
>>> np.sort(a, order='height') # doctest: +SKIP
array([('Galahad', 1.7, 38), ('Arthur', 1.8, 41),
('Lancelot', 1.8999999999999999, 38)],
dtype=[('name', '|S10'), ('height', '<f8'), ('age', '<i4')])
Sort by age, then height if ages are equal:
>>> np.sort(a, order=['age', 'height']) # doctest: +SKIP
array([('Galahad', 1.7, 38), ('Lancelot', 1.8999999999999999, 38),
('Arthur', 1.8, 41)],
dtype=[('name', '|S10'), ('height', '<f8'), ('age', '<i4')]) | src/pnumpy/sort.py | sort | Quansight/numpy-threading-extensions | python | def sort(a, axis=(- 1), kind=None, order=None):
"\n Return a sorted copy of an array.\n\n Parameters\n ----------\n a : array_like\n Array to be sorted.\n axis : int or None, optional\n Axis along which to sort. If None, the array is flattened before\n sorting. The default is -1, which sorts along the last axis.\n kind : {'quicksort', 'mergesort', 'heapsort', 'stable'}, optional\n Sorting algorithm. The default is 'quicksort'. Note that both 'stable'\n and 'mergesort' use timsort or radix sort under the covers and, in general,\n the actual implementation will vary with data type. The 'mergesort' option\n is retained for backwards compatibility.\n\n .. versionchanged:: 1.15.0.\n The 'stable' option was added.\n\n order : str or list of str, optional\n When `a` is an array with fields defined, this argument specifies\n which fields to compare first, second, etc. A single field can\n be specified as a string, and not all fields need be specified,\n but unspecified fields will still be used, in the order in which\n they come up in the dtype, to break ties.\n\n Returns\n -------\n sorted_array : ndarray\n Array of the same type and shape as `a`.\n\n Threading\n ---------\n Up to 8 threads\n\n See Also\n --------\n ndarray.sort : Method to sort an array in-place.\n argsort : Indirect sort.\n lexsort : Indirect stable sort on multiple keys.\n searchsorted : Find elements in a sorted array.\n partition : Partial sort.\n\n Notes\n -----\n The various sorting algorithms are characterized by their average speed,\n worst case performance, work space size, and whether they are stable. A\n stable sort keeps items with the same key in the same relative\n order. The four algorithms implemented in NumPy have the following\n properties:\n\n =========== ======= ============= ============ ========\n kind speed worst case work space stable\n =========== ======= ============= ============ ========\n 'quicksort' 1 O(n^2) 0 no\n 'heapsort' 3 O(n*log(n)) 0 no\n 'mergesort' 2 O(n*log(n)) ~n/2 yes\n 'timsort' 2 O(n*log(n)) ~n/2 yes\n =========== ======= ============= ============ ========\n\n .. note:: The datatype determines which of 'mergesort' or 'timsort'\n is actually used, even if 'mergesort' is specified. User selection\n at a finer scale is not currently available.\n\n All the sort algorithms make temporary copies of the data when\n sorting along any but the last axis. Consequently, sorting along\n the last axis is faster and uses less space than sorting along\n any other axis.\n\n The sort order for complex numbers is lexicographic. If both the real\n and imaginary parts are non-nan then the order is determined by the\n real parts except when they are equal, in which case the order is\n determined by the imaginary parts.\n\n Previous to numpy 1.4.0 sorting real and complex arrays containing nan\n values led to undefined behaviour. In numpy versions >= 1.4.0 nan\n values are sorted to the end. The extended sort order is:\n\n * Real: [R, nan]\n * Complex: [R + Rj, R + nanj, nan + Rj, nan + nanj]\n\n where R is a non-nan real value. Complex values with the same nan\n placements are sorted according to the non-nan part if it exists.\n Non-nan values are sorted as before.\n\n .. 
versionadded:: 1.12.0\n\n quicksort has been changed to `introsort <https://en.wikipedia.org/wiki/Introsort>`_.\n When sorting does not make enough progress it switches to\n `heapsort <https://en.wikipedia.org/wiki/Heapsort>`_.\n This implementation makes quicksort O(n*log(n)) in the worst case.\n\n 'stable' automatically chooses the best stable sorting algorithm\n for the data type being sorted.\n It, along with 'mergesort' is currently mapped to\n `timsort <https://en.wikipedia.org/wiki/Timsort>`_\n or `radix sort <https://en.wikipedia.org/wiki/Radix_sort>`_\n depending on the data type.\n API forward compatibility currently limits the\n ability to select the implementation and it is hardwired for the different\n data types.\n\n .. versionadded:: 1.17.0\n\n Timsort is added for better performance on already or nearly\n sorted data. On random data timsort is almost identical to\n mergesort. It is now used for stable sort while quicksort is still the\n default sort if none is chosen. For timsort details, refer to\n `CPython listsort.txt <https://github.com/python/cpython/blob/3.7/Objects/listsort.txt>`_.\n 'mergesort' and 'stable' are mapped to radix sort for integer data types. Radix sort is an\n O(n) sort instead of O(n log n).\n\n .. versionchanged:: 1.18.0\n\n NaT now sorts to the end of arrays for consistency with NaN.\n\n Examples\n --------\n >>> a = np.array([[1,4],[3,1]])\n >>> np.sort(a) # sort along the last axis\n array([[1, 4],\n [1, 3]])\n >>> np.sort(a, axis=None) # sort the flattened array\n array([1, 1, 3, 4])\n >>> np.sort(a, axis=0) # sort along the first axis\n array([[1, 1],\n [3, 4]])\n\n Use the `order` keyword to specify a field to use when sorting a\n structured array:\n\n >>> dtype = [('name', 'S10'), ('height', float), ('age', int)]\n >>> values = [('Arthur', 1.8, 41), ('Lancelot', 1.9, 38),\n ... ('Galahad', 1.7, 38)]\n >>> a = np.array(values, dtype=dtype) # create a structured array\n >>> np.sort(a, order='height') # doctest: +SKIP\n array([('Galahad', 1.7, 38), ('Arthur', 1.8, 41),\n ('Lancelot', 1.8999999999999999, 38)],\n dtype=[('name', '|S10'), ('height', '<f8'), ('age', '<i4')])\n\n Sort by age, then height if ages are equal:\n\n >>> np.sort(a, order=['age', 'height']) # doctest: +SKIP\n array([('Galahad', 1.7, 38), ('Lancelot', 1.8999999999999999, 38),\n ('Arthur', 1.8, 41)],\n dtype=[('name', '|S10'), ('height', '<f8'), ('age', '<i4')])\n\n "
if (axis is None):
a = asanyarray(a).flatten()
axis = (- 1)
try:
sort(a, kind=kind)
return a
except Exception:
pass
else:
a = asanyarray(a).copy(order='K')
a.sort(axis=axis, kind=kind, order=order)
return a |
def lexsort(*args, **kwargs):
'\n Perform an indirect stable sort using a sequence of keys.\n\n Given multiple sorting keys, which can be interpreted as columns in a\n spreadsheet, lexsort returns an array of integer indices that describes\n the sort order by multiple columns. The last key in the sequence is used\n for the primary sort order, the second-to-last key for the secondary sort\n order, and so on. The keys argument must be a sequence of objects that\n can be converted to arrays of the same shape. If a 2D array is provided\n for the keys argument, it\'s rows are interpreted as the sorting keys and\n sorting is according to the last row, second last row etc.\n\n Parameters\n ----------\n keys : (k, N) array or tuple containing k (N,)-shaped sequences\n The `k` different "columns" to be sorted. The last column (or row if\n `keys` is a 2D array) is the primary sort key.\n axis : int, optional\n Axis to be indirectly sorted. By default, sort over the last axis.\n\n Returns\n -------\n indices : (N,) ndarray of ints\n Array of indices that sort the keys along the specified axis.\n\n Threading\n ---------\n Up to 8 threads\n\n See Also\n --------\n argsort : Indirect sort.\n ndarray.sort : In-place sort.\n sort : Return a sorted copy of an array.\n\n Examples\n --------\n Sort names: first by surname, then by name.\n\n >>> surnames = (\'Hertz\', \'Galilei\', \'Hertz\')\n >>> first_names = (\'Heinrich\', \'Galileo\', \'Gustav\')\n >>> ind = np.lexsort((first_names, surnames))\n >>> ind\n array([1, 2, 0])\n\n >>> [surnames[i] + ", " + first_names[i] for i in ind]\n [\'Galilei, Galileo\', \'Hertz, Gustav\', \'Hertz, Heinrich\']\n\n Sort two columns of numbers:\n\n >>> a = [1,5,1,4,3,4,4] # First column\n >>> b = [9,4,0,4,0,2,1] # Second column\n >>> ind = np.lexsort((b,a)) # Sort by a, then by b\n >>> ind\n array([2, 0, 4, 6, 5, 3, 1])\n\n >>> [(a[i],b[i]) for i in ind]\n [(1, 0), (1, 9), (3, 0), (4, 1), (4, 2), (4, 4), (5, 4)]\n\n Note that sorting is first according to the elements of ``a``.\n Secondary sorting is according to the elements of ``b``.\n\n A normal ``argsort`` would have yielded:\n\n >>> [(a[i],b[i]) for i in np.argsort(a)]\n [(1, 9), (1, 0), (3, 0), (4, 4), (4, 2), (4, 1), (5, 4)]\n\n Structured arrays are sorted lexically by ``argsort``:\n\n >>> x = np.array([(1,9), (5,4), (1,0), (4,4), (3,0), (4,2), (4,1)],\n ... dtype=np.dtype([(\'x\', int), (\'y\', int)]))\n\n >>> np.argsort(x) # or np.argsort(x, order=(\'x\', \'y\'))\n array([2, 0, 4, 6, 5, 3, 1])\n '
try:
return lexsort32(*args, **kwargs)
except Exception:
return np.lexsort(*args, **kwargs) | -7,031,114,629,765,578,000 | Perform an indirect stable sort using a sequence of keys.
Given multiple sorting keys, which can be interpreted as columns in a
spreadsheet, lexsort returns an array of integer indices that describes
the sort order by multiple columns. The last key in the sequence is used
for the primary sort order, the second-to-last key for the secondary sort
order, and so on. The keys argument must be a sequence of objects that
can be converted to arrays of the same shape. If a 2D array is provided
for the keys argument, it's rows are interpreted as the sorting keys and
sorting is according to the last row, second last row etc.
Parameters
----------
keys : (k, N) array or tuple containing k (N,)-shaped sequences
The `k` different "columns" to be sorted. The last column (or row if
`keys` is a 2D array) is the primary sort key.
axis : int, optional
Axis to be indirectly sorted. By default, sort over the last axis.
Returns
-------
indices : (N,) ndarray of ints
Array of indices that sort the keys along the specified axis.
Threading
---------
Up to 8 threads
See Also
--------
argsort : Indirect sort.
ndarray.sort : In-place sort.
sort : Return a sorted copy of an array.
Examples
--------
Sort names: first by surname, then by name.
>>> surnames = ('Hertz', 'Galilei', 'Hertz')
>>> first_names = ('Heinrich', 'Galileo', 'Gustav')
>>> ind = np.lexsort((first_names, surnames))
>>> ind
array([1, 2, 0])
>>> [surnames[i] + ", " + first_names[i] for i in ind]
['Galilei, Galileo', 'Hertz, Gustav', 'Hertz, Heinrich']
Sort two columns of numbers:
>>> a = [1,5,1,4,3,4,4] # First column
>>> b = [9,4,0,4,0,2,1] # Second column
>>> ind = np.lexsort((b,a)) # Sort by a, then by b
>>> ind
array([2, 0, 4, 6, 5, 3, 1])
>>> [(a[i],b[i]) for i in ind]
[(1, 0), (1, 9), (3, 0), (4, 1), (4, 2), (4, 4), (5, 4)]
Note that sorting is first according to the elements of ``a``.
Secondary sorting is according to the elements of ``b``.
A normal ``argsort`` would have yielded:
>>> [(a[i],b[i]) for i in np.argsort(a)]
[(1, 9), (1, 0), (3, 0), (4, 4), (4, 2), (4, 1), (5, 4)]
Structured arrays are sorted lexically by ``argsort``:
>>> x = np.array([(1,9), (5,4), (1,0), (4,4), (3,0), (4,2), (4,1)],
... dtype=np.dtype([('x', int), ('y', int)]))
>>> np.argsort(x) # or np.argsort(x, order=('x', 'y'))
array([2, 0, 4, 6, 5, 3, 1]) | src/pnumpy/sort.py | lexsort | Quansight/numpy-threading-extensions | python | def lexsort(*args, **kwargs):
'\n Perform an indirect stable sort using a sequence of keys.\n\n Given multiple sorting keys, which can be interpreted as columns in a\n spreadsheet, lexsort returns an array of integer indices that describes\n the sort order by multiple columns. The last key in the sequence is used\n for the primary sort order, the second-to-last key for the secondary sort\n order, and so on. The keys argument must be a sequence of objects that\n can be converted to arrays of the same shape. If a 2D array is provided\n for the keys argument, it\'s rows are interpreted as the sorting keys and\n sorting is according to the last row, second last row etc.\n\n Parameters\n ----------\n keys : (k, N) array or tuple containing k (N,)-shaped sequences\n The `k` different "columns" to be sorted. The last column (or row if\n `keys` is a 2D array) is the primary sort key.\n axis : int, optional\n Axis to be indirectly sorted. By default, sort over the last axis.\n\n Returns\n -------\n indices : (N,) ndarray of ints\n Array of indices that sort the keys along the specified axis.\n\n Threading\n ---------\n Up to 8 threads\n\n See Also\n --------\n argsort : Indirect sort.\n ndarray.sort : In-place sort.\n sort : Return a sorted copy of an array.\n\n Examples\n --------\n Sort names: first by surname, then by name.\n\n >>> surnames = (\'Hertz\', \'Galilei\', \'Hertz\')\n >>> first_names = (\'Heinrich\', \'Galileo\', \'Gustav\')\n >>> ind = np.lexsort((first_names, surnames))\n >>> ind\n array([1, 2, 0])\n\n >>> [surnames[i] + ", " + first_names[i] for i in ind]\n [\'Galilei, Galileo\', \'Hertz, Gustav\', \'Hertz, Heinrich\']\n\n Sort two columns of numbers:\n\n >>> a = [1,5,1,4,3,4,4] # First column\n >>> b = [9,4,0,4,0,2,1] # Second column\n >>> ind = np.lexsort((b,a)) # Sort by a, then by b\n >>> ind\n array([2, 0, 4, 6, 5, 3, 1])\n\n >>> [(a[i],b[i]) for i in ind]\n [(1, 0), (1, 9), (3, 0), (4, 1), (4, 2), (4, 4), (5, 4)]\n\n Note that sorting is first according to the elements of ``a``.\n Secondary sorting is according to the elements of ``b``.\n\n A normal ``argsort`` would have yielded:\n\n >>> [(a[i],b[i]) for i in np.argsort(a)]\n [(1, 9), (1, 0), (3, 0), (4, 4), (4, 2), (4, 1), (5, 4)]\n\n Structured arrays are sorted lexically by ``argsort``:\n\n >>> x = np.array([(1,9), (5,4), (1,0), (4,4), (3,0), (4,2), (4,1)],\n ... dtype=np.dtype([(\'x\', int), (\'y\', int)]))\n\n >>> np.argsort(x) # or np.argsort(x, order=(\'x\', \'y\'))\n array([2, 0, 4, 6, 5, 3, 1])\n '
try:
return lexsort32(*args, **kwargs)
except Exception:
return np.lexsort(*args, **kwargs) |
def argsort(a, axis=(- 1), kind=None, order=None):
"\n Returns the indices that would sort an array.\n\n Perform an indirect sort along the given axis using the algorithm specified\n by the `kind` keyword. It returns an array of indices of the same shape as\n `a` that index data along the given axis in sorted order.\n\n Parameters\n ----------\n a : array_like\n Array to sort.\n axis : int or None, optional\n Axis along which to sort. The default is -1 (the last axis). If None,\n the flattened array is used.\n kind : {'quicksort', 'mergesort', 'heapsort', 'stable'}, optional\n Sorting algorithm. The default is 'quicksort'. Note that both 'stable'\n and 'mergesort' use timsort under the covers and, in general, the\n actual implementation will vary with data type. The 'mergesort' option\n is retained for backwards compatibility.\n\n .. versionchanged:: 1.15.0.\n The 'stable' option was added.\n order : str or list of str, optional\n When `a` is an array with fields defined, this argument specifies\n which fields to compare first, second, etc. A single field can\n be specified as a string, and not all fields need be specified,\n but unspecified fields will still be used, in the order in which\n they come up in the dtype, to break ties.\n\n Returns\n -------\n index_array : ndarray, int\n Array of indices that sort `a` along the specified `axis`.\n If `a` is one-dimensional, ``a[index_array]`` yields a sorted `a`.\n More generally, ``np.take_along_axis(a, index_array, axis=axis)``\n always yields the sorted `a`, irrespective of dimensionality.\n\n See Also\n --------\n sort : Describes sorting algorithms used.\n lexsort : Indirect stable sort with multiple keys.\n ndarray.sort : Inplace sort.\n argpartition : Indirect partial sort.\n take_along_axis : Apply ``index_array`` from argsort\n to an array as if by calling sort.\n\n Notes\n -----\n See `sort` for notes on the different sorting algorithms.\n\n As of NumPy 1.4.0 `argsort` works with real/complex arrays containing\n nan values. The enhanced sort order is documented in `sort`.\n\n Examples\n --------\n One dimensional array:\n\n >>> x = np.array([3, 1, 2])\n >>> np.argsort(x)\n array([1, 2, 0])\n\n Two-dimensional array:\n\n >>> x = np.array([[0, 3], [2, 2]])\n >>> x\n array([[0, 3],\n [2, 2]])\n\n >>> ind = np.argsort(x, axis=0) # sorts along first axis (down)\n >>> ind\n array([[0, 1],\n [1, 0]])\n >>> np.take_along_axis(x, ind, axis=0) # same as np.sort(x, axis=0)\n array([[0, 2],\n [2, 3]])\n\n >>> ind = np.argsort(x, axis=1) # sorts along last axis (across)\n >>> ind\n array([[0, 1],\n [0, 1]])\n >>> np.take_along_axis(x, ind, axis=1) # same as np.sort(x, axis=1)\n array([[0, 3],\n [2, 2]])\n\n Indices of the sorted elements of a N-dimensional array:\n\n >>> ind = np.unravel_index(np.argsort(x, axis=None), x.shape)\n >>> ind\n (array([0, 1, 1, 0]), array([0, 0, 1, 1]))\n >>> x[ind] # same as np.sort(x, axis=None)\n array([0, 2, 2, 3])\n\n Sorting with keys:\n\n >>> x = np.array([(1, 0), (0, 1)], dtype=[('x', '<i4'), ('y', '<i4')])\n >>> x\n array([(1, 0), (0, 1)],\n dtype=[('x', '<i4'), ('y', '<i4')])\n\n >>> np.argsort(x, order=('x','y'))\n array([1, 0])\n\n >>> np.argsort(x, order=('y','x'))\n array([0, 1])\n\n "
return _wrapfunc(a, 'argsort', axis=axis, kind=kind, order=order) | -5,738,350,829,677,030,000 | Returns the indices that would sort an array.
Perform an indirect sort along the given axis using the algorithm specified
by the `kind` keyword. It returns an array of indices of the same shape as
`a` that index data along the given axis in sorted order.
Parameters
----------
a : array_like
Array to sort.
axis : int or None, optional
Axis along which to sort. The default is -1 (the last axis). If None,
the flattened array is used.
kind : {'quicksort', 'mergesort', 'heapsort', 'stable'}, optional
Sorting algorithm. The default is 'quicksort'. Note that both 'stable'
and 'mergesort' use timsort under the covers and, in general, the
actual implementation will vary with data type. The 'mergesort' option
is retained for backwards compatibility.
.. versionchanged:: 1.15.0.
The 'stable' option was added.
order : str or list of str, optional
When `a` is an array with fields defined, this argument specifies
which fields to compare first, second, etc. A single field can
be specified as a string, and not all fields need be specified,
but unspecified fields will still be used, in the order in which
they come up in the dtype, to break ties.
Returns
-------
index_array : ndarray, int
Array of indices that sort `a` along the specified `axis`.
If `a` is one-dimensional, ``a[index_array]`` yields a sorted `a`.
More generally, ``np.take_along_axis(a, index_array, axis=axis)``
always yields the sorted `a`, irrespective of dimensionality.
See Also
--------
sort : Describes sorting algorithms used.
lexsort : Indirect stable sort with multiple keys.
ndarray.sort : Inplace sort.
argpartition : Indirect partial sort.
take_along_axis : Apply ``index_array`` from argsort
to an array as if by calling sort.
Notes
-----
See `sort` for notes on the different sorting algorithms.
As of NumPy 1.4.0 `argsort` works with real/complex arrays containing
nan values. The enhanced sort order is documented in `sort`.
Examples
--------
One dimensional array:
>>> x = np.array([3, 1, 2])
>>> np.argsort(x)
array([1, 2, 0])
Two-dimensional array:
>>> x = np.array([[0, 3], [2, 2]])
>>> x
array([[0, 3],
[2, 2]])
>>> ind = np.argsort(x, axis=0) # sorts along first axis (down)
>>> ind
array([[0, 1],
[1, 0]])
>>> np.take_along_axis(x, ind, axis=0) # same as np.sort(x, axis=0)
array([[0, 2],
[2, 3]])
>>> ind = np.argsort(x, axis=1) # sorts along last axis (across)
>>> ind
array([[0, 1],
[0, 1]])
>>> np.take_along_axis(x, ind, axis=1) # same as np.sort(x, axis=1)
array([[0, 3],
[2, 2]])
Indices of the sorted elements of a N-dimensional array:
>>> ind = np.unravel_index(np.argsort(x, axis=None), x.shape)
>>> ind
(array([0, 1, 1, 0]), array([0, 0, 1, 1]))
>>> x[ind] # same as np.sort(x, axis=None)
array([0, 2, 2, 3])
Sorting with keys:
>>> x = np.array([(1, 0), (0, 1)], dtype=[('x', '<i4'), ('y', '<i4')])
>>> x
array([(1, 0), (0, 1)],
dtype=[('x', '<i4'), ('y', '<i4')])
>>> np.argsort(x, order=('x','y'))
array([1, 0])
>>> np.argsort(x, order=('y','x'))
array([0, 1]) | src/pnumpy/sort.py | argsort | Quansight/numpy-threading-extensions | python | def argsort(a, axis=(- 1), kind=None, order=None):
"\n Returns the indices that would sort an array.\n\n Perform an indirect sort along the given axis using the algorithm specified\n by the `kind` keyword. It returns an array of indices of the same shape as\n `a` that index data along the given axis in sorted order.\n\n Parameters\n ----------\n a : array_like\n Array to sort.\n axis : int or None, optional\n Axis along which to sort. The default is -1 (the last axis). If None,\n the flattened array is used.\n kind : {'quicksort', 'mergesort', 'heapsort', 'stable'}, optional\n Sorting algorithm. The default is 'quicksort'. Note that both 'stable'\n and 'mergesort' use timsort under the covers and, in general, the\n actual implementation will vary with data type. The 'mergesort' option\n is retained for backwards compatibility.\n\n .. versionchanged:: 1.15.0.\n The 'stable' option was added.\n order : str or list of str, optional\n When `a` is an array with fields defined, this argument specifies\n which fields to compare first, second, etc. A single field can\n be specified as a string, and not all fields need be specified,\n but unspecified fields will still be used, in the order in which\n they come up in the dtype, to break ties.\n\n Returns\n -------\n index_array : ndarray, int\n Array of indices that sort `a` along the specified `axis`.\n If `a` is one-dimensional, ``a[index_array]`` yields a sorted `a`.\n More generally, ``np.take_along_axis(a, index_array, axis=axis)``\n always yields the sorted `a`, irrespective of dimensionality.\n\n See Also\n --------\n sort : Describes sorting algorithms used.\n lexsort : Indirect stable sort with multiple keys.\n ndarray.sort : Inplace sort.\n argpartition : Indirect partial sort.\n take_along_axis : Apply ``index_array`` from argsort\n to an array as if by calling sort.\n\n Notes\n -----\n See `sort` for notes on the different sorting algorithms.\n\n As of NumPy 1.4.0 `argsort` works with real/complex arrays containing\n nan values. The enhanced sort order is documented in `sort`.\n\n Examples\n --------\n One dimensional array:\n\n >>> x = np.array([3, 1, 2])\n >>> np.argsort(x)\n array([1, 2, 0])\n\n Two-dimensional array:\n\n >>> x = np.array([[0, 3], [2, 2]])\n >>> x\n array([[0, 3],\n [2, 2]])\n\n >>> ind = np.argsort(x, axis=0) # sorts along first axis (down)\n >>> ind\n array([[0, 1],\n [1, 0]])\n >>> np.take_along_axis(x, ind, axis=0) # same as np.sort(x, axis=0)\n array([[0, 2],\n [2, 3]])\n\n >>> ind = np.argsort(x, axis=1) # sorts along last axis (across)\n >>> ind\n array([[0, 1],\n [0, 1]])\n >>> np.take_along_axis(x, ind, axis=1) # same as np.sort(x, axis=1)\n array([[0, 3],\n [2, 2]])\n\n Indices of the sorted elements of a N-dimensional array:\n\n >>> ind = np.unravel_index(np.argsort(x, axis=None), x.shape)\n >>> ind\n (array([0, 1, 1, 0]), array([0, 0, 1, 1]))\n >>> x[ind] # same as np.sort(x, axis=None)\n array([0, 2, 2, 3])\n\n Sorting with keys:\n\n >>> x = np.array([(1, 0), (0, 1)], dtype=[('x', '<i4'), ('y', '<i4')])\n >>> x\n array([(1, 0), (0, 1)],\n dtype=[('x', '<i4'), ('y', '<i4')])\n\n >>> np.argsort(x, order=('x','y'))\n array([1, 0])\n\n >>> np.argsort(x, order=('y','x'))\n array([0, 1])\n\n "
return _wrapfunc(a, 'argsort', axis=axis, kind=kind, order=order) |
def argmax(a, axis=None, out=None):
'\n Returns the indices of the maximum values along an axis.\n\n Parameters\n ----------\n a : array_like\n Input array.\n axis : int, optional\n By default, the index is into the flattened array, otherwise\n along the specified axis.\n out : array, optional\n If provided, the result will be inserted into this array. It should\n be of the appropriate shape and dtype.\n\n Returns\n -------\n index_array : ndarray of ints\n Array of indices into the array. It has the same shape as `a.shape`\n with the dimension along `axis` removed.\n\n See Also\n --------\n ndarray.argmax, argmin\n amax : The maximum value along a given axis.\n unravel_index : Convert a flat index into an index tuple.\n take_along_axis : Apply ``np.expand_dims(index_array, axis)``\n from argmax to an array as if by calling max.\n\n Notes\n -----\n In case of multiple occurrences of the maximum values, the indices\n corresponding to the first occurrence are returned.\n\n Examples\n --------\n >>> a = np.arange(6).reshape(2,3) + 10\n >>> a\n array([[10, 11, 12],\n [13, 14, 15]])\n >>> np.argmax(a)\n 5\n >>> np.argmax(a, axis=0)\n array([1, 1, 1])\n >>> np.argmax(a, axis=1)\n array([2, 2])\n\n Indexes of the maximal elements of a N-dimensional array:\n\n >>> ind = np.unravel_index(np.argmax(a, axis=None), a.shape)\n >>> ind\n (1, 2)\n >>> a[ind]\n 15\n\n >>> b = np.arange(6)\n >>> b[1] = 5\n >>> b\n array([0, 5, 2, 3, 4, 5])\n >>> np.argmax(b) # Only the first occurrence is returned.\n 1\n\n >>> x = np.array([[4,2,3], [1,0,3]])\n >>> index_array = np.argmax(x, axis=-1)\n >>> # Same as np.max(x, axis=-1, keepdims=True)\n >>> np.take_along_axis(x, np.expand_dims(index_array, axis=-1), axis=-1)\n array([[4],\n [3]])\n >>> # Same as np.max(x, axis=-1)\n >>> np.take_along_axis(x, np.expand_dims(index_array, axis=-1), axis=-1).squeeze(axis=-1)\n array([4, 3])\n\n '
return _wrapfunc(a, 'argmax', axis=axis, out=out) | -8,006,752,523,648,650,000 | Returns the indices of the maximum values along an axis.
Parameters
----------
a : array_like
Input array.
axis : int, optional
By default, the index is into the flattened array, otherwise
along the specified axis.
out : array, optional
If provided, the result will be inserted into this array. It should
be of the appropriate shape and dtype.
Returns
-------
index_array : ndarray of ints
Array of indices into the array. It has the same shape as `a.shape`
with the dimension along `axis` removed.
See Also
--------
ndarray.argmax, argmin
amax : The maximum value along a given axis.
unravel_index : Convert a flat index into an index tuple.
take_along_axis : Apply ``np.expand_dims(index_array, axis)``
from argmax to an array as if by calling max.
Notes
-----
In case of multiple occurrences of the maximum values, the indices
corresponding to the first occurrence are returned.
Examples
--------
>>> a = np.arange(6).reshape(2,3) + 10
>>> a
array([[10, 11, 12],
[13, 14, 15]])
>>> np.argmax(a)
5
>>> np.argmax(a, axis=0)
array([1, 1, 1])
>>> np.argmax(a, axis=1)
array([2, 2])
Indexes of the maximal elements of a N-dimensional array:
>>> ind = np.unravel_index(np.argmax(a, axis=None), a.shape)
>>> ind
(1, 2)
>>> a[ind]
15
>>> b = np.arange(6)
>>> b[1] = 5
>>> b
array([0, 5, 2, 3, 4, 5])
>>> np.argmax(b) # Only the first occurrence is returned.
1
>>> x = np.array([[4,2,3], [1,0,3]])
>>> index_array = np.argmax(x, axis=-1)
>>> # Same as np.max(x, axis=-1, keepdims=True)
>>> np.take_along_axis(x, np.expand_dims(index_array, axis=-1), axis=-1)
array([[4],
[3]])
>>> # Same as np.max(x, axis=-1)
>>> np.take_along_axis(x, np.expand_dims(index_array, axis=-1), axis=-1).squeeze(axis=-1)
array([4, 3]) | src/pnumpy/sort.py | argmax | Quansight/numpy-threading-extensions | python | def argmax(a, axis=None, out=None):
'\n Returns the indices of the maximum values along an axis.\n\n Parameters\n ----------\n a : array_like\n Input array.\n axis : int, optional\n By default, the index is into the flattened array, otherwise\n along the specified axis.\n out : array, optional\n If provided, the result will be inserted into this array. It should\n be of the appropriate shape and dtype.\n\n Returns\n -------\n index_array : ndarray of ints\n Array of indices into the array. It has the same shape as `a.shape`\n with the dimension along `axis` removed.\n\n See Also\n --------\n ndarray.argmax, argmin\n amax : The maximum value along a given axis.\n unravel_index : Convert a flat index into an index tuple.\n take_along_axis : Apply ``np.expand_dims(index_array, axis)``\n from argmax to an array as if by calling max.\n\n Notes\n -----\n In case of multiple occurrences of the maximum values, the indices\n corresponding to the first occurrence are returned.\n\n Examples\n --------\n >>> a = np.arange(6).reshape(2,3) + 10\n >>> a\n array([[10, 11, 12],\n [13, 14, 15]])\n >>> np.argmax(a)\n 5\n >>> np.argmax(a, axis=0)\n array([1, 1, 1])\n >>> np.argmax(a, axis=1)\n array([2, 2])\n\n Indexes of the maximal elements of a N-dimensional array:\n\n >>> ind = np.unravel_index(np.argmax(a, axis=None), a.shape)\n >>> ind\n (1, 2)\n >>> a[ind]\n 15\n\n >>> b = np.arange(6)\n >>> b[1] = 5\n >>> b\n array([0, 5, 2, 3, 4, 5])\n >>> np.argmax(b) # Only the first occurrence is returned.\n 1\n\n >>> x = np.array([[4,2,3], [1,0,3]])\n >>> index_array = np.argmax(x, axis=-1)\n >>> # Same as np.max(x, axis=-1, keepdims=True)\n >>> np.take_along_axis(x, np.expand_dims(index_array, axis=-1), axis=-1)\n array([[4],\n [3]])\n >>> # Same as np.max(x, axis=-1)\n >>> np.take_along_axis(x, np.expand_dims(index_array, axis=-1), axis=-1).squeeze(axis=-1)\n array([4, 3])\n\n '
return _wrapfunc(a, 'argmax', axis=axis, out=out) |
def argmin(a, axis=None, out=None):
'\n Returns the indices of the minimum values along an axis.\n\n Parameters\n ----------\n a : array_like\n Input array.\n axis : int, optional\n By default, the index is into the flattened array, otherwise\n along the specified axis.\n out : array, optional\n If provided, the result will be inserted into this array. It should\n be of the appropriate shape and dtype.\n\n Returns\n -------\n index_array : ndarray of ints\n Array of indices into the array. It has the same shape as `a.shape`\n with the dimension along `axis` removed.\n\n See Also\n --------\n ndarray.argmin, argmax\n amin : The minimum value along a given axis.\n unravel_index : Convert a flat index into an index tuple.\n take_along_axis : Apply ``np.expand_dims(index_array, axis)``\n from argmin to an array as if by calling min.\n\n Notes\n -----\n In case of multiple occurrences of the minimum values, the indices\n corresponding to the first occurrence are returned.\n\n Examples\n --------\n >>> a = np.arange(6).reshape(2,3) + 10\n >>> a\n array([[10, 11, 12],\n [13, 14, 15]])\n >>> np.argmin(a)\n 0\n >>> np.argmin(a, axis=0)\n array([0, 0, 0])\n >>> np.argmin(a, axis=1)\n array([0, 0])\n\n Indices of the minimum elements of a N-dimensional array:\n\n >>> ind = np.unravel_index(np.argmin(a, axis=None), a.shape)\n >>> ind\n (0, 0)\n >>> a[ind]\n 10\n\n >>> b = np.arange(6) + 10\n >>> b[4] = 10\n >>> b\n array([10, 11, 12, 13, 10, 15])\n >>> np.argmin(b) # Only the first occurrence is returned.\n 0\n\n >>> x = np.array([[4,2,3], [1,0,3]])\n >>> index_array = np.argmin(x, axis=-1)\n >>> # Same as np.min(x, axis=-1, keepdims=True)\n >>> np.take_along_axis(x, np.expand_dims(index_array, axis=-1), axis=-1)\n array([[2],\n [0]])\n >>> # Same as np.max(x, axis=-1)\n >>> np.take_along_axis(x, np.expand_dims(index_array, axis=-1), axis=-1).squeeze(axis=-1)\n array([2, 0])\n\n '
return _wrapfunc(a, 'argmin', axis=axis, out=out) | -7,225,755,640,550,826,000 | Returns the indices of the minimum values along an axis.
Parameters
----------
a : array_like
Input array.
axis : int, optional
By default, the index is into the flattened array, otherwise
along the specified axis.
out : array, optional
If provided, the result will be inserted into this array. It should
be of the appropriate shape and dtype.
Returns
-------
index_array : ndarray of ints
Array of indices into the array. It has the same shape as `a.shape`
with the dimension along `axis` removed.
See Also
--------
ndarray.argmin, argmax
amin : The minimum value along a given axis.
unravel_index : Convert a flat index into an index tuple.
take_along_axis : Apply ``np.expand_dims(index_array, axis)``
from argmin to an array as if by calling min.
Notes
-----
In case of multiple occurrences of the minimum values, the indices
corresponding to the first occurrence are returned.
Examples
--------
>>> a = np.arange(6).reshape(2,3) + 10
>>> a
array([[10, 11, 12],
[13, 14, 15]])
>>> np.argmin(a)
0
>>> np.argmin(a, axis=0)
array([0, 0, 0])
>>> np.argmin(a, axis=1)
array([0, 0])
Indices of the minimum elements of a N-dimensional array:
>>> ind = np.unravel_index(np.argmin(a, axis=None), a.shape)
>>> ind
(0, 0)
>>> a[ind]
10
>>> b = np.arange(6) + 10
>>> b[4] = 10
>>> b
array([10, 11, 12, 13, 10, 15])
>>> np.argmin(b) # Only the first occurrence is returned.
0
>>> x = np.array([[4,2,3], [1,0,3]])
>>> index_array = np.argmin(x, axis=-1)
>>> # Same as np.min(x, axis=-1, keepdims=True)
>>> np.take_along_axis(x, np.expand_dims(index_array, axis=-1), axis=-1)
array([[2],
[0]])
>>> # Same as np.max(x, axis=-1)
>>> np.take_along_axis(x, np.expand_dims(index_array, axis=-1), axis=-1).squeeze(axis=-1)
array([2, 0]) | src/pnumpy/sort.py | argmin | Quansight/numpy-threading-extensions | python | def argmin(a, axis=None, out=None):
'\n Returns the indices of the minimum values along an axis.\n\n Parameters\n ----------\n a : array_like\n Input array.\n axis : int, optional\n By default, the index is into the flattened array, otherwise\n along the specified axis.\n out : array, optional\n If provided, the result will be inserted into this array. It should\n be of the appropriate shape and dtype.\n\n Returns\n -------\n index_array : ndarray of ints\n Array of indices into the array. It has the same shape as `a.shape`\n with the dimension along `axis` removed.\n\n See Also\n --------\n ndarray.argmin, argmax\n amin : The minimum value along a given axis.\n unravel_index : Convert a flat index into an index tuple.\n take_along_axis : Apply ``np.expand_dims(index_array, axis)``\n from argmin to an array as if by calling min.\n\n Notes\n -----\n In case of multiple occurrences of the minimum values, the indices\n corresponding to the first occurrence are returned.\n\n Examples\n --------\n >>> a = np.arange(6).reshape(2,3) + 10\n >>> a\n array([[10, 11, 12],\n [13, 14, 15]])\n >>> np.argmin(a)\n 0\n >>> np.argmin(a, axis=0)\n array([0, 0, 0])\n >>> np.argmin(a, axis=1)\n array([0, 0])\n\n Indices of the minimum elements of a N-dimensional array:\n\n >>> ind = np.unravel_index(np.argmin(a, axis=None), a.shape)\n >>> ind\n (0, 0)\n >>> a[ind]\n 10\n\n >>> b = np.arange(6) + 10\n >>> b[4] = 10\n >>> b\n array([10, 11, 12, 13, 10, 15])\n >>> np.argmin(b) # Only the first occurrence is returned.\n 0\n\n >>> x = np.array([[4,2,3], [1,0,3]])\n >>> index_array = np.argmin(x, axis=-1)\n >>> # Same as np.min(x, axis=-1, keepdims=True)\n >>> np.take_along_axis(x, np.expand_dims(index_array, axis=-1), axis=-1)\n array([[2],\n [0]])\n >>> # Same as np.max(x, axis=-1)\n >>> np.take_along_axis(x, np.expand_dims(index_array, axis=-1), axis=-1).squeeze(axis=-1)\n array([2, 0])\n\n '
return _wrapfunc(a, 'argmin', axis=axis, out=out) |
def searchsorted(a, v, side='left', sorter=None):
"\n Find indices where elements should be inserted to maintain order.\n\n Find the indices into a sorted array `a` such that, if the\n corresponding elements in `v` were inserted before the indices, the\n order of `a` would be preserved.\n\n Assuming that `a` is sorted:\n\n ====== ============================\n `side` returned index `i` satisfies\n ====== ============================\n left ``a[i-1] < v <= a[i]``\n right ``a[i-1] <= v < a[i]``\n ====== ============================\n\n Parameters\n ----------\n a : 1-D array_like\n Input array. If `sorter` is None, then it must be sorted in\n ascending order, otherwise `sorter` must be an array of indices\n that sort it.\n v : array_like\n Values to insert into `a`.\n side : {'left', 'right'}, optional\n If 'left', the index of the first suitable location found is given.\n If 'right', return the last such index. If there is no suitable\n index, return either 0 or N (where N is the length of `a`).\n sorter : 1-D array_like, optional\n Optional array of integer indices that sort array a into ascending\n order. They are typically the result of argsort.\n\n .. versionadded:: 1.7.0\n\n Returns\n -------\n indices : array of ints\n Array of insertion points with the same shape as `v`.\n\n See Also\n --------\n sort : Return a sorted copy of an array.\n histogram : Produce histogram from 1-D data.\n\n Notes\n -----\n Binary search is used to find the required insertion points.\n\n As of NumPy 1.4.0 `searchsorted` works with real/complex arrays containing\n `nan` values. The enhanced sort order is documented in `sort`.\n\n This function uses the same algorithm as the builtin python `bisect.bisect_left`\n (``side='left'``) and `bisect.bisect_right` (``side='right'``) functions,\n which is also vectorized in the `v` argument.\n\n Examples\n --------\n >>> np.searchsorted([1,2,3,4,5], 3)\n 2\n >>> np.searchsorted([1,2,3,4,5], 3, side='right')\n 3\n >>> np.searchsorted([1,2,3,4,5], [-10, 10, 2, 3])\n array([0, 5, 1, 2])\n\n "
return _wrapfunc(a, 'searchsorted', v, side=side, sorter=sorter) | 6,932,750,288,715,982,000 | Find indices where elements should be inserted to maintain order.
Find the indices into a sorted array `a` such that, if the
corresponding elements in `v` were inserted before the indices, the
order of `a` would be preserved.
Assuming that `a` is sorted:
====== ============================
`side` returned index `i` satisfies
====== ============================
left ``a[i-1] < v <= a[i]``
right ``a[i-1] <= v < a[i]``
====== ============================
Parameters
----------
a : 1-D array_like
Input array. If `sorter` is None, then it must be sorted in
ascending order, otherwise `sorter` must be an array of indices
that sort it.
v : array_like
Values to insert into `a`.
side : {'left', 'right'}, optional
If 'left', the index of the first suitable location found is given.
If 'right', return the last such index. If there is no suitable
index, return either 0 or N (where N is the length of `a`).
sorter : 1-D array_like, optional
Optional array of integer indices that sort array a into ascending
order. They are typically the result of argsort.
.. versionadded:: 1.7.0
Returns
-------
indices : array of ints
Array of insertion points with the same shape as `v`.
See Also
--------
sort : Return a sorted copy of an array.
histogram : Produce histogram from 1-D data.
Notes
-----
Binary search is used to find the required insertion points.
As of NumPy 1.4.0 `searchsorted` works with real/complex arrays containing
`nan` values. The enhanced sort order is documented in `sort`.
This function uses the same algorithm as the builtin python `bisect.bisect_left`
(``side='left'``) and `bisect.bisect_right` (``side='right'``) functions,
which is also vectorized in the `v` argument.
Examples
--------
>>> np.searchsorted([1,2,3,4,5], 3)
2
>>> np.searchsorted([1,2,3,4,5], 3, side='right')
3
>>> np.searchsorted([1,2,3,4,5], [-10, 10, 2, 3])
array([0, 5, 1, 2]) | src/pnumpy/sort.py | searchsorted | Quansight/numpy-threading-extensions | python | def searchsorted(a, v, side='left', sorter=None):
"\n Find indices where elements should be inserted to maintain order.\n\n Find the indices into a sorted array `a` such that, if the\n corresponding elements in `v` were inserted before the indices, the\n order of `a` would be preserved.\n\n Assuming that `a` is sorted:\n\n ====== ============================\n `side` returned index `i` satisfies\n ====== ============================\n left ``a[i-1] < v <= a[i]``\n right ``a[i-1] <= v < a[i]``\n ====== ============================\n\n Parameters\n ----------\n a : 1-D array_like\n Input array. If `sorter` is None, then it must be sorted in\n ascending order, otherwise `sorter` must be an array of indices\n that sort it.\n v : array_like\n Values to insert into `a`.\n side : {'left', 'right'}, optional\n If 'left', the index of the first suitable location found is given.\n If 'right', return the last such index. If there is no suitable\n index, return either 0 or N (where N is the length of `a`).\n sorter : 1-D array_like, optional\n Optional array of integer indices that sort array a into ascending\n order. They are typically the result of argsort.\n\n .. versionadded:: 1.7.0\n\n Returns\n -------\n indices : array of ints\n Array of insertion points with the same shape as `v`.\n\n See Also\n --------\n sort : Return a sorted copy of an array.\n histogram : Produce histogram from 1-D data.\n\n Notes\n -----\n Binary search is used to find the required insertion points.\n\n As of NumPy 1.4.0 `searchsorted` works with real/complex arrays containing\n `nan` values. The enhanced sort order is documented in `sort`.\n\n This function uses the same algorithm as the builtin python `bisect.bisect_left`\n (``side='left'``) and `bisect.bisect_right` (``side='right'``) functions,\n which is also vectorized in the `v` argument.\n\n Examples\n --------\n >>> np.searchsorted([1,2,3,4,5], 3)\n 2\n >>> np.searchsorted([1,2,3,4,5], 3, side='right')\n 3\n >>> np.searchsorted([1,2,3,4,5], [-10, 10, 2, 3])\n array([0, 5, 1, 2])\n\n "
return _wrapfunc(a, 'searchsorted', v, side=side, sorter=sorter) |
def onlywhite(line):
'Return true if the line does only consist of whitespace characters.'
for c in line:
if ((c != ' ') and (c != ' ')):
return (c == ' ')
return line | 2,643,177,320,319,262,000 | Return true if the line does only consist of whitespace characters. | dev/html2text.py | onlywhite | awenz-uw/arlo | python | def onlywhite(line):
for c in line:
if ((c != ' ') and (c != ' ')):
return (c == ' ')
return line |
def dumb_property_dict(style):
'returns a hash of css attributes'
return dict([(x.strip(), y.strip()) for (x, y) in [z.split(':', 1) for z in style.split(';') if (':' in z)]]) | -1,786,496,490,863,415,000 | returns a hash of css attributes | dev/html2text.py | dumb_property_dict | awenz-uw/arlo | python | def dumb_property_dict(style):
return dict([(x.strip(), y.strip()) for (x, y) in [z.split(':', 1) for z in style.split(';') if (':' in z)]]) |
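A quick usage sketch for dumb_property_dict (assuming the function above is in scope; the comment shows the expected result):

    style = 'color: red; font-weight: bold; margin-left: 36px'
    props = dumb_property_dict(style)
    # {'color': 'red', 'font-weight': 'bold', 'margin-left': '36px'}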
def dumb_css_parser(data):
'returns a hash of css selectors, each of which contains a hash of css attributes'
data += ';'
importIndex = data.find('@import')
while (importIndex != (- 1)):
data = (data[0:importIndex] + data[(data.find(';', importIndex) + 1):])
importIndex = data.find('@import')
elements = [x.split('{') for x in data.split('}') if ('{' in x.strip())]
try:
elements = dict([(a.strip(), dumb_property_dict(b)) for (a, b) in elements])
except ValueError:
elements = {}
return elements | -4,408,751,051,728,895,500 | returns a hash of css selectors, each of which contains a hash of css attributes | dev/html2text.py | dumb_css_parser | awenz-uw/arlo | python | def dumb_css_parser(data):
data += ';'
importIndex = data.find('@import')
while (importIndex != (- 1)):
data = (data[0:importIndex] + data[(data.find(';', importIndex) + 1):])
importIndex = data.find('@import')
elements = [x.split('{') for x in data.split('}') if ('{' in x.strip())]
try:
elements = dict([(a.strip(), dumb_property_dict(b)) for (a, b) in elements])
except ValueError:
elements = {}
return elements |
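A minimal sketch of dumb_css_parser on a small stylesheet (assuming the function above and dumb_property_dict are in scope; @import rules are stripped before parsing):

    css = '@import url(x.css); .bold { font-weight: bold } p { margin: 0; color: blue }'
    selectors = dumb_css_parser(css)
    # {'.bold': {'font-weight': 'bold'}, 'p': {'margin': '0', 'color': 'blue'}}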
def element_style(attrs, style_def, parent_style):
"returns a hash of the 'final' style attributes of the element"
style = parent_style.copy()
if ('class' in attrs):
for css_class in attrs['class'].split():
css_style = style_def[('.' + css_class)]
style.update(css_style)
if ('style' in attrs):
immediate_style = dumb_property_dict(attrs['style'])
style.update(immediate_style)
return style | -1,413,663,789,123,905,300 | returns a hash of the 'final' style attributes of the element | dev/html2text.py | element_style | awenz-uw/arlo | python | def element_style(attrs, style_def, parent_style):
style = parent_style.copy()
if ('class' in attrs):
for css_class in attrs['class'].split():
css_style = style_def[('.' + css_class)]
style.update(css_style)
if ('style' in attrs):
immediate_style = dumb_property_dict(attrs['style'])
style.update(immediate_style)
return style |
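A sketch of how element_style layers parent, class, and inline styles (assumes element_style, dumb_css_parser and dumb_property_dict above are in scope):

    style_def = dumb_css_parser('.quote { font-style: italic; color: gray }')
    parent_style = {'font-size': '12px', 'color': 'black'}
    attrs = {'class': 'quote', 'style': 'color: red'}
    final = element_style(attrs, style_def, parent_style)
    # {'font-size': '12px', 'color': 'red', 'font-style': 'italic'}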
def google_list_style(style):
'finds out whether this is an ordered or unordered list'
if ('list-style-type' in style):
list_style = style['list-style-type']
if (list_style in ['disc', 'circle', 'square', 'none']):
return 'ul'
return 'ol' | 6,299,580,701,757,265,000 | finds out whether this is an ordered or unordered list | dev/html2text.py | google_list_style | awenz-uw/arlo | python | def google_list_style(style):
if ('list-style-type' in style):
list_style = style['list-style-type']
if (list_style in ['disc', 'circle', 'square', 'none']):
return 'ul'
return 'ol' |
def google_has_height(style):
"check if the style of the element has the 'height' attribute explicitly defined"
if ('height' in style):
return True
return False | 640,041,204,446,125,700 | check if the style of the element has the 'height' attribute explicitly defined | dev/html2text.py | google_has_height | awenz-uw/arlo | python | def google_has_height(style):
if ('height' in style):
return True
return False |
def google_text_emphasis(style):
'return a list of all emphasis modifiers of the element'
emphasis = []
if ('text-decoration' in style):
emphasis.append(style['text-decoration'])
if ('font-style' in style):
emphasis.append(style['font-style'])
if ('font-weight' in style):
emphasis.append(style['font-weight'])
return emphasis | 3,806,007,217,956,230,700 | return a list of all emphasis modifiers of the element | dev/html2text.py | google_text_emphasis | awenz-uw/arlo | python | def google_text_emphasis(style):
emphasis = []
if ('text-decoration' in style):
emphasis.append(style['text-decoration'])
if ('font-style' in style):
emphasis.append(style['font-style'])
if ('font-weight' in style):
emphasis.append(style['font-weight'])
return emphasis |
def google_fixed_width_font(style):
'check if the css of the current element defines a fixed width font'
font_family = ''
if ('font-family' in style):
font_family = style['font-family']
if (('Courier New' == font_family) or ('Consolas' == font_family)):
return True
return False | -2,883,019,796,638,176,000 | check if the css of the current element defines a fixed width font | dev/html2text.py | google_fixed_width_font | awenz-uw/arlo | python | def google_fixed_width_font(style):
    font_family = ''
if ('font-family' in style):
font_family = style['font-family']
if (('Courier New' == font_family) or ('Consolas' == font_family)):
return True
return False |
def list_numbering_start(attrs):
'extract numbering from list element attributes'
if ('start' in attrs):
return (int(attrs['start']) - 1)
else:
return 0 | 1,401,048,153,577,154,300 | extract numbering from list element attributes | dev/html2text.py | list_numbering_start | awenz-uw/arlo | python | def list_numbering_start(attrs):
if ('start' in attrs):
return (int(attrs['start']) - 1)
else:
return 0 |
def escape_md(text):
'Escapes markdown-sensitive characters within other markdown constructs.'
return md_chars_matcher.sub('\\\\\\1', text) | -5,401,994,510,614,652,000 | Escapes markdown-sensitive characters within other markdown constructs. | dev/html2text.py | escape_md | awenz-uw/arlo | python | def escape_md(text):
return md_chars_matcher.sub('\\\\\\1', text) |
def escape_md_section(text, snob=False):
'Escapes markdown-sensitive characters across whole document sections.'
text = md_backslash_matcher.sub('\\\\\\1', text)
if snob:
text = md_chars_matcher_all.sub('\\\\\\1', text)
text = md_dot_matcher.sub('\\1\\\\\\2', text)
text = md_plus_matcher.sub('\\1\\\\\\2', text)
text = md_dash_matcher.sub('\\1\\\\\\2', text)
return text | -1,093,320,531,801,034,600 | Escapes markdown-sensitive characters across whole document sections. | dev/html2text.py | escape_md_section | awenz-uw/arlo | python | def escape_md_section(text, snob=False):
text = md_backslash_matcher.sub('\\\\\\1', text)
if snob:
text = md_chars_matcher_all.sub('\\\\\\1', text)
text = md_dot_matcher.sub('\\1\\\\\\2', text)
text = md_plus_matcher.sub('\\1\\\\\\2', text)
text = md_dash_matcher.sub('\\1\\\\\\2', text)
return text |
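A hedged illustration of the two escaping helpers; the exact characters escaped depend on the module-level md_* regular expressions, which are not shown here, so no exact output is asserted:

    print(escape_md('literal *stars* inside other markdown'))
    print(escape_md_section('1. looks like an ordered list', snob=False))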
def previousIndex(self, attrs):
' returns the index of certain set of attributes (of a link) in the\n self.a list\n\n If the set of attributes is not found, returns None\n '
if (not has_key(attrs, 'href')):
return None
i = (- 1)
for a in self.a:
i += 1
match = 0
if (has_key(a, 'href') and (a['href'] == attrs['href'])):
if (has_key(a, 'title') or has_key(attrs, 'title')):
if (has_key(a, 'title') and has_key(attrs, 'title') and (a['title'] == attrs['title'])):
match = True
else:
match = True
if match:
return i | 6,450,246,326,084,345,000 | returns the index of certain set of attributes (of a link) in the
self.a list
If the set of attributes is not found, returns None | dev/html2text.py | previousIndex | awenz-uw/arlo | python | def previousIndex(self, attrs):
' returns the index of certain set of attributes (of a link) in the\n self.a list\n\n If the set of attributes is not found, returns None\n '
if (not has_key(attrs, 'href')):
return None
i = (- 1)
for a in self.a:
i += 1
match = 0
if (has_key(a, 'href') and (a['href'] == attrs['href'])):
if (has_key(a, 'title') or has_key(attrs, 'title')):
if (has_key(a, 'title') and has_key(attrs, 'title') and (a['title'] == attrs['title'])):
match = True
else:
match = True
if match:
return i |
def handle_emphasis(self, start, tag_style, parent_style):
'handles various text emphases'
tag_emphasis = google_text_emphasis(tag_style)
parent_emphasis = google_text_emphasis(parent_style)
strikethrough = (('line-through' in tag_emphasis) and self.hide_strikethrough)
bold = (('bold' in tag_emphasis) and (not ('bold' in parent_emphasis)))
italic = (('italic' in tag_emphasis) and (not ('italic' in parent_emphasis)))
fixed = (google_fixed_width_font(tag_style) and (not google_fixed_width_font(parent_style)) and (not self.pre))
if start:
if (bold or italic or fixed):
self.emphasis += 1
if strikethrough:
self.quiet += 1
if italic:
self.o(self.emphasis_mark)
self.drop_white_space += 1
if bold:
self.o(self.strong_mark)
self.drop_white_space += 1
if fixed:
self.o('`')
self.drop_white_space += 1
self.code = True
else:
if (bold or italic or fixed):
self.emphasis -= 1
self.space = 0
self.outtext = self.outtext.rstrip()
if fixed:
if self.drop_white_space:
self.drop_last(1)
self.drop_white_space -= 1
else:
self.o('`')
self.code = False
if bold:
if self.drop_white_space:
self.drop_last(2)
self.drop_white_space -= 1
else:
self.o(self.strong_mark)
if italic:
if self.drop_white_space:
self.drop_last(1)
self.drop_white_space -= 1
else:
self.o(self.emphasis_mark)
if ((bold or italic) and (not self.emphasis)):
self.o(' ')
if strikethrough:
self.quiet -= 1 | 7,691,076,359,448,883,000 | handles various text emphases | dev/html2text.py | handle_emphasis | awenz-uw/arlo | python | def handle_emphasis(self, start, tag_style, parent_style):
tag_emphasis = google_text_emphasis(tag_style)
parent_emphasis = google_text_emphasis(parent_style)
strikethrough = (('line-through' in tag_emphasis) and self.hide_strikethrough)
bold = (('bold' in tag_emphasis) and (not ('bold' in parent_emphasis)))
italic = (('italic' in tag_emphasis) and (not ('italic' in parent_emphasis)))
fixed = (google_fixed_width_font(tag_style) and (not google_fixed_width_font(parent_style)) and (not self.pre))
if start:
if (bold or italic or fixed):
self.emphasis += 1
if strikethrough:
self.quiet += 1
if italic:
self.o(self.emphasis_mark)
self.drop_white_space += 1
if bold:
self.o(self.strong_mark)
self.drop_white_space += 1
if fixed:
self.o('`')
self.drop_white_space += 1
self.code = True
else:
if (bold or italic or fixed):
self.emphasis -= 1
self.space = 0
self.outtext = self.outtext.rstrip()
if fixed:
if self.drop_white_space:
self.drop_last(1)
self.drop_white_space -= 1
else:
self.o('`')
self.code = False
if bold:
if self.drop_white_space:
self.drop_last(2)
self.drop_white_space -= 1
else:
self.o(self.strong_mark)
if italic:
if self.drop_white_space:
self.drop_last(1)
self.drop_white_space -= 1
else:
self.o(self.emphasis_mark)
if ((bold or italic) and (not self.emphasis)):
self.o(' ')
if strikethrough:
self.quiet -= 1 |
def google_nest_count(self, style):
'calculate the nesting count of google doc lists'
nest_count = 0
if ('margin-left' in style):
nest_count = (int(style['margin-left'][:(- 2)]) / self.google_list_indent)
return nest_count | 5,612,216,284,702,896,000 | calculate the nesting count of google doc lists | dev/html2text.py | google_nest_count | awenz-uw/arlo | python | def google_nest_count(self, style):
nest_count = 0
if ('margin-left' in style):
nest_count = (int(style['margin-left'][:(- 2)]) / self.google_list_indent)
return nest_count |
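A small sketch of the nesting computation; google_nest_count is a method, so a lightweight stand-in object supplies google_list_indent (a 36px-per-level Google Docs indent step is assumed here):

    class _Opts:
        google_list_indent = 36  # assumed px-per-level indent step

    nest = google_nest_count(_Opts(), {'margin-left': '72px'})
    # 72 / 36 -> 2.0, i.e. the element sits two list levels deep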
def optwrap(self, text):
'Wrap all paragraphs in the provided text.'
if (not self.body_width):
return text
assert wrap, 'Requires Python 2.3.'
result = ''
newlines = 0
for para in text.split('\n'):
if (len(para) > 0):
if (not skipwrap(para)):
result += '\n'.join(wrap(para, self.body_width))
if para.endswith(' '):
result += ' \n'
newlines = 1
else:
result += '\n\n'
newlines = 2
elif (not onlywhite(para)):
result += (para + '\n')
newlines = 1
elif (newlines < 2):
result += '\n'
newlines += 1
return result | -4,554,985,554,149,714,400 | Wrap all paragraphs in the provided text. | dev/html2text.py | optwrap | awenz-uw/arlo | python | def optwrap(self, text):
if (not self.body_width):
return text
assert wrap, 'Requires Python 2.3.'
        result = ''
newlines = 0
for para in text.split('\n'):
if (len(para) > 0):
if (not skipwrap(para)):
result += '\n'.join(wrap(para, self.body_width))
if para.endswith(' '):
result += ' \n'
newlines = 1
else:
result += '\n\n'
newlines = 2
elif (not onlywhite(para)):
result += (para + '\n')
newlines = 1
elif (newlines < 2):
result += '\n'
newlines += 1
return result |
def weight_variable(shape):
'weight_variable generates a weight variable of a given shape.'
initial = tf.truncated_normal(shape, stddev=0.1)
return tf.Variable(initial, name='W') | 1,714,315,251,192,376,300 | weight_variable generates a weight variable of a given shape. | cnn_phi_psi.py | weight_variable | Graveheart/ProteinSSPrediction | python | def weight_variable(shape):
initial = tf.truncated_normal(shape, stddev=0.1)
return tf.Variable(initial, name='W') |
def bias_variable(shape):
'bias_variable generates a bias variable of a given shape.'
initial = tf.constant(0.1, shape=shape)
return tf.Variable(initial, name='B') | -9,042,792,790,202,244,000 | bias_variable generates a bias variable of a given shape. | cnn_phi_psi.py | bias_variable | Graveheart/ProteinSSPrediction | python | def bias_variable(shape):
initial = tf.constant(0.1, shape=shape)
return tf.Variable(initial, name='B') |
def conv1d(x, W):
'conv1d returns a 1d convolution layer.'
return tf.nn.conv1d(x, W, 1, 'SAME') | -6,424,299,776,053,027,000 | conv1d returns a 1d convolution layer. | cnn_phi_psi.py | conv1d | Graveheart/ProteinSSPrediction | python | def conv1d(x, W):
return tf.nn.conv1d(x, W, 1, 'SAME') |
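A sketch tying the three helpers together into a single 1-D convolution layer (TensorFlow 1.x API, matching tf.truncated_normal and tf.nn.conv1d used above; the shapes are illustrative, not taken from the project):

    import tensorflow as tf  # TF 1.x style, as assumed by the helpers above

    x = tf.placeholder(tf.float32, shape=[None, 700, 21])  # batch, sequence length, input channels
    W = weight_variable([11, 21, 64])                       # filter width, in channels, out channels
    b = bias_variable([64])
    h = tf.nn.relu(conv1d(x, W) + b)                        # -> [None, 700, 64]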
def convert_to_degrees(arr):
    'Convert all phi and psi angles to degrees'
arr[0] = math.degrees(arr[0])
arr[1] = math.degrees(arr[1])
    return arr | -3,319,070,818,183,693,000 | Convert all phi and psi angles to degrees | cnn_phi_psi.py | convert_to_degrees | Graveheart/ProteinSSPrediction | python | def convert_to_degrees(arr):
arr[0] = math.degrees(arr[0])
arr[1] = math.degrees(arr[1])
return arr |
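A quick check of the conversion (assumes the function above and Python's math module; note that it mutates the first two entries in place and returns the same object):

    import math

    angles = [math.pi, -math.pi / 2]     # phi, psi in radians
    print(convert_to_degrees(angles))    # [180.0, -90.0]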
def __init__(self, feed):
'\n Constructor\n '
self.feed = feed
self.cache = []
if os.path.isfile(CACHE_FILE):
self.cache = [line.strip() for line in codecs.open(CACHE_FILE, 'r', 'utf-8').readlines()] | 5,810,451,100,013,958,000 | Constructor | feedputter.py | __init__ | amake/puttools-py | python | def __init__(self, feed):
'\n \n '
self.feed = feed
self.cache = []
if os.path.isfile(CACHE_FILE):
self.cache = [line.strip() for line in codecs.open(CACHE_FILE, 'r', 'utf-8').readlines()] |
def get_to(self, target, method):
'\n Fetch linked torrents and save to the specified output folder.\n '
for item in self.__get_items():
title = item.find('title').text.strip()
link = item.find('link').text
log(('Found ' + title))
if (title in self.cache):
log('Already gotten. Skipping.')
continue
log('Getting ... ')
if (not method(link, target, title)):
continue
with codecs.open(CACHE_FILE, 'a', 'utf-8') as tmp:
tmp.write((title + '\n'))
log('Done') | 642,667,914,975,769,600 | Fetch linked torrents and save to the specified output folder. | feedputter.py | get_to | amake/puttools-py | python | def get_to(self, target, method):
'\n \n '
for item in self.__get_items():
title = item.find('title').text.strip()
link = item.find('link').text
log(('Found ' + title))
if (title in self.cache):
log('Already gotten. Skipping.')
continue
log('Getting ... ')
if (not method(link, target, title)):
continue
with codecs.open(CACHE_FILE, 'a', 'utf-8') as tmp:
tmp.write((title + '\n'))
log('Done') |
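A hedged usage sketch for get_to; the class name FeedGetter and the downloader callback are hypothetical (only __init__ and get_to are shown), and get_to expects a method(link, target, title) callable whose falsy return skips caching:

    def record_link(link, target, title):
        # hypothetical downloader: just log instead of fetching
        print('would fetch %s -> %s/%s' % (link, target, title))
        return True

    getter = FeedGetter('https://example.com/feed.rss')  # hypothetical class name
    getter.get_to('/tmp/torrents', record_link)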
def __init__(self, index, previous_hash, timestamp=None, forger=None, transactions: List[Transaction]=None, signature=None, **kwargs):
'\n Create block\n :param index: the block index at the chain (0 for the genesis block and so on)\n :param previous_hash: hash of previous block\n :param timestamp: block creation time\n :param forger: public_address of forger wallet\n :param transactions: list of transactions\n :param signature: signature of the block hash by the forger\n '
if (timestamp is None):
timestamp = time()
if (transactions is None):
transactions = []
self.index = index
self.previous_hash = previous_hash
self.timestamp = timestamp
self.forger = forger
self.transactions = transactions
self.signature = signature | -2,504,936,648,075,513,300 | Create block
:param index: the block index at the chain (0 for the genesis block and so on)
:param previous_hash: hash of previous block
:param timestamp: block creation time
:param forger: public_address of forger wallet
:param transactions: list of transactions
:param signature: signature of the block hash by the forger | src/blockchain/block.py | __init__ | thewh1teagle/yoyocoin | python | def __init__(self, index, previous_hash, timestamp=None, forger=None, transactions: List[Transaction]=None, signature=None, **kwargs):
'\n Create block\n :param index: the block index at the chain (0 for the genesis block and so on)\n :param previous_hash: hash of previous block\n :param timestamp: block creation time\n :param forger: public_address of forger wallet\n :param transactions: list of transactions\n :param signature: signature of the block hash by the forger\n '
if (timestamp is None):
timestamp = time()
if (transactions is None):
transactions = []
self.index = index
self.previous_hash = previous_hash
self.timestamp = timestamp
self.forger = forger
self.transactions = transactions
self.signature = signature |
def hash(self):
'\n Calculate the block hash (block number, previous hash, transactions)\n :return: String hash of block data (hex)\n '
block_dict = self._raw_data()
block_string = json.dumps(block_dict, sort_keys=True).encode()
return hashlib.sha256(block_string).hexdigest() | 6,995,041,676,680,614,000 | Calculate the block hash (block number, previous hash, transactions)
:return: String hash of block data (hex) | src/blockchain/block.py | hash | thewh1teagle/yoyocoin | python | def hash(self):
'\n Calculate the block hash (block number, previous hash, transactions)\n :return: String hash of block data (hex)\n '
block_dict = self._raw_data()
block_string = json.dumps(block_dict, sort_keys=True).encode()
return hashlib.sha256(block_string).hexdigest() |
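The hash is just SHA-256 over the JSON-serialized raw block data; a standalone sketch of the same idea (the real field set comes from _raw_data(), which is not shown, so this dictionary is illustrative):

    import hashlib
    import json

    raw = {'index': 1, 'previous_hash': 'ab12...', 'timestamp': 1600000000.0,
           'forger': '04deadbeef...', 'transactions': []}
    block_hash = hashlib.sha256(json.dumps(raw, sort_keys=True).encode()).hexdigest()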
def add_transaction(self, transaction: Transaction):
"\n Add transaction to block\n :param transaction: Transaction object (see transaction.py)\n :raise Validation error if transaction isn't valid.\n :return: None\n "
self.transactions.append(transaction) | -7,499,446,428,048,659,000 | Add transaction to block
:param transaction: Transaction object (see transaction.py)
:raise Validation error if transaction isn't valid.
:return: None | src/blockchain/block.py | add_transaction | thewh1teagle/yoyocoin | python | def add_transaction(self, transaction: Transaction):
"\n Add transaction to block\n :param transaction: Transaction object (see transaction.py)\n :raise Validation error if transaction isn't valid.\n :return: None\n "
self.transactions.append(transaction) |
def is_signature_verified(self) -> bool:
'\n Check if block signature is valid\n :return: bool\n '
try:
return self.forger_public_key.verify(self.signature, self.hash().encode())
except ecdsa.BadSignatureError:
return False | 1,621,767,926,656,757,800 | Check if block signature is valid
:return: bool | src/blockchain/block.py | is_signature_verified | thewh1teagle/yoyocoin | python | def is_signature_verified(self) -> bool:
'\n Check if block signature is valid\n :return: bool\n '
try:
return self.forger_public_key.verify(self.signature, self.hash().encode())
except ecdsa.BadSignatureError:
return False |
def create_signature(self, forger_private_address: str):
'\n Create block signature for this block\n :param forger_private_address: base64(wallet private address)\n :return: None\n '
forger_private_key_string = bytes.fromhex(forger_private_address)
forger_private_key = ecdsa.SigningKey.from_string(forger_private_key_string, curve=ECDSA_CURVE)
if (forger_private_key.get_verifying_key() != self.forger_public_key):
raise ValueError('The forger is not the one signing')
self.signature = self.sign(forger_private_key) | -4,406,126,929,190,984,000 | Create block signature for this block
:param forger_private_address: base64(wallet private address)
:return: None | src/blockchain/block.py | create_signature | thewh1teagle/yoyocoin | python | def create_signature(self, forger_private_address: str):
'\n Create block signature for this block\n :param forger_private_address: base64(wallet private address)\n :return: None\n '
forger_private_key_string = bytes.fromhex(forger_private_address)
forger_private_key = ecdsa.SigningKey.from_string(forger_private_key_string, curve=ECDSA_CURVE)
if (forger_private_key.get_verifying_key() != self.forger_public_key):
raise ValueError('The forger is not the one signing')
self.signature = self.sign(forger_private_key) |
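The signing/verification pair reduces to ECDSA over the block hash; a self-contained sketch with the ecdsa package (NIST192p stands in for the project's ECDSA_CURVE, whose value is not shown here):

    import ecdsa

    sk = ecdsa.SigningKey.generate(curve=ecdsa.NIST192p)  # forger's private key
    vk = sk.get_verifying_key()                           # published as the forger identity
    block_hash = 'deadbeef' * 8                           # stand-in for Block.hash()
    signature = sk.sign(block_hash.encode())
    assert vk.verify(signature, block_hash.encode())      # what is_signature_verified() checks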
def validate(self, blockchain_state, is_test_net=False):
'\n Validate block\n 1. check block index (is the next block in the blockchain state)\n 2. check previous hash (is the hash of the previous block)\n 3. check forger wallet (is lottery member?)\n 4. check block signature\n 5. validate transactions\n\n :param is_test_net: if True ignore InsufficientBalanceError and NonLotteryMemberError\n :param blockchain_state: Blockchain state object\n :raises ValidationError\n :return: None\n '
if ((self.index == 0) and (blockchain_state.length == 0)):
genesis_is_valid = ((self.forger == DEVELOPER_KEY) and self.is_signature_verified())
if (not genesis_is_valid):
raise GenesisIsNotValidError()
return
if (self.index != blockchain_state.length):
raise NonSequentialBlockIndexError(f'block index not sequential index: {self.index} chain: {blockchain_state.length}')
if (self.previous_hash != blockchain_state.last_block_hash):
raise NonMatchingHashError('previous hash not match previous block hash')
forger_wallet = blockchain_state.wallets.get(self.forger, None)
if ((forger_wallet is None) or (forger_wallet.balance < 100)):
if (not is_test_net):
raise NonLotteryMemberError()
if (not self.is_signature_verified()):
raise ValidationError('invalid signature')
for transaction in self.transactions:
transaction.validate(blockchain_state=blockchain_state, is_test_net=is_test_net) | -8,639,253,438,226,391,000 | Validate block
1. check block index (is the next block in the blockchain state)
2. check previous hash (is the hash of the previous block)
3. check forger wallet (is lottery member?)
4. check block signature
5. validate transactions
:param is_test_net: if True ignore InsufficientBalanceError and NonLotteryMemberError
:param blockchain_state: Blockchain state object
:raises ValidationError
:return: None | src/blockchain/block.py | validate | thewh1teagle/yoyocoin | python | def validate(self, blockchain_state, is_test_net=False):
'\n Validate block\n 1. check block index (is the next block in the blockchain state)\n 2. check previous hash (is the hash of the previous block)\n 3. check forger wallet (is lottery member?)\n 4. check block signature\n 5. validate transactions\n\n :param is_test_net: if True ignore InsufficientBalanceError and NonLotteryMemberError\n :param blockchain_state: Blockchain state object\n :raises ValidationError\n :return: None\n '
if ((self.index == 0) and (blockchain_state.length == 0)):
genesis_is_valid = ((self.forger == DEVELOPER_KEY) and self.is_signature_verified())
if (not genesis_is_valid):
raise GenesisIsNotValidError()
return
if (self.index != blockchain_state.length):
raise NonSequentialBlockIndexError(f'block index not sequential index: {self.index} chain: {blockchain_state.length}')
if (self.previous_hash != blockchain_state.last_block_hash):
raise NonMatchingHashError('previous hash not match previous block hash')
forger_wallet = blockchain_state.wallets.get(self.forger, None)
if ((forger_wallet is None) or (forger_wallet.balance < 100)):
if (not is_test_net):
raise NonLotteryMemberError()
if (not self.is_signature_verified()):
raise ValidationError('invalid signature')
for transaction in self.transactions:
transaction.validate(blockchain_state=blockchain_state, is_test_net=is_test_net) |
def transform_X(self, X):
'\n transforms X\n\n :param\n X: Input X\n :return\n transformed X\n '
raise NotImplementedError() | 5,377,569,359,033,843,000 | transforms X
:param
X: Input X
:return
transformed X | GP/data_transformation.py | transform_X | VirgiAgl/V_savigp | python | def transform_X(self, X):
'\n transforms X\n\n :param\n X: Input X\n :return\n transformed X\n '
raise NotImplementedError() |
def transform_Y(self, Y):
'\n transforms Y\n\n :param\n Y: Input Y\n :return\n transformed Y\n '
raise NotImplementedError() | 4,951,803,811,033,387,000 | transforms Y
:param
Y: Input Y
:return
transformed Y | GP/data_transformation.py | transform_Y | VirgiAgl/V_savigp | python | def transform_Y(self, Y):
'\n transforms Y\n\n :param\n Y: Input Y\n :return\n transformed Y\n '
raise NotImplementedError() |
def untransform_X(self, X):
'\n Untransforms X to its original values\n\n :param\n X: transformed X\n :return\n untransformed X\n '
raise NotImplementedError() | -280,706,843,099,893,820 | Untransforms X to its original values
:param
X: transformed X
:return
untransformed X | GP/data_transformation.py | untransform_X | VirgiAgl/V_savigp | python | def untransform_X(self, X):
'\n Untransforms X to its original values\n\n :param\n X: transformed X\n :return\n untransformed X\n '
raise NotImplementedError() |
def untransform_Y(self, Y):
        '\n        Untransforms Y\n        :param\n         Y: transformed Y\n        :return\n         untransformed Y\n        '
raise NotImplementedError() | -6,146,962,964,687,816,000 | Untransforms Y
:param
Y: transformed Y
:return
         untransformed Y | GP/data_transformation.py | untransform_Y | VirgiAgl/V_savigp | python | def untransform_Y(self, Y):
        '\n        Untransforms Y\n        :param\n         Y: transformed Y\n        :return\n         untransformed Y\n        '
raise NotImplementedError() |
def untransform_NLPD(self, NLPD):
        '\n        Untransforms NLPD to the original Y space\n\n        :param\n         NLPD: transformed NLPD\n        :return\n         untransformed NLPD\n        '
        raise NotImplementedError() | -1,423,142,593,506,293,000 | Untransforms NLPD to the original Y space
:param
NLPD: transformed NLPD
:return
untransformed NLPD | GP/data_transformation.py | untransform_NLPD | VirgiAgl/V_savigp | python | def untransform_NLPD(self, NLPD):
        '\n        Untransforms NLPD to the original Y space\n\n        :param\n         NLPD: transformed NLPD\n        :return\n         untransformed NLPD\n        '
raise NotImplementedError() |
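A minimal concrete subclass illustrating the intended contract (hypothetical: the parent class name DataTransformation is assumed from the file name, and a real implementation such as a mean/standardizing transform would do actual work):

    class IdentityTransformation(DataTransformation):
        """Transformation that leaves X, Y and NLPD untouched."""

        def transform_X(self, X):
            return X

        def transform_Y(self, Y):
            return Y

        def untransform_X(self, X):
            return X

        def untransform_Y(self, Y):
            return Y

        def untransform_NLPD(self, NLPD):
            return NLPD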
def _args_useful_check(self):
        '\n        Needs the SQL which maps the target features and arguments\n        :return:\n        '
arg_msg_list = FeatureFieldRel.objects.filter(feature_name__in=self.target_features, is_delete=False)
for arg_msg in arg_msg_list:
if (arg_msg.raw_field_name in self.arguments.keys()):
if (self.ret_msg and (arg_msg.feature_name == self.ret_msg[(- 1)]['target_field_name'])):
sub_msg = self.ret_msg[(- 1)]
if (arg_msg.feature_name == sub_msg['target_field_name']):
sub_msg['arguments'].update({arg_msg.raw_field_name: self.arguments[arg_msg.raw_field_name]})
self.ret_msg[(- 1)] = sub_msg
else:
temp_msg = {'data_identity': arg_msg.data_identity, 'target_field_name': arg_msg.feature_name, 'arguments': {arg_msg.raw_field_name: self.arguments[arg_msg.raw_field_name]}}
self.ret_msg.append(temp_msg)
else:
logger.error(('Response from the function of `judge._args_useful_check`, error_msg=%s, rel_err_msg=%s' % (ArgumentsAvailableError.message, 'Arguments are not enough to get all res_keys')), exc_info=True)
                raise ArgumentsAvailableError | 2,747,303,079,021,362,700 | Needs the SQL which maps the target features and arguments
:return: | procuratorate/dataocean_judger.py | _args_useful_check | diudiu/featurefactory | python | def _args_useful_check(self):
        '\n        Needs the SQL which maps the target features and arguments\n        :return:\n        '
arg_msg_list = FeatureFieldRel.objects.filter(feature_name__in=self.target_features, is_delete=False)
for arg_msg in arg_msg_list:
if (arg_msg.raw_field_name in self.arguments.keys()):
if (self.ret_msg and (arg_msg.feature_name == self.ret_msg[(- 1)]['target_field_name'])):
sub_msg = self.ret_msg[(- 1)]
if (arg_msg.feature_name == sub_msg['target_field_name']):
sub_msg['arguments'].update({arg_msg.raw_field_name: self.arguments[arg_msg.raw_field_name]})
self.ret_msg[(- 1)] = sub_msg
else:
temp_msg = {'data_identity': arg_msg.data_identity, 'target_field_name': arg_msg.feature_name, 'arguments': {arg_msg.raw_field_name: self.arguments[arg_msg.raw_field_name]}}
self.ret_msg.append(temp_msg)
else:
logger.error(('Response from the function of `judge._args_useful_check`, error_msg=%s, rel_err_msg=%s' % (ArgumentsAvailableError.message, 'Arguments are not enough to get all res_keys')), exc_info=True)
raise ArgumentsAvailableError |
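A sketch of the message structure the loop above accumulates (hypothetical values; FeatureFieldRel is a Django model, so real entries come from the database mapping features to raw argument fields):

    # after _args_useful_check() runs, self.ret_msg groups raw arguments per target feature:
    ret_msg = [
        {
            'data_identity': 'id_card_ocr',            # hypothetical data source id
            'target_field_name': 'applicant_age',      # hypothetical feature name
            'arguments': {'id_card_no': '110...', 'user_name': 'Alice'},
        },
    ]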
@pytest.fixture
def j1713_profile():
'\n Numpy array of J1713+0747 profile.\n '
path = 'psrsigsim/data/J1713+0747_profile.npy'
return np.load(path) | 959,887,131,043,089,500 | Numpy array of J1713+0747 profile. | tests/test_simulate.py | j1713_profile | bshapiroalbert/PsrSigSim | python | @pytest.fixture
def j1713_profile():
'\n \n '
path = 'psrsigsim/data/J1713+0747_profile.npy'
return np.load(path) |
@pytest.fixture
def PSRfits():
'\n Fixture psrfits class\n '
fitspath = 'data/test.fits'
tempfits = 'data/B1855+09.L-wide.PUPPI.11y.x.sum.sm'
return PSRFITS(path=fitspath, template=tempfits, fits_mode='copy') | 6,057,200,503,390,064,000 | Fixture psrfits class | tests/test_simulate.py | PSRfits | bshapiroalbert/PsrSigSim | python | @pytest.fixture
def PSRfits():
'\n \n '
fitspath = 'data/test.fits'
tempfits = 'data/B1855+09.L-wide.PUPPI.11y.x.sum.sm'
return PSRFITS(path=fitspath, template=tempfits, fits_mode='copy') |
@pytest.fixture
def param_dict():
'\n Fixture parameter dictionary.\n '
pdict = {'fcent': 430, 'bandwidth': 100, 'sample_rate': 1.5625, 'dtype': np.float32, 'Npols': 1, 'Nchan': 64, 'sublen': 2.0, 'fold': True, 'period': 1.0, 'Smean': 1.0, 'profiles': [0.5, 0.5, 1.0], 'tobs': 4.0, 'name': 'J0000+0000', 'dm': 10.0, 'tau_d': 5e-08, 'tau_d_ref_f': 1500.0, 'aperture': 100.0, 'area': 5500.0, 'Tsys': 35.0, 'tscope_name': 'TestScope', 'system_name': 'TestSys', 'rcvr_fcent': 430, 'rcvr_bw': 100, 'rcvr_name': 'TestRCVR', 'backend_samprate': 1.5625, 'backend_name': 'TestBack', 'tempfile': None, 'parfile': None}
return pdict | 3,766,590,244,466,666,000 | Fixture parameter dictionary. | tests/test_simulate.py | param_dict | bshapiroalbert/PsrSigSim | python | @pytest.fixture
def param_dict():
'\n \n '
pdict = {'fcent': 430, 'bandwidth': 100, 'sample_rate': 1.5625, 'dtype': np.float32, 'Npols': 1, 'Nchan': 64, 'sublen': 2.0, 'fold': True, 'period': 1.0, 'Smean': 1.0, 'profiles': [0.5, 0.5, 1.0], 'tobs': 4.0, 'name': 'J0000+0000', 'dm': 10.0, 'tau_d': 5e-08, 'tau_d_ref_f': 1500.0, 'aperture': 100.0, 'area': 5500.0, 'Tsys': 35.0, 'tscope_name': 'TestScope', 'system_name': 'TestSys', 'rcvr_fcent': 430, 'rcvr_bw': 100, 'rcvr_name': 'TestRCVR', 'backend_samprate': 1.5625, 'backend_name': 'TestBack', 'tempfile': None, 'parfile': None}
return pdict |
@pytest.fixture
def simulation():
'\n Fixture Simulation class. Cannot be the only simulation tested.\n '
sim = Simulation(fcent=430, bandwidth=100, sample_rate=((1.0 * 2048) * (10 ** (- 6))), dtype=np.float32, Npols=1, Nchan=64, sublen=2.0, fold=True, period=1.0, Smean=1.0, profiles=None, tobs=4.0, name='J0000+0000', dm=10.0, tau_d=5e-08, tau_d_ref_f=1500.0, aperture=100.0, area=5500.0, Tsys=35.0, tscope_name='TestScope', system_name='TestSys', rcvr_fcent=430, rcvr_bw=100, rcvr_name='TestRCVR', backend_samprate=1.5625, backend_name='TestBack', tempfile='data/B1855+09.L-wide.PUPPI.11y.x.sum.sm', parfile=None, psrdict=None)
return sim | -6,312,856,719,583,736,000 | Fixture Simulation class. Cannot be the only simulation tested. | tests/test_simulate.py | simulation | bshapiroalbert/PsrSigSim | python | @pytest.fixture
def simulation():
'\n \n '
sim = Simulation(fcent=430, bandwidth=100, sample_rate=((1.0 * 2048) * (10 ** (- 6))), dtype=np.float32, Npols=1, Nchan=64, sublen=2.0, fold=True, period=1.0, Smean=1.0, profiles=None, tobs=4.0, name='J0000+0000', dm=10.0, tau_d=5e-08, tau_d_ref_f=1500.0, aperture=100.0, area=5500.0, Tsys=35.0, tscope_name='TestScope', system_name='TestSys', rcvr_fcent=430, rcvr_bw=100, rcvr_name='TestRCVR', backend_samprate=1.5625, backend_name='TestBack', tempfile='data/B1855+09.L-wide.PUPPI.11y.x.sum.sm', parfile=None, psrdict=None)
return sim |
def test_initsim(param_dict):
'\n Test initializing the simulation from dictionary, parfile\n '
sim = Simulation(psrdict=param_dict)
with pytest.raises(NotImplementedError):
sim2 = Simulation(parfile='testpar.par') | 5,675,763,485,965,984,000 | Test initializing the simulation from dictionary, parfile | tests/test_simulate.py | test_initsim | bshapiroalbert/PsrSigSim | python | def test_initsim(param_dict):
'\n \n '
sim = Simulation(psrdict=param_dict)
with pytest.raises(NotImplementedError):
sim2 = Simulation(parfile='testpar.par') |
def test_initsig(simulation):
'\n Test init_signal function.\n '
simulation.init_signal()
simulation.init_signal(from_template=True) | 8,913,096,984,652,106,000 | Test init_signal function. | tests/test_simulate.py | test_initsig | bshapiroalbert/PsrSigSim | python | def test_initsig(simulation):
'\n \n '
simulation.init_signal()
simulation.init_signal(from_template=True) |
def test_initprof(simulation, j1713_profile):
'\n Test init_profile function.\n '
simulation.init_profile()
with pytest.raises(NotImplementedError):
def gprof(x, p0):
return (p0[0] * np.exp(((- 0.5) * (((x - p0[1]) / p0[2]) ** 2))))
simulation._profiles = gprof
simulation.init_profile()
simulation._profiles = [0.5, 0.5, 1.0]
simulation.init_profile()
simulation._profiles = j1713_profile
simulation.init_profile()
with pytest.raises(RuntimeError):
simulation._profiles = [0.5, 0.5]
simulation.init_profile()
pr = DataProfile(j1713_profile, phases=None)
print(type(pr), pr)
simulation._profiles = pr
simulation.init_profile() | 6,492,907,136,872,744,000 | Test init_profile function. | tests/test_simulate.py | test_initprof | bshapiroalbert/PsrSigSim | python | def test_initprof(simulation, j1713_profile):
'\n \n '
simulation.init_profile()
with pytest.raises(NotImplementedError):
def gprof(x, p0):
return (p0[0] * np.exp(((- 0.5) * (((x - p0[1]) / p0[2]) ** 2))))
simulation._profiles = gprof
simulation.init_profile()
simulation._profiles = [0.5, 0.5, 1.0]
simulation.init_profile()
simulation._profiles = j1713_profile
simulation.init_profile()
with pytest.raises(RuntimeError):
simulation._profiles = [0.5, 0.5]
simulation.init_profile()
pr = DataProfile(j1713_profile, phases=None)
print(type(pr), pr)
simulation._profiles = pr
simulation.init_profile() |
def test_initpsr(simulation):
'\n Test init_pulsar function.\n '
simulation.init_pulsar() | 5,682,775,826,932,213,000 | Test init_pulsar function. | tests/test_simulate.py | test_initpsr | bshapiroalbert/PsrSigSim | python | def test_initpsr(simulation):
'\n \n '
simulation.init_pulsar() |
def test_initism(simulation):
'\n Test init_ism function.\n '
simulation.init_ism() | -1,547,126,899,779,636,200 | Test init_ism function. | tests/test_simulate.py | test_initism | bshapiroalbert/PsrSigSim | python | def test_initism(simulation):
'\n \n '
simulation.init_ism() |
def test_inittscope(simulation):
'\n Test init_telescope function.\n '
simulation._tscope_name = 'GBT'
simulation.init_telescope()
simulation._tscope_name = 'Arecibo'
simulation.init_telescope()
simulation._tscope_name = 'TestScope'
simulation.init_telescope()
simulation._system_name = ['Sys1', 'Sys2']
simulation._rcvr_fcent = [430, 800]
simulation._rcvr_bw = [100, 200]
simulation._rcvr_name = ['R1', 'R2']
simulation._backend_samprate = [1.5625, 12.5]
simulation._backend_name = ['B1', 'B2']
simulation.init_telescope()
with pytest.raises(RuntimeError):
simulation._backend_name = ['B1', 'B2', 'B3']
simulation.init_telescope() | 4,775,317,322,399,384,000 | Test init_telescope function. | tests/test_simulate.py | test_inittscope | bshapiroalbert/PsrSigSim | python | def test_inittscope(simulation):
'\n \n '
simulation._tscope_name = 'GBT'
simulation.init_telescope()
simulation._tscope_name = 'Arecibo'
simulation.init_telescope()
simulation._tscope_name = 'TestScope'
simulation.init_telescope()
simulation._system_name = ['Sys1', 'Sys2']
simulation._rcvr_fcent = [430, 800]
simulation._rcvr_bw = [100, 200]
simulation._rcvr_name = ['R1', 'R2']
simulation._backend_samprate = [1.5625, 12.5]
simulation._backend_name = ['B1', 'B2']
simulation.init_telescope()
with pytest.raises(RuntimeError):
simulation._backend_name = ['B1', 'B2', 'B3']
simulation.init_telescope() |
def test_simulate(simulation):
'\n Test simulate function.\n '
simulation.simulate() | 1,285,313,123,298,872,600 | Test simulate function. | tests/test_simulate.py | test_simulate | bshapiroalbert/PsrSigSim | python | def test_simulate(simulation):
'\n \n '
simulation.simulate() |