code
string | signature
string | docstring
string | loss_without_docstring
float64 | loss_with_docstring
float64 | factor
float64 |
---|---|---|---|---|---|
try:
kwargs = {"acl": None, "ephemeral": params.ephemeral, "sequence": params.sequence}
if not self.in_transaction:
kwargs["makepath"] = params.recursive
if params.asynchronous and not self.in_transaction:
self.client_context.create_async(params.path, decoded(params.value), **kwargs)
else:
self.client_context.create(params.path, decoded(params.value), **kwargs)
except NodeExistsError:
self.show_output("Path %s exists", params.path)
except NoNodeError:
self.show_output("Missing path in %s (try recursive?)", params.path) | def do_create(self, params) | \x1b[1mNAME\x1b[0m
create - Creates a znode
\x1b[1mSYNOPSIS\x1b[0m
create <path> <value> [ephemeral] [sequence] [recursive] [async]
\x1b[1mOPTIONS\x1b[0m
* ephemeral: make the znode ephemeral (default: false)
* sequence: make the znode sequential (default: false)
* recursive: recursively create the path (default: false)
* async: don't block waiting on the result (default: false)
\x1b[1mEXAMPLES\x1b[0m
> create /foo 'bar'
# create an ephemeral znode
> create /foo1 '' true
# create an ephemeral|sequential znode
> create /foo1 '' true true
# recursively create a path
> create /very/long/path/here '' false false true
# check the new subtree
> tree
.
├── zookeeper
│ ├── config
│ ├── quota
├── very
│ ├── long
│ │ ├── path
│ │ │ ├── here | 4.722349 | 4.251451 | 1.110762 |
self.set(params.path, decoded(params.value), version=params.version) | def do_set(self, params) | \x1b[1mNAME\x1b[0m
set - Updates the znode's value
\x1b[1mSYNOPSIS\x1b[0m
set <path> <value> [version]
\x1b[1mOPTIONS\x1b[0m
* version: only update if version matches (default: -1)
\x1b[1mEXAMPLES\x1b[0m
> set /foo 'bar'
> set /foo 'verybar' 3 | 12.335482 | 12.006774 | 1.027377 |
complete_value = partial(complete_values, ["updated-value"])
complete_version = partial(complete_values, [str(i) for i in range(1, 11)])
completers = [self._complete_path, complete_value, complete_version]
return complete(completers, cmd_param_text, full_cmd, *rest) | def complete_set(self, cmd_param_text, full_cmd, *rest) | TODO: suggest the old value & the current version | 4.099356 | 3.483047 | 1.176945 |
self.set(params.path, None, version=params.version) | def do_zero(self, params) | \x1b[1mNAME\x1b[0m
zero - Sets the znode's value to None (no bytes)
\x1b[1mSYNOPSIS\x1b[0m
zero <path> [version]
\x1b[1mOPTIONS\x1b[0m
* version: only update if version matches (default: -1)
\x1b[1mEXAMPLES\x1b[0m
> zero /foo
> zero /foo 3 | 17.18729 | 11.350548 | 1.514226 |
if self.in_transaction:
self.client_context.set_data(path, value, version=version)
else:
self.client_context.set(path, value, version=version) | def set(self, path, value, version) | sets a znode's data | 3.423719 | 3.35494 | 1.020501 |
for path in params.paths:
try:
self.client_context.delete(path)
except NotEmptyError:
self.show_output("%s is not empty.", path)
except NoNodeError:
self.show_output("%s doesn't exist.", path) | def do_rm(self, params) | \x1b[1mNAME\x1b[0m
rm - Remove the znode
\x1b[1mSYNOPSIS\x1b[0m
rm <path> [path] [path] ... [path]
\x1b[1mEXAMPLES\x1b[0m
> rm /foo
> rm /foo /bar | 4.243081 | 4.220379 | 1.005379 |
if not self.in_transaction:
return
self.client_context.check(params.path, params.version) | def do_check(self, params) | \x1b[1mNAME\x1b[0m
check - Checks that a path is at a given version (only works within a transaction)
\x1b[1mSYNOPSIS\x1b[0m
check <path> <version>
\x1b[1mEXAMPLES\x1b[0m
> txn 'create /foo "start"' 'check /foo 0' 'set /foo "end"' 'rm /foo 1' | 12.358463 | 9.417613 | 1.312271 |
try:
with self.transaction():
for cmd in params.cmds:
try:
self.onecmd(cmd)
except AttributeError:
# silently swallow unrecognized commands
pass
except BadVersionError:
self.show_output("Bad version.")
except NoNodeError:
self.show_output("Missing path.")
except NodeExistsError:
self.show_output("One of the paths exists.") | def do_txn(self, params) | \x1b[1mNAME\x1b[0m
txn - Create and execute a transaction
\x1b[1mSYNOPSIS\x1b[0m
txn <cmd> [cmd] [cmd] ... [cmd]
\x1b[1mDESCRIPTION\x1b[0m
Allowed cmds are check, create, rm and set. Check parameters are:
check <path> <version>
For create, rm and set see their help menu for their respective parameters.
\x1b[1mEXAMPLES\x1b[0m
> txn 'create /foo "start"' 'check /foo 0' 'set /foo "end"' 'rm /foo 1' | 5.833329 | 5.619456 | 1.038059 |
fmt_str =
content = fmt_str % (
self._zk.client_state,
self._zk.sessionid,
list(self._zk.auth_data),
self._zk.protocol_version,
self._zk.xid,
self._zk.last_zxid,
self._zk.session_timeout,
self._zk.client,
self._zk.server,
",".join(self._zk.data_watches),
",".join(self._zk.child_watches)
)
output = get_matching(content, params.match)
self.show_output(output) | def do_session_info(self, params) | \x1b[1mNAME\x1b[0m
session_info - Shows information about the current session
\x1b[1mSYNOPSIS\x1b[0m
session_info [match]
\x1b[1mOPTIONS\x1b[0m
* match: only include lines that match (default: '')
\x1b[1mEXAMPLES\x1b[0m
> session_info
state=CONNECTED
xid=4
last_zxid=0x000000505f8be5b3
timeout=10000
client=('127.0.0.1', 60348)
server=('127.0.0.1', 2181) | 4.99808 | 4.061014 | 1.230747 |
hosts = params.hosts if params.hosts != "" else None
if hosts is not None and invalid_hosts(hosts):
self.show_output("List of hosts has the wrong syntax.")
return
if self._zk is None:
self._zk = XClient()
try:
content = get_matching(self._zk.mntr(hosts), params.match)
self.show_output(content)
except XClient.CmdFailed as ex:
self.show_output(str(ex)) | def do_mntr(self, params) | \x1b[1mNAME\x1b[0m
mntr - Executes the mntr four-letter command
\x1b[1mSYNOPSIS\x1b[0m
mntr [hosts] [match]
\x1b[1mOPTIONS\x1b[0m
* hosts: the hosts to connect to (default: the current connected host)
* match: only output lines that include the given string (default: '')
\x1b[1mEXAMPLES\x1b[0m
> mntr
zk_version 3.5.0--1, built on 11/14/2014 10:45 GMT
zk_min_latency 0
zk_max_latency 8
zk_avg_latency 0 | 5.830139 | 5.879459 | 0.991611 |
hosts = params.hosts if params.hosts != "" else None
if hosts is not None and invalid_hosts(hosts):
self.show_output("List of hosts has the wrong syntax.")
return
if self._zk is None:
self._zk = XClient()
try:
content = get_matching(self._zk.cons(hosts), params.match)
self.show_output(content)
except XClient.CmdFailed as ex:
self.show_output(str(ex)) | def do_cons(self, params) | \x1b[1mNAME\x1b[0m
cons - Executes the cons four-letter command
\x1b[1mSYNOPSIS\x1b[0m
cons [hosts] [match]
\x1b[1mOPTIONS\x1b[0m
* hosts: the hosts to connect to (default: the current connected host)
* match: only output lines that include the given string (default: '')
\x1b[1mEXAMPLES\x1b[0m
> cons
/127.0.0.1:40535[0](queued=0,recved=1,sent=0)
... | 6.258444 | 6.746768 | 0.927621 |
hosts = params.hosts if params.hosts != "" else None
if hosts is not None and invalid_hosts(hosts):
self.show_output("List of hosts has the wrong syntax.")
return
if self._zk is None:
self._zk = XClient()
try:
content = get_matching(self._zk.dump(hosts), params.match)
self.show_output(content)
except XClient.CmdFailed as ex:
self.show_output(str(ex)) | def do_dump(self, params) | \x1b[1mNAME\x1b[0m
dump - Executes the dump four-letter command
\x1b[1mSYNOPSIS\x1b[0m
dump [hosts] [match]
\x1b[1mOPTIONS\x1b[0m
* hosts: the hosts to connect to (default: the current connected host)
* match: only output lines that include the given string (default: '')
\x1b[1mEXAMPLES\x1b[0m
> dump
SessionTracker dump:
Session Sets (3)/(1):
0 expire at Fri Nov 14 02:49:52 PST 2014:
0 expire at Fri Nov 14 02:49:56 PST 2014:
1 expire at Fri Nov 14 02:50:00 PST 2014:
0x149adea89940107
ephemeral nodes dump:
Sessions with Ephemerals (0): | 5.849877 | 6.371929 | 0.91807 |
for path in params.paths:
self._zk.delete(path, recursive=True) | def do_rmr(self, params) | \x1b[1mNAME\x1b[0m
rmr - Delete a path and all its children
\x1b[1mSYNOPSIS\x1b[0m
rmr <path> [path] [path] ... [path]
\x1b[1mEXAMPLES\x1b[0m
> rmr /foo
> rmr /foo /bar | 8.427341 | 12.828621 | 0.656917 |
get_child_watcher(self._zk, print_func=self.show_output).update(
params.path, params.verbose) | def do_child_watch(self, params) | \x1b[1mNAME\x1b[0m
child_watch - Watch a path for child changes
\x1b[1mSYNOPSIS\x1b[0m
child_watch <path> [verbose]
\x1b[1mOPTIONS\x1b[0m
* verbose: prints list of znodes (default: false)
\x1b[1mEXAMPLES\x1b[0m
# only prints the current number of children
> child_watch /
# prints num of children along with znodes listing
> child_watch / true | 18.780939 | 21.618734 | 0.868734 |
count = 0
for count, (diff, path) in enumerate(self._zk.diff(params.path_a, params.path_b), 1):
if diff == -1:
self.show_output("-- %s", path)
elif diff == 0:
self.show_output("-+ %s", path)
elif diff == 1:
self.show_output("++ %s", path)
if count == 0:
self.show_output("Branches are equal.") | def do_diff(self, params) | \x1b[1mNAME\x1b[0m
diff - Display the differences between two paths
\x1b[1mSYNOPSIS\x1b[0m
diff <src> <dst>
\x1b[1mDESCRIPTION\x1b[0m
The output is interpreted as:
-- means the znode is missing in /new-configs
++ means the znode is new in /new-configs
-+ means the znode's content differs between /configs and /new-configs
\x1b[1mEXAMPLES\x1b[0m
> diff /configs /new-configs
-- service-x/hosts
++ service-x/hosts.json
+- service-x/params | 3.659914 | 3.703706 | 0.988176 |
def check_valid(path, print_path):
result = "no"
value, _ = self._zk.get(path)
if value is not None:
try:
x = json.loads(value)
result = "yes"
except ValueError:
pass
if print_path:
self.show_output("%s: %s.", os.path.basename(path), result)
else:
self.show_output("%s.", result)
if not params.recursive:
check_valid(params.path, False)
else:
for cpath, _ in self._zk.tree(params.path, 0, full_path=True):
check_valid(cpath, True) | def do_json_valid(self, params) | \x1b[1mNAME\x1b[0m
json_valid - Checks znodes for valid JSON
\x1b[1mSYNOPSIS\x1b[0m
json_valid <path> [recursive]
\x1b[1mOPTIONS\x1b[0m
* recursive: recurse to all children (default: false)
\x1b[1mEXAMPLES\x1b[0m
> json_valid /some/valid/json_znode
yes.
> json_valid /some/invalid/json_znode
no.
> json_valid /configs true
/configs/a: yes.
/configs/b: no. | 3.766807 | 3.264842 | 1.153748 |
def json_output(path, print_path):
value, _ = self._zk.get(path)
if value is not None:
try:
value = json.dumps(json.loads(value), indent=4)
except ValueError:
pass
if print_path:
self.show_output("%s:\n%s", os.path.basename(path), value)
else:
self.show_output(value)
if not params.recursive:
json_output(params.path, False)
else:
for cpath, _ in self._zk.tree(params.path, 0, full_path=True):
json_output(cpath, True) | def do_json_cat(self, params) | \x1b[1mNAME\x1b[0m
json_cat - Pretty prints a znode's JSON
\x1b[1mSYNOPSIS\x1b[0m
json_cat <path> [recursive]
\x1b[1mOPTIONS\x1b[0m
* recursive: recurse to all children (default: false)
\x1b[1mEXAMPLES\x1b[0m
> json_cat /configs/clusters
{
"dc0": {
"network": "10.2.0.0/16",
},
.....
}
> json_cat /configs true
/configs/clusters:
{
"dc0": {
"network": "10.2.0.0/16",
},
.....
}
/configs/dns_servers:
[
"10.2.0.1",
"10.3.0.1"
] | 3.247642 | 3.104731 | 1.04603 |
try:
Keys.validate(params.keys)
except Keys.Bad as ex:
self.show_output(str(ex))
return
if params.recursive:
paths = self._zk.tree(params.path, 0, full_path=True)
print_path = True
else:
paths = [(params.path, 0)]
print_path = False
for cpath, _ in paths:
try:
jstr, _ = self._zk.get(cpath)
value = Keys.value(json_deserialize(jstr), params.keys)
if print_path:
self.show_output("%s: %s", os.path.basename(cpath), value)
else:
self.show_output(value)
except BadJSON as ex:
self.show_output("Path %s has bad JSON.", cpath)
except Keys.Missing as ex:
self.show_output("Path %s is missing key %s.", cpath, ex) | def do_json_get(self, params) | \x1b[1mNAME\x1b[0m
json_get - Get key (or keys, if nested) from a JSON object serialized in the given path
\x1b[1mSYNOPSIS\x1b[0m
json_get <path> <keys> [recursive]
\x1b[1mOPTIONS\x1b[0m
* recursive: recurse to all children (default: false)
\x1b[1mEXAMPLES\x1b[0m
> json_get /configs/primary_service endpoint.clientPort
32768
> json_get /configs endpoint.clientPort true
primary_service: 32768
secondary_service: 32769
# Use template strings to access various keys at once:
> json_get /configs/primary_service '#{endpoint.ipAddress}:#{endpoint.clientPort}'
10.2.2.3:32768 | 3.856324 | 3.596604 | 1.072213 |
complete_keys = partial(complete_values, ["key1", "key2", "#{key1.key2}"])
completers = [self._complete_path, complete_keys, complete_labeled_boolean("recursive")]
return complete(completers, cmd_param_text, full_cmd, *rest) | def complete_json_get(self, cmd_param_text, full_cmd, *rest) | TODO: prefetch & parse znodes & suggest keys | 8.443282 | 7.16516 | 1.17838 |
try:
Keys.validate(params.keys)
except Keys.Bad as ex:
self.show_output(str(ex))
return
try:
jstr, stat = self._zk.get(params.path)
obj_src = json_deserialize(jstr)
obj_dst = copy.deepcopy(obj_src)
# Cast value to its given type.
value = to_type(params.value, params.value_type)
Keys.set(obj_dst, params.keys, value)
if params.confirm:
a = json.dumps(obj_src, sort_keys=True, indent=4)
b = json.dumps(obj_dst, sort_keys=True, indent=4)
diff = difflib.unified_diff(a.split("\n"), b.split("\n"))
self.show_output("\n".join(diff))
if not self.prompt_yes_no("Apply update?"):
return
# Pass along the read version, to ensure we are updating what we read.
self.set(params.path, json.dumps(obj_dst), version=stat.version)
except BadJSON:
self.show_output("Path %s has bad JSON.", params.path)
except Keys.Missing as ex:
self.show_output("Path %s is missing key %s.", params.path, ex)
except ValueError:
self.show_output("Bad value_type") | def do_json_set(self, params) | \x1b[1mNAME\x1b[0m
json_set - Sets the value for the given (possibly nested) key on a JSON object serialized in the given path
\x1b[1mSYNOPSIS\x1b[0m
json_set <path> <keys> <value> <value_type> [confirm]
\x1b[1mDESCRIPTION\x1b[0m
If the key exists and the value is different, the znode will be updated with the key set to its new value.
If the key does not exist, it'll be created and the znode will be updated with the serialized version of
the new object. The value's type will be determined by the value_type parameter.
\x1b[1mEXAMPLES\x1b[0m
> create /props '{"a": {"b": 4}}'
> json_cat /props
{
"a": {
"b": 4
}
}
> json_set /props a.b 5 int
> json_cat /props
{
"a": {
"b": 5
}
}
> json_set /props a.c.d true bool
> json_cat /props
{
"a": {
"c": {
"d": true
},
"b": 5
}
} | 3.859115 | 3.374563 | 1.14359 |
# Ensure we have a balance set of (key, value, type) tuples.
if len(params.keys_values_types) % 3 != 0:
self.show_output('Bad list of parameters')
return
for key, _, _ in grouper(params.keys_values_types, 3):
try:
Keys.validate(key)
except Keys.Bad as ex:
self.show_output(str(ex))
return
# Fetch & deserialize znode.
jstr, stat = self._zk.get(params.path)
try:
obj_src = json_deserialize(jstr)
except BadJSON:
self.show_output("Path %s has bad JSON.", params.path)
obj_dst = copy.deepcopy(obj_src)
# Cast values to their given type.
for key, value, ptype in grouper(params.keys_values_types, 3):
try:
Keys.set(obj_dst, key, to_type(value, ptype))
except Keys.Missing as ex:
self.show_output("Path %s is missing key %s.", params.path, ex)
return
except ValueError:
self.show_output("Bad value_type")
return
# Pass along the read version, to ensure we are updating what we read.
self.set(params.path, json.dumps(obj_dst), version=stat.version) | def do_json_set_many(self, params) | \x1b[1mNAME\x1b[0m
json_set_many - like `json_set`, but for multiple key/value pairs
\x1b[1mSYNOPSIS\x1b[0m
json_set_many <path> <keys> <value> <value_type> <keys1> <value1> <value_type1> ...
\x1b[1mDESCRIPTION\x1b[0m
If the key exists and the value is different, the znode will be updated with the key set to its new value.
If the key does not exist, it'll be created and the znode will be updated with the serialized version of
the new object. The value's type will be determined by the value_type parameter.
This is an atomic operation, either all given keys are set in one ZK operation or none are.
\x1b[1mEXAMPLES\x1b[0m
> create /props '{"a": {"b": 4}}'
> json_cat /props
{
"a": {
"b": 4
}
}
> json_set_many /props a.b 5 int a.c.d true bool
> json_cat /props
{
"a": {
"c": {
"d": true
},
"b": 5
}
} | 4.55566 | 4.224233 | 1.078459 |
try:
Keys.validate(params.keys)
except Keys.Bad as ex:
self.show_output(str(ex))
return
path_map = PathMap(self._zk, params.path)
values = defaultdict(int)
for path, data in path_map.get():
try:
value = Keys.value(json_deserialize(data), params.keys)
values[value] += 1
except BadJSON as ex:
if params.report_errors:
self.show_output("Path %s has bad JSON.", path)
except Keys.Missing as ex:
if params.report_errors:
self.show_output("Path %s is missing key %s.", path, ex)
results = sorted(values.items(), key=lambda item: item[1], reverse=params.reverse)
results = [r for r in results if r[1] >= params.minfreq]
# what slice do we want?
if params.top == 0:
start, end = 0, len(results)
elif params.top > 0:
start, end = 0, params.top if params.top < len(results) else len(results)
else:
start = len(results) + params.top if abs(params.top) < len(results) else 0
end = len(results)
if len(results) > 0 and params.print_path:
self.show_output(params.path)
for i in range(start, end):
value, frequency = results[i]
self.show_output("%s = %d", value, frequency)
# if no results were found we call it a failure (i.e.: exit(1) from --run-once)
if len(results) == 0:
return False | def do_json_count_values(self, params) | \x1b[1mNAME\x1b[0m
json_count_values - Gets the frequency of the values associated with the given keys
\x1b[1mSYNOPSIS\x1b[0m
json_count_values <path> <keys> [top] [minfreq] [reverse] [report_errors] [print_path]
\x1b[1mOPTIONS\x1b[0m
* top: number of results to show (0 is all) (default: 0)
* minfreq: minimum frequency to be displayed (default: 1)
* reverse: sort in descending order (default: true)
* report_errors: report bad znodes (default: false)
* print_path: print the path if there are results (default: false)
\x1b[1mEXAMPLES\x1b[0m
> json_count_values /configs/primary_service endpoint.host
10.20.0.2 3
10.20.0.4 3
10.20.0.5 3
10.20.0.6 1
10.20.0.7 1
... | 3.402822 | 2.81151 | 1.210318 |
try:
Keys.validate(params.keys)
except Keys.Bad as ex:
self.show_output(str(ex))
return
path_map = PathMap(self._zk, params.path)
dupes_by_path = defaultdict(lambda: defaultdict(list))
for path, data in path_map.get():
parent, child = split(path)
if not child.startswith(params.prefix):
continue
try:
value = Keys.value(json_deserialize(data), params.keys)
dupes_by_path[parent][value].append(path)
except BadJSON as ex:
if params.report_errors:
self.show_output("Path %s has bad JSON.", path)
except Keys.Missing as ex:
if params.report_errors:
self.show_output("Path %s is missing key %s.", path, ex)
dupes = []
for _, paths_by_value in dupes_by_path.items():
for _, paths in paths_by_value.items():
if len(paths) > 1:
paths.sort()
paths = paths if params.first else paths[1:]
for path in paths:
idx = bisect.bisect(dupes, path)
dupes.insert(idx, path)
for dup in dupes:
self.show_output(dup)
# if no dupes were found we call it a failure (i.e.: exit(1) from --run-once)
if len(dupes) == 0:
return False | def do_json_dupes_for_keys(self, params) | \x1b[1mNAME\x1b[0m
json_dupes_for_keys - Gets the duplicate znodes for the given keys
\x1b[1mSYNOPSIS\x1b[0m
json_dupes_for_keys <path> <keys> [prefix] [report_errors] [first]
\x1b[1mDESCRIPTION\x1b[0m
Znodes with duplicated keys are sorted and all but the first (original) one
are printed.
\x1b[1mOPTIONS\x1b[0m
* prefix: only include matching znodes
* report_errors: turn on error reporting (i.e.: bad JSON in a znode)
* first: print the first, non duplicated, znode too.
\x1b[1mEXAMPLES\x1b[0m
> json_cat /configs/primary_service true
member_0000000186
{
"status": "ALIVE",
"serviceEndpoint": {
"http": {
"host": "10.0.0.2",
"port": 31994
}
},
"shard": 0
}
member_0000000187
{
"status": "ALIVE",
"serviceEndpoint": {
"http": {
"host": "10.0.0.2",
"port": 31994
}
},
"shard": 0
}
> json_dupes_for_keys /configs/primary_service shard
member_0000000187 | 3.533581 | 3.257507 | 1.08475 |
if os.getuid() == 0:
self.show_output("edit cannot be run as root.")
return
editor = os.getenv("EDITOR", os.getenv("VISUAL", "/usr/bin/vi"))
if editor is None:
self.show_output("No editor found, please set $EDITOR")
return
editor = which(editor)
if not editor:
self.show_output("Cannot find executable editor, please set $EDITOR")
return
st = os.stat(editor)
if (st.st_mode & statlib.S_ISUID) or (st.st_mode & statlib.S_ISUID):
self.show_output("edit cannot use setuid/setgid binaries.")
return
# copy content to tempfile
value, stat = self._zk.get(params.path)
_, tmppath = tempfile.mkstemp()
with open(tmppath, "w") as fh:
fh.write(value if value else "")
# launch editor
rv = os.system("%s %s" % (editor, tmppath))
if rv != 0:
self.show_output("%s did not exit successfully" % editor)
try:
os.unlink(tmppath)
except OSError: pass
return
# did it change? if so, save it
with open(tmppath, "r") as fh:
newvalue = fh.read()
if newvalue != value:
self.set(params.path, decoded(newvalue), stat.version)
try:
os.unlink(tmppath)
except OSError: pass | def do_edit(self, params) | \x1b[1mNAME\x1b[0m
edit - Opens up an editor to modify and update a znode.
\x1b[1mSYNOPSIS\x1b[0m
edit <path>
\x1b[1mDESCRIPTION\x1b[0m
If the content has not changed, the znode won't be updated.
$EDITOR must be set for zk-shell to find your editor.
\x1b[1mEXAMPLES\x1b[0m
# make sure $EDITOR is set in your shell
> edit /configs/webservers/primary
# change something and save
> get /configs/webservers/primary
# updated content | 2.985632 | 2.984218 | 1.000474 |
repeat = params.repeat
if repeat < 0:
self.show_output("<repeat> must be >= 0.")
return
pause = params.pause
if pause < 0:
self.show_output("<pause> must be >= 0.")
return
cmds = params.cmds
i = 0
with self.transitions_disabled():
while True:
for cmd in cmds:
try:
self.onecmd(cmd)
except Exception as ex:
self.show_output("Command failed: %s.", ex)
if pause > 0.0:
time.sleep(pause)
i += 1
if repeat > 0 and i >= repeat:
break | def do_loop(self, params) | \x1b[1mNAME\x1b[0m
loop - Runs commands in a loop
\x1b[1mSYNOPSIS\x1b[0m
loop <repeat> <pause> <cmd1> <cmd2> ... <cmdN>
\x1b[1mDESCRIPTION\x1b[0m
Runs <cmds> <repeat> times (0 means forever), with a pause of <pause> seconds in between
each <cmd> (0 means no pause).
\x1b[1mEXAMPLES\x1b[0m
> loop 3 0 "get /foo"
...
> loop 3 0 "get /foo" "get /bar"
... | 2.912083 | 2.668979 | 1.091085 |
if invalid_hosts(params.hosts):
self.show_output("List of hosts has the wrong syntax.")
return
stat = self._zk.exists(params.path)
if stat is None:
self.show_output("%s is gone.", params.path)
return
if not params.recursive and stat.ephemeralOwner == 0:
self.show_output("%s is not ephemeral.", params.path)
return
try:
info_by_path = self._zk.ephemerals_info(params.hosts)
except XClient.CmdFailed as ex:
self.show_output(str(ex))
return
def check(path, show_path, resolved):
info = info_by_path.get(path, None)
if info is None:
self.show_output("No session info for %s.", path)
else:
self.show_output("%s%s",
"%s: " % (path) if show_path else "",
info.resolved if resolved else str(info))
if not params.recursive:
check(params.path, False, params.reverse)
else:
for cpath, _ in self._zk.tree(params.path, 0, full_path=True):
check(cpath, True, params.reverse) | def do_ephemeral_endpoint(self, params) | \x1b[1mNAME\x1b[0m
ephemeral_endpoint - Gets the ephemeral znode owner's session and ip:port
\x1b[1mSYNOPSIS\x1b[0m
ephemeral_endpoint <path> <hosts> [recursive] [reverse_lookup]
\x1b[1mDESCRIPTION\x1b[0m
hosts is a list of hosts in the host1[:port1][,host2[:port2]],... form.
\x1b[1mOPTIONS\x1b[0m
* recursive: recurse through the children (default: false)
* reverse_lookup: convert IPs back to hostnames (default: false)
\x1b[1mEXAMPLES\x1b[0m
> ephemeral_endpoint /servers/member_0000044941 10.0.0.1,10.0.0.2,10.0.0.3
0xa4788b919450e6 10.3.2.12:54250 10.0.0.2:2181 | 3.783507 | 3.839569 | 0.985399 |
if invalid_hosts(params.hosts):
self.show_output("List of hosts has the wrong syntax.")
return
try:
info_by_id = self._zk.sessions_info(params.hosts)
except XClient.CmdFailed as ex:
self.show_output(str(ex))
return
info = info_by_id.get(params.session, None)
if info is None:
self.show_output("No session info for %s.", params.session)
else:
self.show_output("%s", info.resolved_endpoints if params.reverse else info.endpoints) | def do_session_endpoint(self, params) | \x1b[1mNAME\x1b[0m
session_endpoint - Gets the session's IP endpoints
\x1b[1mSYNOPSIS\x1b[0m
session_endpoint <session> <hosts> [reverse_lookup]
\x1b[1mDESCRIPTION\x1b[0m
where hosts is a list of hosts in the host1[:port1][,host2[:port2]],... form
\x1b[1mOPTIONS\x1b[0m
* reverse_lookup: convert IPs back to hostnames (default: false)
\x1b[1mEXAMPLES\x1b[0m
> session_endpoint 0xa4788b919450e6 10.0.0.1,10.0.0.2,10.0.0.3
10.3.2.12:54250 10.0.0.2:2181 | 5.272207 | 5.247017 | 1.004801 |
complete_hosts = partial(complete_values, ["127.0.0.1:2181"])
completers = [self._complete_path, complete_hosts, complete_labeled_boolean("reverse")]
return complete(completers, cmd_param_text, full_cmd, *rest) | def complete_session_endpoint(self, cmd_param_text, full_cmd, *rest) | TODO: the hosts lists can be retrieved from self.zk.hosts | 6.78953 | 5.576128 | 1.217607 |
self._zk.set(params.path, decoded(params.val * params.repeat)) | def do_fill(self, params) | \x1b[1mNAME\x1b[0m
fill - Fills a znode with the given value
\x1b[1mSYNOPSIS\x1b[0m
fill <path> <char> <count>
\x1b[1mEXAMPLES\x1b[0m
> fill /some/znode X 1048576 | 34.688549 | 21.192318 | 1.636845 |
start = time.time()
for cmd in params.cmds:
try:
self.onecmd(cmd)
except Exception as ex:
self.show_output("Command failed: %s.", ex)
elapsed = "{0:.5f}".format(time.time() - start)
self.show_output("Took %s seconds" % elapsed) | def do_time(self, params) | \x1b[1mNAME\x1b[0m
time - Measures elapsed seconds after running commands
\x1b[1mSYNOPSIS\x1b[0m
time <cmd1> <cmd2> ... <cmdN>
\x1b[1mEXAMPLES\x1b[0m
> time 'loop 10 0 "create /foo_ bar ephemeral=false sequence=true"'
Took 0.05585 seconds | 4.072897 | 4.280573 | 0.951484 |
if params.cmd not in ["add", "remove"]:
raise ValueError("Bad command: %s" % params.cmd)
joining, leaving, from_config = None, None, params.from_config
if params.cmd == "add":
joining = params.args
elif params.cmd == "remove":
leaving = params.args
try:
value, _ = self._zk.reconfig(
joining=joining, leaving=leaving, new_members=None, from_config=from_config)
self.show_output(value)
except NewConfigNoQuorumError:
self.show_output("No quorum available to perform reconfig.")
except ReconfigInProcessError:
self.show_output("There's a reconfig in process.") | def do_reconfig(self, params) | \x1b[1mNAME\x1b[0m
reconfig - Reconfigures a ZooKeeper cluster (adds/removes members)
\x1b[1mSYNOPSIS\x1b[0m
reconfig <add|remove> <arg> [from_config]
\x1b[1mDESCRIPTION\x1b[0m
reconfig add <members> [from_config]
adds the given members (i.e.: 'server.100=10.0.0.10:2889:3888:observer;0.0.0.0:2181').
reconfig remove <members_ids> [from_config]
removes the members with the given ids (i.e.: '2,3,5').
\x1b[1mEXAMPLES\x1b[0m
> reconfig add server.100=0.0.0.0:56954:37866:observer;0.0.0.0:42969
server.1=localhost:20002:20001:participant
server.2=localhost:20012:20011:participant
server.3=localhost:20022:20021:participant
server.100=0.0.0.0:56954:37866:observer;0.0.0.0:42969
version=100000003
> reconfig remove 100
server.1=localhost:20002:20001:participant
server.2=localhost:20012:20011:participant
server.3=localhost:20022:20021:participant
version=100000004 | 4.13979 | 3.711435 | 1.115415 |
values = []
with self.output_context() as context:
for cmd in params.cmds:
rv = self.onecmd(cmd)
val = "" if rv is False else context.value.rstrip("\n")
values.append(val)
context.reset()
try:
self.show_output(params.fmtstr, *values)
except TypeError:
self.show_output("Bad format string or missing arguments.") | def do_echo(self, params) | \x1b[1mNAME\x1b[0m
echo - displays formatted data
\x1b[1mSYNOPSIS\x1b[0m
echo <fmtstr> [cmd1] [cmd2] ... [cmdN]
\x1b[1mEXAMPLES\x1b[0m
> echo hello
hello
> echo 'The value of /foo is %s' 'get /foo'
bar | 6.362267 | 6.84748 | 0.92914 |
self._disconnect()
auth_data = []
hosts = []
for auth_host in hosts_list:
nl = Netloc.from_string(auth_host)
rhost, rport = hosts_to_endpoints(nl.host)[0]
if self._tunnel is not None:
lhost, lport = TunnelHelper.create_tunnel(rhost, rport, self._tunnel)
hosts.append('{0}:{1}'.format(lhost, lport))
else:
hosts.append(nl.host)
if nl.scheme != "":
auth_data.append((nl.scheme, nl.credential))
self._zk = XClient(",".join(hosts),
read_only=self._read_only,
timeout=self._connect_timeout,
auth_data=auth_data if len(auth_data) > 0 else None)
if self._asynchronous:
self._connect_async(hosts)
else:
self._connect_sync(hosts) | def _connect(self, hosts_list) | In the basic case, hostsp is a list of hosts like:
```
[10.0.0.2:2181, 10.0.0.3:2181]
```
It might also contain auth info:
```
[digest:foo:[email protected]:2181, 10.0.0.3:2181]
``` | 3.803921 | 3.901332 | 0.975031 |
sock = socket.create_connection(address, timeout)
yield sock
sock.close() | def connected_socket(address, timeout=3) | yields a connected socket | 3.398821 | 2.889263 | 1.176363 |
super(XTransactionRequest, self).create(path, to_bytes(value), acl, ephemeral, sequence) | def create(self, path, value=b"", acl=None, ephemeral=False,
sequence=False) | wrapper that handles encoding (yay Py3k) | 8.830316 | 7.657453 | 1.153166 |
super(XTransactionRequest, self).set_data(path, to_bytes(value), version) | def set_data(self, path, value, version=-1) | wrapper that handles encoding (yay Py3k) | 10.580762 | 7.437701 | 1.422585 |
value, stat = super(XClient, self).get(*args, **kwargs)
try:
if value is not None:
value = value.decode(encoding="utf-8")
except UnicodeDecodeError:
pass
return (value, stat) | def get(self, *args, **kwargs) | wraps the default get() and deals with encoding | 4.247221 | 3.606689 | 1.177596 |
return super(XClient, self).get(*args, **kwargs) | def get_bytes(self, *args, **kwargs) | no string decoding performed | 10.88659 | 7.39923 | 1.471314 |
value = to_bytes(value)
super(XClient, self).set(path, value, version) | def set(self, path, value, version=-1) | wraps the default set() and handles encoding (Py3k) | 6.84156 | 5.849919 | 1.169514 |
value = to_bytes(value)
return super(XClient, self).create_async(path, value, acl, ephemeral, sequence, makepath) | def create_async(self, path, value=b"", acl=None, ephemeral=False, sequence=False, makepath=False) | wraps the default create() and handles encoding (Py3k) | 3.780987 | 3.700923 | 1.021634 |
yield path, self.get_acls(path)[0]
if depth == -1:
return
for tpath, _ in self.tree(path, depth, full_path=True):
try:
acls, stat = self.get_acls(tpath)
except NoNodeError:
continue
if not include_ephemerals and stat.ephemeralOwner != 0:
continue
yield tpath, acls | def get_acls_recursive(self, path, depth, include_ephemerals) | A recursive generator wrapper for get_acls
:param path: path from which to start
:param depth: depth of the recursion (-1 no recursion, 0 means no limit)
:param include_ephemerals: get ACLs for ephemerals too | 3.575739 | 3.782867 | 0.945246 |
try:
match = re.compile(match, flags)
except sre_constants.error as ex:
print("Bad regexp: %s" % (ex))
return
offset = len(path)
for cpath in Tree(self, path).get():
if match.search(cpath[offset:]):
yield cpath | def find(self, path, match, flags) | find every matching child path under path | 4.179624 | 3.934284 | 1.062359 |
try:
match = re.compile(content, flags)
except sre_constants.error as ex:
print("Bad regexp: %s" % (ex))
return
for gpath, matches in self.do_grep(path, match):
yield (gpath, matches) | def grep(self, path, content, flags) | grep every child path under path for content | 4.085299 | 4.157635 | 0.982601 |
def do_grep(self, path, match):
    """grep's work horse: recursively yield (path, matching_lines) pairs"""
    try:
        children = self.get_children(path)
    except (NoNodeError, NoAuthError):
        children = []
    for child in children:
        full_path = os.path.join(path, child)
        try:
            value, _ = self.get(full_path)
        except (NoNodeError, NoAuthError):
            # znode went away or is unreadable; treat as empty
            value = ""
        if value is not None:
            matching = [line for line in value.split("\n") if match.search(line)]
            if matching:
                yield (full_path, matching)
        for mpath, matches in self.do_grep(full_path, match):
            yield (mpath, matches)
def child_count(self, path):
    """returns the child count under path (deals with znodes going away as it's
    traversing the tree).
    """
    root_stat = self.stat(path)
    if not root_stat:
        return 0
    total = root_stat.numChildren
    for _, _, child_stat in self.tree(path, 0, include_stat=True):
        # child_stat is None when the znode disappeared mid-traversal
        if child_stat:
            total += child_stat.numChildren
    return total
def tree(self, path, max_depth, full_path=False, include_stat=False):
    """DFS generator which starts from a given path and goes up to a max depth.

    :param path: path from which the DFS will start
    :param max_depth: max depth of DFS (0 means no limit)
    :param full_path: should the full path of the child node be returned
    :param include_stat: return the child Znode's stat along with the name & level
    """
    for item in self.do_tree(path, max_depth, 0, full_path, include_stat):
        yield item
def do_tree(self, path, max_depth, level, full_path, include_stat):
    """tree's work horse"""
    try:
        children = self.get_children(path)
    except (NoNodeError, NoAuthError):
        children = []
    next_level = level + 1
    for child in children:
        child_path = os.path.join(path, child)
        name = child_path if full_path else child
        if include_stat:
            yield name, level, self.stat(child_path)
        else:
            yield name, level
        if max_depth == 0 or next_level < max_depth:
            for deeper in self.do_tree(child_path, max_depth, next_level, full_path, include_stat):
                yield deeper
def fast_tree(self, path, exclude_recurse=None):
    """a fast async version of tree()"""
    for child_path in Tree(self, path).get(exclude_recurse):
        yield child_path
def diff(self, path_a, path_b):
    """Performs a deep comparison of path_a/ and path_b/

    For each child, it yields (rv, child) where rv:
      -1 if doesn't exist in path_b (destination)
       0 if they are different
       1 if it doesn't exist in path_a (source)
    """
    path_a = path_a.rstrip("/")
    path_b = path_b.rstrip("/")
    if not self.exists(path_a) or not self.exists(path_b):
        return
    if not self.equal(path_a, path_b):
        yield 0, "/"
    seen = set()
    prefix_a = len(path_a)
    prefix_b = len(path_b)
    # first, check what's missing & changed in dst
    for child_a, _ in self.tree(path_a, 0, True):
        child_sub = child_a[prefix_a + 1:]
        child_b = os.path.join(path_b, child_sub)
        if not self.exists(child_b):
            yield -1, child_sub
        else:
            if not self.equal(child_a, child_b):
                yield 0, child_sub
            seen.add(child_sub)
    # now, check what's new in dst
    for child_b, _ in self.tree(path_b, 0, True):
        child_sub = child_b[prefix_b + 1:]
        if child_sub not in seen:
            yield 1, child_sub
def equal(self, path_a, path_b):
    """compare if a and b have the same bytes"""
    bytes_a = self.get_bytes(path_a)[0]
    bytes_b = self.get_bytes(path_b)[0]
    return bytes_a == bytes_b
def stat(self, path):
    """safely gets the Znode's Stat (None if the znode is gone or unreadable)"""
    try:
        return self.exists(str(path))
    except (NoNodeError, NoAuthError):
        return None
def cmd(self, endpoints, cmd):
    """endpoints is [(host1, port1), (host2, port), ...]

    Tries every endpoint; a failure only aborts when there is a single
    endpoint to try.
    """
    replies = []
    single = len(endpoints) == 1
    for endpoint in endpoints:
        try:
            replies.append(self._cmd(endpoint, cmd))
        except self.CmdFailed:
            # if there's only 1 endpoint, give up.
            # if there's more, keep trying.
            if single:
                raise
    return "".join(replies)
def _cmd(self, endpoint, cmd):
    """Sends a 4-letter command to one server; endpoint is (host, port).

    Every resolved IP (v4 and v6) is tried; a socket error only aborts
    when there is a single IP left to try.

    :raises self.CmdFailed: if resolution fails or the only IP errors out
    """
    cmdbuf = "%s\n" % (cmd)
    # some cmds have large outputs and ZK closes the connection as soon as it
    # finishes writing. so read in huge chunks.
    recvsize = 1 << 20
    replies = []
    host, port = endpoint
    ips = get_ips(host, port)
    if len(ips) == 0:
        raise self.CmdFailed("Failed to resolve: %s" % (host))
    for ip in ips:
        try:
            with connected_socket((ip, port)) as sock:
                # bug fix: send() may write only part of the buffer;
                # sendall() guarantees the whole command goes out.
                sock.sendall(cmdbuf.encode())
                while True:
                    buf = sock.recv(recvsize).decode("utf-8")
                    if buf == "":
                        break
                    replies.append(buf)
        except socket.error as ex:
            # if there's only 1 record, give up.
            # if there's more, keep trying.
            if len(ips) == 1:
                raise self.CmdFailed("Error(%s): %s" % (ip, ex))
    return "".join(replies)
def reconnect(self):
    """forces a reconnect by shutting down the connected socket
    return True if the reconnect happened, False otherwise
    """
    suspended = self.handler.event_object()

    def listener(state):
        if state is KazooState.SUSPENDED:
            suspended.set()

    self.add_listener(listener)
    self._connection._socket.shutdown(socket.SHUT_RDWR)
    # give the client a second to notice the connection loss
    suspended.wait(1)
    if not suspended.is_set():
        return False
    # wait until we are back
    while not self.connected:
        time.sleep(0.1)
    return True
def dump_by_server(self, hosts):
    """Returns the output of dump for each server.

    :param hosts: comma separated lists of members of the ZK ensemble.
    :returns: A dictionary of ((server_ip, port), dump output str); the
              string is empty when the command failed for that server.
    """
    dump_by_endpoint = {}
    for endpoint in self._to_endpoints(hosts):
        try:
            output = self.cmd([endpoint], "dump")
        except self.CmdFailed:
            output = ""
        dump_by_endpoint[endpoint] = output
    return dump_by_endpoint
def ephemerals_info(self, hosts):
    """Returns ClientInfo per path.

    :param hosts: comma separated lists of members of the ZK ensemble.
    :returns: A dictionary of (path, ClientInfo).
    """
    info_by_path, info_by_id = {}, {}
    for (server_ip, server_port), dump in self.dump_by_server(hosts).items():
        sid = None
        for line in dump.split("\n"):
            mat = self.SESSION_REGEX.match(line)
            if mat:
                # a new session section starts; remember its id
                sid = mat.group(1)
                continue
            mat = self.PATH_REGEX.match(line)
            if mat:
                info = info_by_id.get(sid)
                if info is None:
                    info = info_by_id[sid] = ClientInfo(sid)
                info_by_path[mat.group(1)] = info
                continue
            mat = self.IP_PORT_REGEX.match(line)
            if mat:
                ip, port, sid = mat.groups()
                if sid in info_by_id:
                    # fill in the client endpoint for this known session
                    info_by_id[sid](ip, int(port), server_ip, server_port)
    return info_by_path
def sessions_info(self, hosts):
    """Returns ClientInfo per session.

    :param hosts: comma separated lists of members of the ZK ensemble.
    :returns: A dictionary of (session_id, ClientInfo).
    """
    info_by_id = {}
    for (server_ip, server_port), dump in self.dump_by_server(hosts).items():
        for line in dump.split("\n"):
            mat = self.IP_PORT_REGEX.match(line)
            if mat is None:
                continue
            ip, port, sid = mat.groups()
            # NOTE: port is kept as a string here (ephemerals_info ints it)
            info_by_id[sid] = ClientInfo(sid, ip, port, server_ip, server_port)
    return info_by_id
def update(self, path, verbose=False):
    """if the path isn't being watched, start watching it
    if it is, stop watching it
    """
    watched = path in self._by_path
    if watched:
        self.remove(path)
    else:
        self.add(path, verbose)
def zk_client(host, scheme, credential):
    """returns a connected (and possibly authenticated) ZK client"""
    # default the port if the caller didn't give one
    if not re.match(r".*:\d+$", host):
        host = "%s:%d" % (host, DEFAULT_ZK_PORT)
    client = KazooClient(hosts=host)
    client.start()
    if scheme != "":
        client.add_auth(scheme, credential)
    return client
def from_string(cls, string, exists=False, asynchronous=False, verbose=False):
    """if exists is bool, then check it either exists or it doesn't.
    if exists is None, we don't care.
    """
    result = cls.parse(string)
    scheme = result.scheme
    if scheme not in cls.TYPES:
        raise CopyError("Invalid scheme: %s" % (scheme))
    return cls.TYPES[scheme](result, exists, asynchronous, verbose)
def zk_walk(self, root_path, branch_path):
    """Recursively yield child paths (relative to root_path), skipping
    ephemeral znodes since there's no point in copying those.
    """
    full_path = os.path.join(root_path, branch_path) if branch_path else root_path
    try:
        children = self.client.get_children(full_path)
    except NoNodeError:
        children = set()
    except NoAuthError:
        raise AuthError("read children", full_path)
    for child in children:
        child_path = os.path.join(branch_path, child) if branch_path else child
        try:
            child_stat = self.client.exists(os.path.join(root_path, child_path))
        except NoAuthError:
            raise AuthError("read", child)
        # skip znodes that went away, and ephemerals
        if child_stat is None or child_stat.ephemeralOwner != 0:
            continue
        yield child_path
        for descendant in self.zk_walk(root_path, child_path):
            yield descendant
def write_path(self, path_value):
    """this will overwrite dst path - be careful"""
    try:
        os.makedirs(os.path.dirname(self.path))
    except OSError:
        # parent dir already exists - proceed anyway
        pass
    with open(self.path, "w") as fph:
        fph.write(path_value.value)
def get(self, exclude_recurse=None):
    """BFS over the subtree using async get_children calls.

    Paths matching exclude_recurse will not be recursed.
    """
    stat = self.zk.exists(self.path)
    if stat is None or stat.numChildren == 0:
        return

    pending_requests = Queue()

    def dispatch(path):
        # fire an async get_children and remember which path it was for
        return Request(path, self.zk.get_children_async(path))

    outstanding = 1
    pending_requests.put(dispatch(self.path))
    while outstanding:
        request = pending_requests.get()
        try:
            for child in request.value:
                child_path = os.path.join(request.path, child)
                if exclude_recurse is None or exclude_recurse not in child:
                    outstanding += 1
                    pending_requests.put(dispatch(child_path))
                    yield child_path
        except (NoNodeError, NoAuthError):
            pass
        outstanding -= 1
def add(self, path, debug, children):
    """Set a watch for path and (maybe) its children depending on the value
    of children:

        -1: all children
         0: no children
       > 0: up to level depth children

    If debug is true, print each received events.
    """
    if path in self._stats_by_path:
        print("%s is already being watched" % (path))
        return
    # we can't watch child paths of what's already being watched,
    # because that generates a race between firing and resetting
    # watches for overlapping paths.
    if "/" in self._stats_by_path:
        print("/ is already being watched, so everything is watched")
        return
    for existing in self._stats_by_path:
        if existing.startswith(path):
            print(self.PARENT_ERR % (path, existing))
            return
        if path.startswith(existing):
            print(self.CHILD_ERR % (path, existing))
            return
    self._stats_by_path[path] = PathStats(debug)
    self._watch(path, 0, children)
def _watch(self, path, current_level, max_level):
    """we need to catch ZNONODE because children might be removed whilst we
    are iterating (specially ephemeral znodes)
    """
    # ephemeral znodes can't have children, so skip them
    stat = self._client.exists(path)
    if stat is None or stat.ephemeralOwner != 0:
        return
    try:
        children = self._client.get_children(path, self._watcher)
    except NoNodeError:
        children = []
    next_level = current_level + 1
    if max_level >= 0 and next_level > max_level:
        return
    for child in children:
        self._watch(os.path.join(path, child), next_level, max_level)
def safe_list_set(plist, idx, fill_with, value):
    """Sets:

    ```
    plist[idx] = value
    ```

    If len(plist) is smaller than what idx is trying
    to dereferece, we first grow plist to get the needed
    capacity and fill the new elements with fill_with
    (or fill_with(), if it's a callable).
    """
    try:
        plist[idx] = value
        return
    except IndexError:
        pass
    # Grow the list up to the needed position (negative indexes included).
    target_len = idx + 1 if idx >= 0 else abs(idx)
    while len(plist) < target_len:
        plist.append(fill_with() if callable(fill_with) else fill_with)
    plist[idx] = value
def to_type(value, ptype):
    """Convert the string `value` to the Python type named by `ptype`.

    :param value: string representation of the value
    :param ptype: one of 'str', 'int', 'float', 'bool' or 'json'
    :returns: the converted value
    :raises ValueError: for a bad bool text or an unknown type name
    """
    if ptype == 'str':
        return str(value)
    elif ptype == 'int':
        return int(value)
    elif ptype == 'float':
        return float(value)
    elif ptype == 'bool':
        if value.lower() == 'true':
            return True
        elif value.lower() == 'false':
            return False
        raise ValueError('Bad bool value: %s' % value)
    elif ptype == 'json':
        return json.loads(value)
    # bug fix: the original *returned* the ValueError instance instead of
    # raising it, handing callers an exception object as the converted value.
    raise ValueError('Unknown type')
def extract(cls, keystr):
    """for #{key} returns key"""
    pattern = r'#{\s*(%s)\s*}' % cls.ALLOWED_KEY
    return re.match(pattern, keystr).group(1)
def validate_one(cls, keystr):
    """validates one key string; raises cls.Bad on bad syntax"""
    if re.match(r'%s$' % cls.ALLOWED_KEY, keystr) is None:
        raise cls.Bad("Bad key syntax for: %s. Should be: key1.key2..." % (keystr))
    return True
def from_template(cls, template):
    """extracts keys out of template in the form of: "a = #{key1}, b = #{key2.key3} ..."
    """
    keys = re.findall(r'#{\s*%s\s*}' % cls.ALLOWED_KEY, template)
    if not keys:
        raise cls.Bad("Bad keys template: %s. Should be: \"%s\"" % (
            template, "a = #{key1}, b = #{key2.key3} ..."))
    return keys
def validate(cls, keystr):
    """raises cls.Bad if keys has errors"""
    if "#{" not in keystr:
        # plain keys str
        cls.validate_one(keystr)
        return
    # it's a template with keys vars
    for key in cls.from_template(keystr):
        cls.validate_one(cls.extract(key))
def fetch(cls, obj, keys):
    """fetches the value corresponding to keys from obj

    :param obj: a (possibly nested) dict/list structure
    :param keys: dot separated key string (list positions are ints)
    :raises cls.Missing: if any key can't be resolved
    """
    current = obj
    for key in keys.split("."):
        if type(current) == list:
            try:
                key = int(key)
            except ValueError:
                # bug fix: int() raises ValueError (never TypeError) for a
                # non-numeric string; this now matches Keys.set()'s handling
                raise cls.Missing(key)
        try:
            current = current[key]
        except (IndexError, KeyError, TypeError):
            raise cls.Missing(key)
    return current
def value(cls, obj, keystr):
    """gets the value corresponding to keys from obj. if keys is a template
    string, it extrapolates the keys in it
    """
    if "#{" not in keystr:
        # plain keys str
        return cls.fetch(obj, keystr)
    # template: replace every #{key} occurrence with its fetched value
    result = keystr
    for key in cls.from_template(keystr):
        fetched = cls.fetch(obj, cls.extract(key))
        result = result.replace(key, str(fetched))
    return result
def set(cls, obj, keys, value, fill_list_value=None):
    """sets the value for the given keys on obj. if any of the given
    keys does not exist, create the intermediate containers.
    """
    current = obj
    parts = keys.split(".")
    last = len(parts)
    for pos, key in enumerate(parts, 1):
        if type(current) == list:
            # Validate this key works with a list.
            try:
                key = int(key)
            except ValueError:
                raise cls.Missing(key)
        try:
            if pos == last:
                # This is the last key, so set the value.
                if type(current) == list:
                    safe_list_set(
                        current,
                        key,
                        lambda: copy.copy(fill_list_value),
                        value
                    )
                else:
                    current[key] = value
                # done.
                return
            # More keys left, ensure we have a container for this key.
            if type(key) == int:
                try:
                    current[key]
                except IndexError:
                    # Create a container for this key; its kind depends
                    # on what the *next* key looks like.
                    cnext = container_for_key(parts[pos])
                    is_list = type(cnext) == list
                    safe_list_set(
                        current,
                        key,
                        (lambda: []) if is_list else (lambda: {}),
                        [] if is_list else {}
                    )
            else:
                if key not in current:
                    # Create a container for this key.
                    current[key] = container_for_key(parts[pos])
            # Move on to the next key.
            current = current[key]
        except (IndexError, KeyError, TypeError):
            raise cls.Missing(key)
def pretty_bytes(num):
    """pretty print the given number of bytes"""
    for unit in ('', 'KB', 'MB', 'GB'):
        if num < 1024.0:
            # plain integer for bytes, one decimal for bigger units
            return "%d" % num if unit == '' else "%3.1f%s" % (num, unit)
        num /= 1024.0
    return "%3.1f%s" % (num, 'TB')
def to_bytes(value):
    """str to bytes (py3k); bytes and None pass through unchanged"""
    vtype = type(value)
    if vtype is bytes or value is None:
        return value
    try:
        # equivalent to value.encode() for str values
        return vtype.encode(value)
    except UnicodeEncodeError:
        pass
    return value
def valid_ipv4(ip):
    """check if ip is a valid ipv4"""
    match = _valid_ipv4.match(ip)
    if match is None:
        return False
    octets = match.groups()
    if len(octets) != 4:
        return False
    # first octet can't be 0 or 255
    if not 1 <= int(octets[0]) <= 254:
        return False
    return all(0 <= int(octet) <= 255 for octet in octets[1:])
def valid_host(host):
    """check valid hostname"""
    return all(_valid_host_part.match(part) for part in host.split("."))
def valid_host_with_port(hostport):
    """matches hostname or an IP, optionally with a port"""
    if ":" in hostport:
        host, port = hostport.rsplit(":", 1)
    else:
        host, port = hostport, None
    # first, validate host or IP
    if not (valid_ipv4(host) or valid_host(host)):
        return False
    # now, validate port (when given)
    return port is None or valid_port(port)
def valid_hosts(hosts):
    """matches a comma separated list of hosts (possibly with ports)"""
    if _empty.match(hosts):
        return False
    return all(valid_host_with_port(host) for host in hosts.split(","))
def split(path):
    """splits path into (parent, child); the root has no child"""
    if path == '/':
        return ('/', None)
    parent, child = path.rsplit('/', 1)
    return (parent if parent != '' else '/', child)
def get_ips(host, port):
    """lookup all IPs (v4 and v6); returns a set of addresses"""
    ips = set()
    for family in (socket.AF_INET, socket.AF_INET6):
        try:
            for record in socket.getaddrinfo(host, port, family, socket.SOCK_STREAM):
                ips.add(record[4][0])
        except socket.gaierror:
            # no addresses for this family
            pass
    return ips
def hosts_to_endpoints(hosts, port=2181):
    """return a list of (host, port) tuples from a given host[:port],... str

    NOTE: an explicit port stays a string; the default port is an int.
    """
    def to_endpoint(host):
        if ":" in host:
            return tuple(host.rsplit(":", 1))
        return (host, port)
    return [to_endpoint(host) for host in hosts.split(",")]
def find_outliers(group, delta):
    """given a list of values, find those that are apart from the rest by
    `delta`. the indexes for the outliers is returned, if any.

    examples:

      values = [100, 6, 7, 8, 9, 10, 150]
      find_outliers(values, 5) -> [0, 6]

      values = [5, 6, 5, 4, 5]
      find_outliers(values, 3) -> []
    """
    indexed = sorted(enumerate(group), key=lambda pair: pair[1])
    start = end = -1
    for pos in range(len(indexed) - 1):
        gap = indexed[pos + 1][1] - indexed[pos][1]
        if gap > delta:
            # depending on where we are, outliers are the remaining
            # items or the ones that we've already seen.
            if pos < (len(indexed) - pos):
                # outliers are close to the start
                start, end = 0, pos + 1
            else:
                # outliers are close to the end
                start, end = pos + 1, len(indexed)
            break
    if start == -1:
        return []
    return [indexed[i][0] for i in range(start, end)]
def get_matching(content, match):
    """filters out lines that don't include match (empty match keeps all)"""
    if match == "":
        return content
    return "\n".join(line for line in content.split("\n") if match in line)
def post(action, params=None, version=6):
    """For the documentation, see https://foosoft.net/projects/anki-connect/

    :param str action:
    :param dict params:
    :param int version:
    :return:
    """
    payload = {
        'action': action,
        'version': version,
        'params': params if params is not None else dict()
    }
    response = requests.post(AnkiConnect.URL, json=payload)
    return response.json()
def get_representative_json(file_input=None,
                            formatted=False, annotate_is_json=False,
                            sampling_substitution_regex=('(.+)', '\\1_sample'),
                            do_not_sample=('sqlite_stat1', ),
                            sampling_limits=None):
    """
    :param None|str file_input:
    :param bool formatted:
    :param bool annotate_is_json:
    :param tuple sampling_substitution_regex: to shorten string by one, try ('(.+).{1}', '\\1') or ('(.+)s', '\\1')
    :param list|tuple do_not_sample:
    :param None|dict sampling_limits:
    :return:
    """
    if file_input is None:
        file_input = get_collection_path()
    source = file_input
    if sampling_limits is None:
        sampling_limits = {'notes': 10, 'cards': 10}
    if os.path.splitext(file_input)[1] == '.apkg':
        # *.apkg must first be converted to a plain *.anki2 sqlite database
        from AnkiTools.convert import anki_convert
        temp_anki2 = os.path.join(mkdtemp(), 'temp.anki2')
        anki_convert(file_input, out_file=temp_anki2)
        file_input = temp_anki2
    output_json = OrderedDict(
        _meta=OrderedDict(
            generated=datetime.fromtimestamp(datetime.now().timestamp()).isoformat(),
            source=os.path.abspath(source),
            data=OrderedDict()
        )
    )
    with sqlite3.connect(file_input) as conn:
        cursor = conn.execute("SELECT name FROM sqlite_master WHERE type='table';")
        for row in cursor:
            table_name = row[0]
            key = table_name
            records = list(read_anki_table(conn, table_name))
            meta_data = output_json['_meta']['data']
            if table_name not in meta_data.keys():
                meta_data[table_name] = OrderedDict()
            meta_data[table_name]['number_of_entries'] = len(records)
            if len(records) >= 1:
                if len(records) > 1:
                    if table_name in do_not_sample:
                        output_json[key] = records
                    else:
                        # rename the key to mark it as a sample
                        re_match, re_replace = sampling_substitution_regex
                        key = re.sub(re_match, re_replace, key)
                        output_json[key] = random.sample(records, sampling_limits.get(table_name, 10))
                else:
                    output_json[key] = records[0]
                if formatted:
                    to_format = output_json[key]
                    if isinstance(to_format, (dict, OrderedDict)):
                        _format_representative_json(to_format, annotate_is_json)
                    else:
                        for item in to_format:
                            _format_representative_json(item, annotate_is_json)
            else:
                output_json[key] = None
    return output_json
def new_field(field_name: str, ordering: int, **kwargs):
    """Fields have no unique ID.

    :param field_name:
    :param ordering:
    :param kwargs: overrides for any of the default field attributes
    :return: the field dict
    """
    field = dict([
        ('name', field_name),
        ('rtl', False),
        ('sticky', False),
        ('media', []),
        ('ord', ordering),
        ('font', 'Arial'),
        ('size', 12)
    ])
    # only known attributes may be overridden
    for key in field:
        if key in kwargs:
            field[key] = kwargs[key]
    return field
def new_template(template_name: str, ordering: int, formatting: dict=None, **kwargs):
    """Templates have no unique ID.

    :param template_name:
    :param ordering:
    :param formatting: extra overrides, merged into kwargs
    :param kwargs: overrides for any of the default template attributes
    :return: the template dict
    """
    if formatting is not None:
        kwargs.update(formatting)
    template = dict([
        ('name', template_name),
        ('qfmt', DEFAULT_TEMPLATE['qfmt']),
        ('did', None),
        ('bafmt', DEFAULT_TEMPLATE['bafmt']),
        ('afmt', DEFAULT_TEMPLATE['afmt']),
        ('ord', ordering),
        ('bqfmt', DEFAULT_TEMPLATE['bqfmt'])
    ])
    # only known attributes may be overridden
    for key in template:
        if key in kwargs:
            template[key] = kwargs[key]
    return template
def load_remote_db(self):
    """Load remote S3 DB into /tmp/ and point the connection at the copy."""
    signature_version = self.settings_dict.get("SIGNATURE_VERSION", "s3v4")
    s3 = boto3.resource(
        's3',
        config=botocore.client.Config(signature_version=signature_version),
    )
    if '/tmp/' not in self.settings_dict['NAME']:
        local_path = '/tmp/' + self.settings_dict['NAME']
        try:
            etag = ''
            if os.path.isfile(local_path):
                digest = hashlib.md5()
                with open(local_path, 'rb') as f:
                    digest.update(f.read())
                # In general the ETag is the md5 of the file, in some cases it's not,
                # and in that case we will just need to reload the file, I don't see any other way
                etag = digest.hexdigest()
            obj = s3.Object(self.settings_dict['BUCKET'], self.settings_dict['NAME'])
            obj_bytes = obj.get(IfNoneMatch=etag)["Body"]  # Will throw E on 304 or 404
            with open(local_path, 'wb') as f:
                f.write(obj_bytes.read())
            digest = hashlib.md5()
            with open(local_path, 'rb') as f:
                digest.update(f.read())
            self.db_hash = digest.hexdigest()
        except botocore.exceptions.ClientError as e:
            if e.response['Error']['Code'] == "304":
                logging.debug("ETag matches md5 of local copy, using local copy of DB!")
                self.db_hash = etag
            else:
                logging.debug("Couldn't load remote DB object.")
        except Exception as e:
            # Weird one
            logging.debug(e)
    # SQLite DatabaseWrapper will treat our tmp as normal now
    # Check because Django likes to call this function a lot more than it should
    if '/tmp/' not in self.settings_dict['NAME']:
        self.settings_dict['REMOTE_NAME'] = self.settings_dict['NAME']
        self.settings_dict['NAME'] = '/tmp/' + self.settings_dict['NAME']
    # Make sure it exists if it doesn't yet
    if not os.path.isfile(self.settings_dict['NAME']):
        open(self.settings_dict['NAME'], 'a').close()
    logging.debug("Loaded remote DB!")
def close(self, *args, **kwargs):
    """Engine closed, copy file to DB if it has changed"""
    super(DatabaseWrapper, self).close(*args, **kwargs)
    signature_version = self.settings_dict.get("SIGNATURE_VERSION", "s3v4")
    s3 = boto3.resource(
        's3',
        config=botocore.client.Config(signature_version=signature_version),
    )
    try:
        with open(self.settings_dict['NAME'], 'rb') as f:
            contents = f.read()
        digest = hashlib.md5()
        digest.update(contents)
        if self.db_hash == digest.hexdigest():
            # nothing changed since load_remote_db(); skip the upload
            logging.debug("Database unchanged, not saving to remote DB!")
            return
        buf = BytesIO()
        buf.write(contents)
        buf.seek(0)
        s3_object = s3.Object(self.settings_dict['BUCKET'], self.settings_dict['REMOTE_NAME'])
        s3_object.put('rb', Body=buf)
    except Exception as e:
        logging.debug(e)
    logging.debug("Saved to remote DB!")
def id(self):  # pylint: disable=invalid-name,too-many-branches,too-many-return-statements
    """Return a unique id for the detected chip, if any."""
    # There are some times we want to trick the platform detection
    # say if a raspberry pi doesn't have the right ID, or for testing
    try:
        return os.environ['BLINKA_FORCECHIP']
    except KeyError:  # no forced chip, continue with testing!
        pass
    # Special case, if we have an environment var set, we could use FT232H
    try:
        if os.environ['BLINKA_FT232H']:
            # we can't have ftdi1 as a dependency cause its wierd
            # to install, sigh.
            import ftdi1 as ftdi  # pylint: disable=import-error
            try:
                ctx = None
                ctx = ftdi.new()  # Create a libftdi context.
                # Enumerate FTDI devices.
                count, _ = ftdi.usb_find_all(ctx, 0, 0)
                if count < 0:
                    # bug fix: the %-format args must be a tuple, and the
                    # context we own here is the local ctx, not self._ctx
                    raise RuntimeError('ftdi_usb_find_all returned error %d : %s' %
                                       (count, ftdi.get_error_string(ctx)))
                if count == 0:
                    # bug fix: add the missing space ("variableset" -> "variable set")
                    raise RuntimeError('BLINKA_FT232H environment variable ' +
                                       'set, but no FT232H device found')
            finally:
                # Make sure to clean up list and context when done.
                if ctx is not None:
                    ftdi.free(ctx)
            return FT232H
    except KeyError:  # no FT232H environment var
        pass
    platform = sys.platform
    if platform == "linux" or platform == "linux2":
        return self._linux_id()
    if platform == "esp8266":
        return ESP8266
    if platform == "samd21":
        return SAMD21
    if platform == "pyboard":
        return STM32
    # nothing found!
    return None
def _linux_id(self):
    """Attempt to detect the CPU on a computer running the Linux kernel."""
    linux_id = None
    hardware = self.detector.get_cpuinfo_field("Hardware")
    if hardware is None:
        # x86-ish machines have no "Hardware" field; check the vendor instead
        vendor_id = self.detector.get_cpuinfo_field("vendor_id")
        if vendor_id in ("GenuineIntel", "AuthenticAMD"):
            linux_id = GENERIC_X86
        # aarch64 Tegra boards expose a device-tree "compatible" string
        compatible = self.detector.get_device_compatible()
        if compatible and 'tegra' in compatible:
            if 'cv' in compatible or 'nano' in compatible:
                linux_id = T210
            elif 'quill' in compatible:
                linux_id = T186
            elif 'xavier' in compatible:
                linux_id = T194
    elif hardware in ("BCM2708", "BCM2709", "BCM2835"):
        linux_id = BCM2XXX
    elif "AM33XX" in hardware:
        linux_id = AM33XX
    elif "sun8i" in hardware:
        linux_id = SUN8I
    elif "ODROIDC" in hardware:
        linux_id = S805
    elif "ODROID-C2" in hardware:
        linux_id = S905
    elif "SAMA5" in hardware:
        linux_id = SAMA5
    return linux_id
def id(self):
    """Return a unique id for the detected board, if any."""
    # There are some times we want to trick the platform detection
    # say if a raspberry pi doesn't have the right ID, or for testing
    try:
        return os.environ['BLINKA_FORCEBOARD']
    except KeyError:  # no forced board, continue with testing!
        pass
    chip_id = self.detector.chip.id
    # chips that need per-board probing go through helper methods;
    # single-board chips map straight to a constant
    if chip_id == ap_chip.BCM2XXX:
        return self._pi_id()
    if chip_id == ap_chip.AM33XX:
        return self._beaglebone_id()
    if chip_id == ap_chip.GENERIC_X86:
        return GENERIC_LINUX_PC
    if chip_id == ap_chip.SUN8I:
        return self._armbian_id()
    if chip_id == ap_chip.SAMA5:
        return self._sama5_id()
    if chip_id == ap_chip.ESP8266:
        return FEATHER_HUZZAH
    if chip_id == ap_chip.SAMD21:
        return FEATHER_M0_EXPRESS
    if chip_id == ap_chip.STM32:
        return PYBOARD
    if chip_id == ap_chip.S805:
        return ODROID_C1
    if chip_id == ap_chip.S905:
        return ODROID_C2
    if chip_id == ap_chip.FT232H:
        return FTDI_FT232H
    if chip_id in (ap_chip.T210, ap_chip.T186, ap_chip.T194):
        return self._tegra_id()
    return None
def _pi_id(self):
    """Try to detect id of a Raspberry Pi."""
    # Check for Pi boards: map the revision code back to a model name
    rev_code = self._pi_rev_code()
    if rev_code:
        for model, codes in _PI_REV_CODES.items():
            if rev_code in codes:
                return model
    return None
def _pi_rev_code(self):
    """Attempt to find a Raspberry Pi revision code for this board."""
    # 2708 is Pi 1
    # 2709 is Pi 2
    # 2835 is Pi 3 (or greater) on 4.9.x kernel
    # Anything else is not a Pi.
    if self.detector.chip.id != ap_chip.BCM2XXX:
        # Something else, not a Pi.
        return None
    return self.detector.get_cpuinfo_field('Revision')
def _beaglebone_id(self):
    """Try to detect id of a Beaglebone."""
    try:
        with open("/sys/bus/nvmem/devices/0-00500/nvmem", "rb") as eeprom:
            eeprom_bytes = eeprom.read(16)
    except FileNotFoundError:
        return None
    # first four bytes are the Beaglebone EEPROM magic header
    if eeprom_bytes[:4] != b'\xaaU3\xee':
        return None
    id_string = eeprom_bytes[4:].decode("ascii")
    for model, bb_ids in _BEAGLEBONE_BOARD_IDS.items():
        if any(id_string == bb_id[1] for bb_id in bb_ids):
            return model
    return None
def _tegra_id(self):
    """Try to detect the id of aarch64 board."""
    board_value = self.detector.get_device_model()
    # order matters: first matching substring wins
    lookups = (
        ('tx1', JETSON_TX1),
        ('quill', JETSON_TX2),
        ('xavier', JETSON_XAVIER),
        ('nano', JETSON_NANO),
    )
    for substring, board in lookups:
        if substring in board_value:
            return board
    return None
def any_embedded_linux(self):
    """Check whether the current board is any embedded Linux device."""
    return (self.any_raspberry_pi or self.any_beaglebone or
            self.any_orange_pi or self.any_giant_board or
            self.any_jetson_board)
def get_cpuinfo_field(self, field):
    """Search /proc/cpuinfo for a field and return its value, if found,
    otherwise None.
    """
    # Match a line like 'Hardware : BCM2709':
    pattern = r'^' + field + r'\s+:\s+(.*)$'
    with open('/proc/cpuinfo', 'r') as infile:
        for line in infile.read().split('\n'):
            match = re.search(pattern, line, flags=re.IGNORECASE)
            if match:
                return match.group(1)
    return None
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.