Dataset columns (name: type, min to max):

repository_name: stringlengths, 5 to 67
func_path_in_repository: stringlengths, 4 to 234
func_name: stringlengths, 0 to 314
whole_func_string: stringlengths, 52 to 3.87M
language: stringclasses, 6 values
func_code_tokens: sequencelengths, 15 to 672k
func_documentation_string: stringlengths, 1 to 47.2k
func_documentation_tokens: sequencelengths, 1 to 3.92k
split_name: stringclasses, 1 value
func_code_url: stringlengths, 85 to 339
ontio/ontology-python-sdk
ontology/io/binary_reader.py
BinaryReader.read_var_str
def read_var_str(self, max_size=sys.maxsize):
    """
    Similar to `ReadString` but expects a variable length indicator instead of the fixed 1 byte indicator.

    Args:
        max_size (int): (Optional) maximum number of bytes to read.

    Returns:
        bytes:
    """
    length = self.read_var_int(max_size)
    return self.unpack(str(length) + 's', length)
python
[ "def", "read_var_str", "(", "self", ",", "max_size", "=", "sys", ".", "maxsize", ")", ":", "length", "=", "self", ".", "read_var_int", "(", "max_size", ")", "return", "self", ".", "unpack", "(", "str", "(", "length", ")", "+", "'s'", ",", "length", ")" ]
Similar to `ReadString` but expects a variable length indicator instead of the fixed 1 byte indicator. Args: max_size (int): (Optional) maximum number of bytes to read. Returns: bytes:
[ "Similar", "to", "ReadString", "but", "expects", "a", "variable", "length", "indicator", "instead", "of", "the", "fixed", "1", "byte", "indicator", "." ]
train
https://github.com/ontio/ontology-python-sdk/blob/ac88bdda941896c5d2ced08422a9c5179d3f9b19/ontology/io/binary_reader.py#L311-L322
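A minimal, self-contained sketch of the wire format read_var_str consumes, for orientation: a variable-length integer (the same 0xfd/0xfe/0xff scheme written by write_var_int further down) followed by that many raw bytes. The helper names are hypothetical, not the SDK's API.

import struct
from io import BytesIO

def demo_read_var_int(stream):
    # First byte decides the width: below 0xfd it is the value itself;
    # 0xfd, 0xfe and 0xff announce a 2, 4 or 8 byte little-endian integer.
    prefix = stream.read(1)[0]
    if prefix < 0xfd:
        return prefix
    fmt = {0xfd: '<H', 0xfe: '<I', 0xff: '<Q'}[prefix]
    return struct.unpack(fmt, stream.read(struct.calcsize(fmt)))[0]

def demo_read_var_str(stream):
    length = demo_read_var_int(stream)
    return stream.read(length)

# Round trip: a 5-byte payload whose length fits in the single prefix byte.
assert demo_read_var_str(BytesIO(bytes([5]) + b'hello')) == b'hello'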
ontio/ontology-python-sdk
ontology/io/binary_reader.py
BinaryReader.read_serializable_array
def read_serializable_array(self, class_name, max_size=sys.maxsize):
    """
    Deserialize a stream into the object specified by `class_name`.

    Args:
        class_name (str): a full path to the class to be deserialized into. e.g. 'neo.Core.Block.Block'
        max_size (int): (Optional) maximum number of bytes to read.

    Returns:
        list: list of `class_name` objects deserialized from the stream.
    """
    module = '.'.join(class_name.split('.')[:-1])
    class_name = class_name.split('.')[-1]
    class_attr = getattr(importlib.import_module(module), class_name)
    length = self.read_var_int(max_size=max_size)
    items = []
    try:
        for _ in range(0, length):
            item = class_attr()
            item.Deserialize(self)
            items.append(item)
    except Exception as e:
        raise SDKException(ErrorCode.param_err("Couldn't deserialize %s" % e))
    return items
python
[ "def", "read_serializable_array", "(", "self", ",", "class_name", ",", "max_size", "=", "sys", ".", "maxsize", ")", ":", "module", "=", "'.'", ".", "join", "(", "class_name", ".", "split", "(", "'.'", ")", "[", ":", "-", "1", "]", ")", "class_name", "=", "class_name", ".", "split", "(", "'.'", ")", "[", "-", "1", "]", "class_attr", "=", "getattr", "(", "importlib", ".", "import_module", "(", "module", ")", ",", "class_name", ")", "length", "=", "self", ".", "read_var_int", "(", "max_size", "=", "max_size", ")", "items", "=", "[", "]", "try", ":", "for", "_", "in", "range", "(", "0", ",", "length", ")", ":", "item", "=", "class_attr", "(", ")", "item", ".", "Deserialize", "(", "self", ")", "items", ".", "append", "(", "item", ")", "except", "Exception", "as", "e", ":", "raise", "SDKException", "(", "ErrorCode", ".", "param_err", "(", "\"Couldn't deserialize %s\"", "%", "e", ")", ")", "return", "items" ]
Deserialize a stream into the object specified by `class_name`. Args: class_name (str): a full path to the class to be deserialized into. e.g. 'neo.Core.Block.Block' max_size (int): (Optional) maximum number of bytes to read. Returns: list: list of `class_name` objects deserialized from the stream.
[ "Deserialize", "a", "stream", "into", "the", "object", "specific", "by", "class_name", "." ]
train
https://github.com/ontio/ontology-python-sdk/blob/ac88bdda941896c5d2ced08422a9c5179d3f9b19/ontology/io/binary_reader.py#L335-L358
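The dynamic loading in read_serializable_array boils down to importlib plus getattr. A standalone sketch of that resolution step, using a stdlib class so it runs anywhere (rpartition is equivalent to the split/join dance in the method above):

import importlib

def load_class(dotted_path):
    # 'collections.OrderedDict' -> module 'collections', attribute 'OrderedDict'
    module_name, _, class_name = dotted_path.rpartition('.')
    return getattr(importlib.import_module(module_name), class_name)

assert load_class('collections.OrderedDict')().__class__.__name__ == 'OrderedDict'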
ontio/ontology-python-sdk
ontology/io/binary_reader.py
BinaryReader.read_2000256_list
def read_2000256_list(self):
    """
    Read 2000 times a 64 byte value from the stream.

    Returns:
        list: a list containing 2000 64 byte values in reversed form.
    """
    items = []
    for _ in range(0, 2000):
        data = self.read_bytes(64)
        ba = bytearray(binascii.unhexlify(data))
        ba.reverse()
        items.append(ba.hex().encode('utf-8'))
    return items
python
[ "def", "read_2000256_list", "(", "self", ")", ":", "items", "=", "[", "]", "for", "_", "in", "range", "(", "0", ",", "2000", ")", ":", "data", "=", "self", ".", "read_bytes", "(", "64", ")", "ba", "=", "bytearray", "(", "binascii", ".", "unhexlify", "(", "data", ")", ")", "ba", ".", "reverse", "(", ")", "items", ".", "append", "(", "ba", ".", "hex", "(", ")", ".", "encode", "(", "'utf-8'", ")", ")", "return", "items" ]
Read 2000 times a 64 byte value from the stream. Returns: list: a list containing 2000 64 byte values in reversed form.
[ "Read", "2000", "times", "a", "64", "byte", "value", "from", "the", "stream", "." ]
train
https://github.com/ontio/ontology-python-sdk/blob/ac88bdda941896c5d2ced08422a9c5179d3f9b19/ontology/io/binary_reader.py#L360-L373
ontio/ontology-python-sdk
ontology/io/binary_reader.py
BinaryReader.read_hashes
def read_hashes(self):
    """
    Read Hash values from the stream.

    Returns:
        list: a list of hash values. Each value is a reversed, hex-encoded string.
    """
    var_len = self.read_var_int()
    items = []
    for _ in range(0, var_len):
        ba = bytearray(self.read_bytes(32))
        ba.reverse()
        items.append(ba.hex())
    return items
python
[ "def", "read_hashes", "(", "self", ")", ":", "var_len", "=", "self", ".", "read_var_int", "(", ")", "items", "=", "[", "]", "for", "_", "in", "range", "(", "0", ",", "var_len", ")", ":", "ba", "=", "bytearray", "(", "self", ".", "read_bytes", "(", "32", ")", ")", "ba", ".", "reverse", "(", ")", "items", ".", "append", "(", "ba", ".", "hex", "(", ")", ")", "return", "items" ]
Read Hash values from the stream. Returns: list: a list of hash values. Each value is a reversed, hex-encoded string.
[ "Read", "Hash", "values", "from", "the", "stream", "." ]
train
https://github.com/ontio/ontology-python-sdk/blob/ac88bdda941896c5d2ced08422a9c5179d3f9b19/ontology/io/binary_reader.py#L375-L388
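The bytearray.reverse() in read_hashes (and read_2000256_list above) converts between the little-endian byte order hashes travel in on the wire and the big-endian order they are conventionally displayed in. A standalone illustration with an arbitrary digest, not real chain data:

import hashlib

wire = hashlib.sha256(b'example').digest()  # 32 raw bytes, as read_bytes(32) would return
ba = bytearray(wire)
ba.reverse()                                # wire order -> display order
assert ba.hex() == wire[::-1].hex()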
ontio/ontology-python-sdk
ontology/io/binary_writer.py
BinaryWriter.write_byte
def write_byte(self, value):
    """
    Write a single byte to the stream.

    Args:
        value (bytes, str or int): value to write to the stream.
    """
    if isinstance(value, bytes):
        self.stream.write(value)
    elif isinstance(value, str):
        self.stream.write(value.encode('utf-8'))
    elif isinstance(value, int):
        self.stream.write(bytes([value]))
python
[ "def", "write_byte", "(", "self", ",", "value", ")", ":", "if", "isinstance", "(", "value", ",", "bytes", ")", ":", "self", ".", "stream", ".", "write", "(", "value", ")", "elif", "isinstance", "(", "value", ",", "str", ")", ":", "self", ".", "stream", ".", "write", "(", "value", ".", "encode", "(", "'utf-8'", ")", ")", "elif", "isinstance", "(", "value", ",", "int", ")", ":", "self", ".", "stream", ".", "write", "(", "bytes", "(", "[", "value", "]", ")", ")" ]
Write a single byte to the stream. Args: value (bytes, str or int): value to write to the stream.
[ "Write", "a", "single", "byte", "to", "the", "stream", "." ]
train
https://github.com/ontio/ontology-python-sdk/blob/ac88bdda941896c5d2ced08422a9c5179d3f9b19/ontology/io/binary_writer.py#L47-L59
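The three isinstance branches of write_byte, exercised directly against an io.BytesIO stream. Note that a str longer than one character would write more than a single byte despite the method's name; nothing in the code guards against that.

from io import BytesIO

stream = BytesIO()
stream.write(b'\x01')               # bytes pass through unchanged
stream.write('a'.encode('utf-8'))   # str is utf-8 encoded first
stream.write(bytes([255]))          # an int in 0..255 becomes one byte
assert stream.getvalue() == b'\x01a\xff'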
ontio/ontology-python-sdk
ontology/io/binary_writer.py
BinaryWriter.pack
def pack(self, fmt, data):
    """
    Write bytes by packing them according to the provided format `fmt`.
    For more information about the `fmt` format see: https://docs.python.org/3/library/struct.html

    Args:
        fmt (str): format string.
        data (object): the data to write to the raw stream.

    Returns:
        int: the number of bytes written.
    """
    return self.write_bytes(struct.pack(fmt, data))
python
[ "def", "pack", "(", "self", ",", "fmt", ",", "data", ")", ":", "return", "self", ".", "write_bytes", "(", "struct", ".", "pack", "(", "fmt", ",", "data", ")", ")" ]
Write bytes by packing them according to the provided format `fmt`. For more information about the `fmt` format see: https://docs.python.org/3/library/struct.html Args: fmt (str): format string. data (object): the data to write to the raw stream. Returns: int: the number of bytes written.
[ "Write", "bytes", "by", "packing", "them", "according", "to", "the", "provided", "format", "fmt", ".", "For", "more", "information", "about", "the", "fmt", "format", "see", ":", "https", ":", "//", "docs", ".", "python", ".", "org", "/", "3", "/", "library", "/", "struct", ".", "html" ]
train
https://github.com/ontio/ontology-python-sdk/blob/ac88bdda941896c5d2ced08422a9c5179d3f9b19/ontology/io/binary_writer.py#L69-L81
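Every write_* method below reduces to a struct format string handed to pack, and read_var_str's unpack(str(length) + 's', length) is the reading mirror of the 's' format. A few packing facts those methods rely on (see the struct docs linked above):

import struct

assert struct.pack('<H', 0x1234) == b'\x34\x12'     # '<' = little-endian, 'H' = unsigned 2 bytes
assert struct.pack('>H', 0x1234) == b'\x12\x34'     # '>' = big-endian
assert struct.pack('5s', b'abc') == b'abc\x00\x00'  # 's' pads/truncates bytes to the given count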
ontio/ontology-python-sdk
ontology/io/binary_writer.py
BinaryWriter.write_float
def write_float(self, value, little_endian=True):
    """
    Pack the value as a float and write 4 bytes to the stream.

    Args:
        value (number): the value to write to the stream.
        little_endian (bool): specify the endianness. (Default) Little endian.

    Returns:
        int: the number of bytes written.
    """
    if little_endian:
        endian = "<"
    else:
        endian = ">"
    return self.pack('%sf' % endian, value)
python
[ "def", "write_float", "(", "self", ",", "value", ",", "little_endian", "=", "True", ")", ":", "if", "little_endian", ":", "endian", "=", "\"<\"", "else", ":", "endian", "=", "\">\"", "return", "self", ".", "pack", "(", "'%sf'", "%", "endian", ",", "value", ")" ]
Pack the value as a float and write 4 bytes to the stream. Args: value (number): the value to write to the stream. little_endian (bool): specify the endianness. (Default) Little endian. Returns: int: the number of bytes written.
[ "Pack", "the", "value", "as", "a", "float", "and", "write", "4", "bytes", "to", "the", "stream", "." ]
train
https://github.com/ontio/ontology-python-sdk/blob/ac88bdda941896c5d2ced08422a9c5179d3f9b19/ontology/io/binary_writer.py#L95-L110
ontio/ontology-python-sdk
ontology/io/binary_writer.py
BinaryWriter.write_double
def write_double(self, value, little_endian=True):
    """
    Pack the value as a double and write 8 bytes to the stream.

    Args:
        value (number): the value to write to the stream.
        little_endian (bool): specify the endianness. (Default) Little endian.

    Returns:
        int: the number of bytes written.
    """
    if little_endian:
        endian = "<"
    else:
        endian = ">"
    return self.pack('%sd' % endian, value)
python
[ "def", "write_double", "(", "self", ",", "value", ",", "little_endian", "=", "True", ")", ":", "if", "little_endian", ":", "endian", "=", "\"<\"", "else", ":", "endian", "=", "\">\"", "return", "self", ".", "pack", "(", "'%sd'", "%", "endian", ",", "value", ")" ]
Pack the value as a double and write 8 bytes to the stream. Args: value (number): the value to write to the stream. little_endian (bool): specify the endianness. (Default) Little endian. Returns: int: the number of bytes written.
[ "Pack", "the", "value", "as", "a", "double", "and", "write", "8", "bytes", "to", "the", "stream", "." ]
train
https://github.com/ontio/ontology-python-sdk/blob/ac88bdda941896c5d2ced08422a9c5179d3f9b19/ontology/io/binary_writer.py#L112-L127
ontio/ontology-python-sdk
ontology/io/binary_writer.py
BinaryWriter.write_int8
def write_int8(self, value, little_endian=True):
    """
    Pack the value as a signed byte and write 1 byte to the stream.

    Args:
        value:
        little_endian (bool): specify the endianness. (Default) Little endian.

    Returns:
        int: the number of bytes written.
    """
    if little_endian:
        endian = "<"
    else:
        endian = ">"
    return self.pack('%sb' % endian, value)
python
[ "def", "write_int8", "(", "self", ",", "value", ",", "little_endian", "=", "True", ")", ":", "if", "little_endian", ":", "endian", "=", "\"<\"", "else", ":", "endian", "=", "\">\"", "return", "self", ".", "pack", "(", "'%sb'", "%", "endian", ",", "value", ")" ]
Pack the value as a signed byte and write 1 byte to the stream. Args: value: little_endian (bool): specify the endianness. (Default) Little endian. Returns: int: the number of bytes written.
[ "Pack", "the", "value", "as", "a", "signed", "byte", "and", "write", "1", "byte", "to", "the", "stream", "." ]
train
https://github.com/ontio/ontology-python-sdk/blob/ac88bdda941896c5d2ced08422a9c5179d3f9b19/ontology/io/binary_writer.py#L129-L144
ontio/ontology-python-sdk
ontology/io/binary_writer.py
BinaryWriter.write_uint8
def write_uint8(self, value, little_endian=True):
    """
    Pack the value as an unsigned byte and write 1 byte to the stream.

    Args:
        value:
        little_endian (bool): specify the endianness. (Default) Little endian.

    Returns:
        int: the number of bytes written.
    """
    if little_endian:
        endian = "<"
    else:
        endian = ">"
    return self.pack('%sB' % endian, value)
python
[ "def", "write_uint8", "(", "self", ",", "value", ",", "little_endian", "=", "True", ")", ":", "if", "little_endian", ":", "endian", "=", "\"<\"", "else", ":", "endian", "=", "\">\"", "return", "self", ".", "pack", "(", "'%sB'", "%", "endian", ",", "value", ")" ]
Pack the value as an unsigned byte and write 1 byte to the stream. Args: value: little_endian (bool): specify the endianness. (Default) Little endian. Returns: int: the number of bytes written.
[ "Pack", "the", "value", "as", "an", "unsigned", "byte", "and", "write", "1", "byte", "to", "the", "stream", "." ]
train
https://github.com/ontio/ontology-python-sdk/blob/ac88bdda941896c5d2ced08422a9c5179d3f9b19/ontology/io/binary_writer.py#L146-L161
ontio/ontology-python-sdk
ontology/io/binary_writer.py
BinaryWriter.write_int16
def write_int16(self, value, little_endian=True):
    """
    Pack the value as a signed integer and write 2 bytes to the stream.

    Args:
        value:
        little_endian (bool): specify the endianness. (Default) Little endian.

    Returns:
        int: the number of bytes written.
    """
    if little_endian:
        endian = "<"
    else:
        endian = ">"
    return self.pack('%sh' % endian, value)
python
[ "def", "write_int16", "(", "self", ",", "value", ",", "little_endian", "=", "True", ")", ":", "if", "little_endian", ":", "endian", "=", "\"<\"", "else", ":", "endian", "=", "\">\"", "return", "self", ".", "pack", "(", "'%sh'", "%", "endian", ",", "value", ")" ]
Pack the value as a signed integer and write 2 bytes to the stream. Args: value: little_endian (bool): specify the endianness. (Default) Little endian. Returns: int: the number of bytes written.
[ "Pack", "the", "value", "as", "a", "signed", "integer", "and", "write", "2", "bytes", "to", "the", "stream", "." ]
train
https://github.com/ontio/ontology-python-sdk/blob/ac88bdda941896c5d2ced08422a9c5179d3f9b19/ontology/io/binary_writer.py#L175-L190
ontio/ontology-python-sdk
ontology/io/binary_writer.py
BinaryWriter.write_uint16
def write_uint16(self, value, little_endian=True):
    """
    Pack the value as an unsigned integer and write 2 bytes to the stream.

    Args:
        value:
        little_endian (bool): specify the endianness. (Default) Little endian.

    Returns:
        int: the number of bytes written.
    """
    if little_endian:
        endian = "<"
    else:
        endian = ">"
    return self.pack('%sH' % endian, value)
python
[ "def", "write_uint16", "(", "self", ",", "value", ",", "little_endian", "=", "True", ")", ":", "if", "little_endian", ":", "endian", "=", "\"<\"", "else", ":", "endian", "=", "\">\"", "return", "self", ".", "pack", "(", "'%sH'", "%", "endian", ",", "value", ")" ]
Pack the value as an unsigned integer and write 2 bytes to the stream. Args: value: little_endian (bool): specify the endianness. (Default) Little endian. Returns: int: the number of bytes written.
[ "Pack", "the", "value", "as", "an", "unsigned", "integer", "and", "write", "2", "bytes", "to", "the", "stream", "." ]
train
https://github.com/ontio/ontology-python-sdk/blob/ac88bdda941896c5d2ced08422a9c5179d3f9b19/ontology/io/binary_writer.py#L192-L207
ontio/ontology-python-sdk
ontology/io/binary_writer.py
BinaryWriter.write_int32
def write_int32(self, value, little_endian=True):
    """
    Pack the value as a signed integer and write 4 bytes to the stream.

    Args:
        value:
        little_endian (bool): specify the endianness. (Default) Little endian.

    Returns:
        int: the number of bytes written.
    """
    if little_endian:
        endian = "<"
    else:
        endian = ">"
    return self.pack('%si' % endian, value)
python
[ "def", "write_int32", "(", "self", ",", "value", ",", "little_endian", "=", "True", ")", ":", "if", "little_endian", ":", "endian", "=", "\"<\"", "else", ":", "endian", "=", "\">\"", "return", "self", ".", "pack", "(", "'%si'", "%", "endian", ",", "value", ")" ]
Pack the value as a signed integer and write 4 bytes to the stream. Args: value: little_endian (bool): specify the endianness. (Default) Little endian. Returns: int: the number of bytes written.
[ "Pack", "the", "value", "as", "a", "signed", "integer", "and", "write", "4", "bytes", "to", "the", "stream", "." ]
train
https://github.com/ontio/ontology-python-sdk/blob/ac88bdda941896c5d2ced08422a9c5179d3f9b19/ontology/io/binary_writer.py#L209-L224
ontio/ontology-python-sdk
ontology/io/binary_writer.py
BinaryWriter.write_uint32
def write_uint32(self, value, little_endian=True):
    """
    Pack the value as an unsigned integer and write 4 bytes to the stream.

    Args:
        value:
        little_endian (bool): specify the endianness. (Default) Little endian.

    Returns:
        int: the number of bytes written.
    """
    if little_endian:
        endian = "<"
    else:
        endian = ">"
    return self.pack('%sI' % endian, value)
python
[ "def", "write_uint32", "(", "self", ",", "value", ",", "little_endian", "=", "True", ")", ":", "if", "little_endian", ":", "endian", "=", "\"<\"", "else", ":", "endian", "=", "\">\"", "return", "self", ".", "pack", "(", "'%sI'", "%", "endian", ",", "value", ")" ]
Pack the value as an unsigned integer and write 4 bytes to the stream. Args: value: little_endian (bool): specify the endianness. (Default) Little endian. Returns: int: the number of bytes written.
[ "Pack", "the", "value", "as", "an", "unsigned", "integer", "and", "write", "4", "bytes", "to", "the", "stream", "." ]
train
https://github.com/ontio/ontology-python-sdk/blob/ac88bdda941896c5d2ced08422a9c5179d3f9b19/ontology/io/binary_writer.py#L226-L241
ontio/ontology-python-sdk
ontology/io/binary_writer.py
BinaryWriter.write_int64
def write_int64(self, value, little_endian=True):
    """
    Pack the value as a signed integer and write 8 bytes to the stream.

    Args:
        value:
        little_endian (bool): specify the endianness. (Default) Little endian.

    Returns:
        int: the number of bytes written.
    """
    if little_endian:
        endian = "<"
    else:
        endian = ">"
    return self.pack('%sq' % endian, value)
python
[ "def", "write_int64", "(", "self", ",", "value", ",", "little_endian", "=", "True", ")", ":", "if", "little_endian", ":", "endian", "=", "\"<\"", "else", ":", "endian", "=", "\">\"", "return", "self", ".", "pack", "(", "'%sq'", "%", "endian", ",", "value", ")" ]
Pack the value as a signed integer and write 8 bytes to the stream. Args: value: little_endian (bool): specify the endianness. (Default) Little endian. Returns: int: the number of bytes written.
[ "Pack", "the", "value", "as", "a", "signed", "integer", "and", "write", "8", "bytes", "to", "the", "stream", "." ]
train
https://github.com/ontio/ontology-python-sdk/blob/ac88bdda941896c5d2ced08422a9c5179d3f9b19/ontology/io/binary_writer.py#L243-L258
ontio/ontology-python-sdk
ontology/io/binary_writer.py
BinaryWriter.write_uint64
def write_uint64(self, value, little_endian=True):
    """
    Pack the value as an unsigned integer and write 8 bytes to the stream.

    Args:
        value:
        little_endian (bool): specify the endianness. (Default) Little endian.

    Returns:
        int: the number of bytes written.
    """
    if little_endian:
        endian = "<"
    else:
        endian = ">"
    return self.pack('%sQ' % endian, value)
python
[ "def", "write_uint64", "(", "self", ",", "value", ",", "little_endian", "=", "True", ")", ":", "if", "little_endian", ":", "endian", "=", "\"<\"", "else", ":", "endian", "=", "\">\"", "return", "self", ".", "pack", "(", "'%sQ'", "%", "endian", ",", "value", ")" ]
Pack the value as an unsigned integer and write 8 bytes to the stream. Args: value: little_endian (bool): specify the endianness. (Default) Little endian. Returns: int: the number of bytes written.
[ "Pack", "the", "value", "as", "an", "unsigned", "integer", "and", "write", "8", "bytes", "to", "the", "stream", "." ]
train
https://github.com/ontio/ontology-python-sdk/blob/ac88bdda941896c5d2ced08422a9c5179d3f9b19/ontology/io/binary_writer.py#L260-L275
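write_int8 through write_uint64 differ only in the struct type letter. A quick check of the width each letter implies; note that endianness is meaningless for the 1-byte formats, so the little_endian flag on write_int8/write_uint8 has no observable effect.

import struct

# b/B -> int8/uint8, h/H -> int16/uint16, i/I -> int32/uint32, q/Q -> int64/uint64
for letter, width in [('b', 1), ('B', 1), ('h', 2), ('H', 2),
                      ('i', 4), ('I', 4), ('q', 8), ('Q', 8)]:
    assert struct.calcsize('<' + letter) == width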
ontio/ontology-python-sdk
ontology/io/binary_writer.py
BinaryWriter.write_var_int
def write_var_int(self, value, little_endian=True):
    """
    Write an integer value in a space saving way to the stream.

    Args:
        value (int):
        little_endian (bool): specify the endianness. (Default) Little endian.

    Raises:
        SDKException: if `value` is not of type int.
        SDKException: if `value` is < 0.

    Returns:
        int: the number of bytes written.
    """
    if not isinstance(value, int):
        raise SDKException(ErrorCode.param_err('%s not int type.' % value))
    if value < 0:
        raise SDKException(ErrorCode.param_err('%d too small.' % value))
    elif value < 0xfd:
        return self.write_byte(value)
    elif value <= 0xffff:
        self.write_byte(0xfd)
        return self.write_uint16(value, little_endian)
    elif value <= 0xFFFFFFFF:
        self.write_byte(0xfe)
        return self.write_uint32(value, little_endian)
    else:
        self.write_byte(0xff)
        return self.write_uint64(value, little_endian)
python
[ "def", "write_var_int", "(", "self", ",", "value", ",", "little_endian", "=", "True", ")", ":", "if", "not", "isinstance", "(", "value", ",", "int", ")", ":", "raise", "SDKException", "(", "ErrorCode", ".", "param_err", "(", "'%s not int type.'", "%", "value", ")", ")", "if", "value", "<", "0", ":", "raise", "SDKException", "(", "ErrorCode", ".", "param_err", "(", "'%d too small.'", "%", "value", ")", ")", "elif", "value", "<", "0xfd", ":", "return", "self", ".", "write_byte", "(", "value", ")", "elif", "value", "<=", "0xffff", ":", "self", ".", "write_byte", "(", "0xfd", ")", "return", "self", ".", "write_uint16", "(", "value", ",", "little_endian", ")", "elif", "value", "<=", "0xFFFFFFFF", ":", "self", ".", "write_byte", "(", "0xfe", ")", "return", "self", ".", "write_uint32", "(", "value", ",", "little_endian", ")", "else", ":", "self", ".", "write_byte", "(", "0xff", ")", "return", "self", ".", "write_uint64", "(", "value", ",", "little_endian", ")" ]
Write an integer value in a space saving way to the stream. Args: value (int): little_endian (bool): specify the endianness. (Default) Little endian. Raises: SDKException: if `value` is not of type int. SDKException: if `value` is < 0. Returns: int: the number of bytes written.
[ "Write", "an", "integer", "value", "in", "a", "space", "saving", "way", "to", "the", "stream", "." ]
train
https://github.com/ontio/ontology-python-sdk/blob/ac88bdda941896c5d2ced08422a9c5179d3f9b19/ontology/io/binary_writer.py#L277-L311
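A standalone encoder mirroring write_var_int's thresholds, to make the wire format concrete; it round-trips with the decoder sketched after read_var_str above. These helpers are illustrations, not SDK functions.

import struct

def demo_encode_var_int(value):
    # 1 byte below 0xfd, otherwise a marker byte plus 2, 4 or 8 bytes.
    if value < 0xfd:
        return bytes([value])
    if value <= 0xffff:
        return b'\xfd' + struct.pack('<H', value)
    if value <= 0xFFFFFFFF:
        return b'\xfe' + struct.pack('<I', value)
    return b'\xff' + struct.pack('<Q', value)

assert demo_encode_var_int(0xfc) == b'\xfc'
assert demo_encode_var_int(0xfd) == b'\xfd\xfd\x00'
assert demo_encode_var_int(0x10000) == b'\xfe\x00\x00\x01\x00'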
ontio/ontology-python-sdk
ontology/io/binary_writer.py
BinaryWriter.write_var_bytes
def write_var_bytes(self, value, little_endian: bool = True):
    """
    Write the bytes `value` to the stream, prefixed with its length encoded as a variable-length integer.

    :param value:
    :param little_endian: specify the endianness. (Default) Little endian.
    :return: int: the number of bytes written.
    """
    length = len(value)
    self.write_var_int(length, little_endian)
    return self.write_bytes(value, to_bytes=False)
python
[ "def", "write_var_bytes", "(", "self", ",", "value", ",", "little_endian", ":", "bool", "=", "True", ")", ":", "length", "=", "len", "(", "value", ")", "self", ".", "write_var_int", "(", "length", ",", "little_endian", ")", "return", "self", ".", "write_bytes", "(", "value", ",", "to_bytes", "=", "False", ")" ]
Write the bytes `value` to the stream, prefixed with its length encoded as a variable-length integer. :param value: :param little_endian: specify the endianness. (Default) Little endian. :return: int: the number of bytes written.
[ "Write", "an", "integer", "value", "in", "a", "space", "saving", "way", "to", "the", "stream", "." ]
train
https://github.com/ontio/ontology-python-sdk/blob/ac88bdda941896c5d2ced08422a9c5179d3f9b19/ontology/io/binary_writer.py#L313-L323
ontio/ontology-python-sdk
ontology/io/binary_writer.py
BinaryWriter.write_var_str
def write_var_str(self, value, encoding: str = 'utf-8'):
    """
    Write a string value to the stream.

    :param value: value to write to the stream.
    :param encoding: string encoding format.
    """
    if isinstance(value, str):
        value = value.encode(encoding)
    self.write_var_int(len(value))
    self.write_bytes(value)
python
[ "def", "write_var_str", "(", "self", ",", "value", ",", "encoding", ":", "str", "=", "'utf-8'", ")", ":", "if", "isinstance", "(", "value", ",", "str", ")", ":", "value", "=", "value", ".", "encode", "(", "encoding", ")", "self", ".", "write_var_int", "(", "len", "(", "value", ")", ")", "self", ".", "write_bytes", "(", "value", ")" ]
Write a string value to the stream. :param value: value to write to the stream. :param encoding: string encoding format.
[ "Write", "a", "string", "value", "to", "the", "stream", "." ]
train
https://github.com/ontio/ontology-python-sdk/blob/ac88bdda941896c5d2ced08422a9c5179d3f9b19/ontology/io/binary_writer.py#L325-L335
ontio/ontology-python-sdk
ontology/io/binary_writer.py
BinaryWriter.write_fixed_str
def write_fixed_str(self, value, length):
    """
    Write a string value to the stream.

    Args:
        value (str): value to write to the stream.
        length (int): length of the string to write.
    """
    towrite = value.encode('utf-8')
    slen = len(towrite)
    if slen > length:
        raise SDKException(ErrorCode.param_err('string longer than fixed length: %s' % length))
    self.write_bytes(towrite)
    diff = length - slen
    while diff > 0:
        self.write_byte(0)
        diff -= 1
python
[ "def", "write_fixed_str", "(", "self", ",", "value", ",", "length", ")", ":", "towrite", "=", "value", ".", "encode", "(", "'utf-8'", ")", "slen", "=", "len", "(", "towrite", ")", "if", "slen", ">", "length", ":", "raise", "SDKException", "(", "ErrorCode", ".", "param_err", "(", "'string longer than fixed length: %s'", "%", "length", ")", ")", "self", ".", "write_bytes", "(", "towrite", ")", "diff", "=", "length", "-", "slen", "while", "diff", ">", "0", ":", "self", ".", "write_byte", "(", "0", ")", "diff", "-=", "1" ]
Write a string value to the stream. Args: value (str): value to write to the stream. length (int): length of the string to write.
[ "Write", "a", "string", "value", "to", "the", "stream", "." ]
train
https://github.com/ontio/ontology-python-sdk/blob/ac88bdda941896c5d2ced08422a9c5179d3f9b19/ontology/io/binary_writer.py#L337-L354
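The pad loop in write_fixed_str is equivalent to right-padding the encoded bytes with NUL bytes, as this one-liner shows:

assert 'ont'.encode('utf-8').ljust(8, b'\x00') == b'ont\x00\x00\x00\x00\x00'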
ontio/ontology-python-sdk
ontology/io/binary_writer.py
BinaryWriter.write_serializable_array
def write_serializable_array(self, array):
    """
    Write an array of serializable objects to the stream.

    Args:
        array(list): a list of serializable objects. i.e. extending neo.IO.Mixins.SerializableMixin
    """
    if array is None:
        self.write_byte(0)
    else:
        self.write_var_int(len(array))
        for item in array:
            item.Serialize(self)
python
[ "def", "write_serializable_array", "(", "self", ",", "array", ")", ":", "if", "array", "is", "None", ":", "self", ".", "write_byte", "(", "0", ")", "else", ":", "self", ".", "write_var_int", "(", "len", "(", "array", ")", ")", "for", "item", "in", "array", ":", "item", ".", "Serialize", "(", "self", ")" ]
Write an array of serializable objects to the stream. Args: array(list): a list of serializable objects. i.e. extending neo.IO.Mixins.SerializableMixin
[ "Write", "an", "array", "of", "serializable", "objects", "to", "the", "stream", "." ]
train
https://github.com/ontio/ontology-python-sdk/blob/ac88bdda941896c5d2ced08422a9c5179d3f9b19/ontology/io/binary_writer.py#L356-L368
ontio/ontology-python-sdk
ontology/io/binary_writer.py
BinaryWriter.write_hashes
def write_hashes(self, arr):
    """
    Write an array of hashes to the stream.

    Args:
        arr (list): a list of 32 byte hashes.
    """
    length = len(arr)
    self.write_var_int(length)
    for item in arr:
        ba = bytearray(binascii.unhexlify(item))
        ba.reverse()
        self.write_bytes(ba)
python
[ "def", "write_hashes", "(", "self", ",", "arr", ")", ":", "length", "=", "len", "(", "arr", ")", "self", ".", "write_var_int", "(", "length", ")", "for", "item", "in", "arr", ":", "ba", "=", "bytearray", "(", "binascii", ".", "unhexlify", "(", "item", ")", ")", "ba", ".", "reverse", "(", ")", "self", ".", "write_bytes", "(", "ba", ")" ]
Write an array of hashes to the stream. Args: arr (list): a list of 32 byte hashes.
[ "Write", "an", "array", "of", "hashes", "to", "the", "stream", "." ]
train
https://github.com/ontio/ontology-python-sdk/blob/ac88bdda941896c5d2ced08422a9c5179d3f9b19/ontology/io/binary_writer.py#L370-L382
ontio/ontology-python-sdk
ontology/smart_contract/native_contract/asset.py
Asset.get_asset_address
def get_asset_address(self, asset: str) -> bytes:
    """
    This interface is used to get the smart contract address of ONT or ONG.

    :param asset: a string which is used to indicate which asset's contract address we want to get.
    :return: the contract address of asset in the form of bytearray.
    """
    if asset.upper() == 'ONT':
        return self.__ont_contract
    elif asset.upper() == 'ONG':
        return self.__ong_contract
    else:
        raise SDKException(ErrorCode.other_error('asset is not equal to ONT or ONG.'))
python
[ "def", "get_asset_address", "(", "self", ",", "asset", ":", "str", ")", "->", "bytes", ":", "if", "asset", ".", "upper", "(", ")", "==", "'ONT'", ":", "return", "self", ".", "__ont_contract", "elif", "asset", ".", "upper", "(", ")", "==", "'ONG'", ":", "return", "self", ".", "__ong_contract", "else", ":", "raise", "SDKException", "(", "ErrorCode", ".", "other_error", "(", "'asset is not equal to ONT or ONG.'", ")", ")" ]
This interface is used to get the smart contract address of ONT or ONG. :param asset: a string which is used to indicate which asset's contract address we want to get. :return: the contract address of asset in the form of bytearray.
[ "This", "interface", "is", "used", "to", "get", "the", "smart", "contract", "address", "of", "ONT", "otr", "ONG", "." ]
train
https://github.com/ontio/ontology-python-sdk/blob/ac88bdda941896c5d2ced08422a9c5179d3f9b19/ontology/smart_contract/native_contract/asset.py#L22-L34
ontio/ontology-python-sdk
ontology/smart_contract/native_contract/asset.py
Asset.query_balance
def query_balance(self, asset: str, b58_address: str) -> int:
    """
    This interface is used to query the account's ONT or ONG balance.

    :param asset: a string which is used to indicate which asset's balance we want to check.
    :param b58_address: a base58 encoded account address.
    :return: account balance.
    """
    raw_address = Address.b58decode(b58_address).to_bytes()
    contract_address = self.get_asset_address(asset)
    invoke_code = build_native_invoke_code(contract_address, b'\x00', "balanceOf", raw_address)
    tx = Transaction(0, 0xd1, int(time()), 0, 0, None, invoke_code, bytearray(), list())
    response = self.__sdk.rpc.send_raw_transaction_pre_exec(tx)
    try:
        balance = ContractDataParser.to_int(response['Result'])
        return balance
    except SDKException:
        return 0
python
[ "def", "query_balance", "(", "self", ",", "asset", ":", "str", ",", "b58_address", ":", "str", ")", "->", "int", ":", "raw_address", "=", "Address", ".", "b58decode", "(", "b58_address", ")", ".", "to_bytes", "(", ")", "contract_address", "=", "self", ".", "get_asset_address", "(", "asset", ")", "invoke_code", "=", "build_native_invoke_code", "(", "contract_address", ",", "b'\\x00'", ",", "\"balanceOf\"", ",", "raw_address", ")", "tx", "=", "Transaction", "(", "0", ",", "0xd1", ",", "int", "(", "time", "(", ")", ")", ",", "0", ",", "0", ",", "None", ",", "invoke_code", ",", "bytearray", "(", ")", ",", "list", "(", ")", ")", "response", "=", "self", ".", "__sdk", ".", "rpc", ".", "send_raw_transaction_pre_exec", "(", "tx", ")", "try", ":", "balance", "=", "ContractDataParser", ".", "to_int", "(", "response", "[", "'Result'", "]", ")", "return", "balance", "except", "SDKException", ":", "return", "0" ]
This interface is used to query the account's ONT or ONG balance. :param asset: a string which is used to indicate which asset's balance we want to check. :param b58_address: a base58 encoded account address. :return: account balance.
[ "This", "interface", "is", "used", "to", "query", "the", "account", "s", "ONT", "or", "ONG", "balance", "." ]
train
https://github.com/ontio/ontology-python-sdk/blob/ac88bdda941896c5d2ced08422a9c5179d3f9b19/ontology/smart_contract/native_contract/asset.py#L36-L53
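The except clause suggests response['Result'] arrives as a hex string that ContractDataParser.to_int decodes into an integer. Assuming (this is an assumption, not the SDK's documented contract) the hex encodes a little-endian integer, as NeoVM results conventionally do, the decoding step amounts to:

# Hypothetical stand-in for ContractDataParser.to_int, under the
# little-endian-hex assumption stated above.
def demo_result_to_int(hex_result: str) -> int:
    return int.from_bytes(bytes.fromhex(hex_result), byteorder='little')

assert demo_result_to_int('00e1f505') == 100000000  # 0x05f5e100 read little-endian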
ontio/ontology-python-sdk
ontology/smart_contract/native_contract/asset.py
Asset.query_unbound_ong
def query_unbound_ong(self, base58_address: str) -> int:
    """
    This interface is used to query the amount of an account's unbound ong.

    :param base58_address: a base58 encoded address which indicates which account's unbound ong we want to query.
    :return: the amount of unbound ong in the form of int.
    """
    contract_address = self.get_asset_address('ont')
    unbound_ong = self.__sdk.rpc.get_allowance("ong", Address(contract_address).b58encode(), base58_address)
    return int(unbound_ong)
python
[ "def", "query_unbound_ong", "(", "self", ",", "base58_address", ":", "str", ")", "->", "int", ":", "contract_address", "=", "self", ".", "get_asset_address", "(", "'ont'", ")", "unbound_ong", "=", "self", ".", "__sdk", ".", "rpc", ".", "get_allowance", "(", "\"ong\"", ",", "Address", "(", "contract_address", ")", ".", "b58encode", "(", ")", ",", "base58_address", ")", "return", "int", "(", "unbound_ong", ")" ]
This interface is used to query the amount of an account's unbound ong. :param base58_address: a base58 encoded address which indicates which account's unbound ong we want to query. :return: the amount of unbound ong in the form of int.
[ "This", "interface", "is", "used", "to", "query", "the", "amount", "of", "account", "s", "unbound", "ong", "." ]
train
https://github.com/ontio/ontology-python-sdk/blob/ac88bdda941896c5d2ced08422a9c5179d3f9b19/ontology/smart_contract/native_contract/asset.py#L76-L85
ontio/ontology-python-sdk
ontology/smart_contract/native_contract/asset.py
Asset.query_symbol
def query_symbol(self, asset: str) -> str:
    """
    This interface is used to query the asset's symbol of ONT or ONG.

    :param asset: a string which is used to indicate which asset's symbol we want to get.
    :return: the asset's symbol in the form of a string.
    """
    contract_address = self.get_asset_address(asset)
    method = 'symbol'
    invoke_code = build_native_invoke_code(contract_address, b'\x00', method, bytearray())
    tx = Transaction(0, 0xd1, int(time()), 0, 0, None, invoke_code, bytearray(), list())
    response = self.__sdk.rpc.send_raw_transaction_pre_exec(tx)
    symbol = ContractDataParser.to_utf8_str(response['Result'])
    return symbol
python
[ "def", "query_symbol", "(", "self", ",", "asset", ":", "str", ")", "->", "str", ":", "contract_address", "=", "self", ".", "get_asset_address", "(", "asset", ")", "method", "=", "'symbol'", "invoke_code", "=", "build_native_invoke_code", "(", "contract_address", ",", "b'\\x00'", ",", "method", ",", "bytearray", "(", ")", ")", "tx", "=", "Transaction", "(", "0", ",", "0xd1", ",", "int", "(", "time", "(", ")", ")", ",", "0", ",", "0", ",", "None", ",", "invoke_code", ",", "bytearray", "(", ")", ",", "list", "(", ")", ")", "response", "=", "self", ".", "__sdk", ".", "rpc", ".", "send_raw_transaction_pre_exec", "(", "tx", ")", "symbol", "=", "ContractDataParser", ".", "to_utf8_str", "(", "response", "[", "'Result'", "]", ")", "return", "symbol" ]
This interface is used to query the asset's symbol of ONT or ONG. :param asset: a string which is used to indicate which asset's symbol we want to get. :return: the asset's symbol in the form of a string.
[ "This", "interface", "is", "used", "to", "query", "the", "asset", "s", "symbol", "of", "ONT", "or", "ONG", "." ]
train
https://github.com/ontio/ontology-python-sdk/blob/ac88bdda941896c5d2ced08422a9c5179d3f9b19/ontology/smart_contract/native_contract/asset.py#L103-L116
ontio/ontology-python-sdk
ontology/smart_contract/native_contract/asset.py
Asset.query_decimals
def query_decimals(self, asset: str) -> int:
    """
    This interface is used to query the asset's decimals of ONT or ONG.

    :param asset: a string which is used to indicate which asset's decimals we want to get.
    :return: the asset's decimals in the form of int.
    """
    contract_address = self.get_asset_address(asset)
    invoke_code = build_native_invoke_code(contract_address, b'\x00', 'decimals', bytearray())
    tx = Transaction(0, 0xd1, int(time()), 0, 0, None, invoke_code, bytearray(), list())
    response = self.__sdk.rpc.send_raw_transaction_pre_exec(tx)
    try:
        decimal = ContractDataParser.to_int(response['Result'])
        return decimal
    except SDKException:
        return 0
python
[ "def", "query_decimals", "(", "self", ",", "asset", ":", "str", ")", "->", "int", ":", "contract_address", "=", "self", ".", "get_asset_address", "(", "asset", ")", "invoke_code", "=", "build_native_invoke_code", "(", "contract_address", ",", "b'\\x00'", ",", "'decimals'", ",", "bytearray", "(", ")", ")", "tx", "=", "Transaction", "(", "0", ",", "0xd1", ",", "int", "(", "time", "(", ")", ")", ",", "0", ",", "0", ",", "None", ",", "invoke_code", ",", "bytearray", "(", ")", ",", "list", "(", ")", ")", "response", "=", "self", ".", "__sdk", ".", "rpc", ".", "send_raw_transaction_pre_exec", "(", "tx", ")", "try", ":", "decimal", "=", "ContractDataParser", ".", "to_int", "(", "response", "[", "'Result'", "]", ")", "return", "decimal", "except", "SDKException", ":", "return", "0" ]
This interface is used to query the asset's decimals of ONT or ONG. :param asset: a string which is used to indicate which asset's decimals we want to get. :return: the asset's decimals in the form of int.
[ "This", "interface", "is", "used", "to", "query", "the", "asset", "s", "decimals", "of", "ONT", "or", "ONG", "." ]
train
https://github.com/ontio/ontology-python-sdk/blob/ac88bdda941896c5d2ced08422a9c5179d3f9b19/ontology/smart_contract/native_contract/asset.py#L118-L133
ontio/ontology-python-sdk
ontology/smart_contract/native_contract/asset.py
Asset.new_transfer_transaction
def new_transfer_transaction(self, asset: str, b58_from_address: str, b58_to_address: str, amount: int,
                             b58_payer_address: str, gas_limit: int, gas_price: int) -> Transaction:
    """
    This interface is used to generate a Transaction object for transfer.

    :param asset: a string which is used to indicate which asset we want to transfer.
    :param b58_from_address: a base58 encoded address which indicates where the asset comes from.
    :param b58_to_address: a base58 encoded address which indicates where the asset goes to.
    :param amount: the amount of asset that will be transferred.
    :param b58_payer_address: a base58 encoded address which indicates who will pay for the transaction.
    :param gas_limit: an int value that indicates the gas limit.
    :param gas_price: an int value that indicates the gas price.
    :return: a Transaction object which can be used for transfer.
    """
    if not isinstance(b58_from_address, str) or not isinstance(b58_to_address, str) or not isinstance(
            b58_payer_address, str):
        raise SDKException(ErrorCode.param_err('the data type of base58 encode address should be the string.'))
    if len(b58_from_address) != 34 or len(b58_to_address) != 34 or len(b58_payer_address) != 34:
        raise SDKException(ErrorCode.param_err('the length of base58 encode address should be 34 bytes.'))
    if amount <= 0:
        raise SDKException(ErrorCode.other_error('the amount should be greater than zero.'))
    if gas_price < 0:
        raise SDKException(ErrorCode.other_error('the gas price should be equal or greater than zero.'))
    if gas_limit < 0:
        raise SDKException(ErrorCode.other_error('the gas limit should be equal or greater than zero.'))
    contract_address = self.get_asset_address(asset)
    raw_from = Address.b58decode(b58_from_address).to_bytes()
    raw_to = Address.b58decode(b58_to_address).to_bytes()
    raw_payer = Address.b58decode(b58_payer_address).to_bytes()
    state = [{"from": raw_from, "to": raw_to, "amount": amount}]
    invoke_code = build_native_invoke_code(contract_address, b'\x00', "transfer", state)
    return Transaction(0, 0xd1, int(time()), gas_price, gas_limit, raw_payer, invoke_code, bytearray(), list())
python
[ "def", "new_transfer_transaction", "(", "self", ",", "asset", ":", "str", ",", "b58_from_address", ":", "str", ",", "b58_to_address", ":", "str", ",", "amount", ":", "int", ",", "b58_payer_address", ":", "str", ",", "gas_limit", ":", "int", ",", "gas_price", ":", "int", ")", "->", "Transaction", ":", "if", "not", "isinstance", "(", "b58_from_address", ",", "str", ")", "or", "not", "isinstance", "(", "b58_to_address", ",", "str", ")", "or", "not", "isinstance", "(", "b58_payer_address", ",", "str", ")", ":", "raise", "SDKException", "(", "ErrorCode", ".", "param_err", "(", "'the data type of base58 encode address should be the string.'", ")", ")", "if", "len", "(", "b58_from_address", ")", "!=", "34", "or", "len", "(", "b58_to_address", ")", "!=", "34", "or", "len", "(", "b58_payer_address", ")", "!=", "34", ":", "raise", "SDKException", "(", "ErrorCode", ".", "param_err", "(", "'the length of base58 encode address should be 34 bytes.'", ")", ")", "if", "amount", "<=", "0", ":", "raise", "SDKException", "(", "ErrorCode", ".", "other_error", "(", "'the amount should be greater than than zero.'", ")", ")", "if", "gas_price", "<", "0", ":", "raise", "SDKException", "(", "ErrorCode", ".", "other_error", "(", "'the gas price should be equal or greater than zero.'", ")", ")", "if", "gas_limit", "<", "0", ":", "raise", "SDKException", "(", "ErrorCode", ".", "other_error", "(", "'the gas limit should be equal or greater than zero.'", ")", ")", "contract_address", "=", "self", ".", "get_asset_address", "(", "asset", ")", "raw_from", "=", "Address", ".", "b58decode", "(", "b58_from_address", ")", ".", "to_bytes", "(", ")", "raw_to", "=", "Address", ".", "b58decode", "(", "b58_to_address", ")", ".", "to_bytes", "(", ")", "raw_payer", "=", "Address", ".", "b58decode", "(", "b58_payer_address", ")", ".", "to_bytes", "(", ")", "state", "=", "[", "{", "\"from\"", ":", "raw_from", ",", "\"to\"", ":", "raw_to", ",", "\"amount\"", ":", "amount", "}", "]", "invoke_code", "=", "build_native_invoke_code", "(", "contract_address", ",", "b'\\x00'", ",", "\"transfer\"", ",", "state", ")", "return", "Transaction", "(", "0", ",", "0xd1", ",", "int", "(", "time", "(", ")", ")", ",", "gas_price", ",", "gas_limit", ",", "raw_payer", ",", "invoke_code", ",", "bytearray", "(", ")", ",", "list", "(", ")", ")" ]
This interface is used to generate a Transaction object for transfer. :param asset: a string which is used to indicate which asset we want to transfer. :param b58_from_address: a base58 encoded address which indicates where the asset comes from. :param b58_to_address: a base58 encoded address which indicates where the asset goes to. :param amount: the amount of asset that will be transferred. :param b58_payer_address: a base58 encoded address which indicates who will pay for the transaction. :param gas_limit: an int value that indicates the gas limit. :param gas_price: an int value that indicates the gas price. :return: a Transaction object which can be used for transfer.
[ "This", "interface", "is", "used", "to", "generate", "a", "Transaction", "object", "for", "transfer", "." ]
train
https://github.com/ontio/ontology-python-sdk/blob/ac88bdda941896c5d2ced08422a9c5179d3f9b19/ontology/smart_contract/native_contract/asset.py#L135-L166
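The len(...) != 34 checks above reflect the fixed width of Base58Check-encoded addresses. A generic, self-verifying Base58Check sketch follows; this is not Address.b58decode itself, and the 0x17 version byte in the dummy payload is illustrative.

import hashlib

B58 = '123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz'

def b58check_encode(payload: bytes) -> str:
    # Payload plus a 4-byte double-SHA256 checksum, rendered in base 58.
    raw = payload + hashlib.sha256(hashlib.sha256(payload).digest()).digest()[:4]
    n = int.from_bytes(raw, 'big')
    out = ''
    while n:
        n, r = divmod(n, 58)
        out = B58[r] + out
    return '1' * (len(raw) - len(raw.lstrip(b'\x00'))) + out

def b58check_decode(s: str) -> bytes:
    n = 0
    for ch in s:
        n = n * 58 + B58.index(ch)
    raw = n.to_bytes((n.bit_length() + 7) // 8, 'big')
    raw = b'\x00' * (len(s) - len(s.lstrip('1'))) + raw  # leading '1's are zero bytes
    payload, checksum = raw[:-4], raw[-4:]
    assert hashlib.sha256(hashlib.sha256(payload).digest()).digest()[:4] == checksum
    return payload

addr = b58check_encode(b'\x17' + bytes(20))  # 21-byte dummy payload, not a real address
assert len(addr) == 34 and b58check_decode(addr) == b'\x17' + bytes(20)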
ontio/ontology-python-sdk
ontology/smart_contract/native_contract/asset.py
Asset.new_approve_transaction
def new_approve_transaction(self, asset: str, b58_send_address: str, b58_recv_address: str, amount: int,
                            b58_payer_address: str, gas_limit: int, gas_price: int) -> Transaction:
    """
    This interface is used to generate a Transaction object for approve.

    :param asset: a string which is used to indicate which asset we want to approve.
    :param b58_send_address: a base58 encoded address which indicates where the approval comes from.
    :param b58_recv_address: a base58 encoded address which indicates where the approval goes to.
    :param amount: the amount of asset that will be approved.
    :param b58_payer_address: a base58 encoded address which indicates who will pay for the transaction.
    :param gas_limit: an int value that indicates the gas limit.
    :param gas_price: an int value that indicates the gas price.
    :return: a Transaction object which can be used for approve.
    """
    if not isinstance(b58_send_address, str) or not isinstance(b58_recv_address, str):
        raise SDKException(ErrorCode.param_err('the data type of base58 encode address should be the string.'))
    if len(b58_send_address) != 34 or len(b58_recv_address) != 34:
        raise SDKException(ErrorCode.param_err('the length of base58 encode address should be 34 bytes.'))
    if amount <= 0:
        raise SDKException(ErrorCode.other_error('the amount should be greater than zero.'))
    if gas_price < 0:
        raise SDKException(ErrorCode.other_error('the gas price should be equal or greater than zero.'))
    if gas_limit < 0:
        raise SDKException(ErrorCode.other_error('the gas limit should be equal or greater than zero.'))
    contract_address = self.get_asset_address(asset)
    raw_send = Address.b58decode(b58_send_address).to_bytes()
    raw_recv = Address.b58decode(b58_recv_address).to_bytes()
    raw_payer = Address.b58decode(b58_payer_address).to_bytes()
    args = {"from": raw_send, "to": raw_recv, "amount": amount}
    invoke_code = build_native_invoke_code(contract_address, b'\x00', 'approve', args)
    return Transaction(0, 0xd1, int(time()), gas_price, gas_limit, raw_payer, invoke_code, bytearray(), list())
python
def new_approve_transaction(self, asset: str, b58_send_address: str, b58_recv_address: str, amount: int, b58_payer_address: str, gas_limit: int, gas_price: int) -> Transaction: """ This interface is used to generate a Transaction object for approve. :param asset: a string which is used to indicate which asset we want to approve. :param b58_send_address: a base58 encode address which indicate where the approve from. :param b58_recv_address: a base58 encode address which indicate where the approve to. :param amount: the amount of asset that will be approved. :param b58_payer_address: a base58 encode address which indicate who will pay for the transaction. :param gas_limit: an int value that indicate the gas limit. :param gas_price: an int value that indicate the gas price. :return: a Transaction object which can be used for approve. """ if not isinstance(b58_send_address, str) or not isinstance(b58_recv_address, str): raise SDKException(ErrorCode.param_err('the data type of base58 encode address should be the string.')) if len(b58_send_address) != 34 or len(b58_recv_address) != 34: raise SDKException(ErrorCode.param_err('the length of base58 encode address should be 34 bytes.')) if amount <= 0: raise SDKException(ErrorCode.other_error('the amount should be greater than than zero.')) if gas_price < 0: raise SDKException(ErrorCode.other_error('the gas price should be equal or greater than zero.')) if gas_limit < 0: raise SDKException(ErrorCode.other_error('the gas limit should be equal or greater than zero.')) contract_address = self.get_asset_address(asset) raw_send = Address.b58decode(b58_send_address).to_bytes() raw_recv = Address.b58decode(b58_recv_address).to_bytes() raw_payer = Address.b58decode(b58_payer_address).to_bytes() args = {"from": raw_send, "to": raw_recv, "amount": amount} invoke_code = build_native_invoke_code(contract_address, b'\x00', 'approve', args) return Transaction(0, 0xd1, int(time()), gas_price, gas_limit, raw_payer, invoke_code, bytearray(), list())
[ "def", "new_approve_transaction", "(", "self", ",", "asset", ":", "str", ",", "b58_send_address", ":", "str", ",", "b58_recv_address", ":", "str", ",", "amount", ":", "int", ",", "b58_payer_address", ":", "str", ",", "gas_limit", ":", "int", ",", "gas_price", ":", "int", ")", "->", "Transaction", ":", "if", "not", "isinstance", "(", "b58_send_address", ",", "str", ")", "or", "not", "isinstance", "(", "b58_recv_address", ",", "str", ")", ":", "raise", "SDKException", "(", "ErrorCode", ".", "param_err", "(", "'the data type of base58 encode address should be the string.'", ")", ")", "if", "len", "(", "b58_send_address", ")", "!=", "34", "or", "len", "(", "b58_recv_address", ")", "!=", "34", ":", "raise", "SDKException", "(", "ErrorCode", ".", "param_err", "(", "'the length of base58 encode address should be 34 bytes.'", ")", ")", "if", "amount", "<=", "0", ":", "raise", "SDKException", "(", "ErrorCode", ".", "other_error", "(", "'the amount should be greater than than zero.'", ")", ")", "if", "gas_price", "<", "0", ":", "raise", "SDKException", "(", "ErrorCode", ".", "other_error", "(", "'the gas price should be equal or greater than zero.'", ")", ")", "if", "gas_limit", "<", "0", ":", "raise", "SDKException", "(", "ErrorCode", ".", "other_error", "(", "'the gas limit should be equal or greater than zero.'", ")", ")", "contract_address", "=", "self", ".", "get_asset_address", "(", "asset", ")", "raw_send", "=", "Address", ".", "b58decode", "(", "b58_send_address", ")", ".", "to_bytes", "(", ")", "raw_recv", "=", "Address", ".", "b58decode", "(", "b58_recv_address", ")", ".", "to_bytes", "(", ")", "raw_payer", "=", "Address", ".", "b58decode", "(", "b58_payer_address", ")", ".", "to_bytes", "(", ")", "args", "=", "{", "\"from\"", ":", "raw_send", ",", "\"to\"", ":", "raw_recv", ",", "\"amount\"", ":", "amount", "}", "invoke_code", "=", "build_native_invoke_code", "(", "contract_address", ",", "b'\\x00'", ",", "'approve'", ",", "args", ")", "return", "Transaction", "(", "0", ",", "0xd1", ",", "int", "(", "time", "(", ")", ")", ",", "gas_price", ",", "gas_limit", ",", "raw_payer", ",", "invoke_code", ",", "bytearray", "(", ")", ",", "list", "(", ")", ")" ]
This interface is used to generate a Transaction object for an approval. :param asset: a string which is used to indicate which asset we want to approve. :param b58_send_address: a base58 encoded address which indicates where the approval comes from. :param b58_recv_address: a base58 encoded address which indicates where the approval goes to. :param amount: the amount of asset that will be approved. :param b58_payer_address: a base58 encoded address which indicates who will pay for the transaction. :param gas_limit: an int value that indicates the gas limit. :param gas_price: an int value that indicates the gas price. :return: a Transaction object which can be used for the approval.
[ "This", "interface", "is", "used", "to", "generate", "a", "Transaction", "object", "for", "approve", "." ]
train
https://github.com/ontio/ontology-python-sdk/blob/ac88bdda941896c5d2ced08422a9c5179d3f9b19/ontology/smart_contract/native_contract/asset.py#L168-L198
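The approval builder follows the same pattern; the resulting allowance lets `b58_recv_address` later spend up to `amount` from the sender's balance via transferFrom. Same hypothetical placeholders as in the sketch above:

# Build an unsigned approve transaction granting a spending allowance.
tx = asset.new_approve_transaction('ong', b58_send_address=b58_from,
                                   b58_recv_address=b58_to, amount=100,
                                   b58_payer_address=b58_from,
                                   gas_limit=20000, gas_price=500)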
ontio/ontology-python-sdk
ontology/smart_contract/native_contract/asset.py
Asset.new_transfer_from_transaction
def new_transfer_from_transaction(self, asset: str, b58_send_address: str, b58_from_address: str, b58_recv_address: str, amount: int, b58_payer_address: str, gas_limit: int, gas_price: int) -> Transaction: """ This interface is used to generate a Transaction object that allow one account to transfer a amount of ONT or ONG Asset to another account, in the condition of the first account had been approved. :param asset: a string which is used to indicate which asset we want to transfer. :param b58_send_address: a base58 encode address which indicate where the asset from. :param b58_from_address: a base58 encode address which indicate where the asset from. :param b58_recv_address: a base58 encode address which indicate where the asset to. :param amount: the amount of asset that will be transferred. :param b58_payer_address: a base58 encode address which indicate who will pay for the transaction. :param gas_limit: an int value that indicate the gas limit. :param gas_price: an int value that indicate the gas price. :return: a Transaction object which allow one account to transfer a amount of asset to another account. """ raw_sender = Address.b58decode(b58_send_address).to_bytes() raw_from = Address.b58decode(b58_from_address).to_bytes() raw_to = Address.b58decode(b58_recv_address).to_bytes() raw_payer = Address.b58decode(b58_payer_address).to_bytes() contract_address = self.get_asset_address(asset) args = {"sender": raw_sender, "from": raw_from, "to": raw_to, "amount": amount} invoke_code = build_native_invoke_code(contract_address, b'\x00', "transferFrom", args) return Transaction(0, 0xd1, int(time()), gas_price, gas_limit, raw_payer, invoke_code, bytearray(), list())
python
def new_transfer_from_transaction(self, asset: str, b58_send_address: str, b58_from_address: str, b58_recv_address: str, amount: int, b58_payer_address: str, gas_limit: int, gas_price: int) -> Transaction: """ This interface is used to generate a Transaction object that allow one account to transfer a amount of ONT or ONG Asset to another account, in the condition of the first account had been approved. :param asset: a string which is used to indicate which asset we want to transfer. :param b58_send_address: a base58 encode address which indicate where the asset from. :param b58_from_address: a base58 encode address which indicate where the asset from. :param b58_recv_address: a base58 encode address which indicate where the asset to. :param amount: the amount of asset that will be transferred. :param b58_payer_address: a base58 encode address which indicate who will pay for the transaction. :param gas_limit: an int value that indicate the gas limit. :param gas_price: an int value that indicate the gas price. :return: a Transaction object which allow one account to transfer a amount of asset to another account. """ raw_sender = Address.b58decode(b58_send_address).to_bytes() raw_from = Address.b58decode(b58_from_address).to_bytes() raw_to = Address.b58decode(b58_recv_address).to_bytes() raw_payer = Address.b58decode(b58_payer_address).to_bytes() contract_address = self.get_asset_address(asset) args = {"sender": raw_sender, "from": raw_from, "to": raw_to, "amount": amount} invoke_code = build_native_invoke_code(contract_address, b'\x00', "transferFrom", args) return Transaction(0, 0xd1, int(time()), gas_price, gas_limit, raw_payer, invoke_code, bytearray(), list())
[ "def", "new_transfer_from_transaction", "(", "self", ",", "asset", ":", "str", ",", "b58_send_address", ":", "str", ",", "b58_from_address", ":", "str", ",", "b58_recv_address", ":", "str", ",", "amount", ":", "int", ",", "b58_payer_address", ":", "str", ",", "gas_limit", ":", "int", ",", "gas_price", ":", "int", ")", "->", "Transaction", ":", "raw_sender", "=", "Address", ".", "b58decode", "(", "b58_send_address", ")", ".", "to_bytes", "(", ")", "raw_from", "=", "Address", ".", "b58decode", "(", "b58_from_address", ")", ".", "to_bytes", "(", ")", "raw_to", "=", "Address", ".", "b58decode", "(", "b58_recv_address", ")", ".", "to_bytes", "(", ")", "raw_payer", "=", "Address", ".", "b58decode", "(", "b58_payer_address", ")", ".", "to_bytes", "(", ")", "contract_address", "=", "self", ".", "get_asset_address", "(", "asset", ")", "args", "=", "{", "\"sender\"", ":", "raw_sender", ",", "\"from\"", ":", "raw_from", ",", "\"to\"", ":", "raw_to", ",", "\"amount\"", ":", "amount", "}", "invoke_code", "=", "build_native_invoke_code", "(", "contract_address", ",", "b'\\x00'", ",", "\"transferFrom\"", ",", "args", ")", "return", "Transaction", "(", "0", ",", "0xd1", ",", "int", "(", "time", "(", ")", ")", ",", "gas_price", ",", "gas_limit", ",", "raw_payer", ",", "invoke_code", ",", "bytearray", "(", ")", ",", "list", "(", ")", ")" ]
This interface is used to generate a Transaction object that allows one account to transfer an amount of ONT or ONG asset to another account, on the condition that the first account has been approved. :param asset: a string which is used to indicate which asset we want to transfer. :param b58_send_address: a base58 encoded address which indicates who sends the transaction. :param b58_from_address: a base58 encoded address which indicates where the asset comes from. :param b58_recv_address: a base58 encoded address which indicates where the asset goes to. :param amount: the amount of asset that will be transferred. :param b58_payer_address: a base58 encoded address which indicates who will pay for the transaction. :param gas_limit: an int value that indicates the gas limit. :param gas_price: an int value that indicates the gas price. :return: a Transaction object which allows one account to transfer an amount of asset to another account.
[ "This", "interface", "is", "used", "to", "generate", "a", "Transaction", "object", "that", "allow", "one", "account", "to", "transfer", "a", "amount", "of", "ONT", "or", "ONG", "Asset", "to", "another", "account", "in", "the", "condition", "of", "the", "first", "account", "had", "been", "approved", "." ]
train
https://github.com/ontio/ontology-python-sdk/blob/ac88bdda941896c5d2ced08422a9c5179d3f9b19/ontology/smart_contract/native_contract/asset.py#L200-L224
ontio/ontology-python-sdk
ontology/smart_contract/native_contract/asset.py
Asset.new_withdraw_ong_transaction
def new_withdraw_ong_transaction(self, b58_claimer_address: str, b58_recv_address: str, amount: int, b58_payer_address: str, gas_limit: int, gas_price: int) -> Transaction: """ This interface is used to generate a Transaction object that allow one account to withdraw an amount of ong and transfer them to receive address. :param b58_claimer_address: a base58 encode address which is used to indicate who is the claimer. :param b58_recv_address: a base58 encode address which is used to indicate who receive the claimed ong. :param amount: the amount of asset that will be claimed. :param b58_payer_address: a base58 encode address which indicate who will pay for the transaction. :param gas_limit: an int value that indicate the gas limit. :param gas_price: an int value that indicate the gas price. :return: a Transaction object which can be used for withdraw ong. """ if not isinstance(b58_claimer_address, str) or not isinstance(b58_recv_address, str) or not isinstance( b58_payer_address, str): raise SDKException(ErrorCode.param_err('the data type of base58 encode address should be the string.')) if len(b58_claimer_address) != 34 or len(b58_recv_address) != 34 or len(b58_payer_address) != 34: raise SDKException(ErrorCode.param_err('the length of base58 encode address should be 34 bytes.')) if amount <= 0: raise SDKException(ErrorCode.other_error('the amount should be greater than than zero.')) if gas_price < 0: raise SDKException(ErrorCode.other_error('the gas price should be equal or greater than zero.')) if gas_limit < 0: raise SDKException(ErrorCode.other_error('the gas limit should be equal or greater than zero.')) ont_contract_address = self.get_asset_address('ont') ong_contract_address = self.get_asset_address("ong") args = {"sender": Address.b58decode(b58_claimer_address).to_bytes(), "from": ont_contract_address, "to": Address.b58decode(b58_recv_address).to_bytes(), "value": amount} invoke_code = build_native_invoke_code(ong_contract_address, b'\x00', "transferFrom", args) payer_array = Address.b58decode(b58_payer_address).to_bytes() return Transaction(0, 0xd1, int(time()), gas_price, gas_limit, payer_array, invoke_code, bytearray(), list())
python
def new_withdraw_ong_transaction(self, b58_claimer_address: str, b58_recv_address: str, amount: int, b58_payer_address: str, gas_limit: int, gas_price: int) -> Transaction: """ This interface is used to generate a Transaction object that allow one account to withdraw an amount of ong and transfer them to receive address. :param b58_claimer_address: a base58 encode address which is used to indicate who is the claimer. :param b58_recv_address: a base58 encode address which is used to indicate who receive the claimed ong. :param amount: the amount of asset that will be claimed. :param b58_payer_address: a base58 encode address which indicate who will pay for the transaction. :param gas_limit: an int value that indicate the gas limit. :param gas_price: an int value that indicate the gas price. :return: a Transaction object which can be used for withdraw ong. """ if not isinstance(b58_claimer_address, str) or not isinstance(b58_recv_address, str) or not isinstance( b58_payer_address, str): raise SDKException(ErrorCode.param_err('the data type of base58 encode address should be the string.')) if len(b58_claimer_address) != 34 or len(b58_recv_address) != 34 or len(b58_payer_address) != 34: raise SDKException(ErrorCode.param_err('the length of base58 encode address should be 34 bytes.')) if amount <= 0: raise SDKException(ErrorCode.other_error('the amount should be greater than than zero.')) if gas_price < 0: raise SDKException(ErrorCode.other_error('the gas price should be equal or greater than zero.')) if gas_limit < 0: raise SDKException(ErrorCode.other_error('the gas limit should be equal or greater than zero.')) ont_contract_address = self.get_asset_address('ont') ong_contract_address = self.get_asset_address("ong") args = {"sender": Address.b58decode(b58_claimer_address).to_bytes(), "from": ont_contract_address, "to": Address.b58decode(b58_recv_address).to_bytes(), "value": amount} invoke_code = build_native_invoke_code(ong_contract_address, b'\x00', "transferFrom", args) payer_array = Address.b58decode(b58_payer_address).to_bytes() return Transaction(0, 0xd1, int(time()), gas_price, gas_limit, payer_array, invoke_code, bytearray(), list())
[ "def", "new_withdraw_ong_transaction", "(", "self", ",", "b58_claimer_address", ":", "str", ",", "b58_recv_address", ":", "str", ",", "amount", ":", "int", ",", "b58_payer_address", ":", "str", ",", "gas_limit", ":", "int", ",", "gas_price", ":", "int", ")", "->", "Transaction", ":", "if", "not", "isinstance", "(", "b58_claimer_address", ",", "str", ")", "or", "not", "isinstance", "(", "b58_recv_address", ",", "str", ")", "or", "not", "isinstance", "(", "b58_payer_address", ",", "str", ")", ":", "raise", "SDKException", "(", "ErrorCode", ".", "param_err", "(", "'the data type of base58 encode address should be the string.'", ")", ")", "if", "len", "(", "b58_claimer_address", ")", "!=", "34", "or", "len", "(", "b58_recv_address", ")", "!=", "34", "or", "len", "(", "b58_payer_address", ")", "!=", "34", ":", "raise", "SDKException", "(", "ErrorCode", ".", "param_err", "(", "'the length of base58 encode address should be 34 bytes.'", ")", ")", "if", "amount", "<=", "0", ":", "raise", "SDKException", "(", "ErrorCode", ".", "other_error", "(", "'the amount should be greater than than zero.'", ")", ")", "if", "gas_price", "<", "0", ":", "raise", "SDKException", "(", "ErrorCode", ".", "other_error", "(", "'the gas price should be equal or greater than zero.'", ")", ")", "if", "gas_limit", "<", "0", ":", "raise", "SDKException", "(", "ErrorCode", ".", "other_error", "(", "'the gas limit should be equal or greater than zero.'", ")", ")", "ont_contract_address", "=", "self", ".", "get_asset_address", "(", "'ont'", ")", "ong_contract_address", "=", "self", ".", "get_asset_address", "(", "\"ong\"", ")", "args", "=", "{", "\"sender\"", ":", "Address", ".", "b58decode", "(", "b58_claimer_address", ")", ".", "to_bytes", "(", ")", ",", "\"from\"", ":", "ont_contract_address", ",", "\"to\"", ":", "Address", ".", "b58decode", "(", "b58_recv_address", ")", ".", "to_bytes", "(", ")", ",", "\"value\"", ":", "amount", "}", "invoke_code", "=", "build_native_invoke_code", "(", "ong_contract_address", ",", "b'\\x00'", ",", "\"transferFrom\"", ",", "args", ")", "payer_array", "=", "Address", ".", "b58decode", "(", "b58_payer_address", ")", ".", "to_bytes", "(", ")", "return", "Transaction", "(", "0", ",", "0xd1", ",", "int", "(", "time", "(", ")", ")", ",", "gas_price", ",", "gas_limit", ",", "payer_array", ",", "invoke_code", ",", "bytearray", "(", ")", ",", "list", "(", ")", ")" ]
This interface is used to generate a Transaction object that allows one account to withdraw an amount of ONG and transfer it to a receiving address. :param b58_claimer_address: a base58 encoded address which is used to indicate who the claimer is. :param b58_recv_address: a base58 encoded address which is used to indicate who receives the claimed ONG. :param amount: the amount of asset that will be claimed. :param b58_payer_address: a base58 encoded address which indicates who will pay for the transaction. :param gas_limit: an int value that indicates the gas limit. :param gas_price: an int value that indicates the gas price. :return: a Transaction object which can be used to withdraw ONG.
[ "This", "interface", "is", "used", "to", "generate", "a", "Transaction", "object", "that", "allow", "one", "account", "to", "withdraw", "an", "amount", "of", "ong", "and", "transfer", "them", "to", "receive", "address", "." ]
train
https://github.com/ontio/ontology-python-sdk/blob/ac88bdda941896c5d2ced08422a9c5179d3f9b19/ontology/smart_contract/native_contract/asset.py#L226-L257
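A sketch for the ONG-withdrawal builder above. As the record shows, it is implemented as a transferFrom call on the ONG contract, with the ONT contract address as the "from" party; the addresses below are the same hypothetical placeholders as before:

# Claim unbound ONG accrued by the claimer's ONT and send it to a receiver.
tx = asset.new_withdraw_ong_transaction(b58_claimer_address=b58_from,
                                        b58_recv_address=b58_to,
                                        amount=5 * 10 ** 9,  # ONG uses 9 decimals
                                        b58_payer_address=b58_from,
                                        gas_limit=20000, gas_price=500)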
ontio/ontology-python-sdk
ontology/smart_contract/native_contract/asset.py
Asset.transfer
def transfer(self, asset: str, from_acct: Account, b58_to_address: str, amount: int, payer: Account, gas_limit: int, gas_price: int): """ This interface is used to send a transfer transaction that only for ONT or ONG. :param asset: a string which is used to indicate which asset we want to transfer. :param from_acct: a Account object which indicate where the asset from. :param b58_to_address: a base58 encode address which indicate where the asset to. :param amount: the amount of asset that will be transferred. :param payer: a Account object which indicate who will pay for the transaction. :param gas_limit: an int value that indicate the gas limit. :param gas_price: an int value that indicate the gas price. :return: hexadecimal transaction hash value. """ tx = self.new_transfer_transaction(asset, from_acct.get_address_base58(), b58_to_address, amount, payer.get_address_base58(), gas_limit, gas_price) tx.sign_transaction(from_acct) if from_acct.get_address_base58() != payer.get_address_base58(): tx.add_sign_transaction(payer) return self.__sdk.get_network().send_raw_transaction(tx)
python
def transfer(self, asset: str, from_acct: Account, b58_to_address: str, amount: int, payer: Account, gas_limit: int, gas_price: int): """ This interface is used to send a transfer transaction that only for ONT or ONG. :param asset: a string which is used to indicate which asset we want to transfer. :param from_acct: a Account object which indicate where the asset from. :param b58_to_address: a base58 encode address which indicate where the asset to. :param amount: the amount of asset that will be transferred. :param payer: a Account object which indicate who will pay for the transaction. :param gas_limit: an int value that indicate the gas limit. :param gas_price: an int value that indicate the gas price. :return: hexadecimal transaction hash value. """ tx = self.new_transfer_transaction(asset, from_acct.get_address_base58(), b58_to_address, amount, payer.get_address_base58(), gas_limit, gas_price) tx.sign_transaction(from_acct) if from_acct.get_address_base58() != payer.get_address_base58(): tx.add_sign_transaction(payer) return self.__sdk.get_network().send_raw_transaction(tx)
[ "def", "transfer", "(", "self", ",", "asset", ":", "str", ",", "from_acct", ":", "Account", ",", "b58_to_address", ":", "str", ",", "amount", ":", "int", ",", "payer", ":", "Account", ",", "gas_limit", ":", "int", ",", "gas_price", ":", "int", ")", ":", "tx", "=", "self", ".", "new_transfer_transaction", "(", "asset", ",", "from_acct", ".", "get_address_base58", "(", ")", ",", "b58_to_address", ",", "amount", ",", "payer", ".", "get_address_base58", "(", ")", ",", "gas_limit", ",", "gas_price", ")", "tx", ".", "sign_transaction", "(", "from_acct", ")", "if", "from_acct", ".", "get_address_base58", "(", ")", "!=", "payer", ".", "get_address_base58", "(", ")", ":", "tx", ".", "add_sign_transaction", "(", "payer", ")", "return", "self", ".", "__sdk", ".", "get_network", "(", ")", ".", "send_raw_transaction", "(", "tx", ")" ]
This interface is used to send a transfer transaction, for ONT or ONG only. :param asset: a string which is used to indicate which asset we want to transfer. :param from_acct: an Account object which indicates where the asset comes from. :param b58_to_address: a base58 encoded address which indicates where the asset goes to. :param amount: the amount of asset that will be transferred. :param payer: an Account object which indicates who will pay for the transaction. :param gas_limit: an int value that indicates the gas limit. :param gas_price: an int value that indicates the gas price. :return: hexadecimal transaction hash value.
[ "This", "interface", "is", "used", "to", "send", "a", "transfer", "transaction", "that", "only", "for", "ONT", "or", "ONG", "." ]
train
https://github.com/ontio/ontology-python-sdk/blob/ac88bdda941896c5d2ced08422a9c5179d3f9b19/ontology/smart_contract/native_contract/asset.py#L259-L278
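An end-to-end sketch of the `transfer` helper above, which builds, signs and broadcasts in one call. It assumes `from_acct` and `payer` are Account objects (e.g. loaded from a wallet) and that `asset` comes from a connected SDK; none of these variable names appear in the source:

# Sign with the sender; per the record, a distinct payer is added as a second signer.
tx_hash = asset.transfer('ont', from_acct, b58_to, 10, payer, 20000, 500)
print(tx_hash)  # hexadecimal transaction hash returned by the node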
ontio/ontology-python-sdk
ontology/smart_contract/native_contract/asset.py
Asset.withdraw_ong
def withdraw_ong(self, claimer: Account, b58_recv_address: str, amount: int, payer: Account, gas_limit: int, gas_price: int) -> str: """ This interface is used to withdraw a amount of ong and transfer them to receive address. :param claimer: the owner of ong that remained to claim. :param b58_recv_address: the address that received the ong. :param amount: the amount of ong want to claim. :param payer: an Account class that used to pay for the transaction. :param gas_limit: an int value that indicate the gas limit. :param gas_price: an int value that indicate the gas price. :return: hexadecimal transaction hash value. """ if claimer is None: raise SDKException(ErrorCode.param_err('the claimer should not be None.')) if payer is None: raise SDKException(ErrorCode.param_err('the payer should not be None.')) if amount <= 0: raise SDKException(ErrorCode.other_error('the amount should be greater than than zero.')) if gas_price < 0: raise SDKException(ErrorCode.other_error('the gas price should be equal or greater than zero.')) if gas_limit < 0: raise SDKException(ErrorCode.other_error('the gas limit should be equal or greater than zero.')) b58_claimer = claimer.get_address_base58() b58_payer = payer.get_address_base58() tx = self.new_withdraw_ong_transaction(b58_claimer, b58_recv_address, amount, b58_payer, gas_limit, gas_price) tx.sign_transaction(claimer) if claimer.get_address_base58() != payer.get_address_base58(): tx.add_sign_transaction(payer) return self.__sdk.get_network().send_raw_transaction(tx)
python
def withdraw_ong(self, claimer: Account, b58_recv_address: str, amount: int, payer: Account, gas_limit: int, gas_price: int) -> str: """ This interface is used to withdraw a amount of ong and transfer them to receive address. :param claimer: the owner of ong that remained to claim. :param b58_recv_address: the address that received the ong. :param amount: the amount of ong want to claim. :param payer: an Account class that used to pay for the transaction. :param gas_limit: an int value that indicate the gas limit. :param gas_price: an int value that indicate the gas price. :return: hexadecimal transaction hash value. """ if claimer is None: raise SDKException(ErrorCode.param_err('the claimer should not be None.')) if payer is None: raise SDKException(ErrorCode.param_err('the payer should not be None.')) if amount <= 0: raise SDKException(ErrorCode.other_error('the amount should be greater than than zero.')) if gas_price < 0: raise SDKException(ErrorCode.other_error('the gas price should be equal or greater than zero.')) if gas_limit < 0: raise SDKException(ErrorCode.other_error('the gas limit should be equal or greater than zero.')) b58_claimer = claimer.get_address_base58() b58_payer = payer.get_address_base58() tx = self.new_withdraw_ong_transaction(b58_claimer, b58_recv_address, amount, b58_payer, gas_limit, gas_price) tx.sign_transaction(claimer) if claimer.get_address_base58() != payer.get_address_base58(): tx.add_sign_transaction(payer) return self.__sdk.get_network().send_raw_transaction(tx)
[ "def", "withdraw_ong", "(", "self", ",", "claimer", ":", "Account", ",", "b58_recv_address", ":", "str", ",", "amount", ":", "int", ",", "payer", ":", "Account", ",", "gas_limit", ":", "int", ",", "gas_price", ":", "int", ")", "->", "str", ":", "if", "claimer", "is", "None", ":", "raise", "SDKException", "(", "ErrorCode", ".", "param_err", "(", "'the claimer should not be None.'", ")", ")", "if", "payer", "is", "None", ":", "raise", "SDKException", "(", "ErrorCode", ".", "param_err", "(", "'the payer should not be None.'", ")", ")", "if", "amount", "<=", "0", ":", "raise", "SDKException", "(", "ErrorCode", ".", "other_error", "(", "'the amount should be greater than than zero.'", ")", ")", "if", "gas_price", "<", "0", ":", "raise", "SDKException", "(", "ErrorCode", ".", "other_error", "(", "'the gas price should be equal or greater than zero.'", ")", ")", "if", "gas_limit", "<", "0", ":", "raise", "SDKException", "(", "ErrorCode", ".", "other_error", "(", "'the gas limit should be equal or greater than zero.'", ")", ")", "b58_claimer", "=", "claimer", ".", "get_address_base58", "(", ")", "b58_payer", "=", "payer", ".", "get_address_base58", "(", ")", "tx", "=", "self", ".", "new_withdraw_ong_transaction", "(", "b58_claimer", ",", "b58_recv_address", ",", "amount", ",", "b58_payer", ",", "gas_limit", ",", "gas_price", ")", "tx", ".", "sign_transaction", "(", "claimer", ")", "if", "claimer", ".", "get_address_base58", "(", ")", "!=", "payer", ".", "get_address_base58", "(", ")", ":", "tx", ".", "add_sign_transaction", "(", "payer", ")", "return", "self", ".", "__sdk", ".", "get_network", "(", ")", ".", "send_raw_transaction", "(", "tx", ")" ]
This interface is used to withdraw an amount of ONG and transfer it to a receiving address. :param claimer: the owner of the ONG that remains to be claimed. :param b58_recv_address: the address that receives the ONG. :param amount: the amount of ONG to claim. :param payer: an Account object that is used to pay for the transaction. :param gas_limit: an int value that indicates the gas limit. :param gas_price: an int value that indicates the gas price. :return: hexadecimal transaction hash value.
[ "This", "interface", "is", "used", "to", "withdraw", "a", "amount", "of", "ong", "and", "transfer", "them", "to", "receive", "address", "." ]
train
https://github.com/ontio/ontology-python-sdk/blob/ac88bdda941896c5d2ced08422a9c5179d3f9b19/ontology/smart_contract/native_contract/asset.py#L280-L309
ontio/ontology-python-sdk
ontology/smart_contract/native_contract/asset.py
Asset.approve
def approve(self, asset, sender: Account, b58_recv_address: str, amount: int, payer: Account, gas_limit: int, gas_price: int) -> str: """ This is an interface used to send an approve transaction which allow receiver to spend a amount of ONT or ONG asset in sender's account. :param asset: a string which is used to indicate what asset we want to approve. :param sender: an Account class that send the approve transaction. :param b58_recv_address: a base58 encode address which indicate where the approve to. :param amount: the amount of asset want to approve. :param payer: an Account class that used to pay for the transaction. :param gas_limit: an int value that indicate the gas limit. :param gas_price: an int value that indicate the gas price. :return: hexadecimal transaction hash value. """ if sender is None: raise SDKException(ErrorCode.param_err('the sender should not be None.')) if payer is None: raise SDKException(ErrorCode.param_err('the payer should not be None.')) if amount <= 0: raise SDKException(ErrorCode.other_error('the amount should be greater than than zero.')) if gas_price < 0: raise SDKException(ErrorCode.other_error('the gas price should be equal or greater than zero.')) if gas_limit < 0: raise SDKException(ErrorCode.other_error('the gas limit should be equal or greater than zero.')) b58_sender_address = sender.get_address_base58() b58_payer_address = payer.get_address_base58() tx = self.new_approve_transaction(asset, b58_sender_address, b58_recv_address, amount, b58_payer_address, gas_limit, gas_price) tx.sign_transaction(sender) if sender.get_address_base58() != payer.get_address_base58(): tx.add_sign_transaction(payer) return self.__sdk.get_network().send_raw_transaction(tx)
python
def approve(self, asset, sender: Account, b58_recv_address: str, amount: int, payer: Account, gas_limit: int, gas_price: int) -> str: """ This is an interface used to send an approve transaction which allow receiver to spend a amount of ONT or ONG asset in sender's account. :param asset: a string which is used to indicate what asset we want to approve. :param sender: an Account class that send the approve transaction. :param b58_recv_address: a base58 encode address which indicate where the approve to. :param amount: the amount of asset want to approve. :param payer: an Account class that used to pay for the transaction. :param gas_limit: an int value that indicate the gas limit. :param gas_price: an int value that indicate the gas price. :return: hexadecimal transaction hash value. """ if sender is None: raise SDKException(ErrorCode.param_err('the sender should not be None.')) if payer is None: raise SDKException(ErrorCode.param_err('the payer should not be None.')) if amount <= 0: raise SDKException(ErrorCode.other_error('the amount should be greater than than zero.')) if gas_price < 0: raise SDKException(ErrorCode.other_error('the gas price should be equal or greater than zero.')) if gas_limit < 0: raise SDKException(ErrorCode.other_error('the gas limit should be equal or greater than zero.')) b58_sender_address = sender.get_address_base58() b58_payer_address = payer.get_address_base58() tx = self.new_approve_transaction(asset, b58_sender_address, b58_recv_address, amount, b58_payer_address, gas_limit, gas_price) tx.sign_transaction(sender) if sender.get_address_base58() != payer.get_address_base58(): tx.add_sign_transaction(payer) return self.__sdk.get_network().send_raw_transaction(tx)
[ "def", "approve", "(", "self", ",", "asset", ",", "sender", ":", "Account", ",", "b58_recv_address", ":", "str", ",", "amount", ":", "int", ",", "payer", ":", "Account", ",", "gas_limit", ":", "int", ",", "gas_price", ":", "int", ")", "->", "str", ":", "if", "sender", "is", "None", ":", "raise", "SDKException", "(", "ErrorCode", ".", "param_err", "(", "'the sender should not be None.'", ")", ")", "if", "payer", "is", "None", ":", "raise", "SDKException", "(", "ErrorCode", ".", "param_err", "(", "'the payer should not be None.'", ")", ")", "if", "amount", "<=", "0", ":", "raise", "SDKException", "(", "ErrorCode", ".", "other_error", "(", "'the amount should be greater than than zero.'", ")", ")", "if", "gas_price", "<", "0", ":", "raise", "SDKException", "(", "ErrorCode", ".", "other_error", "(", "'the gas price should be equal or greater than zero.'", ")", ")", "if", "gas_limit", "<", "0", ":", "raise", "SDKException", "(", "ErrorCode", ".", "other_error", "(", "'the gas limit should be equal or greater than zero.'", ")", ")", "b58_sender_address", "=", "sender", ".", "get_address_base58", "(", ")", "b58_payer_address", "=", "payer", ".", "get_address_base58", "(", ")", "tx", "=", "self", ".", "new_approve_transaction", "(", "asset", ",", "b58_sender_address", ",", "b58_recv_address", ",", "amount", ",", "b58_payer_address", ",", "gas_limit", ",", "gas_price", ")", "tx", ".", "sign_transaction", "(", "sender", ")", "if", "sender", ".", "get_address_base58", "(", ")", "!=", "payer", ".", "get_address_base58", "(", ")", ":", "tx", ".", "add_sign_transaction", "(", "payer", ")", "return", "self", ".", "__sdk", ".", "get_network", "(", ")", ".", "send_raw_transaction", "(", "tx", ")" ]
This is an interface used to send an approve transaction which allows the receiver to spend an amount of ONT or ONG asset from the sender's account. :param asset: a string which is used to indicate which asset we want to approve. :param sender: an Account object that sends the approve transaction. :param b58_recv_address: a base58 encoded address which indicates where the approval goes to. :param amount: the amount of asset to approve. :param payer: an Account object that is used to pay for the transaction. :param gas_limit: an int value that indicates the gas limit. :param gas_price: an int value that indicates the gas price. :return: hexadecimal transaction hash value.
[ "This", "is", "an", "interface", "used", "to", "send", "an", "approve", "transaction", "which", "allow", "receiver", "to", "spend", "a", "amount", "of", "ONT", "or", "ONG", "asset", "in", "sender", "s", "account", "." ]
train
https://github.com/ontio/ontology-python-sdk/blob/ac88bdda941896c5d2ced08422a9c5179d3f9b19/ontology/smart_contract/native_contract/asset.py#L311-L343
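The send-side counterpart for approvals, under the same assumptions as the transfer sketch above:

# Grant an allowance and broadcast immediately; returns the transaction hash.
tx_hash = asset.approve('ont', sender, b58_to, 100, payer, 20000, 500)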
ontio/ontology-python-sdk
ontology/wallet/wallet.py
WalletData.remove_account
def remove_account(self, address: str): """ This interface is used to remove account from WalletData. :param address: a string address. """ account = self.get_account_by_b58_address(address) if account is None: raise SDKException(ErrorCode.get_account_by_address_err) self.accounts.remove(account)
python
def remove_account(self, address: str): """ This interface is used to remove account from WalletData. :param address: a string address. """ account = self.get_account_by_b58_address(address) if account is None: raise SDKException(ErrorCode.get_account_by_address_err) self.accounts.remove(account)
[ "def", "remove_account", "(", "self", ",", "address", ":", "str", ")", ":", "account", "=", "self", ".", "get_account_by_b58_address", "(", "address", ")", "if", "account", "is", "None", ":", "raise", "SDKException", "(", "ErrorCode", ".", "get_account_by_address_err", ")", "self", ".", "accounts", ".", "remove", "(", "account", ")" ]
This interface is used to remove an account from WalletData. :param address: the account's address as a string.
[ "This", "interface", "is", "used", "to", "remove", "account", "from", "WalletData", "." ]
train
https://github.com/ontio/ontology-python-sdk/blob/ac88bdda941896c5d2ced08422a9c5179d3f9b19/ontology/wallet/wallet.py#L101-L110
ontio/ontology-python-sdk
ontology/wallet/wallet.py
WalletData.set_default_account_by_index
def set_default_account_by_index(self, index: int): """ This interface is used to set default account by given index. :param index: an int value that indicate the account object in account list. """ if index >= len(self.accounts): raise SDKException(ErrorCode.param_error) for acct in self.accounts: acct.is_default = False self.accounts[index].is_default = True self.default_account_address = self.accounts[index].b58_address
python
def set_default_account_by_index(self, index: int): """ This interface is used to set default account by given index. :param index: an int value that indicate the account object in account list. """ if index >= len(self.accounts): raise SDKException(ErrorCode.param_error) for acct in self.accounts: acct.is_default = False self.accounts[index].is_default = True self.default_account_address = self.accounts[index].b58_address
[ "def", "set_default_account_by_index", "(", "self", ",", "index", ":", "int", ")", ":", "if", "index", ">=", "len", "(", "self", ".", "accounts", ")", ":", "raise", "SDKException", "(", "ErrorCode", ".", "param_error", ")", "for", "acct", "in", "self", ".", "accounts", ":", "acct", ".", "is_default", "=", "False", "self", ".", "accounts", "[", "index", "]", ".", "is_default", "=", "True", "self", ".", "default_account_address", "=", "self", ".", "accounts", "[", "index", "]", ".", "b58_address" ]
This interface is used to set the default account by a given index. :param index: an int value that indicates the position of an account object in the account list.
[ "This", "interface", "is", "used", "to", "set", "default", "account", "by", "given", "index", "." ]
train
https://github.com/ontio/ontology-python-sdk/blob/ac88bdda941896c5d2ced08422a9c5179d3f9b19/ontology/wallet/wallet.py#L120-L131
ontio/ontology-python-sdk
ontology/wallet/wallet.py
WalletData.set_default_account_by_address
def set_default_account_by_address(self, b58_address: str): """ This interface is used to set default account by given base58 encode address. :param b58_address: a base58 encode address. """ flag = True index = -1 for acct in self.accounts: index += 1 if acct.b58_address == b58_address: flag = False break if flag: raise SDKException(ErrorCode.get_account_by_address_err) for i in range(len(self.accounts)): self.accounts[i].is_default = False self.accounts[index].is_default = True self.default_account_address = b58_address
python
def set_default_account_by_address(self, b58_address: str): """ This interface is used to set default account by given base58 encode address. :param b58_address: a base58 encode address. """ flag = True index = -1 for acct in self.accounts: index += 1 if acct.b58_address == b58_address: flag = False break if flag: raise SDKException(ErrorCode.get_account_by_address_err) for i in range(len(self.accounts)): self.accounts[i].is_default = False self.accounts[index].is_default = True self.default_account_address = b58_address
[ "def", "set_default_account_by_address", "(", "self", ",", "b58_address", ":", "str", ")", ":", "flag", "=", "True", "index", "=", "-", "1", "for", "acct", "in", "self", ".", "accounts", ":", "index", "+=", "1", "if", "acct", ".", "b58_address", "==", "b58_address", ":", "flag", "=", "False", "break", "if", "flag", ":", "raise", "SDKException", "(", "ErrorCode", ".", "get_account_by_address_err", ")", "for", "i", "in", "range", "(", "len", "(", "self", ".", "accounts", ")", ")", ":", "self", ".", "accounts", "[", "i", "]", ".", "is_default", "=", "False", "self", ".", "accounts", "[", "index", "]", ".", "is_default", "=", "True", "self", ".", "default_account_address", "=", "b58_address" ]
This interface is used to set the default account by a given base58 encoded address. :param b58_address: a base58 encoded address.
[ "This", "interface", "is", "used", "to", "set", "default", "account", "by", "given", "base58", "encode", "address", "." ]
train
https://github.com/ontio/ontology-python-sdk/blob/ac88bdda941896c5d2ced08422a9c5179d3f9b19/ontology/wallet/wallet.py#L133-L151
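A small sketch covering the wallet setters above, assuming `wallet` is a WalletData instance; the address is a hypothetical placeholder and the SDKException import path is an assumption, not taken from these records:

from ontology.exception.exception import SDKException  # import path assumed

try:
    wallet.set_default_account_by_index(0)           # by position in the account list
    wallet.set_default_account_by_address('A' * 34)  # or by base58 address
except SDKException as e:
    print('could not set default account:', e)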
ontio/ontology-python-sdk
ontology/wallet/wallet.py
WalletData.set_default_identity_by_index
def set_default_identity_by_index(self, index: int): """ This interface is used to set default account by given an index value. :param index: an int value that indicate the position of an account object in account list. """ identities_len = len(self.identities) if index >= identities_len: raise SDKException(ErrorCode.param_error) for i in range(identities_len): self.identities[i].is_default = False if i == index: self.identities[index].is_default = True
python
def set_default_identity_by_index(self, index: int): """ This interface is used to set default account by given an index value. :param index: an int value that indicate the position of an account object in account list. """ identities_len = len(self.identities) if index >= identities_len: raise SDKException(ErrorCode.param_error) for i in range(identities_len): self.identities[i].is_default = False if i == index: self.identities[index].is_default = True
[ "def", "set_default_identity_by_index", "(", "self", ",", "index", ":", "int", ")", ":", "identities_len", "=", "len", "(", "self", ".", "identities", ")", "if", "index", ">=", "identities_len", ":", "raise", "SDKException", "(", "ErrorCode", ".", "param_error", ")", "for", "i", "in", "range", "(", "identities_len", ")", ":", "self", ".", "identities", "[", "i", "]", ".", "is_default", "=", "False", "if", "i", "==", "index", ":", "self", ".", "identities", "[", "index", "]", ".", "is_default", "=", "True" ]
This interface is used to set the default identity by a given index value. :param index: an int value that indicates the position of an identity object in the identity list.
[ "This", "interface", "is", "used", "to", "set", "default", "account", "by", "given", "an", "index", "value", "." ]
train
https://github.com/ontio/ontology-python-sdk/blob/ac88bdda941896c5d2ced08422a9c5179d3f9b19/ontology/wallet/wallet.py#L230-L242
vicenteneto/python-cartolafc
cartolafc/api.py
Api.set_credentials
def set_credentials(self, email, password): """ Realiza a autenticação no sistema do CartolaFC utilizando o email e password informados. Args: email (str): O email do usuário password (str): A senha do usuário Raises: cartolafc.CartolaFCError: Se o conjunto (email, password) não conseguiu realizar a autenticação com sucesso. """ self._email = email self._password = password response = requests.post(self._auth_url, json=dict(payload=dict(email=self._email, password=self._password, serviceId=4728))) body = response.json() if response.status_code == codes.ok: self._glb_id = body['glbId'] else: raise CartolaFCError(body['userMessage'])
python
def set_credentials(self, email, password): """ Realiza a autenticação no sistema do CartolaFC utilizando o email e password informados. Args: email (str): O email do usuário password (str): A senha do usuário Raises: cartolafc.CartolaFCError: Se o conjunto (email, password) não conseguiu realizar a autenticação com sucesso. """ self._email = email self._password = password response = requests.post(self._auth_url, json=dict(payload=dict(email=self._email, password=self._password, serviceId=4728))) body = response.json() if response.status_code == codes.ok: self._glb_id = body['glbId'] else: raise CartolaFCError(body['userMessage'])
[ "def", "set_credentials", "(", "self", ",", "email", ",", "password", ")", ":", "self", ".", "_email", "=", "email", "self", ".", "_password", "=", "password", "response", "=", "requests", ".", "post", "(", "self", ".", "_auth_url", ",", "json", "=", "dict", "(", "payload", "=", "dict", "(", "email", "=", "self", ".", "_email", ",", "password", "=", "self", ".", "_password", ",", "serviceId", "=", "4728", ")", ")", ")", "body", "=", "response", ".", "json", "(", ")", "if", "response", ".", "status_code", "==", "codes", ".", "ok", ":", "self", ".", "_glb_id", "=", "body", "[", "'glbId'", "]", "else", ":", "raise", "CartolaFCError", "(", "body", "[", "'userMessage'", "]", ")" ]
Performs authentication against the CartolaFC system using the given email and password. Args: email (str): The user's email password (str): The user's password Raises: cartolafc.CartolaFCError: If the (email, password) pair failed to authenticate successfully.
[ "Realiza", "a", "autenticação", "no", "sistema", "do", "CartolaFC", "utilizando", "o", "email", "e", "password", "informados", "." ]
train
https://github.com/vicenteneto/python-cartolafc/blob/15b2a192d7745f454d69a55ac9b7ef7c7abb53b9/cartolafc/api.py#L91-L110
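A usage sketch for the CartolaFC records from here on, assuming the package exposes the Api class as `cartolafc.Api`; the credentials shown are placeholders:

import cartolafc

api = cartolafc.Api()
api.set_credentials('user@example.com', 's3cret')  # raises CartolaFCError on failure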
vicenteneto/python-cartolafc
cartolafc/api.py
Api.set_redis
def set_redis(self, redis_url, redis_timeout=10): """ Realiza a autenticação no servidor Redis utilizando a URL informada. Args: redis_url (str): URL para conectar ao servidor Redis, exemplo: redis://user:password@localhost:6379/2. redis_timeout (int): O timeout padrão (em segundos). kwargs (dict): Raises: cartolafc.CartolaFCError: Se não for possível se conectar ao servidor Redis """ self._redis_url = redis_url self._redis_timeout = redis_timeout if isinstance(redis_timeout, int) and redis_timeout > 0 else 10 try: self._redis = redis.StrictRedis.from_url(url=redis_url) self._redis.ping() except (ConnectionError, TimeoutError): raise CartolaFCError('Erro conectando ao servidor Redis.')
python
def set_redis(self, redis_url, redis_timeout=10): """ Realiza a autenticação no servidor Redis utilizando a URL informada. Args: redis_url (str): URL para conectar ao servidor Redis, exemplo: redis://user:password@localhost:6379/2. redis_timeout (int): O timeout padrão (em segundos). kwargs (dict): Raises: cartolafc.CartolaFCError: Se não for possível se conectar ao servidor Redis """ self._redis_url = redis_url self._redis_timeout = redis_timeout if isinstance(redis_timeout, int) and redis_timeout > 0 else 10 try: self._redis = redis.StrictRedis.from_url(url=redis_url) self._redis.ping() except (ConnectionError, TimeoutError): raise CartolaFCError('Erro conectando ao servidor Redis.')
[ "def", "set_redis", "(", "self", ",", "redis_url", ",", "redis_timeout", "=", "10", ")", ":", "self", ".", "_redis_url", "=", "redis_url", "self", ".", "_redis_timeout", "=", "redis_timeout", "if", "isinstance", "(", "redis_timeout", ",", "int", ")", "and", "redis_timeout", ">", "0", "else", "10", "try", ":", "self", ".", "_redis", "=", "redis", ".", "StrictRedis", ".", "from_url", "(", "url", "=", "redis_url", ")", "self", ".", "_redis", ".", "ping", "(", ")", "except", "(", "ConnectionError", ",", "TimeoutError", ")", ":", "raise", "CartolaFCError", "(", "'Erro conectando ao servidor Redis.'", ")" ]
Establishes the connection to the Redis server using the given URL. Args: redis_url (str): URL used to connect to the Redis server, e.g.: redis://user:password@localhost:6379/2. redis_timeout (int): The default timeout (in seconds). Raises: cartolafc.CartolaFCError: If it is not possible to connect to the Redis server.
[ "Realiza", "a", "autenticação", "no", "servidor", "Redis", "utilizando", "a", "URL", "informada", "." ]
train
https://github.com/vicenteneto/python-cartolafc/blob/15b2a192d7745f454d69a55ac9b7ef7c7abb53b9/cartolafc/api.py#L112-L130
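Enabling the Redis cache from the record above; the URL follows the documented format and points at a hypothetical local server:

api.set_redis('redis://localhost:6379/0', redis_timeout=30)
# A non-int or non-positive timeout silently falls back to the 10-second default.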
vicenteneto/python-cartolafc
cartolafc/api.py
Api.liga
def liga(self, nome=None, slug=None, page=1, order_by=CAMPEONATO): """ Este serviço requer que a API esteja autenticada, e realiza uma busca pelo nome ou slug informados. Este serviço obtém apenas 20 times por página, portanto, caso sua liga possua mais que 20 membros, deve-se utilizar o argumento "page" para obter mais times. Args: nome (str): Nome da liga que se deseja obter. Requerido se o slug não for informado. slug (str): Slug do time que se deseja obter. *Este argumento tem prioridade sobre o nome* page (int): Página dos times que se deseja obter. order_by (str): É possível obter os times ordenados por "campeonato", "turno", "mes", "rodada" e "patrimonio". As constantes estão disponíveis em "cartolafc.CAMPEONATO", "cartolafc.TURNO" e assim sucessivamente. Returns: Um objeto representando a liga encontrada. Raises: CartolaFCError: Se a API não está autenticada ou se nenhuma liga foi encontrada com os dados recebidos. """ if not any((nome, slug)): raise CartolaFCError('Você precisa informar o nome ou o slug da liga que deseja obter') slug = slug if slug else convert_team_name_to_slug(nome) url = '{api_url}/auth/liga/{slug}'.format(api_url=self._api_url, slug=slug) data = self._request(url, params=dict(page=page, orderBy=order_by)) return Liga.from_dict(data, order_by)
python
def liga(self, nome=None, slug=None, page=1, order_by=CAMPEONATO): """ Este serviço requer que a API esteja autenticada, e realiza uma busca pelo nome ou slug informados. Este serviço obtém apenas 20 times por página, portanto, caso sua liga possua mais que 20 membros, deve-se utilizar o argumento "page" para obter mais times. Args: nome (str): Nome da liga que se deseja obter. Requerido se o slug não for informado. slug (str): Slug do time que se deseja obter. *Este argumento tem prioridade sobre o nome* page (int): Página dos times que se deseja obter. order_by (str): É possível obter os times ordenados por "campeonato", "turno", "mes", "rodada" e "patrimonio". As constantes estão disponíveis em "cartolafc.CAMPEONATO", "cartolafc.TURNO" e assim sucessivamente. Returns: Um objeto representando a liga encontrada. Raises: CartolaFCError: Se a API não está autenticada ou se nenhuma liga foi encontrada com os dados recebidos. """ if not any((nome, slug)): raise CartolaFCError('Você precisa informar o nome ou o slug da liga que deseja obter') slug = slug if slug else convert_team_name_to_slug(nome) url = '{api_url}/auth/liga/{slug}'.format(api_url=self._api_url, slug=slug) data = self._request(url, params=dict(page=page, orderBy=order_by)) return Liga.from_dict(data, order_by)
[ "def", "liga", "(", "self", ",", "nome", "=", "None", ",", "slug", "=", "None", ",", "page", "=", "1", ",", "order_by", "=", "CAMPEONATO", ")", ":", "if", "not", "any", "(", "(", "nome", ",", "slug", ")", ")", ":", "raise", "CartolaFCError", "(", "'Você precisa informar o nome ou o slug da liga que deseja obter')", "", "slug", "=", "slug", "if", "slug", "else", "convert_team_name_to_slug", "(", "nome", ")", "url", "=", "'{api_url}/auth/liga/{slug}'", ".", "format", "(", "api_url", "=", "self", ".", "_api_url", ",", "slug", "=", "slug", ")", "data", "=", "self", ".", "_request", "(", "url", ",", "params", "=", "dict", "(", "page", "=", "page", ",", "orderBy", "=", "order_by", ")", ")", "return", "Liga", ".", "from_dict", "(", "data", ",", "order_by", ")" ]
This service requires the API to be authenticated, and performs a search by the given name or slug. This service returns only 20 teams per page, so if your league has more than 20 members, the "page" argument must be used to fetch further teams. Args: nome (str): Name of the league to fetch. Required if the slug is not given. slug (str): Slug of the league to fetch. *This argument takes precedence over the name* page (int): Page of teams to fetch. order_by (str): The teams can be ordered by "campeonato", "turno", "mes", "rodada" and "patrimonio". The constants are available as "cartolafc.CAMPEONATO", "cartolafc.TURNO" and so on. Returns: An object representing the league that was found. Raises: CartolaFCError: If the API is not authenticated or if no league was found with the given data.
[ "Este", "serviço", "requer", "que", "a", "API", "esteja", "autenticada", "e", "realiza", "uma", "busca", "pelo", "nome", "ou", "slug", "informados", ".", "Este", "serviço", "obtém", "apenas", "20", "times", "por", "página", "portanto", "caso", "sua", "liga", "possua", "mais", "que", "20", "membros", "deve", "-", "se", "utilizar", "o", "argumento", "page", "para", "obter", "mais", "times", "." ]
train
https://github.com/vicenteneto/python-cartolafc/blob/15b2a192d7745f454d69a55ac9b7ef7c7abb53b9/cartolafc/api.py#L139-L165
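Fetching a league per the record above (requires prior authentication via set_credentials); the league name is a placeholder, and the ordering constant is the one named in the docstring:

liga = api.liga(nome='Minha Liga', page=1, order_by=cartolafc.CAMPEONATO)
# Leagues return at most 20 teams per call; iterate over `page` to collect them all.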
vicenteneto/python-cartolafc
cartolafc/api.py
Api.ligas
def ligas(self, query): """ Retorna o resultado da busca ao Cartola por um determinado termo de pesquisa. Args: query (str): Termo para utilizar na busca. Returns: Uma lista de instâncias de cartolafc.Liga, uma para cada liga contento o termo utilizado na busca. """ url = '{api_url}/ligas'.format(api_url=self._api_url) data = self._request(url, params=dict(q=query)) return [Liga.from_dict(liga_info) for liga_info in data]
python
def ligas(self, query): """ Retorna o resultado da busca ao Cartola por um determinado termo de pesquisa. Args: query (str): Termo para utilizar na busca. Returns: Uma lista de instâncias de cartolafc.Liga, uma para cada liga contento o termo utilizado na busca. """ url = '{api_url}/ligas'.format(api_url=self._api_url) data = self._request(url, params=dict(q=query)) return [Liga.from_dict(liga_info) for liga_info in data]
[ "def", "ligas", "(", "self", ",", "query", ")", ":", "url", "=", "'{api_url}/ligas'", ".", "format", "(", "api_url", "=", "self", ".", "_api_url", ")", "data", "=", "self", ".", "_request", "(", "url", ",", "params", "=", "dict", "(", "q", "=", "query", ")", ")", "return", "[", "Liga", ".", "from_dict", "(", "liga_info", ")", "for", "liga_info", "in", "data", "]" ]
Returns the result of searching Cartola for a given search term. Args: query (str): Term to use in the search. Returns: A list of cartolafc.Liga instances, one for each league containing the search term.
[ "Retorna", "o", "resultado", "da", "busca", "ao", "Cartola", "por", "um", "determinado", "termo", "de", "pesquisa", "." ]
train
https://github.com/vicenteneto/python-cartolafc/blob/15b2a192d7745f454d69a55ac9b7ef7c7abb53b9/cartolafc/api.py#L185-L197
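Searching leagues by term, per the record above; the `nome` attribute on the returned Liga objects is an assumption about the model, not taken from this record:

for liga in api.ligas(query='cartola'):
    print(liga.nome)  # attribute name assumed from the Liga model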
vicenteneto/python-cartolafc
cartolafc/api.py
Api.mercado
def mercado(self): """ Obtém o status do mercado na rodada atual. Returns: Uma instância de cartolafc.Mercado representando o status do mercado na rodada atual. """ url = '{api_url}/mercado/status'.format(api_url=self._api_url) data = self._request(url) return Mercado.from_dict(data)
python
def mercado(self): """ Obtém o status do mercado na rodada atual. Returns: Uma instância de cartolafc.Mercado representando o status do mercado na rodada atual. """ url = '{api_url}/mercado/status'.format(api_url=self._api_url) data = self._request(url) return Mercado.from_dict(data)
[ "def", "mercado", "(", "self", ")", ":", "url", "=", "'{api_url}/mercado/status'", ".", "format", "(", "api_url", "=", "self", ".", "_api_url", ")", "data", "=", "self", ".", "_request", "(", "url", ")", "return", "Mercado", ".", "from_dict", "(", "data", ")" ]
Gets the market status for the current round. Returns: A cartolafc.Mercado instance representing the market status for the current round.
[ "Obtém", "o", "status", "do", "mercado", "na", "rodada", "atual", "." ]
train
https://github.com/vicenteneto/python-cartolafc/blob/15b2a192d7745f454d69a55ac9b7ef7c7abb53b9/cartolafc/api.py#L205-L214
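Checking the market status per the record above; the `status.id` attribute path is taken from the parciais record below, which reads `self.mercado().status.id`:

mercado = api.mercado()
print(mercado.status.id)  # the parciais record compares this against MERCADO_FECHADO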
vicenteneto/python-cartolafc
cartolafc/api.py
Api.parciais
def parciais(self): """ Obtém um mapa com todos os atletas que já pontuaram na rodada atual (aberta). Returns: Uma mapa, onde a key é um inteiro representando o id do atleta e o valor é uma instância de cartolafc.Atleta Raises: CartolaFCError: Se o mercado atual estiver com o status fechado. """ if self.mercado().status.id == MERCADO_FECHADO: url = '{api_url}/atletas/pontuados'.format(api_url=self._api_url) data = self._request(url) clubes = {clube['id']: Clube.from_dict(clube) for clube in data['clubes'].values()} return {int(atleta_id): Atleta.from_dict(atleta, clubes=clubes, atleta_id=int(atleta_id)) for atleta_id, atleta in data['atletas'].items() if atleta['clube_id'] > 0} raise CartolaFCError('As pontuações parciais só ficam disponíveis com o mercado fechado.')
python
def parciais(self): """ Obtém um mapa com todos os atletas que já pontuaram na rodada atual (aberta). Returns: Uma mapa, onde a key é um inteiro representando o id do atleta e o valor é uma instância de cartolafc.Atleta Raises: CartolaFCError: Se o mercado atual estiver com o status fechado. """ if self.mercado().status.id == MERCADO_FECHADO: url = '{api_url}/atletas/pontuados'.format(api_url=self._api_url) data = self._request(url) clubes = {clube['id']: Clube.from_dict(clube) for clube in data['clubes'].values()} return {int(atleta_id): Atleta.from_dict(atleta, clubes=clubes, atleta_id=int(atleta_id)) for atleta_id, atleta in data['atletas'].items() if atleta['clube_id'] > 0} raise CartolaFCError('As pontuações parciais só ficam disponíveis com o mercado fechado.')
[ "def", "parciais", "(", "self", ")", ":", "if", "self", ".", "mercado", "(", ")", ".", "status", ".", "id", "==", "MERCADO_FECHADO", ":", "url", "=", "'{api_url}/atletas/pontuados'", ".", "format", "(", "api_url", "=", "self", ".", "_api_url", ")", "data", "=", "self", ".", "_request", "(", "url", ")", "clubes", "=", "{", "clube", "[", "'id'", "]", ":", "Clube", ".", "from_dict", "(", "clube", ")", "for", "clube", "in", "data", "[", "'clubes'", "]", ".", "values", "(", ")", "}", "return", "{", "int", "(", "atleta_id", ")", ":", "Atleta", ".", "from_dict", "(", "atleta", ",", "clubes", "=", "clubes", ",", "atleta_id", "=", "int", "(", "atleta_id", ")", ")", "for", "atleta_id", ",", "atleta", "in", "data", "[", "'atletas'", "]", ".", "items", "(", ")", "if", "atleta", "[", "'clube_id'", "]", ">", "0", "}", "raise", "CartolaFCError", "(", "'As pontuações parciais só ficam disponíveis com o mercado fechado.')", "" ]
Obtém um mapa com todos os atletas que já pontuaram na rodada atual (aberta). Returns: Um mapa, onde a key é um inteiro representando o id do atleta e o valor é uma instância de cartolafc.Atleta Raises: CartolaFCError: Se o mercado atual não estiver com o status fechado.
[ "Obtém", "um", "mapa", "com", "todos", "os", "atletas", "que", "já", "pontuaram", "na", "rodada", "atual", "(", "aberta", ")", "." ]
train
https://github.com/vicenteneto/python-cartolafc/blob/15b2a192d7745f454d69a55ac9b7ef7c7abb53b9/cartolafc/api.py#L222-L239
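Since parciais() raises unless the market is closed, a guarded call is the natural pattern; cartolafc.CartolaFCError is the exception name the docstrings themselves use.

import cartolafc

api = cartolafc.Api()
try:
    for atleta_id, atleta in api.parciais().items():
        print(atleta_id, atleta)
except cartolafc.CartolaFCError as erro:
    # raised whenever the market is not closed
    print(erro)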
vicenteneto/python-cartolafc
cartolafc/api.py
Api.time
def time(self, id=None, nome=None, slug=None, as_json=False): """ Obtém um time específico, baseando-se no id, no nome ou no slug utilizado. Ao menos um dos três deve ser informado. Args: id (int): Id do time que se deseja obter. *Este argumento sempre será utilizado primeiro* nome (str): Nome do time que se deseja obter. Requerido se o slug não for informado. slug (str): Slug do time que se deseja obter. *Este argumento tem prioridade sobre o nome* as_json (bool): Se desejar obter o retorno no formato json. Returns: Uma instância de cartolafc.Time se o time foi encontrado. Raises: cartolafc.CartolaFCError: Se algum erro aconteceu, como por exemplo: Nenhum time foi encontrado. """ if not any((id, nome, slug)): raise CartolaFCError('Você precisa informar o nome ou o slug do time que deseja obter') param = 'id' if id else 'slug' value = id if id else (slug if slug else convert_team_name_to_slug(nome)) url = '{api_url}/time/{param}/{value}'.format(api_url=self._api_url, param=param, value=value) data = self._request(url) if bool(as_json): return data clubes = {clube['id']: Clube.from_dict(clube) for clube in data['clubes'].values()} return Time.from_dict(data, clubes=clubes, capitao=data['capitao_id'])
python
def time(self, id=None, nome=None, slug=None, as_json=False): """ Obtém um time específico, baseando-se no id, no nome ou no slug utilizado. Ao menos um dos três deve ser informado. Args: id (int): Id do time que se deseja obter. *Este argumento sempre será utilizado primeiro* nome (str): Nome do time que se deseja obter. Requerido se o slug não for informado. slug (str): Slug do time que se deseja obter. *Este argumento tem prioridade sobre o nome* as_json (bool): Se desejar obter o retorno no formato json. Returns: Uma instância de cartolafc.Time se o time foi encontrado. Raises: cartolafc.CartolaFCError: Se algum erro aconteceu, como por exemplo: Nenhum time foi encontrado. """ if not any((id, nome, slug)): raise CartolaFCError('Você precisa informar o nome ou o slug do time que deseja obter') param = 'id' if id else 'slug' value = id if id else (slug if slug else convert_team_name_to_slug(nome)) url = '{api_url}/time/{param}/{value}'.format(api_url=self._api_url, param=param, value=value) data = self._request(url) if bool(as_json): return data clubes = {clube['id']: Clube.from_dict(clube) for clube in data['clubes'].values()} return Time.from_dict(data, clubes=clubes, capitao=data['capitao_id'])
[ "def", "time", "(", "self", ",", "id", "=", "None", ",", "nome", "=", "None", ",", "slug", "=", "None", ",", "as_json", "=", "False", ")", ":", "if", "not", "any", "(", "(", "id", ",", "nome", ",", "slug", ")", ")", ":", "raise", "CartolaFCError", "(", "'Você precisa informar o nome ou o slug do time que deseja obter')", "", "param", "=", "'id'", "if", "id", "else", "'slug'", "value", "=", "id", "if", "id", "else", "(", "slug", "if", "slug", "else", "convert_team_name_to_slug", "(", "nome", ")", ")", "url", "=", "'{api_url}/time/{param}/{value}'", ".", "format", "(", "api_url", "=", "self", ".", "_api_url", ",", "param", "=", "param", ",", "value", "=", "value", ")", "data", "=", "self", ".", "_request", "(", "url", ")", "if", "bool", "(", "as_json", ")", ":", "return", "data", "clubes", "=", "{", "clube", "[", "'id'", "]", ":", "Clube", ".", "from_dict", "(", "clube", ")", "for", "clube", "in", "data", "[", "'clubes'", "]", ".", "values", "(", ")", "}", "return", "Time", ".", "from_dict", "(", "data", ",", "clubes", "=", "clubes", ",", "capitao", "=", "data", "[", "'capitao_id'", "]", ")" ]
Obtém um time específico, baseando-se no id, no nome ou no slug utilizado. Ao menos um dos três deve ser informado. Args: id (int): Id do time que se deseja obter. *Este argumento sempre será utilizado primeiro* nome (str): Nome do time que se deseja obter. Requerido se o slug não for informado. slug (str): Slug do time que se deseja obter. *Este argumento tem prioridade sobre o nome* as_json (bool): Se desejar obter o retorno no formato json. Returns: Uma instância de cartolafc.Time se o time foi encontrado. Raises: cartolafc.CartolaFCError: Se algum erro aconteceu, como por exemplo: Nenhum time foi encontrado.
[ "Obtém", "um", "time", "específico", "baseando", "-", "se", "no", "nome", "ou", "no", "slug", "utilizado", ".", "Ao", "menos", "um", "dos", "dois", "devem", "ser", "informado", "." ]
train
https://github.com/vicenteneto/python-cartolafc/blob/15b2a192d7745f454d69a55ac9b7ef7c7abb53b9/cartolafc/api.py#L255-L283
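A sketch of both call styles; 'falydos-fc' is purely an illustrative slug, and the usual package-level-export assumption applies.

import cartolafc

api = cartolafc.Api()
time = api.time(slug='falydos-fc')                  # cartolafc.Time instance
dados = api.time(slug='falydos-fc', as_json=True)   # raw JSON payload instead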
vicenteneto/python-cartolafc
cartolafc/api.py
Api.times
def times(self, query): """ Retorna o resultado da busca ao Cartola por um determinado termo de pesquisa. Args: query (str): Termo para utilizar na busca. Returns: Uma lista de instâncias de cartolafc.TimeInfo, uma para cada time contendo o termo utilizado na busca. """ url = '{api_url}/times'.format(api_url=self._api_url) data = self._request(url, params=dict(q=query)) return [TimeInfo.from_dict(time_info) for time_info in data]
python
def times(self, query): """ Retorna o resultado da busca ao Cartola por um determinado termo de pesquisa. Args: query (str): Termo para utilizar na busca. Returns: Uma lista de instâncias de cartolafc.TimeInfo, uma para cada time contendo o termo utilizado na busca. """ url = '{api_url}/times'.format(api_url=self._api_url) data = self._request(url, params=dict(q=query)) return [TimeInfo.from_dict(time_info) for time_info in data]
[ "def", "times", "(", "self", ",", "query", ")", ":", "url", "=", "'{api_url}/times'", ".", "format", "(", "api_url", "=", "self", ".", "_api_url", ")", "data", "=", "self", ".", "_request", "(", "url", ",", "params", "=", "dict", "(", "q", "=", "query", ")", ")", "return", "[", "TimeInfo", ".", "from_dict", "(", "time_info", ")", "for", "time_info", "in", "data", "]" ]
Retorna o resultado da busca ao Cartola por um determinado termo de pesquisa. Args: query (str): Termo para utilizar na busca. Returns: Uma lista de instâncias de cartolafc.TimeInfo, uma para cada time contendo o termo utilizado na busca.
[ "Retorna", "o", "resultado", "da", "busca", "ao", "Cartola", "por", "um", "determinado", "termo", "de", "pesquisa", "." ]
train
https://github.com/vicenteneto/python-cartolafc/blob/15b2a192d7745f454d69a55ac9b7ef7c7abb53b9/cartolafc/api.py#L293-L304
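Searching mirrors ligas(); the query below is illustrative.

import cartolafc

api = cartolafc.Api()
for time_info in api.times(query='vasco'):
    print(time_info)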
openvax/gtfparse
gtfparse/read_gtf.py
parse_gtf
def parse_gtf( filepath_or_buffer, chunksize=1024 * 1024, features=None, intern_columns=["seqname", "source", "strand", "frame"], fix_quotes_columns=["attribute"]): """ Parameters ---------- filepath_or_buffer : str or buffer object chunksize : int features : set or None Drop entries which aren't one of these features intern_columns : list These columns are short strings which should be interned fix_quotes_columns : list Most commonly the 'attribute' column which had broken quotes on some Ensembl release GTF files. """ if features is not None: features = set(features) def parse_frame(s): if s == ".": return 0 else: return int(s) # GTF columns: # 1) seqname: str ("1", "X", "chrX", etc...) # 2) source : str # Different versions of GTF use the second column as one of: # (a) gene biotype # (b) transcript biotype # (c) the annotation source # See: https://www.biostars.org/p/120306/#120321 # 3) feature : str ("gene", "transcript", &c) # 4) start : int # 5) end : int # 6) score : float or "." # 7) strand : "+", "-", or "." # 8) frame : 0, 1, 2 or "." # 9) attribute : key-value pairs separated by semicolons # (see more complete description in docstring at top of file) chunk_iterator = pd.read_csv( filepath_or_buffer, sep="\t", comment="#", names=REQUIRED_COLUMNS, skipinitialspace=True, skip_blank_lines=True, error_bad_lines=True, warn_bad_lines=True, chunksize=chunksize, engine="c", dtype={ "start": np.int64, "end": np.int64, "score": np.float32, "seqname": str, }, na_values=".", converters={"frame": parse_frame}) dataframes = [] try: for df in chunk_iterator: for intern_column in intern_columns: df[intern_column] = [intern(str(s)) for s in df[intern_column]] # compare feature strings after interning if features is not None: df = df[df["feature"].isin(features)] for fix_quotes_column in fix_quotes_columns: # Catch mistaken semicolons by replacing "xyz;" with "xyz" # Required to do this since the Ensembl GTF for Ensembl # release 78 has mistakes such as: # gene_name = "PRAMEF6;" transcript_name = "PRAMEF6;-201" df[fix_quotes_column] = [ s.replace(';\"', '\"').replace(";-", "-") for s in df[fix_quotes_column] ] dataframes.append(df) except Exception as e: raise ParsingError(str(e)) df = pd.concat(dataframes) return df
python
def parse_gtf( filepath_or_buffer, chunksize=1024 * 1024, features=None, intern_columns=["seqname", "source", "strand", "frame"], fix_quotes_columns=["attribute"]): """ Parameters ---------- filepath_or_buffer : str or buffer object chunksize : int features : set or None Drop entries which aren't one of these features intern_columns : list These columns are short strings which should be interned fix_quotes_columns : list Most commonly the 'attribute' column which had broken quotes on some Ensembl release GTF files. """ if features is not None: features = set(features) def parse_frame(s): if s == ".": return 0 else: return int(s) # GTF columns: # 1) seqname: str ("1", "X", "chrX", etc...) # 2) source : str # Different versions of GTF use the second column as one of: # (a) gene biotype # (b) transcript biotype # (c) the annotation source # See: https://www.biostars.org/p/120306/#120321 # 3) feature : str ("gene", "transcript", &c) # 4) start : int # 5) end : int # 6) score : float or "." # 7) strand : "+", "-", or "." # 8) frame : 0, 1, 2 or "." # 9) attribute : key-value pairs separated by semicolons # (see more complete description in docstring at top of file) chunk_iterator = pd.read_csv( filepath_or_buffer, sep="\t", comment="#", names=REQUIRED_COLUMNS, skipinitialspace=True, skip_blank_lines=True, error_bad_lines=True, warn_bad_lines=True, chunksize=chunksize, engine="c", dtype={ "start": np.int64, "end": np.int64, "score": np.float32, "seqname": str, }, na_values=".", converters={"frame": parse_frame}) dataframes = [] try: for df in chunk_iterator: for intern_column in intern_columns: df[intern_column] = [intern(str(s)) for s in df[intern_column]] # compare feature strings after interning if features is not None: df = df[df["feature"].isin(features)] for fix_quotes_column in fix_quotes_columns: # Catch mistaken semicolons by replacing "xyz;" with "xyz" # Required to do this since the Ensembl GTF for Ensembl # release 78 has mistakes such as: # gene_name = "PRAMEF6;" transcript_name = "PRAMEF6;-201" df[fix_quotes_column] = [ s.replace(';\"', '\"').replace(";-", "-") for s in df[fix_quotes_column] ] dataframes.append(df) except Exception as e: raise ParsingError(str(e)) df = pd.concat(dataframes) return df
[ "def", "parse_gtf", "(", "filepath_or_buffer", ",", "chunksize", "=", "1024", "*", "1024", ",", "features", "=", "None", ",", "intern_columns", "=", "[", "\"seqname\"", ",", "\"source\"", ",", "\"strand\"", ",", "\"frame\"", "]", ",", "fix_quotes_columns", "=", "[", "\"attribute\"", "]", ")", ":", "if", "features", "is", "not", "None", ":", "features", "=", "set", "(", "features", ")", "dataframes", "=", "[", "]", "def", "parse_frame", "(", "s", ")", ":", "if", "s", "==", "\".\"", ":", "return", "0", "else", ":", "return", "int", "(", "s", ")", "# GTF columns:", "# 1) seqname: str (\"1\", \"X\", \"chrX\", etc...)", "# 2) source : str", "# Different versions of GTF use second column as of:", "# (a) gene biotype", "# (b) transcript biotype", "# (c) the annotation source", "# See: https://www.biostars.org/p/120306/#120321", "# 3) feature : str (\"gene\", \"transcript\", &c)", "# 4) start : int", "# 5) end : int", "# 6) score : float or \".\"", "# 7) strand : \"+\", \"-\", or \".\"", "# 8) frame : 0, 1, 2 or \".\"", "# 9) attribute : key-value pairs separated by semicolons", "# (see more complete description in docstring at top of file)", "chunk_iterator", "=", "pd", ".", "read_csv", "(", "filepath_or_buffer", ",", "sep", "=", "\"\\t\"", ",", "comment", "=", "\"#\"", ",", "names", "=", "REQUIRED_COLUMNS", ",", "skipinitialspace", "=", "True", ",", "skip_blank_lines", "=", "True", ",", "error_bad_lines", "=", "True", ",", "warn_bad_lines", "=", "True", ",", "chunksize", "=", "chunksize", ",", "engine", "=", "\"c\"", ",", "dtype", "=", "{", "\"start\"", ":", "np", ".", "int64", ",", "\"end\"", ":", "np", ".", "int64", ",", "\"score\"", ":", "np", ".", "float32", ",", "\"seqname\"", ":", "str", ",", "}", ",", "na_values", "=", "\".\"", ",", "converters", "=", "{", "\"frame\"", ":", "parse_frame", "}", ")", "dataframes", "=", "[", "]", "try", ":", "for", "df", "in", "chunk_iterator", ":", "for", "intern_column", "in", "intern_columns", ":", "df", "[", "intern_column", "]", "=", "[", "intern", "(", "str", "(", "s", ")", ")", "for", "s", "in", "df", "[", "intern_column", "]", "]", "# compare feature strings after interning", "if", "features", "is", "not", "None", ":", "df", "=", "df", "[", "df", "[", "\"feature\"", "]", ".", "isin", "(", "features", ")", "]", "for", "fix_quotes_column", "in", "fix_quotes_columns", ":", "# Catch mistaken semicolons by replacing \"xyz;\" with \"xyz\"", "# Required to do this since the Ensembl GTF for Ensembl", "# release 78 has mistakes such as:", "# gene_name = \"PRAMEF6;\" transcript_name = \"PRAMEF6;-201\"", "df", "[", "fix_quotes_column", "]", "=", "[", "s", ".", "replace", "(", "';\\\"'", ",", "'\\\"'", ")", ".", "replace", "(", "\";-\"", ",", "\"-\"", ")", "for", "s", "in", "df", "[", "fix_quotes_column", "]", "]", "dataframes", ".", "append", "(", "df", ")", "except", "Exception", "as", "e", ":", "raise", "ParsingError", "(", "str", "(", "e", ")", ")", "df", "=", "pd", ".", "concat", "(", "dataframes", ")", "return", "df" ]
Parameters ---------- filepath_or_buffer : str or buffer object chunksize : int features : set or None Drop entries which aren't one of these features intern_columns : list These columns are short strings which should be interned fix_quotes_columns : list Most commonly the 'attribute' column which had broken quotes on some Ensembl release GTF files.
[ "Parameters", "----------" ]
train
https://github.com/openvax/gtfparse/blob/c79cab0c2a5ac3d08de9f932fa29a56d334a712b/gtfparse/read_gtf.py#L32-L126
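A minimal sketch of calling parse_gtf directly on an in-memory GTF, assuming the module path shown in this record and a pandas version contemporary with the library (the error_bad_lines/warn_bad_lines keywords were removed in pandas 2.0). The single GTF line is illustrative.

from io import StringIO

from gtfparse.read_gtf import parse_gtf  # module path per this record

gtf_text = '1\tensembl\tgene\t100\t200\t.\t+\t.\tgene_id "g1"; gene_name "TP53";\n'
df = parse_gtf(StringIO(gtf_text))
# score "." becomes NaN via na_values, frame "." becomes 0 via the converter
print(df[["seqname", "feature", "start", "end"]])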
openvax/gtfparse
gtfparse/read_gtf.py
parse_gtf_and_expand_attributes
def parse_gtf_and_expand_attributes( filepath_or_buffer, chunksize=1024 * 1024, restrict_attribute_columns=None, features=None): """ Parse lines into column->values dictionary and then expand the 'attribute' column into multiple columns. This expansion happens by replacing strings of semi-colon separated key-value values in the 'attribute' column with one column per distinct key, with a list of values for each row (using None for rows where key didn't occur). Parameters ---------- filepath_or_buffer : str or buffer object chunksize : int restrict_attribute_columns : list/set of str or None If given, then only use these attribute columns. features : set or None Ignore entries which don't correspond to one of the supplied features """ result = parse_gtf( filepath_or_buffer, chunksize=chunksize, features=features) attribute_values = result["attribute"] del result["attribute"] for column_name, values in expand_attribute_strings( attribute_values, usecols=restrict_attribute_columns).items(): result[column_name] = values return result
python
def parse_gtf_and_expand_attributes( filepath_or_buffer, chunksize=1024 * 1024, restrict_attribute_columns=None, features=None): """ Parse lines into column->values dictionary and then expand the 'attribute' column into multiple columns. This expansion happens by replacing strings of semi-colon separated key-value values in the 'attribute' column with one column per distinct key, with a list of values for each row (using None for rows where key didn't occur). Parameters ---------- filepath_or_buffer : str or buffer object chunksize : int restrict_attribute_columns : list/set of str or None If given, then only use these attribute columns. features : set or None Ignore entries which don't correspond to one of the supplied features """ result = parse_gtf( filepath_or_buffer, chunksize=chunksize, features=features) attribute_values = result["attribute"] del result["attribute"] for column_name, values in expand_attribute_strings( attribute_values, usecols=restrict_attribute_columns).items(): result[column_name] = values return result
[ "def", "parse_gtf_and_expand_attributes", "(", "filepath_or_buffer", ",", "chunksize", "=", "1024", "*", "1024", ",", "restrict_attribute_columns", "=", "None", ",", "features", "=", "None", ")", ":", "result", "=", "parse_gtf", "(", "filepath_or_buffer", ",", "chunksize", "=", "chunksize", ",", "features", "=", "features", ")", "attribute_values", "=", "result", "[", "\"attribute\"", "]", "del", "result", "[", "\"attribute\"", "]", "for", "column_name", ",", "values", "in", "expand_attribute_strings", "(", "attribute_values", ",", "usecols", "=", "restrict_attribute_columns", ")", ".", "items", "(", ")", ":", "result", "[", "column_name", "]", "=", "values", "return", "result" ]
Parse lines into column->values dictionary and then expand the 'attribute' column into multiple columns. This expansion happens by replacing strings of semi-colon separated key-value values in the 'attribute' column with one column per distinct key, with a list of values for each row (using None for rows where key didn't occur). Parameters ---------- filepath_or_buffer : str or buffer object chunksize : int restrict_attribute_columns : list/set of str or None If given, then only use these attribute columns. features : set or None Ignore entries which don't correspond to one of the supplied features
[ "Parse", "lines", "into", "column", "-", ">", "values", "dictionary", "and", "then", "expand", "the", "attribute", "column", "into", "multiple", "columns", ".", "This", "expansion", "happens", "by", "replacing", "strings", "of", "semi", "-", "colon", "separated", "key", "-", "value", "values", "in", "the", "attribute", "column", "with", "one", "column", "per", "distinct", "key", "with", "a", "list", "of", "values", "for", "each", "row", "(", "using", "None", "for", "rows", "where", "key", "didn", "t", "occur", ")", "." ]
train
https://github.com/openvax/gtfparse/blob/c79cab0c2a5ac3d08de9f932fa29a56d334a712b/gtfparse/read_gtf.py#L129-L162
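The expanding wrapper turns each attribute key into its own column; same assumptions as the parse_gtf sketch above.

from io import StringIO

from gtfparse.read_gtf import parse_gtf_and_expand_attributes

gtf_text = '1\tensembl\tgene\t100\t200\t.\t+\t.\tgene_id "g1"; gene_name "TP53";\n'
df = parse_gtf_and_expand_attributes(StringIO(gtf_text))
# the raw 'attribute' column is deleted; gene_id / gene_name become columns
print(df["gene_id"].tolist(), df["gene_name"].tolist())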
openvax/gtfparse
gtfparse/read_gtf.py
read_gtf
def read_gtf( filepath_or_buffer, expand_attribute_column=True, infer_biotype_column=False, column_converters={}, usecols=None, features=None, chunksize=1024 * 1024): """ Parse a GTF into a dictionary mapping column names to sequences of values. Parameters ---------- filepath_or_buffer : str or buffer object Path to GTF file (may be gzip compressed) or buffer object such as StringIO expand_attribute_column : bool Replace strings of semi-colon separated key-value values in the 'attribute' column with one column per distinct key, with a list of values for each row (using None for rows where key didn't occur). infer_biotype_column : bool Due to the annoying ambiguity of the second GTF column across multiple Ensembl releases, figure out if an older GTF's source column is actually the gene_biotype or transcript_biotype. column_converters : dict, optional Dictionary mapping column names to conversion functions. Will replace empty strings with None and otherwise pass them to the given conversion function. usecols : list of str or None Restrict which columns are loaded to the given set. If None, then load all columns. features : set of str or None Drop rows which aren't one of the features in the supplied set chunksize : int """ if isinstance(filepath_or_buffer, string_types) and not exists(filepath_or_buffer): raise ValueError("GTF file does not exist: %s" % filepath_or_buffer) if expand_attribute_column: result_df = parse_gtf_and_expand_attributes( filepath_or_buffer, chunksize=chunksize, restrict_attribute_columns=usecols, features=features) else: result_df = parse_gtf(filepath_or_buffer, chunksize=chunksize, features=features) for column_name, column_type in list(column_converters.items()): result_df[column_name] = [ column_type(string_value) if len(string_value) > 0 else None for string_value in result_df[column_name] ] # Hackishly infer whether the values in the 'source' column of this GTF # are actually representing a biotype by checking for the most common # gene_biotype and transcript_biotype value 'protein_coding' if infer_biotype_column: unique_source_values = set(result_df["source"]) if "protein_coding" in unique_source_values: column_names = set(result_df.columns) # Disambiguate between the two biotypes by checking if # gene_biotype is already present in another column. If it is, # the 2nd column is the transcript_biotype (otherwise, it's the # gene_biotype) if "gene_biotype" not in column_names: logging.info("Using column 'source' to replace missing 'gene_biotype'") result_df["gene_biotype"] = result_df["source"] if "transcript_biotype" not in column_names: logging.info("Using column 'source' to replace missing 'transcript_biotype'") result_df["transcript_biotype"] = result_df["source"] if usecols is not None: column_names = set(result_df.columns) valid_columns = [c for c in usecols if c in column_names] result_df = result_df[valid_columns] return result_df
python
def read_gtf( filepath_or_buffer, expand_attribute_column=True, infer_biotype_column=False, column_converters={}, usecols=None, features=None, chunksize=1024 * 1024): """ Parse a GTF into a dictionary mapping column names to sequences of values. Parameters ---------- filepath_or_buffer : str or buffer object Path to GTF file (may be gzip compressed) or buffer object such as StringIO expand_attribute_column : bool Replace strings of semi-colon separated key-value values in the 'attribute' column with one column per distinct key, with a list of values for each row (using None for rows where key didn't occur). infer_biotype_column : bool Due to the annoying ambiguity of the second GTF column across multiple Ensembl releases, figure out if an older GTF's source column is actually the gene_biotype or transcript_biotype. column_converters : dict, optional Dictionary mapping column names to conversion functions. Will replace empty strings with None and otherwise pass them to the given conversion function. usecols : list of str or None Restrict which columns are loaded to the given set. If None, then load all columns. features : set of str or None Drop rows which aren't one of the features in the supplied set chunksize : int """ if isinstance(filepath_or_buffer, string_types) and not exists(filepath_or_buffer): raise ValueError("GTF file does not exist: %s" % filepath_or_buffer) if expand_attribute_column: result_df = parse_gtf_and_expand_attributes( filepath_or_buffer, chunksize=chunksize, restrict_attribute_columns=usecols, features=features) else: result_df = parse_gtf(filepath_or_buffer, chunksize=chunksize, features=features) for column_name, column_type in list(column_converters.items()): result_df[column_name] = [ column_type(string_value) if len(string_value) > 0 else None for string_value in result_df[column_name] ] # Hackishly infer whether the values in the 'source' column of this GTF # are actually representing a biotype by checking for the most common # gene_biotype and transcript_biotype value 'protein_coding' if infer_biotype_column: unique_source_values = set(result_df["source"]) if "protein_coding" in unique_source_values: column_names = set(result_df.columns) # Disambiguate between the two biotypes by checking if # gene_biotype is already present in another column. If it is, # the 2nd column is the transcript_biotype (otherwise, it's the # gene_biotype) if "gene_biotype" not in column_names: logging.info("Using column 'source' to replace missing 'gene_biotype'") result_df["gene_biotype"] = result_df["source"] if "transcript_biotype" not in column_names: logging.info("Using column 'source' to replace missing 'transcript_biotype'") result_df["transcript_biotype"] = result_df["source"] if usecols is not None: column_names = set(result_df.columns) valid_columns = [c for c in usecols if c in column_names] result_df = result_df[valid_columns] return result_df
[ "def", "read_gtf", "(", "filepath_or_buffer", ",", "expand_attribute_column", "=", "True", ",", "infer_biotype_column", "=", "False", ",", "column_converters", "=", "{", "}", ",", "usecols", "=", "None", ",", "features", "=", "None", ",", "chunksize", "=", "1024", "*", "1024", ")", ":", "if", "isinstance", "(", "filepath_or_buffer", ",", "string_types", ")", "and", "not", "exists", "(", "filepath_or_buffer", ")", ":", "raise", "ValueError", "(", "\"GTF file does not exist: %s\"", "%", "filepath_or_buffer", ")", "if", "expand_attribute_column", ":", "result_df", "=", "parse_gtf_and_expand_attributes", "(", "filepath_or_buffer", ",", "chunksize", "=", "chunksize", ",", "restrict_attribute_columns", "=", "usecols", ")", "else", ":", "result_df", "=", "parse_gtf", "(", "result_df", ",", "features", "=", "features", ")", "for", "column_name", ",", "column_type", "in", "list", "(", "column_converters", ".", "items", "(", ")", ")", ":", "result_df", "[", "column_name", "]", "=", "[", "column_type", "(", "string_value", ")", "if", "len", "(", "string_value", ")", ">", "0", "else", "None", "for", "string_value", "in", "result_df", "[", "column_name", "]", "]", "# Hackishly infer whether the values in the 'source' column of this GTF", "# are actually representing a biotype by checking for the most common", "# gene_biotype and transcript_biotype value 'protein_coding'", "if", "infer_biotype_column", ":", "unique_source_values", "=", "set", "(", "result_df", "[", "\"source\"", "]", ")", "if", "\"protein_coding\"", "in", "unique_source_values", ":", "column_names", "=", "set", "(", "result_df", ".", "columns", ")", "# Disambiguate between the two biotypes by checking if", "# gene_biotype is already present in another column. If it is,", "# the 2nd column is the transcript_biotype (otherwise, it's the", "# gene_biotype)", "if", "\"gene_biotype\"", "not", "in", "column_names", ":", "logging", ".", "info", "(", "\"Using column 'source' to replace missing 'gene_biotype'\"", ")", "result_df", "[", "\"gene_biotype\"", "]", "=", "result_df", "[", "\"source\"", "]", "if", "\"transcript_biotype\"", "not", "in", "column_names", ":", "logging", ".", "info", "(", "\"Using column 'source' to replace missing 'transcript_biotype'\"", ")", "result_df", "[", "\"transcript_biotype\"", "]", "=", "result_df", "[", "\"source\"", "]", "if", "usecols", "is", "not", "None", ":", "column_names", "=", "set", "(", "result_df", ".", "columns", ")", "valid_columns", "=", "[", "c", "for", "c", "in", "usecols", "if", "c", "in", "column_names", "]", "result_df", "=", "result_df", "[", "valid_columns", "]", "return", "result_df" ]
Parse a GTF into a dictionary mapping column names to sequences of values. Parameters ---------- filepath_or_buffer : str or buffer object Path to GTF file (may be gzip compressed) or buffer object such as StringIO expand_attribute_column : bool Replace strings of semi-colon separated key-value values in the 'attribute' column with one column per distinct key, with a list of values for each row (using None for rows where key didn't occur). infer_biotype_column : bool Due to the annoying ambiguity of the second GTF column across multiple Ensembl releases, figure out if an older GTF's source column is actually the gene_biotype or transcript_biotype. column_converters : dict, optional Dictionary mapping column names to conversion functions. Will replace empty strings with None and otherwise pass them to the given conversion function. usecols : list of str or None Restrict which columns are loaded to the given set. If None, then load all columns. features : set of str or None Drop rows which aren't one of the features in the supplied set chunksize : int
[ "Parse", "a", "GTF", "into", "a", "dictionary", "mapping", "column", "names", "to", "sequences", "of", "values", "." ]
train
https://github.com/openvax/gtfparse/blob/c79cab0c2a5ac3d08de9f932fa29a56d334a712b/gtfparse/read_gtf.py#L165-L247
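read_gtf is the function gtfparse exposes as its entry point; a typical call, with an illustrative file path, looks like:

from gtfparse import read_gtf

df = read_gtf("annotation.gtf")  # illustrative path
genes = df[df["feature"] == "gene"]
# gene_id only exists if the file's attribute column carried it
print(genes[["seqname", "start", "end", "gene_id"]].head())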
openvax/gtfparse
gtfparse/attribute_parsing.py
expand_attribute_strings
def expand_attribute_strings( attribute_strings, quote_char='\"', missing_value="", usecols=None): """ The last column of GTF has a variable number of key value pairs of the format: "key1 value1; key2 value2;" Parse these into a dictionary mapping each key onto a list of values, where the value is missing_value for any row where the key was missing. Parameters ---------- attribute_strings : list of str quote_char : str Quote character to remove from values missing_value : any If an attribute is missing from a row, give it this value. usecols : list of str or None If not None, then only expand columns included in this set, otherwise use all columns. Returns OrderedDict of column->value list mappings, in the order they appeared in the attribute strings. """ n = len(attribute_strings) extra_columns = {} column_order = [] # # SOME NOTES ABOUT THE BIZARRE STRING INTERNING GOING ON BELOW # # While parsing millions of repeated strings (e.g. "gene_id" and "TP53"), # we can save a lot of memory by making sure there's only one string # object per unique string. The canonical way to do this is using # the 'intern' function. One problem is that Py2 won't let you intern # unicode objects, so to get around this we call intern(str(...)). # # It also turns out to be faster to check interned strings ourselves # using a local dictionary, hence the two dictionaries below # and pair of try/except blocks in the loop. column_interned_strings = {} value_interned_strings = {} for (i, attribute_string) in enumerate(attribute_strings): for kv in attribute_string.split(";"): # We're slicing the first two elements out of split() because # Ensembl release 79 added values like: # transcript_support_level "1 (assigned to previous version 5)"; # ...which gets mangled by splitting on spaces. parts = kv.strip().split(" ", 2)[:2] if len(parts) != 2: continue column_name, value = parts try: column_name = column_interned_strings[column_name] except KeyError: column_name = intern(str(column_name)) column_interned_strings[column_name] = column_name if usecols is not None and column_name not in usecols: continue try: column = extra_columns[column_name] except KeyError: column = [missing_value] * n extra_columns[column_name] = column column_order.append(column_name) value = value.replace(quote_char, "") if value.startswith(quote_char) else value try: value = value_interned_strings[value] except KeyError: value = intern(str(value)) value_interned_strings[value] = value # if an attribute is used repeatedly then # join all of its values into a comma-separated string old_value = column[i] if old_value is missing_value: column[i] = value else: column[i] = "%s,%s" % (old_value, value) logging.info("Extracted GTF attributes: %s" % column_order) return OrderedDict( (column_name, extra_columns[column_name]) for column_name in column_order)
python
def expand_attribute_strings( attribute_strings, quote_char='\"', missing_value="", usecols=None): """ The last column of GTF has a variable number of key value pairs of the format: "key1 value1; key2 value2;" Parse these into a dictionary mapping each key onto a list of values, where the value is missing_value for any row where the key was missing. Parameters ---------- attribute_strings : list of str quote_char : str Quote character to remove from values missing_value : any If an attribute is missing from a row, give it this value. usecols : list of str or None If not None, then only expand columns included in this set, otherwise use all columns. Returns OrderedDict of column->value list mappings, in the order they appeared in the attribute strings. """ n = len(attribute_strings) extra_columns = {} column_order = [] # # SOME NOTES ABOUT THE BIZARRE STRING INTERNING GOING ON BELOW # # While parsing millions of repeated strings (e.g. "gene_id" and "TP53"), # we can save a lot of memory by making sure there's only one string # object per unique string. The canonical way to do this is using # the 'intern' function. One problem is that Py2 won't let you intern # unicode objects, so to get around this we call intern(str(...)). # # It also turns out to be faster to check interned strings ourselves # using a local dictionary, hence the two dictionaries below # and pair of try/except blocks in the loop. column_interned_strings = {} value_interned_strings = {} for (i, attribute_string) in enumerate(attribute_strings): for kv in attribute_string.split(";"): # We're slicing the first two elements out of split() because # Ensembl release 79 added values like: # transcript_support_level "1 (assigned to previous version 5)"; # ...which gets mangled by splitting on spaces. parts = kv.strip().split(" ", 2)[:2] if len(parts) != 2: continue column_name, value = parts try: column_name = column_interned_strings[column_name] except KeyError: column_name = intern(str(column_name)) column_interned_strings[column_name] = column_name if usecols is not None and column_name not in usecols: continue try: column = extra_columns[column_name] except KeyError: column = [missing_value] * n extra_columns[column_name] = column column_order.append(column_name) value = value.replace(quote_char, "") if value.startswith(quote_char) else value try: value = value_interned_strings[value] except KeyError: value = intern(str(value)) value_interned_strings[value] = value # if an attribute is used repeatedly then # join all of its values into a comma-separated string old_value = column[i] if old_value is missing_value: column[i] = value else: column[i] = "%s,%s" % (old_value, value) logging.info("Extracted GTF attributes: %s" % column_order) return OrderedDict( (column_name, extra_columns[column_name]) for column_name in column_order)
[ "def", "expand_attribute_strings", "(", "attribute_strings", ",", "quote_char", "=", "'\\\"'", ",", "missing_value", "=", "\"\"", ",", "usecols", "=", "None", ")", ":", "n", "=", "len", "(", "attribute_strings", ")", "extra_columns", "=", "{", "}", "column_order", "=", "[", "]", "#", "# SOME NOTES ABOUT THE BIZARRE STRING INTERNING GOING ON BELOW", "#", "# While parsing millions of repeated strings (e.g. \"gene_id\" and \"TP53\"),", "# we can save a lot of memory by making sure there's only one string", "# object per unique string. The canonical way to do this is using", "# the 'intern' function. One problem is that Py2 won't let you intern", "# unicode objects, so to get around this we call intern(str(...)).", "#", "# It also turns out to be faster to check interned strings ourselves", "# using a local dictionary, hence the two dictionaries below", "# and pair of try/except blocks in the loop.", "column_interned_strings", "=", "{", "}", "value_interned_strings", "=", "{", "}", "for", "(", "i", ",", "attribute_string", ")", "in", "enumerate", "(", "attribute_strings", ")", ":", "for", "kv", "in", "attribute_string", ".", "split", "(", "\";\"", ")", ":", "# We're slicing the first two elements out of split() because", "# Ensembl release 79 added values like:", "# transcript_support_level \"1 (assigned to previous version 5)\";", "# ...which gets mangled by splitting on spaces.", "parts", "=", "kv", ".", "strip", "(", ")", ".", "split", "(", "\" \"", ",", "2", ")", "[", ":", "2", "]", "if", "len", "(", "parts", ")", "!=", "2", ":", "continue", "column_name", ",", "value", "=", "parts", "try", ":", "column_name", "=", "column_interned_strings", "[", "column_name", "]", "except", "KeyError", ":", "column_name", "=", "intern", "(", "str", "(", "column_name", ")", ")", "column_interned_strings", "[", "column_name", "]", "=", "column_name", "if", "usecols", "is", "not", "None", "and", "column_name", "not", "in", "usecols", ":", "continue", "try", ":", "column", "=", "extra_columns", "[", "column_name", "]", "except", "KeyError", ":", "column", "=", "[", "missing_value", "]", "*", "n", "extra_columns", "[", "column_name", "]", "=", "column", "column_order", ".", "append", "(", "column_name", ")", "value", "=", "value", ".", "replace", "(", "quote_char", ",", "\"\"", ")", "if", "value", ".", "startswith", "(", "quote_char", ")", "else", "value", "try", ":", "value", "=", "value_interned_strings", "[", "value", "]", "except", "KeyError", ":", "value", "=", "intern", "(", "str", "(", "value", ")", ")", "value_interned_strings", "[", "value", "]", "=", "value", "# if an attribute is used repeatedly then", "# keep track of all its values in a list", "old_value", "=", "column", "[", "i", "]", "if", "old_value", "is", "missing_value", ":", "column", "[", "i", "]", "=", "value", "else", ":", "column", "[", "i", "]", "=", "\"%s,%s\"", "%", "(", "old_value", ",", "value", ")", "logging", ".", "info", "(", "\"Extracted GTF attributes: %s\"", "%", "column_order", ")", "return", "OrderedDict", "(", "(", "column_name", ",", "extra_columns", "[", "column_name", "]", ")", "for", "column_name", "in", "column_order", ")" ]
The last column of GTF has a variable number of key value pairs of the format: "key1 value1; key2 value2;" Parse these into a dictionary mapping each key onto a list of values, where the value is missing_value for any row where the key was missing. Parameters ---------- attribute_strings : list of str quote_char : str Quote character to remove from values missing_value : any If an attribute is missing from a row, give it this value. usecols : list of str or None If not None, then only expand columns included in this set, otherwise use all columns. Returns OrderedDict of column->value list mappings, in the order they appeared in the attribute strings.
[ "The", "last", "column", "of", "GTF", "has", "a", "variable", "number", "of", "key", "value", "pairs", "of", "the", "format", ":", "key1", "value1", ";", "key2", "value2", ";", "Parse", "these", "into", "a", "dictionary", "mapping", "each", "key", "onto", "a", "list", "of", "values", "where", "the", "value", "is", "None", "for", "any", "row", "where", "the", "key", "was", "missing", "." ]
train
https://github.com/openvax/gtfparse/blob/c79cab0c2a5ac3d08de9f932fa29a56d334a712b/gtfparse/attribute_parsing.py#L25-L121
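A self-contained sketch, importing from the module path shown in this record; the two attribute strings are illustrative, but the expected output follows directly from the code above.

from gtfparse.attribute_parsing import expand_attribute_strings

attrs = [
    'gene_id "g1"; gene_name "TP53";',
    'gene_id "g2";',
]
columns = expand_attribute_strings(attrs)
print(columns["gene_id"])    # ['g1', 'g2']
print(columns["gene_name"])  # ['TP53', ''] -- missing_value defaults to ""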
openvax/gtfparse
gtfparse/create_missing_features.py
create_missing_features
def create_missing_features( dataframe, unique_keys={}, extra_columns={}, missing_value=None): """ Helper function used to construct a missing feature such as 'transcript' or 'gene'. Some GTF files only have 'exon' and 'CDS' entries, but have transcript_id and gene_id annotations which allow us to construct those missing features. Parameters ---------- dataframe : pandas.DataFrame Should contain at least the core GTF columns, such as "seqname", "start", and "end" unique_keys : dict Mapping from feature names to the name of the column which should act as a unique key for that feature. Example: {"gene": "gene_id"} extra_columns : dict By default the constructed feature row will include only the 8 core columns and its unique key. Any other columns that should be included should be associated with the feature name in this dict. missing_value : any Which value to fill in for columns that we don't infer values for. Returns original dataframe along with all extra rows created for missing features. """ extra_dataframes = [] existing_features = set(dataframe["feature"]) existing_columns = set(dataframe.keys()) for (feature_name, groupby_key) in unique_keys.items(): if feature_name in existing_features: logging.info( "Feature '%s' already exists in GTF data" % feature_name) continue logging.info("Creating rows for missing feature '%s'" % feature_name) # don't include rows where the groupby key was missing empty_key_values = dataframe[groupby_key].map( lambda x: x == "" or x is None) row_groups = dataframe[~empty_key_values].groupby(groupby_key) # Each group corresponds to a unique feature entry for which the # other columns may or may not be uniquely defined. Start off by # assuming the values for every column are missing and fill them in # where possible. feature_values = OrderedDict([ (column_name, [missing_value] * row_groups.ngroups) for column_name in dataframe.keys() ]) # User specifies which non-required columns should we try to infer # values for feature_columns = list(extra_columns.get(feature_name, [])) for i, (feature_id, group) in enumerate(row_groups): # fill in the required columns by assuming that this feature # is the union of all intervals of other features that were # tagged with its unique ID (e.g. union of exons which had a # particular gene_id). feature_values["feature"][i] = feature_name feature_values[groupby_key][i] = feature_id # set the source to 'gtfparse' to indicate that we made this # entry up from other data feature_values["source"][i] = "gtfparse" feature_values["start"][i] = group["start"].min() feature_values["end"][i] = group["end"].max() # assume that seqname and strand are the same for all other # entries in the GTF which shared this unique ID feature_values["seqname"][i] = group["seqname"].iat[0] feature_values["strand"][i] = group["strand"].iat[0] # there's probably no rigorous way to set the values of # 'score' or 'frame' columns so leave them empty for column_name in feature_columns: if column_name not in existing_columns: raise ValueError( "Column '%s' does not exist in GTF, columns = %s" % ( column_name, existing_columns)) # expect that all entries related to a reconstructed feature # are related and are thus within the same interval of # positions on the same chromosome unique_values = group[column_name].dropna().unique() if len(unique_values) == 1: feature_values[column_name][i] = unique_values[0] extra_dataframes.append(pd.DataFrame(feature_values)) return pd.concat([dataframe] + extra_dataframes, ignore_index=True)
python
def create_missing_features( dataframe, unique_keys={}, extra_columns={}, missing_value=None): """ Helper function used to construct a missing feature such as 'transcript' or 'gene'. Some GTF files only have 'exon' and 'CDS' entries, but have transcript_id and gene_id annotations which allow us to construct those missing features. Parameters ---------- dataframe : pandas.DataFrame Should contain at least the core GTF columns, such as "seqname", "start", and "end" unique_keys : dict Mapping from feature names to the name of the column which should act as a unique key for that feature. Example: {"gene": "gene_id"} extra_columns : dict By default the constructed feature row will include only the 8 core columns and its unique key. Any other columns that should be included should be associated with the feature name in this dict. missing_value : any Which value to fill in for columns that we don't infer values for. Returns original dataframe along with all extra rows created for missing features. """ extra_dataframes = [] existing_features = set(dataframe["feature"]) existing_columns = set(dataframe.keys()) for (feature_name, groupby_key) in unique_keys.items(): if feature_name in existing_features: logging.info( "Feature '%s' already exists in GTF data" % feature_name) continue logging.info("Creating rows for missing feature '%s'" % feature_name) # don't include rows where the groupby key was missing empty_key_values = dataframe[groupby_key].map( lambda x: x == "" or x is None) row_groups = dataframe[~empty_key_values].groupby(groupby_key) # Each group corresponds to a unique feature entry for which the # other columns may or may not be uniquely defined. Start off by # assuming the values for every column are missing and fill them in # where possible. feature_values = OrderedDict([ (column_name, [missing_value] * row_groups.ngroups) for column_name in dataframe.keys() ]) # User specifies which non-required columns should we try to infer # values for feature_columns = list(extra_columns.get(feature_name, [])) for i, (feature_id, group) in enumerate(row_groups): # fill in the required columns by assuming that this feature # is the union of all intervals of other features that were # tagged with its unique ID (e.g. union of exons which had a # particular gene_id). feature_values["feature"][i] = feature_name feature_values[groupby_key][i] = feature_id # set the source to 'gtfparse' to indicate that we made this # entry up from other data feature_values["source"][i] = "gtfparse" feature_values["start"][i] = group["start"].min() feature_values["end"][i] = group["end"].max() # assume that seqname and strand are the same for all other # entries in the GTF which shared this unique ID feature_values["seqname"][i] = group["seqname"].iat[0] feature_values["strand"][i] = group["strand"].iat[0] # there's probably no rigorous way to set the values of # 'score' or 'frame' columns so leave them empty for column_name in feature_columns: if column_name not in existing_columns: raise ValueError( "Column '%s' does not exist in GTF, columns = %s" % ( column_name, existing_columns)) # expect that all entries related to a reconstructed feature # are related and are thus within the same interval of # positions on the same chromosome unique_values = group[column_name].dropna().unique() if len(unique_values) == 1: feature_values[column_name][i] = unique_values[0] extra_dataframes.append(pd.DataFrame(feature_values)) return pd.concat([dataframe] + extra_dataframes, ignore_index=True)
[ "def", "create_missing_features", "(", "dataframe", ",", "unique_keys", "=", "{", "}", ",", "extra_columns", "=", "{", "}", ",", "missing_value", "=", "None", ")", ":", "extra_dataframes", "=", "[", "]", "existing_features", "=", "set", "(", "dataframe", "[", "\"feature\"", "]", ")", "existing_columns", "=", "set", "(", "dataframe", ".", "keys", "(", ")", ")", "for", "(", "feature_name", ",", "groupby_key", ")", "in", "unique_keys", ".", "items", "(", ")", ":", "if", "feature_name", "in", "existing_features", ":", "logging", ".", "info", "(", "\"Feature '%s' already exists in GTF data\"", "%", "feature_name", ")", "continue", "logging", ".", "info", "(", "\"Creating rows for missing feature '%s'\"", "%", "feature_name", ")", "# don't include rows where the groupby key was missing", "empty_key_values", "=", "dataframe", "[", "groupby_key", "]", ".", "map", "(", "lambda", "x", ":", "x", "==", "\"\"", "or", "x", "is", "None", ")", "row_groups", "=", "dataframe", "[", "~", "empty_key_values", "]", ".", "groupby", "(", "groupby_key", ")", "# Each group corresponds to a unique feature entry for which the", "# other columns may or may not be uniquely defined. Start off by", "# assuming the values for every column are missing and fill them in", "# where possible.", "feature_values", "=", "OrderedDict", "(", "[", "(", "column_name", ",", "[", "missing_value", "]", "*", "row_groups", ".", "ngroups", ")", "for", "column_name", "in", "dataframe", ".", "keys", "(", ")", "]", ")", "# User specifies which non-required columns should we try to infer", "# values for", "feature_columns", "=", "list", "(", "extra_columns", ".", "get", "(", "feature_name", ",", "[", "]", ")", ")", "for", "i", ",", "(", "feature_id", ",", "group", ")", "in", "enumerate", "(", "row_groups", ")", ":", "# fill in the required columns by assuming that this feature", "# is the union of all intervals of other features that were", "# tagged with its unique ID (e.g. 
union of exons which had a", "# particular gene_id).", "feature_values", "[", "\"feature\"", "]", "[", "i", "]", "=", "feature_name", "feature_values", "[", "groupby_key", "]", "[", "i", "]", "=", "feature_id", "# set the source to 'gtfparse' to indicate that we made this", "# entry up from other data", "feature_values", "[", "\"source\"", "]", "[", "i", "]", "=", "\"gtfparse\"", "feature_values", "[", "\"start\"", "]", "[", "i", "]", "=", "group", "[", "\"start\"", "]", ".", "min", "(", ")", "feature_values", "[", "\"end\"", "]", "[", "i", "]", "=", "group", "[", "\"end\"", "]", ".", "max", "(", ")", "# assume that seqname and strand are the same for all other", "# entries in the GTF which shared this unique ID", "feature_values", "[", "\"seqname\"", "]", "[", "i", "]", "=", "group", "[", "\"seqname\"", "]", ".", "iat", "[", "0", "]", "feature_values", "[", "\"strand\"", "]", "[", "i", "]", "=", "group", "[", "\"strand\"", "]", ".", "iat", "[", "0", "]", "# there's probably no rigorous way to set the values of", "# 'score' or 'frame' columns so leave them empty", "for", "column_name", "in", "feature_columns", ":", "if", "column_name", "not", "in", "existing_columns", ":", "raise", "ValueError", "(", "\"Column '%s' does not exist in GTF, columns = %s\"", "%", "(", "column_name", ",", "existing_columns", ")", ")", "# expect that all entries related to a reconstructed feature", "# are related and are thus within the same interval of", "# positions on the same chromosome", "unique_values", "=", "group", "[", "column_name", "]", ".", "dropna", "(", ")", ".", "unique", "(", ")", "if", "len", "(", "unique_values", ")", "==", "1", ":", "feature_values", "[", "column_name", "]", "[", "i", "]", "=", "unique_values", "[", "0", "]", "extra_dataframes", ".", "append", "(", "pd", ".", "DataFrame", "(", "feature_values", ")", ")", "return", "pd", ".", "concat", "(", "[", "dataframe", "]", "+", "extra_dataframes", ",", "ignore_index", "=", "True", ")" ]
Helper function used to construct a missing feature such as 'transcript' or 'gene'. Some GTF files only have 'exon' and 'CDS' entries, but have transcript_id and gene_id annotations which allow us to construct those missing features. Parameters ---------- dataframe : pandas.DataFrame Should contain at least the core GTF columns, such as "seqname", "start", and "end" unique_keys : dict Mapping from feature names to the name of the column which should act as a unique key for that feature. Example: {"gene": "gene_id"} extra_columns : dict By default the constructed feature row will include only the 8 core columns and its unique key. Any other columns that should be included should be associated with the feature name in this dict. missing_value : any Which value to fill in for columns that we don't infer values for. Returns original dataframe along with all extra rows created for missing features.
[ "Helper", "function", "used", "to", "construct", "a", "missing", "feature", "such", "as", "transcript", "or", "gene", ".", "Some", "GTF", "files", "only", "have", "exon", "and", "CDS", "entries", "but", "have", "transcript_id", "and", "gene_id", "annotations", "which", "allow", "us", "to", "construct", "those", "missing", "features", "." ]
train
https://github.com/openvax/gtfparse/blob/c79cab0c2a5ac3d08de9f932fa29a56d334a712b/gtfparse/create_missing_features.py#L25-L121
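A sketch of reconstructing a 'gene' row from exon rows, assuming the module path shown in this record; the toy dataframe carries the core GTF columns plus the gene_id used as the unique key.

import pandas as pd

from gtfparse.create_missing_features import create_missing_features

# two exon rows sharing a gene_id, but no explicit 'gene' row
df = pd.DataFrame({
    "seqname": ["1", "1"],
    "source": ["ensembl", "ensembl"],
    "feature": ["exon", "exon"],
    "start": [100, 500],
    "end": [200, 600],
    "score": [None, None],
    "strand": ["+", "+"],
    "frame": [None, None],
    "gene_id": ["g1", "g1"],
})
result = create_missing_features(df, unique_keys={"gene": "gene_id"})
# the synthesized row spans 100..600 and is tagged with source 'gtfparse'
print(result[result["feature"] == "gene"])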
MichaelAquilina/S4
s4/clients/__init__.py
SyncClient.get_action
def get_action(self, key): """ returns the action to perform on this key based on its state before the last sync. """ index_local_timestamp = self.get_index_local_timestamp(key) real_local_timestamp = self.get_real_local_timestamp(key) remote_timestamp = self.get_remote_timestamp(key) return get_sync_state( index_local_timestamp, real_local_timestamp, remote_timestamp )
python
def get_action(self, key): """ returns the action to perform on this key based on its state before the last sync. """ index_local_timestamp = self.get_index_local_timestamp(key) real_local_timestamp = self.get_real_local_timestamp(key) remote_timestamp = self.get_remote_timestamp(key) return get_sync_state( index_local_timestamp, real_local_timestamp, remote_timestamp )
[ "def", "get_action", "(", "self", ",", "key", ")", ":", "index_local_timestamp", "=", "self", ".", "get_index_local_timestamp", "(", "key", ")", "real_local_timestamp", "=", "self", ".", "get_real_local_timestamp", "(", "key", ")", "remote_timestamp", "=", "self", ".", "get_remote_timestamp", "(", "key", ")", "return", "get_sync_state", "(", "index_local_timestamp", ",", "real_local_timestamp", ",", "remote_timestamp", ")" ]
returns the action to perform on this key based on its state before the last sync.
[ "returns", "the", "action", "to", "perform", "on", "this", "key", "based", "on", "its", "state", "before", "the", "last", "sync", "." ]
train
https://github.com/MichaelAquilina/S4/blob/05d74697e6ec683f0329c983f7c3f05ab75fd57e/s4/clients/__init__.py#L191-L201
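get_sync_state itself is not shown in this excerpt, so the following is purely an illustrative stand-in for the three-timestamp comparison it performs; the state names are invented here and are not s4's.

def illustrative_sync_state(index_local, real_local, remote):
    # NOT s4's actual get_sync_state -- a sketch of the idea only
    if real_local is not None and (index_local is None or real_local > index_local):
        return "changed-locally"    # the file changed since the last sync
    if remote is not None and (index_local is None or remote > index_local):
        return "changed-remotely"   # the remote copy changed since the last sync
    return "no-change"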
MichaelAquilina/S4
s4/clients/local.py
LocalSyncClient.lock
def lock(self, timeout=10): """ Advisory lock. Use to ensure that only one LocalSyncClient is working on the Target at the same time. """ logger.debug("Locking %s", self.lock_file) if not os.path.exists(self.lock_file): self.ensure_path(self.lock_file) with open(self.lock_file, "w"): os.utime(self.lock_file) self._lock.acquire(timeout=timeout)
python
def lock(self, timeout=10): """ Advisory lock. Use to ensure that only one LocalSyncClient is working on the Target at the same time. """ logger.debug("Locking %s", self.lock_file) if not os.path.exists(self.lock_file): self.ensure_path(self.lock_file) with open(self.lock_file, "w"): os.utime(self.lock_file) self._lock.acquire(timeout=timeout)
[ "def", "lock", "(", "self", ",", "timeout", "=", "10", ")", ":", "logger", ".", "debug", "(", "\"Locking %s\"", ",", "self", ".", "lock_file", ")", "if", "not", "os", ".", "path", ".", "exists", "(", "self", ".", "lock_file", ")", ":", "self", ".", "ensure_path", "(", "self", ".", "lock_file", ")", "with", "open", "(", "self", ".", "lock_file", ",", "\"w\"", ")", ":", "os", ".", "utime", "(", "self", ".", "lock_file", ")", "self", ".", "_lock", ".", "acquire", "(", "timeout", "=", "timeout", ")" ]
Advisory lock. Use to ensure that only one LocalSyncClient is working on the Target at the same time.
[ "Advisory", "lock", ".", "Use", "to", "ensure", "that", "only", "one", "LocalSyncClient", "is", "working", "on", "the", "Target", "at", "the", "same", "time", "." ]
train
https://github.com/MichaelAquilina/S4/blob/05d74697e6ec683f0329c983f7c3f05ab75fd57e/s4/clients/local.py#L65-L75
MichaelAquilina/S4
s4/clients/local.py
LocalSyncClient.unlock
def unlock(self): """ Unlock the active advisory lock. """ logger.debug("Releasing lock %s", self.lock_file) self._lock.release() try: os.unlink(self.lock_file) except FileNotFoundError: pass
python
def unlock(self): """ Unlock the active advisory lock. """ logger.debug("Releasing lock %s", self.lock_file) self._lock.release() try: os.unlink(self.lock_file) except FileNotFoundError: pass
[ "def", "unlock", "(", "self", ")", ":", "logger", ".", "debug", "(", "\"Releasing lock %s\"", ",", "self", ".", "lock_file", ")", "self", ".", "_lock", ".", "release", "(", ")", "try", ":", "os", ".", "unlink", "(", "self", ".", "lock_file", ")", "except", "FileNotFoundError", ":", "pass" ]
Unlock the active advisory lock.
[ "Unlock", "the", "active", "advisory", "lock", "." ]
train
https://github.com/MichaelAquilina/S4/blob/05d74697e6ec683f0329c983f7c3f05ab75fd57e/s4/clients/local.py#L77-L86
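A sketch of the intended lock/unlock pairing; the LocalSyncClient constructor arguments are assumed here, since they are not shown in this excerpt.

from s4.clients.local import LocalSyncClient  # module path per this record

client = LocalSyncClient("/tmp/sync-target")  # constructor args assumed
client.lock(timeout=5)
try:
    pass  # operate on the target while holding the advisory lock
finally:
    client.unlock()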
MichaelAquilina/S4
s4/utils.py
get_input
def get_input(*args, secret=False, required=False, blank=False, **kwargs): """ secret: Don't show user input when they are typing. required: Keep prompting if the user enters an empty value. blank: turn all empty strings into None. """ while True: if secret: value = getpass.getpass(*args, **kwargs) else: value = input(*args, **kwargs) if blank: value = value if value else None if not required or value: break return value
python
def get_input(*args, secret=False, required=False, blank=False, **kwargs): """ secret: Don't show user input when they are typing. required: Keep prompting if the user enters an empty value. blank: turn all empty strings into None. """ while True: if secret: value = getpass.getpass(*args, **kwargs) else: value = input(*args, **kwargs) if blank: value = value if value else None if not required or value: break return value
[ "def", "get_input", "(", "*", "args", ",", "secret", "=", "False", ",", "required", "=", "False", ",", "blank", "=", "False", ",", "*", "*", "kwargs", ")", ":", "while", "True", ":", "if", "secret", ":", "value", "=", "getpass", ".", "getpass", "(", "*", "args", ",", "*", "*", "kwargs", ")", "else", ":", "value", "=", "input", "(", "*", "args", ",", "*", "*", "kwargs", ")", "if", "blank", ":", "value", "=", "value", "if", "value", "else", "None", "if", "not", "required", "or", "value", ":", "break", "return", "value" ]
secret: Don't show user input when they are typing.
required: Keep prompting if the user enters an empty value.
blank: turn all empty strings into None.
[ "secret", ":", "Don", "t", "show", "user", "input", "when", "they", "are", "typing", ".", "required", ":", "Keep", "prompting", "if", "the", "user", "enters", "an", "empty", "value", ".", "blank", ":", "turn", "all", "empty", "strings", "into", "None", "." ]
train
https://github.com/MichaelAquilina/S4/blob/05d74697e6ec683f0329c983f7c3f05ab75fd57e/s4/utils.py#L17-L36
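A short usage sketch of get_input(); positional arguments are passed straight through to input() or getpass.getpass():

from s4.utils import get_input

# Keeps prompting until a non-empty value is entered; typing is hidden.
secret_key = get_input("AWS secret key: ", secret=True, required=True)

# Optional value: an empty answer is returned as None instead of "".
endpoint = get_input("Custom endpoint (blank for default): ", blank=True)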
gazpachoking/jsonref
proxytypes.py
ProxyMetaClass._no_proxy
def _no_proxy(method):
    """
    Returns a wrapped version of `method`, such that proxying is turned off
    during the method call.
    """

    @wraps(method)
    def wrapper(self, *args, **kwargs):
        notproxied = _oga(self, "__notproxied__")
        _osa(self, "__notproxied__", True)
        try:
            return method(self, *args, **kwargs)
        finally:
            _osa(self, "__notproxied__", notproxied)

    return wrapper
python
def _no_proxy(method):
    """
    Returns a wrapped version of `method`, such that proxying is turned off
    during the method call.
    """

    @wraps(method)
    def wrapper(self, *args, **kwargs):
        notproxied = _oga(self, "__notproxied__")
        _osa(self, "__notproxied__", True)
        try:
            return method(self, *args, **kwargs)
        finally:
            _osa(self, "__notproxied__", notproxied)

    return wrapper
[ "def", "_no_proxy", "(", "method", ")", ":", "@", "wraps", "(", "method", ")", "def", "wrapper", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "notproxied", "=", "_oga", "(", "self", ",", "\"__notproxied__\"", ")", "_osa", "(", "self", ",", "\"__notproxied__\"", ",", "True", ")", "try", ":", "return", "method", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", "finally", ":", "_osa", "(", "self", ",", "\"__notproxied__\"", ",", "notproxied", ")", "return", "wrapper" ]
Returns a wrapped version of `method`, such that proxying is turned off during the method call.
[ "Returns", "a", "wrapped", "version", "of", "method", "such", "that", "proxying", "is", "turned", "off", "during", "the", "method", "call", "." ]
train
https://github.com/gazpachoking/jsonref/blob/066132e527f8115f75bcadfd0eca12f8973a6309/proxytypes.py#L122-L138
gazpachoking/jsonref
proxytypes.py
Proxy._should_proxy
def _should_proxy(self, attr):
    """
    Determines whether `attr` should be looked up on the proxied object, or
    the proxy itself.
    """
    if attr in type(self).__notproxied__:
        return False
    if _oga(self, "__notproxied__") is True:
        return False
    return True
python
def _should_proxy(self, attr):
    """
    Determines whether `attr` should be looked up on the proxied object, or
    the proxy itself.
    """
    if attr in type(self).__notproxied__:
        return False
    if _oga(self, "__notproxied__") is True:
        return False
    return True
[ "def", "_should_proxy", "(", "self", ",", "attr", ")", ":", "if", "attr", "in", "type", "(", "self", ")", ".", "__notproxied__", ":", "return", "False", "if", "_oga", "(", "self", ",", "\"__notproxied__\"", ")", "is", "True", ":", "return", "False", "return", "True" ]
Determines whether `attr` should be looked up on the proxied object, or the proxy itself.
[ "Determines", "whether", "attr", "should", "be", "looked", "up", "on", "the", "proxied", "object", "or", "the", "proxy", "itself", "." ]
train
https://github.com/gazpachoking/jsonref/blob/066132e527f8115f75bcadfd0eca12f8973a6309/proxytypes.py#L161-L171
gazpachoking/jsonref
proxytypes.py
Proxy.add_proxy_meth
def add_proxy_meth(cls, name, func, arg_pos=0):
    """
    Add a method `name` to the class, which returns the value of `func`,
    called with the proxied value inserted at `arg_pos`
    """

    @wraps(func)
    def proxied(self, *args, **kwargs):
        args = list(args)
        args.insert(arg_pos, self.__subject__)
        result = func(*args, **kwargs)
        return result

    setattr(cls, name, proxied)
python
def add_proxy_meth(cls, name, func, arg_pos=0):
    """
    Add a method `name` to the class, which returns the value of `func`,
    called with the proxied value inserted at `arg_pos`
    """

    @wraps(func)
    def proxied(self, *args, **kwargs):
        args = list(args)
        args.insert(arg_pos, self.__subject__)
        result = func(*args, **kwargs)
        return result

    setattr(cls, name, proxied)
[ "def", "add_proxy_meth", "(", "cls", ",", "name", ",", "func", ",", "arg_pos", "=", "0", ")", ":", "@", "wraps", "(", "func", ")", "def", "proxied", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "args", "=", "list", "(", "args", ")", "args", ".", "insert", "(", "arg_pos", ",", "self", ".", "__subject__", ")", "result", "=", "func", "(", "*", "args", ",", "*", "*", "kwargs", ")", "return", "result", "setattr", "(", "cls", ",", "name", ",", "proxied", ")" ]
Add a method `name` to the class, which returns the value of `func`, called with the proxied value inserted at `arg_pos`
[ "Add", "a", "method", "name", "to", "the", "class", "which", "returns", "the", "value", "of", "func", "called", "with", "the", "proxied", "value", "inserted", "at", "arg_pos" ]
train
https://github.com/gazpachoking/jsonref/blob/066132e527f8115f75bcadfd0eca12f8973a6309/proxytypes.py#L192-L206
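Taken together, the three proxytypes helpers above are what make a proxy transparent: _no_proxy guards internal bookkeeping, _should_proxy routes attribute lookups, and add_proxy_meth installs the special methods that forward operators to the subject. A sketch of the observable effect, assuming this proxytypes module is importable and exposes LazyProxy (as it does in the jsonref repository):

from proxytypes import LazyProxy

p = LazyProxy(lambda: [1, 2, 3])  # callback runs on first use of the subject

# These work because add_proxy_meth installed __len__, __add__, etc.,
# each calling the built-in operation with the proxied list substituted in.
print(len(p))   # 3
print(p + [4])  # [1, 2, 3, 4]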
gazpachoking/jsonref
jsonref.py
load
def load(fp, base_uri="", loader=None, jsonschema=False, load_on_repr=True, **kwargs):
    """
    Drop in replacement for :func:`json.load`, where JSON references are
    proxied to their referent data.

    :param fp: File-like object containing JSON document
    :param kwargs: This function takes any of the keyword arguments from
        :meth:`JsonRef.replace_refs`. Any other keyword arguments will be
        passed to :func:`json.load`
    """
    if loader is None:
        loader = functools.partial(jsonloader, **kwargs)

    return JsonRef.replace_refs(
        json.load(fp, **kwargs),
        base_uri=base_uri,
        loader=loader,
        jsonschema=jsonschema,
        load_on_repr=load_on_repr,
    )
python
def load(fp, base_uri="", loader=None, jsonschema=False, load_on_repr=True, **kwargs):
    """
    Drop in replacement for :func:`json.load`, where JSON references are
    proxied to their referent data.

    :param fp: File-like object containing JSON document
    :param kwargs: This function takes any of the keyword arguments from
        :meth:`JsonRef.replace_refs`. Any other keyword arguments will be
        passed to :func:`json.load`
    """
    if loader is None:
        loader = functools.partial(jsonloader, **kwargs)

    return JsonRef.replace_refs(
        json.load(fp, **kwargs),
        base_uri=base_uri,
        loader=loader,
        jsonschema=jsonschema,
        load_on_repr=load_on_repr,
    )
[ "def", "load", "(", "fp", ",", "base_uri", "=", "\"\"", ",", "loader", "=", "None", ",", "jsonschema", "=", "False", ",", "load_on_repr", "=", "True", ",", "*", "*", "kwargs", ")", ":", "if", "loader", "is", "None", ":", "loader", "=", "functools", ".", "partial", "(", "jsonloader", ",", "*", "*", "kwargs", ")", "return", "JsonRef", ".", "replace_refs", "(", "json", ".", "load", "(", "fp", ",", "*", "*", "kwargs", ")", ",", "base_uri", "=", "base_uri", ",", "loader", "=", "loader", ",", "jsonschema", "=", "jsonschema", ",", "load_on_repr", "=", "load_on_repr", ",", ")" ]
Drop in replacement for :func:`json.load`, where JSON references are
proxied to their referent data.

:param fp: File-like object containing JSON document
:param kwargs: This function takes any of the keyword arguments from
    :meth:`JsonRef.replace_refs`. Any other keyword arguments will be
    passed to :func:`json.load`
[ "Drop", "in", "replacement", "for", ":", "func", ":", "json", ".", "load", "where", "JSON", "references", "are", "proxied", "to", "their", "referent", "data", "." ]
train
https://github.com/gazpachoking/jsonref/blob/066132e527f8115f75bcadfd0eca12f8973a6309/jsonref.py#L324-L345
gazpachoking/jsonref
jsonref.py
loads
def loads(s, base_uri="", loader=None, jsonschema=False, load_on_repr=True, **kwargs):
    """
    Drop in replacement for :func:`json.loads`, where JSON references are
    proxied to their referent data.

    :param s: String containing JSON document
    :param kwargs: This function takes any of the keyword arguments from
        :meth:`JsonRef.replace_refs`. Any other keyword arguments will be
        passed to :func:`json.loads`
    """
    if loader is None:
        loader = functools.partial(jsonloader, **kwargs)

    return JsonRef.replace_refs(
        json.loads(s, **kwargs),
        base_uri=base_uri,
        loader=loader,
        jsonschema=jsonschema,
        load_on_repr=load_on_repr,
    )
python
def loads(s, base_uri="", loader=None, jsonschema=False, load_on_repr=True, **kwargs):
    """
    Drop in replacement for :func:`json.loads`, where JSON references are
    proxied to their referent data.

    :param s: String containing JSON document
    :param kwargs: This function takes any of the keyword arguments from
        :meth:`JsonRef.replace_refs`. Any other keyword arguments will be
        passed to :func:`json.loads`
    """
    if loader is None:
        loader = functools.partial(jsonloader, **kwargs)

    return JsonRef.replace_refs(
        json.loads(s, **kwargs),
        base_uri=base_uri,
        loader=loader,
        jsonschema=jsonschema,
        load_on_repr=load_on_repr,
    )
[ "def", "loads", "(", "s", ",", "base_uri", "=", "\"\"", ",", "loader", "=", "None", ",", "jsonschema", "=", "False", ",", "load_on_repr", "=", "True", ",", "*", "*", "kwargs", ")", ":", "if", "loader", "is", "None", ":", "loader", "=", "functools", ".", "partial", "(", "jsonloader", ",", "*", "*", "kwargs", ")", "return", "JsonRef", ".", "replace_refs", "(", "json", ".", "loads", "(", "s", ",", "*", "*", "kwargs", ")", ",", "base_uri", "=", "base_uri", ",", "loader", "=", "loader", ",", "jsonschema", "=", "jsonschema", ",", "load_on_repr", "=", "load_on_repr", ",", ")" ]
Drop in replacement for :func:`json.loads`, where JSON references are
proxied to their referent data.

:param s: String containing JSON document
:param kwargs: This function takes any of the keyword arguments from
    :meth:`JsonRef.replace_refs`. Any other keyword arguments will be
    passed to :func:`json.loads`
[ "Drop", "in", "replacement", "for", ":", "func", ":", "json", ".", "loads", "where", "JSON", "references", "are", "proxied", "to", "their", "referent", "data", "." ]
train
https://github.com/gazpachoking/jsonref/blob/066132e527f8115f75bcadfd0eca12f8973a6309/jsonref.py#L348-L369
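The canonical round trip for loads() (load() behaves the same given a file-like object): references become transparent proxies of their referents:

import jsonref

data = jsonref.loads('{"real": [1, 2, 3, 4], "ref": {"$ref": "#/real"}}')
print(data["ref"])                  # [1, 2, 3, 4]
print(data["ref"] == data["real"])  # True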
gazpachoking/jsonref
jsonref.py
load_uri
def load_uri(uri, base_uri=None, loader=None, jsonschema=False, load_on_repr=True):
    """
    Load JSON data from ``uri`` with JSON references proxied to their
    referent data.

    :param uri: URI to fetch the JSON from
    :param kwargs: This function takes any of the keyword arguments from
        :meth:`JsonRef.replace_refs`
    """
    if loader is None:
        loader = jsonloader
    if base_uri is None:
        base_uri = uri

    return JsonRef.replace_refs(
        loader(uri),
        base_uri=base_uri,
        loader=loader,
        jsonschema=jsonschema,
        load_on_repr=load_on_repr,
    )
python
def load_uri(uri, base_uri=None, loader=None, jsonschema=False, load_on_repr=True):
    """
    Load JSON data from ``uri`` with JSON references proxied to their
    referent data.

    :param uri: URI to fetch the JSON from
    :param kwargs: This function takes any of the keyword arguments from
        :meth:`JsonRef.replace_refs`
    """
    if loader is None:
        loader = jsonloader
    if base_uri is None:
        base_uri = uri

    return JsonRef.replace_refs(
        loader(uri),
        base_uri=base_uri,
        loader=loader,
        jsonschema=jsonschema,
        load_on_repr=load_on_repr,
    )
[ "def", "load_uri", "(", "uri", ",", "base_uri", "=", "None", ",", "loader", "=", "None", ",", "jsonschema", "=", "False", ",", "load_on_repr", "=", "True", ")", ":", "if", "loader", "is", "None", ":", "loader", "=", "jsonloader", "if", "base_uri", "is", "None", ":", "base_uri", "=", "uri", "return", "JsonRef", ".", "replace_refs", "(", "loader", "(", "uri", ")", ",", "base_uri", "=", "base_uri", ",", "loader", "=", "loader", ",", "jsonschema", "=", "jsonschema", ",", "load_on_repr", "=", "load_on_repr", ",", ")" ]
Load JSON data from ``uri`` with JSON references proxied to their referent
data.

:param uri: URI to fetch the JSON from
:param kwargs: This function takes any of the keyword arguments from
    :meth:`JsonRef.replace_refs`
[ "Load", "JSON", "data", "from", "uri", "with", "JSON", "references", "proxied", "to", "their", "referent", "data", "." ]
train
https://github.com/gazpachoking/jsonref/blob/066132e527f8115f75bcadfd0eca12f8973a6309/jsonref.py#L372-L394
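A sketch of load_uri(); it fetches over the network, and relative references inside the fetched document resolve against the document's own URI since base_uri defaults to uri:

import jsonref

schema = jsonref.load_uri("http://json-schema.org/draft-04/schema")
print(schema["title"])  # "Core schema meta-schema"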
gazpachoking/jsonref
jsonref.py
dumps
def dumps(obj, **kwargs):
    """
    Serialize `obj`, which may contain :class:`JsonRef` objects, to a JSON
    formatted string. `JsonRef` objects will be dumped as the original
    reference object they were created from.

    :param obj: Object to serialize
    :param kwargs: Keyword arguments are the same as to :func:`json.dumps`
    """
    kwargs["cls"] = _ref_encoder_factory(kwargs.get("cls", json.JSONEncoder))
    return json.dumps(obj, **kwargs)
python
def dumps(obj, **kwargs):
    """
    Serialize `obj`, which may contain :class:`JsonRef` objects, to a JSON
    formatted string. `JsonRef` objects will be dumped as the original
    reference object they were created from.

    :param obj: Object to serialize
    :param kwargs: Keyword arguments are the same as to :func:`json.dumps`
    """
    kwargs["cls"] = _ref_encoder_factory(kwargs.get("cls", json.JSONEncoder))
    return json.dumps(obj, **kwargs)
[ "def", "dumps", "(", "obj", ",", "*", "*", "kwargs", ")", ":", "kwargs", "[", "\"cls\"", "]", "=", "_ref_encoder_factory", "(", "kwargs", ".", "get", "(", "\"cls\"", ",", "json", ".", "JSONEncoder", ")", ")", "return", "json", ".", "dumps", "(", "obj", ",", "*", "*", "kwargs", ")" ]
Serialize `obj`, which may contain :class:`JsonRef` objects, to a JSON
formatted string. `JsonRef` objects will be dumped as the original
reference object they were created from.

:param obj: Object to serialize
:param kwargs: Keyword arguments are the same as to :func:`json.dumps`
[ "Serialize", "obj", "which", "may", "contain", ":", "class", ":", "JsonRef", "objects", "to", "a", "JSON", "formatted", "string", ".", "JsonRef", "objects", "will", "be", "dumped", "as", "the", "original", "reference", "object", "they", "were", "created", "from", "." ]
train
https://github.com/gazpachoking/jsonref/blob/066132e527f8115f75bcadfd0eca12f8973a6309/jsonref.py#L413-L424
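Because each JsonRef proxy remembers the reference object it was created from, dumps() writes the references back out instead of inlining the resolved data:

import jsonref

data = jsonref.loads('{"real": [1, 2], "ref": {"$ref": "#/real"}}')
print(data["ref"])          # [1, 2] - dereferenced on access
print(jsonref.dumps(data))  # {"real": [1, 2], "ref": {"$ref": "#/real"}}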
gazpachoking/jsonref
jsonref.py
JsonRef.replace_refs
def replace_refs(cls, obj, _recursive=False, **kwargs):
    """
    Returns a deep copy of `obj` with all contained JSON reference objects
    replaced with :class:`JsonRef` instances.

    :param obj: If this is a JSON reference object, a :class:`JsonRef`
        instance will be created. If `obj` is not a JSON reference object,
        a deep copy of it will be created with all contained JSON
        reference objects replaced by :class:`JsonRef` instances
    :param base_uri: URI to resolve relative references against
    :param loader: Callable that takes a URI and returns the parsed JSON
        (defaults to global ``jsonloader``, a :class:`JsonLoader` instance)
    :param jsonschema: Flag to turn on `JSON Schema mode
        <http://json-schema.org/latest/json-schema-core.html#anchor25>`_.
        'id' keyword changes the `base_uri` for references contained within
        the object
    :param load_on_repr: If set to ``False``, :func:`repr` call on a
        :class:`JsonRef` object will not cause the reference to be loaded
        if it hasn't already. (defaults to ``True``)
    """
    store = kwargs.setdefault("_store", _URIDict())
    base_uri, frag = urlparse.urldefrag(kwargs.get("base_uri", ""))
    store_uri = None  # If this does not get set, we won't store the result
    if not frag and not _recursive:
        store_uri = base_uri
    try:
        if kwargs.get("jsonschema") and isinstance(obj["id"], basestring):
            kwargs["base_uri"] = urlparse.urljoin(
                kwargs.get("base_uri", ""), obj["id"]
            )
            store_uri = kwargs["base_uri"]
    except (TypeError, LookupError):
        pass

    try:
        if not isinstance(obj["$ref"], basestring):
            raise TypeError
    except (TypeError, LookupError):
        pass
    else:
        return cls(obj, **kwargs)

    # If our obj was not a json reference object, iterate through it,
    # replacing children with JsonRefs
    kwargs["_recursive"] = True
    path = list(kwargs.pop("_path", ()))
    if isinstance(obj, Mapping):
        obj = type(obj)(
            (k, cls.replace_refs(v, _path=path + [k], **kwargs))
            for k, v in iteritems(obj)
        )
    elif isinstance(obj, Sequence) and not isinstance(obj, basestring):
        obj = type(obj)(
            cls.replace_refs(v, _path=path + [i], **kwargs)
            for i, v in enumerate(obj)
        )
    if store_uri is not None:
        store[store_uri] = obj
    return obj
python
def replace_refs(cls, obj, _recursive=False, **kwargs):
    """
    Returns a deep copy of `obj` with all contained JSON reference objects
    replaced with :class:`JsonRef` instances.

    :param obj: If this is a JSON reference object, a :class:`JsonRef`
        instance will be created. If `obj` is not a JSON reference object,
        a deep copy of it will be created with all contained JSON
        reference objects replaced by :class:`JsonRef` instances
    :param base_uri: URI to resolve relative references against
    :param loader: Callable that takes a URI and returns the parsed JSON
        (defaults to global ``jsonloader``, a :class:`JsonLoader` instance)
    :param jsonschema: Flag to turn on `JSON Schema mode
        <http://json-schema.org/latest/json-schema-core.html#anchor25>`_.
        'id' keyword changes the `base_uri` for references contained within
        the object
    :param load_on_repr: If set to ``False``, :func:`repr` call on a
        :class:`JsonRef` object will not cause the reference to be loaded
        if it hasn't already. (defaults to ``True``)
    """
    store = kwargs.setdefault("_store", _URIDict())
    base_uri, frag = urlparse.urldefrag(kwargs.get("base_uri", ""))
    store_uri = None  # If this does not get set, we won't store the result
    if not frag and not _recursive:
        store_uri = base_uri
    try:
        if kwargs.get("jsonschema") and isinstance(obj["id"], basestring):
            kwargs["base_uri"] = urlparse.urljoin(
                kwargs.get("base_uri", ""), obj["id"]
            )
            store_uri = kwargs["base_uri"]
    except (TypeError, LookupError):
        pass

    try:
        if not isinstance(obj["$ref"], basestring):
            raise TypeError
    except (TypeError, LookupError):
        pass
    else:
        return cls(obj, **kwargs)

    # If our obj was not a json reference object, iterate through it,
    # replacing children with JsonRefs
    kwargs["_recursive"] = True
    path = list(kwargs.pop("_path", ()))
    if isinstance(obj, Mapping):
        obj = type(obj)(
            (k, cls.replace_refs(v, _path=path + [k], **kwargs))
            for k, v in iteritems(obj)
        )
    elif isinstance(obj, Sequence) and not isinstance(obj, basestring):
        obj = type(obj)(
            cls.replace_refs(v, _path=path + [i], **kwargs)
            for i, v in enumerate(obj)
        )
    if store_uri is not None:
        store[store_uri] = obj
    return obj
[ "def", "replace_refs", "(", "cls", ",", "obj", ",", "_recursive", "=", "False", ",", "*", "*", "kwargs", ")", ":", "store", "=", "kwargs", ".", "setdefault", "(", "\"_store\"", ",", "_URIDict", "(", ")", ")", "base_uri", ",", "frag", "=", "urlparse", ".", "urldefrag", "(", "kwargs", ".", "get", "(", "\"base_uri\"", ",", "\"\"", ")", ")", "store_uri", "=", "None", "# If this does not get set, we won't store the result", "if", "not", "frag", "and", "not", "_recursive", ":", "store_uri", "=", "base_uri", "try", ":", "if", "kwargs", ".", "get", "(", "\"jsonschema\"", ")", "and", "isinstance", "(", "obj", "[", "\"id\"", "]", ",", "basestring", ")", ":", "kwargs", "[", "\"base_uri\"", "]", "=", "urlparse", ".", "urljoin", "(", "kwargs", ".", "get", "(", "\"base_uri\"", ",", "\"\"", ")", ",", "obj", "[", "\"id\"", "]", ")", "store_uri", "=", "kwargs", "[", "\"base_uri\"", "]", "except", "(", "TypeError", ",", "LookupError", ")", ":", "pass", "try", ":", "if", "not", "isinstance", "(", "obj", "[", "\"$ref\"", "]", ",", "basestring", ")", ":", "raise", "TypeError", "except", "(", "TypeError", ",", "LookupError", ")", ":", "pass", "else", ":", "return", "cls", "(", "obj", ",", "*", "*", "kwargs", ")", "# If our obj was not a json reference object, iterate through it,", "# replacing children with JsonRefs", "kwargs", "[", "\"_recursive\"", "]", "=", "True", "path", "=", "list", "(", "kwargs", ".", "pop", "(", "\"_path\"", ",", "(", ")", ")", ")", "if", "isinstance", "(", "obj", ",", "Mapping", ")", ":", "obj", "=", "type", "(", "obj", ")", "(", "(", "k", ",", "cls", ".", "replace_refs", "(", "v", ",", "_path", "=", "path", "+", "[", "k", "]", ",", "*", "*", "kwargs", ")", ")", "for", "k", ",", "v", "in", "iteritems", "(", "obj", ")", ")", "elif", "isinstance", "(", "obj", ",", "Sequence", ")", "and", "not", "isinstance", "(", "obj", ",", "basestring", ")", ":", "obj", "=", "type", "(", "obj", ")", "(", "cls", ".", "replace_refs", "(", "v", ",", "_path", "=", "path", "+", "[", "i", "]", ",", "*", "*", "kwargs", ")", "for", "i", ",", "v", "in", "enumerate", "(", "obj", ")", ")", "if", "store_uri", "is", "not", "None", ":", "store", "[", "store_uri", "]", "=", "obj", "return", "obj" ]
Returns a deep copy of `obj` with all contained JSON reference objects
replaced with :class:`JsonRef` instances.

:param obj: If this is a JSON reference object, a :class:`JsonRef`
    instance will be created. If `obj` is not a JSON reference object,
    a deep copy of it will be created with all contained JSON
    reference objects replaced by :class:`JsonRef` instances
:param base_uri: URI to resolve relative references against
:param loader: Callable that takes a URI and returns the parsed JSON
    (defaults to global ``jsonloader``, a :class:`JsonLoader` instance)
:param jsonschema: Flag to turn on `JSON Schema mode
    <http://json-schema.org/latest/json-schema-core.html#anchor25>`_.
    'id' keyword changes the `base_uri` for references contained within
    the object
:param load_on_repr: If set to ``False``, :func:`repr` call on a
    :class:`JsonRef` object will not cause the reference to be loaded
    if it hasn't already. (defaults to ``True``)
[ "Returns", "a", "deep", "copy", "of", "obj", "with", "all", "contained", "JSON", "reference", "objects", "replaced", "with", ":", "class", ":", "JsonRef", "instances", "." ]
train
https://github.com/gazpachoking/jsonref/blob/066132e527f8115f75bcadfd0eca12f8973a6309/jsonref.py#L70-L130
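replace_refs() is the classmethod all of the module-level loaders above delegate to, and it can be called directly on already-parsed data:

from jsonref import JsonRef

document = {"name": {"first": "Ada"}, "alias": {"$ref": "#/name/first"}}
result = JsonRef.replace_refs(document)
print(result["alias"])  # "Ada" - a lazy proxy, resolved on first access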
gazpachoking/jsonref
jsonref.py
JsonRef.resolve_pointer
def resolve_pointer(self, document, pointer):
    """
    Resolve a json pointer ``pointer`` within the referenced ``document``.

    :argument document: the referent document
    :argument str pointer: a json pointer URI fragment to resolve within it
    """
    # Do only split at single forward slashes which are not prefixed by a caret
    parts = re.split(r"(?<!\^)/", unquote(pointer.lstrip("/"))) if pointer else []
    for part in parts:
        # Restore escaped slashes and carets
        replacements = {r"^/": r"/", r"^^": r"^"}
        part = re.sub(
            "|".join(re.escape(key) for key in replacements.keys()),
            lambda k: replacements[k.group(0)],
            part,
        )
        if isinstance(document, Sequence):
            # Try to turn an array index to an int
            try:
                part = int(part)
            except ValueError:
                pass
        try:
            document = document[part]
        except (TypeError, LookupError) as e:
            self._error("Unresolvable JSON pointer: %r" % pointer, cause=e)
    return document
python
def resolve_pointer(self, document, pointer):
    """
    Resolve a json pointer ``pointer`` within the referenced ``document``.

    :argument document: the referent document
    :argument str pointer: a json pointer URI fragment to resolve within it
    """
    # Do only split at single forward slashes which are not prefixed by a caret
    parts = re.split(r"(?<!\^)/", unquote(pointer.lstrip("/"))) if pointer else []
    for part in parts:
        # Restore escaped slashes and carets
        replacements = {r"^/": r"/", r"^^": r"^"}
        part = re.sub(
            "|".join(re.escape(key) for key in replacements.keys()),
            lambda k: replacements[k.group(0)],
            part,
        )
        if isinstance(document, Sequence):
            # Try to turn an array index to an int
            try:
                part = int(part)
            except ValueError:
                pass
        try:
            document = document[part]
        except (TypeError, LookupError) as e:
            self._error("Unresolvable JSON pointer: %r" % pointer, cause=e)
    return document
[ "def", "resolve_pointer", "(", "self", ",", "document", ",", "pointer", ")", ":", "# Do only split at single forward slashes which are not prefixed by a caret", "parts", "=", "re", ".", "split", "(", "r\"(?<!\\^)/\"", ",", "unquote", "(", "pointer", ".", "lstrip", "(", "\"/\"", ")", ")", ")", "if", "pointer", "else", "[", "]", "for", "part", "in", "parts", ":", "# Restore escaped slashes and carets", "replacements", "=", "{", "r\"^/\"", ":", "r\"/\"", ",", "r\"^^\"", ":", "r\"^\"", "}", "part", "=", "re", ".", "sub", "(", "\"|\"", ".", "join", "(", "re", ".", "escape", "(", "key", ")", "for", "key", "in", "replacements", ".", "keys", "(", ")", ")", ",", "lambda", "k", ":", "replacements", "[", "k", ".", "group", "(", "0", ")", "]", ",", "part", ",", ")", "if", "isinstance", "(", "document", ",", "Sequence", ")", ":", "# Try to turn an array index to an int", "try", ":", "part", "=", "int", "(", "part", ")", "except", "ValueError", ":", "pass", "try", ":", "document", "=", "document", "[", "part", "]", "except", "(", "TypeError", ",", "LookupError", ")", "as", "e", ":", "self", ".", "_error", "(", "\"Unresolvable JSON pointer: %r\"", "%", "pointer", ",", "cause", "=", "e", ")", "return", "document" ]
Resolve a json pointer ``pointer`` within the referenced ``document``.

:argument document: the referent document
:argument str pointer: a json pointer URI fragment to resolve within it
[ "Resolve", "a", "json", "pointer", "pointer", "within", "the", "referenced", "document", "." ]
train
https://github.com/gazpachoking/jsonref/blob/066132e527f8115f75bcadfd0eca12f8973a6309/jsonref.py#L191-L220
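resolve_pointer() walks the URI fragment one segment at a time, converting segments to int for sequences; note this implementation escapes literal slashes and carets with a caret (^/ and ^^) rather than the RFC 6901 ~1/~0 escapes. Observable behaviour through the public API:

import jsonref

doc = '{"a": {"b": [10, 20]}, "r": {"$ref": "#/a/b/1"}}'
print(jsonref.loads(doc)["r"])  # 20 - "/a/b/1" resolved segment by segment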
Iotic-Labs/py-ubjson
ubjson/encoder.py
dump
def dump(obj, fp, container_count=False, sort_keys=False, no_float32=True, default=None):
    """Writes the given object as UBJSON to the provided file-like object

    Args:
        obj: The object to encode
        fp: write([size])-able object
        container_count (bool): Specify length for container types (including
                                for empty ones). This can aid decoding speed
                                depending on implementation but requires a bit
                                more space and encoding speed could be reduced
                                if getting length of any of the containers is
                                expensive.
        sort_keys (bool): Sort keys of mappings
        no_float32 (bool): Never use float32 to store float numbers (other
                           than for zero). Disabling this might save space at
                           the loss of precision.
        default (callable): Called for objects which cannot be serialised.
                            Should return a UBJSON-encodable version of the
                            object or raise an EncoderException.

    Raises:
        EncoderException: If an encoding failure occurred.

    The following Python types and interfaces (ABCs) are supported (as are
    any subclasses):

    +------------------------------+-----------------------------------+
    | Python                       | UBJSON                            |
    +==============================+===================================+
    | (3) str                      | string                            |
    | (2) unicode                  |                                   |
    +------------------------------+-----------------------------------+
    | None                         | null                              |
    +------------------------------+-----------------------------------+
    | bool                         | true, false                       |
    +------------------------------+-----------------------------------+
    | (3) int                      | uint8, int8, int16, int32, int64, |
    | (2) int, long                | high_precision                    |
    +------------------------------+-----------------------------------+
    | float                        | float32, float64, high_precision  |
    +------------------------------+-----------------------------------+
    | Decimal                      | high_precision                    |
    +------------------------------+-----------------------------------+
    | (3) bytes, bytearray         | array (type, uint8)               |
    | (2) str                      | array (type, uint8)               |
    +------------------------------+-----------------------------------+
    | (3) collections.abc.Mapping  | object                            |
    | (2) collections.Mapping      |                                   |
    +------------------------------+-----------------------------------+
    | (3) collections.abc.Sequence | array                             |
    | (2) collections.Sequence     |                                   |
    +------------------------------+-----------------------------------+

    Notes:
    - Items are resolved in the order of this table, e.g. if the item
      implements both Mapping and Sequence interfaces, it will be encoded as
      a mapping.
    - None and bool do not use an isinstance check
    - Numbers in brackets denote Python version.
    - Only unicode strings in Python 2 are encoded as strings, i.e. for
      compatibility with e.g. Python 3 one MUST NOT use str in Python 2 (as
      that will be interpreted as a byte array).
    - Mapping keys have to be strings: str for Python3 and unicode or str in
      Python 2.
    - float conversion rules (depending on no_float32 setting):
        float32: 1.18e-38 <= abs(value) <= 3.4e38 or value == 0
        float64: 2.23e-308 <= abs(value) < 1.8e308
        For other values Decimal is used.
    """
    if not callable(fp.write):
        raise TypeError('fp.write not callable')
    fp_write = fp.write

    __encode_value(fp_write, obj, {}, container_count, sort_keys, no_float32,
                   default)
python
def dump(obj, fp, container_count=False, sort_keys=False, no_float32=True, default=None):
    """Writes the given object as UBJSON to the provided file-like object

    Args:
        obj: The object to encode
        fp: write([size])-able object
        container_count (bool): Specify length for container types (including
                                for empty ones). This can aid decoding speed
                                depending on implementation but requires a bit
                                more space and encoding speed could be reduced
                                if getting length of any of the containers is
                                expensive.
        sort_keys (bool): Sort keys of mappings
        no_float32 (bool): Never use float32 to store float numbers (other
                           than for zero). Disabling this might save space at
                           the loss of precision.
        default (callable): Called for objects which cannot be serialised.
                            Should return a UBJSON-encodable version of the
                            object or raise an EncoderException.

    Raises:
        EncoderException: If an encoding failure occurred.

    The following Python types and interfaces (ABCs) are supported (as are
    any subclasses):

    +------------------------------+-----------------------------------+
    | Python                       | UBJSON                            |
    +==============================+===================================+
    | (3) str                      | string                            |
    | (2) unicode                  |                                   |
    +------------------------------+-----------------------------------+
    | None                         | null                              |
    +------------------------------+-----------------------------------+
    | bool                         | true, false                       |
    +------------------------------+-----------------------------------+
    | (3) int                      | uint8, int8, int16, int32, int64, |
    | (2) int, long                | high_precision                    |
    +------------------------------+-----------------------------------+
    | float                        | float32, float64, high_precision  |
    +------------------------------+-----------------------------------+
    | Decimal                      | high_precision                    |
    +------------------------------+-----------------------------------+
    | (3) bytes, bytearray         | array (type, uint8)               |
    | (2) str                      | array (type, uint8)               |
    +------------------------------+-----------------------------------+
    | (3) collections.abc.Mapping  | object                            |
    | (2) collections.Mapping      |                                   |
    +------------------------------+-----------------------------------+
    | (3) collections.abc.Sequence | array                             |
    | (2) collections.Sequence     |                                   |
    +------------------------------+-----------------------------------+

    Notes:
    - Items are resolved in the order of this table, e.g. if the item
      implements both Mapping and Sequence interfaces, it will be encoded as
      a mapping.
    - None and bool do not use an isinstance check
    - Numbers in brackets denote Python version.
    - Only unicode strings in Python 2 are encoded as strings, i.e. for
      compatibility with e.g. Python 3 one MUST NOT use str in Python 2 (as
      that will be interpreted as a byte array).
    - Mapping keys have to be strings: str for Python3 and unicode or str in
      Python 2.
    - float conversion rules (depending on no_float32 setting):
        float32: 1.18e-38 <= abs(value) <= 3.4e38 or value == 0
        float64: 2.23e-308 <= abs(value) < 1.8e308
        For other values Decimal is used.
    """
    if not callable(fp.write):
        raise TypeError('fp.write not callable')
    fp_write = fp.write

    __encode_value(fp_write, obj, {}, container_count, sort_keys, no_float32,
                   default)
[ "def", "dump", "(", "obj", ",", "fp", ",", "container_count", "=", "False", ",", "sort_keys", "=", "False", ",", "no_float32", "=", "True", ",", "default", "=", "None", ")", ":", "if", "not", "callable", "(", "fp", ".", "write", ")", ":", "raise", "TypeError", "(", "'fp.write not callable'", ")", "fp_write", "=", "fp", ".", "write", "__encode_value", "(", "fp_write", ",", "obj", ",", "{", "}", ",", "container_count", ",", "sort_keys", ",", "no_float32", ",", "default", ")" ]
Writes the given object as UBJSON to the provided file-like object

Args:
    obj: The object to encode
    fp: write([size])-able object
    container_count (bool): Specify length for container types (including
                            for empty ones). This can aid decoding speed
                            depending on implementation but requires a bit
                            more space and encoding speed could be reduced
                            if getting length of any of the containers is
                            expensive.
    sort_keys (bool): Sort keys of mappings
    no_float32 (bool): Never use float32 to store float numbers (other than
                       for zero). Disabling this might save space at the
                       loss of precision.
    default (callable): Called for objects which cannot be serialised.
                        Should return a UBJSON-encodable version of the
                        object or raise an EncoderException.

Raises:
    EncoderException: If an encoding failure occurred.

The following Python types and interfaces (ABCs) are supported (as are any
subclasses):

+------------------------------+-----------------------------------+
| Python                       | UBJSON                            |
+==============================+===================================+
| (3) str                      | string                            |
| (2) unicode                  |                                   |
+------------------------------+-----------------------------------+
| None                         | null                              |
+------------------------------+-----------------------------------+
| bool                         | true, false                       |
+------------------------------+-----------------------------------+
| (3) int                      | uint8, int8, int16, int32, int64, |
| (2) int, long                | high_precision                    |
+------------------------------+-----------------------------------+
| float                        | float32, float64, high_precision  |
+------------------------------+-----------------------------------+
| Decimal                      | high_precision                    |
+------------------------------+-----------------------------------+
| (3) bytes, bytearray         | array (type, uint8)               |
| (2) str                      | array (type, uint8)               |
+------------------------------+-----------------------------------+
| (3) collections.abc.Mapping  | object                            |
| (2) collections.Mapping      |                                   |
+------------------------------+-----------------------------------+
| (3) collections.abc.Sequence | array                             |
| (2) collections.Sequence     |                                   |
+------------------------------+-----------------------------------+

Notes:
- Items are resolved in the order of this table, e.g. if the item implements
  both Mapping and Sequence interfaces, it will be encoded as a mapping.
- None and bool do not use an isinstance check
- Numbers in brackets denote Python version.
- Only unicode strings in Python 2 are encoded as strings, i.e. for
  compatibility with e.g. Python 3 one MUST NOT use str in Python 2 (as that
  will be interpreted as a byte array).
- Mapping keys have to be strings: str for Python3 and unicode or str in
  Python 2.
- float conversion rules (depending on no_float32 setting):
    float32: 1.18e-38 <= abs(value) <= 3.4e38 or value == 0
    float64: 2.23e-308 <= abs(value) < 1.8e308
    For other values Decimal is used.
[ "Writes", "the", "given", "object", "as", "UBJSON", "to", "the", "provided", "file", "-", "like", "object" ]
train
https://github.com/Iotic-Labs/py-ubjson/blob/80dcacbc7bba1759c69759fb3109ac1c6574da68/ubjson/encoder.py#L233-L305
Iotic-Labs/py-ubjson
ubjson/encoder.py
dumpb
def dumpb(obj, container_count=False, sort_keys=False, no_float32=True, default=None):
    """Returns the given object as UBJSON in a bytes instance. See dump() for
       available arguments."""
    with BytesIO() as fp:
        dump(obj, fp, container_count=container_count, sort_keys=sort_keys,
             no_float32=no_float32, default=default)
        return fp.getvalue()
python
def dumpb(obj, container_count=False, sort_keys=False, no_float32=True, default=None):
    """Returns the given object as UBJSON in a bytes instance. See dump() for
       available arguments."""
    with BytesIO() as fp:
        dump(obj, fp, container_count=container_count, sort_keys=sort_keys,
             no_float32=no_float32, default=default)
        return fp.getvalue()
[ "def", "dumpb", "(", "obj", ",", "container_count", "=", "False", ",", "sort_keys", "=", "False", ",", "no_float32", "=", "True", ",", "default", "=", "None", ")", ":", "with", "BytesIO", "(", ")", "as", "fp", ":", "dump", "(", "obj", ",", "fp", ",", "container_count", "=", "container_count", ",", "sort_keys", "=", "sort_keys", ",", "no_float32", "=", "no_float32", ",", "default", "=", "default", ")", "return", "fp", ".", "getvalue", "(", ")" ]
Returns the given object as UBJSON in a bytes instance. See dump() for available arguments.
[ "Returns", "the", "given", "object", "as", "UBJSON", "in", "a", "bytes", "instance", ".", "See", "dump", "()", "for", "available", "arguments", "." ]
train
https://github.com/Iotic-Labs/py-ubjson/blob/80dcacbc7bba1759c69759fb3109ac1c6574da68/ubjson/encoder.py#L308-L313
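A round-trip sketch for the encoder: dump() writes to anything with a write() method, while dumpb() returns the encoded bytes directly:

import ubjson

encoded = ubjson.dumpb({"answer": 42, "values": [1.5, None, True]})
print(type(encoded))          # <class 'bytes'>
print(ubjson.loadb(encoded))  # {'answer': 42, 'values': [1.5, None, True]}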
Iotic-Labs/py-ubjson
ez_setup.py
_resolve_version
def _resolve_version(version):
    """
    Resolve LATEST version
    """
    if version is not LATEST:
        return version

    resp = urlopen('https://pypi.python.org/pypi/setuptools/json')
    with contextlib.closing(resp):
        try:
            charset = resp.info().get_content_charset()
        except Exception:
            # Python 2 compat; assume UTF-8
            charset = 'UTF-8'
        reader = codecs.getreader(charset)
        doc = json.load(reader(resp))

    return str(doc['info']['version'])
python
def _resolve_version(version):
    """
    Resolve LATEST version
    """
    if version is not LATEST:
        return version

    resp = urlopen('https://pypi.python.org/pypi/setuptools/json')
    with contextlib.closing(resp):
        try:
            charset = resp.info().get_content_charset()
        except Exception:
            # Python 2 compat; assume UTF-8
            charset = 'UTF-8'
        reader = codecs.getreader(charset)
        doc = json.load(reader(resp))

    return str(doc['info']['version'])
[ "def", "_resolve_version", "(", "version", ")", ":", "if", "version", "is", "not", "LATEST", ":", "return", "version", "resp", "=", "urlopen", "(", "'https://pypi.python.org/pypi/setuptools/json'", ")", "with", "contextlib", ".", "closing", "(", "resp", ")", ":", "try", ":", "charset", "=", "resp", ".", "info", "(", ")", ".", "get_content_charset", "(", ")", "except", "Exception", ":", "# Python 2 compat; assume UTF-8", "charset", "=", "'UTF-8'", "reader", "=", "codecs", ".", "getreader", "(", "charset", ")", "doc", "=", "json", ".", "load", "(", "reader", "(", "resp", ")", ")", "return", "str", "(", "doc", "[", "'info'", "]", "[", "'version'", "]", ")" ]
Resolve LATEST version
[ "Resolve", "LATEST", "version" ]
train
https://github.com/Iotic-Labs/py-ubjson/blob/80dcacbc7bba1759c69759fb3109ac1c6574da68/ez_setup.py#L340-L357
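The PyPI JSON endpoint queried above can be exercised directly; a Python 3 sketch of the same lookup (network access assumed, and the legacy pypi.python.org host is assumed to still redirect to pypi.org):

import json
from urllib.request import urlopen

with urlopen('https://pypi.python.org/pypi/setuptools/json') as resp:
    doc = json.load(resp)
print(doc['info']['version'])  # the same field ez_setup pins its download to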
Iotic-Labs/py-ubjson
ubjson/decoder.py
load
def load(fp, no_bytes=False, object_hook=None, object_pairs_hook=None, intern_object_keys=False):
    """Decodes and returns UBJSON from the given file-like object

    Args:
        fp: read([size])-able object
        no_bytes (bool): If set, typed UBJSON arrays (uint8) will not be
                         converted to a bytes instance and instead treated
                         like any other array (i.e. result in a list).
        object_hook (callable): Called with the result of any object literal
                                decoded (instead of dict).
        object_pairs_hook (callable): Called with the result of any object
                                      literal decoded with an ordered list of
                                      pairs (instead of dict). Takes
                                      precedence over object_hook.
        intern_object_keys (bool): If set, object keys are interned which can
                                   provide a memory saving when many repeated
                                   keys are used. NOTE: This is not supported
                                   in Python2 (since interning does not apply
                                   to unicode) and will be ignored.

    Returns:
        Decoded object

    Raises:
        DecoderException: If a decoding failure occurred.

    UBJSON types are mapped to Python types as follows. Numbers in brackets
    denote Python version.

    +----------------------------------+---------------+
    | UBJSON                           | Python        |
    +==================================+===============+
    | object                           | dict          |
    +----------------------------------+---------------+
    | array                            | list          |
    +----------------------------------+---------------+
    | string                           | (3) str       |
    |                                  | (2) unicode   |
    +----------------------------------+---------------+
    | uint8, int8, int16, int32, int64 | (3) int       |
    |                                  | (2) int, long |
    +----------------------------------+---------------+
    | float32, float64                 | float         |
    +----------------------------------+---------------+
    | high_precision                   | Decimal       |
    +----------------------------------+---------------+
    | array (typed, uint8)             | (3) bytes     |
    |                                  | (2) str       |
    +----------------------------------+---------------+
    | true                             | True          |
    +----------------------------------+---------------+
    | false                            | False         |
    +----------------------------------+---------------+
    | null                             | None          |
    +----------------------------------+---------------+
    """
    if object_pairs_hook is None and object_hook is None:
        object_hook = __object_hook_noop

    if not callable(fp.read):
        raise TypeError('fp.read not callable')
    fp_read = fp.read

    marker = fp_read(1)
    try:
        try:
            return __METHOD_MAP[marker](fp_read, marker)
        except KeyError:
            pass
        if marker == ARRAY_START:
            return __decode_array(fp_read, bool(no_bytes), object_hook,
                                  object_pairs_hook, intern_object_keys)
        elif marker == OBJECT_START:
            return __decode_object(fp_read, bool(no_bytes), object_hook,
                                   object_pairs_hook, intern_object_keys)
        else:
            raise DecoderException('Invalid marker')
    except DecoderException as ex:
        raise_from(DecoderException(ex.args[0], fp), ex)
python
def load(fp, no_bytes=False, object_hook=None, object_pairs_hook=None, intern_object_keys=False):
    """Decodes and returns UBJSON from the given file-like object

    Args:
        fp: read([size])-able object
        no_bytes (bool): If set, typed UBJSON arrays (uint8) will not be
                         converted to a bytes instance and instead treated
                         like any other array (i.e. result in a list).
        object_hook (callable): Called with the result of any object literal
                                decoded (instead of dict).
        object_pairs_hook (callable): Called with the result of any object
                                      literal decoded with an ordered list of
                                      pairs (instead of dict). Takes
                                      precedence over object_hook.
        intern_object_keys (bool): If set, object keys are interned which can
                                   provide a memory saving when many repeated
                                   keys are used. NOTE: This is not supported
                                   in Python2 (since interning does not apply
                                   to unicode) and will be ignored.

    Returns:
        Decoded object

    Raises:
        DecoderException: If a decoding failure occurred.

    UBJSON types are mapped to Python types as follows. Numbers in brackets
    denote Python version.

    +----------------------------------+---------------+
    | UBJSON                           | Python        |
    +==================================+===============+
    | object                           | dict          |
    +----------------------------------+---------------+
    | array                            | list          |
    +----------------------------------+---------------+
    | string                           | (3) str       |
    |                                  | (2) unicode   |
    +----------------------------------+---------------+
    | uint8, int8, int16, int32, int64 | (3) int       |
    |                                  | (2) int, long |
    +----------------------------------+---------------+
    | float32, float64                 | float         |
    +----------------------------------+---------------+
    | high_precision                   | Decimal       |
    +----------------------------------+---------------+
    | array (typed, uint8)             | (3) bytes     |
    |                                  | (2) str       |
    +----------------------------------+---------------+
    | true                             | True          |
    +----------------------------------+---------------+
    | false                            | False         |
    +----------------------------------+---------------+
    | null                             | None          |
    +----------------------------------+---------------+
    """
    if object_pairs_hook is None and object_hook is None:
        object_hook = __object_hook_noop

    if not callable(fp.read):
        raise TypeError('fp.read not callable')
    fp_read = fp.read

    marker = fp_read(1)
    try:
        try:
            return __METHOD_MAP[marker](fp_read, marker)
        except KeyError:
            pass
        if marker == ARRAY_START:
            return __decode_array(fp_read, bool(no_bytes), object_hook,
                                  object_pairs_hook, intern_object_keys)
        elif marker == OBJECT_START:
            return __decode_object(fp_read, bool(no_bytes), object_hook,
                                   object_pairs_hook, intern_object_keys)
        else:
            raise DecoderException('Invalid marker')
    except DecoderException as ex:
        raise_from(DecoderException(ex.args[0], fp), ex)
[ "def", "load", "(", "fp", ",", "no_bytes", "=", "False", ",", "object_hook", "=", "None", ",", "object_pairs_hook", "=", "None", ",", "intern_object_keys", "=", "False", ")", ":", "if", "object_pairs_hook", "is", "None", "and", "object_hook", "is", "None", ":", "object_hook", "=", "__object_hook_noop", "if", "not", "callable", "(", "fp", ".", "read", ")", ":", "raise", "TypeError", "(", "'fp.read not callable'", ")", "fp_read", "=", "fp", ".", "read", "marker", "=", "fp_read", "(", "1", ")", "try", ":", "try", ":", "return", "__METHOD_MAP", "[", "marker", "]", "(", "fp_read", ",", "marker", ")", "except", "KeyError", ":", "pass", "if", "marker", "==", "ARRAY_START", ":", "return", "__decode_array", "(", "fp_read", ",", "bool", "(", "no_bytes", ")", ",", "object_hook", ",", "object_pairs_hook", ",", "intern_object_keys", ")", "elif", "marker", "==", "OBJECT_START", ":", "return", "__decode_object", "(", "fp_read", ",", "bool", "(", "no_bytes", ")", ",", "object_hook", ",", "object_pairs_hook", ",", "intern_object_keys", ")", "else", ":", "raise", "DecoderException", "(", "'Invalid marker'", ")", "except", "DecoderException", "as", "ex", ":", "raise_from", "(", "DecoderException", "(", "ex", ".", "args", "[", "0", "]", ",", "fp", ")", ",", "ex", ")" ]
Decodes and returns UBJSON from the given file-like object

Args:
    fp: read([size])-able object
    no_bytes (bool): If set, typed UBJSON arrays (uint8) will not be
                     converted to a bytes instance and instead treated like
                     any other array (i.e. result in a list).
    object_hook (callable): Called with the result of any object literal
                            decoded (instead of dict).
    object_pairs_hook (callable): Called with the result of any object
                                  literal decoded with an ordered list of
                                  pairs (instead of dict). Takes precedence
                                  over object_hook.
    intern_object_keys (bool): If set, object keys are interned which can
                               provide a memory saving when many repeated
                               keys are used. NOTE: This is not supported in
                               Python2 (since interning does not apply to
                               unicode) and will be ignored.

Returns:
    Decoded object

Raises:
    DecoderException: If a decoding failure occurred.

UBJSON types are mapped to Python types as follows. Numbers in brackets
denote Python version.

+----------------------------------+---------------+
| UBJSON                           | Python        |
+==================================+===============+
| object                           | dict          |
+----------------------------------+---------------+
| array                            | list          |
+----------------------------------+---------------+
| string                           | (3) str       |
|                                  | (2) unicode   |
+----------------------------------+---------------+
| uint8, int8, int16, int32, int64 | (3) int       |
|                                  | (2) int, long |
+----------------------------------+---------------+
| float32, float64                 | float         |
+----------------------------------+---------------+
| high_precision                   | Decimal       |
+----------------------------------+---------------+
| array (typed, uint8)             | (3) bytes     |
|                                  | (2) str       |
+----------------------------------+---------------+
| true                             | True          |
+----------------------------------+---------------+
| false                            | False         |
+----------------------------------+---------------+
| null                             | None          |
+----------------------------------+---------------+
[ "Decodes", "and", "returns", "UBJSON", "from", "the", "given", "file", "-", "like", "object" ]
train
https://github.com/Iotic-Labs/py-ubjson/blob/80dcacbc7bba1759c69759fb3109ac1c6574da68/ubjson/decoder.py#L307-L383
Iotic-Labs/py-ubjson
ubjson/decoder.py
loadb
def loadb(chars, no_bytes=False, object_hook=None, object_pairs_hook=None, intern_object_keys=False):
    """Decodes and returns UBJSON from the given bytes or bytearray object.
       See load() for available arguments."""
    with BytesIO(chars) as fp:
        return load(fp, no_bytes=no_bytes, object_hook=object_hook,
                    object_pairs_hook=object_pairs_hook,
                    intern_object_keys=intern_object_keys)
python
def loadb(chars, no_bytes=False, object_hook=None, object_pairs_hook=None, intern_object_keys=False):
    """Decodes and returns UBJSON from the given bytes or bytearray object.
       See load() for available arguments."""
    with BytesIO(chars) as fp:
        return load(fp, no_bytes=no_bytes, object_hook=object_hook,
                    object_pairs_hook=object_pairs_hook,
                    intern_object_keys=intern_object_keys)
[ "def", "loadb", "(", "chars", ",", "no_bytes", "=", "False", ",", "object_hook", "=", "None", ",", "object_pairs_hook", "=", "None", ",", "intern_object_keys", "=", "False", ")", ":", "with", "BytesIO", "(", "chars", ")", "as", "fp", ":", "return", "load", "(", "fp", ",", "no_bytes", "=", "no_bytes", ",", "object_hook", "=", "object_hook", ",", "object_pairs_hook", "=", "object_pairs_hook", ",", "intern_object_keys", "=", "intern_object_keys", ")" ]
Decodes and returns UBJSON from the given bytes or bytearray object. See load() for available arguments.
[ "Decodes", "and", "returns", "UBJSON", "from", "the", "given", "bytes", "or", "bytesarray", "object", ".", "See", "load", "()", "for", "available", "arguments", "." ]
train
https://github.com/Iotic-Labs/py-ubjson/blob/80dcacbc7bba1759c69759fb3109ac1c6574da68/ubjson/decoder.py#L386-L391
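The decoder hooks mirror those of the standard json module; for example, object_pairs_hook can preserve key order on Pythons whose dict is unordered:

from collections import OrderedDict
import ubjson

payload = ubjson.dumpb({"b": 1, "a": 2})
print(ubjson.loadb(payload, object_pairs_hook=OrderedDict))
# OrderedDict([('b', 1), ('a', 2)])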
jborean93/requests-credssp
requests_credssp/asn_structures.py
TSRequest.check_error_code
def check_error_code(self):
    """
    For CredSSP version 3 or newer, the server can respond with an NtStatus
    error code with details of what error occurred. This method checks
    whether the error code exists and raises an NTStatusException if it is
    not STATUS_SUCCESS.
    """
    # start off with STATUS_SUCCESS as a baseline
    status = NtStatusCodes.STATUS_SUCCESS

    error_code = self['errorCode']
    if error_code.isValue:
        # the ASN.1 Integer is stored as a signed integer, we need to
        # convert it to an unsigned integer
        status = ctypes.c_uint32(error_code).value

    if status != NtStatusCodes.STATUS_SUCCESS:
        raise NTStatusException(status)
python
def check_error_code(self):
    """
    For CredSSP version 3 or newer, the server can respond with an NtStatus
    error code with details of what error occurred. This method checks
    whether the error code exists and raises an NTStatusException if it is
    not STATUS_SUCCESS.
    """
    # start off with STATUS_SUCCESS as a baseline
    status = NtStatusCodes.STATUS_SUCCESS

    error_code = self['errorCode']
    if error_code.isValue:
        # the ASN.1 Integer is stored as a signed integer, we need to
        # convert it to an unsigned integer
        status = ctypes.c_uint32(error_code).value

    if status != NtStatusCodes.STATUS_SUCCESS:
        raise NTStatusException(status)
[ "def", "check_error_code", "(", "self", ")", ":", "# start off with STATUS_SUCCESS as a baseline", "status", "=", "NtStatusCodes", ".", "STATUS_SUCCESS", "error_code", "=", "self", "[", "'errorCode'", "]", "if", "error_code", ".", "isValue", ":", "# ASN.1 Integer is stored as an signed integer, we need to", "# convert it to a unsigned integer", "status", "=", "ctypes", ".", "c_uint32", "(", "error_code", ")", ".", "value", "if", "status", "!=", "NtStatusCodes", ".", "STATUS_SUCCESS", ":", "raise", "NTStatusException", "(", "status", ")" ]
For CredSSP version 3 or newer, the server can respond with an NtStatus error code with details of what error occurred. This method checks whether the error code exists and raises an NTStatusException if it is not STATUS_SUCCESS.
[ "For", "CredSSP", "version", "of", "3", "or", "newer", "the", "server", "can", "response", "with", "an", "NtStatus", "error", "code", "with", "details", "of", "what", "error", "occurred", ".", "This", "method", "will", "check", "if", "the", "error", "code", "exists", "and", "throws", "an", "NTStatusException", "if", "it", "is", "no", "STATUS_SUCCESS", "." ]
train
https://github.com/jborean93/requests-credssp/blob/470db8d74dff919da67cf382e9ff784d4e8dd053/requests_credssp/asn_structures.py#L108-L125
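The ctypes conversion in check_error_code() is needed because the ASN.1 decoder yields the 32-bit NTSTATUS as a signed Python int; a standalone illustration of the same trick, using STATUS_LOGON_FAILURE (0xC000006D) as an assumed example code:

import ctypes

signed = -1073741715  # how 0xC000006D arrives from the signed ASN.1 Integer
unsigned = ctypes.c_uint32(signed).value
print(hex(unsigned))  # 0xc000006d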
jborean93/requests-credssp
requests_credssp/spnego.py
get_auth_context
def get_auth_context(hostname, username, password, auth_mech):
    """
    Returns an AuthContext used in the CredSSP authentication process and to
    wrap/unwrap tokens sent to and from the client. This step gets the
    context based on the auth_mech configured and what is available on the
    server. It tries to favour system libraries like SSPI (Windows) or
    GSSAPI (Unix) if possible but falls back to a Python implementation of
    NTLM that works on all platforms.

    While in some cases the system libraries are used and they may not
    require a password to authenticate, CredSSP requires the password as it
    is sent to the remote host and so we won't rely on the user's logon
    tokens.

    :param hostname: The hostname of the server, this should be the FQDN
        when kerberos is desired
    :param username: The username to authenticate with
    :param password: The password of username
    :param auth_mech: The authentication mechanism to use;
        auto: Uses the SPNEGO/Negotiate mechanism which tries Kerberos if
            possible and then falls back to NTLM
        kerberos: Only allow authentication with Kerberos
        ntlm: Only allow authentication with NTLM
    :return: tuple
        AuthContext: The authentication context chosen that has been init
        generator step: The Python generator that takes further input tokens
            and produces output tokens to send to the server
        bytes first token: The first output token to send to the server
    """
    if auth_mech not in ["auto", "ntlm", "kerberos"]:
        raise InvalidConfigurationException("Invalid auth_mech supplied "
                                            "%s, must be auto, ntlm, or "
                                            "kerberos" % auth_mech)

    context_init = False
    out_token = None
    context_gen = None
    if HAS_SSPI:
        # always use SSPI when it is available
        log.debug("SSPI is available and will be used as auth backend")
        context = SSPIContext(hostname, username, password, auth_mech)
    elif HAS_GSSAPI:
        mechs_available = ["kerberos"]
        # to save on computing costs we only check the mechs that are
        # available when auth_mech is auto or ntlm as it doesn't matter when
        # kerberos is set (Kerberos is always available when python-gssapi
        # is installed)
        if auth_mech != "kerberos":
            log.debug("GSSAPI is available, determine what mechanism to use "
                      "as auth backend")
            mechs_available = GSSAPIContext.get_mechs_available()
            log.debug("GSSAPI mechs available: %s"
                      % ", ".join(mechs_available))

        if auth_mech in mechs_available or auth_mech == "kerberos":
            log.debug("GSSAPI with mech %s is being used as auth backend"
                      % auth_mech)
            context = GSSAPIContext(hostname, username, password, auth_mech)
        elif auth_mech == "ntlm":
            log.debug("GSSAPI is available but does not support NTLM, using "
                      "ntlm-auth as auth backend instead")
            context = NTLMContext(username, password)
        else:
            # make sure we can actually initialise a GSSAPI context in auto,
            # otherwise fallback to NTLMContext if that fails
            # we need to explicitly set auth_mech as kerberos if the GSSAPI
            # implementation does not support NTLM so we know to use NTLM if
            # GSSAPI fails
            try:
                log.debug("Attempting to use GSSAPI Kerberos as auth "
                          "backend")
                context = GSSAPIContext(hostname, username, password,
                                        "kerberos")
                context.init_context()
                context_gen = context.step()
                out_token = next(context_gen)
                context_init = True
                log.info("GSSAPI with mech kerberos is being used as auth "
                         "backend")
            except gssapi.exceptions.GSSError as err:
                log.warning("Failed to initialise GSSAPI context, falling "
                            "back to NTLM: %s" % str(err))
                context = NTLMContext(username, password)
    else:
        log.debug("SSPI or GSSAPI is not available, using ntlm-auth as auth "
                  "backend")
        if auth_mech == "kerberos":
            raise InvalidConfigurationException("The auth_mechanism is set "
                                                "to kerberos but SSPI or "
                                                "GSSAPI is not available")
        context = NTLMContext(username, password)

    # we only init the context when HAS_GSSAPI and it doesn't natively offer
    # SPNEGO that works with Windows, so let's init it here
    if not context_init:
        context.init_context()
        context_gen = context.step()
        out_token = next(context_gen)

    return context, context_gen, out_token
python
def get_auth_context(hostname, username, password, auth_mech):
    """
    Returns an AuthContext used in the CredSSP authentication process and to
    wrap/unwrap tokens sent to and from the client. This step gets the
    context based on the auth_mech configured and what is available on the
    server. It tries to favour system libraries like SSPI (Windows) or
    GSSAPI (Unix) if possible but falls back to a Python implementation of
    NTLM that works on all platforms.

    While in some cases the system libraries are used and they may not
    require a password to authenticate, CredSSP requires the password as it
    is sent to the remote host and so we won't rely on the user's logon
    tokens.

    :param hostname: The hostname of the server, this should be the FQDN
        when kerberos is desired
    :param username: The username to authenticate with
    :param password: The password of username
    :param auth_mech: The authentication mechanism to use;
        auto: Uses the SPNEGO/Negotiate mechanism which tries Kerberos if
            possible and then falls back to NTLM
        kerberos: Only allow authentication with Kerberos
        ntlm: Only allow authentication with NTLM
    :return: tuple
        AuthContext: The authentication context chosen that has been init
        generator step: The Python generator that takes further input tokens
            and produces output tokens to send to the server
        bytes first token: The first output token to send to the server
    """
    if auth_mech not in ["auto", "ntlm", "kerberos"]:
        raise InvalidConfigurationException("Invalid auth_mech supplied "
                                            "%s, must be auto, ntlm, or "
                                            "kerberos" % auth_mech)

    context_init = False
    out_token = None
    context_gen = None
    if HAS_SSPI:
        # always use SSPI when it is available
        log.debug("SSPI is available and will be used as auth backend")
        context = SSPIContext(hostname, username, password, auth_mech)
    elif HAS_GSSAPI:
        mechs_available = ["kerberos"]
        # to save on computing costs we only check the mechs that are
        # available when auth_mech is auto or ntlm as it doesn't matter when
        # kerberos is set (Kerberos is always available when python-gssapi
        # is installed)
        if auth_mech != "kerberos":
            log.debug("GSSAPI is available, determine what mechanism to use "
                      "as auth backend")
            mechs_available = GSSAPIContext.get_mechs_available()
            log.debug("GSSAPI mechs available: %s"
                      % ", ".join(mechs_available))

        if auth_mech in mechs_available or auth_mech == "kerberos":
            log.debug("GSSAPI with mech %s is being used as auth backend"
                      % auth_mech)
            context = GSSAPIContext(hostname, username, password, auth_mech)
        elif auth_mech == "ntlm":
            log.debug("GSSAPI is available but does not support NTLM, using "
                      "ntlm-auth as auth backend instead")
            context = NTLMContext(username, password)
        else:
            # make sure we can actually initialise a GSSAPI context in auto,
            # otherwise fallback to NTLMContext if that fails
            # we need to explicitly set auth_mech as kerberos if the GSSAPI
            # implementation does not support NTLM so we know to use NTLM if
            # GSSAPI fails
            try:
                log.debug("Attempting to use GSSAPI Kerberos as auth "
                          "backend")
                context = GSSAPIContext(hostname, username, password,
                                        "kerberos")
                context.init_context()
                context_gen = context.step()
                out_token = next(context_gen)
                context_init = True
                log.info("GSSAPI with mech kerberos is being used as auth "
                         "backend")
            except gssapi.exceptions.GSSError as err:
                log.warning("Failed to initialise GSSAPI context, falling "
                            "back to NTLM: %s" % str(err))
                context = NTLMContext(username, password)
    else:
        log.debug("SSPI or GSSAPI is not available, using ntlm-auth as auth "
                  "backend")
        if auth_mech == "kerberos":
            raise InvalidConfigurationException("The auth_mechanism is set "
                                                "to kerberos but SSPI or "
                                                "GSSAPI is not available")
        context = NTLMContext(username, password)

    # we only init the context when HAS_GSSAPI and it doesn't natively offer
    # SPNEGO that works with Windows, so let's init it here
    if not context_init:
        context.init_context()
        context_gen = context.step()
        out_token = next(context_gen)

    return context, context_gen, out_token
[ "def", "get_auth_context", "(", "hostname", ",", "username", ",", "password", ",", "auth_mech", ")", ":", "if", "auth_mech", "not", "in", "[", "\"auto\"", ",", "\"ntlm\"", ",", "\"kerberos\"", "]", ":", "raise", "InvalidConfigurationException", "(", "\"Invalid auth_mech supplied \"", "\"%s, must be auto, ntlm, or \"", "\"kerberos\"", "%", "auth_mech", ")", "context_init", "=", "False", "out_token", "=", "None", "context_gen", "=", "None", "if", "HAS_SSPI", ":", "# always use SSPI when it is available", "log", ".", "debug", "(", "\"SSPI is available and will be used as auth backend\"", ")", "context", "=", "SSPIContext", "(", "hostname", ",", "username", ",", "password", ",", "auth_mech", ")", "elif", "HAS_GSSAPI", ":", "mechs_available", "=", "[", "\"kerberos\"", "]", "# to save on computing costs we only check the mechs that are available", "# when auth_mech is auto or ntlm as it doesn't matter when kerberos", "# is set (Kerberos is always available when python-gssapi is installed", "if", "auth_mech", "!=", "\"kerberos\"", ":", "log", ".", "debug", "(", "\"GSSAPI is available, determine what mechanism to use \"", "\"as auth backend\"", ")", "mechs_available", "=", "GSSAPIContext", ".", "get_mechs_available", "(", ")", "log", ".", "debug", "(", "\"GSSAPI mechs available: %s\"", "%", "\", \"", ".", "join", "(", "mechs_available", ")", ")", "if", "auth_mech", "in", "mechs_available", "or", "auth_mech", "==", "\"kerberos\"", ":", "log", ".", "debug", "(", "\"GSSAPI with mech %s is being used as auth backend\"", "%", "auth_mech", ")", "context", "=", "GSSAPIContext", "(", "hostname", ",", "username", ",", "password", ",", "auth_mech", ")", "elif", "auth_mech", "==", "\"ntlm\"", ":", "log", ".", "debug", "(", "\"GSSAPI is available but does not support NTLM, using \"", "\"ntlm-auth as auth backend instead\"", ")", "context", "=", "NTLMContext", "(", "username", ",", "password", ")", "else", ":", "# make sure we can actually initialise a GSSAPI context in auto,", "# otherwise fallback to NTLMContext if that fails", "# we need to explicitly set auth_mech as kerberos if the GSSAPI", "# implementation does not support NTLM so we know to use NTLM if", "# GSSAPI fails", "try", ":", "log", ".", "debug", "(", "\"Attempting to use GSSAPI Kerberos as auth backend\"", ")", "context", "=", "GSSAPIContext", "(", "hostname", ",", "username", ",", "password", ",", "\"kerberos\"", ")", "context", ".", "init_context", "(", ")", "context_gen", "=", "context", ".", "step", "(", ")", "out_token", "=", "next", "(", "context_gen", ")", "context_init", "=", "True", "log", ".", "info", "(", "\"GSSAPI with mech kerberos is being used as auth \"", "\"backend\"", ")", "except", "gssapi", ".", "exceptions", ".", "GSSError", "as", "err", ":", "log", ".", "warning", "(", "\"Failed to initialise GSSAPI context, falling \"", "\"back to NTLM: %s\"", "%", "str", "(", "err", ")", ")", "context", "=", "NTLMContext", "(", "username", ",", "password", ")", "else", ":", "log", ".", "debug", "(", "\"SSPI or GSSAPI is not available, using ntlm-auth as auth \"", "\"backend\"", ")", "if", "auth_mech", "==", "\"kerberos\"", ":", "raise", "InvalidConfigurationException", "(", "\"The auth_mechanism is set \"", "\"to kerberos but SSPI or \"", "\"GSSAPI is not available\"", ")", "context", "=", "NTLMContext", "(", "username", ",", "password", ")", "# we only init the context when HAS_GSSAPI and it doesn't natively offer", "# SPNEGO that works with Windows, so let's init it here", "if", "not", "context_init", ":", "context", ".", 
"init_context", "(", ")", "context_gen", "=", "context", ".", "step", "(", ")", "out_token", "=", "next", "(", "context_gen", ")", "return", "context", ",", "context_gen", ",", "out_token" ]
Returns an AuthContext used in the CredSSP authentication process and to wrap/unwrap tokens sent to and from the client. This step gets the context based on the auth_mech configured and what is available on the server. It tries to favour system libraries like SSPI (Windows) or GSSAPI (Unix) if possible but falls back to a Python implementation of NTLM that works on all platforms. While in some cases the system libraries are used and they may not require a password to authenticate, CredSSP requires the password as it is sent to the remote host and so we won't rely on the user's logon tokens. :param hostname: The hostname of the server, this should be the FQDN when kerberos is desired :param username: The username to authenticate with :param password: The password of username :param auth_mech: The authentication mechanism to use; auto: Uses the SPNEGO/Negotiate mechanism which tries Kerberos if possible and then falls back to NTLM kerberos: Only allow authentication with Kerberos ntlm: Only allow authentication with NTLM :return: tuple AuthContext: The authentication context chosen that has been init generator step: The Python generator that takes further input tokens and produces output tokens to send to the server bytes first token: The first output token to send to the server
[ "Returns", "an", "AuthContext", "used", "in", "the", "CredSSP", "authentication", "process", "and", "to", "wrap", "/", "unwrap", "tokens", "sent", "to", "and", "from", "the", "client", ".", "This", "step", "get", "s", "the", "context", "based", "on", "the", "auth_mech", "configured", "and", "what", "is", "available", "on", "the", "server", ".", "It", "tries", "to", "favour", "system", "libraries", "like", "SSPI", "(", "Windows", ")", "or", "GSSAPI", "(", "Unix", ")", "if", "possible", "but", "falls", "back", "to", "a", "Python", "implementation", "of", "NTLM", "that", "works", "on", "all", "platforms", "." ]
train
https://github.com/jborean93/requests-credssp/blob/470db8d74dff919da67cf382e9ff784d4e8dd053/requests_credssp/spnego.py#L34-L129
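Usage sketch for the get_auth_context record above: the function returns a context object, a live generator, and the first token to put on the wire. The import path follows the record's func_path_in_repository; exchange_with_server is a hypothetical transport stub, not part of requests-credssp.

from requests_credssp.spnego import get_auth_context

def exchange_with_server(token):
    # Hypothetical stub: a real caller wraps the token in a TSRequest,
    # sends it over the TLS channel, and returns the server's reply token.
    raise NotImplementedError

context, auth_step, out_token = get_auth_context(
    "server.domain.local", "vagrant", "vagrant", "auto")
while not context.complete:
    # each server reply is fed back into the generator for the next token
    out_token = auth_step.send(exchange_with_server(out_token))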
jborean93/requests-credssp
requests_credssp/spnego.py
GSSAPIContext.get_mechs_available
def get_mechs_available(): """ Returns a list of auth mechanisms that are available to the local GSSAPI instance. Because we are interacting with Windows, we only care if SPNEGO, Kerberos and NTLM are available where NTLM is the only wildcard that may not be available by default. The only NTLM implementation that works properly is gss-ntlmssp and part of this test is to verify the gss-ntlmssp OID GSS_NTLMSSP_RESET_CRYPTO_OID_LENGTH is implemented which is required for SPNEGO and NTLM to work properly. :return: list - A list of supported mechs available in the installed version of GSSAPI """ ntlm_oid = GSSAPIContext._AUTH_MECHANISMS['ntlm'] ntlm_mech = gssapi.OID.from_int_seq(ntlm_oid) # GSS_NTLMSSP_RESET_CRYPTO_OID_LENGTH # github.com/simo5/gss-ntlmssp/blob/master/src/gssapi_ntlmssp.h#L68 reset_mech = gssapi.OID.from_int_seq("1.3.6.1.4.1.7165.655.1.3") try: # we don't actually care about the account used here so just use # a random username and password ntlm_context = GSSAPIContext._get_security_context( gssapi.NameType.user, ntlm_mech, "http@server", "username", "password" ) ntlm_context.step() set_sec_context_option(reset_mech, context=ntlm_context, value=b"\x00" * 4) except gssapi.exceptions.GSSError as exc: # failed to init NTLM and verify gss-ntlmssp is available, this # means NTLM is either not available or won't work # (not gss-ntlmssp) so we return kerberos as the only available # mechanism for the GSSAPI Context log.debug("Failed to init test NTLM context with GSSAPI: %s" % str(exc)) return ['kerberos'] else: return ['auto', 'kerberos', 'ntlm']
python
def get_mechs_available(): """ Returns a list of auth mechanisms that are available to the local GSSAPI instance. Because we are interacting with Windows, we only care if SPNEGO, Kerberos and NTLM are available where NTLM is the only wildcard that may not be available by default. The only NTLM implementation that works properly is gss-ntlmssp and part of this test is to verify the gss-ntlmssp OID GSS_NTLMSSP_RESET_CRYPTO_OID_LENGTH is implemented which is required for SPNEGO and NTLM to work properly. :return: list - A list of supported mechs available in the installed version of GSSAPI """ ntlm_oid = GSSAPIContext._AUTH_MECHANISMS['ntlm'] ntlm_mech = gssapi.OID.from_int_seq(ntlm_oid) # GSS_NTLMSSP_RESET_CRYPTO_OID_LENGTH # github.com/simo5/gss-ntlmssp/blob/master/src/gssapi_ntlmssp.h#L68 reset_mech = gssapi.OID.from_int_seq("1.3.6.1.4.1.7165.655.1.3") try: # we don't actually care about the account used here so just use # a random username and password ntlm_context = GSSAPIContext._get_security_context( gssapi.NameType.user, ntlm_mech, "http@server", "username", "password" ) ntlm_context.step() set_sec_context_option(reset_mech, context=ntlm_context, value=b"\x00" * 4) except gssapi.exceptions.GSSError as exc: # failed to init NTLM and verify gss-ntlmssp is available, this # means NTLM is either not available or won't work # (not gss-ntlmssp) so we return kerberos as the only available # mechanism for the GSSAPI Context log.debug("Failed to init test NTLM context with GSSAPI: %s" % str(exc)) return ['kerberos'] else: return ['auto', 'kerberos', 'ntlm']
[ "def", "get_mechs_available", "(", ")", ":", "ntlm_oid", "=", "GSSAPIContext", ".", "_AUTH_MECHANISMS", "[", "'ntlm'", "]", "ntlm_mech", "=", "gssapi", ".", "OID", ".", "from_int_seq", "(", "ntlm_oid", ")", "# GSS_NTLMSSP_RESET_CRYPTO_OID_LENGTH", "# github.com/simo5/gss-ntlmssp/blob/master/src/gssapi_ntlmssp.h#L68", "reset_mech", "=", "gssapi", ".", "OID", ".", "from_int_seq", "(", "\"1.3.6.1.4.1.7165.655.1.3\"", ")", "try", ":", "# we don't actually care about the account used here so just use", "# a random username and password", "ntlm_context", "=", "GSSAPIContext", ".", "_get_security_context", "(", "gssapi", ".", "NameType", ".", "user", ",", "ntlm_mech", ",", "\"http@server\"", ",", "\"username\"", ",", "\"password\"", ")", "ntlm_context", ".", "step", "(", ")", "set_sec_context_option", "(", "reset_mech", ",", "context", "=", "ntlm_context", ",", "value", "=", "b\"\\x00\"", "*", "4", ")", "except", "gssapi", ".", "exceptions", ".", "GSSError", "as", "exc", ":", "# failed to init NTLM and verify gss-ntlmssp is available, this", "# means NTLM is either not available or won't work", "# (not gss-ntlmssp) so we return kerberos as the only available", "# mechanism for the GSSAPI Context", "log", ".", "debug", "(", "\"Failed to init test NTLM context with GSSAPI: %s\"", "%", "str", "(", "exc", ")", ")", "return", "[", "'kerberos'", "]", "else", ":", "return", "[", "'auto'", ",", "'kerberos'", ",", "'ntlm'", "]" ]
Returns a list of auth mechanisms that are available to the local GSSAPI instance. Because we are interacting with Windows, we only care if SPNEGO, Kerberos and NTLM are available where NTLM is the only wildcard that may not be available by default. The only NTLM implementation that works properly is gss-ntlmssp and part of this test is to verify the gss-ntlmssp OID GSS_NTLMSSP_RESET_CRYPTO_OID_LENGTH is implemented which is required for SPNEGO and NTLM to work properly. :return: list - A list of supported mechs available in the installed version of GSSAPI
[ "Returns", "a", "list", "of", "auth", "mechanisms", "that", "are", "available", "to", "the", "local", "GSSAPI", "instance", ".", "Because", "we", "are", "interacting", "with", "Windows", "we", "only", "care", "if", "SPNEGO", "Kerberos", "and", "NTLM", "are", "available", "where", "NTLM", "is", "the", "only", "wildcard", "that", "may", "not", "be", "available", "by", "default", "." ]
train
https://github.com/jborean93/requests-credssp/blob/470db8d74dff919da67cf382e9ff784d4e8dd053/requests_credssp/spnego.py#L397-L440
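A minimal probe built on the record above, assuming python-gssapi is installed; the printed messages are illustrative only.

from requests_credssp.spnego import GSSAPIContext

# get_mechs_available spins up a throwaway NTLM context to detect
# gss-ntlmssp before a real mechanism is chosen.
mechs = GSSAPIContext.get_mechs_available()
if "ntlm" in mechs:
    print("gss-ntlmssp detected: auto, kerberos and ntlm usable via GSSAPI")
else:
    print("gss-ntlmssp missing: kerberos only via GSSAPI, NTLM via ntlm-auth")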
jborean93/requests-credssp
requests_credssp/credssp.py
CredSSPContext.credssp_generator
def credssp_generator(self): """ [MS-CSSP] 3.1.5 Processing Events and Sequencing Rules https://msdn.microsoft.com/en-us/library/cc226791.aspx Generator function that yields each CredSSP token to sent to the server. CredSSP has multiple steps that must be run for the client to successfully authenticate with the server and delegate the credentials. """ log.debug("Starting TLS handshake process") self.tls_connection = SSL.Connection(self.tls_context) self.tls_connection.set_connect_state() while True: try: self.tls_connection.do_handshake() except SSL.WantReadError: out_token = self.tls_connection.bio_read(self.BIO_BUFFER_SIZE) log.debug("Step 1. TLS Handshake, returning token: %s" % binascii.hexlify(out_token)) in_token = yield out_token, "Step 1. TLS Handshake" log.debug("Step 1. TLS Handshake, received token: %s" % binascii.hexlify(in_token)) self.tls_connection.bio_write(in_token) else: break log.debug("TLS Handshake complete. Protocol: %s, Cipher: %s" % (self.tls_connection.get_protocol_version_name(), self.tls_connection.get_cipher_name())) server_certificate = self.tls_connection.get_peer_certificate() server_public_key = self._get_subject_public_key(server_certificate) log.debug("Starting Authentication process") version = 6 context, auth_step, out_token = get_auth_context(self.hostname, self.username, self.password, self.auth_mechanism) while not context.complete: nego_token = NegoToken() nego_token['negoToken'] = out_token ts_request = TSRequest() ts_request['negoTokens'].append(nego_token) ts_request_token = encoder.encode(ts_request) log.debug("Step 2. Authenticate, returning token: %s" % binascii.hexlify(ts_request_token)) in_token = yield self.wrap(ts_request_token), \ "Step 2. Authenticate" in_token = self.unwrap(in_token) log.debug("Step 3. Authenticate, received token: %s" % binascii.hexlify(in_token)) ts_request = decoder.decode(in_token, asn1Spec=TSRequest())[0] ts_request.check_error_code() version = int(ts_request['version']) out_token = \ auth_step.send(bytes(ts_request['negoTokens'][0]['negoToken'])) version = min(version, TSRequest.CLIENT_VERSION) log.debug("Starting public key verification process at version %d" % version) if version < self.minimum_version: raise AuthenticationException("The reported server version was %d " "and did not meet the minimum " "requirements of %d" % (version, self.minimum_version)) if version > 4: nonce = os.urandom(32) else: log.warning("Reported server version was %d, susceptible to MitM " "attacks and should be patched - CVE 2018-0886" % version) nonce = None pub_key_auth = self._build_pub_key_auth(context, nonce, out_token, server_public_key) log.debug("Step 3. Server Authentication, returning token: %s" % binascii.hexlify(pub_key_auth)) in_token = yield (self.wrap(pub_key_auth), "Step 3. Server Authentication") in_token = self.unwrap(in_token) log.debug("Step 3. 
Server Authentication, received token: %s" % binascii.hexlify(in_token)) log.debug("Starting server public key response verification") ts_request = decoder.decode(in_token, asn1Spec=TSRequest())[0] ts_request.check_error_code() if not ts_request['pubKeyAuth'].isValue: raise AuthenticationException("The server did not response with " "pubKeyAuth info, authentication " "was rejected") if len(ts_request['negoTokens']) > 0: # SPNEGO auth returned the mechListMIC for us to verify auth_step.send(bytes(ts_request['negoTokens'][0]['negoToken'])) response_key = context.unwrap(bytes(ts_request['pubKeyAuth'])) self._verify_public_keys(nonce, response_key, server_public_key) log.debug("Sending encrypted credentials") enc_credentials = self._get_encrypted_credentials(context) yield self.wrap(enc_credentials), "Step 5. Delegate Credentials"
python
def credssp_generator(self): """ [MS-CSSP] 3.1.5 Processing Events and Sequencing Rules https://msdn.microsoft.com/en-us/library/cc226791.aspx Generator function that yields each CredSSP token to sent to the server. CredSSP has multiple steps that must be run for the client to successfully authenticate with the server and delegate the credentials. """ log.debug("Starting TLS handshake process") self.tls_connection = SSL.Connection(self.tls_context) self.tls_connection.set_connect_state() while True: try: self.tls_connection.do_handshake() except SSL.WantReadError: out_token = self.tls_connection.bio_read(self.BIO_BUFFER_SIZE) log.debug("Step 1. TLS Handshake, returning token: %s" % binascii.hexlify(out_token)) in_token = yield out_token, "Step 1. TLS Handshake" log.debug("Step 1. TLS Handshake, received token: %s" % binascii.hexlify(in_token)) self.tls_connection.bio_write(in_token) else: break log.debug("TLS Handshake complete. Protocol: %s, Cipher: %s" % (self.tls_connection.get_protocol_version_name(), self.tls_connection.get_cipher_name())) server_certificate = self.tls_connection.get_peer_certificate() server_public_key = self._get_subject_public_key(server_certificate) log.debug("Starting Authentication process") version = 6 context, auth_step, out_token = get_auth_context(self.hostname, self.username, self.password, self.auth_mechanism) while not context.complete: nego_token = NegoToken() nego_token['negoToken'] = out_token ts_request = TSRequest() ts_request['negoTokens'].append(nego_token) ts_request_token = encoder.encode(ts_request) log.debug("Step 2. Authenticate, returning token: %s" % binascii.hexlify(ts_request_token)) in_token = yield self.wrap(ts_request_token), \ "Step 2. Authenticate" in_token = self.unwrap(in_token) log.debug("Step 3. Authenticate, received token: %s" % binascii.hexlify(in_token)) ts_request = decoder.decode(in_token, asn1Spec=TSRequest())[0] ts_request.check_error_code() version = int(ts_request['version']) out_token = \ auth_step.send(bytes(ts_request['negoTokens'][0]['negoToken'])) version = min(version, TSRequest.CLIENT_VERSION) log.debug("Starting public key verification process at version %d" % version) if version < self.minimum_version: raise AuthenticationException("The reported server version was %d " "and did not meet the minimum " "requirements of %d" % (version, self.minimum_version)) if version > 4: nonce = os.urandom(32) else: log.warning("Reported server version was %d, susceptible to MitM " "attacks and should be patched - CVE 2018-0886" % version) nonce = None pub_key_auth = self._build_pub_key_auth(context, nonce, out_token, server_public_key) log.debug("Step 3. Server Authentication, returning token: %s" % binascii.hexlify(pub_key_auth)) in_token = yield (self.wrap(pub_key_auth), "Step 3. Server Authentication") in_token = self.unwrap(in_token) log.debug("Step 3. 
Server Authentication, received token: %s" % binascii.hexlify(in_token)) log.debug("Starting server public key response verification") ts_request = decoder.decode(in_token, asn1Spec=TSRequest())[0] ts_request.check_error_code() if not ts_request['pubKeyAuth'].isValue: raise AuthenticationException("The server did not response with " "pubKeyAuth info, authentication " "was rejected") if len(ts_request['negoTokens']) > 0: # SPNEGO auth returned the mechListMIC for us to verify auth_step.send(bytes(ts_request['negoTokens'][0]['negoToken'])) response_key = context.unwrap(bytes(ts_request['pubKeyAuth'])) self._verify_public_keys(nonce, response_key, server_public_key) log.debug("Sending encrypted credentials") enc_credentials = self._get_encrypted_credentials(context) yield self.wrap(enc_credentials), "Step 5. Delegate Credentials"
[ "def", "credssp_generator", "(", "self", ")", ":", "log", ".", "debug", "(", "\"Starting TLS handshake process\"", ")", "self", ".", "tls_connection", "=", "SSL", ".", "Connection", "(", "self", ".", "tls_context", ")", "self", ".", "tls_connection", ".", "set_connect_state", "(", ")", "while", "True", ":", "try", ":", "self", ".", "tls_connection", ".", "do_handshake", "(", ")", "except", "SSL", ".", "WantReadError", ":", "out_token", "=", "self", ".", "tls_connection", ".", "bio_read", "(", "self", ".", "BIO_BUFFER_SIZE", ")", "log", ".", "debug", "(", "\"Step 1. TLS Handshake, returning token: %s\"", "%", "binascii", ".", "hexlify", "(", "out_token", ")", ")", "in_token", "=", "yield", "out_token", ",", "\"Step 1. TLS Handshake\"", "log", ".", "debug", "(", "\"Step 1. TLS Handshake, received token: %s\"", "%", "binascii", ".", "hexlify", "(", "in_token", ")", ")", "self", ".", "tls_connection", ".", "bio_write", "(", "in_token", ")", "else", ":", "break", "log", ".", "debug", "(", "\"TLS Handshake complete. Protocol: %s, Cipher: %s\"", "%", "(", "self", ".", "tls_connection", ".", "get_protocol_version_name", "(", ")", ",", "self", ".", "tls_connection", ".", "get_cipher_name", "(", ")", ")", ")", "server_certificate", "=", "self", ".", "tls_connection", ".", "get_peer_certificate", "(", ")", "server_public_key", "=", "self", ".", "_get_subject_public_key", "(", "server_certificate", ")", "log", ".", "debug", "(", "\"Starting Authentication process\"", ")", "version", "=", "6", "context", ",", "auth_step", ",", "out_token", "=", "get_auth_context", "(", "self", ".", "hostname", ",", "self", ".", "username", ",", "self", ".", "password", ",", "self", ".", "auth_mechanism", ")", "while", "not", "context", ".", "complete", ":", "nego_token", "=", "NegoToken", "(", ")", "nego_token", "[", "'negoToken'", "]", "=", "out_token", "ts_request", "=", "TSRequest", "(", ")", "ts_request", "[", "'negoTokens'", "]", ".", "append", "(", "nego_token", ")", "ts_request_token", "=", "encoder", ".", "encode", "(", "ts_request", ")", "log", ".", "debug", "(", "\"Step 2. Authenticate, returning token: %s\"", "%", "binascii", ".", "hexlify", "(", "ts_request_token", ")", ")", "in_token", "=", "yield", "self", ".", "wrap", "(", "ts_request_token", ")", ",", "\"Step 2. Authenticate\"", "in_token", "=", "self", ".", "unwrap", "(", "in_token", ")", "log", ".", "debug", "(", "\"Step 3. 
Authenticate, received token: %s\"", "%", "binascii", ".", "hexlify", "(", "in_token", ")", ")", "ts_request", "=", "decoder", ".", "decode", "(", "in_token", ",", "asn1Spec", "=", "TSRequest", "(", ")", ")", "[", "0", "]", "ts_request", ".", "check_error_code", "(", ")", "version", "=", "int", "(", "ts_request", "[", "'version'", "]", ")", "out_token", "=", "auth_step", ".", "send", "(", "bytes", "(", "ts_request", "[", "'negoTokens'", "]", "[", "0", "]", "[", "'negoToken'", "]", ")", ")", "version", "=", "min", "(", "version", ",", "TSRequest", ".", "CLIENT_VERSION", ")", "log", ".", "debug", "(", "\"Starting public key verification process at version %d\"", "%", "version", ")", "if", "version", "<", "self", ".", "minimum_version", ":", "raise", "AuthenticationException", "(", "\"The reported server version was %d \"", "\"and did not meet the minimum \"", "\"requirements of %d\"", "%", "(", "version", ",", "self", ".", "minimum_version", ")", ")", "if", "version", ">", "4", ":", "nonce", "=", "os", ".", "urandom", "(", "32", ")", "else", ":", "log", ".", "warning", "(", "\"Reported server version was %d, susceptible to MitM \"", "\"attacks and should be patched - CVE 2018-0886\"", "%", "version", ")", "nonce", "=", "None", "pub_key_auth", "=", "self", ".", "_build_pub_key_auth", "(", "context", ",", "nonce", ",", "out_token", ",", "server_public_key", ")", "log", ".", "debug", "(", "\"Step 3. Server Authentication, returning token: %s\"", "%", "binascii", ".", "hexlify", "(", "pub_key_auth", ")", ")", "in_token", "=", "yield", "(", "self", ".", "wrap", "(", "pub_key_auth", ")", ",", "\"Step 3. Server Authentication\"", ")", "in_token", "=", "self", ".", "unwrap", "(", "in_token", ")", "log", ".", "debug", "(", "\"Step 3. Server Authentication, received token: %s\"", "%", "binascii", ".", "hexlify", "(", "in_token", ")", ")", "log", ".", "debug", "(", "\"Starting server public key response verification\"", ")", "ts_request", "=", "decoder", ".", "decode", "(", "in_token", ",", "asn1Spec", "=", "TSRequest", "(", ")", ")", "[", "0", "]", "ts_request", ".", "check_error_code", "(", ")", "if", "not", "ts_request", "[", "'pubKeyAuth'", "]", ".", "isValue", ":", "raise", "AuthenticationException", "(", "\"The server did not response with \"", "\"pubKeyAuth info, authentication \"", "\"was rejected\"", ")", "if", "len", "(", "ts_request", "[", "'negoTokens'", "]", ")", ">", "0", ":", "# SPNEGO auth returned the mechListMIC for us to verify", "auth_step", ".", "send", "(", "bytes", "(", "ts_request", "[", "'negoTokens'", "]", "[", "0", "]", "[", "'negoToken'", "]", ")", ")", "response_key", "=", "context", ".", "unwrap", "(", "bytes", "(", "ts_request", "[", "'pubKeyAuth'", "]", ")", ")", "self", ".", "_verify_public_keys", "(", "nonce", ",", "response_key", ",", "server_public_key", ")", "log", ".", "debug", "(", "\"Sending encrypted credentials\"", ")", "enc_credentials", "=", "self", ".", "_get_encrypted_credentials", "(", "context", ")", "yield", "self", ".", "wrap", "(", "enc_credentials", ")", ",", "\"Step 5. Delegate Credentials\"" ]
[MS-CSSP] 3.1.5 Processing Events and Sequencing Rules https://msdn.microsoft.com/en-us/library/cc226791.aspx Generator function that yields each CredSSP token to send to the server. CredSSP has multiple steps that must be run for the client to successfully authenticate with the server and delegate the credentials.
[ "[", "MS", "-", "CSSP", "]", "3", ".", "1", ".", "5", "Processing", "Events", "and", "Sequencing", "Rules", "https", ":", "//", "msdn", ".", "microsoft", ".", "com", "/", "en", "-", "us", "/", "library", "/", "cc226791", ".", "aspx" ]
train
https://github.com/jborean93/requests-credssp/blob/470db8d74dff919da67cf382e9ff784d4e8dd053/requests_credssp/credssp.py#L68-L173
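A sketch of how a caller might drive the credssp_generator record above; each yield hands back an (out_token, step_name) pair and expects the server's reply via send(). The CredSSPContext constructor arguments and send_to_server are assumptions for illustration, not the library's documented API.

from requests_credssp.credssp import CredSSPContext

def send_to_server(token):
    # Hypothetical HTTP round trip carrying the CredSSP token and
    # returning the server's response token.
    raise NotImplementedError

context = CredSSPContext("server.domain.local", "vagrant", "vagrant")  # assumed ctor
generator = context.credssp_generator()
out_token, step_name = next(generator)
while True:
    try:
        out_token, step_name = generator.send(send_to_server(out_token))
    except StopIteration:
        # the final yield ("Step 5. Delegate Credentials") ends the exchange
        break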
jborean93/requests-credssp
requests_credssp/credssp.py
CredSSPContext._build_pub_key_auth
def _build_pub_key_auth(self, context, nonce, auth_token, public_key): """ [MS-CSSP] 3.1.5 Processing Events and Sequencing Rules - Step 3 https://msdn.microsoft.com/en-us/library/cc226791.aspx This step sends the final SPNEGO token to the server if required and computes the value for the pubKeyAuth field for the protocol version negotiated. The format of the pubKeyAuth field depends on the version that the server supports. For version 2 to 4: The pubKeyAuth field is just wrapped using the authenticated context For versions 5 to 6: The pubKeyAuth is a sha256 hash of the server's public key plus a nonce and a magic string value. This hash is wrapped using the authenticated context and the nonce is added to the TSRequest alongside the nonce used in the hash calcs. :param context: The authenticated context :param nonce: If versions 5+, the nonce to use in the hash :param auth_token: If NTLM, this is the last msg (authenticate msg) to send in the same request :param public_key: The server's public key :return: The TSRequest as a byte string to send to the server """ ts_request = TSRequest() if auth_token is not None: nego_token = NegoToken() nego_token['negoToken'] = auth_token ts_request['negoTokens'].append(nego_token) if nonce is not None: ts_request['clientNonce'] = nonce hash_input = b"CredSSP Client-To-Server Binding Hash\x00" + \ nonce + public_key pub_value = hashlib.sha256(hash_input).digest() else: pub_value = public_key enc_public_key = context.wrap(pub_value) ts_request['pubKeyAuth'] = enc_public_key return encoder.encode(ts_request)
python
def _build_pub_key_auth(self, context, nonce, auth_token, public_key): """ [MS-CSSP] 3.1.5 Processing Events and Sequencing Rules - Step 3 https://msdn.microsoft.com/en-us/library/cc226791.aspx This step sends the final SPNEGO token to the server if required and computes the value for the pubKeyAuth field for the protocol version negotiated. The format of the pubKeyAuth field depends on the version that the server supports. For version 2 to 4: The pubKeyAuth field is just wrapped using the authenticated context For versions 5 to 6: The pubKeyAuth is a sha256 hash of the server's public key plus a nonce and a magic string value. This hash is wrapped using the authenticated context and the nonce is added to the TSRequest alongside the nonce used in the hash calcs. :param context: The authenticated context :param nonce: If versions 5+, the nonce to use in the hash :param auth_token: If NTLM, this is the last msg (authenticate msg) to send in the same request :param public_key: The server's public key :return: The TSRequest as a byte string to send to the server """ ts_request = TSRequest() if auth_token is not None: nego_token = NegoToken() nego_token['negoToken'] = auth_token ts_request['negoTokens'].append(nego_token) if nonce is not None: ts_request['clientNonce'] = nonce hash_input = b"CredSSP Client-To-Server Binding Hash\x00" + \ nonce + public_key pub_value = hashlib.sha256(hash_input).digest() else: pub_value = public_key enc_public_key = context.wrap(pub_value) ts_request['pubKeyAuth'] = enc_public_key return encoder.encode(ts_request)
[ "def", "_build_pub_key_auth", "(", "self", ",", "context", ",", "nonce", ",", "auth_token", ",", "public_key", ")", ":", "ts_request", "=", "TSRequest", "(", ")", "if", "auth_token", "is", "not", "None", ":", "nego_token", "=", "NegoToken", "(", ")", "nego_token", "[", "'negoToken'", "]", "=", "auth_token", "ts_request", "[", "'negoTokens'", "]", ".", "append", "(", "nego_token", ")", "if", "nonce", "is", "not", "None", ":", "ts_request", "[", "'clientNonce'", "]", "=", "nonce", "hash_input", "=", "b\"CredSSP Client-To-Server Binding Hash\\x00\"", "+", "nonce", "+", "public_key", "pub_value", "=", "hashlib", ".", "sha256", "(", "hash_input", ")", ".", "digest", "(", ")", "else", ":", "pub_value", "=", "public_key", "enc_public_key", "=", "context", ".", "wrap", "(", "pub_value", ")", "ts_request", "[", "'pubKeyAuth'", "]", "=", "enc_public_key", "return", "encoder", ".", "encode", "(", "ts_request", ")" ]
[MS-CSSP] 3.1.5 Processing Events and Sequencing Rules - Step 3 https://msdn.microsoft.com/en-us/library/cc226791.aspx This step sends the final SPNEGO token to the server if required and computes the value for the pubKeyAuth field for the protocol version negotiated. The format of the pubKeyAuth field depends on the version that the server supports. For version 2 to 4: The pubKeyAuth field is just wrapped using the authenticated context For versions 5 to 6: The pubKeyAuth is a sha256 hash of the server's public key plus a nonce and a magic string value. This hash is wrapped using the authenticated context and the nonce is added to the TSRequest alongside the nonce used in the hash calcs. :param context: The authenticated context :param nonce: If versions 5+, the nonce to use in the hash :param auth_token: If NTLM, this is the last msg (authenticate msg) to send in the same request :param public_key: The server's public key :return: The TSRequest as a byte string to send to the server
[ "[", "MS", "-", "CSSP", "]", "3", ".", "1", ".", "5", "Processing", "Events", "and", "Sequencing", "Rules", "-", "Step", "3", "https", ":", "//", "msdn", ".", "microsoft", ".", "com", "/", "en", "-", "us", "/", "library", "/", "cc226791", ".", "aspx" ]
train
https://github.com/jborean93/requests-credssp/blob/470db8d74dff919da67cf382e9ff784d4e8dd053/requests_credssp/credssp.py#L175-L221
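The version 5 and 6 branch of _build_pub_key_auth reduces to one SHA-256 over a fixed label, the client nonce and the server's DER public key. A self-contained illustration; the public_key bytes are a placeholder, not a real key.

import hashlib
import os

nonce = os.urandom(32)
public_key = b"\x30\x82\x01\x0a\x02\x82"  # placeholder DER SubjectPublicKey bytes
hash_input = b"CredSSP Client-To-Server Binding Hash\x00" + nonce + public_key
pub_value = hashlib.sha256(hash_input).digest()  # the value wrapped into pubKeyAuth
assert len(pub_value) == 32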
jborean93/requests-credssp
requests_credssp/credssp.py
CredSSPContext._verify_public_keys
def _verify_public_keys(self, nonce, server_key, public_key): """ [MS-CSSP] 3.1.5 Processing Events and Sequencing Rules - Step 4 https://msdn.microsoft.com/en-us/library/cc226791.aspx The rules vary depending on the server version For version 2 to 4: After the server received the public key in Step 3 it verifies the key with what was in the handshake. After the verification it then adds 1 to the first byte representing the public key and encrypts the bytes result by using the authentication protocol's encryption services. This method does the opposite where it will decrypt the public key returned from the server and subtract the first byte by 1 to compare with the public key we sent originally. For versions 5 to 6: A hash is calculated with the magic string value, the nonce that was sent to the server and the public key that was used. This is verified against the returned server public key. :param nonce: If version 5+, the nonce used in the hash calculations :param server_key: The unwrapped value returned in the TSRequest['pubKeyAuth'] field. :param public_key: The actual public key of the server """ if nonce is not None: hash_input = b"CredSSP Server-To-Client Binding Hash\x00" + nonce \ + public_key actual = hashlib.sha256(hash_input).digest() expected = server_key else: first_byte = struct.unpack("B", server_key[0:1])[0] actual_first_byte = struct.pack("B", first_byte - 1) actual = actual_first_byte + server_key[1:] expected = public_key if actual != expected: raise AuthenticationException("Could not verify key sent from the " "server, potential man in the " "middle attack")
python
def _verify_public_keys(self, nonce, server_key, public_key): """ [MS-CSSP] 3.1.5 Processing Events and Sequencing Rules - Step 4 https://msdn.microsoft.com/en-us/library/cc226791.aspx The rules vary depending on the server version For version 2 to 4: After the server received the public key in Step 3 it verifies the key with what was in the handshake. After the verification it then adds 1 to the first byte representing the public key and encrypts the bytes result by using the authentication protocol's encryption services. This method does the opposite where it will decrypt the public key returned from the server and subtract the first byte by 1 to compare with the public key we sent originally. For versions 5 to 6: A hash is calculated with the magic string value, the nonce that was sent to the server and the public key that was used. This is verified against the returned server public key. :param nonce: If version 5+, the nonce used in the hash calculations :param server_key: The unwrapped value returned in the TSRequest['pubKeyAuth'] field. :param public_key: The actual public key of the server """ if nonce is not None: hash_input = b"CredSSP Server-To-Client Binding Hash\x00" + nonce \ + public_key actual = hashlib.sha256(hash_input).digest() expected = server_key else: first_byte = struct.unpack("B", server_key[0:1])[0] actual_first_byte = struct.pack("B", first_byte - 1) actual = actual_first_byte + server_key[1:] expected = public_key if actual != expected: raise AuthenticationException("Could not verify key sent from the " "server, potential man in the " "middle attack")
[ "def", "_verify_public_keys", "(", "self", ",", "nonce", ",", "server_key", ",", "public_key", ")", ":", "if", "nonce", "is", "not", "None", ":", "hash_input", "=", "b\"CredSSP Server-To-Client Binding Hash\\x00\"", "+", "nonce", "+", "public_key", "actual", "=", "hashlib", ".", "sha256", "(", "hash_input", ")", ".", "digest", "(", ")", "expected", "=", "server_key", "else", ":", "first_byte", "=", "struct", ".", "unpack", "(", "\"B\"", ",", "server_key", "[", "0", ":", "1", "]", ")", "[", "0", "]", "actual_first_byte", "=", "struct", ".", "pack", "(", "\"B\"", ",", "first_byte", "-", "1", ")", "actual", "=", "actual_first_byte", "+", "server_key", "[", "1", ":", "]", "expected", "=", "public_key", "if", "actual", "!=", "expected", ":", "raise", "AuthenticationException", "(", "\"Could not verify key sent from the \"", "\"server, potential man in the \"", "\"middle attack\"", ")" ]
[MS-CSSP] 3.1.5 Processing Events and Sequencing Rules - Step 4 https://msdn.microsoft.com/en-us/library/cc226791.aspx The rules vary depending on the server version For version 2 to 4: After the server received the public key in Step 3 it verifies the key with what was in the handshake. After the verification it then adds 1 to the first byte representing the public key and encrypts the bytes result by using the authentication protocol's encryption services. This method does the opposite where it will decrypt the public key returned from the server and subtract the first byte by 1 to compare with the public key we sent originally. For versions 5 to 6: A hash is calculated with the magic string value, the nonce that was sent to the server and the public key that was used. This is verified against the returned server public key. :param nonce: If version 5+, the nonce used in the hash calculations :param server_key: The unwrapped value returned in the TSRequest['pubKeyAuth'] field. :param public_key: The actual public key of the server
[ "[", "MS", "-", "CSSP", "]", "3", ".", "1", ".", "5", "Processing", "Events", "and", "Sequencing", "Rules", "-", "Step", "4", "https", ":", "//", "msdn", ".", "microsoft", ".", "com", "/", "en", "-", "us", "/", "library", "/", "cc226791", ".", "aspx" ]
train
https://github.com/jborean93/requests-credssp/blob/470db8d74dff919da67cf382e9ff784d4e8dd053/requests_credssp/credssp.py#L223-L265
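For protocol versions 2 to 4 the check in _verify_public_keys is a pure byte transform, so it can be exercised offline. The key bytes below are placeholders and the server-side step is simulated.

import struct

public_key = b"\x30\x82\x01\x0a\x02\x82"  # placeholder DER key bytes
# simulated server-side transform for versions 2-4: add 1 to the first byte
first = struct.unpack("B", public_key[0:1])[0]
server_key = struct.pack("B", first + 1) + public_key[1:]
# client-side inverse, mirroring _verify_public_keys: subtract 1 again
first = struct.unpack("B", server_key[0:1])[0]
assert struct.pack("B", first - 1) + server_key[1:] == public_key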
jborean93/requests-credssp
requests_credssp/credssp.py
CredSSPContext._get_encrypted_credentials
def _get_encrypted_credentials(self, context): """ [MS-CSSP] 3.1.5 Processing Events and Sequencing Rules - Step 5 https://msdn.microsoft.com/en-us/library/cc226791.aspx After the client has verified the server's authenticity, it encrypts the user's credentials with the authentication protocol's encryption services. The resulting value is encapsulated in the authInfo field of the TSRequest structure and sent over the encrypted TLS channel to the server :param context: The authenticated security context :return: The encrypted TSRequest that contains the user's credentials """ ts_password = TSPasswordCreds() ts_password['domainName'] = context.domain.encode('utf-16-le') ts_password['userName'] = context.username.encode('utf-16-le') ts_password['password'] = context.password.encode('utf-16-le') ts_credentials = TSCredentials() ts_credentials['credType'] = ts_password.CRED_TYPE ts_credentials['credentials'] = encoder.encode(ts_password) ts_request = TSRequest() enc_credentials = context.wrap(encoder.encode(ts_credentials)) ts_request['authInfo'] = enc_credentials return encoder.encode(ts_request)
python
def _get_encrypted_credentials(self, context): """ [MS-CSSP] 3.1.5 Processing Events and Sequencing Rules - Step 5 https://msdn.microsoft.com/en-us/library/cc226791.aspx After the client has verified the server's authenticity, it encrypts the user's credentials with the authentication protocol's encryption services. The resulting value is encapsulated in the authInfo field of the TSRequest structure and sent over the encrypted TLS channel to the server :param context: The authenticated security context :return: The encrypted TSRequest that contains the user's credentials """ ts_password = TSPasswordCreds() ts_password['domainName'] = context.domain.encode('utf-16-le') ts_password['userName'] = context.username.encode('utf-16-le') ts_password['password'] = context.password.encode('utf-16-le') ts_credentials = TSCredentials() ts_credentials['credType'] = ts_password.CRED_TYPE ts_credentials['credentials'] = encoder.encode(ts_password) ts_request = TSRequest() enc_credentials = context.wrap(encoder.encode(ts_credentials)) ts_request['authInfo'] = enc_credentials return encoder.encode(ts_request)
[ "def", "_get_encrypted_credentials", "(", "self", ",", "context", ")", ":", "ts_password", "=", "TSPasswordCreds", "(", ")", "ts_password", "[", "'domainName'", "]", "=", "context", ".", "domain", ".", "encode", "(", "'utf-16-le'", ")", "ts_password", "[", "'userName'", "]", "=", "context", ".", "username", ".", "encode", "(", "'utf-16-le'", ")", "ts_password", "[", "'password'", "]", "=", "context", ".", "password", ".", "encode", "(", "'utf-16-le'", ")", "ts_credentials", "=", "TSCredentials", "(", ")", "ts_credentials", "[", "'credType'", "]", "=", "ts_password", ".", "CRED_TYPE", "ts_credentials", "[", "'credentials'", "]", "=", "encoder", ".", "encode", "(", "ts_password", ")", "ts_request", "=", "TSRequest", "(", ")", "enc_credentials", "=", "context", ".", "wrap", "(", "encoder", ".", "encode", "(", "ts_credentials", ")", ")", "ts_request", "[", "'authInfo'", "]", "=", "enc_credentials", "return", "encoder", ".", "encode", "(", "ts_request", ")" ]
[MS-CSSP] 3.1.5 Processing Events and Sequencing Rules - Step 5 https://msdn.microsoft.com/en-us/library/cc226791.aspx After the client has verified the server's authenticity, it encrypts the user's credentials with the authentication protocol's encryption services. The resulting value is encapsulated in the authInfo field of the TSRequest structure and sent over the encrypted TLS channel to the server :param context: The authenticated security context :return: The encrypted TSRequest that contains the user's credentials
[ "[", "MS", "-", "CSSP", "]", "3", ".", "1", ".", "5", "Processing", "Events", "and", "Sequencing", "Rules", "-", "Step", "5", "https", ":", "//", "msdn", ".", "microsoft", ".", "com", "/", "en", "-", "us", "/", "library", "/", "cc226791", ".", "aspx" ]
train
https://github.com/jborean93/requests-credssp/blob/470db8d74dff919da67cf382e9ff784d4e8dd053/requests_credssp/credssp.py#L267-L294
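The credential packing in _get_encrypted_credentials is plain ASN.1 before any encryption takes place. A sketch of the DER layering, assuming the structures are importable from requests_credssp.asn_structures; the credential strings are throwaway samples.

from pyasn1.codec.der import encoder
from requests_credssp.asn_structures import TSCredentials, TSPasswordCreds  # assumed module path

ts_password = TSPasswordCreds()
ts_password['domainName'] = u"DOMAIN".encode('utf-16-le')
ts_password['userName'] = u"vagrant".encode('utf-16-le')
ts_password['password'] = u"vagrant".encode('utf-16-le')

ts_credentials = TSCredentials()
ts_credentials['credType'] = ts_password.CRED_TYPE
ts_credentials['credentials'] = encoder.encode(ts_password)
der_blob = encoder.encode(ts_credentials)  # what context.wrap() would encrypt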
jborean93/requests-credssp
requests_credssp/credssp.py
CredSSPContext.wrap
def wrap(self, data): """ Encrypts the data in preparation for sending to the server. The data is encrypted using the TLS channel negotiated between the client and the server. :param data: a byte string of data to encrypt :return: a byte string of the encrypted data """ length = self.tls_connection.send(data) encrypted_data = b'' counter = 0 while True: try: encrypted_chunk = \ self.tls_connection.bio_read(self.BIO_BUFFER_SIZE) except SSL.WantReadError: break encrypted_data += encrypted_chunk # in case of a borked TLS connection, break the loop if the current # buffer counter is > the length of the original message plus the # the size of the buffer (to be careful) counter += self.BIO_BUFFER_SIZE if counter > length + self.BIO_BUFFER_SIZE: break return encrypted_data
python
def wrap(self, data): """ Encrypts the data in preparation for sending to the server. The data is encrypted using the TLS channel negotiated between the client and the server. :param data: a byte string of data to encrypt :return: a byte string of the encrypted data """ length = self.tls_connection.send(data) encrypted_data = b'' counter = 0 while True: try: encrypted_chunk = \ self.tls_connection.bio_read(self.BIO_BUFFER_SIZE) except SSL.WantReadError: break encrypted_data += encrypted_chunk # in case of a borked TLS connection, break the loop if the current # buffer counter is > the length of the original message plus the # the size of the buffer (to be careful) counter += self.BIO_BUFFER_SIZE if counter > length + self.BIO_BUFFER_SIZE: break return encrypted_data
[ "def", "wrap", "(", "self", ",", "data", ")", ":", "length", "=", "self", ".", "tls_connection", ".", "send", "(", "data", ")", "encrypted_data", "=", "b''", "counter", "=", "0", "while", "True", ":", "try", ":", "encrypted_chunk", "=", "self", ".", "tls_connection", ".", "bio_read", "(", "self", ".", "BIO_BUFFER_SIZE", ")", "except", "SSL", ".", "WantReadError", ":", "break", "encrypted_data", "+=", "encrypted_chunk", "# in case of a borked TLS connection, break the loop if the current", "# buffer counter is > the length of the original message plus the", "# the size of the buffer (to be careful)", "counter", "+=", "self", ".", "BIO_BUFFER_SIZE", "if", "counter", ">", "length", "+", "self", ".", "BIO_BUFFER_SIZE", ":", "break", "return", "encrypted_data" ]
Encrypts the data in preparation for sending to the server. The data is encrypted using the TLS channel negotiated between the client and the server. :param data: a byte string of data to encrypt :return: a byte string of the encrypted data
[ "Encrypts", "the", "data", "in", "preparation", "for", "sending", "to", "the", "server", ".", "The", "data", "is", "encrypted", "using", "the", "TLS", "channel", "negotiated", "between", "the", "client", "and", "the", "server", "." ]
train
https://github.com/jborean93/requests-credssp/blob/470db8d74dff919da67cf382e9ff784d4e8dd053/requests_credssp/credssp.py#L296-L324
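wrap() is the standard pyOpenSSL memory-BIO drain loop. The same idiom in isolation, assuming conn is an SSL.Connection that has already completed its handshake over memory BIOs:

from OpenSSL import SSL

def drain_outgoing(conn, bufsize=8192):
    # pull every pending ciphertext chunk from the outgoing memory BIO;
    # WantReadError signals the BIO is empty
    chunks = []
    while True:
        try:
            chunks.append(conn.bio_read(bufsize))
        except SSL.WantReadError:
            break
    return b"".join(chunks)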
jborean93/requests-credssp
requests_credssp/credssp.py
CredSSPContext.unwrap
def unwrap(self, encrypted_data): """ Decrypts the data send by the server using the TLS channel negotiated between the client and the server. :param encrypted_data: the byte string of the encrypted data :return: a byte string of the decrypted data """ length = self.tls_connection.bio_write(encrypted_data) data = b'' counter = 0 while True: try: data_chunk = self.tls_connection.recv(self.BIO_BUFFER_SIZE) except SSL.WantReadError: break data += data_chunk counter += self.BIO_BUFFER_SIZE if counter > length: break return data
python
def unwrap(self, encrypted_data): """ Decrypts the data send by the server using the TLS channel negotiated between the client and the server. :param encrypted_data: the byte string of the encrypted data :return: a byte string of the decrypted data """ length = self.tls_connection.bio_write(encrypted_data) data = b'' counter = 0 while True: try: data_chunk = self.tls_connection.recv(self.BIO_BUFFER_SIZE) except SSL.WantReadError: break data += data_chunk counter += self.BIO_BUFFER_SIZE if counter > length: break return data
[ "def", "unwrap", "(", "self", ",", "encrypted_data", ")", ":", "length", "=", "self", ".", "tls_connection", ".", "bio_write", "(", "encrypted_data", ")", "data", "=", "b''", "counter", "=", "0", "while", "True", ":", "try", ":", "data_chunk", "=", "self", ".", "tls_connection", ".", "recv", "(", "self", ".", "BIO_BUFFER_SIZE", ")", "except", "SSL", ".", "WantReadError", ":", "break", "data", "+=", "data_chunk", "counter", "+=", "self", ".", "BIO_BUFFER_SIZE", "if", "counter", ">", "length", ":", "break", "return", "data" ]
Decrypts the data sent by the server using the TLS channel negotiated between the client and the server. :param encrypted_data: the byte string of the encrypted data :return: a byte string of the decrypted data
[ "Decrypts", "the", "data", "send", "by", "the", "server", "using", "the", "TLS", "channel", "negotiated", "between", "the", "client", "and", "the", "server", "." ]
train
https://github.com/jborean93/requests-credssp/blob/470db8d74dff919da67cf382e9ff784d4e8dd053/requests_credssp/credssp.py#L326-L349
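unwrap() is the mirror image: push ciphertext into the incoming memory BIO, then read plaintext until the connection asks for more data. A sketch under the same assumption about conn:

from OpenSSL import SSL

def drain_incoming(conn, encrypted, bufsize=8192):
    conn.bio_write(encrypted)  # feed ciphertext into the incoming memory BIO
    chunks = []
    while True:
        try:
            chunks.append(conn.recv(bufsize))
        except SSL.WantReadError:
            break  # all complete TLS records have been decrypted
    return b"".join(chunks)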
jborean93/requests-credssp
requests_credssp/credssp.py
CredSSPContext._get_subject_public_key
def _get_subject_public_key(cert): """ Returns the SubjectPublicKey asn.1 field of the SubjectPublicKeyInfo field of the server's certificate. This is used in the server verification steps to thwart MitM attacks. :param cert: X509 certificate from pyOpenSSL .get_peer_certificate() :return: byte string of the asn.1 DER encoded SubjectPublicKey field """ public_key = cert.get_pubkey() cryptographic_key = public_key.to_cryptography_key() subject_public_key = cryptographic_key.public_bytes(Encoding.DER, PublicFormat.PKCS1) return subject_public_key
python
def _get_subject_public_key(cert): """ Returns the SubjectPublicKey asn.1 field of the SubjectPublicKeyInfo field of the server's certificate. This is used in the server verification steps to thwart MitM attacks. :param cert: X509 certificate from pyOpenSSL .get_peer_certificate() :return: byte string of the asn.1 DER encoded SubjectPublicKey field """ public_key = cert.get_pubkey() cryptographic_key = public_key.to_cryptography_key() subject_public_key = cryptographic_key.public_bytes(Encoding.DER, PublicFormat.PKCS1) return subject_public_key
[ "def", "_get_subject_public_key", "(", "cert", ")", ":", "public_key", "=", "cert", ".", "get_pubkey", "(", ")", "cryptographic_key", "=", "public_key", ".", "to_cryptography_key", "(", ")", "subject_public_key", "=", "cryptographic_key", ".", "public_bytes", "(", "Encoding", ".", "DER", ",", "PublicFormat", ".", "PKCS1", ")", "return", "subject_public_key" ]
Returns the SubjectPublicKey asn.1 field of the SubjectPublicKeyInfo field of the server's certificate. This is used in the server verification steps to thwart MitM attacks. :param cert: X509 certificate from pyOpenSSL .get_peer_certificate() :return: byte string of the asn.1 DER encoded SubjectPublicKey field
[ "Returns", "the", "SubjectPublicKey", "asn", ".", "1", "field", "of", "the", "SubjectPublicKeyInfo", "field", "of", "the", "server", "s", "certificate", ".", "This", "is", "used", "in", "the", "server", "verification", "steps", "to", "thwart", "MitM", "attacks", "." ]
train
https://github.com/jborean93/requests-credssp/blob/470db8d74dff919da67cf382e9ff784d4e8dd053/requests_credssp/credssp.py#L352-L365
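The same DER bytes can be produced straight from a PEM certificate with the cryptography package; PKCS1 output applies to RSA keys. pem_data is assumed to hold the certificate bytes, and cryptography 3.1+ no longer needs an explicit backend argument.

from cryptography import x509
from cryptography.hazmat.primitives.serialization import Encoding, PublicFormat

cert = x509.load_pem_x509_certificate(pem_data)  # pem_data: assumed PEM bytes
subject_public_key = cert.public_key().public_bytes(Encoding.DER,
                                                    PublicFormat.PKCS1)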
msuozzo/Lector
lector/reader.py
_KindleCloudReaderBrowser._to_reader_home
def _to_reader_home(self): """Navigate to the Cloud Reader library page. Raises: BrowserError: If the KCR homepage could not be loaded. ConnectionError: If there was a connection error. """ # NOTE: Prevents QueryInterface error caused by getting a URL # while switched to an iframe self.switch_to_default_content() self.get(_KindleCloudReaderBrowser._CLOUD_READER_URL) if self.title == u'Problem loading page': raise ConnectionError # Wait for either the login page or the reader to load login_or_reader_loaded = lambda br: ( br.find_elements_by_id('amzn_kcr') or br.find_elements_by_id('KindleLibraryIFrame')) self._wait(5).until(login_or_reader_loaded) try: self._wait(5).until(lambda br: br.title == u'Amazon.com Sign In') except TimeoutException: raise BrowserError('Failed to load Kindle Cloud Reader.') else: self._login()
python
def _to_reader_home(self): """Navigate to the Cloud Reader library page. Raises: BrowserError: If the KCR homepage could not be loaded. ConnectionError: If there was a connection error. """ # NOTE: Prevents QueryInterface error caused by getting a URL # while switched to an iframe self.switch_to_default_content() self.get(_KindleCloudReaderBrowser._CLOUD_READER_URL) if self.title == u'Problem loading page': raise ConnectionError # Wait for either the login page or the reader to load login_or_reader_loaded = lambda br: ( br.find_elements_by_id('amzn_kcr') or br.find_elements_by_id('KindleLibraryIFrame')) self._wait(5).until(login_or_reader_loaded) try: self._wait(5).until(lambda br: br.title == u'Amazon.com Sign In') except TimeoutException: raise BrowserError('Failed to load Kindle Cloud Reader.') else: self._login()
[ "def", "_to_reader_home", "(", "self", ")", ":", "# NOTE: Prevents QueryInterface error caused by getting a URL", "# while switched to an iframe", "self", ".", "switch_to_default_content", "(", ")", "self", ".", "get", "(", "_KindleCloudReaderBrowser", ".", "_CLOUD_READER_URL", ")", "if", "self", ".", "title", "==", "u'Problem loading page'", ":", "raise", "ConnectionError", "# Wait for either the login page or the reader to load", "login_or_reader_loaded", "=", "lambda", "br", ":", "(", "br", ".", "find_elements_by_id", "(", "'amzn_kcr'", ")", "or", "br", ".", "find_elements_by_id", "(", "'KindleLibraryIFrame'", ")", ")", "self", ".", "_wait", "(", "5", ")", ".", "until", "(", "login_or_reader_loaded", ")", "try", ":", "self", ".", "_wait", "(", "5", ")", ".", "until", "(", "lambda", "br", ":", "br", ".", "title", "==", "u'Amazon.com Sign In'", ")", "except", "TimeoutException", ":", "raise", "BrowserError", "(", "'Failed to load Kindle Cloud Reader.'", ")", "else", ":", "self", ".", "_login", "(", ")" ]
Navigate to the Cloud Reader library page. Raises: BrowserError: If the KCR homepage could not be loaded. ConnectionError: If there was a connection error.
[ "Navigate", "to", "the", "Cloud", "Reader", "library", "page", "." ]
train
https://github.com/msuozzo/Lector/blob/1570f7734a1c68f294648f44088a7ccb09c26241/lector/reader.py#L199-L225
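The "either page loaded" wait in _to_reader_home is a single predicate that returns a truthy value as soon as one of the two elements exists. The same pattern with a bare WebDriverWait; br is assumed to be a selenium 3 webdriver, matching the find_elements_by_id API in the record.

from selenium.webdriver.support.ui import WebDriverWait

login_or_reader_loaded = lambda drv: (
    drv.find_elements_by_id('amzn_kcr') or
    drv.find_elements_by_id('KindleLibraryIFrame'))
WebDriverWait(br, 5).until(login_or_reader_loaded)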
msuozzo/Lector
lector/reader.py
_KindleCloudReaderBrowser._login
def _login(self, max_tries=2): """Logs in to Kindle Cloud Reader. Args: max_tries: The maximum number of login attempts that will be made. Raises: BrowserError: If method called when browser not at a signin URL. LoginError: If login unsuccessful after `max_tries` attempts. """ if not self.current_url.startswith(_KindleCloudReaderBrowser._SIGNIN_URL): raise BrowserError( 'Current url "%s" is not a signin url ("%s")' % (self.current_url, _KindleCloudReaderBrowser._SIGNIN_URL)) email_field_loaded = lambda br: br.find_elements_by_id('ap_email') self._wait().until(email_field_loaded) tries = 0 while tries < max_tries: # Enter the username email_elem = self.find_element_by_id('ap_email') email_elem.clear() email_elem.send_keys(self._uname) # Enter the password pword_elem = self.find_element_by_id('ap_password') pword_elem.clear() pword_elem.send_keys(self._pword) def creds_entered(_): """Returns whether the credentials were properly entered.""" email_ok = email_elem.get_attribute('value') == self._uname pword_ok = pword_elem.get_attribute('value') == self._pword return email_ok and pword_ok kcr_page_loaded = lambda br: br.title == u'Kindle Cloud Reader' try: self._wait(5).until(creds_entered) self.find_element_by_id('signInSubmit-input').click() self._wait(5).until(kcr_page_loaded) except TimeoutException: tries += 1 else: return raise LoginError
python
def _login(self, max_tries=2): """Logs in to Kindle Cloud Reader. Args: max_tries: The maximum number of login attempts that will be made. Raises: BrowserError: If method called when browser not at a signin URL. LoginError: If login unsuccessful after `max_tries` attempts. """ if not self.current_url.startswith(_KindleCloudReaderBrowser._SIGNIN_URL): raise BrowserError( 'Current url "%s" is not a signin url ("%s")' % (self.current_url, _KindleCloudReaderBrowser._SIGNIN_URL)) email_field_loaded = lambda br: br.find_elements_by_id('ap_email') self._wait().until(email_field_loaded) tries = 0 while tries < max_tries: # Enter the username email_elem = self.find_element_by_id('ap_email') email_elem.clear() email_elem.send_keys(self._uname) # Enter the password pword_elem = self.find_element_by_id('ap_password') pword_elem.clear() pword_elem.send_keys(self._pword) def creds_entered(_): """Returns whether the credentials were properly entered.""" email_ok = email_elem.get_attribute('value') == self._uname pword_ok = pword_elem.get_attribute('value') == self._pword return email_ok and pword_ok kcr_page_loaded = lambda br: br.title == u'Kindle Cloud Reader' try: self._wait(5).until(creds_entered) self.find_element_by_id('signInSubmit-input').click() self._wait(5).until(kcr_page_loaded) except TimeoutException: tries += 1 else: return raise LoginError
[ "def", "_login", "(", "self", ",", "max_tries", "=", "2", ")", ":", "if", "not", "self", ".", "current_url", ".", "startswith", "(", "_KindleCloudReaderBrowser", ".", "_SIGNIN_URL", ")", ":", "raise", "BrowserError", "(", "'Current url \"%s\" is not a signin url (\"%s\")'", "%", "(", "self", ".", "current_url", ",", "_KindleCloudReaderBrowser", ".", "_SIGNIN_URL", ")", ")", "email_field_loaded", "=", "lambda", "br", ":", "br", ".", "find_elements_by_id", "(", "'ap_email'", ")", "self", ".", "_wait", "(", ")", ".", "until", "(", "email_field_loaded", ")", "tries", "=", "0", "while", "tries", "<", "max_tries", ":", "# Enter the username", "email_elem", "=", "self", ".", "find_element_by_id", "(", "'ap_email'", ")", "email_elem", ".", "clear", "(", ")", "email_elem", ".", "send_keys", "(", "self", ".", "_uname", ")", "# Enter the password", "pword_elem", "=", "self", ".", "find_element_by_id", "(", "'ap_password'", ")", "pword_elem", ".", "clear", "(", ")", "pword_elem", ".", "send_keys", "(", "self", ".", "_pword", ")", "def", "creds_entered", "(", "_", ")", ":", "\"\"\"Returns whether the credentials were properly entered.\"\"\"", "email_ok", "=", "email_elem", ".", "get_attribute", "(", "'value'", ")", "==", "self", ".", "_uname", "pword_ok", "=", "pword_elem", ".", "get_attribute", "(", "'value'", ")", "==", "self", ".", "_pword", "return", "email_ok", "and", "pword_ok", "kcr_page_loaded", "=", "lambda", "br", ":", "br", ".", "title", "==", "u'Kindle Cloud Reader'", "try", ":", "self", ".", "_wait", "(", "5", ")", ".", "until", "(", "creds_entered", ")", "self", ".", "find_element_by_id", "(", "'signInSubmit-input'", ")", ".", "click", "(", ")", "self", ".", "_wait", "(", "5", ")", ".", "until", "(", "kcr_page_loaded", ")", "except", "TimeoutException", ":", "tries", "+=", "1", "else", ":", "return", "raise", "LoginError" ]
Logs in to Kindle Cloud Reader. Args: max_tries: The maximum number of login attempts that will be made. Raises: BrowserError: If method called when browser not at a signin URL. LoginError: If login unsuccessful after `max_tries` attempts.
[ "Logs", "in", "to", "Kindle", "Cloud", "Reader", "." ]
train
https://github.com/msuozzo/Lector/blob/1570f7734a1c68f294648f44088a7ccb09c26241/lector/reader.py#L227-L274
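_login is a bounded retry around "act, then wait for the effect". The shape of that loop factored into a generic helper; the names are illustrative.

from selenium.common.exceptions import TimeoutException

def retry_until(action, condition, wait, max_tries=2):
    # run `action`, wait for `condition`, and retry on timeout,
    # reporting whether the condition was ever satisfied
    for _ in range(max_tries):
        action()
        try:
            wait.until(condition)
            return True
        except TimeoutException:
            continue
    return False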
msuozzo/Lector
lector/reader.py
_KindleCloudReaderBrowser._to_reader_frame
def _to_reader_frame(self): """Navigate to the KindleReader iframe.""" reader_frame = 'KindleReaderIFrame' frame_loaded = lambda br: br.find_elements_by_id(reader_frame) self._wait().until(frame_loaded) self.switch_to.frame(reader_frame) # pylint: disable=no-member reader_loaded = lambda br: br.find_elements_by_id('kindleReader_header') self._wait().until(reader_loaded)
python
def _to_reader_frame(self): """Navigate to the KindleReader iframe.""" reader_frame = 'KindleReaderIFrame' frame_loaded = lambda br: br.find_elements_by_id(reader_frame) self._wait().until(frame_loaded) self.switch_to.frame(reader_frame) # pylint: disable=no-member reader_loaded = lambda br: br.find_elements_by_id('kindleReader_header') self._wait().until(reader_loaded)
[ "def", "_to_reader_frame", "(", "self", ")", ":", "reader_frame", "=", "'KindleReaderIFrame'", "frame_loaded", "=", "lambda", "br", ":", "br", ".", "find_elements_by_id", "(", "reader_frame", ")", "self", ".", "_wait", "(", ")", ".", "until", "(", "frame_loaded", ")", "self", ".", "switch_to", ".", "frame", "(", "reader_frame", ")", "# pylint: disable=no-member", "reader_loaded", "=", "lambda", "br", ":", "br", ".", "find_elements_by_id", "(", "'kindleReader_header'", ")", "self", ".", "_wait", "(", ")", ".", "until", "(", "reader_loaded", ")" ]
Navigate to the KindleReader iframe.
[ "Navigate", "to", "the", "KindleReader", "iframe", "." ]
train
https://github.com/msuozzo/Lector/blob/1570f7734a1c68f294648f44088a7ccb09c26241/lector/reader.py#L276-L286
msuozzo/Lector
lector/reader.py
_KindleCloudReaderBrowser._wait_for_js
def _wait_for_js(self): """Wait for the Kindle Cloud Reader JS modules to initialize. These modules provide the interface used to execute API queries. """ # Wait for the Module Manager to load mod_mgr_script = ur"return window.hasOwnProperty('KindleModuleManager');" mod_mgr_loaded = lambda br: br.execute_script(mod_mgr_script) self._wait(5).until(mod_mgr_loaded) # Wait for the DB Client to load db_client_script = dedent(ur""" var done = arguments[0]; if (!window.hasOwnProperty('KindleModuleManager') || !KindleModuleManager .isModuleInitialized(Kindle.MODULE.DB_CLIENT)) { done(false); } else { KindleModuleManager .getModuleSync(Kindle.MODULE.DB_CLIENT) .getAppDb() .getAllBooks() .done(function(books) { done(!!books.length); }); } """) db_client_loaded = lambda br: br.execute_async_script(db_client_script) self._wait(5).until(db_client_loaded)
python
def _wait_for_js(self): """Wait for the Kindle Cloud Reader JS modules to initialize. These modules provide the interface used to execute API queries. """ # Wait for the Module Manager to load mod_mgr_script = ur"return window.hasOwnProperty('KindleModuleManager');" mod_mgr_loaded = lambda br: br.execute_script(mod_mgr_script) self._wait(5).until(mod_mgr_loaded) # Wait for the DB Client to load db_client_script = dedent(ur""" var done = arguments[0]; if (!window.hasOwnProperty('KindleModuleManager') || !KindleModuleManager .isModuleInitialized(Kindle.MODULE.DB_CLIENT)) { done(false); } else { KindleModuleManager .getModuleSync(Kindle.MODULE.DB_CLIENT) .getAppDb() .getAllBooks() .done(function(books) { done(!!books.length); }); } """) db_client_loaded = lambda br: br.execute_async_script(db_client_script) self._wait(5).until(db_client_loaded)
[ "def", "_wait_for_js", "(", "self", ")", ":", "# Wait for the Module Manager to load", "mod_mgr_script", "=", "ur\"return window.hasOwnProperty('KindleModuleManager');\"", "mod_mgr_loaded", "=", "lambda", "br", ":", "br", ".", "execute_script", "(", "mod_mgr_script", ")", "self", ".", "_wait", "(", "5", ")", ".", "until", "(", "mod_mgr_loaded", ")", "# Wait for the DB Client to load", "db_client_script", "=", "dedent", "(", "ur\"\"\"\n var done = arguments[0];\n if (!window.hasOwnProperty('KindleModuleManager') ||\n !KindleModuleManager\n .isModuleInitialized(Kindle.MODULE.DB_CLIENT)) {\n done(false);\n } else {\n KindleModuleManager\n .getModuleSync(Kindle.MODULE.DB_CLIENT)\n .getAppDb()\n .getAllBooks()\n .done(function(books) { done(!!books.length); });\n }\n \"\"\"", ")", "db_client_loaded", "=", "lambda", "br", ":", "br", ".", "execute_async_script", "(", "db_client_script", ")", "self", ".", "_wait", "(", "5", ")", ".", "until", "(", "db_client_loaded", ")" ]
Wait for the Kindle Cloud Reader JS modules to initialize. These modules provide the interface used to execute API queries.
[ "Wait", "for", "the", "Kindle", "Cloud", "Reader", "JS", "modules", "to", "initialize", "." ]
train
https://github.com/msuozzo/Lector/blob/1570f7734a1c68f294648f44088a7ccb09c26241/lector/reader.py#L288-L314
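For context (not Lector code): Selenium's execute_async_script appends a completion callback as the script's final argument and blocks until the script invokes it, which is why the snippet above hands its result to done. A minimal sketch, assuming a WebDriver instance named driver:

    # The general form reads the callback from the end of `arguments`;
    # the code above can use arguments[0] because it passes no extra args.
    ready = driver.execute_async_script("""
        var done = arguments[arguments.length - 1];
        done(window.hasOwnProperty('KindleModuleManager'));
    """)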
msuozzo/Lector
lector/reader.py
KindleCloudReaderAPI._get_api_call
def _get_api_call(self, function_name, *args): """Runs an api call with javascript-formatted arguments. Args: function_name: The name of the KindleAPI call to run. *args: Javascript-formatted arguments to pass to the API call. Returns: The result of the API call. Raises: APIError: If the API call fails or times out. """ api_call = dedent(""" var done = arguments[0]; KindleAPI.%(api_call)s(%(args)s).always(function(a) { done(a); }); """) % { 'api_call': function_name, 'args': ', '.join(args) } script = '\n'.join((api.API_SCRIPT, api_call)) try: return self._browser.execute_async_script(script) except TimeoutException: # FIXME: KCR will occasionally not load library and fall over raise APIError
python
def _get_api_call(self, function_name, *args): """Runs an api call with javascript-formatted arguments. Args: function_name: The name of the KindleAPI call to run. *args: Javascript-formatted arguments to pass to the API call. Returns: The result of the API call. Raises: APIError: If the API call fails or times out. """ api_call = dedent(""" var done = arguments[0]; KindleAPI.%(api_call)s(%(args)s).always(function(a) { done(a); }); """) % { 'api_call': function_name, 'args': ', '.join(args) } script = '\n'.join((api.API_SCRIPT, api_call)) try: return self._browser.execute_async_script(script) except TimeoutException: # FIXME: KCR will occasionally not load library and fall over raise APIError
[ "def", "_get_api_call", "(", "self", ",", "function_name", ",", "*", "args", ")", ":", "api_call", "=", "dedent", "(", "\"\"\"\n var done = arguments[0];\n KindleAPI.%(api_call)s(%(args)s).always(function(a) {\n done(a);\n });\n \"\"\"", ")", "%", "{", "'api_call'", ":", "function_name", ",", "'args'", ":", "', '", ".", "join", "(", "args", ")", "}", "script", "=", "'\\n'", ".", "join", "(", "(", "api", ".", "API_SCRIPT", ",", "api_call", ")", ")", "try", ":", "return", "self", ".", "_browser", ".", "execute_async_script", "(", "script", ")", "except", "TimeoutException", ":", "# FIXME: KCR will occassionally not load library and fall over", "raise", "APIError" ]
Runs an api call with javascript-formatted arguments. Args: function_name: The name of the KindleAPI call to run. *args: Javascript-formatted arguments to pass to the API call. Returns: The result of the API call. Raises: APIError: If the API call fails or times out.
[ "Runs", "an", "api", "call", "with", "javascript", "-", "formatted", "arguments", "." ]
train
https://github.com/msuozzo/Lector/blob/1570f7734a1c68f294648f44088a7ccb09c26241/lector/reader.py#L328-L355
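To make the templating above concrete, here is what the script fragment renders to for a single-argument call (the ASIN value is illustrative):

    from textwrap import dedent

    api_call = dedent("""
        var done = arguments[0];
        KindleAPI.%(api_call)s(%(args)s).always(function(a) {
            done(a);
        });
    """) % {'api_call': 'get_book_metadata', 'args': '"B00EXAMPLE"'}
    # api_call is now the literal JavaScript:
    #   var done = arguments[0];
    #   KindleAPI.get_book_metadata("B00EXAMPLE").always(function(a) {
    #       done(a);
    #   });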
msuozzo/Lector
lector/reader.py
KindleCloudReaderAPI.get_book_metadata
def get_book_metadata(self, asin): """Returns a book's metadata. Args: asin: The ASIN of the book to be queried. Returns: A `KindleBook` instance corresponding to the book associated with `asin`. """ kbm = self._get_api_call('get_book_metadata', '"%s"' % asin) return KindleCloudReaderAPI._kbm_to_book(kbm)
python
def get_book_metadata(self, asin): """Returns a book's metadata. Args: asin: The ASIN of the book to be queried. Returns: A `KindleBook` instance corresponding to the book associated with `asin`. """ kbm = self._get_api_call('get_book_metadata', '"%s"' % asin) return KindleCloudReaderAPI._kbm_to_book(kbm)
[ "def", "get_book_metadata", "(", "self", ",", "asin", ")", ":", "kbm", "=", "self", ".", "_get_api_call", "(", "'get_book_metadata'", ",", "'\"%s\"'", "%", "asin", ")", "return", "KindleCloudReaderAPI", ".", "_kbm_to_book", "(", "kbm", ")" ]
Returns a book's metadata. Args: asin: The ASIN of the book to be queried. Returns: A `KindleBook` instance corresponding to the book associated with `asin`.
[ "Returns", "a", "book", "s", "metadata", "." ]
train
https://github.com/msuozzo/Lector/blob/1570f7734a1c68f294648f44088a7ccb09c26241/lector/reader.py#L387-L398
msuozzo/Lector
lector/reader.py
KindleCloudReaderAPI.get_book_progress
def get_book_progress(self, asin): """Returns the progress data available for a book. NOTE: A summary of the two progress formats can be found in the docstring for `ReadingProgress`. Args: asin: The asin of the book to be queried. Returns: A `ReadingProgress` instance corresponding to the book associated with `asin`. """ kbp = self._get_api_call('get_book_progress', '"%s"' % asin) return KindleCloudReaderAPI._kbp_to_progress(kbp)
python
def get_book_progress(self, asin): """Returns the progress data available for a book. NOTE: A summary of the two progress formats can be found in the docstring for `ReadingProgress`. Args: asin: The asin of the book to be queried. Returns: A `ReadingProgress` instance corresponding to the book associated with `asin`. """ kbp = self._get_api_call('get_book_progress', '"%s"' % asin) return KindleCloudReaderAPI._kbp_to_progress(kbp)
[ "def", "get_book_progress", "(", "self", ",", "asin", ")", ":", "kbp", "=", "self", ".", "_get_api_call", "(", "'get_book_progress'", ",", "'\"%s\"'", "%", "asin", ")", "return", "KindleCloudReaderAPI", ".", "_kbp_to_progress", "(", "kbp", ")" ]
Returns the progress data available for a book. NOTE: A summary of the two progress formats can be found in the docstring for `ReadingProgress`. Args: asin: The asin of the book to be queried. Returns: A `ReadingProgress` instance corresponding to the book associated with `asin`.
[ "Returns", "the", "progress", "data", "available", "for", "a", "book", "." ]
train
https://github.com/msuozzo/Lector/blob/1570f7734a1c68f294648f44088a7ccb09c26241/lector/reader.py#L410-L424
msuozzo/Lector
lector/reader.py
KindleCloudReaderAPI.get_library_progress
def get_library_progress(self): """Returns the reading progress for all books in the kindle library. Returns: A mapping of ASINs to `ReadingProgress` instances corresponding to the books in the current user's library. """ kbp_dict = self._get_api_call('get_library_progress') return {asin: KindleCloudReaderAPI._kbp_to_progress(kbp) for asin, kbp in kbp_dict.iteritems()}
python
def get_library_progress(self): """Returns the reading progress for all books in the kindle library. Returns: A mapping of ASINs to `ReadingProgress` instances corresponding to the books in the current user's library. """ kbp_dict = self._get_api_call('get_library_progress') return {asin: KindleCloudReaderAPI._kbp_to_progress(kbp) for asin, kbp in kbp_dict.iteritems()}
[ "def", "get_library_progress", "(", "self", ")", ":", "kbp_dict", "=", "self", ".", "_get_api_call", "(", "'get_library_progress'", ")", "return", "{", "asin", ":", "KindleCloudReaderAPI", ".", "_kbp_to_progress", "(", "kbp", ")", "for", "asin", ",", "kbp", "in", "kbp_dict", ".", "iteritems", "(", ")", "}" ]
Returns the reading progress for all books in the kindle library. Returns: A mapping of ASINs to `ReadingProgress` instances corresponding to the books in the current user's library.
[ "Returns", "the", "reading", "progress", "for", "all", "books", "in", "the", "kindle", "library", "." ]
train
https://github.com/msuozzo/Lector/blob/1570f7734a1c68f294648f44088a7ccb09c26241/lector/reader.py#L426-L435
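A hedged usage sketch, where kcr stands for an already-constructed KindleCloudReaderAPI instance (the implementation iterates with iteritems(), i.e. it targets Python 2; the returned value is a plain dict either way):

    progress_by_asin = kcr.get_library_progress()
    for asin, progress in sorted(progress_by_asin.items()):
        print(asin, progress)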
msuozzo/Lector
lector/reader.py
KindleCloudReaderAPI.get_instance
def get_instance(*args, **kwargs): """Context manager for an instance of `KindleCloudReaderAPI`.""" inst = KindleCloudReaderAPI(*args, **kwargs) try: yield inst except Exception: raise finally: inst.close()
python
def get_instance(*args, **kwargs): """Context manager for an instance of `KindleCloudReaderAPI`.""" inst = KindleCloudReaderAPI(*args, **kwargs) try: yield inst except Exception: raise finally: inst.close()
[ "def", "get_instance", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "inst", "=", "KindleCloudReaderAPI", "(", "*", "args", ",", "*", "*", "kwargs", ")", "try", ":", "yield", "inst", "except", "Exception", ":", "raise", "finally", ":", "inst", ".", "close", "(", ")" ]
Context manager for an instance of `KindleCloudReaderAPI`.
[ "Context", "manager", "for", "an", "instance", "of", "KindleCloudReaderAPI", "." ]
train
https://github.com/msuozzo/Lector/blob/1570f7734a1c68f294648f44088a7ccb09c26241/lector/reader.py#L443-L452
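Since get_instance is written as a generator that closes the browser in a finally block, it is presumably wrapped with contextlib.contextmanager elsewhere in the module. Assuming that, and with illustrative credentials and ASIN (the import path follows func_code_url above), usage would look like:

    from lector.reader import KindleCloudReaderAPI

    with KindleCloudReaderAPI.get_instance('user@example.com', 'hunter2') as kcr:
        book = kcr.get_book_metadata('B00EXAMPLE')
        progress = kcr.get_book_progress('B00EXAMPLE')
    # The browser is closed on exit even if an API call raised.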
alexanderlukanin13/coolname
coolname/loader.py
load_config
def load_config(path): """ Loads configuration from a path. Path can be a json file, or a directory containing config.json and zero or more *.txt files with word lists or phrase lists. Returns config dict. Raises InitializationError when something is wrong. """ path = os.path.abspath(path) if os.path.isdir(path): config, wordlists = _load_data(path) elif os.path.isfile(path): config = _load_config(path) wordlists = {} else: raise InitializationError('File or directory not found: {0}'.format(path)) for name, wordlist in wordlists.items(): if name in config: raise InitializationError("Conflict: list {!r} is defined both in config " "and in *.txt file. If it's a {!r} list, " "you should remove it from config." .format(name, _CONF.TYPE.WORDS)) config[name] = wordlist return config
python
def load_config(path): """ Loads configuration from a path. Path can be a json file, or a directory containing config.json and zero or more *.txt files with word lists or phrase lists. Returns config dict. Raises InitializationError when something is wrong. """ path = os.path.abspath(path) if os.path.isdir(path): config, wordlists = _load_data(path) elif os.path.isfile(path): config = _load_config(path) wordlists = {} else: raise InitializationError('File or directory not found: {0}'.format(path)) for name, wordlist in wordlists.items(): if name in config: raise InitializationError("Conflict: list {!r} is defined both in config " "and in *.txt file. If it's a {!r} list, " "you should remove it from config." .format(name, _CONF.TYPE.WORDS)) config[name] = wordlist return config
[ "def", "load_config", "(", "path", ")", ":", "path", "=", "os", ".", "path", ".", "abspath", "(", "path", ")", "if", "os", ".", "path", ".", "isdir", "(", "path", ")", ":", "config", ",", "wordlists", "=", "_load_data", "(", "path", ")", "elif", "os", ".", "path", ".", "isfile", "(", "path", ")", ":", "config", "=", "_load_config", "(", "path", ")", "wordlists", "=", "{", "}", "else", ":", "raise", "InitializationError", "(", "'File or directory not found: {0}'", ".", "format", "(", "path", ")", ")", "for", "name", ",", "wordlist", "in", "wordlists", ".", "items", "(", ")", ":", "if", "name", "in", "config", ":", "raise", "InitializationError", "(", "\"Conflict: list {!r} is defined both in config \"", "\"and in *.txt file. If it's a {!r} list, \"", "\"you should remove it from config.\"", ".", "format", "(", "name", ",", "_CONF", ".", "TYPE", ".", "WORDS", ")", ")", "config", "[", "name", "]", "=", "wordlist", "return", "config" ]
Loads configuration from a path. Path can be a json file, or a directory containing config.json and zero or more *.txt files with word lists or phrase lists. Returns config dict. Raises InitializationError when something is wrong.
[ "Loads", "configuration", "from", "a", "path", "." ]
train
https://github.com/alexanderlukanin13/coolname/blob/416cc39254ab9e921fd5be77dfe6cdafbad0300c/coolname/loader.py#L19-L45
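A quick sketch of both accepted layouts, with illustrative paths (the import path follows func_code_url above):

    from coolname.loader import load_config

    config = load_config('/path/to/config.json')  # a single JSON file, or
    config = load_config('/path/to/data_dir')     # a directory with config.json plus *.txt lists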
alexanderlukanin13/coolname
coolname/loader.py
_load_data
def _load_data(path): """ Loads data from a directory. Returns tuple (config_dict, wordlists). Raises Exception on failure (e.g. if data is corrupted). """ path = os.path.abspath(path) if not os.path.isdir(path): raise InitializationError('Directory not found: {0}'.format(path)) wordlists = {} for file_name in os.listdir(path): if os.path.splitext(file_name)[1] != '.txt': continue file_path = os.path.join(path, file_name) name = os.path.splitext(os.path.split(file_path)[1])[0] try: with codecs.open(file_path, encoding='utf-8') as file: wordlists[name] = _load_wordlist(name, file) except OSError as ex: raise InitializationError('Failed to read {}: {}'.format(file_path, ex)) config = _load_config(os.path.join(path, 'config.json')) return (config, wordlists)
python
def _load_data(path): """ Loads data from a directory. Returns tuple (config_dict, wordlists). Raises Exception on failure (e.g. if data is corrupted). """ path = os.path.abspath(path) if not os.path.isdir(path): raise InitializationError('Directory not found: {0}'.format(path)) wordlists = {} for file_name in os.listdir(path): if os.path.splitext(file_name)[1] != '.txt': continue file_path = os.path.join(path, file_name) name = os.path.splitext(os.path.split(file_path)[1])[0] try: with codecs.open(file_path, encoding='utf-8') as file: wordlists[name] = _load_wordlist(name, file) except OSError as ex: raise InitializationError('Failed to read {}: {}'.format(file_path, ex)) config = _load_config(os.path.join(path, 'config.json')) return (config, wordlists)
[ "def", "_load_data", "(", "path", ")", ":", "path", "=", "os", ".", "path", ".", "abspath", "(", "path", ")", "if", "not", "os", ".", "path", ".", "isdir", "(", "path", ")", ":", "raise", "InitializationError", "(", "'Directory not found: {0}'", ".", "format", "(", "path", ")", ")", "wordlists", "=", "{", "}", "for", "file_name", "in", "os", ".", "listdir", "(", "path", ")", ":", "if", "os", ".", "path", ".", "splitext", "(", "file_name", ")", "[", "1", "]", "!=", "'.txt'", ":", "continue", "file_path", "=", "os", ".", "path", ".", "join", "(", "path", ",", "file_name", ")", "name", "=", "os", ".", "path", ".", "splitext", "(", "os", ".", "path", ".", "split", "(", "file_path", ")", "[", "1", "]", ")", "[", "0", "]", "try", ":", "with", "codecs", ".", "open", "(", "file_path", ",", "encoding", "=", "'utf-8'", ")", "as", "file", ":", "wordlists", "[", "name", "]", "=", "_load_wordlist", "(", "name", ",", "file", ")", "except", "OSError", "as", "ex", ":", "raise", "InitializationError", "(", "'Failed to read {}: {}'", ".", "format", "(", "file_path", ",", "ex", ")", ")", "config", "=", "_load_config", "(", "os", ".", "path", ".", "join", "(", "path", ",", "'config.json'", ")", ")", "return", "(", "config", ",", "wordlists", ")" ]
Loads data from a directory. Returns tuple (config_dict, wordlists). Raises Exception on failure (e.g. if data is corrupted).
[ "Loads", "data", "from", "a", "directory", ".", "Returns", "tuple", "(", "config_dict", "wordlists", ")", ".", "Raises", "Exception", "on", "failure", "(", "e", ".", "g", ".", "if", "data", "is", "corrupted", ")", "." ]
train
https://github.com/alexanderlukanin13/coolname/blob/416cc39254ab9e921fd5be77dfe6cdafbad0300c/coolname/loader.py#L48-L69
alexanderlukanin13/coolname
coolname/loader.py
_parse_option
def _parse_option(line): """ Parses option line. Returns (name, value). Raises ValueError on invalid syntax or unknown option. """ match = _OPTION_REGEX.match(line) if not match: raise ValueError('Invalid syntax') for name, type_ in _OPTIONS: if name == match.group(1): return name, type_(match.group(2)) raise ValueError('Unknown option')
python
def _parse_option(line): """ Parses option line. Returns (name, value). Raises ValueError on invalid syntax or unknown option. """ match = _OPTION_REGEX.match(line) if not match: raise ValueError('Invalid syntax') for name, type_ in _OPTIONS: if name == match.group(1): return name, type_(match.group(2)) raise ValueError('Unknown option')
[ "def", "_parse_option", "(", "line", ")", ":", "match", "=", "_OPTION_REGEX", ".", "match", "(", "line", ")", "if", "not", "match", ":", "raise", "ValueError", "(", "'Invalid syntax'", ")", "for", "name", ",", "type_", "in", "_OPTIONS", ":", "if", "name", "==", "match", ".", "group", "(", "1", ")", ":", "return", "name", ",", "type_", "(", "match", ".", "group", "(", "2", ")", ")", "raise", "ValueError", "(", "'Unknown option'", ")" ]
Parses option line. Returns (name, value). Raises ValueError on invalid syntax or unknown option.
[ "Parses", "option", "line", ".", "Returns", "(", "name", "value", ")", ".", "Raises", "ValueError", "on", "invalid", "syntax", "or", "unknown", "option", "." ]
train
https://github.com/alexanderlukanin13/coolname/blob/416cc39254ab9e921fd5be77dfe6cdafbad0300c/coolname/loader.py#L95-L107
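Judging from how _load_wordlist consumes the result, _OPTIONS maps max_length and number_of_words to int. An illustrative round trip, treating the exact _OPTION_REGEX (not shown here) as an assumption:

    _parse_option('max_length = 10')      # -> ('max_length', 10)
    _parse_option('number_of_words = 2')  # -> ('number_of_words', 2)
    _parse_option('colour = 3')           # -> ValueError('Unknown option'),
                                          #    assuming the regex accepts any identifier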
alexanderlukanin13/coolname
coolname/loader.py
_load_wordlist
def _load_wordlist(name, stream): """ Loads list of words or phrases from file. Returns "words" or "phrases" dictionary, the same as used in config. Raises Exception if file is missing or invalid. """ items = [] max_length = None multiword = False multiword_start = None number_of_words = None for i, line in enumerate(stream, start=1): line = line.strip() if not line or line.startswith('#'): continue # Is it an option line, e.g. 'max_length = 10'? if '=' in line: if items: raise ConfigurationError('Invalid assignment at list {!r} line {}: {!r} ' '(options must be defined before words)' .format(name, i, line)) try: option, option_value = _parse_option(line) except ValueError as ex: raise ConfigurationError('Invalid assignment at list {!r} line {}: {!r} ' '({})' .format(name, i, line, ex)) if option == _CONF.FIELD.MAX_LENGTH: max_length = option_value elif option == _CONF.FIELD.NUMBER_OF_WORDS: number_of_words = option_value continue # pragma: no cover # Parse words if not multiword and _WORD_REGEX.match(line): if max_length is not None and len(line) > max_length: raise ConfigurationError('Word is too long at list {!r} line {}: {!r}' .format(name, i, line)) items.append(line) elif _PHRASE_REGEX.match(line): if not multiword: multiword = True multiword_start = len(items) phrase = tuple(line.split(' ')) if number_of_words is not None and len(phrase) != number_of_words: raise ConfigurationError('Phrase has {} word(s) (while number_of_words={}) ' 'at list {!r} line {}: {!r}' .format(len(phrase), number_of_words, name, i, line)) if max_length is not None and sum(len(x) for x in phrase) > max_length: raise ConfigurationError('Phrase is too long at list {!r} line {}: {!r}' .format(name, i, line)) items.append(phrase) else: raise ConfigurationError('Invalid syntax at list {!r} line {}: {!r}' .format(name, i, line)) if multiword: # If in phrase mode, convert everything to tuples for i in range(0, multiword_start): items[i] = (items[i], ) result = { _CONF.FIELD.TYPE: _CONF.TYPE.PHRASES, _CONF.FIELD.PHRASES: items } if number_of_words is not None: result[_CONF.FIELD.NUMBER_OF_WORDS] = number_of_words else: result = { _CONF.FIELD.TYPE: _CONF.TYPE.WORDS, _CONF.FIELD.WORDS: items } if max_length is not None: result[_CONF.FIELD.MAX_LENGTH] = max_length return result
python
def _load_wordlist(name, stream): """ Loads list of words or phrases from file. Returns "words" or "phrases" dictionary, the same as used in config. Raises Exception if file is missing or invalid. """ items = [] max_length = None multiword = False multiword_start = None number_of_words = None for i, line in enumerate(stream, start=1): line = line.strip() if not line or line.startswith('#'): continue # Is it an option line, e.g. 'max_length = 10'? if '=' in line: if items: raise ConfigurationError('Invalid assignment at list {!r} line {}: {!r} ' '(options must be defined before words)' .format(name, i, line)) try: option, option_value = _parse_option(line) except ValueError as ex: raise ConfigurationError('Invalid assignment at list {!r} line {}: {!r} ' '({})' .format(name, i, line, ex)) if option == _CONF.FIELD.MAX_LENGTH: max_length = option_value elif option == _CONF.FIELD.NUMBER_OF_WORDS: number_of_words = option_value continue # pragma: no cover # Parse words if not multiword and _WORD_REGEX.match(line): if max_length is not None and len(line) > max_length: raise ConfigurationError('Word is too long at list {!r} line {}: {!r}' .format(name, i, line)) items.append(line) elif _PHRASE_REGEX.match(line): if not multiword: multiword = True multiword_start = len(items) phrase = tuple(line.split(' ')) if number_of_words is not None and len(phrase) != number_of_words: raise ConfigurationError('Phrase has {} word(s) (while number_of_words={}) ' 'at list {!r} line {}: {!r}' .format(len(phrase), number_of_words, name, i, line)) if max_length is not None and sum(len(x) for x in phrase) > max_length: raise ConfigurationError('Phrase is too long at list {!r} line {}: {!r}' .format(name, i, line)) items.append(phrase) else: raise ConfigurationError('Invalid syntax at list {!r} line {}: {!r}' .format(name, i, line)) if multiword: # If in phrase mode, convert everything to tuples for i in range(0, multiword_start): items[i] = (items[i], ) result = { _CONF.FIELD.TYPE: _CONF.TYPE.PHRASES, _CONF.FIELD.PHRASES: items } if number_of_words is not None: result[_CONF.FIELD.NUMBER_OF_WORDS] = number_of_words else: result = { _CONF.FIELD.TYPE: _CONF.TYPE.WORDS, _CONF.FIELD.WORDS: items } if max_length is not None: result[_CONF.FIELD.MAX_LENGTH] = max_length return result
[ "def", "_load_wordlist", "(", "name", ",", "stream", ")", ":", "items", "=", "[", "]", "max_length", "=", "None", "multiword", "=", "False", "multiword_start", "=", "None", "number_of_words", "=", "None", "for", "i", ",", "line", "in", "enumerate", "(", "stream", ",", "start", "=", "1", ")", ":", "line", "=", "line", ".", "strip", "(", ")", "if", "not", "line", "or", "line", ".", "startswith", "(", "'#'", ")", ":", "continue", "# Is it an option line, e.g. 'max_length = 10'?", "if", "'='", "in", "line", ":", "if", "items", ":", "raise", "ConfigurationError", "(", "'Invalid assignment at list {!r} line {}: {!r} '", "'(options must be defined before words)'", ".", "format", "(", "name", ",", "i", ",", "line", ")", ")", "try", ":", "option", ",", "option_value", "=", "_parse_option", "(", "line", ")", "except", "ValueError", "as", "ex", ":", "raise", "ConfigurationError", "(", "'Invalid assignment at list {!r} line {}: {!r} '", "'({})'", ".", "format", "(", "name", ",", "i", ",", "line", ",", "ex", ")", ")", "if", "option", "==", "_CONF", ".", "FIELD", ".", "MAX_LENGTH", ":", "max_length", "=", "option_value", "elif", "option", "==", "_CONF", ".", "FIELD", ".", "NUMBER_OF_WORDS", ":", "number_of_words", "=", "option_value", "continue", "# pragma: no cover", "# Parse words", "if", "not", "multiword", "and", "_WORD_REGEX", ".", "match", "(", "line", ")", ":", "if", "max_length", "is", "not", "None", "and", "len", "(", "line", ")", ">", "max_length", ":", "raise", "ConfigurationError", "(", "'Word is too long at list {!r} line {}: {!r}'", ".", "format", "(", "name", ",", "i", ",", "line", ")", ")", "items", ".", "append", "(", "line", ")", "elif", "_PHRASE_REGEX", ".", "match", "(", "line", ")", ":", "if", "not", "multiword", ":", "multiword", "=", "True", "multiword_start", "=", "len", "(", "items", ")", "phrase", "=", "tuple", "(", "line", ".", "split", "(", "' '", ")", ")", "if", "number_of_words", "is", "not", "None", "and", "len", "(", "phrase", ")", "!=", "number_of_words", ":", "raise", "ConfigurationError", "(", "'Phrase has {} word(s) (while number_of_words={}) '", "'at list {!r} line {}: {!r}'", ".", "format", "(", "len", "(", "phrase", ")", ",", "number_of_words", ",", "name", ",", "i", ",", "line", ")", ")", "if", "max_length", "is", "not", "None", "and", "sum", "(", "len", "(", "x", ")", "for", "x", "in", "phrase", ")", ">", "max_length", ":", "raise", "ConfigurationError", "(", "'Phrase is too long at list {!r} line {}: {!r}'", ".", "format", "(", "name", ",", "i", ",", "line", ")", ")", "items", ".", "append", "(", "phrase", ")", "else", ":", "raise", "ConfigurationError", "(", "'Invalid syntax at list {!r} line {}: {!r}'", ".", "format", "(", "name", ",", "i", ",", "line", ")", ")", "if", "multiword", ":", "# If in phrase mode, convert everything to tuples", "for", "i", "in", "range", "(", "0", ",", "multiword_start", ")", ":", "items", "[", "i", "]", "=", "(", "items", "[", "i", "]", ",", ")", "result", "=", "{", "_CONF", ".", "FIELD", ".", "TYPE", ":", "_CONF", ".", "TYPE", ".", "PHRASES", ",", "_CONF", ".", "FIELD", ".", "PHRASES", ":", "items", "}", "if", "number_of_words", "is", "not", "None", ":", "result", "[", "_CONF", ".", "FIELD", ".", "NUMBER_OF_WORDS", "]", "=", "number_of_words", "else", ":", "result", "=", "{", "_CONF", ".", "FIELD", ".", "TYPE", ":", "_CONF", ".", "TYPE", ".", "WORDS", ",", "_CONF", ".", "FIELD", ".", "WORDS", ":", "items", "}", "if", "max_length", "is", "not", "None", ":", "result", "[", "_CONF", ".", "FIELD", ".", "MAX_LENGTH", "]", "=", 
"max_length", "return", "result" ]
Loads list of words or phrases from file. Returns "words" or "phrases" dictionary, the same as used in config. Raises Exception if file is missing or invalid.
[ "Loads", "list", "of", "words", "or", "phrases", "from", "file", "." ]
train
https://github.com/alexanderlukanin13/coolname/blob/416cc39254ab9e921fd5be77dfe6cdafbad0300c/coolname/loader.py#L110-L182
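The parser above implies the following on-disk format: '#' starts a comment, option assignments must precede entries, and each remaining line is one word or one space-separated phrase. Two illustrative files:

    # adjectives.txt
    max_length = 12
    quick
    brown
    graceful

    # phrases.txt
    number_of_words = 2
    red fox
    lazy dog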
alexanderlukanin13/coolname
coolname/impl.py
_validate_config
def _validate_config(config): """ A big and ugly method for config validation. It would be nice to use cerberus, but we don't want to introduce dependencies just for that. """ try: referenced_sublists = set() for key, listdef in list(config.items()): # Check if section is a list if not isinstance(listdef, dict): raise ValueError('Value at key {!r} is not a dict' .format(key)) # Check if it has correct type if _CONF.FIELD.TYPE not in listdef: raise ValueError('Config at key {!r} has no {!r}' .format(key, _CONF.FIELD.TYPE)) # Nested or Cartesian if listdef[_CONF.FIELD.TYPE] in (_CONF.TYPE.NESTED, _CONF.TYPE.CARTESIAN): sublists = listdef.get(_CONF.FIELD.LISTS) if sublists is None: raise ValueError('Config at key {!r} has no {!r}' .format(key, _CONF.FIELD.LISTS)) if (not isinstance(sublists, list) or not sublists or not all(_is_str(x) for x in sublists)): raise ValueError('Config at key {!r} has invalid {!r}' .format(key, _CONF.FIELD.LISTS)) referenced_sublists.update(sublists) # Const elif listdef[_CONF.FIELD.TYPE] == _CONF.TYPE.CONST: try: value = listdef[_CONF.FIELD.VALUE] except KeyError: raise ValueError('Config at key {!r} has no {!r}' .format(key, _CONF.FIELD.VALUE)) if not _is_str(value): raise ValueError('Config at key {!r} has invalid {!r}' .format(key, _CONF.FIELD.VALUE)) # Words elif listdef[_CONF.FIELD.TYPE] == _CONF.TYPE.WORDS: try: words = listdef[_CONF.FIELD.WORDS] except KeyError: raise ValueError('Config at key {!r} has no {!r}' .format(key, _CONF.FIELD.WORDS)) if not isinstance(words, list) or not words: raise ValueError('Config at key {!r} has invalid {!r}' .format(key, _CONF.FIELD.WORDS)) # Validate word length try: max_length = int(listdef[_CONF.FIELD.MAX_LENGTH]) except KeyError: max_length = None if max_length is not None: for word in words: if len(word) > max_length: raise ValueError('Config at key {!r} has invalid word {!r} ' '(longer than {} characters)' .format(key, word, max_length)) # Phrases (sequences of one or more words) elif listdef[_CONF.FIELD.TYPE] == _CONF.TYPE.PHRASES: try: phrases = listdef[_CONF.FIELD.PHRASES] except KeyError: raise ValueError('Config at key {!r} has no {!r}' .format(key, _CONF.FIELD.PHRASES)) if not isinstance(phrases, list) or not phrases: raise ValueError('Config at key {!r} has invalid {!r}' .format(key, _CONF.FIELD.PHRASES)) # Validate multi-word and max length try: number_of_words = int(listdef[_CONF.FIELD.NUMBER_OF_WORDS]) except KeyError: number_of_words = None try: max_length = int(listdef[_CONF.FIELD.MAX_LENGTH]) except KeyError: max_length = None for phrase in phrases: phrase = _split_phrase(phrase) # str -> sequence, if necessary if not isinstance(phrase, (tuple, list)) or not all(isinstance(x, _str_types) for x in phrase): raise ValueError('Config at key {!r} has invalid {!r}: ' 'must be all string/tuple/list' .format(key, _CONF.FIELD.PHRASES)) if number_of_words is not None and len(phrase) != number_of_words: raise ValueError('Config at key {!r} has invalid phrase {!r} ' '({} word(s) but {}={})' .format(key, ' '.join(phrase), len(phrase), _CONF.FIELD.NUMBER_OF_WORDS, number_of_words)) if max_length is not None and sum(len(word) for word in phrase) > max_length: raise ValueError('Config at key {!r} has invalid phrase {!r} ' '(longer than {} characters)' .format(key, ' '.join(phrase), max_length)) else: raise ValueError('Config at key {!r} has invalid {!r}' .format(key, _CONF.FIELD.TYPE)) # Check that all sublists are defined diff = referenced_sublists.difference(config.keys()) if diff: raise ValueError('Lists are referenced but not defined: {}' .format(', '.join(sorted(diff)[:10]))) except (KeyError, ValueError) as ex: raise ConfigurationError(str(ex))
python
def _validate_config(config): """ A big and ugly method for config validation. It would be nice to use cerberus, but we don't want to introduce dependencies just for that. """ try: referenced_sublists = set() for key, listdef in list(config.items()): # Check if section is a list if not isinstance(listdef, dict): raise ValueError('Value at key {!r} is not a dict' .format(key)) # Check if it has correct type if _CONF.FIELD.TYPE not in listdef: raise ValueError('Config at key {!r} has no {!r}' .format(key, _CONF.FIELD.TYPE)) # Nested or Cartesian if listdef[_CONF.FIELD.TYPE] in (_CONF.TYPE.NESTED, _CONF.TYPE.CARTESIAN): sublists = listdef.get(_CONF.FIELD.LISTS) if sublists is None: raise ValueError('Config at key {!r} has no {!r}' .format(key, _CONF.FIELD.LISTS)) if (not isinstance(sublists, list) or not sublists or not all(_is_str(x) for x in sublists)): raise ValueError('Config at key {!r} has invalid {!r}' .format(key, _CONF.FIELD.LISTS)) referenced_sublists.update(sublists) # Const elif listdef[_CONF.FIELD.TYPE] == _CONF.TYPE.CONST: try: value = listdef[_CONF.FIELD.VALUE] except KeyError: raise ValueError('Config at key {!r} has no {!r}' .format(key, _CONF.FIELD.VALUE)) if not _is_str(value): raise ValueError('Config at key {!r} has invalid {!r}' .format(key, _CONF.FIELD.VALUE)) # Words elif listdef[_CONF.FIELD.TYPE] == _CONF.TYPE.WORDS: try: words = listdef[_CONF.FIELD.WORDS] except KeyError: raise ValueError('Config at key {!r} has no {!r}' .format(key, _CONF.FIELD.WORDS)) if not isinstance(words, list) or not words: raise ValueError('Config at key {!r} has invalid {!r}' .format(key, _CONF.FIELD.WORDS)) # Validate word length try: max_length = int(listdef[_CONF.FIELD.MAX_LENGTH]) except KeyError: max_length = None if max_length is not None: for word in words: if len(word) > max_length: raise ValueError('Config at key {!r} has invalid word {!r} ' '(longer than {} characters)' .format(key, word, max_length)) # Phrases (sequences of one or more words) elif listdef[_CONF.FIELD.TYPE] == _CONF.TYPE.PHRASES: try: phrases = listdef[_CONF.FIELD.PHRASES] except KeyError: raise ValueError('Config at key {!r} has no {!r}' .format(key, _CONF.FIELD.PHRASES)) if not isinstance(phrases, list) or not phrases: raise ValueError('Config at key {!r} has invalid {!r}' .format(key, _CONF.FIELD.PHRASES)) # Validate multi-word and max length try: number_of_words = int(listdef[_CONF.FIELD.NUMBER_OF_WORDS]) except KeyError: number_of_words = None try: max_length = int(listdef[_CONF.FIELD.MAX_LENGTH]) except KeyError: max_length = None for phrase in phrases: phrase = _split_phrase(phrase) # str -> sequence, if necessary if not isinstance(phrase, (tuple, list)) or not all(isinstance(x, _str_types) for x in phrase): raise ValueError('Config at key {!r} has invalid {!r}: ' 'must be all string/tuple/list' .format(key, _CONF.FIELD.PHRASES)) if number_of_words is not None and len(phrase) != number_of_words: raise ValueError('Config at key {!r} has invalid phrase {!r} ' '({} word(s) but {}={})' .format(key, ' '.join(phrase), len(phrase), _CONF.FIELD.NUMBER_OF_WORDS, number_of_words)) if max_length is not None and sum(len(word) for word in phrase) > max_length: raise ValueError('Config at key {!r} has invalid phrase {!r} ' '(longer than {} characters)' .format(key, ' '.join(phrase), max_length)) else: raise ValueError('Config at key {!r} has invalid {!r}' .format(key, _CONF.FIELD.TYPE)) # Check that all sublists are defined diff = referenced_sublists.difference(config.keys()) if diff: raise ValueError('Lists are referenced but not defined: {}' .format(', '.join(sorted(diff)[:10]))) except (KeyError, ValueError) as ex: raise ConfigurationError(str(ex))
[ "def", "_validate_config", "(", "config", ")", ":", "try", ":", "referenced_sublists", "=", "set", "(", ")", "for", "key", ",", "listdef", "in", "list", "(", "config", ".", "items", "(", ")", ")", ":", "# Check if section is a list", "if", "not", "isinstance", "(", "listdef", ",", "dict", ")", ":", "raise", "ValueError", "(", "'Value at key {!r} is not a dict'", ".", "format", "(", "key", ")", ")", "# Check if it has correct type", "if", "_CONF", ".", "FIELD", ".", "TYPE", "not", "in", "listdef", ":", "raise", "ValueError", "(", "'Config at key {!r} has no {!r}'", ".", "format", "(", "key", ",", "_CONF", ".", "FIELD", ".", "TYPE", ")", ")", "# Nested or Cartesian", "if", "listdef", "[", "_CONF", ".", "FIELD", ".", "TYPE", "]", "in", "(", "_CONF", ".", "TYPE", ".", "NESTED", ",", "_CONF", ".", "TYPE", ".", "CARTESIAN", ")", ":", "sublists", "=", "listdef", ".", "get", "(", "_CONF", ".", "FIELD", ".", "LISTS", ")", "if", "sublists", "is", "None", ":", "raise", "ValueError", "(", "'Config at key {!r} has no {!r}'", ".", "format", "(", "key", ",", "_CONF", ".", "FIELD", ".", "LISTS", ")", ")", "if", "(", "not", "isinstance", "(", "sublists", ",", "list", ")", "or", "not", "sublists", "or", "not", "all", "(", "_is_str", "(", "x", ")", "for", "x", "in", "sublists", ")", ")", ":", "raise", "ValueError", "(", "'Config at key {!r} has invalid {!r}'", ".", "format", "(", "key", ",", "_CONF", ".", "FIELD", ".", "LISTS", ")", ")", "referenced_sublists", ".", "update", "(", "sublists", ")", "# Const", "elif", "listdef", "[", "_CONF", ".", "FIELD", ".", "TYPE", "]", "==", "_CONF", ".", "TYPE", ".", "CONST", ":", "try", ":", "value", "=", "listdef", "[", "_CONF", ".", "FIELD", ".", "VALUE", "]", "except", "KeyError", ":", "raise", "ValueError", "(", "'Config at key {!r} has no {!r}'", ".", "format", "(", "key", ",", "_CONF", ".", "FIELD", ".", "VALUE", ")", ")", "if", "not", "_is_str", "(", "value", ")", ":", "raise", "ValueError", "(", "'Config at key {!r} has invalid {!r}'", ".", "format", "(", "key", ",", "_CONF", ".", "FIELD", ".", "VALUE", ")", ")", "# Words", "elif", "listdef", "[", "_CONF", ".", "FIELD", ".", "TYPE", "]", "==", "_CONF", ".", "TYPE", ".", "WORDS", ":", "try", ":", "words", "=", "listdef", "[", "_CONF", ".", "FIELD", ".", "WORDS", "]", "except", "KeyError", ":", "raise", "ValueError", "(", "'Config at key {!r} has no {!r}'", ".", "format", "(", "key", ",", "_CONF", ".", "FIELD", ".", "WORDS", ")", ")", "if", "not", "isinstance", "(", "words", ",", "list", ")", "or", "not", "words", ":", "raise", "ValueError", "(", "'Config at key {!r} has invalid {!r}'", ".", "format", "(", "key", ",", "_CONF", ".", "FIELD", ".", "WORDS", ")", ")", "# Validate word length", "try", ":", "max_length", "=", "int", "(", "listdef", "[", "_CONF", ".", "FIELD", ".", "MAX_LENGTH", "]", ")", "except", "KeyError", ":", "max_length", "=", "None", "if", "max_length", "is", "not", "None", ":", "for", "word", "in", "words", ":", "if", "len", "(", "word", ")", ">", "max_length", ":", "raise", "ValueError", "(", "'Config at key {!r} has invalid word {!r} '", "'(longer than {} characters)'", ".", "format", "(", "key", ",", "word", ",", "max_length", ")", ")", "# Phrases (sequences of one or more words)", "elif", "listdef", "[", "_CONF", ".", "FIELD", ".", "TYPE", "]", "==", "_CONF", ".", "TYPE", ".", "PHRASES", ":", "try", ":", "phrases", "=", "listdef", "[", "_CONF", ".", "FIELD", ".", "PHRASES", "]", "except", "KeyError", ":", "raise", "ValueError", "(", "'Config at key {!r} has no {!r}'", ".", "format", "(", 
"key", ",", "_CONF", ".", "FIELD", ".", "PHRASES", ")", ")", "if", "not", "isinstance", "(", "phrases", ",", "list", ")", "or", "not", "phrases", ":", "raise", "ValueError", "(", "'Config at key {!r} has invalid {!r}'", ".", "format", "(", "key", ",", "_CONF", ".", "FIELD", ".", "PHRASES", ")", ")", "# Validate multi-word and max length", "try", ":", "number_of_words", "=", "int", "(", "listdef", "[", "_CONF", ".", "FIELD", ".", "NUMBER_OF_WORDS", "]", ")", "except", "KeyError", ":", "number_of_words", "=", "None", "try", ":", "max_length", "=", "int", "(", "listdef", "[", "_CONF", ".", "FIELD", ".", "MAX_LENGTH", "]", ")", "except", "KeyError", ":", "max_length", "=", "None", "for", "phrase", "in", "phrases", ":", "phrase", "=", "_split_phrase", "(", "phrase", ")", "# str -> sequence, if necessary", "if", "not", "isinstance", "(", "phrase", ",", "(", "tuple", ",", "list", ")", ")", "or", "not", "all", "(", "isinstance", "(", "x", ",", "_str_types", ")", "for", "x", "in", "phrase", ")", ":", "raise", "ValueError", "(", "'Config at key {!r} has invalid {!r}: '", "'must be all string/tuple/list'", ".", "format", "(", "key", ",", "_CONF", ".", "FIELD", ".", "PHRASES", ")", ")", "if", "number_of_words", "is", "not", "None", "and", "len", "(", "phrase", ")", "!=", "number_of_words", ":", "raise", "ValueError", "(", "'Config at key {!r} has invalid phrase {!r} '", "'({} word(s) but {}={})'", ".", "format", "(", "key", ",", "' '", ".", "join", "(", "phrase", ")", ",", "len", "(", "phrase", ")", ",", "_CONF", ".", "FIELD", ".", "NUMBER_OF_WORDS", ",", "number_of_words", ")", ")", "if", "max_length", "is", "not", "None", "and", "sum", "(", "len", "(", "word", ")", "for", "word", "in", "phrase", ")", ">", "max_length", ":", "raise", "ValueError", "(", "'Config at key {!r} has invalid phrase {!r} '", "'(longer than {} characters)'", ".", "format", "(", "key", ",", "' '", ".", "join", "(", "phrase", ")", ",", "max_length", ")", ")", "else", ":", "raise", "ValueError", "(", "'Config at key {!r} has invalid {!r}'", ".", "format", "(", "key", ",", "_CONF", ".", "FIELD", ".", "TYPE", ")", ")", "# Check that all sublists are defined", "diff", "=", "referenced_sublists", ".", "difference", "(", "config", ".", "keys", "(", ")", ")", "if", "diff", ":", "raise", "ValueError", "(", "'Lists are referenced but not defined: {}'", ".", "format", "(", "', '", ".", "join", "(", "sorted", "(", "diff", ")", "[", ":", "10", "]", ")", ")", ")", "except", "(", "KeyError", ",", "ValueError", ")", "as", "ex", ":", "raise", "ConfigurationError", "(", "str", "(", "ex", ")", ")" ]
A big and ugly method for config validation. It would be nice to use cerberus, but we don't want to introduce dependencies just for that.
[ "A", "big", "and", "ugly", "method", "for", "config", "validation", ".", "It", "would", "be", "nice", "to", "use", "cerberus", "but", "we", "don", "t", "want", "to", "introduce", "dependencies", "just", "for", "that", "." ]
train
https://github.com/alexanderlukanin13/coolname/blob/416cc39254ab9e921fd5be77dfe6cdafbad0300c/coolname/impl.py#L403-L505
alexanderlukanin13/coolname
coolname/impl.py
_create_lists
def _create_lists(config, results, current, stack, inside_cartesian=None): """ An ugly recursive method to transform config dict into a tree of AbstractNestedList. """ # Have we done it already? try: return results[current] except KeyError: pass # Check recursion depth and detect loops if current in stack: raise ConfigurationError('Rule {!r} is recursive: {!r}'.format(stack[0], stack)) if len(stack) > 99: raise ConfigurationError('Rule {!r} is too deep'.format(stack[0])) # Track recursion depth stack.append(current) try: # Check what kind of list we have listdef = config[current] list_type = listdef[_CONF.FIELD.TYPE] # 1. List of words if list_type == _CONF.TYPE.WORDS: results[current] = WordList(listdef['words']) # List of phrases elif list_type == _CONF.TYPE.PHRASES: results[current] = PhraseList(listdef['phrases']) # 2. Simple list of lists elif list_type == _CONF.TYPE.NESTED: results[current] = NestedList([_create_lists(config, results, x, stack, inside_cartesian=inside_cartesian) for x in listdef[_CONF.FIELD.LISTS]]) # 3. Cartesian list of lists elif list_type == _CONF.TYPE.CARTESIAN: if inside_cartesian is not None: raise ConfigurationError("Cartesian list {!r} contains another Cartesian list " "{!r}. Nested Cartesian lists are not allowed." .format(inside_cartesian, current)) results[current] = CartesianList([_create_lists(config, results, x, stack, inside_cartesian=current) for x in listdef[_CONF.FIELD.LISTS]]) # 4. Scalar elif list_type == _CONF.TYPE.CONST: results[current] = Scalar(listdef[_CONF.FIELD.VALUE]) # Unknown type else: raise InitializationError("Unknown list type: {!r}".format(list_type)) # Return the result return results[current] finally: stack.pop()
python
def _create_lists(config, results, current, stack, inside_cartesian=None): """ An ugly recursive method to transform config dict into a tree of AbstractNestedList. """ # Have we done it already? try: return results[current] except KeyError: pass # Check recursion depth and detect loops if current in stack: raise ConfigurationError('Rule {!r} is recursive: {!r}'.format(stack[0], stack)) if len(stack) > 99: raise ConfigurationError('Rule {!r} is too deep'.format(stack[0])) # Track recursion depth stack.append(current) try: # Check what kind of list we have listdef = config[current] list_type = listdef[_CONF.FIELD.TYPE] # 1. List of words if list_type == _CONF.TYPE.WORDS: results[current] = WordList(listdef['words']) # List of phrases elif list_type == _CONF.TYPE.PHRASES: results[current] = PhraseList(listdef['phrases']) # 2. Simple list of lists elif list_type == _CONF.TYPE.NESTED: results[current] = NestedList([_create_lists(config, results, x, stack, inside_cartesian=inside_cartesian) for x in listdef[_CONF.FIELD.LISTS]]) # 3. Cartesian list of lists elif list_type == _CONF.TYPE.CARTESIAN: if inside_cartesian is not None: raise ConfigurationError("Cartesian list {!r} contains another Cartesian list " "{!r}. Nested Cartesian lists are not allowed." .format(inside_cartesian, current)) results[current] = CartesianList([_create_lists(config, results, x, stack, inside_cartesian=current) for x in listdef[_CONF.FIELD.LISTS]]) # 4. Scalar elif list_type == _CONF.TYPE.CONST: results[current] = Scalar(listdef[_CONF.FIELD.VALUE]) # Unknown type else: raise InitializationError("Unknown list type: {!r}".format(list_type)) # Return the result return results[current] finally: stack.pop()
[ "def", "_create_lists", "(", "config", ",", "results", ",", "current", ",", "stack", ",", "inside_cartesian", "=", "None", ")", ":", "# Have we done it already?", "try", ":", "return", "results", "[", "current", "]", "except", "KeyError", ":", "pass", "# Check recursion depth and detect loops", "if", "current", "in", "stack", ":", "raise", "ConfigurationError", "(", "'Rule {!r} is recursive: {!r}'", ".", "format", "(", "stack", "[", "0", "]", ",", "stack", ")", ")", "if", "len", "(", "stack", ")", ">", "99", ":", "raise", "ConfigurationError", "(", "'Rule {!r} is too deep'", ".", "format", "(", "stack", "[", "0", "]", ")", ")", "# Track recursion depth", "stack", ".", "append", "(", "current", ")", "try", ":", "# Check what kind of list we have", "listdef", "=", "config", "[", "current", "]", "list_type", "=", "listdef", "[", "_CONF", ".", "FIELD", ".", "TYPE", "]", "# 1. List of words", "if", "list_type", "==", "_CONF", ".", "TYPE", ".", "WORDS", ":", "results", "[", "current", "]", "=", "WordList", "(", "listdef", "[", "'words'", "]", ")", "# List of phrases", "elif", "list_type", "==", "_CONF", ".", "TYPE", ".", "PHRASES", ":", "results", "[", "current", "]", "=", "PhraseList", "(", "listdef", "[", "'phrases'", "]", ")", "# 2. Simple list of lists", "elif", "list_type", "==", "_CONF", ".", "TYPE", ".", "NESTED", ":", "results", "[", "current", "]", "=", "NestedList", "(", "[", "_create_lists", "(", "config", ",", "results", ",", "x", ",", "stack", ",", "inside_cartesian", "=", "inside_cartesian", ")", "for", "x", "in", "listdef", "[", "_CONF", ".", "FIELD", ".", "LISTS", "]", "]", ")", "# 3. Cartesian list of lists", "elif", "list_type", "==", "_CONF", ".", "TYPE", ".", "CARTESIAN", ":", "if", "inside_cartesian", "is", "not", "None", ":", "raise", "ConfigurationError", "(", "\"Cartesian list {!r} contains another Cartesian list \"", "\"{!r}. Nested Cartesian lists are not allowed.\"", ".", "format", "(", "inside_cartesian", ",", "current", ")", ")", "results", "[", "current", "]", "=", "CartesianList", "(", "[", "_create_lists", "(", "config", ",", "results", ",", "x", ",", "stack", ",", "inside_cartesian", "=", "current", ")", "for", "x", "in", "listdef", "[", "_CONF", ".", "FIELD", ".", "LISTS", "]", "]", ")", "# 4. Scalar", "elif", "list_type", "==", "_CONF", ".", "TYPE", ".", "CONST", ":", "results", "[", "current", "]", "=", "Scalar", "(", "listdef", "[", "_CONF", ".", "FIELD", ".", "VALUE", "]", ")", "# Unknown type", "else", ":", "raise", "InitializationError", "(", "\"Unknown list type: {!r}\"", ".", "format", "(", "list_type", ")", ")", "# Return the result", "return", "results", "[", "current", "]", "finally", ":", "stack", ".", "pop", "(", ")" ]
An ugly recursive method to transform config dict into a tree of AbstractNestedList.
[ "An", "ugly", "recursive", "method", "to", "transform", "config", "dict", "into", "a", "tree", "of", "AbstractNestedList", "." ]
train
https://github.com/alexanderlukanin13/coolname/blob/416cc39254ab9e921fd5be77dfe6cdafbad0300c/coolname/impl.py#L508-L559
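A minimal config dict exercising three of the node types above, with field names written as the plain strings coolname's shipped config appears to use (treat the exact strings as an assumption; the code reads them through _CONF constants):

    config = {
        'all':       {'type': 'nested', 'lists': ['adjective', 'noun']},
        'adjective': {'type': 'words',  'words': ['quick', 'lazy']},
        'noun':      {'type': 'words',  'words': ['fox', 'dog']},
    }
    root = _create_lists(config, {}, 'all', [])  # returns the tree built for 'all'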
alexanderlukanin13/coolname
coolname/impl.py
RandomGenerator.generate
def generate(self, pattern=None): """ Generates and returns random name as a list of strings. """ lst = self._lists[pattern] while True: result = lst[self._randrange(lst.length)] # 1. Check that there are no duplicates # 2. Check that there are no duplicate prefixes # 3. Check max slug length n = len(result) if (self._ensure_unique and len(set(result)) != n or self._check_prefix and len(set(x[:self._check_prefix] for x in result)) != n or self._max_slug_length and sum(len(x) for x in result) + n - 1 > self._max_slug_length): continue return result
python
def generate(self, pattern=None): """ Generates and returns random name as a list of strings. """ lst = self._lists[pattern] while True: result = lst[self._randrange(lst.length)] # 1. Check that there are no duplicates # 2. Check that there are no duplicate prefixes # 3. Check max slug length n = len(result) if (self._ensure_unique and len(set(result)) != n or self._check_prefix and len(set(x[:self._check_prefix] for x in result)) != n or self._max_slug_length and sum(len(x) for x in result) + n - 1 > self._max_slug_length): continue return result
[ "def", "generate", "(", "self", ",", "pattern", "=", "None", ")", ":", "lst", "=", "self", ".", "_lists", "[", "pattern", "]", "while", "True", ":", "result", "=", "lst", "[", "self", ".", "_randrange", "(", "lst", ".", "length", ")", "]", "# 1. Check that there are no duplicates", "# 2. Check that there are no duplicate prefixes", "# 3. Check max slug length", "n", "=", "len", "(", "result", ")", "if", "(", "self", ".", "_ensure_unique", "and", "len", "(", "set", "(", "result", ")", ")", "!=", "n", "or", "self", ".", "_check_prefix", "and", "len", "(", "set", "(", "x", "[", ":", "self", ".", "_check_prefix", "]", "for", "x", "in", "result", ")", ")", "!=", "n", "or", "self", ".", "_max_slug_length", "and", "sum", "(", "len", "(", "x", ")", "for", "x", "in", "result", ")", "+", "n", "-", "1", ">", "self", ".", "_max_slug_length", ")", ":", "continue", "return", "result" ]
Generates and returns random name as a list of strings.
[ "Generates", "and", "returns", "random", "name", "as", "a", "list", "of", "strings", "." ]
train
https://github.com/alexanderlukanin13/coolname/blob/416cc39254ab9e921fd5be77dfe6cdafbad0300c/coolname/impl.py#L306-L321
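generate() is plain rejection sampling: draw a uniform index into the combined list, then retry if the combination violates the uniqueness, prefix, or slug-length constraints. Through the package-level API this surfaces roughly as follows (sample outputs are illustrative):

    from coolname import generate, generate_slug

    words = generate()       # e.g. ['quick', 'graceful', 'fox', 'of', 'fortune']
    slug = generate_slug()   # e.g. 'quick-graceful-fox-of-fortune'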
alexanderlukanin13/coolname
coolname/impl.py
RandomGenerator._dump
def _dump(self, stream, pattern=None, object_ids=False): """Dumps current tree into a text stream.""" return self._lists[pattern]._dump(stream, '', object_ids=object_ids)
python
def _dump(self, stream, pattern=None, object_ids=False): """Dumps current tree into a text stream.""" return self._lists[pattern]._dump(stream, '', object_ids=object_ids)
[ "def", "_dump", "(", "self", ",", "stream", ",", "pattern", "=", "None", ",", "object_ids", "=", "False", ")", ":", "return", "self", ".", "_lists", "[", "pattern", "]", ".", "_dump", "(", "stream", ",", "''", ",", "object_ids", "=", "object_ids", ")" ]
Dumps current tree into a text stream.
[ "Dumps", "current", "tree", "into", "a", "text", "stream", "." ]
train
https://github.com/alexanderlukanin13/coolname/blob/416cc39254ab9e921fd5be77dfe6cdafbad0300c/coolname/impl.py#L337-L339
alexanderlukanin13/coolname
coolname/impl.py
RandomGenerator._check_not_hanging
def _check_not_hanging(self): """ Rough check that generate() will not hang or be very slow. Raises ConfigurationError if generate() spends too much time in retry loop. Issues a warning.warn() if there is a risk of slowdown. """ # (field_name, predicate, warning_msg, exception_msg) # predicate(g) is a function that returns True if generated combination g must be rejected, # see checks in generate() checks = [] # ensure_unique can lead to infinite loops for some tiny erroneous configs if self._ensure_unique: checks.append(( _CONF.FIELD.ENSURE_UNIQUE, self._ensure_unique, lambda g: len(set(g)) != len(g), '{generate} may be slow because a significant fraction of combinations contain repeating words and {field_name} is set', # noqa 'Impossible to generate with {field_name}' )) # # max_slug_length can easily slow down or block generation if set too small if self._max_slug_length: checks.append(( _CONF.FIELD.MAX_SLUG_LENGTH, self._max_slug_length, lambda g: sum(len(x) for x in g) + len(g) - 1 > self._max_slug_length, '{generate} may be slow because a significant fraction of combinations exceed {field_name}={field_value}', # noqa 'Impossible to generate with {field_name}={field_value}' )) # Perform the relevant checks for all generators, starting from 'all' n = 100 warning_treshold = 20 # fail probability: 0.04 for 2 attempts, 0.008 for 3 attempts, etc. for lst_id, lst in sorted(self._lists.items(), key=lambda x: '' if x is None else str(x)): context = {'generate': 'coolname.generate({})'.format('' if lst_id is None else repr(lst_id))} # For each generator, perform checks for field_name, field_value, predicate, warning_msg, exception_msg in checks: context.update({'field_name': field_name, 'field_value': field_value}) bad_count = 0 for i in range(n): g = lst[randrange(lst.length)] if predicate(g): bad_count += 1 if bad_count >= n: raise ConfigurationError(exception_msg.format(**context)) elif bad_count >= warning_treshold: import warnings warnings.warn(warning_msg.format(**context))
python
def _check_not_hanging(self): """ Rough check that generate() will not hang or be very slow. Raises ConfigurationError if generate() spends too much time in retry loop. Issues a warning.warn() if there is a risk of slowdown. """ # (field_name, predicate, warning_msg, exception_msg) # predicate(g) is a function that returns True if generated combination g must be rejected, # see checks in generate() checks = [] # ensure_unique can lead to infinite loops for some tiny erroneous configs if self._ensure_unique: checks.append(( _CONF.FIELD.ENSURE_UNIQUE, self._ensure_unique, lambda g: len(set(g)) != len(g), '{generate} may be slow because a significant fraction of combinations contain repeating words and {field_name} is set', # noqa 'Impossible to generate with {field_name}' )) # # max_slug_length can easily slow down or block generation if set too small if self._max_slug_length: checks.append(( _CONF.FIELD.MAX_SLUG_LENGTH, self._max_slug_length, lambda g: sum(len(x) for x in g) + len(g) - 1 > self._max_slug_length, '{generate} may be slow because a significant fraction of combinations exceed {field_name}={field_value}', # noqa 'Impossible to generate with {field_name}={field_value}' )) # Perform the relevant checks for all generators, starting from 'all' n = 100 warning_treshold = 20 # fail probability: 0.04 for 2 attempts, 0.008 for 3 attempts, etc. for lst_id, lst in sorted(self._lists.items(), key=lambda x: '' if x is None else str(x)): context = {'generate': 'coolname.generate({})'.format('' if lst_id is None else repr(lst_id))} # For each generator, perform checks for field_name, field_value, predicate, warning_msg, exception_msg in checks: context.update({'field_name': field_name, 'field_value': field_value}) bad_count = 0 for i in range(n): g = lst[randrange(lst.length)] if predicate(g): bad_count += 1 if bad_count >= n: raise ConfigurationError(exception_msg.format(**context)) elif bad_count >= warning_treshold: import warnings warnings.warn(warning_msg.format(**context))
[ "def", "_check_not_hanging", "(", "self", ")", ":", "# (field_name, predicate, warning_msg, exception_msg)", "# predicate(g) is a function that returns True if generated combination g must be rejected,", "# see checks in generate()", "checks", "=", "[", "]", "# ensure_unique can lead to infinite loops for some tiny erroneous configs", "if", "self", ".", "_ensure_unique", ":", "checks", ".", "append", "(", "(", "_CONF", ".", "FIELD", ".", "ENSURE_UNIQUE", ",", "self", ".", "_ensure_unique", ",", "lambda", "g", ":", "len", "(", "set", "(", "g", ")", ")", "!=", "len", "(", "g", ")", ",", "'{generate} may be slow because a significant fraction of combinations contain repeating words and {field_name} is set'", ",", "# noqa", "'Impossible to generate with {field_name}'", ")", ")", "#", "# max_slug_length can easily slow down or block generation if set too small", "if", "self", ".", "_max_slug_length", ":", "checks", ".", "append", "(", "(", "_CONF", ".", "FIELD", ".", "MAX_SLUG_LENGTH", ",", "self", ".", "_max_slug_length", ",", "lambda", "g", ":", "sum", "(", "len", "(", "x", ")", "for", "x", "in", "g", ")", "+", "len", "(", "g", ")", "-", "1", ">", "self", ".", "_max_slug_length", ",", "'{generate} may be slow because a significant fraction of combinations exceed {field_name}={field_value}'", ",", "# noqa", "'Impossible to generate with {field_name}={field_value}'", ")", ")", "# Perform the relevant checks for all generators, starting from 'all'", "n", "=", "100", "warning_treshold", "=", "20", "# fail probability: 0.04 for 2 attempts, 0.008 for 3 attempts, etc.", "for", "lst_id", ",", "lst", "in", "sorted", "(", "self", ".", "_lists", ".", "items", "(", ")", ",", "key", "=", "lambda", "x", ":", "''", "if", "x", "is", "None", "else", "str", "(", "x", ")", ")", ":", "context", "=", "{", "'generate'", ":", "'coolname.generate({})'", ".", "format", "(", "''", "if", "lst_id", "is", "None", "else", "repr", "(", "lst_id", ")", ")", "}", "# For each generator, perform checks", "for", "field_name", ",", "field_value", ",", "predicate", ",", "warning_msg", ",", "exception_msg", "in", "checks", ":", "context", ".", "update", "(", "{", "'field_name'", ":", "field_name", ",", "'field_value'", ":", "field_value", "}", ")", "bad_count", "=", "0", "for", "i", "in", "range", "(", "n", ")", ":", "g", "=", "lst", "[", "randrange", "(", "lst", ".", "length", ")", "]", "if", "predicate", "(", "g", ")", ":", "bad_count", "+=", "1", "if", "bad_count", ">=", "n", ":", "raise", "ConfigurationError", "(", "exception_msg", ".", "format", "(", "*", "*", "context", ")", ")", "elif", "bad_count", ">=", "warning_treshold", ":", "import", "warnings", "warnings", ".", "warn", "(", "warning_msg", ".", "format", "(", "*", "*", "context", ")", ")" ]
Rough check that generate() will not hang or be very slow.

Raises ConfigurationError if generate() spends too much time in the retry loop.
Issues a warnings.warn() if there is a risk of slowdown.
[ "Rough", "check", "that", "generate", "()", "will", "not", "hang", "or", "be", "very", "slow", "." ]
train
https://github.com/alexanderlukanin13/coolname/blob/416cc39254ab9e921fd5be77dfe6cdafbad0300c/coolname/impl.py#L341-L388
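A hedged, standalone sketch of the sampling idea behind _check_not_hanging above: estimate how often a rejection predicate fires over random samples, then warn or fail. The function name and thresholds are illustrative assumptions, not part of the coolname API.

import warnings
from random import randrange

def estimate_rejection_rate(combinations, predicate, n=100, warn_threshold=20):
    # Sample n random combinations and count how many the predicate rejects.
    bad_count = sum(
        1 for _ in range(n)
        if predicate(combinations[randrange(len(combinations))])
    )
    if bad_count >= n:
        raise ValueError('every sampled combination was rejected')
    if bad_count >= warn_threshold:
        warnings.warn('generation may be slow: {}/{} samples rejected'.format(bad_count, n))
    return bad_count / n

# Example: flag configs where repeated words would trip the ensure_unique check.
# estimate_rejection_rate(word_combinations, lambda g: len(set(g)) != len(g))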
kvesteri/postgresql-audit
postgresql_audit/migrations.py
alter_column
def alter_column(conn, table, column_name, func, schema=None):
    """
    Run given callable against given table and given column in activity table jsonb data columns. This function is useful when you want to reflect type changes in your schema to activity table.

    In the following example we change the data type of User's age column from string to integer.

    ::

        import sqlalchemy as sa
        from alembic import op
        from postgresql_audit import alter_column


        def upgrade():
            op.alter_column(
                'user',
                'age',
                type_=sa.Integer
            )
            alter_column(
                op,
                'user',
                'age',
                lambda value, activity_table: sa.cast(value, sa.Integer)
            )

    :param conn:
        An object that is able to execute SQL (either SQLAlchemy Connection,
        Engine or Alembic Operations object)
    :param table:
        The table to run the changes against
    :param column_name:
        Name of the column to run the callable against
    :param func:
        A callable to run against specific column in activity table jsonb data
        columns. The callable should take two parameters: the jsonb value
        corresponding to the given column_name, and the activity table object.
    :param schema:
        Optional name of schema to use.
    """
    activity_table = get_activity_table(schema=schema)
    query = (
        activity_table
        .update()
        .values(
            old_data=(
                activity_table.c.old_data +
                sa.cast(sa.func.json_build_object(
                    column_name,
                    func(
                        activity_table.c.old_data[column_name],
                        activity_table
                    )
                ), JSONB)
            ),
            changed_data=(
                activity_table.c.changed_data +
                sa.cast(sa.func.json_build_object(
                    column_name,
                    func(
                        activity_table.c.changed_data[column_name],
                        activity_table
                    )
                ), JSONB)
            )
        )
        .where(activity_table.c.table_name == table)
    )
    return conn.execute(query)
python
def alter_column(conn, table, column_name, func, schema=None):
    """
    Run given callable against given table and given column in activity table jsonb data columns. This function is useful when you want to reflect type changes in your schema to activity table.

    In the following example we change the data type of User's age column from string to integer.

    ::

        import sqlalchemy as sa
        from alembic import op
        from postgresql_audit import alter_column


        def upgrade():
            op.alter_column(
                'user',
                'age',
                type_=sa.Integer
            )
            alter_column(
                op,
                'user',
                'age',
                lambda value, activity_table: sa.cast(value, sa.Integer)
            )

    :param conn:
        An object that is able to execute SQL (either SQLAlchemy Connection,
        Engine or Alembic Operations object)
    :param table:
        The table to run the changes against
    :param column_name:
        Name of the column to run the callable against
    :param func:
        A callable to run against specific column in activity table jsonb data
        columns. The callable should take two parameters: the jsonb value
        corresponding to the given column_name, and the activity table object.
    :param schema:
        Optional name of schema to use.
    """
    activity_table = get_activity_table(schema=schema)
    query = (
        activity_table
        .update()
        .values(
            old_data=(
                activity_table.c.old_data +
                sa.cast(sa.func.json_build_object(
                    column_name,
                    func(
                        activity_table.c.old_data[column_name],
                        activity_table
                    )
                ), JSONB)
            ),
            changed_data=(
                activity_table.c.changed_data +
                sa.cast(sa.func.json_build_object(
                    column_name,
                    func(
                        activity_table.c.changed_data[column_name],
                        activity_table
                    )
                ), JSONB)
            )
        )
        .where(activity_table.c.table_name == table)
    )
    return conn.execute(query)
[ "def", "alter_column", "(", "conn", ",", "table", ",", "column_name", ",", "func", ",", "schema", "=", "None", ")", ":", "activity_table", "=", "get_activity_table", "(", "schema", "=", "schema", ")", "query", "=", "(", "activity_table", ".", "update", "(", ")", ".", "values", "(", "old_data", "=", "(", "activity_table", ".", "c", ".", "old_data", "+", "sa", ".", "cast", "(", "sa", ".", "func", ".", "json_build_object", "(", "column_name", ",", "func", "(", "activity_table", ".", "c", ".", "old_data", "[", "column_name", "]", ",", "activity_table", ")", ")", ",", "JSONB", ")", ")", ",", "changed_data", "=", "(", "activity_table", ".", "c", ".", "changed_data", "+", "sa", ".", "cast", "(", "sa", ".", "func", ".", "json_build_object", "(", "column_name", ",", "func", "(", "activity_table", ".", "c", ".", "changed_data", "[", "column_name", "]", ",", "activity_table", ")", ")", ",", "JSONB", ")", ")", ")", ".", "where", "(", "activity_table", ".", "c", ".", "table_name", "==", "table", ")", ")", "return", "conn", ".", "execute", "(", "query", ")" ]
Run given callable against given table and given column in activity table jsonb data columns. This function is useful when you want to reflect type changes in your schema to activity table.

In the following example we change the data type of User's age column from string to integer.

::

    import sqlalchemy as sa
    from alembic import op
    from postgresql_audit import alter_column


    def upgrade():
        op.alter_column(
            'user',
            'age',
            type_=sa.Integer
        )
        alter_column(
            op,
            'user',
            'age',
            lambda value, activity_table: sa.cast(value, sa.Integer)
        )

:param conn:
    An object that is able to execute SQL (either SQLAlchemy Connection,
    Engine or Alembic Operations object)
:param table:
    The table to run the changes against
:param column_name:
    Name of the column to run the callable against
:param func:
    A callable to run against specific column in activity table jsonb data
    columns. The callable should take two parameters: the jsonb value
    corresponding to the given column_name, and the activity table object.
:param schema:
    Optional name of schema to use.
[ "Run", "given", "callable", "against", "given", "table", "and", "given", "column", "in", "activity", "table", "jsonb", "data", "columns", ".", "This", "function", "is", "useful", "when", "you", "want", "to", "reflect", "type", "changes", "in", "your", "schema", "to", "activity", "table", "." ]
train
https://github.com/kvesteri/postgresql-audit/blob/91b497ced2e04dd44bb757b02983d2a64a2b1514/postgresql_audit/migrations.py#L20-L93
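A possible reverse of the docstring's upgrade example, casting the recorded values back to text in downgrade(). This is a sketch under the assumption that the pre-migration age column was a string type; it is not taken from the postgresql-audit docs.

import sqlalchemy as sa
from alembic import op
from postgresql_audit import alter_column

def downgrade():
    # Revert the column type on the audited table itself...
    op.alter_column('user', 'age', type_=sa.Text)
    # ...and apply the matching cast to the jsonb snapshots in the
    # activity table so old and new rows stay comparable.
    alter_column(
        op,
        'user',
        'age',
        lambda value, activity_table: sa.cast(value, sa.Text)
    )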
kvesteri/postgresql-audit
postgresql_audit/migrations.py
change_column_name
def change_column_name(
    conn,
    table,
    old_column_name,
    new_column_name,
    schema=None
):
    """
    Changes given `activity` jsonb data column key. This function is useful when you want to reflect column name changes to activity table.

    ::

        from alembic import op
        from postgresql_audit import change_column_name


        def upgrade():
            op.alter_column(
                'my_table',
                'my_column',
                new_column_name='some_column'
            )
            change_column_name(op, 'my_table', 'my_column', 'some_column')

    :param conn:
        An object that is able to execute SQL (either SQLAlchemy Connection,
        Engine or Alembic Operations object)
    :param table:
        The table to run the column name changes against
    :param old_column_name:
        Name of the column to change
    :param new_column_name:
        New column name
    :param schema:
        Optional name of schema to use.
    """
    activity_table = get_activity_table(schema=schema)
    query = (
        activity_table
        .update()
        .values(
            old_data=jsonb_change_key_name(
                activity_table.c.old_data,
                old_column_name,
                new_column_name
            ),
            changed_data=jsonb_change_key_name(
                activity_table.c.changed_data,
                old_column_name,
                new_column_name
            )
        )
        .where(activity_table.c.table_name == table)
    )
    return conn.execute(query)
python
def change_column_name(
    conn,
    table,
    old_column_name,
    new_column_name,
    schema=None
):
    """
    Changes given `activity` jsonb data column key. This function is useful when you want to reflect column name changes to activity table.

    ::

        from alembic import op
        from postgresql_audit import change_column_name


        def upgrade():
            op.alter_column(
                'my_table',
                'my_column',
                new_column_name='some_column'
            )
            change_column_name(op, 'my_table', 'my_column', 'some_column')

    :param conn:
        An object that is able to execute SQL (either SQLAlchemy Connection,
        Engine or Alembic Operations object)
    :param table:
        The table to run the column name changes against
    :param old_column_name:
        Name of the column to change
    :param new_column_name:
        New column name
    :param schema:
        Optional name of schema to use.
    """
    activity_table = get_activity_table(schema=schema)
    query = (
        activity_table
        .update()
        .values(
            old_data=jsonb_change_key_name(
                activity_table.c.old_data,
                old_column_name,
                new_column_name
            ),
            changed_data=jsonb_change_key_name(
                activity_table.c.changed_data,
                old_column_name,
                new_column_name
            )
        )
        .where(activity_table.c.table_name == table)
    )
    return conn.execute(query)
[ "def", "change_column_name", "(", "conn", ",", "table", ",", "old_column_name", ",", "new_column_name", ",", "schema", "=", "None", ")", ":", "activity_table", "=", "get_activity_table", "(", "schema", "=", "schema", ")", "query", "=", "(", "activity_table", ".", "update", "(", ")", ".", "values", "(", "old_data", "=", "jsonb_change_key_name", "(", "activity_table", ".", "c", ".", "old_data", ",", "old_column_name", ",", "new_column_name", ")", ",", "changed_data", "=", "jsonb_change_key_name", "(", "activity_table", ".", "c", ".", "changed_data", ",", "old_column_name", ",", "new_column_name", ")", ")", ".", "where", "(", "activity_table", ".", "c", ".", "table_name", "==", "table", ")", ")", "return", "conn", ".", "execute", "(", "query", ")" ]
Changes given `activity` jsonb data column key. This function is useful when you want to reflect column name changes to activity table.

::

    from alembic import op
    from postgresql_audit import change_column_name


    def upgrade():
        op.alter_column(
            'my_table',
            'my_column',
            new_column_name='some_column'
        )
        change_column_name(op, 'my_table', 'my_column', 'some_column')

:param conn:
    An object that is able to execute SQL (either SQLAlchemy Connection,
    Engine or Alembic Operations object)
:param table:
    The table to run the column name changes against
:param old_column_name:
    Name of the column to change
:param new_column_name:
    New column name
:param schema:
    Optional name of schema to use.
[ "Changes", "given", "activity", "jsonb", "data", "column", "key", ".", "This", "function", "is", "useful", "when", "you", "want", "to", "reflect", "column", "name", "changes", "to", "activity", "table", "." ]
train
https://github.com/kvesteri/postgresql-audit/blob/91b497ced2e04dd44bb757b02983d2a64a2b1514/postgresql_audit/migrations.py#L96-L153
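A minimal reversible migration built on the helper above; the downgrade simply swaps the old and new names, which works because the jsonb key rename has no other side effects. Sketch only; the table and column names are placeholders.

from alembic import op
from postgresql_audit import change_column_name

def upgrade():
    op.alter_column('my_table', 'my_column', new_column_name='some_column')
    change_column_name(op, 'my_table', 'my_column', 'some_column')

def downgrade():
    # Renaming is symmetric: swap the arguments to undo the upgrade.
    op.alter_column('my_table', 'some_column', new_column_name='my_column')
    change_column_name(op, 'my_table', 'some_column', 'my_column')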
kvesteri/postgresql-audit
postgresql_audit/migrations.py
add_column
def add_column(conn, table, column_name, default_value=None, schema=None):
    """
    Adds given column to `activity` table jsonb data columns.

    In the following example we reflect the changes made to our schema to activity table.

    ::

        import sqlalchemy as sa
        from alembic import op
        from postgresql_audit import add_column


        def upgrade():
            op.add_column('article', sa.Column('created_at', sa.DateTime()))
            add_column(op, 'article', 'created_at')

    :param conn:
        An object that is able to execute SQL (either SQLAlchemy Connection,
        Engine or Alembic Operations object)
    :param table:
        The table to add the column to
    :param column_name:
        Name of the column to add
    :param default_value:
        The default value of the column
    :param schema:
        Optional name of schema to use.
    """
    activity_table = get_activity_table(schema=schema)
    data = {column_name: default_value}
    query = (
        activity_table
        .update()
        .values(
            old_data=sa.case(
                [
                    (
                        sa.cast(activity_table.c.old_data, sa.Text) != '{}',
                        activity_table.c.old_data + data
                    ),
                ],
                else_=sa.cast({}, JSONB)
            ),
            changed_data=sa.case(
                [
                    (
                        sa.and_(
                            sa.cast(
                                activity_table.c.changed_data,
                                sa.Text
                            ) != '{}',
                            activity_table.c.verb != 'update'
                        ),
                        activity_table.c.changed_data + data
                    )
                ],
                else_=activity_table.c.changed_data
            ),
        )
        .where(activity_table.c.table_name == table)
    )
    return conn.execute(query)
python
def add_column(conn, table, column_name, default_value=None, schema=None):
    """
    Adds given column to `activity` table jsonb data columns.

    In the following example we reflect the changes made to our schema to activity table.

    ::

        import sqlalchemy as sa
        from alembic import op
        from postgresql_audit import add_column


        def upgrade():
            op.add_column('article', sa.Column('created_at', sa.DateTime()))
            add_column(op, 'article', 'created_at')

    :param conn:
        An object that is able to execute SQL (either SQLAlchemy Connection,
        Engine or Alembic Operations object)
    :param table:
        The table to add the column to
    :param column_name:
        Name of the column to add
    :param default_value:
        The default value of the column
    :param schema:
        Optional name of schema to use.
    """
    activity_table = get_activity_table(schema=schema)
    data = {column_name: default_value}
    query = (
        activity_table
        .update()
        .values(
            old_data=sa.case(
                [
                    (
                        sa.cast(activity_table.c.old_data, sa.Text) != '{}',
                        activity_table.c.old_data + data
                    ),
                ],
                else_=sa.cast({}, JSONB)
            ),
            changed_data=sa.case(
                [
                    (
                        sa.and_(
                            sa.cast(
                                activity_table.c.changed_data,
                                sa.Text
                            ) != '{}',
                            activity_table.c.verb != 'update'
                        ),
                        activity_table.c.changed_data + data
                    )
                ],
                else_=activity_table.c.changed_data
            ),
        )
        .where(activity_table.c.table_name == table)
    )
    return conn.execute(query)
[ "def", "add_column", "(", "conn", ",", "table", ",", "column_name", ",", "default_value", "=", "None", ",", "schema", "=", "None", ")", ":", "activity_table", "=", "get_activity_table", "(", "schema", "=", "schema", ")", "data", "=", "{", "column_name", ":", "default_value", "}", "query", "=", "(", "activity_table", ".", "update", "(", ")", ".", "values", "(", "old_data", "=", "sa", ".", "case", "(", "[", "(", "sa", ".", "cast", "(", "activity_table", ".", "c", ".", "old_data", ",", "sa", ".", "Text", ")", "!=", "'{}'", ",", "activity_table", ".", "c", ".", "old_data", "+", "data", ")", ",", "]", ",", "else_", "=", "sa", ".", "cast", "(", "{", "}", ",", "JSONB", ")", ")", ",", "changed_data", "=", "sa", ".", "case", "(", "[", "(", "sa", ".", "and_", "(", "sa", ".", "cast", "(", "activity_table", ".", "c", ".", "changed_data", ",", "sa", ".", "Text", ")", "!=", "'{}'", ",", "activity_table", ".", "c", ".", "verb", "!=", "'update'", ")", ",", "activity_table", ".", "c", ".", "changed_data", "+", "data", ")", "]", ",", "else_", "=", "activity_table", ".", "c", ".", "changed_data", ")", ",", ")", ".", "where", "(", "activity_table", ".", "c", ".", "table_name", "==", "table", ")", ")", "return", "conn", ".", "execute", "(", "query", ")" ]
Adds given column to `activity` table jsonb data columns.

In the following example we reflect the changes made to our schema to activity table.

::

    import sqlalchemy as sa
    from alembic import op
    from postgresql_audit import add_column


    def upgrade():
        op.add_column('article', sa.Column('created_at', sa.DateTime()))
        add_column(op, 'article', 'created_at')

:param conn:
    An object that is able to execute SQL (either SQLAlchemy Connection,
    Engine or Alembic Operations object)
:param table:
    The table to add the column to
:param column_name:
    Name of the column to add
:param default_value:
    The default value of the column
:param schema:
    Optional name of schema to use.
[ "Adds", "given", "column", "to", "activity", "table", "jsonb", "data", "columns", "." ]
train
https://github.com/kvesteri/postgresql-audit/blob/91b497ced2e04dd44bb757b02983d2a64a2b1514/postgresql_audit/migrations.py#L156-L220
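A sketch of a full migration using add_column; the downgrade assumes the module also ships a companion remove_column helper (not shown in this section), so treat that import as an assumption rather than a documented API.

import sqlalchemy as sa
from alembic import op
from postgresql_audit import add_column, remove_column  # remove_column assumed to exist

def upgrade():
    op.add_column('article', sa.Column('created_at', sa.DateTime()))
    add_column(op, 'article', 'created_at')

def downgrade():
    # Drop the key from the activity snapshots before dropping the column itself.
    remove_column(op, 'article', 'created_at')
    op.drop_column('article', 'created_at')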