code: string
signature: string
docstring: string
loss_without_docstring: float64
loss_with_docstring: float64
factor: float64
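A record's factor is the ratio of the two losses (loss_without_docstring / loss_with_docstring), which the values below bear out. A minimal loading sketch, assuming the dump is stored as JSON Lines with the field names above; the file name is hypothetical:

import json

# "code_docstring_pairs.jsonl" is a hypothetical name; the dump's
# actual on-disk format isn't specified by the schema above.
with open("code_docstring_pairs.jsonl") as fh:
    for line in fh:
        record = json.loads(line)
        # factor == loss_without_docstring / loss_with_docstring
        ratio = record["loss_without_docstring"] / record["loss_with_docstring"]
        assert abs(ratio - record["factor"]) < 1e-3
        print(record["signature"], round(record["factor"], 3))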
table = self._table
lpw, rpw = table.left_padding_widths, table.right_padding_widths
wep = table.width_exceed_policy

list_of_rows = []

if (wep is WidthExceedPolicy.WEP_STRIP or
        wep is WidthExceedPolicy.WEP_ELLIPSIS):

    # Let's strip the row
    delimiter = '' if wep is WidthExceedPolicy.WEP_STRIP else '...'
    row_item_list = []
    for index, row_item in enumerate(row):
        left_pad = table._column_pad * lpw[index]
        right_pad = table._column_pad * rpw[index]
        clmp_str = (left_pad
                    + self._clamp_string(row_item, index, delimiter)
                    + right_pad)
        row_item_list.append(clmp_str)
    list_of_rows.append(row_item_list)

elif wep is WidthExceedPolicy.WEP_WRAP:

    # Let's wrap the row
    string_partition = []
    for index, row_item in enumerate(row):
        width = table.column_widths[index] - lpw[index] - rpw[index]
        string_partition.append(textwrap(row_item, width))

    for row_items in zip_longest(*string_partition, fillvalue=''):
        row_item_list = []
        for index, row_item in enumerate(row_items):
            left_pad = table._column_pad * lpw[index]
            right_pad = table._column_pad * rpw[index]
            row_item_list.append(left_pad + row_item + right_pad)
        list_of_rows.append(row_item_list)

if len(list_of_rows) == 0:
    return [[''] * table.column_count]
else:
    return list_of_rows
def _get_row_within_width(self, row)
Process a row so that it is clamped by column_width.

Parameters
----------
row : array_like
    A single row.

Returns
-------
list of list:
    List representation of the `row` after it has been processed
    according to the width exceed policy.
2.512804
2.47726
1.014348
width = (self._table.column_widths[column_index]
         - self._table.left_padding_widths[column_index]
         - self._table.right_padding_widths[column_index])

if termwidth(row_item) <= width:
    return row_item
else:
    if width - len(delimiter) >= 0:
        clamped_string = (textwrap(row_item, width - len(delimiter))[0]
                          + delimiter)
    else:
        clamped_string = delimiter[:width]
    return clamped_string
def _clamp_string(self, row_item, column_index, delimiter='')
Clamp `row_item` to fit in the column referred to by `column_index`.

This method considers padding and appends the delimiter if `row_item`
needs to be truncated.

Parameters
----------
row_item : str
    String which should be clamped.
column_index : int
    Index of the column `row_item` belongs to.
delimiter : str
    String which is to be appended to the clamped string.

Returns
-------
str
    The modified string which fits in its column.
3.273836
3.531307
0.927089
try:
    responses = set()
    for text in wrap(string=str(message), length=wrap_length):
        response = self.http_session.post(
            url="{}/im/sendIM".format(self.api_base_url),
            data={
                "r": uuid.uuid4(),
                "aimsid": self.token,
                "t": target,
                "message": text,
                "mentions": (
                    mentions
                    if isinstance(mentions, six.string_types)
                    or not hasattr(mentions, "__iter__")
                    else ",".join(mentions)
                ),
                "parse": json.dumps([p.value for p in parse]) if parse is not None else None,
                "updateMsgId": update_msg_id
            },
            timeout=self.timeout_s
        )
        try:
            self.__sent_im_cache[response.json()["response"]["data"]["msgId"]] = text
        except (LookupError, TypeError):
            self.log.exception("Error while getting 'msgId'!")
        responses.add(response)
    return tuple(responses)
except ReadTimeout:
    self.log.exception("Timeout while sending request!")
def send_im(self, target, message, mentions=None, parse=None, update_msg_id=None, wrap_length=5000)
Send text message.

:param target: Target user UIN or chat ID.
:param message: Message text.
:param mentions: Iterable with UINs to mention in message.
:param parse: Iterable with several values from
    :class:`icq.constant.MessageParseType` specifying which message items
    should be parsed by target client (making preview, snippets, etc.).
    Specify empty iterable to avoid parsing message at target client.
    By default all types are included.
:param update_msg_id: Message ID to update.
:param wrap_length: Maximum length of symbols in one message. Text
    exceeding this length will be sent in several messages.

:return: Tuple of HTTP responses.
4.001891
3.659959
1.093425
return random.choice(tuple(sequence) if isinstance(sequence, set) else sequence)
def random_choice(sequence)
Same as :meth:`random.choice`, but also supports :class:`set` type to be passed as sequence.
5.383836
3.775205
1.426104
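A quick usage sketch: random.choice raises TypeError on a set because sets don't support indexing, which is exactly the case this helper papers over.

import random

def random_choice(sequence):
    return random.choice(tuple(sequence) if isinstance(sequence, set) else sequence)

print(random_choice([1, 2, 3]))        # plain sequences work as usual
print(random_choice({"a", "b", "c"}))  # a set would crash bare random.choice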
if context is None or not isinstance(context, dict):
    context = {}

markdown_html = _transform_markdown_into_html(text)
sanitised_markdown_html = _sanitise_markdown_html(markdown_html)

return mark_safe(sanitised_markdown_html)
def render_markdown(text, context=None)
Turn markdown into HTML.
3.528884
3.383115
1.043087
warning = (
    "wagtailmarkdown.utils.render() is deprecated. Use "
    "wagtailmarkdown.utils.render_markdown() instead."
)
warnings.warn(warning, WagtailMarkdownDeprecationWarning, stacklevel=2)
return render_markdown(text, context)
def render(text, context=None)
Deprecated call to render_markdown().
3.139538
2.629669
1.193891
def run(self, parent, blocks):
    block = blocks.pop(0).split('\n')
    header = block[0].strip()
    separator = block[1].strip()
    rows = block[2:]

    # Get format type (bordered by pipes or not)
    border = False
    if header.startswith('|'):
        border = True

    # Get alignment of columns
    align = []
    for c in self._split_row(separator, border):
        if c.startswith(':') and c.endswith(':'):
            align.append('center')
        elif c.startswith(':'):
            align.append('left')
        elif c.endswith(':'):
            align.append('right')
        else:
            align.append(None)

    # Build table
    table = etree.SubElement(parent, 'table')
    table.set('class', 'wftable')
    thead = etree.SubElement(table, 'thead')
    self._build_row(header, thead, align, border)
    tbody = etree.SubElement(table, 'tbody')
    for row in rows:
        self._build_row(row.strip(), tbody, align, border)
Parse a table block and build table.
null
null
null
def _build_row(self, row, parent, align, border):
    tr = etree.SubElement(parent, 'tr')
    tag = 'td'
    if parent.tag == 'thead':
        tag = 'th'
    cells = self._split_row(row, border)
    # We use align here rather than cells to ensure every row
    # contains the same number of columns.
    for i, a in enumerate(align):
        c = etree.SubElement(tr, tag)
        try:
            c.text = cells[i].strip()
        except IndexError:
            c.text = ""
        if a:
            c.set('align', a)
Given a row of text, build table cells.
null
null
null
def _split_row(self, row, border):
    if border:
        if row.startswith('|'):
            row = row[1:]
        if row.endswith('|'):
            row = row[:-1]
    return row.split('|')
Split a row of text into a list of cells.
null
null
null
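For illustration, the same splitting logic as a standalone function, applied to a bordered and an unbordered row:

def split_row(row, border):
    # Strip the outer pipes of a bordered row before splitting on '|'.
    if border:
        if row.startswith('|'):
            row = row[1:]
        if row.endswith('|'):
            row = row[:-1]
    return row.split('|')

print(split_row('| a | b | c |', border=True))   # [' a ', ' b ', ' c ']
print(split_row('a | b | c', border=False))      # ['a ', ' b ', ' c']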
def extendMarkdown(self, md, md_globals):
    md.parser.blockprocessors.add('table',
                                  TableProcessor(md.parser),
                                  '<hashheader')
Add an instance of TableProcessor to BlockParser.
null
null
null
static_dirs = set()
for finder in settings.STATICFILES_FINDERS:
    finder = finders.get_finder(finder)

    if hasattr(finder, 'storages'):
        for storage in finder.storages.values():
            static_dirs.add(storage.location)

    if hasattr(finder, 'storage'):
        static_dirs.add(finder.storage.location)

return static_dirs
def get_all_static()
Get all the static files directories found by ``STATICFILES_FINDERS``.

:return: set of paths (top-level folders only)
2.568366
2.213956
1.16008
if self.infile is None and "{infile}" in self.command:
    if self.filename is None:
        self.infile = NamedTemporaryFile(mode='wb', suffix=self.infile_ext)
        self.infile.write(self.content.encode(self.default_encoding))
        self.infile.flush()
        self.options += (
            ('infile', self.infile.name),
        )
return super(BaseCompiler, self).input(**kwargs)
def input(self, **kwargs)
Specify temporary input file extension.

Browserify requires an explicit file extension (".js" or ".json" by default).
https://github.com/substack/node-browserify/issues/1469
4.476688
4.071254
1.099585
'''this hashes all types to a hash without collisions. python's
hashing algorithms are not cross-type compatible, but hashing tuples
with the type as the first element seems to do the trick'''
obj_type = type(obj)
try:
    # this works for hashables
    return hash((obj_type, obj))
except TypeError:  # hash() raises TypeError for unhashable objects
    # this works for object containers since graphdb
    # wants to identify different containers
    # instead of the sum of their current internals
    return hash((obj_type, id(obj)))
def graph_hash(obj)
This hashes all types to a hash without collisions. Python's hashing algorithms are not cross-type compatible, but hashing tuples with the type as the first element seems to do the trick.
18.128866
5.621908
3.224682
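A small demonstration of the property the docstring claims, assuming graph_hash from the record above is in scope. In CPython hash(1) == hash(1.0) == hash(True), but prepending the type separates them in practice; unhashable containers fall back to identity:

assert graph_hash(1) != graph_hash(1.0)
assert graph_hash(1) != graph_hash(True)

xs = [1, 2, 3]
assert graph_hash(xs) == graph_hash(xs)        # same object, stable hash
assert graph_hash(xs) != graph_hash(list(xs))  # equal copy, different id()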
''' use this function to store a python object in the database '''
assert not isinstance(item, RamGraphDBNode)
item_hash = graph_hash(item)
if item_hash not in self.nodes:
    self.nodes[item_hash] = RamGraphDBNode(item)
return self.nodes[item_hash]
def store_item(self, item)
use this function to store a python object in the database
4.933382
4.234187
1.165131
''' use this to store a relation between two objects '''
self.__require_string__(name)
#print('storing relation', src, name, dst)
# make sure both items are stored
self.store_item(src).link(name, self.store_item(dst))
def store_relation(self, src, name, dst)
use this to store a relation between two objects
7.117507
6.282228
1.132959
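A hedged usage sketch; the RamGraphDB constructor name is assumed from the RamGraphDBNode references in these records and is not shown here:

db = RamGraphDB()  # assumed constructor
db.store_relation('alice', 'knows', 'bob')
for src, relation, dst in db.list_relations():
    print(src, relation, dst)  # alice knows bob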
'''
can be used both as (src, relation, target) for a single relation
or (src, relation) to delete all relations of that type from the src
'''
self.__require_string__(relation)
if src in self and target in self:
    self._get_item_node(src).unlink(relation, self._get_item_node(target))
def delete_relation(self, src, relation, target)
Can be used both as (src, relation, target) for a single relation, or as (src, relation) to delete all relations of that type from the src.
8.572301
4.235788
2.02378
''' removes an item from the db '''
for relation, dst in self.relations_of(item, True):
    self.delete_relation(item, relation, dst)
    #print(item, relation, dst)
for src, relation in self.relations_to(item, True):
    self.delete_relation(src, relation, item)
    #print(src, relation, item)
h = self._item_hash(item)
if item in self:
    #print('deleting item:', item)
    self.nodes[h].clear()
    del self.nodes[h]
def delete_item(self, item)
removes an item from the db
3.647026
3.704955
0.984364
''' list all relations that originate from target '''
relations = (target if isinstance(target, RamGraphDBNode) else self._get_item_node(target)).outgoing
if include_object:
    for k in relations:
        for v in relations[k]:
            if hasattr(v, 'obj'):  # filter dead links
                yield k, v.obj
else:
    yield from relations
def relations_of(self, target, include_object=False)
list all relations that originate from target
8.474442
6.957356
1.218055
''' list all relations pointing at an object '''
relations = self._get_item_node(target).incoming
if include_object:
    for k in relations:
        for v in relations[k]:
            if hasattr(v, 'obj'):  # filter dead links
                yield v.obj, k
else:
    yield from relations
def relations_to(self, target, include_object=False)
list all relations pointing at an object
7.014351
6.030647
1.163118
''' display all objects with their (id, value, node) '''
for key in self.nodes:
    node = self.nodes[key]
    value = node.obj
    print(key, '-', repr(value), '-', node)
def show_objects(self)
display all objects with their (id, value, node)
10.201654
4.436408
2.29953
''' list every relation in the database as (src, relation, dst) '''
for node in self.iter_nodes():
    for relation, target in self.relations_of(node.obj, True):
        yield node.obj, relation, target
def list_relations(self)
list every relation in the database as (src, relation, dst)
7.305188
5.109131
1.42983
''' display every relation in the database as (src, relation, dst) '''
for src_node in self.iter_nodes():
    for relation in src_node.outgoing:
        for dst_node in src_node.outgoing[relation]:
            print(repr(src_node.obj), '-', relation, '-', repr(dst_node.obj))
def show_relations(self)
display every relation in the database as (src, relation, dst)
4.556345
3.246459
1.403481
'''
use this to filter VLists, simply provide a filter function
and what relation to apply it to
'''
assert type(relation).__name__ in {'str', 'unicode'}, 'where needs the first arg to be a string'
assert callable(filter_fn), 'filter_fn needs to be callable'
return VList(
    i for i in self
    if relation in i._relations() and any(filter_fn(_()) for _ in i[relation])
)
def where(self, relation, filter_fn)
use this to filter VLists, simply provide a filter function and what relation to apply it to
8.659863
4.820359
1.796518
'''
use this to filter VLists, simply provide a filter function
to filter the current found objects
'''
assert callable(filter_fn), 'filter_fn needs to be callable'
return VList(i for i in self if filter_fn(i()))
def _where(self, filter_fn)
use this to filter VLists, simply provide a filter function to filter the current found objects
11.839749
3.595491
3.292944
'''use this to filter VLists with kv pairs'''
out = self
for k, v in kwargs.items():
    # bind v as a default argument so each filter keeps its own value
    # instead of all closures sharing the loop's final v
    out = out.where(k, lambda i, v=v: i == v)
return out
def _where(self, **kwargs)
use this to filter VLists with kv pairs
9.212753
3.674157
2.507447
'''
creates a file at the given path and sets the permissions
to user only read/write
'''
from os.path import isfile
if not isfile(path):  # only do the following if the file doesn't exist yet
    from os import chmod
    from stat import S_IRUSR, S_IWUSR
    open(path, "a").close()  # create the file
    attempt(lambda: chmod(path, (S_IRUSR | S_IWUSR)))
def _create_file(path='')
creates a file at the given path and sets the permissions to user only read/write
4.714748
3.626764
1.299988
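The same effect can be achieved atomically with os.open, which applies the restrictive mode at creation time instead of chmod-ing afterwards; a sketch, not the record's implementation:

import os

def create_private_file(path):
    # 0o600 = user read/write only; the mode is applied only on creation,
    # and O_CREAT leaves an existing file untouched.
    fd = os.open(path, os.O_CREAT | os.O_WRONLY, 0o600)
    os.close(fd)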
''' use this function to store a python object in the database '''
#print('storing item', item)
item_id = self._id_of(item)
#print('item_id', item_id)
if item_id is None:
    #print('storing item', item)
    blob = self.serialize(item)
    with self._write_lock:
        self._execute(
            'INSERT into objects (code) values (?);',
            (blob,)
        )
        self.autocommit()
def store_item(self, item)
use this function to store a python object in the database
4.532237
4.094862
1.106811
''' removes an item from the db '''
for relation in self.relations_of(item):
    self.delete_relation(item, relation)
for origin, relation in self.relations_to(item, True):
    self.delete_relation(origin, relation, item)
with self._write_lock:
    self._execute('''
        DELETE from objects where code=?
    ''', (self.serialize(item),))
    self.autocommit()
def delete_item(self, item)
removes an item from the db
5.861659
5.974267
0.981151
''' use this to store a relation between two objects '''
self.__require_string__(name)
#print('storing relation', src, name, dst)
# make sure both items are stored
self.store_item(src)
self.store_item(dst)
with self._write_lock:
    #print(locals())
    # run the insertion
    self._execute(
        'insert into relations select ob1.id, ?, ob2.id from objects as ob1, objects as ob2 where ob1.code=? and ob2.code=?;',
        (name, self.serialize(src), self.serialize(dst))
    )
    self.autocommit()
def store_relation(self, src, name, dst)
use this to store a relation between two objects
5.132977
4.88001
1.051837
''' deletes a single relation between objects '''
self.__require_string__(relation)
src_id = self._id_of(src)
dst_id = self._id_of(dst)
with self._write_lock:
    self._execute('''
        DELETE from relations where src=? and name=? and dst=?
    ''', (src_id, relation, dst_id))
    self.autocommit()
def _delete_single_relation(self, src, relation, dst)
deletes a single relation between objects
4.3648
4.399338
0.992149
'''
can be used both as (src, relation, target) for a single relation
or (src, relation) to delete all relations of that type from the src
'''
self.__require_string__(relation)
if len(targets):
    for i in targets:
        self._delete_single_relation(src, relation, i)
else:
    # delete all connections of that relation from src
    for i in list(self.find(src, relation)):
        self._delete_single_relation(src, relation, i)
def delete_relation(self, src, relation, *targets)
Can be used both as (src, relation, target) for a single relation, or as (src, relation) to delete all relations of that type from the src.
4.864101
3.040769
1.599628
''' returns all elements the target has a relation to '''
query = 'select ob1.code from objects as ob1, objects as ob2, relations where relations.dst=ob1.id and relations.name=? and relations.src=ob2.id and ob2.code=?'
# src is id not source :/
for i in self._execute(query, (relation, self.serialize(target))):
    yield self.deserialize(i[0])
def find(self, target, relation)
returns all elements the target has a relation to
7.11492
5.922777
1.201281
''' list all relations that originate from target '''
if include_object:
    _ = self._execute('''
        select relations.name, ob2.code
        from relations, objects as ob1, objects as ob2
        where relations.src=ob1.id and ob2.id=relations.dst and ob1.code=?
    ''', (self.serialize(target),))
    for i in _:
        yield i[0], self.deserialize(i[1])
else:
    _ = self._execute('''
        select distinct relations.name
        from relations, objects
        where relations.src=objects.id and objects.code=?
    ''', (self.serialize(target),))
    for i in _:
        yield i[0]
def relations_of(self, target, include_object=False)
list all relations that originate from target
3.420922
3.084463
1.109082
''' list all relations pointing at an object '''
if include_object:
    _ = self._execute('''
        select name, (select code from objects where id=src)
        from relations
        where dst=?
    ''', (self._id_of(target),))
    for i in _:
        yield self.deserialize(i[1]), i[0]
else:
    _ = self._execute('''
        select distinct name from relations where dst=?
    ''', (self._id_of(target),))
    for i in _:
        yield i[0]
def relations_to(self, target, include_object=False)
list all relations pointing at an object
4.218765
3.876534
1.088283
''' generate tuples containing (relation, object_that_applies) '''
return gen.chain(
    ((r, i) for i in self.find(target, r))
    for r in self.relations_of(target)
)
def connections_of(self, target)
generate tuples containing (relation, object_that_applies)
12.569838
5.298519
2.37233
''' list all objects with their (id, serialized_form, actual_value) '''
for i in self._execute('select * from objects'):
    _id, code = i
    yield _id, code, self.deserialize(code)
def list_objects(self)
list all objects with their (id, serialized_form, actual_value)
15.150829
5.078071
2.98358
''' list every relation in the database as (src, relation, dst) '''
_ = self._execute('select * from relations').fetchall()
for i in _:
    #print(i)
    src, name, dst = i
    src = self.deserialize(
        next(self._execute('select code from objects where id=?', (src,)))[0]
    )
    dst = self.deserialize(
        next(self._execute('select code from objects where id=?', (dst,)))[0]
    )
    yield src, name, dst
def list_relations(self)
list every relation in the database as (src, relation, dst)
3.856265
3.279962
1.175704
# OSX
if platform.system() == "Darwin":
    # scraped from /usr/include, not exported by python's socket module
    TCP_KEEPALIVE = 0x10
    sock.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)
    sock.setsockopt(socket.IPPROTO_TCP, TCP_KEEPALIVE, interval_sec)

if platform.system() == "Windows":
    sock.ioctl(socket.SIO_KEEPALIVE_VALS, (1, 10000, 3000))

if platform.system() == "Linux":
    sock.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)
    sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_KEEPIDLE, after_idle_sec)
    sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_KEEPINTVL, interval_sec)
    sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_KEEPCNT, max_fails)
def set_keep_alive(self, sock, after_idle_sec=5, interval_sec=60, max_fails=5)
This function instructs the TCP socket to send a heartbeat every n seconds to detect dead connections. It's the TCP equivalent of the IRC ping-pong protocol and allows for better cleanup / detection of dead TCP connections. Keepalive activates after ``after_idle_sec`` seconds of idleness, then sends a keepalive ping every ``interval_sec`` seconds, and closes the connection after ``max_fails`` failed pings.
1.888818
1.934698
0.976286
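A usage sketch for the Linux branch; the TCP_KEEP* constants exist in the stdlib socket module on Linux, so they are guarded with hasattr for other platforms:

import socket

sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)
if hasattr(socket, "TCP_KEEPIDLE"):  # Linux-only constants
    sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_KEEPIDLE, 5)    # idle secs
    sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_KEEPINTVL, 60)  # probe gap
    sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_KEEPCNT, 5)     # max fails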
buf_len = len(self.buf)
replies = []
reply = b""
chop = 0
skip = 0
i = 0
buf_len = len(self.buf)
for i in range(0, buf_len):
    ch = self.buf[i:i + 1]
    if skip:
        skip -= 1
        i += 1
        continue

    nxt = i + 1
    if nxt < buf_len:
        if ch == b"\r" and self.buf[nxt:nxt + 1] == b"\n":
            # Append new reply.
            if reply != b"":
                if encoding == "unicode":
                    replies.append(encode_str(reply, encoding))
                else:
                    replies.append(reply)
                reply = b""

            # Truncate the whole buf if chop is out of bounds.
            chop = nxt + 1
            skip = 1
            i += 1
            continue

    reply += ch
    i += 1

# Truncate buf.
if chop:
    self.buf = self.buf[chop:]

return replies
def parse_buf(self, encoding="unicode")
Since TCP is a stream-oriented protocol, responses aren't guaranteed to be complete when they arrive. The buffer stores all the data and this function splits the data into replies based on the newline delimiter.
2.895297
2.743449
1.055349
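The byte-by-byte scan above amounts to splitting on the CRLF delimiter while keeping the unterminated tail buffered; a compact standalone sketch of that behaviour:

def split_replies(buf):
    # Everything after the last CRLF is an incomplete reply: keep it.
    *complete, rest = buf.split(b"\r\n")
    return [r for r in complete if r != b""], rest

replies, rest = split_replies(b"PING\r\nPONG\r\npartial")
assert replies == [b"PING", b"PONG"] and rest == b"partial"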
# Socket is disconnected.
if not self.connected:
    return

# Recv chunks until network buffer is empty.
repeat = 1
wait = 0.2
chunk_no = 0
max_buf = self.max_buf
max_chunks = self.max_chunks
if fixed_limit is not None:
    max_buf = fixed_limit
    max_chunks = fixed_limit

while repeat:
    chunk_size = self.chunk_size
    while True:
        # Don't exceed buffer size.
        buf_len = len(self.buf)
        if buf_len >= max_buf:
            break
        remaining = max_buf - buf_len
        if remaining < chunk_size:
            chunk_size = remaining

        # Don't allow non-blocking sockets to be
        # DoSed by multiple small replies.
        if chunk_no >= max_chunks and not self.blocking:
            break

        try:
            chunk = self.s.recv(chunk_size)
        except socket.timeout as e:
            self.debug_print("Get chunks timed out.")
            self.debug_print(e)

            # Timeout on blocking sockets.
            err = e.args[0]
            self.debug_print(err)
            if err == "timed out":
                repeat = 0
                break
        except ssl.SSLError as e:
            # Will block on non-blocking SSL sockets.
            if e.errno == ssl.SSL_ERROR_WANT_READ:
                self.debug_print("SSL_ERROR_WANT_READ")
                break
            else:
                self.debug_print("Get chunks ssl error")
                self.close()
                return
        except socket.error as e:
            # Will block on nonblocking non-SSL sockets.
            err = e.args[0]
            if err == errno.EAGAIN or err == errno.EWOULDBLOCK:
                break
            else:
                # Connection closed or other problem.
                self.debug_print("get chunks other closing")
                self.close()
                return
        else:
            if chunk == b"":
                self.close()
                return

            # Avoid decoding errors.
            self.buf += chunk

            # Otherwise the loop will be endless.
            if self.blocking:
                break

            # Used to avoid DoS of small packets.
            chunk_no += 1

    # Repeat is already set -- manual skip.
    if not repeat:
        break
    else:
        repeat = 0

    # Block until there's a full reply or there's a timeout.
    if self.blocking:
        if fixed_limit is None:
            # Partial response.
            if self.delimiter not in self.buf:
                repeat = 1
                time.sleep(wait)
def get_chunks(self, fixed_limit=None, encoding="unicode")
This is the function which handles retrieving new data chunks. Its main logic is avoiding a recv call blocking forever and halting the program flow. To do this, it manages errors and keeps an eye on the buffer to avoid overflows and DoS attacks.
http://stackoverflow.com/questions/16745409/what-does-pythons-socket-recv-return-for-non-blocking-sockets-if-no-data-is-r
http://stackoverflow.com/questions/3187565/select-and-ssl-in-python
3.730265
3.624535
1.029171
def validate_node(self, node_ip, node_port=None, same_nodes=1):
    self.debug_print("Validating: " + node_ip)

    # Is this a valid IP?
    if not is_ip_valid(node_ip) or node_ip == "0.0.0.0":
        self.debug_print("Invalid node ip in validate node")
        return 0

    # Is this a valid port?
    if node_port != 0 and node_port is not None:
        if not is_valid_port(node_port):
            self.debug_print("Invalid node port in validate port")
            return 0

    if not self.enable_duplicate_ip_cons:
        # Don't connect to ourself.
        if (node_ip == "127.0.0.1" or
                node_ip == get_lan_ip(self.interface) or
                node_ip == self.wan_ip):
            self.debug_print("Cannot connect to ourself.")
            return 0

        # No, really: don't connect to ourself.
        if node_ip == self.passive_bind and node_port == self.passive_port:
            self.debug_print("Error connecting to same listen server.")
            return 0

        # Don't connect to same nodes.
        if same_nodes:
            for node in self.outbound + self.inbound:
                try:
                    addr, port = node["con"].s.getpeername()
                    if node_ip == addr:
                        self.debug_print("Already connected to this node.")
                        return 0
                except Exception as e:
                    print(e)
                    return 0

    return 1
Don't accept connections from self to passive server or connections to already connected nodes.
null
null
null
def bootstrap(self):
    # Disable bootstrap.
    if not self.enable_bootstrap:
        return None

    # Avoid raping the rendezvous server.
    t = time.time()
    if self.last_bootstrap is not None:
        if t - self.last_bootstrap <= rendezvous_interval:
            self.debug_print("Bootstrapped recently")
            return None
    self.last_bootstrap = t
    self.debug_print("Searching for nodes to connect to.")

    try:
        connection_slots = self.max_outbound - (len(self.outbound))
        if connection_slots > 0:
            # Connect to rendezvous server.
            rendezvous_con = self.rendezvous.server_connect()

            # Retrieve random nodes to bootstrap with.
            rendezvous_con.send_line("BOOTSTRAP " +
                                     str(self.max_outbound * 2))
            choices = rendezvous_con.recv_line(timeout=2)
            if choices == "NODES EMPTY":
                rendezvous_con.close()
                self.debug_print("Node list is empty.")
                return self
            else:
                self.debug_print("Found node list.")

            # Parse node list.
            choices = re.findall("(?:(p|s)[:]([0-9]+[.][0-9]+[.][0-9]+[.][0-9]+)[:]([0-9]+))+\s?", choices)
            rendezvous_con.s.close()

            # Attempt to make active simultaneous connections.
            passive_nodes = []
            for node in choices:
                # Out of connection slots.
                if not connection_slots:
                    break

                # Add to list of passive nodes.
                node_type, node_ip, node_port = node
                self.debug_print(str(node))
                if node_type == "p":
                    passive_nodes.append(node)

            # Use passive to make up the remaining cons.
            i = 0
            while i < len(passive_nodes) and connection_slots > 0:
                node_type, node_ip, node_port = passive_nodes[i]
                con = self.add_node(node_ip, node_port, "passive")
                if con is not None:
                    connection_slots -= 1
                    self.debug_print("Con successful.")
                else:
                    self.debug_print("Con failed.")

                i += 1
    except Exception as e:
        self.debug_print("Unknown error in bootstrap()")
        error = parse_exception(e)
        log_exception(self.error_log_path, error)

    return self
When the software is first started, it needs to retrieve a list of nodes to connect to the network. This function asks the server for N nodes, consisting of at least N passive nodes and N simultaneous nodes. The simultaneous nodes are prioritized if the node_type for the machine running this software is simultaneous, with passive nodes being used as a fallback. Otherwise, the node exclusively uses passive nodes to bootstrap. This algorithm is designed to preserve passive nodes' inbound connection slots.
null
null
null
def advertise(self):
    # Advertise is disabled.
    if not self.enable_advertise:
        self.debug_print("Advertise is disabled!")
        return None

    # Direct net server is reserved for direct connections only.
    if self.net_type == "direct" and self.node_type == "passive":
        return None

    # Net isn't started!
    if not self.is_net_started:
        raise Exception("Please call start() before you call advertise()")

    # Avoid raping the rendezvous server with excessive requests.
    t = time.time()
    if self.last_advertise is not None:
        if t - self.last_advertise <= advertise_interval:
            return None

        if len(self.inbound) >= self.min_connected:
            return None

    self.last_advertise = t

    # Tell rendezvous server to list us.
    try:
        # We're a passive node.
        if self.node_type == "passive" and \
                self.passive_port is not None and \
                self.enable_advertise:
            self.rendezvous.passive_listen(self.passive_port,
                                           self.max_inbound)

        if self.node_type == "simultaneous":
            self.rendezvous.simultaneous_listen()
    except Exception as e:
        error = parse_exception(e)
        log_exception(self.error_log_path, error)

    return self
This function tells the rendezvous server that our node is ready to accept connections from other nodes on the P2P network that run the bootstrap function. It's only used when net_type == p2p
null
null
null
def determine_node(self):
    # Manually set node_type as simultaneous.
    if self.node_type == "simultaneous":
        if self.nat_type != "unknown":
            return "simultaneous"

    # Get IP of binding interface.
    unspecific_bind = ["0.0.0.0", "127.0.0.1", "localhost"]
    if self.passive_bind in unspecific_bind:
        lan_ip = get_lan_ip(self.interface)
    else:
        lan_ip = self.passive_bind

    # Passive node checks.
    if lan_ip is not None \
            and self.passive_port is not None and self.enable_forwarding:
        self.debug_print("Checking if port is forwarded.")

        # Check port isn't already forwarded.
        if is_port_forwarded(lan_ip, self.passive_port, "TCP",
                             self.forwarding_servers):
            msg = "Port already forwarded. Skipping NAT traversal."
            self.debug_print(msg)
            self.forwarding_type = "forwarded"
            return "passive"
        else:
            self.debug_print("Port is not already forwarded.")

        # Most routers.
        try:
            self.debug_print("Trying UPnP")
            UPnP(self.interface).forward_port("TCP", self.passive_port,
                                              lan_ip)
            if is_port_forwarded(lan_ip, self.passive_port, "TCP",
                                 self.forwarding_servers):
                self.forwarding_type = "UPnP"
                self.debug_print("Forwarded port with UPnP.")
            else:
                self.debug_print("UPnP failed to forward port.")
        except Exception as e:
            # Log exception.
            error = parse_exception(e)
            log_exception(self.error_log_path, error)
            self.debug_print("UPnP failed to forward port.")

        # Apple devices.
        try:
            self.debug_print("Trying NATPMP.")
            NatPMP(self.interface).forward_port("TCP", self.passive_port,
                                                lan_ip)
            if is_port_forwarded(lan_ip, self.passive_port, "TCP",
                                 self.forwarding_servers):
                self.forwarding_type = "NATPMP"
                self.debug_print("Port forwarded with NATPMP.")
            else:
                self.debug_print("Failed to forward port with NATPMP.")
                self.debug_print("Falling back on TCP hole punching or"
                                 " proxying.")
        except Exception as e:
            # Log exception
            error = parse_exception(e)
            log_exception(self.error_log_path, error)
            self.debug_print("Failed to forward port with NATPMP.")

        # Check it worked.
        if self.forwarding_type != "manual":
            return "passive"

    # Fail-safe node types.
    if self.nat_type != "unknown":
        return "simultaneous"
    else:
        return "active"
Determines the type of node based on a combination of forwarding reachability and NAT type.
null
null
null
def start(self):
    self.debug_print("Starting networking.")
    self.debug_print("Make sure to iterate over replies if you need"
                     " connection alive management!")

    # Register a ctrl + c handler.
    signal.signal(signal.SIGINT, self.stop)

    # Save WAN IP.
    self.debug_print("WAN IP = " + str(self.wan_ip))

    # Check rendezvous server is up.
    try:
        rendezvous_con = self.rendezvous.server_connect()
        rendezvous_con.close()
    except:
        raise Exception("Unable to connect to rendezvous server.")

    # Started no matter what
    # since LAN connections are always possible.
    self.start_passive_server()

    # Determine NAT type.
    if self.nat_type == "unknown":
        self.debug_print("Determining NAT type.")
        nat_type = self.rendezvous.determine_nat()
        if nat_type is not None and nat_type != "unknown":
            self.nat_type = nat_type
            self.rendezvous.nat_type = nat_type
            self.debug_print("NAT type = " + nat_type)
        else:
            self.debug_print("Unable to determine NAT type.")

    # Check NAT type if node is simultaneous
    # is manually specified.
    if self.node_type == "simultaneous":
        if self.nat_type not in self.rendezvous.predictable_nats:
            self.debug_print("Manual setting of simultanous specified but"
                             " ignored since NAT does not support it.")
            self.node_type = "active"
    else:
        # Determine node type.
        self.debug_print("Determining node type.")

        # No checks for manually specifying passive
        # (there probably should be.)
        if self.node_type == "unknown":
            self.node_type = self.determine_node()

    # Prevent P2P nodes from running as simultaneous.
    if self.net_type == "p2p":
        if self.node_type == "simultaneous":
            self.debug_print("Simultaneous is not allowed for P2P")
            self.node_type = "active"
            self.disable_simultaneous()

    self.debug_print("Node type = " + self.node_type)

    # Close stray cons from determine_node() tests.
    self.close_cons()

    # Set net started status.
    self.is_net_started = 1

    # Initialise our UNL details.
    self.unl = UNL(
        net=self,
        dht_node=self.dht_node,
        wan_ip=self.wan_ip
    )

    # Nestled calls.
    return self
This function determines node and NAT type, saves connectivity details, and starts any needed servers to be a part of the network. This is usually the first function called after initialising the Net class.
null
null
null
def stop(self, signum=None, frame=None):
    self.debug_print("Stopping networking.")

    if self.passive is not None:
        try:
            self.passive.shutdown(1)
        except:
            pass
        self.passive.close()
        self.passive = None

    if self.last_advertise is not None:
        self.rendezvous.leave_fight()

    for con in self:
        con.close()

    if signum is not None:
        raise Exception("Process was interrupted.")
Just let the threads time out by themselves. Otherwise mutex deadlocks could occur.
# for unl_thread in self.unl.unl_threads:
#     unl_thread.exit()
null
null
null
def send_remote_port(self):
    msg = "REMOTE TCP %s" % (str(self.transport.getPeer().port))
    self.send_line(msg)
Sends the remote port mapped for the connection. This port is surprisingly often the same as the locally bound port for an endpoint because a lot of NAT types preserve the port.
null
null
null
def cleanup_candidates(self, node_ip):
    if node_ip in self.factory.candidates:
        old_candidates = []
        for candidate in self.factory.candidates[node_ip]:
            elapsed = int(time.time() - candidate["time"])
            if elapsed > self.challege_timeout:
                old_candidates.append(candidate)

        for candidate in old_candidates:
            self.factory.candidates[node_ip].remove(candidate)
Removes old TCP hole punching candidates for a designated node if a certain amount of time has passed since they last connected.
null
null
null
def propogate_candidates(self, node_ip):
    if node_ip in self.factory.candidates:
        old_candidates = []
        for candidate in self.factory.candidates[node_ip]:
            # Not connected.
            if not candidate["con"].connected:
                continue

            # Already sent -- updated when they accept this challenge.
            if candidate["propogated"]:
                continue

            # Notify node of challege from client.
            msg = "CHALLENGE %s %s %s" % (
                candidate["ip_addr"],
                " ".join(map(str, candidate["predictions"])),
                candidate["proto"])
            self.factory.nodes["simultaneous"][node_ip]["con"].\
                send_line(msg)
            old_candidates.append(candidate)
Used to propagate new candidates to passive simultaneous nodes.
null
null
null
def synchronize_simultaneous(self, node_ip):
    for candidate in self.factory.candidates[node_ip]:
        # Only if candidate is connected.
        if not candidate["con"].connected:
            continue

        # Synchronise simultaneous node.
        if candidate["time"] - \
                self.factory.nodes["simultaneous"][node_ip]["time"] > \
                self.challege_timeout:
            msg = "RECONNECT"
            self.factory.nodes["simultaneous"][node_ip]["con"].\
                send_line(msg)
            return

    self.cleanup_candidates(node_ip)
    self.propogate_candidates(node_ip)
Because adjacent mappings for certain NAT types can be stolen by other connections, the purpose of this function is to ensure the last connection by a passive simultaneous node is recent compared to the time for a candidate, to increase the chance that the predicted mappings remain active for the TCP hole punching attempt.
null
null
null
def connectionLost(self, reason):
    try:
        self.connected = False
        if debug:
            print(self.log_entry("CLOSED =", "none"))

        # Every five minutes: cleanup
        t = time.time()
        if time.time() - self.factory.last_cleanup >= self.cleanup:
            self.factory.last_cleanup = t

            # Delete old passive nodes.
            old_node_ips = []
            for node_ip in list(self.factory.nodes["passive"]):
                passive_node = self.factory.nodes["passive"][node_ip]
                # Gives enough time for passive nodes to receive clients.
                if t - passive_node["time"] >= self.node_lifetime:
                    old_node_ips.append(node_ip)
            for node_ip in old_node_ips:
                del self.factory.nodes["passive"][node_ip]

            # Delete old simultaneous nodes.
            old_node_ips = []
            for node_ip in list(self.factory.nodes["simultaneous"]):
                simultaneous_node = \
                    self.factory.nodes["simultaneous"][node_ip]
                # Gives enough time for passive nodes to receive clients.
                if t - simultaneous_node["time"] >= self.node_lifetime:
                    old_node_ips.append(node_ip)
            for node_ip in old_node_ips:
                del self.factory.nodes["simultaneous"][node_ip]

            # Delete old candidates and candidate structs.
            old_node_ips = []
            for node_ip in list(self.factory.candidates):
                # Record old candidates.
                old_candidates = []
                for candidate in self.factory.candidates[node_ip]:
                    # Hole punching is ms time sensitive.
                    # Candidates older than this is safe to assume
                    # they're not needed.
                    if node_ip not in self.factory.nodes["simultaneous"] \
                            and t - candidate["time"] >= self.challenge_timeout * 5:
                        old_candidates.append(candidate)

                # Remove old candidates.
                for candidate in old_candidates:
                    self.factory.candidates[node_ip].remove(candidate)

                # Record old node IPs.
                if not len(self.factory.candidates[node_ip]) and \
                        node_ip not in self.factory.nodes["simultaneous"]:
                    old_node_ips.append(node_ip)

            # Remove old node IPs.
            for node_ip in old_node_ips:
                del self.factory.candidates[node_ip]
    except Exception as e:
        error = parse_exception(e)
        log_exception(error_log_path, error)
        print(self.log_entry("ERROR =", error))
Mostly handles clean-up of node + candidate structures. Avoids memory exhaustion for a large number of connections.
null
null
null
def get_external_ip(self):
    random.shuffle(self.server_list)
    myip = ''
    for server in self.server_list[:3]:
        myip = self.fetch(server)
        if myip != '':
            return myip
        else:
            continue
    return ''
This function gets your IP from a random server
null
null
null
def fetch(self, server):
    t = None
    socket_default_timeout = socket.getdefaulttimeout()
    opener = urllib.build_opener()
    opener.addheaders = [('User-agent',
                          "Mozilla/5.0 (X11; Linux x86_64; rv:24.0)"
                          " Gecko/20100101 Firefox/24.0")]
    try:
        # Close url resource if fetching not finished within timeout.
        t = Timer(self.timeout, self.handle_timeout, [self.url])
        t.start()

        # Open URL.
        if version_info[0:2] == (2, 5):
            # Support for Python 2.5.* using socket hack
            # (Changes global socket timeout.)
            socket.setdefaulttimeout(self.timeout)
            self.url = opener.open(server)
        else:
            self.url = opener.open(server, timeout=self.timeout)

        # Read response.
        content = self.url.read()

        # Didn't want to import chardet. Preferred to stick to stdlib
        if PY3K:
            try:
                content = content.decode('UTF-8')
            except UnicodeDecodeError:
                content = content.decode('ISO-8859-1')

        p = '(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.('
        p += '25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.(25[0-5]|2[0-4][0-9]|['
        p += '01]?[0-9][0-9]?)\.(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)'
        m = re.search(p, content)
        myip = m.group(0)
        if len(myip) > 0:
            return myip
        else:
            return ''
    except Exception as e:
        print(e)
        return ''
    finally:
        if self.url is not None:
            self.url.close()
            self.url = None
        if t is not None:
            t.cancel()

        # Reset default socket timeout.
        if socket.getdefaulttimeout() != socket_default_timeout:
            socket.setdefaulttimeout(socket_default_timeout)
This function gets your IP from a specific server
null
null
null
def connect(self, their_unl, events, force_master=1, hairpin=1,
            nonce="0" * 64):
    parms = (their_unl, events, force_master, hairpin, nonce)
    t = Thread(target=self.connect_handler, args=parms)
    t.start()
    self.unl_threads.append(t)
A new thread is spawned because many of the connection techniques rely on sleep to determine connection outcome or to synchronise hole punching techniques. If the sleep is in its own thread it won't block main execution.
null
null
null
n = self.statx_n(self.data_points)
if n < 1:
    return Decimal("0")

avg = self.statx_avg(self.data_points)
sdev = self.statx_sdev(self.data_points)

for k in range(0, self.clean_steps):
    min_val = avg - sdev
    max_val = avg + sdev

    cleaned_data_points = []
    for i in range(0, n):
        v = self.data_points[i]
        if v < min_val or v > max_val:
            continue
        cleaned_data_points.append(v)

    self.data_points = cleaned_data_points[:]
    n = self.statx_n(self.data_points)
    if n < 2:
        break

    avg = self.statx_avg(self.data_points)
    sdev = self.statx_sdev(self.data_points)
    if sdev <= self.max_sdev or n < self.min_data:
        break

if sdev > self.max_sdev or n < self.min_data:
    return Decimal("0")

return avg
def calculate_clock_skew(self)
Compute average and standard deviation using all the data points.
2.595875
2.50814
1.03498
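The loop above is sigma clipping: repeatedly discard samples more than one standard deviation from the mean and recompute. A minimal standalone version using the statistics module:

from statistics import mean, stdev

def sigma_clip(points, steps=3):
    for _ in range(steps):
        if len(points) < 2:
            break
        avg, sd = mean(points), stdev(points)
        points = [p for p in points if avg - sd <= p <= avg + sd]
    return mean(points) if points else 0.0

print(sigma_clip([0.01, 0.02, 0.015, 9.0]))  # the 9.0 outlier is discarded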
def attend_fight(self, mappings, node_ip, predictions, ntp):
    # Bind listen server socket.
    mappings = self.add_listen_sock(mappings)
    log.debug(mappings)

    # Walk to fight.
    self.simultaneous_cons = []
    predictions = predictions.split(" ")
    self.simultaneous_fight(mappings, node_ip, predictions, ntp)

    # Return hole made in opponent.
    if len(self.simultaneous_cons):
        # Close unneeded holes.
        try:
            # Return open hole.
            return self.simultaneous_cons[0]
        except:
            # Try accept a connection.
            log.debug("No holes found")
            for mapping in mappings:
                # Check if there's a new con.
                s = mapping["listen"]
                r, w, e = select.select(
                    [s],
                    [],
                    [],
                    0
                )

                # Find socket.
                for found_sock in r:
                    # Not us.
                    if found_sock != s:
                        continue

                    # Accept a new con from the listen queue.
                    log.debug("Accept logic works!")
                    client, address = s.accept()
                    con = Sock(blocking=0)
                    con.set_sock(client)
                    return con

    return None
This function is for starting and managing a fight once the details are known. It also handles the task of returning any valid connections (if any) that may be returned from threads in the simultaneous_fight function.
null
null
null
def sequential_connect(self):
    # Connect to rendezvous server.
    try:
        mappings = sequential_bind(self.mapping_no + 1, self.interface)
        con = self.server_connect(mappings[0]["sock"])
    except Exception as e:
        log.debug(e)
        log.debug("this err")
        return None

    # First mapping is used to talk to server.
    mappings.remove(mappings[0])

    # Receive port mapping.
    msg = "SOURCE TCP %s" % (str(mappings[0]["source"]))
    con.send_line(msg)
    reply = con.recv_line(timeout=2)
    remote_port = self.parse_remote_port(reply)
    if not remote_port:
        return None

    # Generate port predictions.
    predictions = ""
    if self.nat_type != "random":
        mappings = self.predict_mappings(mappings)
        for mapping in mappings:
            predictions += str(mapping["remote"]) + " "
        predictions = predictions.rstrip()
    else:
        predictions = "1337"

    return [con, mappings, predictions]
Sequential connect is designed to return a connection to the Rendezvous Server but it does so in a way that the local port ranges (both for the server and used for subsequent hole punching) are allocated sequentially and predictably. This is because Delta+1 type NATs only preserve the delta value when the source ports increase by one.
null
null
null
def simultaneous_listen(self):
    # Close socket.
    if self.server_con is not None:
        self.server_con.s.close()
        self.server_con = None

    # Reset predictions + mappings.
    self.mappings = None
    self.predictions = None

    # Connect to rendezvous server.
    parts = self.sequential_connect()
    if parts is None:
        return 0
    con, mappings, predictions = parts
    con.blocking = 0
    con.timeout = 0
    con.s.settimeout(0)
    self.server_con = con
    self.mappings = mappings
    self.predictions = predictions

    # Register simultaneous node with server.
    msg = "SIMULTANEOUS READY 0 0"
    ret = self.server_con.send_line(msg)
    if not ret:
        return 0

    return 1
This function is called by passive simultaneous nodes who wish to establish themselves as such. It sets up a connection to the Rendezvous Server to monitor for new hole punching requests.
null
null
null
def predict_mappings(self, mappings):
    if self.nat_type not in self.predictable_nats:
        msg = "Can't predict mappings for non-predictable NAT type."
        raise Exception(msg)

    for mapping in mappings:
        mapping["bound"] = mapping["sock"].getsockname()[1]

        if self.nat_type == "preserving":
            mapping["remote"] = mapping["source"]

        if self.nat_type == "delta":
            max_port = 65535
            mapping["remote"] = int(mapping["source"]) + self.delta

            # Overflow or underflow = wrap port around.
            if mapping["remote"] > max_port:
                mapping["remote"] -= max_port
            if mapping["remote"] < 0:
                mapping["remote"] = max_port - -mapping["remote"]

            # Unknown error.
            if mapping["remote"] < 1 or mapping["remote"] > max_port:
                mapping["remote"] = 1

            mapping["remote"] = str(mapping["remote"])

    return mappings
This function is used to predict the remote ports that a NAT will map a local connection to. It requires the NAT type to be determined before use. Current support for preserving and delta type mapping behaviour.
null
null
null
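The delta arithmetic in isolation, showing the wrap-around at the port boundary (a sketch of the same logic as a pure function):

def predict_delta_port(source_port, delta, max_port=65535):
    remote = source_port + delta
    if remote > max_port:      # overflow: wrap past the top
        remote -= max_port
    if remote < 0:             # underflow: wrap below zero
        remote = max_port + remote
    return remote if 1 <= remote <= max_port else 1

print(predict_delta_port(65534, 3))  # 2
print(predict_delta_port(2, -3))     # 65534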
def throw_punch(self, args, tries=1):
    # Parse arguments.
    if len(args) != 3:
        return 0
    sock, node_ip, remote_port = args
    if sock is None or node_ip is None or remote_port is None:
        return 0

    # Generous timeout.
    con = Sock(blocking=1, interface=self.interface)
    con.set_sock(sock)

    local = 0
    if is_ip_private(node_ip):
        tries = 20  # 20
        local = 1

    source_port = sock.getsockname()[1]
    error = 0
    log.debug("Throwing punch")
    for i in range(0, tries):
        # Attempt to connect.
        try:
            con.connect(node_ip, remote_port)
            log.debug("Sim open success!")

            # FATALITY.
            # Atomic operation so mutex not required.
            # Record hole made.
            con.set_blocking(blocking=0, timeout=5)
            self.simultaneous_cons.append(con)
            return 1
        except Exception as e:
            # Punch was blocked, opponent is strong.
            e = str(parse_exception(e))
            log.debug(e)
            error = 1
            continue

    if error:
        sock.close()

    return 0
Attempt to open a hole by TCP hole punching. This function is called by the simultaneous fight function and it's the code that handles doing the actual hole punching / connecting.
null
null
null
def simultaneous_fight(self, my_mappings, node_ip, predictions, origin_ntp):
    # Get current network time accurate to
    # ~50 ms over WAN (apparently.)
    p = request_priority_execution()
    log.debug("Getting NTP")
    if self.sys_clock is not None:
        our_ntp = self.sys_clock.time()
    else:
        our_ntp = get_ntp()
    log.debug("Our ntp = " + str(our_ntp))
    if our_ntp is None:
        return 0

    # Synchronize code execution to occur at their NTP time + delay.
    current = float(our_ntp)
    future = float(origin_ntp) + float(self.ntp_delay)
    sleep_time = future - current

    # Check sleep time:
    log.debug("Waiting for fight")
    if sleep_time < 0:
        log.debug("We missed the meeting! It happened " +
                  str(-sleep_time) + "seconds ago!")
        return 0
    if sleep_time >= 300:
        log.debug("Future sleep time is too great!")
        return 0

    busy_wait(sleep_time)
    release_priority_execution(p)
    log.debug("At fight")

    # Can you dodge my special?
    threads = []
    log.debug("Mapping len " + str(len(my_mappings)))
    for mapping in my_mappings:
        # Tried all predictions.
        prediction_len = len(predictions)
        if not prediction_len:
            break

        # Throw punch.
        prediction = predictions[0]
        if self.nat_type == "delta":
            self.throw_punch([mapping["sock"], node_ip, prediction])
        else:
            # Thread params.
            args = ([
                mapping["sock"],
                node_ip,
                prediction
            ], 20)

            # Start thread.
            t = Thread(
                target=self.throw_punch,
                args=args
            )
            threads.append(t)
            t.start()

        predictions.remove(prediction)

    # Wait for threads to finish.
    for t in threads:
        t.join()

    return 1
TCP hole punching algorithm. It uses network time servers to synchronize two nodes to connect to each other on their predicted remote ports at the exact same time. One thing to note is how sensitive TCP hole punching is to timing. To open a successful connection both sides need to have their SYN packets cross the NAT before the other side's SYN arrives. Round-trip time for connections is 0 - 1000ms depending on proximity. That's a very small margin of error for hole punching, hence using NTP. See "TCP Hole Punching" http://www.ietf.org/rfc/rfc5128.txt and http://en.wikipedia.org/wiki/TCP_hole_punching for more details.
null
null
null
def simultaneous_challenge(self, node_ip, node_port, proto):
    parts = self.sequential_connect()
    if parts is None:
        log.debug("Sequential connect failed")
        return None
    con, mappings, predictions = parts

    # Tell server to list ourselves as a candidate for node.
    msg = "CANDIDATE %s %s %s" % (node_ip, str(proto), predictions)
    con.send_line(msg)
    reply = con.recv_line(timeout=10)
    log.debug(reply)
    if "PREDICTION SET" not in reply:
        log.debug("Prediction set failed")
        return None

    # Wait for node to accept and give us fight time.
    # FIGHT 192.168.0.1 4552 345 34235 TCP 123123123.1
    reply = con.recv_line(timeout=10)
    log.debug(reply)
    con.s.close()

    p = "^FIGHT ([0-9]+[.][0-9]+[.][0-9]+[.][0-9]+) ((?:[0-9]+\s?)+)"
    p += " (TCP|UDP) ([0-9]+(?:[.][0-9]+)?)$"
    parts = re.findall(p, reply)
    if not len(parts):
        log.debug("Invalid parts length")
        return None
    node_ip, predictions, proto, ntp = parts[0]

    log.debug("Received fight details")
    log.debug(str(parts[0]))
    log.debug("Attending fight now")
    return self.attend_fight(mappings, node_ip, predictions, ntp)
Used by active simultaneous nodes to attempt to initiate a simultaneous open to a compatible node after retrieving its details from bootstrapping. The function advertises itself as a potential candidate to the server for the designated node_ip. It also waits for a response from the node (if any) and attends any arranged fights.
null
null
null
def parse_remote_port(self, reply):
    remote_port = re.findall("^REMOTE (TCP|UDP) ([0-9]+)$", reply)
    if not len(remote_port):
        remote_port = 0
    else:
        remote_port = int(remote_port[0][1])
        if remote_port < 1 or remote_port > 65535:
            remote_port = 0
    return remote_port
Parses a remote port from a Rendezvous Server's response.
null
null
null
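The same parsing logic as a standalone function with its edge cases exercised:

import re

def parse_remote_port(reply):
    match = re.findall(r"^REMOTE (TCP|UDP) ([0-9]+)$", reply)
    if not match:
        return 0
    port = int(match[0][1])
    return port if 1 <= port <= 65535 else 0

assert parse_remote_port("REMOTE TCP 35000") == 35000
assert parse_remote_port("REMOTE TCP 99999") == 0  # out of range
assert parse_remote_port("garbage") == 0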
if port is None or port < 1024 or port > 65535:
    port = random.randint(1024, 65535)
assert(1024 <= port <= 65535)
while True:
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    try:
        s.bind(('', port))  # Try to open port
    except socket.error as e:
        if e.errno in (98, 10048):  # 98, 10048 means address already bound
            return get_unused_port(None)
        raise e
    s.close()
    return port
def get_unused_port(port=None)
Returns an unused port: if the given port is free it is returned, otherwise (or if no port is given) a random unused port is chosen instead.
2.170149
2.105793
1.030561
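Typical usage. Note the result is only a best-effort hint: another process can grab the port between this check and your own bind (a classic time-of-check / time-of-use race):

port = get_unused_port()          # random free port in 1024-65535
web_port = get_unused_port(8080)  # 8080 if free, otherwise a random port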
if sys.version_info < (3, 0, 0):
    if type(interface) == str:
        interface = unicode(interface)
else:
    if type(interface) == bytes:
        interface = interface.decode("utf-8")

# Get ID of interface that handles WAN stuff.
default_gateway = get_default_gateway(interface)
gateways = netifaces.gateways()
wan_id = None
if netifaces.AF_INET in gateways:
    gw_list = gateways[netifaces.AF_INET]
    for gw_info in gw_list:
        if gw_info[0] == default_gateway:
            wan_id = gw_info[1]
            break

# Find LAN IP of interface for WAN stuff.
interfaces = netifaces.interfaces()
if wan_id in interfaces:
    families = netifaces.ifaddresses(wan_id)
    if netifaces.AF_INET in families:
        if_info_list = families[netifaces.AF_INET]
        for if_info in if_info_list:
            if "addr" in if_info:
                return if_info["addr"]

if platform.system() == "Linux":
    if ip is not None:
        return ip.routes["8.8.8.8"]["prefsrc"]

return None
def get_lan_ip(interface="default")
Execution may reach here if the host is using virtual interfaces on Linux and there are no gateways, which suggests the host is a VPS or server. In this case, fall back to the kernel routing table and return the preferred source address for a public route.
2.958377
2.987049
0.990401
if n == 2:
    try:
        ip = myip()
        ip = extract_ip(ip)
        if is_ip_valid(ip):
            return ip
    except Exception as e:
        print(str(e))
        return None

# Fail-safe: use centralized server for IP lookup.
from pyp2p.net import forwarding_servers
for forwarding_server in forwarding_servers:
    url = "http://" + forwarding_server["addr"] + ":"
    url += str(forwarding_server["port"])
    url += forwarding_server["url"]
    url += "?action=get_wan_ip"
    try:
        r = urlopen(url, timeout=5)
        response = r.read().decode("utf-8")
        response = extract_ip(response)
        if is_ip_valid(response):
            return response
    except Exception as e:
        print(str(e))
        continue

time.sleep(1)
return get_wan_ip(n + 1)
def get_wan_ip(n=0)
That IP module sucks. Occasionally it returns an IP address behind cloudflare which probably happens when cloudflare tries to proxy your web request because it thinks you're trying to DoS. It's better if we just run our own infrastructure.
3.138692
3.089561
1.015902
def get_gateway_addr():
    try:
        import netifaces
        return netifaces.gateways()["default"][netifaces.AF_INET][0]
    except ImportError:
        shell_command = 'netstat -rn'
        if os.name == "posix":
            pattern = \
                re.compile('(?:default|0\.0\.0\.0|::/0)\s+([\w\.:]+)\s+.*UG')
        elif os.name == "nt":
            if platform.version().startswith("6.1"):
                pattern = re.compile(".*?0.0.0.0[ ]+0.0.0.0[ ]+(.*?)[ ]+?.*?\n")
            else:
                pattern = re.compile(".*?Default Gateway:[ ]+(.*?)\n")
        system_out = os.popen(shell_command, 'r').read()
        if not system_out:
            raise NATPMPNetworkError(NATPMP_GATEWAY_CANNOT_FIND,
                                     error_str(NATPMP_GATEWAY_CANNOT_FIND))
        match = pattern.search(system_out)
        if not match:
            raise NATPMPNetworkError(NATPMP_GATEWAY_CANNOT_FIND,
                                     error_str(NATPMP_GATEWAY_CANNOT_FIND))
        addr = match.groups()[0].strip()
        return addr
Use netifaces to get the gateway address, if we can't import it then fall back to a hack to obtain the current gateway automatically, since Python has no interface to sysctl(). This may or may not be the gateway we should be contacting. It does not guarantee correct results. This function requires the presence of netstat on the path on POSIX and NT.
null
null
null
def get_gateway_socket(gateway):
    if not gateway:
        raise NATPMPNetworkError(NATPMP_GATEWAY_NO_VALID_GATEWAY,
                                 error_str(NATPMP_GATEWAY_NO_VALID_GATEWAY))
    response_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    response_socket.setblocking(0)
    response_socket.connect((gateway, NATPMP_PORT))
    return response_socket
Takes a gateway address string and returns a non-blocking UDP socket to communicate with its NAT-PMP implementation on NATPMP_PORT. e.g. addr = get_gateway_socket('10.0.1.1')
null
null
null
def get_public_address(gateway_ip=None, retry=9):
    if gateway_ip is None:
        gateway_ip = get_gateway_addr()
    addr_request = PublicAddressRequest()
    addr_response = send_request_with_retry(
        gateway_ip, addr_request,
        response_data_class=PublicAddressResponse,
        retry=retry, response_size=12)
    if addr_response.result != 0:
        # sys.stderr.write("NAT-PMP error %d: %s\n" %
        #                  (addr_response.result,
        #                   error_str(addr_response.result)))
        # sys.stderr.flush()
        raise NATPMPResultError(addr_response.result,
                                error_str(addr_response.result),
                                addr_response)
    addr = addr_response.ip
    return addr
A high-level function that returns the public interface IP of the current host by querying the NAT-PMP gateway. IP is returned as string.

Takes two possible keyword arguments:
    gateway_ip - the IP to the NAT-PMP compatible gateway. Defaults to using auto-detection function get_gateway_addr()
    retry - the number of times to retry the request if unsuccessful. Defaults to 9 as per specification.
null
null
null
def map_tcp_port(public_port, private_port, lifetime=3600, gateway_ip=None,
                 retry=9, use_exception=True):
    return map_port(NATPMP_PROTOCOL_TCP, public_port, private_port, lifetime,
                    gateway_ip=gateway_ip, retry=retry,
                    use_exception=use_exception)
A high-level wrapper to map_port() that requests a mapping for a public TCP port on the NAT to a private TCP port on this host. Returns the complete response on success.

    public_port - the public port of the mapping requested
    private_port - the private port of the mapping requested
    lifetime - the duration of the mapping in seconds. Defaults to 3600, per specification.
    gateway_ip - the IP to the NAT-PMP compatible gateway. Defaults to using auto-detection function get_gateway_addr()
    retry - the number of times to retry the request if unsuccessful. Defaults to 9 as per specification.
    use_exception - throw an exception if an error result is received from the gateway. Defaults to True.
null
null
null
def map_udp_port(public_port, private_port, lifetime=3600, gateway_ip=None,
                 retry=9, use_exception=True):
    return map_port(NATPMP_PROTOCOL_UDP, public_port, private_port, lifetime,
                    gateway_ip=gateway_ip, retry=retry,
                    use_exception=use_exception)
A high-level wrapper to map_port() that requests a mapping for a public UDP port on the NAT to a private UDP port on this host. Returns the complete response on success.

    public_port - the public port of the mapping requested
    private_port - the private port of the mapping requested
    lifetime - the duration of the mapping in seconds. Defaults to 3600, per specification.
    gateway_ip - the IP to the NAT-PMP compatible gateway. Defaults to using auto-detection function get_gateway_addr()
    retry - the number of times to retry the request if unsuccessful. Defaults to 9 as per specification.
    use_exception - throw an exception if an error result is received from the gateway. Defaults to True.
null
null
null
def map_port(protocol, public_port, private_port, lifetime=3600,
             gateway_ip=None, retry=9, use_exception=True):
    if protocol not in [NATPMP_PROTOCOL_UDP, NATPMP_PROTOCOL_TCP]:
        raise ValueError("Must be either NATPMP_PROTOCOL_UDP or "
                         "NATPMP_PROTOCOL_TCP")
    if gateway_ip is None:
        gateway_ip = get_gateway_addr()
    response = None
    port_mapping_request = PortMapRequest(protocol, private_port,
                                          public_port, lifetime)
    port_mapping_response = \
        send_request_with_retry(gateway_ip, port_mapping_request,
                                response_data_class=PortMapResponse,
                                retry=retry)
    if port_mapping_response.result != 0 and use_exception:
        raise NATPMPResultError(port_mapping_response.result,
                                error_str(port_mapping_response.result),
                                port_mapping_response)
    return port_mapping_response
A function to map public_port to private_port of protocol. Returns the complete response on success.

    protocol - NATPMP_PROTOCOL_UDP or NATPMP_PROTOCOL_TCP
    public_port - the public port of the mapping requested
    private_port - the private port of the mapping requested
    lifetime - the duration of the mapping in seconds. Defaults to 3600, per specification.
    gateway_ip - the IP to the NAT-PMP compatible gateway. Defaults to using auto-detection function get_gateway_addr()
    retry - the number of times to retry the request if unsuccessful. Defaults to 9 as per specification.
    use_exception - throw an exception if an error result is received from the gateway. Defaults to True.
null
null
null
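A brief usage sketch for the wrappers above; the constant and exception names are the ones defined in this module, and the port numbers are arbitrary examples:

# Sketch only: request a TCP mapping 8080 -> 8080 for 30 minutes.
# NATPMPResultError is raised if the gateway returns an error result.
try:
    response = map_tcp_port(8080, 8080, lifetime=1800)
    print(response)
except NATPMPResultError as err:
    print("mapping failed:", err)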
proto = proto.upper()
valid_protos = ["TCP", "UDP"]
if proto not in valid_protos:
    raise Exception("Invalid protocol for forwarding.")

# Port 0 is reserved; 65535 is the highest valid port.
valid_ports = range(1, 65536)
if src_port not in valid_ports:
    raise Exception("Invalid port for forwarding.")

# Source port is forwarded to the same destination port number.
if dest_port is None:
    dest_port = src_port

# Use the UPnP binary for forwarding on Windows.
if platform.system() == "Windows":
    cmd = "upnpc-static.exe -a %s %s %s %s" % (get_lan_ip(),
                                               str(src_port),
                                               str(dest_port), proto)
    out, err = subprocess.Popen(cmd, shell=True,
                                stdout=subprocess.PIPE,
                                stderr=subprocess.PIPE).communicate()
    # communicate() returns bytes on Python 3, so decode before testing.
    if "is not recognized" in err.decode("utf-8", "ignore"):
        raise Exception("Missing upnpc-static.exe")
    return

# Find gateway address.
gateway_addr = self.find_gateway()
if gateway_addr is None:
    raise Exception("Unable to find UPnP compatible gateway.")

# Get control URL.
rhost = re.findall('([^/]+)', gateway_addr)
res = urlopen(gateway_addr, timeout=self.timeout).read().decode("utf-8")
res = res.replace('\r', '').replace('\n', '').replace('\t', '')
pres = res.split('<serviceId>urn:upnp-org:serviceId:WANIPConn1'
                 '</serviceId>')
p2res = pres[1].split('</controlURL>')
p3res = p2res[0].split('<controlURL>')
ctrl = p3res[1]
rip = res.split('<presentationURL>')
rip1 = rip[1].split('</presentationURL>')
router_ip = rip1[0]

port_map_desc = "PyP2P"
msg = (
    '<?xml version="1.0"?>'
    '<s:Envelope xmlns:s="http://schemas.xmlsoap.org/soap/envelope/"'
    ' s:encodingStyle="http://schemas.xmlsoap.org/soap/encoding/">'
    '<s:Body>'
    '<u:AddPortMapping'
    ' xmlns:u="urn:schemas-upnp-org:service:WANIPConnection:1">'
    '<NewRemoteHost></NewRemoteHost>'
    '<NewExternalPort>' + str(src_port) + '</NewExternalPort>'
    '<NewProtocol>' + str(proto) + '</NewProtocol>'
    '<NewInternalPort>' + str(dest_port) + '</NewInternalPort>'
    '<NewInternalClient>' + str(dest_ip) + '</NewInternalClient>'
    '<NewEnabled>1</NewEnabled>'
    '<NewPortMappingDescription>' + str(port_map_desc) +
    '</NewPortMappingDescription>'
    '<NewLeaseDuration>0</NewLeaseDuration>'
    '</u:AddPortMapping></s:Body></s:Envelope>'
)

# Attempt to add the new port mapping.
if sys.version_info >= (3, 0, 0):
    msg = bytes(msg, "utf-8")
req = Request('http://' + rhost[1] + '/' + ctrl, msg)
req.add_header(
    'SOAPAction',
    '"urn:schemas-upnp-org:service:WANIPConnection:1#AddPortMapping"')
req.add_header('Content-type', 'application/xml')
urlopen(req, timeout=self.timeout)
def forward_port(self, proto, src_port, dest_ip, dest_port=None)
Creates a new mapping on the default gateway to forward ports. The source port is from the perspective of the original client: for example, if a client tries to connect to us on port 80, the source port is 80. The destination port isn't necessarily 80, however; we might run our web server on a different port and have the router forward requests for port 80 to it (what I call the destination port). If the destination port isn't specified, it defaults to the source port. Proto is either TCP or UDP. Returns None on success; otherwise raises an exception.
2.452584
2.423999
1.011792
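A hedged usage sketch for forward_port(); the class that hosts the method isn't shown in this record, so `net` below is a hypothetical instance of it:

# Hypothetical usage: ask the gateway to forward external TCP port 80
# to port 8080 on the LAN host 192.168.0.10.
net = ...  # an instance of the class defining forward_port()
net.forward_port("TCP", 80, "192.168.0.10", dest_port=8080)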
self.stop()
self.initialize()
self.handle = self.loop.call_at(self.get_next(), self.call_next)
def start(self)
Start scheduling
7.062679
6.388937
1.105455
if self.handle is not None:
    self.handle.cancel()
self.handle = self.future = self.croniter = None
def stop(self)
Stop scheduling
7.641145
6.621982
1.153906
self.initialize()
self.future = asyncio.Future(loop=self.loop)
self.handle = self.loop.call_at(self.get_next(), self.call_func, *args)
return self.future
def next(self, *args)
Schedule the next call and return a future with its result; intended to be awaited via ``yield from cron.next()``.
4.369461
4.202692
1.039681
if self.croniter is None:
    self.time = time.time()
    self.datetime = datetime.now(self.tz)
    self.loop_time = self.loop.time()
    self.croniter = croniter(self.spec, start_time=self.datetime)
def initialize(self)
Initialize croniter and related times
4.609856
3.538367
1.30282
return self.loop_time + (self.croniter.get_next(float) - self.time)
def get_next(self)
Return the next iteration time relative to loop time.
14.83296
7.819675
1.896877
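get_next() rebases croniter's absolute wall-clock timestamp onto the event loop's monotonic clock using the offsets captured in initialize(). A standalone sketch of that arithmetic (the cron spec and times below are examples only):

import time
from datetime import datetime
from croniter import croniter

wall_now = time.time()   # stands in for self.time at initialize()
loop_now = 0.0           # stands in for self.loop.time() at initialize()
it = croniter("*/5 * * * *", start_time=datetime.now())
next_wall = it.get_next(float)                 # absolute unix timestamp
next_loop = loop_now + (next_wall - wall_now)  # same instant in loop time
print("next run in %.1f loop seconds" % (next_loop - loop_now))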
if self.handle is not None:
    self.handle.cancel()
next_time = self.get_next()
self.handle = self.loop.call_at(next_time, self.call_next)
self.call_func()
def call_next(self)
Schedule the next hop in the loop, then call the task.
3.780114
3.69857
1.022047
asyncio.gather(
    self.cron(*args, **kwargs),
    loop=self.loop,
    return_exceptions=True
).add_done_callback(self.set_result)
def call_func(self, *args, **kwargs)
Run the wrapped cron coroutine; exceptions are captured via ``asyncio.gather(..., return_exceptions=True)`` and handed to ``set_result``.
5.681886
4.81136
1.180931
result = result.result()[0]
if self.future is not None:
    if isinstance(result, Exception):
        self.future.set_exception(result)
    else:
        self.future.set_result(result)
    self.future = None
elif isinstance(result, Exception):
    raise result
def set_result(self, result)
Set the future's result if one is pending (the result may be an exception); otherwise re-raise any exception.
2.633615
2.282123
1.15402
match = re.match(r'\s*' + TIMEFORMAT + r'\s*$', sval, re.I)
if not match or not match.group(0).strip():
    return
mdict = match.groupdict()
return sum(
    MULTIPLIERS[k] * cast(v) for (k, v) in mdict.items()
    if v is not None)
def timeparse(sval)
Parse a time expression, returning it as a number of seconds. If possible, the return value will be an `int`; if this is not possible, the return will be a `float`. Returns `None` if a time expression cannot be parsed from the given string.

Arguments:
- `sval`: the string value to parse

>>> timeparse('1m24s')
84
>>> timeparse('1.2 minutes')
72
>>> timeparse('1.2 seconds')
1.2
4.466319
4.781457
0.934092
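timeparse() depends on module-level TIMEFORMAT (a regex of named groups) and MULTIPLIERS (seconds per unit), neither of which appears in this record. A minimal self-contained sketch of the same idea, with deliberately tiny stand-ins for both:

import re

TIMEFORMAT = r'((?P<mins>\d+(?:\.\d+)?)m)?((?P<secs>\d+(?:\.\d+)?)s)?'
MULTIPLIERS = {'mins': 60, 'secs': 1}

def tiny_timeparse(sval):
    # Sum multiplier * value over every named group that matched.
    match = re.match(r'\s*' + TIMEFORMAT + r'\s*$', sval, re.I)
    if not match or not match.group(0).strip():
        return None
    return sum(MULTIPLIERS[k] * float(v)
               for k, v in match.groupdict().items() if v is not None)

print(tiny_timeparse('1m24s'))  # 84.0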
lines = re.split('[\r\n]+', text)
processed = []
for line in lines:
    all_caps = line.upper() == line
    words = re.split('[\t ]', line)
    tc_line = []
    for word in words:
        if callback:
            new_word = callback(word, all_caps=all_caps)
            if new_word:
                # Address #22: If a callback has done something
                # specific, leave this string alone from now on
                tc_line.append(_mark_immutable(new_word))
                continue
        if all_caps:
            if UC_INITIALS.match(word):
                tc_line.append(word)
                continue
        if APOS_SECOND.match(word):
            if len(word[0]) == 1 and word[0] not in 'aeiouAEIOU':
                word = word[0].lower() + word[1] + word[2].upper() + word[3:]
            else:
                word = word[0].upper() + word[1] + word[2].upper() + word[3:]
            tc_line.append(word)
            continue
        match = MAC_MC.match(word)
        if match:
            tc_line.append("%s%s" % (
                match.group(1).capitalize(),
                titlecase(match.group(2), callback, small_first_last)
            ))
            continue
        if INLINE_PERIOD.search(word) or (not all_caps and UC_ELSEWHERE.match(word)):
            tc_line.append(word)
            continue
        if SMALL_WORDS.match(word):
            tc_line.append(word.lower())
            continue
        if "/" in word and "//" not in word:
            slashed = map(
                lambda t: titlecase(t, callback, False),
                word.split('/')
            )
            tc_line.append("/".join(slashed))
            continue
        if '-' in word:
            hyphenated = map(
                lambda t: titlecase(t, callback, small_first_last),
                word.split('-')
            )
            tc_line.append("-".join(hyphenated))
            continue
        if all_caps:
            word = word.lower()
        # Just a normal word that needs to be capitalized
        tc_line.append(CAPFIRST.sub(lambda m: m.group(0).upper(), word))
    if small_first_last and tc_line:
        if not isinstance(tc_line[0], Immutable):
            tc_line[0] = SMALL_FIRST.sub(lambda m: '%s%s' % (
                m.group(1), m.group(2).capitalize()
            ), tc_line[0])
        if not isinstance(tc_line[-1], Immutable):
            tc_line[-1] = SMALL_LAST.sub(
                lambda m: m.group(0).capitalize(), tc_line[-1]
            )
    result = " ".join(tc_line)
    result = SUBPHRASE.sub(lambda m: '%s%s' % (
        m.group(1), m.group(2).capitalize()
    ), result)
    processed.append(result)
return "\n".join(processed)
def titlecase(text, callback=None, small_first_last=True)
Titlecases input text

This filter changes all words to Title Caps, and attempts to be clever about *un*capitalizing SMALL words like a/an/the in the input.

The list of "SMALL words" which are not capped comes from the New York Times Manual of Style, plus 'vs' and 'v'.
2.741679
2.753904
0.995561
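A short usage sketch; the callback contract is as the code above shows, called once per word with an ``all_caps`` keyword and returning a replacement string or None:

print(titlecase("a tale of two cities"))  # 'A Tale of Two Cities'

def keep_acronyms(word, **kwargs):
    # Return a value to pin a word; return None for default handling.
    return word.upper() if word.upper() in ("HTML", "HTTP") else None

print(titlecase("an html guide", callback=keep_acronyms))  # 'An HTML Guide'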
'''Handler for command line invocation'''

# Try to handle any reasonable thing thrown at this.
# Consume '-f' and '-o' as input/output, allow '-' for stdin/stdout
# and treat any subsequent arguments as a space separated string to
# be titlecased (so it still works if people forget quotes)
parser = argparse.ArgumentParser()
in_group = parser.add_mutually_exclusive_group()
in_group.add_argument('string', nargs='*', default=[],
                      help='String to titlecase')
in_group.add_argument('-f', '--input-file',
                      help='File to read from to titlecase')
parser.add_argument('-o', '--output-file',
                    help='File to write titlecased output to')
args = parser.parse_args()

if args.input_file is not None:
    if args.input_file == '-':
        ifile = sys.stdin
    else:
        ifile = open(args.input_file)
else:
    ifile = sys.stdin

if args.output_file is not None:
    if args.output_file == '-':
        ofile = sys.stdout
    else:
        ofile = open(args.output_file, 'w')
else:
    ofile = sys.stdout

if len(args.string) > 0:
    in_string = ' '.join(args.string)
else:
    with ifile:
        in_string = ifile.read()

with ofile:
    ofile.write(titlecase(in_string))
def cmd()
Handler for command line invocation
3.09459
3.063864
1.010029
parser.add_option(
    '--eradicate-aggressive',
    default=False,
    help=(
        'Enables aggressive mode for eradicate; '
        'this may result in false positives'
    ),
    action='store_true',
    type=None,
)
def add_options(cls, parser: OptionManager) -> None
``flake8`` api method to register new plugin options.

See :class:`.Configuration` docs for detailed options reference.

Arguments:
    parser: ``flake8`` option parser instance.
6.128753
5.935016
1.032643
if self.filename != STDIN:
    buffer = StringIO()
    options = _Options(aggressive=self.options.eradicate_aggressive)
    fix_file(self.filename, options, buffer)
    traceback = buffer.getvalue()
    if traceback:
        yield 1, 0, self._error(traceback), type(self)
def run(self) -> Generator[Tuple[int, int, str, type], None, None]
Runs the checker. ``fix_file()`` only mutates the buffer object; inspecting that buffer is the only way to find out whether an error happened.
7.345452
6.33246
1.159968
yielded = set()
for value in g:
    if value not in yielded:
        yield value
        yielded.add(value)
def unique(g)
Yield values yielded by ``g``, removing any duplicates.

Example
-------
>>> list(unique(iter([1, 3, 1, 2, 3])))
[1, 3, 2]
3.113294
4.090847
0.761039
for type_ in t.mro():
    try:
        return vars(type_)[name]
    except KeyError:
        pass
raise AttributeError(name)
def static_get_type_attr(t, name)
Get a type attribute statically, circumventing the descriptor protocol.
3.637497
3.471002
1.047967
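A tiny demonstration of why the static lookup matters: getattr() invokes the descriptor protocol and unwraps the staticmethod, while static_get_type_attr() returns the wrapper object itself:

class Base(object):
    @staticmethod
    def f():
        return 1

print(type(getattr(Base, 'f')))               # a plain function
print(type(static_get_type_attr(Base, 'f')))  # <class 'staticmethod'>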
message = "\nclass {C} received conflicting default implementations:".format(
    C=typename,
)
for attrname, interfaces in conflicts.items():
    message += dedent(
    ).format(
        attr=attrname,
        interfaces=bulleted_list(sorted(map(getname, interfaces))),
    )
return InvalidImplementation(message)
def _conflicting_defaults(typename, conflicts)
Format an error message for conflicting default implementations.

Parameters
----------
typename : str
    Name of the type for which we're producing an error.
conflicts : dict[str -> list[Interface]]
    Map from strings to interfaces providing a default with that name.

Returns
-------
message : str
    User-facing error message.
9.150398
8.540711
1.071386
missing = []
mistyped = {}
mismatched = {}
for name, iface_sig in self._signatures.items():
    try:
        # Don't invoke the descriptor protocol here so that we get
        # staticmethod/classmethod/property objects instead of the
        # functions they wrap.
        f = static_get_type_attr(type_, name)
    except AttributeError:
        missing.append(name)
        continue

    impl_sig = TypedSignature(f)

    if not issubclass(impl_sig.type, iface_sig.type):
        mistyped[name] = impl_sig.type

    if not compatible(impl_sig.signature, iface_sig.signature):
        mismatched[name] = impl_sig

return missing, mistyped, mismatched
def _diff_signatures(self, type_)
Diff our method signatures against the methods provided by type_.

Parameters
----------
type_ : type
    The type to check.

Returns
-------
missing, mistyped, mismatched : list[str], dict[str -> type], dict[str -> signature]  # noqa
    ``missing`` is a list of missing interface names.
    ``mistyped`` is a dict mapping names to incorrect types.
    ``mismatched`` is a dict mapping names to incorrect signatures.
4.683328
3.892531
1.203157
raw_missing, mistyped, mismatched = self._diff_signatures(type_)

# See if we have defaults for missing methods.
missing = []
defaults_to_use = {}
for name in raw_missing:
    try:
        defaults_to_use[name] = self._defaults[name].implementation
    except KeyError:
        missing.append(name)

if not any((missing, mistyped, mismatched)):
    return defaults_to_use

raise self._invalid_implementation(type_, missing, mistyped, mismatched)
def verify(self, type_)
Check whether a type implements ``self``.

Parameters
----------
type_ : type
    The type to check.

Raises
------
TypeError
    If ``type_`` doesn't conform to our interface.

Returns
-------
None
5.1785
5.973287
0.866943
assert missing or mistyped or mismatched, "Implementation wasn't invalid."

message = "\nclass {C} failed to implement interface {I}:".format(
    C=getname(t),
    I=getname(self),
)

if missing:
    message += dedent(
    ).format(
        I=getname(self),
        missing_methods=self._format_missing_methods(missing)
    )

if mistyped:
    message += dedent(
    ).format(
        I=getname(self),
        mismatched_types=self._format_mismatched_types(mistyped),
    )

if mismatched:
    message += dedent(
    ).format(
        I=getname(self),
        mismatched_methods=self._format_mismatched_methods(mismatched),
    )

return InvalidImplementation(message)
def _invalid_implementation(self, t, missing, mistyped, mismatched)
Make a TypeError explaining why ``t`` doesn't implement our interface.
2.648067
2.661037
0.995126
if name is None:
    name = existing_class.__name__ + 'Interface'

if subset is None:
    subset = set(dir(existing_class)) - TRIVIAL_CLASS_ATTRIBUTES

return InterfaceMeta(
    name,
    (Interface,),
    {name: static_get_type_attr(existing_class, name) for name in subset},
)
def from_class(cls, existing_class, subset=None, name=None)
Create an interface from an existing class.

Parameters
----------
existing_class : type
    The type from which to extract an interface.
subset : list[str], optional
    List of methods that should be included in the interface.
    Default is to use all attributes not defined in an empty class.
name : str, optional
    Name of the generated interface.
    Default is ``existing_class.__name__ + 'Interface'``.

Returns
-------
interface : type
    A new interface class with stubs generated from ``existing_class``.
4.446798
4.62382
0.961715
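A hedged usage sketch for from_class(); it assumes the Interface class this classmethod lives on (per the ``cls`` parameter) is importable from the surrounding library:

class Mapping(object):
    def get(self, key, default=None):
        pass
    def keys(self):
        pass

# Extract an interface containing only the two named methods.
MappingInterface = Interface.from_class(Mapping, subset=['get', 'keys'])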
return all([
    positionals_compatible(
        takewhile(is_positional, impl_sig.parameters.values()),
        takewhile(is_positional, iface_sig.parameters.values()),
    ),
    keywords_compatible(
        valfilter(complement(is_positional), impl_sig.parameters),
        valfilter(complement(is_positional), iface_sig.parameters),
    ),
])
def compatible(impl_sig, iface_sig)
Check whether ``impl_sig`` is compatible with ``iface_sig``.

Parameters
----------
impl_sig : inspect.Signature
    The signature of the implementation function.
iface_sig : inspect.Signature
    The signature of the interface function.

In general, an implementation is compatible with an interface if any valid way of passing parameters to the interface method is also valid for the implementation.

Consequently, the following differences are allowed between the signature of an implementation method and the signature of its interface definition:

1. An implementation may add new arguments to an interface iff:
   a. All new arguments have default values.
   b. All new arguments accepted positionally (i.e. all
      non-keyword-only arguments) occur after any arguments declared
      by the interface.
   c. Keyword-only arguments may be reordered by the implementation.

2. For type-annotated interfaces, type annotations may differ as
   follows:
   a. Arguments to implementations of an interface may be annotated
      with a **superclass** of the type specified by the interface.
   b. The return type of an implementation may be annotated with a
      **subclass** of the type specified by the interface.
3.51894
3.865681
0.910303
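A sketch of rules 1a/1b above: an implementation may append new defaulted parameters after those the interface declares, so the pair below should be judged compatible (assuming the positionals_compatible/keywords_compatible helpers, not shown in this record, implement those rules):

from inspect import signature

def iface_method(self, a, b):
    pass

def impl_method(self, a, b, extra=None):  # new defaulted arg at the end
    pass

print(compatible(signature(impl_method), signature(iface_method)))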
func = kwargs['func']
del kwargs['func']
return aggregate_np(*args, func=lambda x: func(x), **kwargs)
def aggregate_group_loop(*args, **kwargs)
Wrap func in a lambda, which prevents aggregate_numpy from recognising and optimising it; instead it groups and loops.
5.232708
4.204672
1.244499
cmp_pos = 0
steps = 1
if len(group_idx) < 1:
    return 0
for i in range(len(group_idx)):
    if group_idx[cmp_pos] != group_idx[i]:
        cmp_pos = i
        steps += 1
return steps
def step_count(group_idx)
Return the number of index changes within group_idx.
3.056719
2.819071
1.0843
ilen = step_count(group_idx) + 1
indices = np.empty(ilen, np.int64)
indices[0] = 0
indices[-1] = group_idx.size
cmp_pos = 0
ri = 1
for i in range(len(group_idx)):
    if group_idx[cmp_pos] != group_idx[i]:
        cmp_pos = i
        indices[ri] = i
        ri += 1
return indices
def step_indices(group_idx)
Return the edges of the runs within group_idx that are filled with the same value.
3.123912
3.075365
1.015786
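A worked example for the two helpers above: the array below contains four constant runs, so step_count() reports 4 and step_indices() returns the run edges, including both endpoints:

import numpy as np

group_idx = np.array([1, 1, 2, 2, 2, 5, 7, 7])
print(step_count(group_idx))    # 4
print(step_indices(group_idx))  # [0 2 5 6 8]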
_valgetter = cls._valgetter_scalar if scalar else cls._valgetter
valgetter = nb.njit(_valgetter)
outersetter = nb.njit(cls._outersetter)

_cls_inner = nb.njit(cls._inner)
if nans:
    def _inner(ri, val, ret, counter, mean):
        if not np.isnan(val):
            _cls_inner(ri, val, ret, counter, mean)
    inner = nb.njit(_inner)
else:
    inner = _cls_inner

def _loop(group_idx, a, ret, counter, mean, outer, fill_value, ddof):
    # fill_value and ddof need to be present for being exchangeable
    # with loop_2pass
    size = len(ret)
    rng = (range(len(group_idx) - 1, -1, -1) if reverse
           else range(len(group_idx)))
    for i in rng:
        ri = group_idx[i]
        if ri < 0:
            raise ValueError("negative indices not supported")
        if ri >= size:
            raise ValueError("one or more indices in group_idx "
                             "are too large")
        val = valgetter(a, i)
        inner(ri, val, ret, counter, mean)
        outersetter(outer, i, ret[ri])

return nb.njit(_loop, nogil=True)
def callable(cls, nans=False, reverse=False, scalar=False)
Compile a jitted function that does the hard part of the job.
4.077472
4.108189
0.992523
jitfunc = nb.njit(self.func, nogil=True)

def _loop(sortidx, group_idx, a, ret):
    size = len(ret)
    group_idx_srt = group_idx[sortidx]
    a_srt = a[sortidx]

    indices = step_indices(group_idx_srt)
    for i in range(len(indices) - 1):
        start_idx, stop_idx = indices[i], indices[i + 1]
        ri = group_idx_srt[start_idx]
        if ri < 0:
            raise ValueError("negative indices not supported")
        if ri >= size:
            raise ValueError("one or more indices in group_idx "
                             "are too large")
        ret[ri] = jitfunc(a_srt[start_idx:stop_idx])

return nb.njit(_loop, nogil=True)
def callable(self, nans=False)
Compile a jitted function and loop it over the sorted data.
3.701173
3.461611
1.069205
alias = dict((k, k) for k in funcs_common)
alias.update(_alias_str)
alias.update((fn, fn) for fn in _alias_builtin.values())
alias.update(_alias_builtin)
for d in extra:
    alias.update(d)
alias.update((k, k) for k in set(alias.values()))
# Treat nan-functions as firstclass member and add them directly
for key in set(alias.values()):
    if key not in funcs_no_separate_nan:
        key = 'nan' + key
        alias[key] = key
return alias
def get_aliasing(*extra)
Assemble the dict mapping strings and functions to the list of supported function names, e.g. alias['add'] = 'sum' and alias[sorted] = 'sort'. This function should only be called during import.
5.775379
5.650438
1.022112
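get_aliasing() folds together several module-level tables (funcs_common, _alias_str, _alias_builtin, funcs_no_separate_nan) that this record doesn't show, so the sketch below only illustrates the expected shape of the result under tiny stand-in tables:

# Assuming funcs_common = ['sum', 'sort'], _alias_str = {'add': 'sum'},
# _alias_builtin = {sorted: 'sort'} and funcs_no_separate_nan = {'sort'},
# the resulting mapping would satisfy:
alias = get_aliasing()
assert alias['add'] == 'sum'
assert alias[sorted] == 'sort'
assert alias['nansum'] == 'nansum'  # nan-variant added as first-class key
assert 'nansort' not in alias       # sort has no separate nan variant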