content
stringlengths
39
14.9k
sha1
stringlengths
40
40
id
int64
0
710k
import re


def rx_filter(objs: list, attr: str, prompt: str) -> list:
    """Prompt the user until their input selects at least one dict from
    ``objs`` by the value of key ``attr``.

    An exact string match is preferred; failing that, the input is treated
    as a regex (case-insensitive when the input is all-lowercase).
    """
    while True:
        search_term = input(prompt + " ")
        # Exact match wins -- otherwise an item whose value is a substring
        # of another item's value could never be selected on its own.
        exact = [obj for obj in objs if obj[attr] == search_term]
        if exact:
            return exact
        # All-lowercase input searches case-insensitively.
        flags = re.IGNORECASE if search_term == search_term.lower() else 0
        pattern = re.compile(search_term, flags)
        partial = [obj for obj in objs if pattern.search(obj[attr])]
        if partial:
            return partial
        print("No matches, try again.")
f0c6dd5609020054da7895e577483c911d9aaea3
703,942
def _get_usb_hub_map(device_info_list): """Creates a map of usb hub addresses to device_infos by port. Args: device_info_list (list): list of known usb_connections dicts. Returns: dict: map of usb hub addresses to device_infos by port """ map_usb_hub_ports = {} for device_info in device_info_list: hub_address = device_info['usb_hub_address'] port = device_info['usb_hub_port'] if hub_address: if hub_address not in map_usb_hub_ports: map_usb_hub_ports[hub_address] = {} if not map_usb_hub_ports[hub_address].get( port) or device_info['ftdi_interface'] == 2: map_usb_hub_ports[hub_address][port] = device_info return map_usb_hub_ports
eaadc4713a41fdf38cea4fce35806d1d8772df27
703,943
import re

# (regex, output key) pairs searched in each run's log text; a field is
# stored only when the pattern matches with a non-empty capture.
_PGSQL_FIELDS = [
    (r"scaling_factor\':\s+(\d+),", "scaling_factor"),
    (r"number_of_clients\':\s+(\d+),", "num_clients"),
    (r"number_of_threads\':\s+(\d+)", "num_threads"),
    (r"number_of_transactions_per_client\':\s+(\d+),",
     "number_of_transactions_per_client"),
    (r"number_of_transactions_actually_processed\':\s+(\d+),",
     "number_of_transactions_actually_processed"),
    (r"latency_average_ms\':\s+(\d+)", "latency_avg"),
    (r"latency_stddev_ms\':\s+(\d+)", "lat_stddev"),
    (r"tps_incl_con_est\':\s+(\w+)", "tps_incl"),
    (r"tps_excl_con_est\':\s+(\w+)", "tps_excl"),
]


def parse_pgsql_logs(data):
    """Parse the pgsql benchmark data from ripsaw into list format.

    Args:
        data (str): log data from pgsql bench run.

    Returns:
        list: one dict per run keyed by run-ID (1, 2, ...), e.g.::

            [{1: {'num_clients': '2', 'num_threads': '7',
                  'latency_avg': '7', 'lat_stddev': '0',
                  'tps_incl': '234', 'tps_excl': '243'}}, ...]
    """
    sections = data.split("PGBench Results")
    list_data = []
    # The first two fragments of the split carry no results (header text and
    # the segment before the first usable marker), hence the [2:] offset.
    for run_id, section in enumerate(sections[2:], start=1):
        log = "".join(section.split("\n"))
        run_data = {run_id: {}}
        for pattern, key in _PGSQL_FIELDS:
            found = re.search(pattern, log)
            if found and found.group(1):
                run_data[run_id][key] = found.group(1)
        list_data.append(run_data)
    return list_data
5bd5cd43432b17be6bd52004b151b32a0f574980
703,945
def jd2gdate(myjd):
    """Julian date to proleptic Gregorian calendar date and time of day.

    Parameters
    ----------
    myjd : float
        Julian date.

    Returns
    -------
    y, m, d, f : int, int, int, float
        Year, month, day, and the fractional part of the day measured
        from midnight.
    """
    jday = int(myjd)
    frac = myjd - jday
    # Shift so jday refers to noon of the current date and frac becomes
    # the fraction elapsed since that date's midnight.
    if -0.5 < frac < 0.5:
        frac += 0.5
    elif frac >= 0.5:
        jday += 1
        frac -= 0.5
    elif frac <= -0.5:
        jday -= 1
        frac += 1.5
    # Integer arithmetic conversion (Fliegel & Van Flandern style).
    ell = jday + 68569
    n = int((4 * ell) / 146097.0)
    ell -= int(((146097 * n) + 3) / 4.0)
    i = int((4000 * (ell + 1)) / 1461001)
    ell -= int((1461 * i) / 4.0) - 31
    j = int((80 * ell) / 2447.0)
    day = ell - int((2447 * j) / 80.0)
    ell = int(j / 11.0)
    month = j + 2 - (12 * ell)
    year = 100 * (n - 49) + i + ell
    return int(year), int(month), int(day), frac
f43a299fd8627804893eb5b6266d6a016c191d72
703,946
def get_policy_targets(context, presentation):
    """Resolve the policy's targets into node templates and groups.

    Unknown target names are silently ignored.

    Returns:
        (node_templates, groups) tuple of lists.
    """
    node_templates = []
    groups = []
    targets = presentation.targets
    if targets:
        topology = context.presentation
        all_node_templates = topology.get(
            'service_template', 'topology_template', 'node_templates') or {}
        all_groups = topology.get(
            'service_template', 'topology_template', 'groups') or {}
        for target in targets:
            if target in all_node_templates:
                node_templates.append(all_node_templates[target])
            elif target in all_groups:
                groups.append(all_groups[target])
    return node_templates, groups
f483b9749c25b7d56c0e0a02a6787d936782e470
703,948
import torch


def isPD(B):
    """Check whether a matrix is positive definite.

    Args:
        B (torch.Tensor): Input matrix.

    Returns:
        bool: True if ``B`` admits a Cholesky factorization (i.e. is
        positive definite), otherwise False.
    """
    try:
        # torch.cholesky was deprecated in 1.8 and later removed; the
        # torch.linalg replacement raises LinAlgError (a RuntimeError
        # subclass) on non-PD input, so the handler below still applies.
        torch.linalg.cholesky(B)
        return True
    except RuntimeError:
        return False
c51dc4f6f48ac7417f49ef41b81f3b04816b9279
703,950
def convert_TriMap_to_SelectedLEDs(best_led_config):
    """Return a lookup dict mapping every selected LED number to True."""
    return {
        led_num: True
        for led_nums in best_led_config.values()
        for led_num in led_nums
    }
521a1be0d11cb8198944e437d20d4ac0349c8856
703,951
def get_table_id(table):
    """Return the id column name of the given cdm table.

    :param table: cdm table name
    :return: the table name with an ``_id`` suffix
    """
    return f'{table}_id'
33fd8f445f15fb7e7c22535a31249abf6f0c819b
703,954
from typing import Hashable
from typing import Sequence


def all_items_present(sequence: Sequence[Hashable], values: Sequence[Hashable]) -> bool:
    """Check whether every item of ``values`` occurs somewhere in ``sequence``.

    Arguments:
        sequence: An iterable of Hashable values to search in.
        values: An iterable of Hashable values whose presence is checked.

    Returns:
        True if all ``values`` are present in ``sequence``, else False.
    """
    for value in values:
        if value not in sequence:
            return False
    return True
f43a881159ccf147d3bc22cfeb261620fff67d7a
703,955
def reorder(rules):
    """Return ``rules`` sorted in ascending order of their ``score``."""
    return sorted(rules, key=lambda rule: rule.score)
cf4ff3b8d8aacd5e868ee468b37071fed2c1d67e
703,956
import re

# Matches signed reals with optional thousands groups and exponent
# (used to parse the CMI gateway's cgi output).
_NUMBER_RE = re.compile(
    r'[-+]?[.]?[\d]+(?:,\d\d\d)*[\.]?\d*(?:[eE][-+]?\d+)?')


def extract_floats(string):
    """Extract all real numbers from ``string`` into a list of floats."""
    return [float(token) for token in _NUMBER_RE.findall(string)]
0dc26261d45bd0974e925df5ed660a6e31adf30c
703,957
def binary(n, digits):
    """Return a tuple of ``digits`` bits representing the integer ``n`` in
    binary, most-significant bit first.

    For example, binary(3, 3) returns (0, 1, 1).
    """
    bits = []
    for _ in range(digits):
        n, remainder = divmod(n, 2)
        bits.insert(0, remainder)
    return tuple(bits)
bc52a985b86954b1d23bb80a14c56b3e3dfb7c59
703,958
import re def _parse_uci_regression_dataset(name_str): """Parse name and seed for uci regression data. E.g. yacht_2 is the yacht dataset with seed 2. """ pattern_string = "(?P<name>[a-z]+)_(?P<seed>[0-9]+)" pattern = re.compile(pattern_string) matched = pattern.match(name_str) if matched: name = matched.group("name") seed = matched.group("seed") return name, seed return None, None
dd2158e1a5ceeba25a088b07ff8064e8016ae551
703,966
import re


def get_params(proto):
    """Get the list of parameters from a function prototype.

    Example:
        proto = "int main (int argc, char ** argv)"
        returns: ['int argc', 'char ** argv']

    Raises:
        IndexError: if ``proto`` contains no parenthesised parameter list.
    """
    # The trailing ';' is optional so that both terminated declarations
    # ("int f(int a);") and bare prototypes -- like the docstring's own
    # example -- parse; previously the mandatory ';' made the example
    # raise IndexError.  The greedy leading '.*' anchors on the last '('.
    paramregex = re.compile(r'.*\((.*)\);?')
    return paramregex.findall(proto)[0].split(', ')
37841b2503f53353fcbb881993e8b486c199ea58
703,967
import inspect


def list_module_public_functions(mod, excepted=()):
    """Build the list of all public functions declared in a module.

    Args:
        mod: Module to parse.
        excepted: Function names to exclude. Default is none.

    Returns:
        List of public function objects defined in ``mod`` itself.
    """
    functions = []
    for name, func in inspect.getmembers(mod, inspect.isfunction):
        if name.startswith('_') or name in excepted:
            continue
        # Keep only functions actually defined in this module, not re-exports.
        if inspect.getmodule(func) == mod:
            functions.append(func)
    return functions
d27dc869cf12701bcb7d2406d60a51a8539a9e1b
703,968
def strRT(R, T):
    """Returns a string for a rotation/translation pair in a readable form,
    one bracketed row per line.
    """
    rows = []
    for r in range(3):
        rows.append("[%6.3f %6.3f %6.3f %6.3f]\n" % (
            R[r, 0], R[r, 1], R[r, 2], T[r]))
    return "".join(rows)
2d7ec1bf2ebd5a03472b7b6155ed43fdcc71f76a
703,971
def general_value(value):
    """Checks if value is generally valid.

    Returns: 200 if ok, 700 if ',' in value, 701 if '\\n' in value.
    A comma takes precedence over a newline.
    """
    if ',' in value:
        return 700
    if '\n' in value:
        return 701
    return 200
5cf8388294cae31ca70ce528b38ca78cdfd85c2c
703,973
def padZeros(numberString, numZeros, insertSide):
    """Return ``numberString`` padded with ``numZeros`` zeros on the given
    side ('left' or 'right'); any other side yields None.
    """
    padding = '0' * numZeros
    if insertSide == 'left':
        return padding + numberString
    if insertSide == 'right':
        return numberString + padding
d0c2d08a392e4792b13a64d076c8fb6aff1572cb
703,982
def _resample_event_obs(obs, fx, obs_data): """ Resample the event observation. Parameters ---------- obs : datamodel.Observation The Observation being resampled. fx : datamodel.EventForecast The corresponding Forecast. obs_data : pd.Series Timeseries data of the event observation. Returns ------- obs_resampled : pandas.Series Timeseries data of the Observation resampled to match the Forecast. Raises ------ RuntimeError If the Forecast and Observation do not have the same interval length. """ if fx.interval_length != obs.interval_length: raise ValueError("Event observation and forecast time-series " "must have matching interval length.") else: obs_resampled = obs_data return obs_resampled
1c66ae124aaa2e732c7d0ec3e733ae2b5caaa6cb
703,985
def _arg_raw(dvi, delta): """Return *delta* without reading anything more from the dvi file""" return delta
041cfaaf23c6e229b60d5278e8cf27352e078a65
703,986
def _read_tmpfd(fil): """Read from a temporary file object Call this method only when nothing more will be written to the temporary file - i.e., all the writing has already been done. """ fil.seek(0) return fil.read()
08648325e7e0e9bcd543d3238cb4630ac284f6ed
703,988
def evens(input):
    """Returns a list with only the even elements of data.

    Example: evens([0, 1, 2, 3, 4]) returns [0, 2, 4]

    Parameter input: The data to process
    Precondition: input an iterable, each element an int
    """
    return [value for value in input if value % 2 == 0]
8a219f8815d95a18bea148eaae117f3356a77d4b
703,989
def piece_not(piece: str) -> str:
    """Return the other game piece that is not the current game piece.

    Preconditions:
        - piece in {'x', 'o'}

    >>> piece_not('x')
    'o'
    >>> piece_not('o')
    'x'
    """
    if piece == 'o':
        return 'x'
    return 'o'
18bb3b45accf98d4f914e3f50372c4c083c1db4d
703,992
from typing import Callable
from typing import Dict
from typing import List
from typing import Tuple


def enum_options(values: List[Dict]) -> Tuple[List[str], int, Callable]:
    """Enumerate options of an enum parameter for display in a select box.

    Parameters
    ----------
    values: list of dict
        Enumeration values from the parameter declaration.

    Returns
    -------
    (list, int, callable)
        The option values, the index of the default option, and a function
        mapping an option value to its name (identifier).
    """
    options = [obj['value'] for obj in values]
    # Later duplicates override earlier ones, matching insertion order.
    mapping = {obj['value']: obj['name'] for obj in values}
    default_index = 0
    for index, obj in enumerate(values):
        if obj.get('isDefault', False):
            default_index = index
    return options, default_index, mapping.__getitem__
c1214f319847b40705f425e529420a5916debe6e
703,994
def find_elements(node, xpath, allow_zero=True, allow_multiple=True):
    """Find child elements of ``node`` by xpath, enforcing cardinality.

    Raises AssertionError when zero matches are found but ``allow_zero`` is
    False, or more than one match is found but ``allow_multiple`` is False.
    Returns the (possibly empty) list of matching elements.
    """
    found = node.findall(xpath)
    count = len(found)
    if (count == 0 and not allow_zero) or (count > 1 and not allow_multiple):
        raise AssertionError(
            f'Found {count} instances of {xpath} in {node}, which is not allowed')
    return found
2255fcd63f35837c647dd6dca81ab648d59addc8
703,996
def get_text(original, token, replace):
    """Convenience function for getting the text to use for a match when
    formatting.

    If ``replace`` is False, returns the part of ``original`` between
    ``token.startchar`` and ``token.endchar``.  If ``replace`` is True,
    returns ``token.text``.
    """
    return token.text if replace else original[token.startchar:token.endchar]
b0e2e53611cb5b26b04d0e8350f1ae88a6b56056
704,002
def calc_Ti(Te, Tg, n):
    """Calculate the infectious period as (Tg - Te) * 2n / (n + 1)."""
    duration = Tg - Te
    return duration * 2.0 * n / (n + 1.0)
7ba0176b821032e4f69ff7fe368393c2773c7d0d
704,003
def _get_indent(line): """Return the indentation in front of a line""" indent = line.replace(line.lstrip(), '') return indent
6a5dd97d4c5702a55b8b1ddaad91c5ecb99458fa
704,004
def problems(mod, msg):
    """Exit the Ansible module with a failure, reporting ``msg``."""
    return mod.exit_json(changed=False, failed=True, msg=msg)
3ec1a8c8843ba9b33d47e61d1b775aadb32ab45e
704,006
def to_sql_name(name):
    """Ensure ``name`` is a valid SQL name: lowercase, with spaces
    replaced by underscores.
    """
    return name.replace(' ', '_').lower()
f3157d9444793d0af05e27317fdab2aa55531b84
704,007
def polygon_to_points(poly):
    """Plotting helper rearranging polygon vertices into X and Y lists.

    The first point is duplicated at the end of each list, to make a
    closed path.

    :Parameters:
    poly: tuple of ((x1,y1),(x2,y2),...)
        The coordinates of the vertices of the polygon.

    :Returns:
    (xlist, ylist): list of 2 tuples
        ((x1, x2, ..., x1), (y1, y2, ..., y1))
    """
    xlist = [vertex[0] for vertex in poly]
    ylist = [vertex[1] for vertex in poly]
    # Close the path by repeating the first vertex.
    xlist.append(xlist[0])
    ylist.append(ylist[0])
    return (xlist, ylist)
958621cb7ff2d4fe22c4482e07a6a7ba614f9fc1
704,009
import re

# Any run of characters that is not an ASCII letter or digit.
_NON_ALNUM_RE = re.compile('[^a-zA-Z0-9]+')


def alphanumerical(string):
    """Filter a string to only allow alphanumerical characters."""
    return _NON_ALNUM_RE.sub('', string)
3acd64f629e601c72421ac73955a61c1426d3a9d
704,010
def decode_dbkey(dbkey):
    """Decodes dbkey and returns tuple ( username, dbkey ).

    A dbkey without a ``username:`` prefix decodes to (None, dbkey).
    """
    if not isinstance(dbkey, str) or ':' not in dbkey:
        return None, dbkey
    return dbkey.split(':')
de2a076b36e4ea82412478384b65845af225b1ab
704,014
def sum_of_vals(vals):
    """Return the sum of the values from an iterable.

    :param vals: an iterable such as a list of numbers
    :return: sum of the values (0 for an empty iterable)
    """
    # The builtin replaces the hand-rolled accumulation loop; it starts
    # from 0 exactly like the original implementation did.
    return sum(vals)
aecc28e505acbea02f5ec41aec03b2db7bc3baad
704,018
def time_sa_to_s(t, clock_freq):
    """Convert ``t`` from time in samples to time in seconds, given the
    clock frequency in Hz.
    """
    seconds = t / clock_freq
    return float(seconds)
eabf76cda8529dc9c9ad0acc6466c5037062b295
704,019
def kdcompare(r, p, depth):
    """Returns the branch of searching on a k-d tree.

    Input
        r: root node (exposes a ``point`` sequence)
        p: query point
        depth: starting depth of search

    Output
        -1 for the left branch, 1 for the right.
    """
    # Cycle through the dimensions as depth increases.
    axis = depth % len(p)
    return -1 if p[axis] <= r.point[axis] else 1
c11aa24718b8a2d8d9e39852ca53e09118d3c215
704,021
def _has_class(domElement, className): """ Helper function to test if the provided element has the provided class """ return className in domElement.get_attribute('class').split(" ")
9a20557cc8d3e3dc91ac33764a6d94139b70f6f2
704,022
def format_datestamp(datestamp):
    """Format datestamp to an OAI-PMH compliant format.

    Parameters
    ----------
    datestamp: datetime.datetime
        A datestamp.

    Return
    ------
    str:
        Formatted datestamp, e.g. '2020-01-02T03:04:05Z'.
    """
    return f'{datestamp:%Y-%m-%dT%H:%M:%SZ}'
f050dd4f18691034c0414a4d9fa51629b0208d6a
704,023
import base64


def encode_base64(data: bytes) -> bytes:
    """Create a url-safe base64 representation of an input string with any
    newlines stripped.
    """
    encoded = base64.urlsafe_b64encode(data)
    return encoded.replace(b"\n", b"")
472ff045dc1df4ad5fe2a0e3001477a4d1c738fd
704,025
def yellow_bold(payload):
    """Wrap ``payload`` in the ANSI escapes for bold yellow text."""
    return f'\x1b[33;1m{payload}\x1b[39;22m'
2ae528c9dcc5a4f9b5f685f201f4d6696387a256
704,026
def normalize(rendered):
    """Return the input string without non-functional spaces or newlines:
    blank lines dropped, lines stripped and joined, ', ' collapsed to ','.
    """
    stripped_lines = (line.strip() for line in rendered.splitlines()
                      if line.strip())
    return ''.join(stripped_lines).replace(', ', ',')
02a87a7a5e596b45d15bb2559403e92cb69a2f1d
704,028
import aiohttp
import asyncio


async def test_download_speed(session: aiohttp.ClientSession, url: str) -> int:
    """Count the amount of data successfully downloaded from ``url``.

    Reads the response body in small chunks, tallying the bytes received;
    cancellation simply stops the count early.
    """
    total = 0
    try:
        async with session.get(url) as resp:
            # Drain the stream in fixed-size chunks until exhausted.
            while chunk := await resp.content.read(56):
                total += len(chunk)
    except asyncio.CancelledError:
        pass
    finally:
        # NOTE: returning from ``finally`` deliberately yields the byte
        # count collected so far even when another exception is in flight.
        return total
c6ca9504f90cbb9091051931054f12f8498b8535
704,029
def compare_nodal_prices(df_dcopf, df_mppdc):
    """Find max absolute difference in nodal prices between DCOPF and MPPDC
    models.

    Parameters
    ----------
    df_dcopf : pandas DataFrame
        Results from DCOPF model
    df_mppdc : pandas DataFrame
        Results from MPPDC model

    Returns
    -------
    max_price_difference : float
        Maximum difference between nodal prices for DCOPF and MPPDC models
        over all nodes and scenarios
    """
    # DCOPF model: prices come from the power-balance constraint duals.
    dcopf = df_dcopf.reset_index().copy()
    dcopf = dcopf[dcopf['index'].str.contains(r'\.POWER_BALANCE\[')]
    dcopf['Value'] = dcopf.apply(lambda row: row['Constraint']['Dual'], axis=1)
    dcopf['NODE_ID'] = dcopf['index'].str.extract(r'\.POWER_BALANCE\[(\d+)\]').astype(int)
    # NOTE(review): assumes df_dcopf already carries a SCENARIO_ID column
    # (unlike the MPPDC frame, where it is parsed from the index) - confirm.
    dcopf['SCENARIO_ID'] = dcopf['SCENARIO_ID'].astype(int)
    df_dcopf_prices = dcopf.set_index(['SCENARIO_ID', 'NODE_ID'])['Value']

    # MPPDC model: prices are the lambda dual variables.
    mppdc = df_mppdc.reset_index().copy()
    mppdc = mppdc[mppdc['index'].str.contains(r'\.lambda_var\[')]
    mppdc['Value'] = mppdc.apply(lambda row: row['Variable']['Value'], axis=1)
    mppdc['NODE_ID'] = mppdc['index'].str.extract(r'\.lambda_var\[(\d+)\]').astype(int)
    mppdc['SCENARIO_ID'] = mppdc['index'].str.extract(r'LL_DUAL\[(\d+)\]').astype(int)
    df_mppdc_prices = mppdc.set_index(['SCENARIO_ID', 'NODE_ID'])['Value']

    # Largest absolute price gap over all (scenario, node) pairs.
    max_price_difference = df_dcopf_prices.subtract(df_mppdc_prices).abs().max()
    print('Maximum difference between nodal prices over all nodes and scenarios: {0}'.format(max_price_difference))

    return max_price_difference
2368bb7f8534ac466ab7858fa1056e3fe5f48f16
704,030
def add(a, b):
    """A dummy function to add two variables."""
    result = a + b
    return result
4914b8d73e6808d93e8e8ee98902ad3b093f1ce6
704,031
def truncate(s, eps):
    """Find the smallest k such that sum(s[:k] ** 2) >= 1 - eps."""
    threshold = 1 - eps
    partial = 0.0
    count = 0
    while partial < threshold:
        partial += s[count] ** 2
        count += 1
    return count
fc9b5984316e969961b496fd54425e4f52f025ff
704,032
def ObjectToDict(obj):
    """Converts an object into a dict of its non-dunder attributes."""
    return {
        name: getattr(obj, name)
        for name in dir(obj)
        if not name.startswith("__")
    }
f2679daab84d5cee2c7f319d1d34f3c669971cd6
704,043
def _unravel(nodes,tets,index): """Returns a list containing the node coordinates of the tet stored in the 'index' position in the 'tets' list.""" return [nodes[tets[index][0]],nodes[tets[index][1]],nodes[tets[index][2]],nodes[tets[index][3]]]
e8428de351db2a84a4875a81b47d07b03a67efd9
704,045
import types
import asyncio


def loop_apply_coroutine(loop, func: types.FunctionType, *args, **kwargs) -> object:
    """Call a function with the supplied arguments.

    If ``func`` is a coroutine function, drive it to completion on the
    supplied ``loop`` and return its result; otherwise call it directly.
    """
    if not asyncio.iscoroutinefunction(func):
        return func(*args, **kwargs)
    future = asyncio.ensure_future(func(*args, **kwargs), loop=loop)
    loop.run_until_complete(future)
    return future.result()
d77a70540237f690e712e30b93b53b363907b678
704,049
def zipmap(keys, vals):
    """Return a ``dict`` with the keys mapped to the corresponding
    ``vals``.
    """
    return {key: val for key, val in zip(keys, vals)}
a058e5a4e462416f48d83b3c288a0cd8d6b000ef
704,050
def maxsubarray(list):
    """Naive approach to calculating the max subarray, iterating all
    possible contiguous subarrays.

    Complexity (n = list size):
        Time: O(n^2); Space: O(1)

    Returns (maxSum, maxStart, maxEnd) with inclusive indices.
    """
    best_sum = list[0]
    best_start = best_end = 0
    for start in range(len(list)):
        running = 0
        for end in range(start, len(list)):
            running += list[end]
            if running > best_sum:
                best_sum = running
                best_start = start
                best_end = end
    return (best_sum, best_start, best_end)
71b4a12d02fab45fc14890ae4a34a0dc50d6a7b4
704,052
from typing import Sequence


def is_sequence(obj):
    """Is the object a *non-str* sequence?

    Checking whether an object is a *non-string* sequence is a bit
    unwieldy; this makes it simple.
    """
    if isinstance(obj, str):
        return False
    return isinstance(obj, Sequence)
06129c6122fec0290edb34cadc75b68199738435
704,053
def getAxisList(var):
    """Return the coordinate of each dimension of ``var``, in dim order."""
    return [var.coords[dim] for dim in var.dims]
eae9b971bcbf021ef2203dd6cb21df6343d0f19a
704,054
def skip_nothing(name, dirpath):
    """Predicate that never skips anything: always returns
    :obj:`False`.
    """
    return False
9e846b7060af43b2c4165e6530fcabc66415615b
704,056
def centtoinch(cents):
    """Convert centimeters to inches (1 cm = 0.3937 in)."""
    return cents * .3937
517142a29242246721abd05638c8ecbefcd888cb
704,059
def get_top_element_count(mol, top):
    """Returns the element count for the molecule considering only the atom
    indices in ``top``.

    Args:
        mol (Molecule): The molecule to consider.
        top (list): The atom indices to consider.

    Returns:
        dict: The element count, keys are tuples of (element symbol,
        isotope number), values are counts.
    """
    if not isinstance(top, list):
        top = list(top)
    element_count = {}
    for index, atom in enumerate(mol.atoms):
        if index not in top:
            continue
        key = (atom.element.symbol, atom.element.isotope)
        element_count[key] = element_count.get(key, 0) + 1
    return element_count
f7293c1d154346c955052ceff0ee59483538bdc3
704,060
def at_least_one_shift_each(cur_individual):
    """Check that the shift string contains at least one of each shift:
    '01', '10' and '11'.
    """
    counts = {'01': 0, '10': 0, '11': 0}
    remaining = cur_individual
    while remaining:
        # Consume the string two characters (one shift) at a time.
        shift, remaining = remaining[:2], remaining[2:]
        if shift in counts:
            counts[shift] += 1
        if all(count > 0 for count in counts.values()):
            return True
    return False
070fe16e779ab30bcee7873ec01876962f30ec91
704,062
from typing import Optional
from typing import Callable
from typing import Iterator
from typing import Any
import inspect


def iterpoints(resp: dict, parser: Optional[Callable] = None) -> Iterator[Any]:
    """Iterates a response JSON yielding data point by point.

    Works with both regular and chunked responses.  By default yields each
    point as a plain list of values, without column names or metadata.  An
    optional ``parser`` callable receives the point values and, if its
    signature declares a ``meta`` parameter, a dict holding all or a subset
    of ``{'columns', 'name', 'tags', 'statement_id'}``.

    Sample parsers::

        def parser(*x, meta):
            return dict(zip(meta['columns'], x))

        from collections import namedtuple
        parser = namedtuple('MyPoint', ['col1', 'col2', 'col3'])

    :param resp: Dictionary containing parsed JSON
        (output from InfluxDBClient.query)
    :param parser: Optional parser function/callable
    :return: Generator object
    """
    for statement in resp['results']:
        if 'series' not in statement:
            continue
        # Only the first series encountered is iterated; the generator is
        # returned immediately, matching chunked-response handling.
        for series in statement['series']:
            values = series['values']
            if parser is None:
                return (point for point in values)
            if 'meta' in inspect.signature(parser).parameters:
                meta = {key: series[key] for key in series if key != 'values'}
                meta['statement_id'] = statement['statement_id']
                return (parser(*point, meta=meta) for point in values)
            return (parser(*point) for point in values)
    return iter([])
000c2c873ab38378bb42945ed3304213b254061a
704,064
def convert_output_key(name):
    """Convert output name into IE-like name.

    :param name: output name to convert (string, or (name, port) 2-tuple)
    :return: IE-like output name
    :raises Exception: when a tuple of the wrong arity is passed
    """
    if not isinstance(name, tuple):
        return name
    if len(name) != 2:
        raise Exception('stats name should be a string name or 2 elements tuple '
                        'with string as the first item and port number the second')
    base, port = name
    return '{}.{}'.format(base, port)
d5c59766c615e0e7b45f173948692050a7b890e3
704,065
def in_polygon(point, polygon):
    """Simple wrapper on the within method of shapely points.

    Params:
        point (POINT): a shapely point object
        polygon (POLYGON): a shapely polygon object (the target overlap area)

    Returns:
        (bool): whether or not the point is within polygon
    """
    return point.within(polygon)
0a26d023672162a53affddfe23a89361d900d9a0
704,066
def set_spines(ax, plot_params):
    """Hide user-chosen spines of the shift graph.

    Parameters
    ----------
    ax: Matplotlib ax
        Current ax of the shift graph.
    plot_params: dict
        Dictionary of plotting parameters; only ``invisible_spines`` is
        used here.
    """
    invisible = plot_params["invisible_spines"]
    if invisible:
        valid = {"left", "right", "top", "bottom"}
        for spine in invisible:
            if spine in valid:
                ax.spines[spine].set_visible(False)
            else:
                print("invalid spine argument")
    return ax
4e74ce30f52d465e9470f608cd8c909dfae4d0a5
704,069
def matches(G, queue):
    """Walk the sequence in ``queue`` through graph ``G`` starting at node
    0 and return the node id reached, or False when the walk leaves the
    graph (or ``queue`` is empty).
    """
    if not queue:
        return False
    node = 0
    for symbol in queue:
        try:
            node = G[node][symbol]
        except KeyError:
            return False
    return node
859f43f6457b4add4cda88495a8dc6c4a559e5d5
704,071
def getPortList(chute):
    """Get a list of ports to expose in the format expected by
    create_container.

    Uses the port binding dictionary from the chute host_config section.
    The keys are expected to be integers or strings in one of the
    following formats: "port" or "port/protocol".

    Example::

        port_bindings = {"1111/udp": 1111, "2222": 2222}
        getPortList returns [(1111, 'udp'), (2222, 'tcp')]

    If the chute is configured to host a web service, its web port is
    appended as tcp unless some entry already uses that port number.
    """
    config = getattr(chute, 'host_config', None) or {}
    ports = []
    for binding in config.get('port_bindings', {}):
        if isinstance(binding, int):
            ports.append((binding, 'tcp'))
            continue
        parts = binding.split('/')
        protocol = parts[1] if len(parts) > 1 else 'tcp'
        ports.append((int(parts[0]), protocol))
    web_port = chute.getWebPort()
    if web_port is not None and all(p[0] != web_port for p in ports):
        ports.append((web_port, 'tcp'))
    return ports
eb97568befca72f6d9cb6c455d8d9ad03b13eebf
704,072
import re


def camelcase_to_snakecase(value: str) -> str:
    """
    Convert a string from camelCase to snake_case.

    >>> camelcase_to_snakecase('')
    ''
    >>> camelcase_to_snakecase('foo')
    'foo'
    >>> camelcase_to_snakecase('fooBarBaz')
    'foo_bar_baz'
    >>> camelcase_to_snakecase('foo_bar_baz')
    'foo_bar_baz'
    >>> camelcase_to_snakecase('_fooBarBaz')
    '_foo_bar_baz'
    >>> camelcase_to_snakecase('__fooBarBaz_')
    '__foo_bar_baz_'
    """
    # Normalize dash/dot/whitespace separators to underscores first.
    value = re.sub(r"[\-\.\s]", "_", value)
    if not value:
        return value
    # Keep the first character as-is (lowered) so leading underscores
    # survive, then prefix every capital with '_' and lower it.
    return value[0].lower() + re.sub(
        r"[A-Z]", lambda matched: "_" + matched.group(0).lower(), value[1:]
    )
05fe02739e8152bc64ab35bd842162b5d7c3ab4c
704,075
import uuid


def generate_unique_str(allow_dashes=True):
    """Generate a unique string using the uuid package.

    Args:
        allow_dashes (bool, optional): If true return the dashed uuid4()
            form, otherwise the bare hex digits (no dashes).
            Defaults to True.
    """
    token = uuid.uuid4()
    return str(token) if allow_dashes else token.hex
9a08364837ea719454b885fcb344631005e7a610
704,079
def remove_batch_from_layout(layout):
    """The tf-mesh layout splits across batch size; remove it.

    Useful for prediction steps, when you no longer want large batches.

    :param layout: string describing tf-mesh layout
    :return: layout minus any entry containing "batch"
    """
    kept = [entry for entry in layout.split(',') if "batch" not in entry]
    return ",".join(kept)
44d032504055e1133a6dc97ea040ff44ea2ac327
704,081
def _knapsack01_recur(val, wt, wt_cap, n): """0-1 Knapsack Problem by naive recursion. Time complexity: O(2^n), where n is the number of items. Space complexity: O(n). """ if n < 0 or wt_cap == 0: return 0 if wt[n] > wt_cap: # Cannot be put. max_val = _knapsack01_recur(val, wt, wt_cap, n - 1) else: # Can be put: to put or not to put. val_in = val[n] + _knapsack01_recur(val, wt, wt_cap - wt[n], n - 1) val_ex = _knapsack01_recur(val, wt, wt_cap, n - 1) max_val = max(val_in, val_ex) return max_val
88f73b2e2f577b5e17a4ba235699ad542dfc7f0d
704,082
def auc_step(X, Y):
    """Compute area under curve using step function (in 'post' mode)."""
    if len(X) != len(Y):
        raise ValueError(
            "The length of X and Y should be equal but got "
            + "{} and {} !".format(len(X), len(Y))
        )
    # Each step contributes its width times the left-hand Y value.
    return sum((X[i + 1] - X[i]) * Y[i] for i in range(len(X) - 1))
886f410a35a49a7098c1f2dcd145b54d1b54d423
704,084
def calculate_num_modules(slot_map):
    """Reads the slot map and counts the number of modules we have in
    total.

    :param slot_map: The slot map containing the modules per slot.
    :return: The number of modules counted in the config.
    """
    total = 0
    for modules in slot_map.values():
        total += len(modules)
    return total
efbb82a54843f093a5527ebb6a1d4c4b75668ebb
704,085
import filecmp


def files_differ(path_a, path_b):
    """True if the files at ``path_a`` and ``path_b`` have different
    content (per a shallow :func:`filecmp.cmp` comparison).
    """
    same = filecmp.cmp(path_a, path_b)
    return not same
ea0382e619228cd0fc042a9003c34f33bd53f313
704,089
def m2(topic_srs, topic_vol, sharpe, ref_vol, cum=False, annual_factor=1):
    """Calcs m2 return which is a port to mkt vol adjusted return measure.

    The Sharpe ratio can be difficult to interpret since it's a ratio, so
    M2 converts a Sharpe to a return number comparable to the reference
    series' return.

    Args:
        topic_srs: The series of interest.
        topic_vol: The volatility of the topic series.
        sharpe: The Sharpe ratio of the topic.
        ref_vol: The reference series' volatility.
        cum (bool, optional): Flag to indicate calculating a cumulative
            value. (default is False; currently unused here)
        annual_factor (float, optional): Factor used to annualize the M2
            value. (default is 1)

    Returns:
        M2 return.
    """
    vol_gap = ref_vol - topic_vol
    return (topic_srs + sharpe * vol_gap) * annual_factor
8b05b0419db895d1de756cfb8751b9311cd43eca
704,090
def find_largest(line: str) -> int:
    """Return the largest value in line, which is a whitespace-delimited
    string of integers that each end with a '.'.

    An empty line (or one whose values are all below -1) yields -1.

    >>> find_largest('1. 3. 2. 5. 2.')
    5
    """
    largest = -1
    for token in line.split():
        # Drop the trailing period before converting.
        value = int(token[:-1])
        largest = max(largest, value)
    return largest
95ceb1e79812e9ef9c7338f393e9d22224eb5a03
704,093
def strify(iterable_struct, delimiter=','):
    """Join the elements of an iterable into a delimiter-separated string.

    :param iterable_struct: any iterable structure
    :param delimiter: separator character, default comma
    :return: a string of the stringified elements joined by the delimiter
    """
    return delimiter.join(str(item) for item in iterable_struct)
3c8337691c9008449a86e1805fe703d6da73a523
704,098
def generate_pattern_eq_ipv4(value):
    """Return a pattern string that tests equality against the given IPv4 address."""
    return "ipv4-addr:value = '{}'".format(value)
36b4a09363512709c3bdf8046ea52f8ba14aa8e7
704,099
import torch


def undo_imagenet_preprocess(image):
    """Invert the standard ImageNet normalization.

    Input:
    - image (pytorch tensor): image after ImageNet preprocessing, on CPU,
      shape = (3, 224, 224)

    Output:
    - undo_image (pytorch tensor): pixel values in [0, 1]
    """
    # Per-channel constants broadcast over the spatial dimensions.
    channel_mean = torch.Tensor([0.485, 0.456, 0.406]).reshape(3, 1, 1)
    channel_std = torch.Tensor([0.229, 0.224, 0.225]).reshape(3, 1, 1)
    return image * channel_std + channel_mean
57d4cfc365c4e6c2dcfd37c8a2c500465daa421a
704,102
def collapse(html):
    """Remove any indentation and newlines from the html."""
    stripped_lines = (line.strip() for line in html.split('\n'))
    return ''.join(stripped_lines).strip()
a5a55691f2f51401dbd8b933562266cbed90c63d
704,103
from zoneinfo import ZoneInfo


def localtime(utc_dt, tz_str):
    """Convert a UTC datetime to the given local timezone.

    :param utc_dt: datetime, timezone-aware (UTC)
    :param tz_str: str, IANA timezone name, e.g. 'US/Eastern'
    :return: datetime in the target timezone

    Uses the standard-library zoneinfo (Python 3.9+) instead of the
    third-party pytz; datetime.astimezone() applies DST transitions
    correctly, so pytz's normalize() step is unnecessary.
    """
    return utc_dt.astimezone(ZoneInfo(tz_str))
f48844c72895813fdcd3913cfe7de0e6f6d0ac3c
704,107
def HTTP405(environ, start_response):
    """WSGI application returning an empty 405 METHOD NOT ALLOWED response.

    :param environ: WSGI environment dict (unused).
    :param start_response: WSGI start_response callable.
    :return: iterable of bytes forming the (empty) response body.
    """
    start_response('405 METHOD NOT ALLOWED', [('Content-Type', 'text/plain')])
    # PEP 3333 requires the body iterable to yield bytes, not str.
    return [b'']
f07522ac904ec5ab1367ef42eb5afe8a2f0d1fce
704,110
def get_percentage(numerator, denominator, precision=2):
    """Return numerator/denominator expressed as a percentage.

    :param numerator: dividend
    :param denominator: divisor (must be non-zero)
    :param precision: number of decimal digits to round to (default 2)
    :return: rounded percentage value
    """
    ratio = float(numerator) / float(denominator)
    return round(ratio * 100, precision)
7104f6bf2d88f9081913ec3fbae596254cdcc878
704,114
def _calculate_verification_code(hash: bytes) -> int: """ Verification code is a 4-digit number used in mobile authentication and mobile signing linked with the hash value to be signed. See https://github.com/SK-EID/MID#241-verification-code-calculation-algorithm """ return ((0xFC & hash[0]) << 5) | (hash[-1] & 0x7F)
173f9653f9914672160fb263a04fff7130ddf687
704,115
def suggest_parameters_DRE_NMNIST(trial, list_lr, list_bs, list_opt,
                                  list_wd, list_multLam, list_order):
    """Suggest hyperparameters via optuna categorical sampling.

    Args:
        trial: An optuna trial object.
        list_lr: Candidate learning rates (floats; strings are coerced).
        list_bs: Candidate batch sizes (ints).
        list_opt: Candidate optimizer names (strings).
        list_wd: Candidate weight decays (floats).
        list_multLam: Candidate prefactors of the second term of BARR.
        list_order: Candidate orders of SPRT-TANDEM (ints).

    Returns:
        Tuple of (learning_rate, batch_size, name_optimizer, weight_decay,
        param_multLam, order_sprt).
    """
    # YAML loaders may read values such as 1e-2 as strings; coerce them to
    # floats in place (the caller's list is intentionally updated, as before).
    list_lr[:] = [float(lr) for lr in list_lr]

    learning_rate = trial.suggest_categorical('learning_rate', list_lr)
    batch_size = trial.suggest_categorical('batch_size', list_bs)
    name_optimizer = trial.suggest_categorical('optimizer', list_opt)
    weight_decay = trial.suggest_categorical('weight_decay', list_wd)
    param_multLam = trial.suggest_categorical('param_multLam', list_multLam)
    order_sprt = trial.suggest_categorical('order_sprt', list_order)

    return (learning_rate, batch_size, name_optimizer,
            weight_decay, param_multLam, order_sprt)
627855f5fe8fd15d43cc7c8ca3da22b704b5907e
704,119
def _contains_atom(example, atoms, get_atoms_fn): """Returns True if example contains any atom in atoms.""" example_atoms = get_atoms_fn(example) for example_atom in example_atoms: if example_atom in atoms: return True return False
c9e60d956585c185f9fb62cc0d11f169e6b79f88
704,121
def get_diff_level(files):
    """Return the lowest hierarchical file-parts level at which the given
    paths differ, or None when no visited level differs."""
    # Walk the paths level by level; the first level with more than one
    # distinct component is where they diverge.
    per_level = zip(*[f.parts for f in files])
    for level, components in enumerate(per_level):
        if len(set(components)) > 1:
            return level
c9c3f774712684c6817c8bb5b3bf9c101e1df8fa
704,122
def get_max(list_tuples):
    """Return the tuple with the highest first element from a list.

    If the list is empty, a sentinel tuple of -2's is returned. Ties are
    resolved in favor of the earliest tuple in the list.
    """
    if not list_tuples:
        return (-2, -2, -2, -2)
    # max() returns the first maximal element, matching the original
    # first-match-wins scan.
    return max(list_tuples, key=lambda tup: tup[0])
91c662d5865de346a1ac73025ced78a996077111
704,123
def remove_nulls_from_dict(d):
    """Recursively remove falsy values from nested dictionaries and lists.

    :param d: a dict, list, or scalar value.
    :return: a new structure with falsy entries removed; scalars are
        returned unchanged.

    Note: any falsy value is dropped ('' , None, 0, False, empty
    containers), matching the original behavior. The six dependency was
    removed: on Python 3, dict.items() is equivalent to six.iteritems().
    """
    if isinstance(d, dict):
        return {k: remove_nulls_from_dict(v) for k, v in d.items() if v}
    if isinstance(d, list):
        return [remove_nulls_from_dict(entry) for entry in d if entry]
    return d
dd0da02eae06ceccc1347e6ac87dcb65bdc44126
704,125
def safe_divide(num, denom):
    """Divide the two numbers, avoiding ZeroDivisionError.

    Args:
        num: numerator
        denom: denominator

    Returns:
        The quotient, or 0 if the denominator is 0.
    """
    try:
        quotient = num / denom
    except ZeroDivisionError:
        return 0
    return quotient
144cf6bf8b53ab43f3ab2e16e7dd2c95f5408035
704,130
def convert_units(P, In='cm', Out='m'):
    """Quickly convert distance units between meters, centimeters and millimeters.

    :param P: value (or array-like supporting scalar multiplication)
    :param In: source unit, one of 'm', 'cm', 'mm'
    :param Out: target unit, one of 'm', 'cm', 'mm'
    :return: P converted from In to Out
    """
    # Flat (source, target) -> factor table; constants match the original.
    factors = {
        ('m', 'm'): 1., ('m', 'cm'): 100., ('m', 'mm'): 1000.,
        ('cm', 'm'): 0.01, ('cm', 'cm'): 1., ('cm', 'mm'): 10.,
        ('mm', 'm'): 0.001, ('mm', 'cm'): 0.1, ('mm', 'mm'): 1.,
    }
    return factors[(In, Out)] * P
bc318011ffc71d575c7e7276c2dede467a84dc2c
704,132
def F16(x):
    """Rosenbrock function"""
    # NOTE(review): the second term is (x[i] + 1)**2 rather than the classic
    # Rosenbrock (1 - x[i])**2 — preserved as-is; confirm which variant is
    # intended.
    total = 0
    for left, right in zip(x, x[1:]):
        total += 100 * (right - left ** 2) ** 2 + (left + 1) ** 2
    return total
7421ad45568a8b86aff41fc5c8466ae6ce7aeb9d
704,134
def div_up(a, b):
    """Return the upper bound (ceiling) of the division a / b."""
    rounded_numerator = a + b - 1
    return rounded_numerator // b
e297f2d08972ebc667d1f3eadca25ef885ef5453
704,137
def solar_elevation_angle(solar_zenith_angle):
    """Return the solar elevation angle in degrees, the complement of the
    given solar zenith angle."""
    return 90 - solar_zenith_angle
f896c5d0608171f3e5bd37cede1965fe57846d07
704,139
def parse_ucsc_file_index(stream, base_url):
    """Turn a UCSC DCC files.txt index into a dictionary of name-value pairs.

    :param stream: iterable of lines shaped "filename<TAB>key=value; key=value; ..."
    :param base_url: prefix joined onto each filename to form the dict key.
    :return: dict mapping full URL -> dict of attribute name -> value.
    """
    file_index = {}
    for line in stream:
        # maxsplit=1 tolerates stray tabs inside the attribute field.
        filename, attribute_line = line.split('\t', 1)
        filename = base_url + filename
        attributes = {}
        for assignment in attribute_line.split(';'):
            # Skip empties produced by a trailing ';' or the line's newline.
            if not assignment.strip():
                continue
            # maxsplit=1 keeps '=' characters inside values intact.
            name, value = assignment.split('=', 1)
            attributes[name.strip()] = value.strip()
        file_index[filename] = attributes
    return file_index
2d74bae9c7f2584ff8d859c8d2781faa3f6631b5
704,142
def update_internal_subnets(
    self,
    ipv4_list: list = None,
    ipv6_list: list = None,
    segment_ipv4_list: list = None,
    non_default_routes: bool = False,
) -> bool:
    """Update the list of internal subnets used to classify internet traffic.

    .. list-table::
        :header-rows: 1

        * - Swagger Section
          - Method
          - Endpoint
        * - internalSubnets
          - POST
          - /gms/internalSubnets2

    Any traffic not matching the internal subnets will be classified as
    internet traffic. This list will be pushed to all appliances. Users can
    configure up to 512 subnets in each ipv4 and ipv6 entry.

    .. warning::
        This will overwrite current subnets!

    :param ipv4_list: List of ipv4 networks in CIDR format for all VRFs,
        defaults to ["10.0.0.0/8", "172.16.0.0/12", "192.168.0.0/16",
        "169.254.0.0/16", "224.0.0.0/4"]
    :type ipv4_list: list, optional
    :param ipv6_list: List of ipv6 networks in CIDR format, defaults to []
    :type ipv6_list: list, optional
    :param segment_ipv4_list: List of ipv4 networks each prefaced with the
        related VRF id #, e.g. for VRF 1 only ["1:192.168.0.0/16"],
        defaults to []
    :type segment_ipv4_list: list, optional
    :param non_default_routes: Treat non-default routes as internal
        subnets, defaults to False
    :type non_default_routes: bool, optional
    :return: Returns True/False based on successful call.
    :rtype: bool
    """
    # None sentinels replace mutable default arguments (a shared-state
    # hazard); the effective defaults are unchanged.
    if ipv4_list is None:
        ipv4_list = [
            "10.0.0.0/8",
            "172.16.0.0/12",
            "192.168.0.0/16",
            "169.254.0.0/16",
            "224.0.0.0/4",
        ]
    if ipv6_list is None:
        ipv6_list = []
    if segment_ipv4_list is None:
        segment_ipv4_list = []

    data = {
        "ipv4": ipv4_list,
        "ipv6": ipv6_list,
        "segmentIpv4": segment_ipv4_list,
        "nonDefaultRoutes": non_default_routes,
    }

    return self._post(
        "/gms/internalSubnets2",
        data=data,
        expected_status=[204],
        return_type="bool",
    )
ce1a11f2cbdb01c81fb01a13ba3d73c7ce5d0cf6
704,143
def expand_to_point(b1, p1):
    """Expand bbox b1 to contain every point in p1.

    :param b1: bounding box as a 4-sequence (min_x, min_y, max_x, max_y).
    :param p1: iterable of (x, y) points.
    :return: expanded bounding box as a 4-tuple.

    Bug fix: the previous implementation applied operator.add to the bbox
    and a 4-tuple, which concatenates sequences (growing the bbox to 8, 12,
    ... elements) instead of expanding it. This version computes the
    min/max envelope as the docstring intends.
    """
    min_x, min_y, max_x, max_y = b1
    for x, y in p1:
        min_x = min(min_x, x)
        min_y = min(min_y, y)
        max_x = max(max_x, x)
        max_y = max(max_y, y)
    return (min_x, min_y, max_x, max_y)
5a79646403f7f9c2397aadb4f1826d8309eb8dcb
704,145
def format_fields(field_data, include_empty=True):
    """Format field labels and values into colon-aligned lines.

    Parameters
    ----------
    field_data : |list| of |tuple|
        2-tuples of (label, value).
    include_empty : |bool|, optional
        Whether fields whose values are |None| or an empty |str| should be
        included in the formatted output.

    Returns
    -------
    str
        Newline-joined "Label: value" lines, padded so values align.
    """
    # Width of the widest (unstripped) label drives the alignment column.
    widest = max((len(label) for label, _ in field_data), default=0)

    lines = []
    for label, value in field_data:
        is_empty = str(value).strip() in ['', 'None']
        if is_empty and not include_empty:
            continue
        # Pad after the colon so every value starts in the same column.
        padding = ' ' * (widest - len(label.strip()))
        lines.append(label + ':' + padding + ' ' + str(value))
    return '\n'.join(lines)
cce4b5279e01c33fec0f83c6f86141c33012fc4c
704,155
import re


def only_bf(txt):
    """Strip a string of all characters except the Brainfuck command set: > < + - . , [ ]

    Bug fix: the previous character class contained the run '+-\\]', which
    the regex engine parsed as the range '+' through ']' — so digits,
    uppercase letters and assorted punctuation were wrongly kept. The
    hyphen is now escaped and the brackets are escaped explicitly.
    """
    return re.sub(r"[^><+\-.,\[\]]", "", txt)
8c32b11d511f5c7b92d7454dcbfea09627ddf172
704,156
import re
import string


def validate_word(word, text):
    """Check if something is a valid "word" submission with previous existing text.

    Return (valid, formatted_word, message), where valid is a boolean,
    formatted_word is the word ready to be added to existing text (adding a
    space if applicable for example), and message is an error message if the
    word was not valid.

    It can be a word, or ?, !, . for now. Can make it a little more
    complicated later."""
    # Opening word of the story: letters/apostrophes only, title-cased.
    if not text:
        if re.fullmatch("[a-zA-Z']+", word):
            return (True, string.capwords(word), "")
        else:
            return (False, "", "Story must begin with a word")
    if word == "":
        return (False, "", "You have to write something!")
    # Plain word: prefix a space; capitalize when the previous text ended a
    # sentence (or a line).
    if re.fullmatch("[a-zA-Z']+", word):
        if text[-1] in ["?", ".", "!", "\n"]:
            return (True, (' ' + string.capwords(word)), "")
        else:
            return (True, (' ' + word), "")
    # Hyphenated continuation such as "-like": only allowed directly after
    # a word, and not "-'".
    if re.fullmatch("\-[a-zA-Z']+", word):
        if not text[-1].isalpha():
            return (False, "", "You can only hyphenate after a word.")
        if re.search("\-'", word):
            return(False, "", "An apostrophe cannot directly follow a hyphen.")
        else:
            return (True, word, "")
    # Comma submissions must have the exact shape ", word" and follow a word.
    if re.search(",", word):
        if re.fullmatch(", [a-zA-Z']+", word):
            if text[-1].isalpha():
                return (True, word, "")
            else:
                return (False, "", "A comma can only come after a word.")
        else:
            return (False, "", "Invalid comma use.")
    # Bare sentence-ending punctuation: only valid immediately after a word.
    if word in ["?", ".", "!"]:
        if text[-1].isalpha():
            return (True, word, "")
        else:
            return (False, "", "Sentence-ending punctuation can only go after a word.")
    if " " in word:
        return (False, "", "Word cannot contain spaces except after a comma.")
    else:
        # Fall-through: anything not matched above is rejected.
        return (False, "", "Not a valid word for some reason (disallowed characters?)")
658873c8cbf446cbe53ec5f806db668ceecaa2cf
704,158
def process_content_updates(results):
    """Process Content Updates.

    Args:
        results (Element): XML results from firewall

    Returns:
        max_app_version (str): A string containing the latest App-ID version

    NOTE(review): versions are compared as strings via max(); confirm that
    lexicographic order matches version order for this version format.
    """
    entries = results.findall('./result/content-updates/entry')
    versions = [entry.find('./version').text for entry in entries]
    return max(versions)
021c9ac9246034874a1fe274fb49aabfa0f15d61
704,159
def inverso(x):
    """Return the multiplicative inverse of a number.

    .. math:: \\frac{1}{x}

    Args:
        x (float): Number to invert (must be non-zero; a zero raises
            ZeroDivisionError).

    Returns:
        float: The inverse, 1 / x.
    """
    return 1 / x
16f2cb9466efa661d3ee8b10b6a0d637273f6b7c
704,161
def galeshapley(suitor_pref_dict, reviewer_pref_dict, max_iteration):
    """ The Gale-Shapley algorithm. This is known to provide a unique, stable
    suitor-optimal matching. The algorithm is as follows:

    (1) Assign all suitors and reviewers to be unmatched.
    (2) Take any unmatched suitor, s, and their most preferred reviewer, r.
            - If r is unmatched, match s to r.
            - Else, if r is matched, consider their current partner, r_partner.
                - If r prefers s to r_partner, unmatch r_partner from r and
                  match s to r.
                - Else, leave s unmatched and remove r from their preference
                  list.
    (3) Go to (2) until all suitors are matched, then end.

    Parameters
    ----------
    suitor_pref_dict : dict
        A dictionary with suitors as keys and their respective preference
        lists as values.  NOTE: this dict's preference lists are mutated
        (rejected reviewers are removed) as the algorithm runs.
    reviewer_pref_dict : dict
        A dictionary with reviewers as keys and their respective preference
        lists as values
    max_iteration : int
        An integer as the maximum iterations

    Returns
    -------
    matching : dict
        The suitor-optimal (stable) matching with suitors as keys and the
        reviewer they are matched with as values
    """
    suitors = list(suitor_pref_dict.keys())
    matching = dict()      # suitor -> reviewer
    rev_matching = dict()  # reviewer -> suitor (reverse index of `matching`)
    for i in range(max_iteration):
        # All suitors matched: done early.
        if len(suitors) <= 0:
            break
        for s in suitors:
            # Each unmatched suitor proposes to their current top choice.
            r = suitor_pref_dict[s][0]
            if r not in matching.values():
                # Reviewer is free: match immediately.
                matching[s] = r
                rev_matching[r] = s
            else:
                r_partner = rev_matching.get(r)
                # Reviewer trades up only if the new suitor ranks strictly
                # higher (lower index) in their preference list.
                if reviewer_pref_dict[r].index(s) < reviewer_pref_dict[r].index(r_partner):
                    del matching[r_partner]
                    matching[s] = r
                    rev_matching[r] = s
                else:
                    # Rejected: suitor strikes r from their list (mutates the
                    # caller's preference dict).
                    suitor_pref_dict[s].remove(r)
        # Recompute the set of still-unmatched suitors for the next round.
        suitors = list(set(suitor_pref_dict.keys()) - set(matching.keys()))
    return matching
5b52cb165d15a0992b58c38958daf222d8d642cd
704,166
from pathlib import Path


def create_upload_file(tmp_path):
    """Create a temporary text file for upload and return its path."""
    file_path = Path(tmp_path, "test_upload_1.txt")
    file_path.write_text("Hello World")
    return file_path
50b707f59736ae1b1e06018aedec451b578eafc8
704,168
def get_path_up_down(path_source, path_target):
    """Compute the up/down traversal between two paths.

    NOTE: both lists always show the LOWER level element, so path_up shows
    the source side and path_down the target side.

    Args:
        path_source(list)
        path_target(list)

    Returns:
        tuple: (path_up, peak, path_down), where peak is the deepest shared
        element. The root element is assumed to always be shared.
    """
    # Collect the common prefix of the two paths.
    shared = []
    for src_part, tgt_part in zip(path_source, path_target):
        if src_part != tgt_part:
            break
        shared.append(src_part)

    depth = len(shared)
    peak = shared[-1]  # root is always shared
    path_down = path_target[depth:]
    path_up = path_source[depth:][::-1]
    return path_up, peak, path_down
ba4719b42e0703ea0ac885de29b36466b7eb3676
704,169