content
stringlengths
35
416k
sha1
stringlengths
40
40
id
int64
0
710k
def clean_dict(dictionary: dict) -> dict:
    """Strip ``None``-valued entries from *dictionary*, recursing into nested dicts.

    The dictionary is modified in place and also returned for convenience.

    Args:
        dictionary (dict): subject dictionary

    Returns:
        dict: the same dictionary, with all ``None`` values removed
    """
    # Collect doomed keys first so deletion does not disturb iteration.
    for key in [k for k, v in dictionary.items() if v is None]:
        del dictionary[key]
    for value in dictionary.values():
        if isinstance(value, dict):
            clean_dict(value)
    return dictionary
3968b6d354116cca299a01bf2c61d7b2d9610da9
703,773
def tag_tranfsers(df):
    """Tag txns with description indicating tranfser payment."""
    df = df.copy()
    tfr_strings = [' ft', ' trf', 'xfer', 'transfer']
    exclude = ['fee', 'interest']
    desc = df.transaction_description
    looks_like_transfer = desc.str.contains('|'.join(tfr_strings))
    looks_excluded = desc.str.contains('|'.join(exclude))
    df.loc[looks_like_transfer & ~looks_excluded, 'tag'] = 'transfers'
    return df
4fdfd775ec423418370776c34fac809a513f91b5
703,774
def remove_dead_exceptions(graph):
    """Exceptions can be removed if they are unreachable.

    Walks every raising block of *graph* and rebuilds its exit links,
    dropping exception cases shadowed by an earlier, more general case
    and merging redundant adjacent exits.
    """
    def issubclassofmember(cls, seq):
        # True if cls is a subclass of any non-None member of seq.
        for member in seq:
            if member and issubclass(cls, member):
                return True
        return False

    # Snapshot the blocks: recloseblock mutates the graph while we walk it.
    for block in list(graph.iterblocks()):
        if not block.canraise:
            continue
        exits = []
        seen = []
        for link in block.exits:
            case = link.exitcase
            # check whether exceptions are shadowed by an earlier case
            if issubclassofmember(case, seen):
                continue
            # see if the previous case can be merged: a more specific
            # predecessor with the same target and args is redundant
            while len(exits) > 1:
                prev = exits[-1]
                if not (issubclass(prev.exitcase, link.exitcase) and
                        prev.target is link.target and prev.args == link.args):
                    break
                exits.pop()
            exits.append(link)
            seen.append(case)
        block.recloseblock(*exits)
fc0c810eef726f0979678e3003051c99775a981d
703,775
import os


def get_wiki_img():
    """
    Return the absolute path of the ``wiki.png`` image shipped next to
    this module.

    Raises:
        FileNotFoundError: if the image file does not exist.
    """
    here = os.path.dirname(__file__)
    img = os.path.join(here, "wiki.png")
    if os.path.exists(img):
        return img
    raise FileNotFoundError("Unable to find '{}'.".format(img))
ef522391665830019f7b48f545291d81b528bd45
703,777
import os


def establecer_destino_archivo_imagen(instance, filename):
    """
    Build the destination path for an image file uploaded to the instance.

    Files are stored as 'app_reservas/contingencia/<instance id>_<filename>'.
    """
    base_dir = 'app_reservas/contingencia/'
    unique_name = '{0!s}_{1!s}'.format(instance.id, filename)
    return os.path.join(base_dir, unique_name)
13e233d113ac3232a6e76725b13ef2befcd47feb
703,778
def have_same_SNP_order(dict_A, dict_B):
    """ Checks if two dictionaries have the same SNP order. """
    # The special "ext" key is ignored when comparing key order.
    order_a = [key for key in dict_A if key != "ext"]
    order_b = [key for key in dict_B if key != "ext"]
    return order_a == order_b
b885dee561e9a61bb50e814401ee088593c2517b
703,779
def define_plot_id(plot_name, plot_center):
    """Define plot id, keeping track of coordinates."""
    x_coord = int(plot_center[0])
    y_coord = int(plot_center[1])
    return f"{plot_name}_X{x_coord}_Y{y_coord}"
8f239a121598157c620ee8eef902e1d89218d01e
703,780
def calculate_mean(i, peaklocationstart, peaklocationend):
    """
    This function is for calculating the mean over the specified area and
    returns this mean.

    :param i: iterable of (start, end, value) intervals — presumably
        pre-filtered to overlap the peak region; TODO confirm with callers.
    :param peaklocationstart: left edge of the averaged region
    :param peaklocationend: right edge of the averaged region
    :return: length-weighted mean of the interval values over the region
    """
    length = peaklocationend - peaklocationstart
    mean = 0
    if i:
        for interval in i:
            # Interval spans the entire region: clip to the region length.
            if interval[0] < peaklocationstart and interval[1] > peaklocationend:
                interval_length = peaklocationend - peaklocationstart
            else:
                if interval[1] > peaklocationend:
                    # Overhangs the right edge: clip on the right.
                    interval_length = peaklocationend - interval[0]
                elif interval[0] < peaklocationstart:
                    # Overhangs the left edge: clip on the left.
                    # NOTE(review): an interval lying entirely left of the
                    # region would give a negative length here — confirm
                    # inputs always overlap the region.
                    interval_length = interval[1] - peaklocationstart
                else:
                    interval_length = interval[1] - interval[0]
            # Weight the interval's value by its clipped length.
            mean += interval_length * interval[2]
    mean = mean / length
    return mean
fe57d6ab202c9c7da9894fd0d5aaab2a18ff113e
703,781
def swag(print_swag=True):
    """Swag!

    Builds an ASCII-art banner, optionally prints it, and returns it.

    Args:
        print_swag (bool): when True (default), also print the banner.

    Returns:
        str: the banner text.
    """
    # NOTE(review): the art below lost its original line breaks in this
    # copy of the file — confirm layout against the upstream source.
    output = (""" ( ( ( )⧹ ))⧹ ) . ) ( )⧹ ) ( ( (()/(()/` ) /( )⧹ (()/(( )⧹))( . /(_)/(_)( )(_)) (((_) /(_))⧹ ((_)()⧹ ) (_))(_))(_(_()) )⧹___(_))((_)_(())⧹_)() | _ |_ _|_ _| (/ __| _ | __⧹ ⧹((_)/ / | _/| | | | | (__| | _| ⧹ ⧹/⧹/ / |_| |___| |_| ⧹___|_|_|___| ⧹_/⧹_/ ~ take a REST, and top-up your Fuel ~ """)
    if print_swag:
        print(output)
    return output
c75d804e331f61ca4b779a7a05bd0d42298b0dec
703,782
def afwd(data):
    """ AFWD - Request 4WD status """
    # Identity handler: the request payload is relayed unchanged.
    response = data
    return response
47823c8e8d306c8fc84eea20ad703bc997498049
703,785
def centroid_points_xy(points):
    """Compute the centroid of a set of points lying in the XY-plane.

    Warning
    -------
    Duplicate points are **NOT** removed. If there are duplicates in the
    sequence, they should be there intentionally.

    Parameters
    ----------
    points : list of list
        A sequence of points represented by their XY(Z) coordinates.

    Returns
    -------
    list
        XYZ coordinates of the centroid (Z = 0.0).

    Examples
    --------
    >>> centroid_points_xy([[0.0, 0.0], [2.0, 0.0], [2.0, 2.0], [0.0, 2.0]])
    [1.0, 1.0, 0.0]
    """
    p = len(points)
    # zip(*points) transposes the sequence; only the X and Y rows are kept,
    # so any trailing Z coordinates are ignored.
    x, y = list(zip(*points))[:2]
    return [sum(x) / p, sum(y) / p, 0.0]
8da6c4154ff0632942108f9b63240c86438f3eb4
703,786
def should_force_reinit(config):
    """Configs older than 2.0.0 should be replaced"""
    version = config.get("cli_version", "0.0.0")
    major = int(version.split(".", 1)[0])
    return major < 2
12b704fe2f3d2ef7cedf497a4a2e3a92321f52b2
703,787
def check_tag_list(tag_list):
    """ Makes a list of any tags entered on the command line. """
    # Tags arrive as a single comma-separated argument, if at all.
    if not tag_list:
        return []
    return tag_list[0].split(",")
24fb7d5c1f408ef9825a8efb461dc88483dcba9e
703,788
def remove_leading_character(string, character):
    """
    If "string" starts with "character", strip that leading character away.

    Only removes the first instance.

    :param string: subject string
    :param character: prefix character to drop
    :return: String without the specified, leading character
    """
    return string[1:] if string.startswith(character) else string
4af4d6f86b9a6ed8975c4564904d2e1ca9e6d15a
703,789
def _rfind(lst, item): """ Returns the index of the last occurance of <item> in <lst>. Returns -1 if <item> is not in <l>. ex: _rfind([1,2,1,2], 1) == 2 """ try: return (len(lst) - 1) - lst[::-1].index(item) except ValueError: return -1
ab165a6795b0a495d24288d8e757c16ba9c968a4
703,790
def map_box(sbox, dbox, v):
    """Map a lat/long point into a destination rectangle.

    sbox is (lat1, lat2, long1, long2), dbox is (x1, x2, y1, y2),
    v is (lat, long).  result is (x, y).

    The point is translated relative to the source box origin, scaled,
    and then offset by the destination box origin.
    """
    xscale = abs(dbox[1] - dbox[0]) / abs(sbox[3] - sbox[2])
    yscale = abs(dbox[3] - dbox[2]) / abs(sbox[1] - sbox[0])
    # Bug fix: the destination offset must be added AFTER scaling; the
    # original added it before, so the offset was multiplied by the scale
    # factor (wrong for any dbox not anchored at 0).
    x = dbox[0] + (v[1] - sbox[2]) * xscale
    y = dbox[2] + (v[0] - sbox[0]) * yscale
    return x, y
de1cb095d03eacc4930f37a05c5b01ebd983baca
703,791
def recognize_po_file(filename: str) -> bool:
    """ Recognize .po file """
    # endswith already returns a bool; the explicit if/return was redundant.
    return filename.endswith(".po")
9993e1d0f1a45f1ce60709650a7381df00ebdce0
703,792
def multiply_nums(n1, n2):
    """Function to multiplies two numbers.

    n1 : Must be a numeric type
    n2 : Must be a numeric type
    """
    return n1 * n2
05549b4780fec2e2ba14719ebbb93e45491710e7
703,793
def up_to(s, i, c):
    """Return the characters of ``s`` from index ``i`` up to (excluding)
    the first occurrence of ``c``, joined into a string.

    Raises IndexError if ``c`` never appears at or after ``i``.
    """
    end = i
    while s[end] != c:
        end += 1
    return "".join(s[i:end])
ec4ffc90949b3b66c74c52a0019981564ace8aef
703,794
def main():
    """CollatzPy CLI"""
    # Entry-point stub: report success.
    return 0
51f497ce25f4e90da4f0178510ba25d568edfc0b
703,795
import zipfile import json def _GetVersionFromCrx(crx_path): """Retrieves extension version from CRX archive. Args: crx_path: path to CRX archive to extract version from. """ with zipfile.ZipFile(crx_path, 'r') as crx_zip: manifest_contents = crx_zip.read('manifest.json') version = json.loads(manifest_contents)['version'] return version
e6b612f94b0fa4e62f5ecb8df297a6255294ec5f
703,796
import hashlib


def safe_md5(open_file, block_size=2**20):
    """Computes an md5 sum without loading the file into memory

    The open file object is consumed in ``block_size`` chunks so memory
    use stays bounded.

    This method is based on the answers given in:
    http://stackoverflow.com/questions/1131220/get-md5-hash-of-a-files-without-open-it-in-python
    """
    digest = hashlib.md5()
    while True:
        chunk = open_file.read(block_size)
        if not chunk:
            break
        digest.update(chunk)
    return digest
3711eba8479fabc69f30063cfc6fb585345bab66
703,797
import random


def generator_order_code(sex=None):
    """
    Generate a random sequence ("order") code, with parity tied to sex.

    :param sex: 1 forces an odd code, 0 forces an even code,
        anything else leaves the draw unchanged.
    :return: the order code as a string
    """
    # NOTE(review): randint(101, 1000) is inclusive of 1000, so a
    # four-digit "1000" can be produced — confirm whether a three-digit
    # code (e.g. 100–999) was intended.
    order_code = random.randint(101, 1000)
    # Male (sex == 1): decrement an even draw to make the code odd.
    if sex == 1:
        order_code = order_code - 1 if order_code % 2 == 0 else order_code
    # Female (sex == 0): decrement an odd draw to make the code even.
    if sex == 0:
        order_code = order_code if order_code % 2 == 0 else order_code - 1
    return str(order_code)
52be4dd5d9a40d261511adbe57271824a09592bf
703,798
def get_licence(html):
    """ Searches the HTML content for a mention of a CC licence. """
    markers = ("creative-commons", "Creative Commons")
    found = any(marker in html for marker in markers)
    return "CC" if found else "N/A"
07dcd2439455fd23b034e11204d2a474c9502cdf
703,799
def sum_temp(s, n):
    """
    :param s: int, sum of total value of temperature
    :param n: int, latest information to add up
    :return: s + n
    """
    return s + n
4dc7da032fd91da86d73bf545fdf527497c12cd5
703,800
def mock_random_choice(seq):
    """Always returns first element from the sequence."""
    # Returning seq[0] keeps the element's real type; a MagicMock stand-in
    # would not behave like the actual element.
    first = seq[0]
    return first
a889c7ca32b6d494493000134c1d9d26fe5e97c3
703,801
def V_6_3_3(b, h0, ft):
    """
    Shear capacity of the inclined section of general slab-like flexural
    members without stirrups or bent-up bars (code clause 6.3.3).
    """
    if h0 < 800:
        beta_h = 1
    else:
        # Section-depth influence factor, with h0 capped at 2000.
        beta_h = (800 / min(h0, 2000)) ** 0.25
    return 0.7 * beta_h * ft * b * h0
79e9d97fcc755ed163cedb5f775d07abb6c26595
703,802
def get_lsf_grid_name(fibre_number):
    """
    Return the appropriate LSF name (a, b, c, or d) to use, given a mean
    fibre number.

    :param fibre_number: The mean fibre number of observations.

    :returns:
        A one-length string describing which LSF grid to use ('a', 'b',
        'c', or 'd'), or ``None`` when the number is outside 1-300.
    """
    if 1 <= fibre_number <= 50:
        return "d"
    if 50 < fibre_number <= 145:
        return "c"
    if 145 < fibre_number <= 245:
        return "b"
    if 245 < fibre_number <= 300:
        return "a"
009b20027f895e19c5b6cabb4476cf41a222e465
703,803
import functools


def deprecated(func):
    """Print a deprecation warning once on first use of the function.

    >>> @deprecated  # doctest: +SKIP
    ... def f():
    ...     pass
    >>> f()  # doctest: +SKIP
    f is deprecated
    """
    count = [0]

    # Bug fix: without functools.wraps the wrapper clobbered the wrapped
    # function's __name__/__doc__, breaking introspection and help().
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        count[0] += 1
        if count[0] == 1:
            print(func.__name__, 'is deprecated')
        return func(*args, **kwargs)
    return wrapper
882b26592fa620be65eb7e1306abdf1d138ca022
703,804
import logging
import json


def rest_error_message(error, jid):
    """Returns exception error message as valid JSON string to caller

    :param error: Exception, error message
    :param jid: string, job ID
    :return: JSON string
    """
    logging.exception(error)
    payload = {'user_id': 'admin', 'result': {'error': str(error)}, '_id': jid}
    return json.dumps(payload)
7422c77be37ed473ed15acc5fdfae9e85ff90812
703,805
def Dict(val):
    """ Build a dict for key/value pairs. """
    # Delegate straight to the dict constructor.
    mapping = dict(val)
    return mapping
47864a91183070a7f8ce285e330d1278828b8352
703,806
def _ww3_ounf_contents(run_date, run_type):
    """
    Build the contents of a WAVEWATCH III ww3_ounf.inp field-output
    control file.

    :param run_date: :py:class:`arrow.Arrow`
    :param str run_type: "nowcast", "forecast" or "forecast2"
    :return: ww3_ounf.inp file contents
    :rtype: str
    """
    # Forecast-type runs start their output on the day after run_date.
    start_date = (
        run_date.format("YYYYMMDD")
        if run_type == "nowcast"
        else run_date.shift(days=+1).format("YYYYMMDD")
    )
    run_hours = {"nowcast": 24, "forecast": 36, "forecast2": 30}
    output_interval = 1800  # seconds
    output_count = int(run_hours[run_type] * 60 * 60 / output_interval)
    # NOTE(review): the template below lost its original line breaks in
    # this copy of the file; ww3_ounf.inp expects one directive per line —
    # confirm layout against the upstream source.
    contents = f"""$ WAVEWATCH III NETCDF Grid output post-processing $ $ First output time (YYYYMMDD HHmmss), output increment (s), number of output times {start_date} 000000 {output_interval} {output_count} $ $ Fields N by name HS LM WND CUR FP T02 DIR DP WCH WCC TWO FOC USS $ $ netCDF4 output $ real numbers $ swell partitions $ one file 4 4 0 1 2 T $ $ File prefix $ number of characters in date $ IX, IY range $ SoG_ww3_fields_ 8 1 1000000 1 1000000 """
    return contents
62618639265e419b5ad1ff9c7364e6d83aeca1c0
703,807
def ts_css(text):
    """applies nice css to the type string"""
    # Wrap the text in a span carrying the "ts" CSS class.
    wrapped = '<span class="ts">%s</span>' % text
    return wrapped
a505f4ffc8359bc886f0011295fb5309529be5bf
703,808
import typing


def noop(val: typing.Any, *_args, **_kwargs) -> typing.Any:
    """A function does nothing.

    >>> noop(1)
    1
    """
    # Extra positional/keyword arguments are accepted and ignored.
    result = val
    return result
99841c0b291a654d83741500e83441482f59d45a
703,809
import getpass
import base64


def get_headers(gargs):
    """Get the required headers.

    Builds JSON content-type headers and, unless ``gargs.no_passwd`` is
    set, a Basic Authorization header from (in order of precedence) a
    password file, an explicit ``gargs.auth`` string, or an interactive
    password prompt.
    """
    headers = {'Content-type': 'application/json'}
    if gargs.no_passwd:
        return headers
    if gargs.passwdfile is not None:
        # Bug fix: close the password file deterministically (the original
        # open(...).read() leaked the file handle).
        with open(gargs.passwdfile, "r") as passwd_file:
            passwd = passwd_file.read().strip()
        auth_str = "%s:%s" % (gargs.user, passwd)
    elif gargs.auth is not None:
        auth_str = gargs.auth
    else:
        passwd = getpass.getpass("Password: ")
        auth_str = "%s:%s" % (gargs.user, passwd)
    auth = base64.b64encode(auth_str.encode('utf-8'))
    headers['Authorization'] = 'Basic %s' % auth.decode('utf-8')
    return headers
04d7a9da9e30fbfdf86b0a09d23dc178a0615d27
703,810
def dict_to_capabilities(caps_dict):
    """Convert a dictionary into a string with the capabilities syntax."""
    pairs = ["%s:%s" % pair for pair in caps_dict.items()]
    return ','.join(pairs)
12a321ba5f337f8da116ec7adec68d717fbc776f
703,811
def mean(vector):
    """
    Calculates the arithmetic mean of the given vector.

    Args:
    -----
    vector : list
        A non-empty list/array of numbers to be averaged.

    Returns:
    --------
    mean : float
        The arithmetic mean of the given vector.
    """
    total = 0
    for value in vector:
        total += value
    return total / len(vector)
71bd9a37cb0bfb166632866d0a29be9b14236364
703,812
def guess_cloudwatch_log_group(alarm_name):
    """
    Guess the name of the CloudWatch log group most likely to contain
    logs about the error.
    """
    # Fixed-prefix alarms map straight to a known log group; insertion
    # order preserves the original precedence.
    fixed_prefixes = {
        "loris-": "platform/loris",
        "catalogue-api-romulus": "ecs/catalogue_api_gw-romulus",
        "catalogue-api-remus": "ecs/catalogue_api_gw-remus",
    }
    for prefix, group in fixed_prefixes.items():
        if alarm_name.startswith(prefix):
            return group
    if alarm_name.startswith("lambda-") and alarm_name.endswith("-errors"):
        # e.g. lambda-ecs_ec2_instance_tagger-errors
        lambda_name = alarm_name[len("lambda-"):-len("-errors")]
        return f"/aws/lambda/{lambda_name}"
    raise ValueError(f"Unable to guess log group name for alarm name={alarm_name!r}")
94822c16ce6c84b154be40581eb74e25e2cbe898
703,813
def update_attrs(orig, keys, override):
    """Utility function for altering and adding the specified attributes to a
    particular repository rule invocation. This is used to make a rule
    reproducible.

    Args:
        orig: dict of actually set attributes (either explicitly or implicitly)
            by a particular rule invocation
        keys: complete set of attributes defined on this rule
        override: dict of attributes to override or add to orig

    Returns:
        dict of attributes with the keys from override inserted/updated
    """
    result = {}
    for key in keys:
        # NOTE(review): `!= None` is kept rather than `is not None` — this
        # looks like Starlark-compatible code (repository rules), where
        # `is` does not exist; confirm before "fixing".
        if getattr(orig, key) != None:
            result[key] = getattr(orig, key)
    # The rule name is always carried over, then overrides win.
    result["name"] = orig.name
    result.update(override)
    return result
82498f78604924c281da1fab372a871d5f224010
703,814
def get_mapped_to_elements(mapper):
    """
    Return all element names that have been mapped to by other elements.

    :param mapper: mapping of element name -> iterable of target names
    :return: list of unique target names, in first-seen order
    """
    seen = set()
    mapped_to = []
    for element in mapper:
        for target in mapper[element]:
            # Set membership makes de-duplication O(1) per element; the
            # original scanned the result list (O(n) per element).
            if target not in seen:
                seen.add(target)
                mapped_to.append(target)
    return mapped_to
6f8d940997f4b871b6934db0592663448343e031
703,815
def normalization(data, dmin=0, dmax=1, save_centering=False):
    """ Normalization in [a, b] interval or with saving centering

        x` = (b - a) * (xi - min(x)) / (max(x) - min(x)) + a

    Args:
        data (np.ndarray): data for normalization
        dmin (float): left interval
        dmax (float): right interval
        save_centering (bool): if True -- will save data centering and just
            normalize by lowest data

    Returns:
        np.ndarray: normalized data

    Raises:
        ValueError: if ``dmin`` is not strictly below ``dmax``.
    """
    # Validate the target interval up front.  ValueError is the idiomatic
    # exception for a bad argument value (the original raised bare
    # Exception, which callers cannot catch selectively); callers that
    # caught Exception still work.
    if dmin >= dmax:
        raise ValueError("Left interval 'dmin' must be fewer than right interval 'dmax'")
    if save_centering:
        return data / abs(min(data))
    min_x = min(data)
    max_x = max(data)
    return (data - min_x) * (dmax - dmin) / (max_x - min_x) + dmin
acfa7aaae1bb7eb5752751f5c929ddb7868ccf49
703,816
import os


def relpath(path: str, start: str = os.curdir) -> str:
    """Return a relative version of a path"""
    try:
        result = os.path.relpath(path, start)
    except ValueError:
        # e.g. Windows paths on different drives cannot be relativized;
        # fall back to the original path.
        result = path
    return result
cd9daffa197a0443eb49ca515c805902eb404554
703,817
def named(name):
    """Change the name of something (via a decorator)."""
    def decorator(obj):
        # Rebind __name__ and hand the object back unchanged.
        setattr(obj, "__name__", name)
        return obj
    return decorator
5b4873e7e6475e23ab13cd1fc203d6c79622d96d
703,818
def split_repo_and_dir(repo):
    """
    Split the input string

        org-name/repo-name/subdir-name/more/sub/dirs

    (where '/subdir-name/more/sub/dirs' is optional) into

        org-name/repo-name   and   subdir-name/more/sub/dirs

    The second part might be the empty string if no subdir-name was given.
    """
    parts = repo.split('/')
    prefix = '/'.join(parts[:2])
    rest = '/'.join(parts[2:])
    return [prefix, rest]
c5cfb58fa0780af0391fc07fa78279af4f5c2790
703,819
def calculateOnlineVariance(data):
    """
    Returns the sample variance of the given list, computed with
    Welford's online algorithm.

    :param data: A list of numbers to be measured (ie. the window)
    :returns: The variance of the data.

    Note: a single-element list raises ZeroDivisionError (n - 1 == 0).
    """
    count = 0
    running_mean = 0
    m2 = 0
    for value in data:
        count += 1
        delta = value - running_mean
        running_mean += delta / count
        m2 += delta * (value - running_mean)
    return m2 / (count - 1)
bf8d70cd736471e4723db07fb609aff6a7ccec50
703,820
def clamp(x, xmin, xmax):
    """Constrain a value to lie between two further values, element-wise.

    The returned value is computed as `min(max(x, xmin), xmax)`.
    The arguments can be scalars or :class:`~taichi.Matrix`,
    as long as they can be broadcasted to a common shape.

    Args:
        x (:mod:`~taichi.types.primitive_types`, :class:`~taichi.Matrix`): Specify
            the value to constrain.
        xmin (:mod:`~taichi.types.primitive_types`, :class:`~taichi.Matrix`): Specify
            the lower end of the range into which to constrain `x`.
        xmax (:mod:`~taichi.types.primitive_types`, :class:`~taichi.Matrix`): Specify
            the upper end of the range into which to constrain `x`.

    Returns:
        The value of `x` constrained to the range `xmin` to `xmax`.

    Example::

        >>> v = ti.Vector([0, 0.5, 1.0, 1.5])
        >>> ti.clamp(v, 0.5, 1.0)
        [0.5, 0.5, 1.0, 1.0]
        >>> x = ti.Matrix([[0, 1], [-2, 2]], ti.f32)
        >>> y = ti.Matrix([[1, 2], [1, 2]], ti.f32)
        >>> ti.clamp(x, 0.5, y)
        [[0.5, 1.0], [0.5, 2.0]]
    """
    return min(xmax, max(xmin, x))
80b4ef6224031502cf444b86141398ba60c77bda
703,821
def is_struct(struct):
    """ Checks if the message is a data structure or an rpc request/response"""
    # endswith accepts a tuple, so both suffixes are tested in one call.
    return not struct.name.endswith(("Request", "Response"))
0dbce36cad826988cc18d86a31b91f2090d5e338
703,822
import pandas as pd
from typing import Union
from pathlib import Path
import tarfile


def read_tarfile_csv(path: Union[str, Path], inner_path: str, sep: str = "\t", **kwargs):
    """Read an inner CSV file from a tar archive.

    :param path: The path to the tar archive
    :param inner_path: The path inside the tar archive to the dataframe
    :param sep: The separator in the dataframe. Overrides Pandas default to use a tab.
    :param kwargs: Additional kwargs to pass to :func:`pandas.read_csv`.
    :return: A dataframe
    :rtype: pandas.DataFrame
    """
    with tarfile.open(path) as tar_file:
        # extractfile streams the member directly, so the archive is never
        # unpacked to disk.
        with tar_file.extractfile(inner_path) as file:  # type: ignore
            return pd.read_csv(file, sep=sep, **kwargs)
24b3183da5787c095e78fc1bc7a1ed4e4012d6d2
703,823
def get_enum_map(mri, name):
    """ Returns enum value to name and name to enum value map.

    @param mri mri instance
    @param name name of enum
    @return dict mapping short constant name -> value AND value -> short name
        (both directions live in the same dict)
    """
    ret = {}
    tdm = mri.engine.tdm
    # Constant names come back fully qualified; +1 skips the separator
    # after the enum name.
    n = len(name) + 1
    roles = tdm.getByHierarchicalName(name)
    constants = roles.getConstants()
    for c in constants:
        _name = c.getName()
        v = c.getConstantValue()
        ret[_name[n:]] = v
        ret[v] = _name[n:]
    return ret
e781790c7e7f5a24e12fb986e2e45d735157a7b6
703,824
def zfill_to_collection_size(index: int, collection_size: int) -> str:
    """
    Prepends amount of zeroes required for indexes to be string-sortable
    in terms of given collection size.

    Examples:
        for 10 items prepends up to 1 zero: 1 -> "01", 10 -> "10"
        for 100 items prepends up to 2 zeroes: 7 -> "007", "13" -> "013"
    """
    # The pad width is the number of digits of the collection size.
    width = len(str(collection_size))
    return str(index).zfill(width)
df086ba9c4485dd0073c9a9b4485cb0c0d423859
703,825
def partition(predicate, values):
    """
    Splits the values into two lists, based on the truthiness of the
    predicate's return value, e.g.:

        >>> partition(lambda x: x > 3, range(5))
        ([0, 1, 2, 3], [4])

    Returns a tuple ``(falsy_items, truthy_items)``.
    """
    results = ([], [])
    for item in values:
        # Bug fix: coerce with bool() so predicates returning any truthy
        # value (e.g. a non-zero int) work; the raw return value was used
        # as a tuple index and crashed for anything other than 0/1.
        results[bool(predicate(item))].append(item)
    return results
27184fd908ab2d214db86b612e2e5cbec9393a07
703,826
def _parse_exclude_images_commands(commands, experiments, reflections):
    """Parse a list of list of command line options.

    e.g. commands = [['1:101:200'], ['0:201:300']]
    or commands = [['101:200']], allowable for a single experiment.

    builds and returns a list of tuples (exp_id, (start, stop))
    """
    ranges_to_remove = []
    for com in commands:
        vals = com[0].split(":")
        if len(vals) == 2:
            # start:stop form is only unambiguous with one experiment.
            if len(experiments) > 1:
                raise ValueError(
                    "Exclude images must be in the form experimentnumber:start:stop for multiple experiments"
                )
            else:
                ranges_to_remove.append(
                    (experiments[0].identifier, (int(vals[0]), int(vals[1])))
                )
        else:
            if len(vals) != 3:
                raise ValueError(
                    "Exclude images must be input in the form experimentnumber:start:stop, or start:stop for a single experiment"
                )
            # Map the dataset number to its experiment identifier via the
            # first reflection table that knows about it.
            dataset_id = int(vals[0])
            for table in reflections:
                if dataset_id in table.experiment_identifiers():
                    expid = table.experiment_identifiers()[dataset_id]
                    ranges_to_remove.append((expid, (int(vals[1]), int(vals[2]))))
                    break
    return ranges_to_remove
e1decbcce826d1fe32a9b5c98b745f43e754a3b4
703,827
def cmd_issuer_hash(cert):
    """Returns hash of certificate issuer.

    :param cert: certificate object exposing ``get_issuer()`` (OpenSSL-style)
    :return: the issuer's ``hash()`` value
    """
    issuer = cert.get_issuer()
    return issuer.hash()
d35d35c39ba9c33c5b0015bb9f4d4ddf433cd71d
703,829
def strip_c(buf, dia):
    """This is the ugliest python function I've ever written and I'm
    ashamed that it exists. Can you tell that it's an almost line for line
    translation of a C program? The two embedded functions were macros.

    Appears to strip //- and /* */-comments, non-#include preprocessor
    lines, and excess whitespace from C source text, returning the result
    as bytes. NOTE(review): the indentation below was reconstructed from a
    whitespace-mangled copy — verify against the original program.
    """
    pos = bytes(buf, 'ascii', errors='replace')
    single_q = double_q = slash = escape = skip = False
    space = 0
    # Output buffer; buf[0] is a sentinel space so buf[x-1] is always valid.
    buf = bytearray(len(pos) + 1)
    buf[0] = ord(b' ')
    i, x = 0, 1

    def check_quote(tocheck, other):
        # Toggle a quote state unless we're inside the other quote kind.
        nonlocal skip, escape
        if not other:
            if tocheck:
                if not escape:
                    tocheck = False
                    skip = True
            else:
                tocheck = True
        return tocheck, other

    def QUOTE():
        # True while inside any string/char literal.
        nonlocal double_q, single_q
        return double_q or single_q

    while i < len(pos):
        ch = chr(pos[i])
        if ch == '/':
            if not QUOTE():
                if slash:
                    # Second '/' of a line comment: skip to end of line,
                    # honouring backslash continuations.
                    x -= 1
                    end = i + pos[i:].find(b'\n')
                    if end < 0:
                        dia.error("Failed to find end of comment")
                        return
                    while pos[end - 1] == '\\':
                        end = pos[end+1:].find(b'\n')
                    i = end
                    if chr(buf[x-1]) == '\n':
                        skip = True
                else:
                    slash = True
        elif ch == '*':
            if not QUOTE() and slash:
                # '/*' block comment: skip to the closing '*/'.
                x -= 1
                end = i + pos[i:].find(b'*/')
                if end < 0:
                    dia.error("Failed to find end of comment")
                    return
                i = end + 2
                try:
                    ch = chr(pos[i])
                except IndexError:
                    break
                if ch == '\n' and chr(buf[x-1]) == '\n':
                    skip = True
                slash = False
        elif ch == '\n':
            if not escape:
                slash = double_q = False
                if (chr(buf[x-1]) == '\n'):
                    skip = True
        elif ch == '#':
            # Preprocessor line: keep only #include directives.
            slash = False
            endln = i + pos[i+1:].find(b'\n')
            if chr(buf[x-1]) == '\n' and endln > 0:
                tmp = i + 1
                if chr(pos[i+1]).isspace():
                    while chr(pos[tmp]).isspace() and tmp < endln:
                        tmp += 1
                thing = bytes(pos[tmp:tmp + 7])
                if thing == b'include':
                    i = endln + 2
                    continue
        elif ch == '\\':
            pass
        elif ch == '"':
            double_q, single_q = check_quote(double_q, single_q)
            slash = False
        elif ch == "'":
            single_q, double_q = check_quote(single_q, double_q)
            slash = False
        else:
            slash = False
        escape = not escape if (ch == '\\') else False
        # Drop whitespace that directly follows a newline already emitted.
        skip = True if (skip) else (ch.isspace() and chr(buf[x-1]) == '\n')
        space = space + 1 if (ch.isspace() and not skip) else 0
        if skip:
            skip = False
        elif not QUOTE() and space < 2:
            buf[x] = ord(ch)
            x += 1
        i += 1
    return bytes(buf[:x])
0d23d9826fdbba09b06d3a866d54bcda13d43509
703,830
def map_bool(to_bool) -> bool:
    """Maps value to boolean from a string.

    Parameters
    ----------
    to_bool: str
        Value to be converted to boolean ("true"/"false", any case).

    Returns
    -------
    mapped_bool: bool
        Boolean value converted from string.

    Raises
    ------
    KeyError: if the string is not a recognised boolean.
    """
    lookup = {"true": True, "false": False}
    key = to_bool.lower()
    if key not in lookup:
        raise KeyError("Boolean Value Expected got '{}'".format(to_bool))
    return lookup[key]
4e3bb175f653174a56cb6ddc72ba7bcc56755826
703,831
def fmt_val(val, shorten=True):
    """Format a value for inclusion in an informative text string.

    The value's repr is used; when ``shorten`` is set, output longer than
    50 characters is truncated with "...", preserving a trailing closing
    quote/bracket if one was present.
    """
    val = repr(val)
    max_len = 50  # renamed from `max`, which shadowed the builtin
    if shorten and len(val) > max_len:
        close = val[-1]
        val = val[0:max_len - 4] + "..."
        if close in (">", "'", '"', ']', '}', ')'):
            val = val + close
    return val
c8a10f187d971f8b3f4222549375642b6c12a6a6
703,832
def generate_ranklist(data, rerank_lists):
    """ Create a reranked lists based on the data and rerank documents ids.

    Args:
        data: (Raw_data) the dataset that contains the raw data
        rerank_lists: (list<list<int>>) a list of rerank list in which each
            element represents the original rank of the documents in the
            initial list.

    Returns:
        qid_list_map: (map<list<int>>) a map of qid with the reranked
            document id list.
    """
    if len(rerank_lists) != len(data.initial_list):
        # Bug fix: the original passed a one-element tuple to a two-%d
        # format (and the second length as an extra ValueError argument),
        # which raised TypeError instead of the intended ValueError.
        raise ValueError(
            "The number of queries in rerank ranklists number must be equal to the initial list,"
            " %d != %d." % (len(rerank_lists), len(data.initial_list)))
    qid_list_map = {}
    for i in range(len(data.qids)):
        if len(rerank_lists[i]) != len(data.initial_list[i]):
            raise ValueError(
                "The number of docs in each rerank ranklists must be equal to the initial list,"
                " %d != %d." % (len(rerank_lists[i]), len(data.initial_list[i])))
        # Remove duplicated doc idxs, keeping first-seen order.
        index_list = []
        index_set = set()
        for idx in rerank_lists[i]:
            if idx not in index_set:
                index_set.add(idx)
                index_list.append(idx)
        # Doc idxs that haven't been observed in the rerank list are put
        # at the end of the list.
        for idx in range(len(rerank_lists[i])):
            if idx not in index_set:
                index_list.append(idx)
        # Build the new ranking; negative ids are padding documents.
        qid = data.qids[i]
        new_list = [data.initial_list[i][idx] for idx in index_list]
        did_list = [data.dids[ni] for ni in new_list if ni >= 0]
        qid_list_map[qid] = did_list
    return qid_list_map
3ad3431efff6a7ad81b649a8654d5ec30184de74
703,833
def desktop_extra_assigner(self, user):
    """Assign the extra packages name of the selected desktop.

    Arguments
    ---------
    user: "Dictionary containing user's answers"

    Returns
    -------
    "String containing question for the desktop extras"
    """
    extras = ['Gnome extra', 'KDE applications', 'Deepin extra',
              'Mate extra', 'XFCE goodies']
    selected = extras[user['desktop']]
    return self.trad('Do you wish to install {extra}').format(extra=selected)
529910c70e7dfd83ab58a4937668d35607457271
703,834
def str_or_list_like(x):
    """Determine if x is list-like (list, tuple) using duck-typing.

    A ``strip`` attribute marks strings; otherwise anything supporting
    ``__getitem__`` or ``__iter__`` (list, tuple, set, dict) counts as
    list-like; everything else is "others".
    """
    if hasattr(x, "strip"):
        return "str"
    if hasattr(x, "__getitem__") or hasattr(x, "__iter__"):
        return "list_like"
    return "others"
5ea7a6ff90f702c766401d0a973ac02347c66ade
703,835
def readlist(infile):
    """Read each row of file as an element of the list"""
    with open(infile, 'r') as f:
        # readlines() already returns the list; the identity comprehension
        # in the original only made a redundant copy.
        return f.readlines()
50ea79f3c64e5e90a0f8b3bfd4cd8108304d57b2
703,836
def read_popularity(path):
    """
    :param path: a path of popularity file. A file contains '<id>,<rank>' rows.
    :return: a set of popularity object ids
    """
    ids = set()
    # Bug fixes: close the file deterministically (the original leaked the
    # handle), and catch ValueError — the exception int() actually raises
    # for a malformed id.  str.split on a text line cannot raise the
    # AttributeError/IndexError the original caught.
    with open(path) as popularity_file:
        for line in popularity_file:
            try:
                ident = int(line.split(",", maxsplit=1)[0])
            except ValueError:
                continue
            ids.add(ident)
    return ids
a97f20b129bd7849a4bf9a91d40c23ad664b500b
703,837
def classify(tree, input):
    """classify the input using the given decision tree"""
    # Leaf nodes are bare booleans: return the classification directly.
    if tree in [True, False]:
        return tree
    # Interior node: (attribute, {attribute value -> subtree}).
    attribute, subtree_dict = tree
    subtree_key = input.get(attribute)  # None when the attribute is missing
    if subtree_key not in subtree_dict:
        subtree_key = None  # fall back to the default (None) subtree
    return classify(subtree_dict[subtree_key], input)
66b7558ac8658aa83b1796c17a637daa5a2309bb
703,838
import string


def names_to_usernames(names):
    """
    Take the given list of names and convert it to usernames.

        "John Doe" -> "john.doe"

    Each name is stripped before conversion, then split by spaces.
    Blank entries and entries starting with '#' are skipped.

    Raises an exception if a name contains anything except letters and
    spaces, has fewer than two parts, or produces a duplicate username.

    Bug fix: usernames are returned in input order; the original collected
    them in a set, so the returned list order was arbitrary.
    """
    allowed_chars = set(string.ascii_letters + " ")
    usernames = []
    seen = set()
    for name in names:
        name = name.strip()
        # Empty or comment.
        if not name or name.startswith("#"):
            continue
        # Illegal characters.
        if not set(name).issubset(allowed_chars):
            raise Exception("Invalid characters found: %s" % name)
        name_parts = name.lower().split()
        # Invalid name format (expected full name).
        if len(name_parts) <= 1:
            raise Exception("Too few parts: %s" % name_parts)
        # Convert to username.
        username = ".".join(name_parts)
        if username in seen:
            raise Exception("Duplicate: %s" % username)
        seen.add(username)
        usernames.append(username)
    return usernames
0156f8402541e64dc1ed2b62d21bfcfbda55f167
703,839
def vessel_tip_coupling_data_to_str(data_list):
    """A list of vessel tip data elements is converted into a string."""
    lines = []
    for v in data_list:
        lines.append('VesselTipData(')
        lines.append(' p = Point(x={}, y={}, z={}),'.format(v.p.x, v.p.y, v.p.z))
        lines.append(' vertex_id = {},'.format(v.vertex_id))
        lines.append(' pressure = {},'.format(v.pressure))
        lines.append(' concentration = {},'.format(v.concentration))
        lines.append(' R2 = {},'.format(v.R2))
        lines.append(' radius_first = {},'.format(v.radius_first))
        lines.append(' radius_last = {},'.format(v.radius_last))
        lines.append(' level = {}'.format(v.level))
        lines.append('),')
    return '\n'.join(lines)
6768afa9497e5343bc20736a963d81c7ec298867
703,840
import re


def price_quantity_us_number(price):
    """Extract the numeric quantity of the price, assuming the number uses
    dot for decimal and comma for thousands, etc."""
    # Drop every character that is not a digit or a dot.
    return re.sub('[^0-9.]', '', price.strip())
9e35d8096bd3edfe80b6fae6ab0641107828a50b
703,841
import typing
import re


def build_global_regexes() -> typing.Dict[str, typing.Pattern]:
    """
    Return a dict mapping a label ('ip', 'sp') to its compiled regex.

    (The original docstring described a list of (label, regexes) tuples,
    which does not match the returned value.)
    """
    nums = r'-?\d+'
    # One slice atom: empty, "n", "n:", ":n", "n:n", "n-n", or ":".
    ip_atom = r'(({0})|({0}:)|(:{0})|({0}:{0})|({0}-{0})|(:))?'.format(nums)
    ip = r'^{0}(,{0})*$'.format(ip_atom)
    # Quoted form wraps the atom list in brackets; sp is a list of those.
    ip_qform = r'^\[{0}(,{0})*\]$'.format(ip_atom)
    sp = r'^{0}(,{0})*$'.format(ip_qform[1:-1])
    return {
        'ip': re.compile(ip),  # unquoted form only, quoted form is-a sp
        'sp': re.compile(sp),
    }
43ce3ef0b0bdd6afee39e1cb710272eab859e123
703,842
def _ratio_sample_rate(ratio):
    """
    :param ratio: geodesic distance ratio to Euclid distance
    :return: 20 * (ratio - 0.98)**2, i.e. 0.008 for ratio 1.0 and 0.288
        for ratio 1.1.  (The original comment claimed 0.144 at 1.1, which
        does not match the formula — presumably a stale constant.)
    """
    return 20 * (ratio - 0.98) ** 2
1cd2989937a992e2f558b01be6fadebc66c50782
703,843
from collections import Counter


def trip_finder(hand):
    """
    Takes a 5-card hand, concats, only takes ranks.

    If we get 3 of a kind only (no pair alongside), returns list:
    [6, trip rank, 0, 0, 0, 0]; otherwise six zeros.
    """
    # Rank characters sit at even positions of the concatenated hand.
    ranks = ''.join(hand)[::2]
    # Count once and reuse (the original rebuilt the Counter three times).
    # collections.Counter replaces the deprecated typing.Counter import.
    common = Counter(ranks).most_common(2)
    if common[0][1] == 3 and common[1][1] == 1:
        return [6, common[0][0], 0, 0, 0, 0]
    return [0, 0, 0, 0, 0, 0]
42281d141bdb41e0c745610257cc4376a6262d64
703,844
def beale(position):
    """
    Beale benchmark function; global optimum at (3.0, 0.5) = 0.

    :param position: (x, y) pair
    :return: function value at the given position
    """
    x, y = position
    terms = (
        1.5 - x + x * y,
        2.25 - x + x * y ** 2,
        2.625 - x + x * y ** 3,
    )
    return sum(t ** 2 for t in terms)
bb5bc6d50b793155f81fdd75f8a1be8889ab7839
703,846
def naive_log_ctz(x: int) -> int:
    """Count trailing zeros of an int in O(log(zeros)) steps.

    Args:
        x: An int.

    Returns:
        The number of trailing zero bits in x, as an int (0 for x == 0).

    This implementation is much faster than the naive linear implementation,
    as it performs a logarithmic number of steps relative to the number of
    trailing zeros in x.  Unlike the linear implementation, it avoids looking
    at the high bits of x when it can, so it only returns the number of
    zeros, not the remaining significant bits.  It is still "naive" in that
    it uses no tricks beyond the logarithmic algorithm itself.

    >>> naive_log_ctz(0)
    0
    >>> naive_log_ctz(1)
    0
    >>> naive_log_ctz(-1)
    0
    >>> naive_log_ctz(2)
    1
    >>> naive_log_ctz(-2)
    1
    >>> naive_log_ctz(40)  # 0b101000 = 2**3 * 5
    3
    >>> naive_log_ctz(-40)  # 0b1..1011000
    3
    >>> naive_log_ctz(37 << 100)
    100
    """
    if x == 0:
        return 0
    else:
        # Phase 1: grow a low-order all-ones mask (1, 0b11, 0b1111, ...)
        # doubling its width (zscale) until it covers a set bit of x.
        zmask = 1
        zscale = 1
        low_bits = x & zmask
        while low_bits == 0:
            zmask = (zmask << zscale) | zmask
            zscale <<= 1
            low_bits = x & zmask
        # Step back one doubling: the half-width window is where the lowest
        # set bit lives; binary-search inside it.
        zscale >>= 1
        zmask >>= zscale
        zeros : int = 0
        # Phase 2: halve the window each step; whenever the low half of
        # low_bits is all zeros, count those bits and shift them away.
        while zscale > 0:
            if low_bits & zmask == 0:
                low_bits >>= zscale
                zeros += zscale
            zscale >>= 1
            zmask >>= zscale
        return zeros
dfa4c0fb890bbb13803c653a3f7f65b25bb3158f
703,847
def create_header(multiobj_bool, constr_func):
    """Build the CSV header used when saving optimization data.

    Args:
        multiobj_bool (bool): True if a multiobjective function is used.
        constr_func (list): Names of the constraint functions applied.

    Returns:
        str: Comma-separated header line.
    """
    base = "iter,f0val,fvirg,f0val2,fvirg2" if multiobj_bool else "iter,f0val,fvirg"
    return ','.join([base, *constr_func])
519e66c8437f972cd3ad6d604bc01ab858c8abed
703,848
def colour_by_year(year, train_thresh, update1_thresh, update2_thresh, colours=None):
    """Return a colour for a data point based on its publication year.

    Parameters
    ----------
    year : publication year of the data point
    train_thresh : last year assigned to the training set
    update1_thresh : last year assigned to the update1 set
    update2_thresh : last year assigned to the update2 set
    colours : optional list of colours for training, update1, update2, test

    Returns
    -------
    The colour for the matching set, or None for years after 2020
    (implicit fall-through, kept for compatibility).
    """
    if colours is None:
        colours = ["navy", "plum", "mediumaquamarine", "green"]
    if year <= train_thresh:
        return colours[0]
    if year <= update1_thresh:
        return colours[1]
    if year <= update2_thresh:
        return colours[2]
    if year <= 2020:
        return colours[3]
179b4a5d7f8cccaaa398fdffe43c59d02478dff2
703,849
def as_int(val):
    """Convert a value to int, returning None when conversion fails.

    Args:
        val: value to convert (typically a string).

    Returns:
        int or None: the parsed integer, or None for empty/invalid input.
    """
    try:
        return int(val)
    except (ValueError, TypeError):
        # TypeError covers non-convertible inputs such as None, which the
        # original version let propagate.
        return None
87586fbc47c37354e34d10116b86d013a98d20b9
703,850
def get_value(obj, key, default=None):
    """
    Look up ``key`` on ``obj``.

    Supports three kinds of containers:
      * dict: returns ``obj.get(key, default)``
      * iterable of pairs (e.g. list of [key, value] lists): returns the
        second element of the first item whose first element equals ``key``
      * any other object: returns ``getattr(obj, key, default)``

    :param obj: dict, iterable of pairs, or arbitrary object
    :param key: dict key / pair key / attribute name
    :param default: value returned when the key is not found
    :return: the located value or ``default``
    """
    if isinstance(obj, dict):
        return obj.get(key, default)
    elif hasattr(obj, '__iter__'):
        for item in obj:
            # Bug fix: the original re-tested ``obj`` here instead of
            # ``item``, so non-sized items (e.g. ints) crashed on len(item).
            if hasattr(item, '__len__') and len(item) > 1 and item[0] == key:
                return item[1]
        return default
    else:
        return getattr(obj, key, default)
5885fa5ae944fd6967c21b97cd7955a94ad0dce2
703,851
def validate_edges(g, max_kmph=200):
    """Split a graph's edges into plausible and implausible ones.

    An edge is "bad" when covering its distance within its duration would
    require a speed above ``max_kmph``; edges with missing data or
    distances under 1 km (too imprecise) are always "good".

    :param g: graph whose edge data may carry ``duration``/``distance``
    :param max_kmph: maximum plausible speed in km/h
    :return: tuple ``(good_edges, bad_edges)`` of ``(u, v)`` pairs
    """
    good, bad = [], []
    for u, v, attrs in g.edges(data=True):
        duration = attrs.get("duration")
        dist_km = attrs.get("distance")
        # Missing data, or distances < 1 km, are not precise enough to judge.
        if duration is None or dist_km is None or dist_km < 1.0:
            good.append((u, v))
            continue
        # Clamp to at least one minute, then convert seconds -> hours.
        dur_hrs = max(abs(duration.total_seconds()), 60) / 60.0 / 60.0
        if dist_km / dur_hrs > max_kmph:
            bad.append((u, v))
        else:
            good.append((u, v))
    return good, bad
3d0b176f1de7fd7e81a72c44b9d3775f6daaa17c
703,852
import pathlib from typing import List def _write_dataset_files( root_path: pathlib.Path, namespace: str, datasets: List[str] ) -> str: """Write the repo content containing the datasets.""" repo_path = root_path / namespace # Create all datasets for ds_name in datasets: ds_path = repo_path / ds_name / f'{ds_name}.py' ds_path.parent.mkdir(parents=True) # Create the containing dir ds_path.touch() # Create the file # Additional noisy files should be ignored (repo_path / '__init__.py').touch() (repo_path / 'empty_dir').mkdir() return str(repo_path)
49baafff58a08802830208382180ce32d8aaf8c0
703,853
import argparse
import sys


def _parse_arguments() -> argparse.Namespace:
    """Plot argument parser.

    Parses ``sys.argv[1:]`` into a Namespace describing which benchmarks
    and problem sizes to plot, the metric, and output/plot naming.
    """
    parser = argparse.ArgumentParser(description="Plot")
    parser.add_argument(
        "--inputs",
        type=str,
        required=True,
        help=
        "comma-separated list of input data filenames (e.g., --input input1,input2)\n" +
        "The data for multiple files is concatenated into a single graph.")
    parser.add_argument("--output",
                        type=str,
                        required=True,
                        help="output plot filename (e.g., --output output)")
    parser.add_argument("--plot_name",
                        type=str,
                        required=True,
                        help="plot name (e.g., --plot_name name)")
    # NOTE(review): type=bool does not parse "False" as falsy (argparse
    # applies bool() to the raw string) -- confirm intended for both flags.
    parser.add_argument("--print_available_benchmarks",
                        type=bool,
                        required=False,
                        help="print the existing list of benchmarks in the data")
    parser.add_argument("--benchmarks_to_plot",
                        type=str,
                        required=False,
                        help="comma-separated names of benchmarks to plot",
                        default='all')
    # NOTE(review): "lost" in the help string below looks like a typo for
    # "list"; left unchanged as it is a runtime string.
    parser.add_argument("--sizes_to_plot",
                        type=str,
                        required=False,
                        help="semicolon-separated lost of problem sizes to plot "
                        "(e.g., --sizes_to_plot=\"m=32,n=48;m=90,n=32\")",
                        default='all')
    parser.add_argument("--num_sizes_to_plot",
                        type=int,
                        required=False,
                        help="sample the given number of problem sizes to plot",
                        default=-1)
    parser.add_argument("--metric_to_plot",
                        type=str,
                        required=True,
                        choices=["gflop_per_s_per_iter", "gbyte_per_s_per_iter"])
    parser.add_argument("--group_by_strides_and_dilations",
                        type=bool,
                        required=False,
                        help="plot separate bars for strides and dilations")
    ###########################################################################
    # Not used atm
    ###########################################################################
    parser.add_argument("--peak_compute",
                        type=int,
                        nargs="?",
                        help="peak compute (e.g., --peak_compute 192)",
                        default=192)
    parser.add_argument("--peak_bandwidth_hi",
                        type=int,
                        nargs="?",
                        help="high peak bandwidth (e.g., --peak_bandwidth_hi 281)",
                        default=281)
    parser.add_argument("--peak_bandwidth_lo",
                        type=int,
                        nargs="?",
                        help="low peak bandwidth (e.g., -peak_bandwidth_lo 281)",
                        default=281)
    return parser.parse_args(sys.argv[1:])
f69c69040d9003e32b27e3195218cad06c807e69
703,854
import json


def transform_group_roles_data(data, okta_org_id):
    """Transform Okta role data into a list of role property dicts.

    :param data: JSON string returned by the Okta server
    :param okta_org_id: Okta organization id
    :return: list of dicts with ``label``, ``type`` and ``id`` keys
    """
    return [
        {
            "label": role["label"],
            "type": role["type"],
            "id": "{}-{}".format(okta_org_id, role["type"]),
        }
        for role in json.loads(data)
    ]
ea554dfb4e91e3647298a2ef0891452e423ff957
703,855
def IsProjectAddressOnToLine(project_addr, to_addrs):
    """Return True if an email was explicitly sent directly to us.

    :param project_addr: the project's inbound email address
    :param to_addrs: collection of addresses on the To: line
    """
    return any(addr == project_addr for addr in to_addrs)
7907bbb313b3e6d8439af79539e469b04a62da63
703,856
def is_list(string):
    """Check whether a string looks like a list literal of the form [A, B].

    :param string: string to evaluate (may be empty or None)
    :return: bool
    """
    if not string:
        return False
    return string[0] == '[' and string[-1] == ']' and ',' in string
77b86e7480a2a591e18ea21989cfefa06282c5f2
703,857
import string
import random


def get_random_mac_address():
    """Generate and return a 12-hex-digit MAC address (Windows style, no separators).

    The second character is restricted to 2, 4, A or E.

    Returns:
        str: 12 uppercase hexadecimal characters.
    """
    hex_digits = "0123456789ABCDEF"
    # random.choices (with replacement) instead of random.sample: a MAC may
    # legitimately repeat hex digits, whereas sample() never repeats them,
    # which biased the original generator.
    return (
        random.choice(hex_digits)
        + random.choice("24AE")
        + "".join(random.choices(hex_digits, k=10))
    )
171eaca1df7b35def0d09a2ac1bc2e96163a3ceb
703,858
def isChinese(word):
    """Return True when every character of ``word`` lies in the CJK
    ideograph range U+4E00..U+9FA5 (vacuously True for an empty string)."""
    return all('\u4e00' <= ch <= '\u9fa5' for ch in word)
464f87a2211f6e3d2f00a9a2bf12d0669cd297ec
703,859
def entries_to_labels_scores(entries):
    """
    Convert entries to labels, scores and bar colors for a barplot.

    The labels only exist to discern entries: the actual labels are set in
    set_label_legends.  'Human' and 'Random' entries get special colors;
    everything else is dark blue.

    (The original also built a ``nicknames`` list that was never used;
    that dead code is removed here.)
    """
    special_colors = {'Human': 'red', 'Random': 'black'}
    scores = [entry['score'] for entry in entries]
    colors = [special_colors.get(entry['algo-title'], 'darkblue')
              for entry in entries]
    return list(range(len(scores))), scores, colors
31c42ffa820d09bfbfb437904884c354f6fec257
703,861
import re def _natural_key(x): """ Splits a string into characters and digits. This helps in sorting file names in a 'natural' way. """ return [int(c) if c.isdigit() else c.lower() for c in re.split("(\d+)", x)]
1fab7dffb9765b20f77ab759e43a23325b4441f4
703,863
import os


def find_file(directory: str, search_file: str) -> str:
    """Find the relative path of a file in a directory tree.

    Walks ``directory`` recursively, collects files whose names end with
    ``search_file`` (case-insensitive), drops paths containing ".test",
    and returns the shortest match relative to ``directory``.

    Args:
        directory (str): Directory to search in.
        search_file (str): File name (suffix) to search for.

    Returns:
        str: Relative path to the file, or "" when nothing matches.
    """
    suffix = search_file.lower()
    matches = []
    for root, _, files in os.walk(directory):
        matches.extend(
            os.path.join(root, name)
            for name in files
            if name.lower().endswith(suffix)
        )
    if not matches:
        return ""
    matches = [path for path in matches if ".test" not in path]
    shortest = min(matches, key=len)
    return shortest.replace(directory, "").strip("/")
4f272ab643d8b271d01cd38041e1e003e3171224
703,864
from typing import Tuple import math def _projected_velocities_from_cog(beta: float, cog_speed: float) -> Tuple[float, float]: """ Computes the projected velocities at the rear axle using the Bicycle kinematic model using COG data :param beta: [rad] the angle from rear axle to COG at instantaneous center of rotation :param cog_speed: [m/s] Magnitude of velocity vector at COG :return: Tuple with longitudinal and lateral velocities [m/s] at the rear axle """ # This gives COG longitudinal, which is the same as rear axle rear_axle_forward_velocity = math.cos(beta) * cog_speed # [m/s] # Lateral velocity is zero, by model assumption rear_axle_lateral_velocity = 0 return rear_axle_forward_velocity, rear_axle_lateral_velocity
defbfa58d1e67b67ff4a118ebff03e62f4c1042c
703,865
def quality_scrub(df, target_cols=('quality_1', 'quality_2', 'quality_3')):
    """
    Filter a dataframe to rows where no target column equals 'no_cough'.

    Args:
        df: Required. A dataframe containing the target columns.
        target_cols: Column names to check.  Defaults to the three quality
            columns; an immutable tuple replaces the original mutable list
            default (shared-mutable-default pitfall).

    Returns:
        A filtered dataframe where each target column is not 'no_cough'.
    """
    for col in target_cols:
        df = df[df[col] != 'no_cough']
    return df
1187278e008f1e4ec4688d3cf9a3d7a0c1a82dc0
703,867
import subprocess
import os


def simple_shell(args, stdout=False):
    """Run a command without a shell and return its exit code.

    Args:
        args: argument list passed to subprocess.
        stdout (bool): when False, the command's stdout/stderr are discarded.

    Returns:
        int: the process return code.
    """
    if stdout:
        return subprocess.call(args, shell=False)
    # subprocess.DEVNULL replaces open(os.devnull, "w"), whose file handle
    # the original leaked on every call.
    return subprocess.call(args, shell=False,
                           stdout=subprocess.DEVNULL,
                           stderr=subprocess.STDOUT)
b922e35565a5da58cec153415b9112e560de6c73
703,868
def print_decorator(fct):
    """Decorator that records the positional arguments of every call.

    Intended to help test functions whose effect is printing to stdout: the
    recorded argument tuples are exposed on the wrapper's ``output``
    attribute so assertions can be made against them.  (The original kept
    the list trapped inside the closure, making it unusable for its stated
    testing purpose.)
    """
    output = []

    def wrapper(*args):
        output.append(args)
        return fct(*args)

    # Expose the recorded calls to the caller.
    wrapper.output = output
    return wrapper
4fa74cf9bf3653f89114cbdd6503ef13630a17e7
703,869
def remodel_matrix(matrix, new_fire_cells, moisture_matrix):
    """
    Update the fire-spread matrix for newly ignited cells.

    matrix: array of the fire spread area (mutated in place and returned)
    new_fire_cells: iterable of (x, y) coordinates of cells newly affected
        by the fire spread
    moisture_matrix: per-cell moisture values; a newly burning cell's value
        becomes half its moisture
    """
    for cell in new_fire_cells:
        row, col = int(cell[0]), int(cell[1])
        matrix[row][col] = 0.5 * moisture_matrix[row][col]
    return matrix
c24c8fae19e0a8bb884103191906e76af625430f
703,870
import csv


def get_author_book_publisher_data(filepath):
    """Read a CSV file and return its rows as a list of dicts keyed by header."""
    with open(filepath) as csvfile:
        return list(csv.DictReader(csvfile))
5d095b20e2e32aacbe4d85efd80461abfa175127
703,871
import pandas


def _normalize_similarity(df: pandas.DataFrame) -> None:
    """Normalizes similarity in-place by combining cls and transformation.

    Builds a combined "params.similarity" column from
    "params.similarity.cls" and "params.similarity.transformation",
    strips transformation suffixes where they are inactive, and rewrites
    each value as "cls (transformation words)".

    Mutates ``df``; returns None.
    """
    # Combined key, e.g. "l1_bound_inverse".
    df["params.similarity"] = (df["params.similarity.cls"] + "_" + df["params.similarity.transformation"])
    # Transformation is only active when the cls is l1 or l2.
    unused_transformation_mask = ~(df["params.similarity"].str.startswith("l1_") | df["params.similarity"].str.startswith("l2_"))
    # Drop the (inactive) transformation suffixes for all other cls values.
    df.loc[unused_transformation_mask, "params.similarity"] = df.loc[unused_transformation_mask, "params.similarity"].apply(lambda v: v.replace("_bound_inverse", "").replace("_negative", ""))

    def _normalize(name):
        # "a_b_c" -> "a (b c)"; a bare name is left unchanged.
        name = name.split("_")
        if len(name) == 1:
            return name[0]
        else:
            return f"{name[0]} ({' '.join(name[1:])})"

    df["params.similarity"] = df["params.similarity"].apply(_normalize)
048c1a7107cf61c20ebc7d2e10214c434898466e
703,872
from typing import List


def check_absence_of_skip_series(
        movement: int, past_movements: List[int], max_n_skips: int = 2, **kwargs
) -> bool:
    """
    Check that continuing with ``movement`` does not extend a too-long
    series of skips.

    :param movement: melodic interval (in scale degrees) for line continuation
    :param past_movements: list of past movements
    :param max_n_skips: maximum allowed number of skips in a row
    :return: whether the continuation is in accordance with the rule
    """
    is_skip = abs(movement) > 1
    if not is_skip or len(past_movements) < max_n_skips:
        return True
    # Allowed only if at least one of the last max_n_skips moves was a step.
    return any(abs(step) <= 1 for step in past_movements[-max_n_skips:])
94ff2f3e03956d5bea1173182e389a3e6bb4b487
703,873
def yesno_choice(title, callback_yes=None, callback_no=None):
    """
    Prompt the user with a yes/no question and dispatch on the answer.

    :param title: text to display (e.g.: 'Do you want to go to Copenaghen?')
    :param callback_yes: callback invoked on 'y' (if callable)
    :param callback_no: callback invoked on 'n' (if callable)
    :return: the callback result, or the raw 'y'/'n' answer when no
        callable was supplied
    """
    print()
    print(f'{title} (y/n)')
    while True:
        answer = input()
        if answer == 'y':
            return callback_yes() if callable(callback_yes) else 'y'
        if answer == 'n':
            return callback_no() if callable(callback_no) else 'n'
        print('Wrong choice buddy ;) Retry:')
93b76a3c7740b90dd01bd46ed429411991f3f34d
703,875
def wrap_col(string, str_length=11):
    """Hard-wrap a string into chunks when it contains any very long word.

    If any whitespace-separated word exceeds 25 characters, the whole
    string is chopped into ``str_length``-sized stripped pieces joined by
    newlines (with a trailing newline); otherwise it is returned unchanged.
    """
    has_long_word = any(len(word) > 25 for word in string.split(' '))
    if not has_long_word:
        return string
    chunks = [string[i:i + str_length].strip()
              for i in range(0, len(string), str_length)]
    return '\n'.join(chunks) + '\n'
7b5cdf37cb84a2d2ebbc421ea917fc563026927e
703,876
def combine_results_jsons(drtdp_json, psrtdp_json, vi_json):
    """Merge overall result jsons from DRTDP, PS-RTDP and value iteration.

    :param drtdp_json: overall results for DRTDP
    :param psrtdp_json: overall results for PS-RTDP
    :param vi_json: overall results for value iteration (keys may be absent)
    :return: combined results dict
    """
    def _per_10k(value):
        # Message/expansion counts are reported in units of 10k.
        return '{0:.3f}'.format(value / 10000)

    vi_cost = vi_json.get('best_cost')
    vi_time = None
    if 'planning_time' in vi_json:
        vi_time = '{0:.3f}'.format(vi_json['planning_time'])
    return {
        "domain": drtdp_json['domain'],
        "actions": drtdp_json['actions'],
        "facts": drtdp_json['facts'],
        "num_agents": drtdp_json['num_agents'],
        "best_cost": (drtdp_json['best_cost'], psrtdp_json['best_cost'], vi_cost),
        "messages": (_per_10k(drtdp_json['messages']),
                     _per_10k(psrtdp_json['messages'])),
        "expansions": (_per_10k(drtdp_json['expansions']),
                       _per_10k(psrtdp_json['expansions'])),
        "trajectories": (drtdp_json['trajectories'], psrtdp_json['trajectories']),
        "restarts": (drtdp_json['restarts'], psrtdp_json['restarts']),
        "planning_time": ('{0:.3f}'.format(drtdp_json['planning_time']),
                          '{0:.3f}'.format(psrtdp_json['planning_time']),
                          vi_time),
    }
a2bedf628e2af91af2c16111cd33600ade7e435e
703,877
import os


def get_list_of_all_data_file_names(datadirectory):
    """Return the names of all data files (.txt) in the given directory.

    :param datadirectory: directory to scan (non-recursive)
    :return: list of file names ending in ".txt"
    """
    print('get_list_of_all_data_file_names', datadirectory)
    # endswith('.txt') with the dot: the original endswith('txt') also
    # matched names like 'notes_txt' that are not .txt files.
    return [name for name in os.listdir(datadirectory) if name.endswith('.txt')]
35dd02acdc492d1d38e9cedbe154f2754706e25b
703,878
from unittest.mock import Mock


def mock_data_manager(components):
    """Return a mock data manager of a general model.

    The mock exposes the given ``components`` plus an empty
    ``fixed_components`` list.
    """
    manager = Mock()
    manager.components = components
    manager.fixed_components = []
    return manager
e796dbe73e2ec7df650ceab450a3a5449a6af9ed
703,879
def moeda(n=0):
    """
    -> Format a number as currency with the R$ prefix.

    :param n: number (defaults to 0)
    :return: formatted string, e.g. 'R$ 3.00'
    """
    return 'R$ {:.2f}'.format(n)
3727a2257afe8746d6ef2b3c8ee088842c46c5ce
703,880
from functools import wraps


def flow(flow):
    """Decorator factory: ``flow(flow_value)`` returns a decorator.

    The returned decorator transforms a method so that the ``flow`` kwarg
    is always set to the factory's argument:

        method = flow(flow_value)(method)

    A nonsense value of ``flow`` will raise an Exception in
    Component.__select_flow.
    """
    ## flow(flow) returns this (that's the python decorator protocol)
    def decorator(method):
        ## The decorator returns the method with the flow keyword set;
        ## @wraps preserves the docstring and __name__
        @wraps(method)
        def method_with_flow(self, *args, **kwargs):
            kwargs["flow"] = flow
            ## NOTE(review): method(self) is invoked first and its RESULT is
            ## then called with the args -- presumably these methods return a
            ## callable; confirm against Component.__select_flow.
            return method(self)(*args, **kwargs)
        return method_with_flow
    return decorator
24292de3d0f63ca6eafc9785db4c5bfa5519852f
703,881