Columns: content (string, 35 to 416k chars), sha1 (string, 40 chars), id (int64, 0 to 710k)
import importlib


def _bot_exists(botname):
    """Utility method to import a bot."""
    module = None
    try:
        module = importlib.import_module('%s.%s' % (botname, botname))
    except ImportError as e:
        quit('Unable to import bot "%s.%s": %s' % (botname, botname, str(e)))
    return module
c091be6d586faa8aacd48b30f4ce2f4fcc665e0b
5,467
def _format_exponent_notation(input_number, precision, num_exponent_digits):
    """
    Format the exponent notation. Python's exponent notation doesn't allow
    for a user-defined number of exponent digits.

    Based on [Anurag Uniyal's answer][answer] to the StackOverflow question
    ['Python - number of digits in exponent'][question]

    [question]: http://stackoverflow.com/q/9910972/95592
    [answer]: http://stackoverflow.com/a/9911741/95592
    """
    python_exponent_notation = '{number:.{precision}e}'.format(
        number=input_number,
        precision=precision)
    mantissa, exponent = python_exponent_notation.split('e')
    # Add 1 to the desired number of exponent digits to account for the sign
    return '{mantissa}e{exponent:+0{exp_num}d}'.format(
        mantissa=mantissa,
        exponent=int(exponent),
        exp_num=num_exponent_digits + 1)
59f61897c70ca1d9f95412b2892d5c9592e51561
5,468
def reflect(data, width):
    """Reflect a data word, i.e. reverse its bit order."""
    reflected = data & 0x01
    for _ in range(width - 1):
        data >>= 1
        reflected = (reflected << 1) | (data & 0x01)
    return reflected
bd5a0b804419c52ebdc6777fa0256c6a2dd4475c
5,469
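A quick sanity check for the bit reflection above; the inputs and expected outputs are worked out by hand and are not part of the original record.

# reflect collects bits LSB-first, so 0b1101 reversed over 4 bits is 0b1011
assert reflect(0b1101, 4) == 0b1011
assert reflect(0b0001, 4) == 0b1000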
def npv(ico, nci, r, n):
    """
    This capital budgeting function computes the net present value on a
    cash flow generating investment.

    ico = initial capital outlay
    nci = net cash inflows per period
    r = discount rate
    n = number of periods

    Example: npv(100000, 15000, .03, 10)
    """
    pv_nci = 0
    for x in range(n):
        pv_nci = pv_nci + (nci / ((1 + r) ** (x + 1)))
    return pv_nci - ico
fa3128de0fe8a2f7b8bbe754f0e1b1e1a0eb222d
5,470
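A hedged check of the docstring's own example: the present value of ten inflows of 15000 at a 3% discount rate is roughly 127,953, so the NPV is about 27,953 (figures computed here for illustration, not taken from the original record).

# Sum of 15000 / 1.03**t for t = 1..10, minus the 100000 outlay
result = npv(100000, 15000, .03, 10)
assert 27953 < result < 27954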
def normcase(path):
    """Normalize the case of a pathname.
    On Unix and Mac OS X, this returns the path unchanged; on
    case-insensitive filesystems, it converts the path to lowercase.
    On Windows, it also converts forward slashes to backward slashes."""
    # POSIX behaviour: return the path unchanged.
    return path
d52dca00cc9db607d4ba22c12ba38f512a05107b
5,471
def typeof(obj, t):
    """Check if a specific type instance is a subclass of the type.

    Args:
        obj: Concrete type instance
        t: Base type class
    """
    try:
        return issubclass(obj, t)
    except TypeError:
        return False
67fbcf8b1506f44dba8360a4d23705a2e8a69b47
5,472
def main():
    """Returns the answer."""
    return 42
f6800af5efb0b65f7c7afdd5ea0ede896fd740f8
5,476
import subprocess


def sub_proc_launch(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE):
    """Launch a subprocess and return the Popen process object.

    This is non-blocking, which is useful for long-running processes.
    """
    proc = subprocess.Popen(cmd.split(), stdout=stdout, stderr=stderr)
    return proc
3ad582b4c915c65e56833c5cd66c03dcb6765b11
5,477
def collect_accuracy(path):
    """Collects accuracy values in log file."""
    r1 = None
    r5 = None
    mAP = None
    r1_content = 'Rank-1 '
    r5_content = 'Rank-5 '
    map_content = 'mAP:'
    with open(path) as input_stream:
        for line in input_stream:
            candidate = line.strip()
            if r1_content in candidate:
                r1 = float(candidate.split(':')[-1].replace('%', ''))
            elif r5_content in candidate:
                r5 = float(candidate.split(':')[-1].replace('%', ''))
            elif map_content in candidate:
                mAP = float(candidate.split(':')[-1].replace('%', ''))
    return r1, r5, mAP
fa94724f16a332fe18d13df3cc0fbcdd060fe897
5,478
def maybe_flip_x_across_antimeridian(x: float) -> float:
    """Flips a longitude across the antimeridian if needed."""
    if x > 90:
        return (-180 * 2) + x
    else:
        return x
50fac7a92d0ebfcd003fb478183b05668b9c909c
5,479
def write_output(features, forecast_hours, poly, line, point):
    """
    writes output to OUTDATA dict depending on query type

    :param features: output from clipping function
    :param forecast_hours: list of all queried forecast hours
    :param poly: boolean to identify a polygon query
    :param line: boolean to identify a line query
    :param point: boolean to identify a point query

    :returns: dict with all queried forecast hours and clipping results
    """
    i = 0
    if line and not poly and not point:
        OUTDATA = {"type": "FeatureCollection",
                   "features": {
                       "type": "Feature",
                       "geometry": {
                           "type": "LineString",
                           "coordinates": features[0][0][0][4],
                       },
                       "properties": {
                           "Forecast Hours": []
                       }
                   }}
        temp_line = []
        dir_line = []
        speed_line = []
        for hour in forecast_hours:
            OUTDATA["features"]['properties']["Forecast Hours"].append({
                "Forecast Hour": hour,
            })
        for i in features[0]:
            if 'Temperature Data' in features[0][i][3]:
                for x in features[0][0]:
                    temp_line.append([[x[0], x[1]], x[2]])
                (OUTDATA["features"]['properties']["Forecast Hours"]
                 [int(i / 3)]["Temperature"]) = {
                    "Observations Along Line": temp_line
                }
            if 'Wind Direction Data' in features[0][i][3]:
                for x in features[0][1]:
                    dir_line.append([[x[0], x[1]], x[2]])
                (OUTDATA["features"]['properties']["Forecast Hours"]
                 [int(i / 3)]["Wind Direction"]) = {
                    "Observations Along Line": dir_line
                }
            if 'Wind Speed Data' in features[0][i][3]:
                for x in features[0][2]:
                    speed_line.append([[x[0], x[1]], x[2]])
                (OUTDATA["features"]['properties']["Forecast Hours"]
                 [int(i / 3)]["Wind Speed"]) = {
                    "Observations Along Line": speed_line
                }
        return OUTDATA
    if poly:
        OUTDATA = {"type": "FeatureCollection",
                   "features": {
                       "type": "Feature",
                       "geometry": {
                           "type": "Polygon",
                           "coordinates": features[0][0][4],
                       },
                       "properties": {
                           "Forecast Hours": []
                       }
                   }}
        for hour in forecast_hours:
            OUTDATA["features"]['properties']["Forecast Hours"].append({
                "Forecast Hour": hour,
            })
        for i in features[0]:
            if 'Temperature Data' in features[0][i][3]:
                (OUTDATA["features"]['properties']["Forecast Hours"]
                 [int(i / 3)]["Temperature"]) = {
                    "Min Temperature": features[0][i][0],
                    "Max Temperature": features[0][i][1],
                    "Mean Temperature": features[0][i][2]
                }
            if 'Wind Direction Data' in features[0][i][3]:
                (OUTDATA["features"]['properties']["Forecast Hours"]
                 [int(i / 3)]["Wind Direction"]) = {
                    "Min Wind Direction": features[0][i][0],
                    "Max Wind Direction": features[0][i][1],
                    "Mean Wind Direction": features[0][i][2]
                }
            if 'Wind Speed Data' in features[0][i][3]:
                (OUTDATA["features"]['properties']["Forecast Hours"]
                 [int(i / 3)]["Wind Speed"]) = {
                    "Min Wind Speed": features[0][i][0],
                    "Max Wind Speed": features[0][i][1],
                    "Mean Wind Speed": features[0][i][2]
                }
        return OUTDATA
    if point:
        OUTDATA = {"type": "FeatureCollection",
                   "features": {
                       "type": "Feature",
                       "geometry": {
                           "type": "Point",
                           "coordinates": [features[0][0][0],
                                           features[0][0][1]],
                       },
                       "properties": {
                           "Forecast Hours": []
                       }
                   }}
        for hour in forecast_hours:
            OUTDATA["features"]['properties']["Forecast Hours"].append({
                "Forecast Hour": hour,
            })
        for i in features[0]:
            if 'Temperature Data' in features[0][i][3]:
                (OUTDATA["features"]['properties']["Forecast Hours"]
                 [int(i / 3)]["Temperature"]) = {
                    "Temperature": features[0][i][2],
                }
            if 'Wind Direction Data' in features[0][i][3]:
                (OUTDATA["features"]['properties']["Forecast Hours"]
                 [int(i / 3)]["Wind Direction"]) = {
                    "Wind Direction": features[0][i][2],
                }
            if 'Wind Speed Data' in features[0][i][3]:
                (OUTDATA["features"]['properties']["Forecast Hours"]
                 [int(i / 3)]["Wind Speed"]) = {
                    "Wind Speed": features[0][i][2],
                }
        return OUTDATA
abc92e597e4d8a409f7c4d0e0b224a76b4a6cd63
5,480
def plus_one(digits):
    """
    Given a non-empty array of digits representing a non-negative integer,
    plus one to the integer.

    :param digits: list of digits of a non-negative integer
    :type digits: list[int]
    :return: digits of operated integer
    :rtype: list[int]
    """
    result = []
    carry = 1
    for i in range(len(digits) - 1, -1, -1):
        result.append((digits[i] + carry) % 10)
        carry = (digits[i] + carry) // 10
    if carry:
        result.append(1)
    return list(reversed(result))
a11668a1b2b9adb9165152f25bd1528d0cc2bd71
5,481
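A small check of the carry handling in plus_one; the cases below are invented for illustration.

assert plus_one([1, 2, 3]) == [1, 2, 4]
assert plus_one([9, 9]) == [1, 0, 0]  # carry propagates past the most significant digit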
def simpleBlocking(rec_dict, blk_attr_list):
    """Build the blocking index data structure (dictionary) to store blocking
    key values (BKV) as keys and the corresponding list of record identifiers.

    A blocking is implemented that simply concatenates attribute values.

    Parameter Description:
        rec_dict      : Dictionary that holds the record identifiers as keys
                        and corresponding list of record values
        blk_attr_list : List of blocking key attributes to use

    This method returns a dictionary with blocking key values as its keys and
    list of record identifiers as its values (one list for each block).

    Examples:
        If the blocking is based on 'postcode' then:
            block_dict = {'2000': [rec1_id, rec2_id, rec3_id, ...],
                          '2600': [rec4_id, rec5_id, ...],
                          ...}
        while if the blocking is based on 'postcode' and 'gender' then:
            block_dict = {'2000f': [rec1_id, rec3_id, ...],
                          '2000m': [rec2_id, ...],
                          '2600f': [rec5_id, ...],
                          '2600m': [rec4_id, ...],
                          ...}
    """
    block_dict = {}  # The dictionary with blocks to be generated and returned

    print('Run simple blocking:')
    print('  List of blocking key attributes: ' + str(blk_attr_list))
    print('  Number of records to be blocked: ' + str(len(rec_dict)))
    print('')

    for (rec_id, rec_values) in rec_dict.items():
        rec_bkv = ''  # Initialise the blocking key value for this record

        # Process selected blocking attributes
        #
        for attr in blk_attr_list:
            attr_val = rec_values[attr]
            rec_bkv += attr_val

        # Insert the blocking key value and record into blocking dictionary
        #
        if (rec_bkv in block_dict):  # Block key value in block index

            # Only need to add the record
            #
            rec_id_list = block_dict[rec_bkv]
            rec_id_list.append(rec_id)

        else:  # Block key value not in block index

            # Create a new block and add the record identifier
            #
            rec_id_list = [rec_id]

        block_dict[rec_bkv] = rec_id_list  # Store the new block

    return block_dict
5bf9b85ad84ffa3dc11a39a876cbcfefe09a5b2c
5,482
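A minimal run of the blocker above. The record layout (a list indexed by attribute position) and the integer blocking attributes are assumptions for illustration; the original docstring only fixes the output shape.

# Block on postcode (index 0) plus gender (index 1), as in the docstring example
records = {'rec1': ['2000', 'f'], 'rec2': ['2000', 'm'], 'rec3': ['2600', 'f']}
blocks = simpleBlocking(records, [0, 1])
assert blocks == {'2000f': ['rec1'], '2000m': ['rec2'], '2600f': ['rec3']}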
import collections


def _group_by(input_list, key_fn):
    """Group a list according to a key function (with a hashable range)."""
    result = collections.defaultdict(list)
    for x in input_list:
        result[key_fn(x)].append(x)
    return result
288c108588f9e4ea60c4dac6ff656c8c8ffde580
5,484
def _get_static_covariate_df(trajectories):
    """The (static) covariate matrix."""
    raw_v_df = (
        trajectories.static_covariates.reset_coords(drop=True).transpose(
            'location', 'static_covariate').to_pandas())
    # This can then be used with, e.g. patsy.
    # expanded_v_df = patsy(raw_v_df, ...patsy details...)
    # Optionally it can be converted back to xa using
    # expanded_v_xa = xarray.DataArray(expanded_v_df)
    # for now...
    v_df = raw_v_df
    return v_df
15c8f367452fc5007ad93fd86e04cfea07e96982
5,485
import json


def answer_cells_of_nb(a_ipynb):
    """Get the contents of all answer cells (having grade_id) in an
    a_ipynb file."""
    cells = {}
    with open(a_ipynb) as ipynb_fp:
        content = json.load(ipynb_fp)
        for cell in content["cells"]:
            meta = cell["metadata"]
            nbg = meta.get("nbgrader")
            if nbg is None or not nbg["solution"]:
                continue
            assert ("grade_id" in nbg), (a_ipynb, cell)
            prob_name = nbg["grade_id"]  # like a1-1-1
            source = cell["source"]
            outputs = cell.get("outputs", [])
            assert (prob_name not in cells), prob_name
            cells[prob_name] = source, outputs
    return cells
3b011d48a8ccfa13d462cccf1b0a58440231a1ce
5,486
def multiply_values(dictionary: dict, num: int) -> dict:
    """Multiplies each value in `dictionary` by `num`

    Args:
        dictionary (dict): subject dictionary
        num (int): multiplier

    Returns:
        dict: mapping of keys to values multiplied by multiplier
    """
    return (
        {key: value * num for key, value in dictionary.items()}
        if dictionary is not None
        else {}
    )
16eb87d60da64d648113858ba5cb4308137e0a14
5,488
from typing import Any


def desg_to_prefix(desg: str) -> Any:
    """Convert small body designation to file prefix."""
    return (desg.replace('/', '').replace(' ', '')
            .replace('(', '_').replace(')', '_'))
badde1e3ec9c3f669c7cce8aa55646b15cc5f4c8
5,489
def field2nullable(field, **kwargs):
    """Return the dictionary of swagger field attributes for a nullable field.

    :param Field field: A marshmallow field.
    :rtype: dict
    """
    attributes = {}
    if field.allow_none:
        omv = kwargs['openapi_major_version']
        attributes['x-nullable' if omv < 3 else 'nullable'] = True
    return attributes
dd5d4cd63aeede4ef9356baa9fe9a48bd5f87841
5,490
def star_marker_level(prev, curr):
    """Allow markers to be on the same level as a preceding star"""
    return (prev.is_stars() and not curr.is_stars()
            and prev.depth == curr.depth)
3311c452c8f138cd8fa75b67109e75a9bf30902c
5,491
def lookup_loc_carriers(model_run):
    """
    loc_carriers, used in system_wide balance, are linked to loc_tech_carriers.
    E.g. `X1::power` will be linked to `X1::chp::power` and
    `X1::battery::power` in a comma delimited string, e.g.
    `X1::chp::power,X1::battery::power`
    """
    # get the technologies associated with a certain loc_carrier
    lookup_loc_carriers_dict = dict(dims=["loc_carriers"])
    data = []
    for loc_carrier in model_run.sets["loc_carriers"]:
        loc_tech_carrier = list(
            set(
                i
                for i in model_run.sets["loc_tech_carriers_prod"]
                + model_run.sets["loc_tech_carriers_con"]
                if loc_carrier == "{0}::{2}".format(*i.split("::"))
            )
        )
        data.append(",".join(loc_tech_carrier))
    lookup_loc_carriers_dict["data"] = data
    return lookup_loc_carriers_dict
85c20bd789e0250405dded9e0e4a56777047ef5a
5,493
def comp4(a1, a2, b1, b2):
    """Intersection of two intervals; a1 < a2, b1 < b2."""
    if a2 < b1 or b2 < a1:  # empty intersection
        gtii = []
    else:
        lst1 = sorted([a1, a2, b1, b2])
        gtii = [lst1[1], lst1[2]]
    return gtii
ba4357b16ee09f78b6c09f422d27a42cd91e298e
5,495
def mapfmt_str(fmt: str, size: int) -> str:
    """Same as mapfmt, but works on strings instead of bytes."""
    if size == 4:
        return fmt
    return fmt.replace('i', 'q').replace('f', 'd')
af51b6ac65c80eef1721b64dcd8ee6a8bb5cbc97
5,496
def get_chrom_start_end_from_string(s):
    """Get chrom name, int(start), int(end) from a string
    '{chrom}__substr__{start}_{end}'

    ...doctest:
        >>> get_chrom_start_end_from_string('chr01__substr__11838_13838')
        ('chr01', 11838, 13838)
    """
    try:
        chrom, s_e = s.split('__substr__')
        start, end = s_e.split('_')
        return chrom, int(start), int(end)
    except Exception:
        raise ValueError(
            "String %s must be of format '{chrom}__substr__{start}_{end}'" % s)
5dbce8eb33188c7f06665cf92de455e1c705f38b
5,498
def _get_protocol(url):
    """
    Get the protocol of a url. Defaults to 'http' when no recognised
    scheme prefixes the url.
    """
    if url.find('http://') == 0:
        return 'http'
    elif url.find('https://') == 0:
        return 'https'
    else:
        return 'http'
42b2750148829154f17e34a2cebccf4387f07f25
5,500
def row_to_str(row):
    """Convert a df row to a string for insert into SQL database."""
    return str(list(row)).replace("[", "(").replace("]", ")")
fb2b0d598604a124b948f884a6839a40af1203fc
5,501
def subtract_loss_from_gain(gain_load, loss_load):
    """Create a single DataCollection from gains and losses."""
    total_loads = []
    for gain, loss in zip(gain_load, loss_load):
        total_load = gain - loss
        total_load.header.metadata['type'] = \
            total_load.header.metadata['type'].replace('Gain ', '')
        total_loads.append(total_load)
    return total_loads
b53044b802a8ea13befdde850a478c435b0370ef
5,503
def cosine(u, v, dim=-1):
    """cosine similarity"""
    return (u * v).sum(dim=dim) / (u.norm(dim=dim, p=2) * v.norm(dim=dim, p=2))
2d2a5a02ce20f6ae37dbefa3c8f9399aef2da8ad
5,505
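A quick check of the cosine similarity above with PyTorch tensors (inputs invented): orthogonal vectors score 0 and a vector against itself scores 1.

import torch

u = torch.tensor([1.0, 0.0])
v = torch.tensor([0.0, 1.0])
assert cosine(u, v).item() == 0.0
assert abs(cosine(u, u).item() - 1.0) < 1e-6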
def sentence_segment(doc, candidate_pos):
    """Store those words only in candidate_pos"""
    sentences = []
    for sent in doc.sents:
        selected_words = []
        for token in sent:
            # Store words only with candidate POS tag
            if token.pos_ in candidate_pos and token.is_stop is False \
                    and len(token.text) > 1:
                selected_words.append(token.text.lower())
        sentences.append(selected_words)
    return sentences
6c56d47470e60edddfedfeb476aa7833be765218
5,506
def print_atom_swap(swap):
    """Return atom swap string for DL CONTROL"""
    return "{} {}".format(swap["id1"], swap["id2"])
4c2fa18434e7a66b98b9716b89a26b622b588cd6
5,507
import sys
import argparse


def process_command_line(argv):
    """
    Return the parsed settings object.
    `argv` is a list of arguments, or `None` for ``sys.argv[1:]``.
    """
    if argv is None:
        argv = sys.argv[1:]
    # initialize the parser object, replace the description
    parser = argparse.ArgumentParser(
        description='Plot scatter plots and correlations',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument(
        'counts_files',
        help='A file with two columns, the first is the name of the library'
             ' and the second is the name of the counts file. The third column'
             ' is the counts of the single reads.')
    parser.add_argument(
        'output_head',
        help='Name of output files prefix, two figures will be generated:'
             ' _scatters.tif and _heatmap.tif.')
    parser.add_argument(
        '-l', '--seglen', type=int, default=100,
        help='Length of segment for binning, needs to be the same as used to '
             'generate the summary files.')
    parser.add_argument(
        '-c', '--counts', type=int, default=5,
        help='Minimal number of reads to include in the plot.')
    settings = parser.parse_args(argv)
    return settings
ac80365e1b9227bc4f23021cd04cce777fd08b6d
5,508
def _check_lfs_hook(client, paths):
    """Check whether the specified paths require tracking in external
    storage."""
    return client.check_requires_tracking(*paths)
403b3db59f6eeec72c8f4a3b18808997b0f34724
5,509
import argparse


def parse_args():
    """Parse the input arguments of the program"""
    parser = argparse.ArgumentParser(
        description='Process FAQ template files to create the FAQ.')
    parser.add_argument('-i', '--input', required=True, metavar='I',
                        nargs='+', help='FAQ files parsed')
    parser.add_argument('-o', '--output', metavar='O', nargs=1,
                        help='output file')
    return parser.parse_args()
c457a6fa0aed998720d5521890ac3ea263749363
5,510
import os


def get_dftd3_energy(ipt):
    """Grimme's D3 correction to energy"""
    fxyz, func, iabc = ipt
    sabc = ' -abc' if iabc else ''
    cmd = "dftd3 %s -func %s -bj%s | grep Edisp | awk '{print $NF}'" % (
        fxyz, func, sabc)
    # print(cmd); sys.exit(2)
    e = eval(os.popen(cmd).read().strip())
    return e
9c0098fd619e202b867b956b56227124d10cae8a
5,512
def is_multioutput(y):
    """Whether the target y is multi-output (or multi-index)"""
    return hasattr(y, "shape") and y.ndim == 2 and y.shape[1] > 1
bcdaa46c304fec50c173dffca5f1f1d5d8871a58
5,515
import logging
import sys


def connectOutputLogger(file=None, logger_name='output'):
    """
    Function that connects the output logger. This is always enabled and
    hardwired to generate logging.INFO level messages only.

    @params file : Output file to store these messages into.
    @default None : If file is None no output file is generated.
    """
    # Create logger for logging messages
    logger = logging.getLogger(logger_name)
    #
    # Define and set the format of the logged messages
    #
    formatter = logging.Formatter('%(message)s')
    #
    # Create handlers here
    #
    std_output = logging.StreamHandler(sys.stdout)
    std_output.setFormatter(formatter)
    logger.addHandler(std_output)
    # log to a file
    if file is not None:
        file_output = logging.FileHandler(file)
        file_output.setFormatter(formatter)
        logger.addHandler(file_output)
    logger.setLevel(logging.INFO)
    return logger
ce3b0f2a9910afd1ea364177f746353559f63bd2
5,516
from typing import Mapping


def filter_dict(function_or_value, dict_to_filter):
    """
    Filter by value
    >>> filter_dict(123, {'a': 123, 'b': 1234})
    {'b': 1234}

    Filter by value not applicable
    >>> filter_dict(123, {'a': 1234, 'b': 5123})
    {'a': 1234, 'b': 5123}

    Embedded filter by value
    >>> filter_dict(123, {'a': {'c': 123}, 'b': 1234})
    {'b': 1234}

    Embedded with extra by value
    >>> filter_dict(123, {'a': {'c': 123, 'd': 432}, 'b': 1234})
    {'a': {'d': 432}, 'b': 1234}

    Embedded mixed filter
    >>> filter_dict(123, {'a': {'c': 123, 'd': 432}, 'b': 123, 'e': 'test'})
    {'a': {'d': 432}, 'e': 'test'}

    Filter by callable
    >>> filter_dict(lambda x: x % 2 == 0, {'a': 532, 'b': 891})
    {'a': 532}

    Filter by callable not applicable
    >>> filter_dict(lambda x: x % 2 == 0, {'a': 538, 'b': 8})
    {'a': 538, 'b': 8}

    Embedded filter by callable
    >>> filter_dict(lambda x: bool(x), {'a': {'c': False}, 'b': 'test'})
    {'b': 'test'}

    Embedded with extra by callable
    >>> filter_dict(
    ...     lambda x: 'a' in x, {'a': {'c': 'ba', 'd': 'tt'}, 'b': 'd'})
    {'a': {'c': 'ba'}}

    Embedded mixed filter
    >>> filter_dict(
    ...     lambda x: bool(x), {'a': {'c': True, 'd': 0}, 'b': 'test', 'e': []}
    ... )
    {'a': {'c': True}, 'b': 'test'}
    """
    func = function_or_value
    if not callable(function_or_value):
        def new_func(value):
            return value != function_or_value
        func = new_func

    result = {}
    for key, value in dict_to_filter.items():
        if isinstance(value, Mapping):
            value = filter_dict(func, value)
            if value:
                result[key] = value
        elif func(value):
            result[key] = value
    return result
6403f716c21a1cfef046174899183858837bb92e
5,517
import torch


def calc_ranks(idx, label, pred_score):
    """Calculating triples score ranks.

    Args:
        idx ([type]): The id of the entity to be predicted.
        label ([type]): The id of existing triples, to calc filtered results.
        pred_score ([type]): The score of the triple predicted by the model.

    Returns:
        ranks: The rank of the triple to be predicted, dim [batch_size].
    """
    b_range = torch.arange(pred_score.size()[0])
    target_pred = pred_score[b_range, idx]
    pred_score = torch.where(label.bool(),
                             -torch.ones_like(pred_score) * 10000000,
                             pred_score)
    pred_score[b_range, idx] = target_pred
    ranks = (
        1
        + torch.argsort(
            torch.argsort(pred_score, dim=1, descending=True),
            dim=1,
            descending=False,
        )[b_range, idx]
    )
    return ranks
1f3d56c9a93afdd314c9a244319ef78668426481
5,518
import re


def function_sql(field, mysql_result_list):
    """
    Substitute MySQL query results into a field.
    :param field: the field defined in the yaml file
    :param mysql_result_list: the list of MySQL query results
    :return: the field with placeholders substituted
    """
    if "{__SQL" in field:
        mysql_index_list = re.findall("{__SQL(.+?)}", field)  # list of indexes
        for i in mysql_index_list:
            mysql_value = mysql_result_list[int(i)]
            if type(mysql_value) != str:
                mysql_value = str(mysql_value)
            field = field.replace("{__SQL" + i + "}", mysql_value)
    else:
        pass
    return field  # return the substituted field
769881ae5e3a7caa036c977785827e219e5ab92b
5,519
from typing import Optional


def prepare_error_message(message: str, error_context: Optional[str] = None) -> str:
    """
    If `error_context` is not None, prepend it to the error message.
    """
    if error_context is not None:
        return error_context + ": " + message
    else:
        return message
ea95d40797fcc431412990706d5c098a07986156
5,521
def run(df, docs, columns):
    """
    Converts each column to type int.
    :param df: input DataFrame
    :param docs: documentation hooks notified at start and end
    :param columns: columns to convert
    :return: the converted DataFrame
    """
    for doc in docs:
        doc.start("t07 - Change type of {} to int".format(
            str(columns).replace("'", "")), df)
    for column in columns:
        df[column] = df[column].astype(int)
    for doc in docs:
        doc.end(df)
    return df
5d360a764ad30a80c39d58f9aeb520d7c57f7903
5,524
from datetime import datetime


def calcular_diferencia_dias(fin_dia):
    """Gets the difference in days between a given date and today."""
    hoy = datetime.now()
    end = datetime.strptime(str(fin_dia), '%Y-%m-%d')
    return abs(end - hoy).days
41b732f3bb09d2deca4be034273a5fed74971386
5,526
import functools


def typed(*types):
    """Type annotation. The final type is the output type."""
    if len(types) < 1:
        raise SyntaxError('Too few arguments: typed{}'.format(types))
    if len(types) > 3:
        raise NotImplementedError('Too many arguments: typed{}'.format(types))
    result_type = types[-1]
    arg_types = types[:-1]

    def decorator_0(fun):
        @functools.wraps(fun)
        def typed_fun():
            return result_type(fun())
        return typed_fun

    def decorator_1(fun):
        @functools.wraps(fun)
        def typed_fun(arg):
            arg = arg_types[0](arg)
            return result_type(fun(arg))
        return typed_fun

    def decorator_2(fun):
        @functools.wraps(fun)
        def typed_fun(arg0, arg1):
            arg0 = arg_types[0](arg0)
            arg1 = arg_types[1](arg1)
            return result_type(fun(arg0, arg1))
        return typed_fun

    return [decorator_0, decorator_1, decorator_2][len(arg_types)]
90f100bebd5778d36eee1ad04b7c831b003ce604
5,527
import argparse


def makeParser():
    """
    Make a command-line argument parser.

    @return: An C{argparse.ArgumentParser} instance.
    """
    parser = argparse.ArgumentParser(
        description=('Print a JSON object containing reference to read '
                     'distances extracted from a SAM file.'))
    parser.add_argument(
        '--samFile', action='append', required=True,
        help='The SAM file(s) to load. May be repeated.')
    parser.add_argument(
        '--minMatchingReads', type=int,
        help=('The minimum number of reads that must match a reference for '
              'it to be included.'))
    parser.add_argument(
        '--scoreTag',
        help=('The score tag to use for the alignment score. If not given, '
              '1 will be used to indicate that a read matched a reference '
              '(non-matches are not included). The default is no score tag, '
              'which is not that useful. A good choice is "AS", for the '
              'alignment score, but that has to be present in the SAM file, '
              'which means that the aligner (bowtie2, bwa, etc.) has to have '
              'produced such a tag.'))
    parser.add_argument(
        '--verbose', action='store_true',
        help='Print extra information.')
    return parser
d1dd832d1533eb5a506eca19a35499362b03feb7
5,528
def pairwise_list(a_list):
    """
    Convert a list into a list of non-overlapping pairs:
    "s -> [s0, s1], [s2, s3], [s4, s5], ..."
    :param a_list: list
    :return: list of pairs
    """
    if len(a_list) % 2 != 0:
        raise Exception("pairwise_list error!")
    r_list = []
    for i in range(0, len(a_list) - 1, 2):
        r_list.append([a_list[i], a_list[i + 1]])
    return r_list
5142fb2e00c931ab57fc9028eb9b6df5a98c0342
5,530
from datetime import datetime


def read_hotfilm_from_lvm(filename, dt=1e-3):
    """Reads 2-channel hotfilm data from a Labview text file."""
    times = []
    ch1 = []
    ch2 = []
    data = [line.rstrip() for line in open(filename).readlines()]
    line = data[0].split(',')[1:]
    t = [int(float(n)) for n in line[:5]]
    seconds = float(line[5])
    useconds = int(1e6 * (seconds - int(seconds)))
    start_time = datetime(t[0], t[1], t[2], t[3], t[4], int(seconds),
                          useconds)
    seconds = 0
    for line in data:
        line = line.split(',')[1:]
        ch1.append(float(line[6]))
        ch2.append(float(line[7]))
        times.append(seconds)
        seconds += dt
    return start_time, times, ch1, ch2
e0dadac656120173e5833e6eb36498943613e8f5
5,532
import argparse
import sys


def get_cli_args():
    """Gets command line arguments"""
    parser = argparse.ArgumentParser(
        description='Tool to increase YouTube views',
        add_help=False,
    )
    # main arguments
    main = parser.add_argument_group(
        'Main Arguments',
    )
    main.add_argument(
        '--visits',
        type=int,
        default=1,
        help='amount of times the video will be viewed. Default: 1',
    )
    main.add_argument(
        '--url',
        help='YouTube video url',
    )
    main.add_argument(
        '--proxy',
        help='set the proxy server to be used. e.g: 127.0.0.1:8118',
    )
    main.add_argument(
        '--enable-tor',
        action='store_true',
        help='enable TOR support (You must have installed TOR at your system)',
    )
    # optional arguments
    optional = parser.add_argument_group('Optional Arguments')
    optional.add_argument(
        '-v', '--verbose',
        action='store_true',
        help='show more output',
    )
    optional.add_argument(
        '-h', '--help',
        action='store_true',
        help='show this help message and exit',
    )
    args = parser.parse_args()
    if len(sys.argv) == 1 or args.help:
        parser.print_help()
        sys.exit(0)
    if args.enable_tor is True and args.proxy is None:
        parser.error('--enable-tor requires --proxy')
        sys.exit(0)
    return args
3016982a76c5756c246bc81c7ca101f49b97b04b
5,534
def draft_github_comment(template, result):
    """
    Use a template to draft a GitHub comment

    :template: (str) the name of the template file
    :result: (ContentError) the error to display in the comment
    """
    # start with template
    with open(template, 'r') as f:
        contents = f.read()
    # replace variables in template with ContentError values
    for var in vars(result).keys():
        contents = contents.replace(f'{{{{ {var} }}}}',
                                    str(getattr(result, var)))
    return contents
42842b7af06da8a54c0647e5aac079132e82de5a
5,536
import csv


def get_zip_rate_areas(file_path):
    """
    Reads the zips.csv file and returns the content as a dictionary.

    Args:
        file_path: the path to zips.csv file

    Returns:
        a dictionary mapping each zip code into a set of rate areas
    """
    zip_rate_areas = dict()
    with open(file_path, "r") as zip_codes_file:
        csv_reader = csv.DictReader(zip_codes_file, delimiter=",")
        for line in csv_reader:
            zipcode = line["zipcode"]
            rate_area = f"{line['state']} {line['rate_area']}"
            if zipcode not in zip_rate_areas:
                zip_rate_areas[zipcode] = set()
            zip_rate_areas[zipcode].add(rate_area)
    return zip_rate_areas
d8405bc466e7bbe949fded4360d4f184024653d2
5,537
def get_file_encoding(filename):
    """Get the file encoding for the file with the given filename"""
    with open(filename, 'rb') as fp:
        # The encoding is usually specified on the second line
        txt = fp.read().splitlines()[1]
    txt = txt.decode('utf-8')
    if 'encoding' in txt:
        encoding = txt.split()[-1]
    else:
        encoding = 'utf-8'  # default
    return str(encoding)
c91a8f71429ed5f6eccd7379c66b4ee4c2d73989
5,539
def alias(name):
    """Make property given by name be known under a different name"""
    def get(self):
        return getattr(self, name)

    def set(self, value):
        setattr(self, name, value)

    return property(get, set)
37539fbb2d413a4964fec09f9c1b40fed203dc34
5,540
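A short demonstration of the alias property factory above; the Point class is invented for illustration.

class Point:
    def __init__(self):
        self._x = 0
    x = alias('_x')  # reads and writes of .x go through ._x

p = Point()
p.x = 5
assert p._x == 5 and p.x == 5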
from typing import Any


def float_converter(value: Any) -> float:
    """Validator that ensures value is a float."""
    if isinstance(value, bool):
        raise ValueError()
    if isinstance(value, float):
        return value
    elif isinstance(value, int):
        return float(value)
    else:
        raise ValueError()
c46832a017c0d83017e75fa58090a030bc5091c2
5,541
import argparse


def get_args():
    """Gets the arguments from the command line."""
    parser = argparse.ArgumentParser("python infer.py")
    required, optional = parser._action_groups
    required.add_argument("-i", "--input",
                          help="Audio (.wav) file to diarize", required=True)
    required.add_argument("--xml", help="OpenVINO IR .xml file",
                          required=True)
    required.add_argument("--bin", help="OpenVINO IR .bin file",
                          required=True)
    required.add_argument("--config", help="model config file", required=True)
    optional.add_argument("--max-frame", help="Inference window length",
                          default=45)
    optional.add_argument("--hop", help="Hop length of inference window",
                          default=3)
    optional.add_argument("--plot", help="Plot the diarization result",
                          default=True)
    args = parser.parse_args()
    return args
a4ba0e72a55c375166e9eecf58825c53bcadc861
5,542
from typing import List
import logging


def get_all_loggers() -> List:
    """Return list of all registered loggers."""
    logger_dict = logging.root.manager.loggerDict  # type: ignore
    loggers = [logging.getLogger(name) for name in logger_dict]
    return loggers
97059a78925ff669a841644b186e39ccd366472d
5,544
def chan_freq(header, fine_channel, tdwidth, ref_frame):
    """
    Args:
        header:
        fine_channel:
        tdwidth:
        ref_frame:

    Returns:
        chanfreq
    """
    fftlen = header['NAXIS1']
    chan_index = fine_channel - (tdwidth - fftlen) / 2
    chanfreq = header['FCNTR'] + (chan_index - fftlen / 2) * header['DELTAF']
    # apply doppler correction
    if ref_frame == 1:
        chanfreq = (1 - header['baryv']) * chanfreq
    return chanfreq
b7cb67d2d7b21a475fdaddee47ebbd6d112125f8
5,546
import typing


def intensity(pixel: typing.Tuple[int, int, int]) -> int:
    """Sort by the intensity of a pixel, i.e. the sum of all the RGB values."""
    return pixel[0] + pixel[1] + pixel[2]
5ba060d409a2f0df148cdc50684b80f920cf314f
5,547
import torch


def apply_hardmask_with_map(input, attention, mask):
    """
    Apply any number of attention masks over the input.
    input: [batch, num_objs, visual_features] = [b, o, v]
    attention: [batch, num_objs, glimpses] = [b, o, g]
    return: masked_input, masked_weight
    """
    b, o, v = input.shape
    # remain the lower att-weights
    mask_map = torch.ones_like(attention)  # [b, o, g]
    mask_val, mask_idx = attention.topk(mask, dim=1)  # [b, m, g]
    mask_map = mask_map.scatter(1, mask_idx, 0.0)  # [b, o, g]
    return input * mask_map

    # attention = attention * mask_map     # [b, o, g]
    # input = input.unsqueeze(2)           # [b, o, 1, v]
    # attention = attention.unsqueeze(-1)  # [b, o, g, 1]
    # weighted = attention * input         # [b, o, g, v]
    # weighted_mean = weighted.sum(dim=1)  # [b, g, v]
    # return weighted_mean.view(b, -1), mask_idx
3dd40399e42a02770351cd772feb89ed7b2dd16b
5,548
def data_file_read_calltree(filename):
    """
    Extracts the calltree of a fuzzer from a .data file.
    This is for C/C++ files.
    """
    read_tree = False
    function_call_depths = []

    tmp_function_depths = {
        'depth': -2,
        'function_calls': []
    }
    with open(filename, "r") as flog:
        for line in flog:
            line = line.replace("\n", "")
            if read_tree and "======" not in line:
                stripped_line = line.strip().split(" ")

                # Type: {spacing depth} {target filename} {line count}
                if len(stripped_line) == 3:
                    filename = stripped_line[1]
                    linenumber = int(
                        stripped_line[2].replace("linenumber=", ""))
                else:
                    filename = ""
                    linenumber = 0

                space_count = len(line) - len(line.lstrip(' '))
                depth = space_count / 2
                curr_node = {'function_name': stripped_line[0],
                             'functionSourceFile': filename,
                             'depth': depth,
                             'linenumber': linenumber}

                if tmp_function_depths['depth'] != depth:
                    if tmp_function_depths['depth'] != -2:
                        function_call_depths += list(
                            sorted(tmp_function_depths['function_calls'],
                                   key=lambda x: x['linenumber']))
                    tmp_function_depths = {
                        'depth': depth,
                        'function_calls': []
                    }
                tmp_function_depths['function_calls'].append(curr_node)
                # function_call_depths.append(curr_node)
            if "====================================" in line:
                read_tree = False
            if "Call tree" in line:
                read_tree = True

    # Add the remaining list of nodes to the overall list.
    function_call_depths += list(
        sorted(tmp_function_depths['function_calls'],
               key=lambda x: x['linenumber']))
    return function_call_depths
10d876a8aa585a767f1939434edc018e5c44404d
5,549
def get_boundary_from_response(response):
    """
    Parses the response header and returns the boundary.

    :param response: response containing the header that contains the boundary
    :return: a binary string of the boundary
    """
    # Read only the first value with key 'content-type'
    # (duplicate keys are allowed)
    content = response.headers.pop('content-type')[0]
    # Find the start and end index of the boundary
    b_start = content.find(b'boundary=')
    b_end = content[b_start:].find(b';')
    # Separate out boundary
    if b_end == -1:
        # If the end point is not found, just go to the end of the
        # content string
        boundary = content[b_start + 9:]
    else:
        boundary = content[b_start + 9:b_start + b_end]
    return boundary
66a0112598b2210cca1a2210f6af963dfee641f7
5,553
import logging
import json


def get_message(message):
    """{
        'pattern': None,
        'type': 'subscribe',
        'channel': 'my-second-channel',
        'data': 1L,
    }"""
    if not message:
        return
    logging.info('MSG: %s', message)
    data = message.get('data', {})
    return json.loads(data)
2e79ed94fbfc3fba122e8bd8663e33b124d4d2b6
5,554
def getPairCategory(rollSorted):
    """
    Converts a roll's ordered list of frequencies to the pairwise
    hand category.
    """
    if rollSorted[0] == 6:
        return "six-of-a-kind"
    elif rollSorted[0] == 5:
        return "five-of-a-kind"
    elif rollSorted[0] == 4 and rollSorted[1] == 2:
        return "four-two full house"
    elif rollSorted[0] == 4:
        return "four-of-a-kind"
    elif rollSorted[0] == 3 and rollSorted[1] == 3:
        return "double threes-of-a-kind"
    elif rollSorted[0] == 3 and rollSorted[1] == 2:
        return "three-two full house"
    elif rollSorted[0] == 3:
        return "three-of-a-kind"
    elif rollSorted[0] == 2 and rollSorted[1] == 2 \
            and rollSorted[2] == 2:
        return "three pairs"
    elif rollSorted[0] == 2 and rollSorted[1] == 2:
        return "two pairs"
    elif rollSorted[0] == 2:
        return "one pair"
    else:
        return "high card"
1c48abd8d0c1a27a50ce587857852a95e8949e74
5,555
def uint_to_little_endian_bytearray(number, size):
    """Converts an unsigned integer to a little endian bytearray.

    Arguments:
    number -- the number to convert
    size -- the length of the target bytearray
    """
    if number > (2 ** (8 * size) - 1):
        raise ValueError("Integer overflow")
    nle = [0] * size
    for i in range(size):
        nle[i] = number >> i * 8 & 0xFF
    return nle
bd3314fedf0accbc0d15b1bb146f54f52cb3bce1
5,557
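A worked check of the byte extraction above (values computed by hand): the low byte comes first and the result is zero-padded to the requested size.

assert uint_to_little_endian_bytearray(0x1234, 4) == [0x34, 0x12, 0x00, 0x00]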
import re


def to_alu_hlu_map(input_str):
    """Converter for alu hlu map

    Convert following input into a alu -> hlu map:
    Sample input:

    ```
    HLU Number     ALU Number
    ----------     ----------
      0               12
      1               23
    ```

    ALU stands for array LUN number
    HLU stands for host LUN number

    :param input_str: raw input from naviseccli
    :return: alu -> hlu map
    """
    ret = {}
    if input_str is not None:
        pattern = re.compile(r'(\d+)\s*(\d+)')
        for line in input_str.split('\n'):
            line = line.strip()
            if len(line) == 0:
                continue
            matched = re.search(pattern, line)
            if matched is None or len(matched.groups()) < 2:
                continue
            else:
                hlu = matched.group(1)
                alu = matched.group(2)
                ret[int(alu)] = int(hlu)
    return ret
8e211b7efa3f8dd23c042f046d881daf987062bc
5,558
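A quick run against the docstring's own sample table; the exact whitespace is an assumption, but the regex only needs two numbers per line.

sample = '''
HLU Number     ALU Number
----------     ----------
  0               12
  1               23
'''
assert to_alu_hlu_map(sample) == {12: 0, 23: 1}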
import subprocess


def run_command(command):
    """Run command"""
    try:
        res = subprocess.run(
            command,
            capture_output=True,
            text=True,
            check=True,
            shell=True,
        )
        out = res.stdout
    except subprocess.CalledProcessError as exc:
        out = exc.output, exc.stderr, exc.returncode
    return out
1807e42893c7f6ba6bdc52849bb69221a9d4be89
5,559
def merge_scores(scores_test, scores_val):
    """Aggregate scores"""
    scores_valtest = {}
    for key in scores_test:
        key_valtest = "final/" + key.split("/")[1]
        if key.startswith("test/"):
            keyval = "val/" + key.split("/")[1]
            value = 0.5 * (scores_test[key]["value"]
                           + scores_val[keyval]["value"])
            if scores_test[key]["string"].endswith("%"):
                value_str = f"{value:05.2%}"
            else:
                value_str = f"{value:.6}"
            stats = {"value": value, "string": value_str}
            scores_valtest[key_valtest] = stats
        else:
            scores_valtest[key_valtest] = scores_test[key]
    return scores_valtest
be0dded69367e7554c0cc2632946d46954a3cc15
5,560
def normalize(df):
    """Pandas df normalisation

    Parameters:
        df (pd df) : input df

    Returns:
        result (pd df) : output df
    """
    result = df.copy()
    for feature_name in df.columns:
        max_value = df[feature_name].max()
        min_value = df[feature_name].min()
        result[feature_name] = (2 * (df[feature_name] - min_value)
                                / (max_value - min_value) - 1)
    result = result.fillna(0)
    return result
2fc05fc9ef7642ac4b84cb6ed567ec64c1da0836
5,561
import os


def copy_decompress(source_fn):
    """Generate bash code to copy/decompress file to $TMPDIR."""
    new_fn = os.path.basename(source_fn)
    # Drop the suffix by slicing; str.strip removes matching characters
    # from both ends, not a suffix.
    if source_fn.lower().endswith(".gz"):
        new_fn = new_fn[:-len(".gz")]
        cmd = "gunzip -c {source_fn} > $TMPDIR/{new_fn}"
    elif source_fn.lower().endswith(".bz2"):
        new_fn = new_fn[:-len(".bz2")]
        cmd = "bzip2 -dc {source_fn} > $TMPDIR/{new_fn}"
    elif source_fn.lower().endswith(".dsrc"):
        new_fn = new_fn[:-len(".dsrc")]
        cmd = "dsrc d {source_fn} $TMPDIR/{new_fn}"
    else:
        cmd = "cp {source_fn} $TMPDIR/{new_fn}"
    cmd = "\n".join([cmd, "cd $TMPDIR"])
    return cmd.format(source_fn=source_fn, new_fn=new_fn), new_fn
7b65e66f240e2e86b70c963e7f0d90381656210e
5,563
def _default_value(argument, default):
    """Returns ``default`` if ``argument`` is ``None``"""
    if argument is None:
        return default
    else:
        return argument
52eed8ddaf3c52adba69044cc462fc11279670c5
5,564
def is_constant_type(expression_type):
    """Returns True if expression_type is inhabited by a single value."""
    return (expression_type.integer.modulus == "infinity" or
            expression_type.boolean.HasField("value") or
            expression_type.enumeration.HasField("value"))
66a3237971299df3c7370f039d87a8b5f4ae2be5
5,565
import torch


def attention_mask_creator(input_ids):
    """Provide the attention mask list of lists: 0 only for [PAD] tokens
    (index 0). Returns a torch tensor."""
    attention_masks = []
    for sent in input_ids:
        segments_ids = [int(t > 0) for t in sent]
        attention_masks.append(segments_ids)
    return torch.tensor(attention_masks)
06a5880069cdc88ea33fe987bf4ac77aceef13eb
5,567
def nonrigid_rotations(spc_mod_dct_i):
    """Determine if the rotational partition function for a certain
    species should be calculated according to some non-rigid model.

    This determination solely relies on whether the user has specified
    the use of a non-rigid model for the species.

    :param spc_mod_dct_i: species partition function models
    :type spc_mod_dct_i: dict[str: str]
    :rtype: bool
    """
    rot_model = spc_mod_dct_i['rot']['mod']
    return bool(rot_model == 'vpt2')
5ef94d4dc1b267ffab6bb654d58aae592b69d367
5,569
import numpy


def normalize_const(v):
    """Normalize a numpy array of floats or doubles."""
    return v / numpy.linalg.norm(v)
927ad9d2d94735263ac10a445f4f7fe4b3150c95
5,570
import json


def json_response(data):
    """This function is used for ajax:

    def route(request):
        return json_response(t.json())
    """
    header = 'HTTP/1.1 200 OK\r\nContent-Type: application/json\r\n'
    body = json.dumps(data, ensure_ascii=False, indent=8)
    r = header + '\r\n' + body
    return r.encode(encoding='utf-8')
8678a8f62fab10d9120d354889387b6d70cddea9
5,573
def check_strand(strand):
    """
    Check the strand format. Return an error message if the format is
    not as expected.
    """
    if (strand != '-' and strand != '+'):
        return "Strand is not in the expected format (+ or -)"
9c2e720069ad8dcc8f867a37925f6e27e91dcb3f
5,575
def _copy_df(df):
    """Copy a DataFrame"""
    return df.copy() if df is not None else None
263bf1cf9cbdae371ea3e4685b4638e8a5714d7f
5,576
def autosolve(equation):
    """
    Automatically solve an easy maths problem.

    :type equation: string
    :param equation: The equation to calculate.

    >>> autosolve("300 + 600")
    900
    """
    try:
        # Try to set a variable to an integer
        num1 = int(equation.split(" ")[0])
    except ValueError:
        # Try to set a variable to a decimal
        num1 = float(equation.split(" ")[0])
    try:
        # Try to set a variable to an integer
        num2 = int(equation.split(" ")[2])
    except ValueError:
        # Try to set a variable to a decimal
        num2 = float(equation.split(" ")[2])

    # If the lowercase version of the operator is '+', 'plus' or 'add'
    if equation.split(" ")[1].lower() in ["+", "plus", "add"]:
        # Return the answer
        return num1 + num2
    # If the lowercase version of the operator is '-', 'minus' or 'subtract'
    elif equation.split(" ")[1].lower() in ["-", "minus", "subtract"]:
        # Return the answer
        return num1 - num2
    # If the lowercase version of the operator is '*', 'times' or 'multiply'
    elif equation.split(" ")[1].lower() in ["*", "times", "multiply"]:
        # Return the answer
        return num1 * num2
    # If the lowercase version of the operator is '/', 'divide' or 'quotient'
    elif equation.split(" ")[1].lower() in ["/", "divide", "quotient"]:
        # Return the answer
        return num1 / num2
    # If the lowercase version of the operator is '%', 'remainder' or 'rem'
    elif equation.split(" ")[1].lower() in ["%", "remainder", "rem"]:
        # Return the answer
        return num1 % num2
    # Raise a warning
    raise ValueError("Invalid operation provided.")
a4db1dedffdccc44d7747c4743f4f2eaf8dbd81a
5,577
def api_url(service: str = "IPublishedFileService",
            function: str = "QueryFiles",
            version: str = "v1") -> str:
    """
    Builds a steam web API url.
    :param service: The steam service to attach to.
    :param function: The function to call.
    :param version: The API version.
    :return: The built URL.
    """
    return "https://api.steampowered.com/%s/%s/%s/" % (
        service,
        function,
        version
    )
2538ab8c8035c491611585089ddd3a1625e423cc
5,578
import os


def project_dir(project_name=None):
    """
    Get the root path of the current project.
    :param project_name:
    :return: root path
    """
    PROJECT_NAME = 'stock-technical-analysis' if project_name is None \
        else project_name
    project_path = os.path.abspath(os.path.dirname(__file__))
    root_path = project_path[:project_path.find(
        "{}\\".format(PROJECT_NAME)) + len("{}\\".format(PROJECT_NAME))]
    return root_path
16a304e6c6fa068380e8908569b1f03f2ab3fb68
5,580
def cleaned_reviews_dataframe(reviews_df):
    r"""
    Remove newline "\n" from titles and descriptions, as well as the
    "Unnamed: 0" column generated when loading DataFrame from CSV.
    This is the only cleaning required prior to NLP preprocessing.

    INPUT: Pandas DataFrame with 'title' and 'desc' column names
    OUTPUT: Cleaned DataFrame with combined 'title_desc' column
    """
    reviews_df['title'] = reviews_df['title'].str.replace('\n', '')
    reviews_df['desc'] = reviews_df['desc'].str.replace('\n', '')
    reviews_df['title_desc'] = reviews_df['title'] + reviews_df['desc']
    if 'Unnamed: 0' in set(reviews_df.columns):
        reviews_df = reviews_df.drop('Unnamed: 0', axis=1)
    return reviews_df
8f805f556667f5d734d4d272a2194784d37ce99c
5,581
def not_list(l):
    """Return the element-wise negation of a list of booleans"""
    assert all([isinstance(it, bool) for it in l])
    return [not it for it in l]
6d30f5dd587cdc69dc3db94abae92a7a8a7c610d
5,582
import os
import codecs
import json
import traceback


def read_json(filename):
    """Read a JSON file."""
    try:
        basedir = os.path.dirname(os.path.abspath(__file__))
        indir = os.path.join(basedir, "data")
        readfilename = os.path.join(indir, filename)
        with codecs.open(readfilename, 'r', "utf-8") as f:
            jsonData = json.loads(f.read())
    except IOError as e:
        traceback.print_exc()
        raise Exception(str(e))
    return jsonData
471f4a56abbae11eef262e6676294b5ec50c9198
5,583
def highlight_threshold(image, img_data, threshold, color=(255, 0, 0)):
    """
    Given an array of values for an image, highlights pixels whose value
    is greater than the given threshold.

    :param image: The image to highlight
    :param img_data: The values to use
    :param threshold: The threshold above which pixels should be highlighted
    :param color: The color to highlight pixels with
    :return: The image, with high-value pixels highlighted
    """
    out_pixels = list(image)
    for i in range(len(image)):
        p, e = image[i], img_data[i]
        if e > threshold:
            out_pixels[i] = color
    return out_pixels
bc4b0c9f44f7d45b947c9913f6b6f43b73ea542b
5,584
import numpy


def error_norm(q_numerical, q_exact, dx, p=2):
    """
    Compute the discrete error in q in the p norm

    Parameters
    ----------
    q_numerical : numpy vector
        The numerical solution, an array size (N,) or (N,1)
    q_exact : numpy vector
        The exact solution, whose size matches q_numerical
    dx : float
        The relevant grid spacing
    p : int or 'inf', optional
        The norm. The default is 2.

    Returns
    -------
    error_value : float
        (dx * sum((q_n - q_e)**p))**(1/p)
    """
    if p == 'inf':
        error_value = numpy.max(numpy.abs(q_numerical - q_exact))
    else:
        error_value = (dx * numpy.sum(
            numpy.abs(q_numerical - q_exact) ** p)) ** (1 / p)
    return error_value
e4d33583ee2c5308a2eda9755c44961acba2603d
5,585
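A hand-checked example of the discrete norm above; the grid spacing and the constant error of 0.1 over four points are invented, giving (0.5 * 4 * 0.01)**0.5 for the 2-norm.

import numpy

q_n = numpy.array([1.1, 2.1, 3.1, 4.1])
q_e = numpy.array([1.0, 2.0, 3.0, 4.0])
assert abs(error_norm(q_n, q_e, dx=0.5) - (0.5 * 4 * 0.01) ** 0.5) < 1e-9
assert abs(error_norm(q_n, q_e, dx=0.5, p='inf') - 0.1) < 1e-9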
import numpy


def summarize_list(values):
    """
    Takes a list of integers such as [1,2,3,4,6,7,8] and summarises it
    as a string "1-4,6-8"
    :param values:
    :return: string
    """
    sorted_values = numpy.array(sorted(values))
    summaries = [
        (f'{chunk[0]}-{chunk[-1]}' if len(chunk) > 1 else f'{chunk[0]}')
        for chunk in numpy.split(
            sorted_values,
            numpy.where(numpy.diff(sorted_values) > 1)[0] + 1)
        if len(chunk)
    ]
    return ','.join(summaries)
ea6e3501fb3340e0a78a71096129df5b3400fac9
5,586
def line_edit_style_factory(txt_color='white', tgt_layer_color='white',
                            bg_color='#232323'):
    """Generates a string of a qss style sheet for a line edit. Colors can
    be supplied as strings of color name or hex value. If a color arg
    receives a tuple we assume it is either an rgb or rgba tuple.

    :param txt_color: Color the text of the line edit should be.
    :param tgt_layer_color: The color of the current target layer.
    :param bg_color: The color that will fill the background of the line edit.
    :return: string of qss
    """
    def handle_rgb(color_tuple):
        """Assumes the tuple is rgba or rgb (len 4 or 3)"""
        val = ','.join([str(i) for i in color_tuple])
        if len(color_tuple) == 4:
            rgb = 'rgba({})'.format(val)
        else:
            rgb = 'rgb({})'.format(val)
        return rgb

    if isinstance(bg_color, tuple):
        bg_color = handle_rgb(bg_color)

    style = '''
        QTextEdit,
        QLineEdit {
            border-radius: 11px;
            border: 1px solid transparent;
            background-color: %s;
            color: %s
        }

        QTextEdit:hover,
        QLineEdit:hover {
            border: 1px solid %s
        }

        QTextEdit:focus,
        QLineEdit:focus {
            border: 2px solid %s
        }
        ''' % (bg_color, txt_color, tgt_layer_color, tgt_layer_color)
    return style
10670afc32ec1c19d09dd72fc0e23bb1583ba3af
5,587
import stat
import os


def isdir(path):
    """Like os.path.isdir, but raises an exception on error."""
    return bool(stat.S_ISDIR(os.stat(path).st_mode))
e68179caf5da3453f29ff8796702f494879dcca9
5,588
def CallCountsToMockFunctions(mock_function):
    """A decorator that passes a call count to the function it decorates.

    Examples:
        @CallCountsToMockFunctions
        def foo(call_count):
            return call_count

        [foo(), foo(), foo()]
        [0, 1, 2]
    """
    counter = [0]

    def Result(*args, **kwargs):
        # For some values of `counter`, the mock function would simulate
        # raising an exception, so let the test case catch the exception via
        # `unittest.TestCase.assertRaises()` and to also handle recursive
        # functions.
        prev_counter = counter[0]
        counter[0] += 1
        ret_value = mock_function(prev_counter, *args, **kwargs)
        return ret_value

    return Result
cc621cabdf87ff554bb02c25282e99fadcaaa833
5,589
async def ping():
    """
    .ping: respond with pong
    """
    return "pong"
988165efb5087fd838a2930dbe4ed540b2d70037
5,590
def convert_binary_to_unicode(binary_input):
    """
    Converts a binary string, 18 bits per character, to unicode.
    :param binary_input: String
    :return: String
    """
    unicode_output = ''
    for starting_position in range(0, len(binary_input), 18):
        unicode_output += chr(int(
            binary_input[starting_position:starting_position + 18], 2))
    return unicode_output
ae00c8b31779420662dca09e1ca6c23590b45e38
5,591
def merge_sort(array):
    """
    Merge Sort
    Complexity: O(NlogN)
    """
    if len(array) > 1:
        mid = len(array) // 2
        left = array[:mid]
        right = array[mid:]
        left = merge_sort(left)
        right = merge_sort(right)
        array = []

        # This is a queue implementation. We can also use
        # a deque but slicing it needs the itertools slice
        # function which I didn't want to use. More on that
        # in the stacks and queues chapter.
        l1 = l2 = 0
        while len(left) > l1 and len(right) > l2:
            if left[l1] < right[l2]:
                array.append(left[l1])
                l1 += 1
            else:
                array.append(right[l2])
                l2 += 1
        while len(left) > l1:
            array.append(left[l1])
            l1 += 1
        while len(right) > l2:
            array.append(right[l2])
            l2 += 1
    return array
73b3ac5b950f5788cbc3e7c98d2a4d5aac427929
5,593
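A quick demonstration of the merge sort above on invented input, including the empty-list base case.

assert merge_sort([5, 2, 8, 1, 9, 3]) == [1, 2, 3, 5, 8, 9]
assert merge_sort([]) == []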
def ErrorCorrect(val, fEC):
    r"""
    Calculates the error correction parameter \lambda_{EC}.
    Typical val is 1.16. Defined in Sec. IV of [1].

    Parameters
    ----------
    val : float
        Error correction factor.
    fEC : float
        Error correction efficiency.

    Returns
    -------
    float
        Error correction parameter.
    """
    return val * fEC
83c4483c56c7c3b79060dd070ec68f6dfd5ee749
5,595
def indented_kv(key: str, value: str, indent=1, separator="=", suffix=""):
    r"""Print something as a key-value pair whilst properly indenting.

    This is useful for implementations of `str` and `repr`.

    Args:
        key (str): Key.
        value (str): Value.
        indent (int, optional): Number of spaces to indent. Defaults to 1.
        separator (str, optional): Separator between the key and value.
            Defaults to "=".
        suffix (str, optional): Extra to print at the end. You can set this,
            e.g., to ",\n" or ">". Defaults to no suffix.

    Returns:
        str: Key-value representation with proper indentation.
    """
    key_string = f"{indent * ' '}{key}{separator}"
    value_string = value.strip().replace("\n", "\n" + " " * len(key_string))
    return key_string + value_string + suffix
b27a7ed7a0db4219332fda1e1131c888216141b2
5,596
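A short demonstration of the continuation-line alignment performed by indented_kv; the inputs are invented.

print(indented_kv("key", "line one\nline two"))
# produces:
#  key=line one
#      line two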
def are_in_file(file_path, strs_to_find):
    """Returns true if every string in the given strs_to_find array is found
    in at least one line in the given file. In particular, returns true if
    strs_to_find is empty. Note that the strs_to_find parameter is mutated."""
    infile = open(file_path)
    for line in infile:
        if len(strs_to_find) == 0:
            return True
        index = 0
        while index < len(strs_to_find):
            if strs_to_find[index] in line:
                del strs_to_find[index]
            else:
                index = index + 1
    return len(strs_to_find) == 0
474234a35bf885c5f659f32a25c23580f2014cc2
5,597
def range_(stop):
    """:yaql:range

    Returns an iterator over values from 0 up to stop, not including
    stop, i.e. [0, stop).

    :signature: range(stop)
    :arg stop: right bound for generated list numbers
    :argType stop: integer
    :returnType: iterator

    .. code::

        yaql> range(3)
        [0, 1, 2]
    """
    return iter(range(stop))
28717348bcdcd432388b8a4809c897c70a2fce3f
5,599
def get_libdcgm_path():
    """
    Returns relative path to libdcgm.so.2
    """
    return "../../lib/libdcgm.so.2"
a1067449bdc9012e07c5707ece68c3aae2799694
5,602
import re
import fileinput


def readConfig(filename):
    """Parses a moosicd configuration file and returns the data within.

    The "filename" argument specifies the name of the file from which to
    read the configuration.

    This function returns a list of 2-tuples which associate regular
    expression objects to the commands that will be used to play files
    whose names are matched by the regexps.
    """
    config = []
    expecting_regex = True
    regex = None
    command = None
    for line in fileinput.input(filename):
        # skip empty lines
        if re.search(r'^\s*$', line):
            continue
        # skip lines that begin with a '#' character
        if re.search('^#', line):
            continue
        # chomp off trailing newline
        if line[-1] == '\n':
            line = line[:-1]
        # the first line in each pair is interpreted as a regular expression.
        # note that case is ignored. it would be nice if there was an easy way
        # for the user to choose whether or not case should be ignored.
        if expecting_regex:
            regex = re.compile(line)
            expecting_regex = False
        # the second line in each pair is interpreted as a command
        else:
            command = line.split()
            config.append((regex, command))
            expecting_regex = True
    return config
3b641686b8e6cfaebec668367a12e32bc59104a8
5,603
def text_box_end_pos(pos, text_box, border=0):
    """
    Calculates the end pos for a text box for cv2 images.
    :param pos: Position of text (same as for cv2 image)
    :param text_box: Size of text (same as for cv2 image)
    :param border: Outside padding of textbox
    :return box_end_pos: End xy coordinates for text box
        (end_point for cv2.rectangle())
    """
    box_x, box_y = pos
    text_w, text_h = text_box
    box_end_pos = (box_x + text_w + border, box_y + text_h + border)
    return box_end_pos
5bd2b46fe3456ccdef1407b90256edeb310d92bc
5,604
from typing import List, Tuple


def cnf_rep_to_text(cnf_rep: List[List[Tuple[str, bool]]]) -> str:
    """
    Converts a CNF representation to a text.

    :param cnf_rep: The CNF representation to convert.
    :return: The text representation of the CNF.
    """
    lines = []
    for sentence in cnf_rep:
        sentence_str = ''
        first_in_clause = True
        for atom in sentence:
            if first_in_clause:
                first_in_clause = False
            else:
                sentence_str += ' '
            if atom[1]:
                sentence_str += atom[0]
            else:
                sentence_str += '!' + atom[0]
        lines.append(sentence_str)
    return '\n'.join(lines)
dec3754493cfb0bd9fb5e68d2bab92a40bd0f294
5,605
def reshape_for_linear(images):
    """Reshape the images for the linear model.

    Our linear model requires that the images be reshaped as a 1D tensor.
    """
    n_images, n_rgb, img_height, img_width = images.shape
    return images.reshape(n_images, n_rgb * img_height * img_width)
dffc5e7d0f96c4494443a7480be081b8fe6b4abd
5,606
from json import load
import logging


def read_drive_properties(path_name):
    """
    Reads drive properties from a json formatted file.
    Takes (str) path_name as argument.
    Returns (dict) with (bool) status, (str) msg, (dict) conf
    """
    try:
        with open(path_name) as json_file:
            conf = load(json_file)
            return {"status": True,
                    "msg": f"Read from file: {path_name}",
                    "conf": conf}
    except (IOError, ValueError, EOFError, TypeError) as error:
        logging.error(str(error))
        return {"status": False, "msg": str(error)}
    except Exception:
        logging.error("Could not read file: %s", path_name)
        return {"status": False, "msg": f"Could not read file: {path_name}"}
18b9051801b032f5aa5532da0cfcca8793be8c91
5,608