Columns: content (string, lengths 35 to 416k), sha1 (string, length 40), id (int64, 0 to 710k)
import math


def is_mc_multiplier(multiplier, modulus):
    """
    Checks if multiplier is a MC multiplier w.r.t. modulus.
    :param multiplier: an integer in (0, modulus).
    :param modulus: a prime number.
    :return: True if multiplier is a MC multiplier w.r.t. modulus.
    """
    return (modulus % multiplier) < math.floor(modulus / multiplier)
6d210f8de081ae0a468692b2f93e5145170917e9
702,760
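A usage sketch for is_mc_multiplier above; 16807 is the classic minimal-standard LCG multiplier for the Mersenne prime 2**31 - 1, so it should pass the check (values chosen for illustration):

print(is_mc_multiplier(16807, 2**31 - 1))  # True
print(is_mc_multiplier(2**30, 2**31 - 1))  # False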
def readToTuple(f_path):
    """Reads in a two-col file (tab-delim) and returns a list of tuples"""
    ls = []
    with open(f_path) as f:
        for l in f:
            if l.startswith("#"):
                continue
            ls.append(tuple(l.strip().split("\t")))
    return ls
726d1b4a4682c4e11afbf59e342340e0cf5ccc63
702,761
import string
from collections import Counter


def singlebyte_xor_cipher(hex):
    """
    Takes a hex string and finds the best xor key and returns
    (ResultString, Confidence)
    """
    common = ['n', 'i', 'o', 't', 'e', ' ']
    ret = None
    score = 0
    key = 0
    if not isinstance(hex, bytearray):
        # Python 3 replacement for the Python 2-only hex.decode('hex')
        hex = bytes.fromhex(hex)
    hex = bytearray(hex)
    # try every possible single-byte key
    for i in range(256):
        data = bytearray()
        myscore = 1
        for let in hex:
            data.append(let ^ i)
        # latin-1 maps each byte to one character, so the printable
        # check below behaves as intended on Python 3
        datastr = data.decode('latin-1')
        # Check if all characters are printable
        if not all(c in string.printable for c in datastr):
            continue
        counts = Counter(datastr)
        for let, count in counts.most_common()[:3]:
            if let in common:
                myscore += 1 * common.index(let)
        if myscore > score:
            score = myscore
            ret = datastr
            key = i
    return (ret, score, key)
1611372d05bfa7753d77cc389e58420a2efb0c99
702,762
def df_to_dict_single(df, curation_id=None):
    """
    Purpose:
      Convert a single entry pandas DataFrame into a dictionary and strip
      out indexing information

    :param df: pandas DataFrame with single entry (e.g., use df.loc[] to filter)
    :param curation_id: integer providing the curation_id. Default: Uses most recent

    :return df_dict: dict that contains single entry pandas DF
    """
    df_dict = df.reset_index().to_dict(orient='records')

    if curation_id is None:
        # Uses most recent (reverse ordered)
        df_dict0 = df_dict[0]
    else:
        # Use specified curation_id
        df_dict0 = [sub_dict for sub_dict in df_dict
                    if sub_dict['id'] == curation_id][0]
    return df_dict0
5a63e4cb56f8492adfae48cfd0c54f0fa6f6e104
702,763
import re


def getSisters(tree, t="g"):
    """Some nasty regex to get pairs of sister taxa (only at terminal branches)"""
    if t == "s":
        l = re.findall(r"\(([1-9][0-9]|\d),([1-9][0-9]|\d)\)", tree)
    else:
        l = re.findall(
            r"\(([1-9][0-9]|\d):\d\.\d\d\d,([1-9][0-9]|\d):\d\.\d\d\d\)", tree
        )
    return l
b5c3d26b406847bfd0858181bd3becb90fccb878
702,764
def get_train_feed_dict(model, reviews, win_reviews, batch_length, ote_labels,
                        ts_labels, opn_labels, stm_lm_labels, lr,
                        dropout_rate=1.0, train_flag=True):
    """Construct feed dictionary."""
    feed_dict = dict()
    feed_dict.update({model.reviews: reviews})
    feed_dict.update({model.win_reviews: win_reviews})
    feed_dict.update({model.batch_length: batch_length})
    feed_dict.update({model.asp_labels: ote_labels})
    feed_dict.update({model.ts_labels: ts_labels})
    feed_dict.update({model.opn_labels: opn_labels})
    feed_dict.update({model.stm_lm_labels: stm_lm_labels})
    # feed_dict.update({model.lmf_labels: lmf_labels})
    # feed_dict.update({model.lmb_labels: lmb_labels})
    feed_dict.update({model.lr: lr})
    feed_dict.update({model.dropout_rate: dropout_rate})
    feed_dict.update({model.train_flag: train_flag})
    return feed_dict
d1abf917a6fc0db66102aebda61ce431a508988f
702,765
def increment_ctr(ctr):
    """
    Increments the counter by one.

    Parameters
    ----------
    ctr : string
        Counter

    Returns
    -------
    incremented_counter : string
        Incremented Counter
    """
    ctr_inc_int = int.from_bytes(bytes.fromhex(ctr), byteorder="big") + 1
    return bytes.hex(ctr_inc_int.to_bytes(length=16, byteorder="big"))
0ef04e10283f02b6b7df46cf196493a3ad4a95c8
702,766
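A usage sketch for increment_ctr above, which assumes a 16-byte counter; the second call shows the carry propagating across a byte boundary (hex strings chosen for illustration):

print(increment_ctr("00" * 16))         # 30 zeros followed by '01'
print(increment_ctr("00" * 15 + "ff"))  # 28 zeros followed by '0100'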
def get_base_required_fields_uframe():
    """ Get required fields for base asset in uframe. """
    base_required_fields = [
        'assetId', 'assetType', '@class', 'dataSource', 'deliveryDate',
        'deliveryOrderNumber', 'depthRating', 'description',
        'events', 'firmwareVersion', 'institutionPropertyNumber',
        'institutionPurchaseOrderNumber', 'location', 'manufacturer',
        'mobile', 'modelNumber', 'name', 'notes', 'owner',
        'ooiPropertyNumber', 'ooiPartNumber', 'ooiSerialNumber',
        'serialNumber', 'remoteResources', 'softwareVersion',
        'physicalInfo', 'powerRequirements', 'purchaseDate',
        'purchasePrice', 'shelfLifeExpirationDate', 'uid'
    ]
    return base_required_fields
233e9c8985b82335d32765daab3cc29ae5959354
702,769
def get_comparative_forms(tokens):
    """Identify, color and count comparatives and superlatives"""
    # find comp. forms of adjectives
    comparatives = [t for t in tokens
                    if t.full_pos in ['ADJA', 'ADJD'] and t.mo.comp == 'Comp']
    superlatives = [t for t in tokens
                    if t.full_pos in ['ADJA', 'ADJD'] and t.mo.comp == 'Sup']
    # color
    for t in comparatives:
        t.adj_color.append('Comparative adjectives')
    for t in superlatives:
        t.adj_color.append('Superlative adjectives')
    # count
    return len(comparatives), len(superlatives), len(comparatives) + len(superlatives)
020ce0f3a4959a2525698bfac4c45ac497e5cc67
702,770
def XXX(self, s):
    """
    :type s: str
    :rtype: int
    """
    left, right = 0, 0
    while right < len(s):
        right += 1
        if len(set(s[left:right])) != right - left:
            left += 1
    return right - left
26e08dc0b82985fef73b0b13669c74f3d9f9acef
702,771
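The snippet above is a sliding-window solution to the longest-substring-without-repeating-characters problem; since self is unused, a quick check can pass None for it (illustrative inputs):

print(XXX(None, "abcabcbb"))  # 3 ("abc")
print(XXX(None, "bbbbb"))     # 1 ("b")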
def himmelblauConstraintOne(solution):
    """First restriction

    Args:
        solution (Solution): Candidate solution

    Returns:
        bool: True if it meets the constraint, False otherwise
    """
    return (26 - (solution[0] - 5) ** 2 - solution[1] ** 2) >= 0
f5878d2573559b78fee3b434d537a380abb5e2c8
702,772
from unittest.mock import patch


def patch_try_disk(return_value):
    """
    Mocks the InsightsUploadConf.try_disk method so it returns the given
    parsed file contents.
    """
    def decorator(old_function):
        patcher = patch(
            "insights.client.collection_rules.InsightsUploadConf.try_disk",
            return_value=return_value)
        return patcher(old_function)
    return decorator
95ed09006e0a17bc58e5f320f1ae5dfb203300dd
702,773
import re


def lyric_wikia_capitalize(string, noupper=True):
    """
    lyrics.wikia.com page name rules:
    - Uppercase All Words
    - No all-uppercase WORDS allowed in song titles (but in artist names)
    - Keep StrANgeLy cased words
    See http://lyrics.wikia.com/wiki/LyricWiki:Page_Names
    """
    pattern = re.compile(r"(\w+)", re.UNICODE)
    parts = pattern.split(string)
    result = u""
    for part in parts:
        if not pattern.match(part):
            # no word, keep as it is
            result += part
            continue
        if noupper and part.isupper():
            # everything uppercase? no!
            part = part.lower()
        result += part[0].upper() + part[1:]
    return result
b3d5aefff0715a7ff1e7298b9bdb492bd461c217
702,774
def create_ip_list(addr0, n_addrs):
    """Creates list of IP multicast subscription addresses.

    Args:
        addr0 (str): first IP address in the list.
        n_addrs (int): number of consecutive IP addresses for subscription.

    Returns:
        addr_list (list): list of IP addresses for subscription.
    """
    prefix, suffix0 = addr0.rsplit('.', 1)
    addr_list = [addr0]
    for i in range(1, n_addrs):
        addr_list.append(prefix + '.{}'.format(i + int(suffix0)))
    return addr_list
e29b5c4b9f9ec0dc46916977e4a54bb77a9e74a6
702,775
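A usage sketch for create_ip_list above (illustrative multicast base address):

print(create_ip_list('239.1.1.10', 3))
# ['239.1.1.10', '239.1.1.11', '239.1.1.12']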
import decimal


def decimal_to_num(obj):
    """
    Helper function to convert all decimal valued inputs to the real
    representation of the value (int or float.) This function is recursive.

    Parameters:
        obj (obj): An object to parse for decimals.

    Returns:
        obj: The passed in object with any transformations made.
    """
    if isinstance(obj, list):
        for item in range(len(obj)):  # pylint: disable=consider-using-enumerate
            obj[item] = decimal_to_num(obj[item])
    elif isinstance(obj, dict):
        for key, value in obj.items():
            obj[key] = decimal_to_num(value)
    elif isinstance(obj, decimal.Decimal):
        if obj % 1 == 0:
            obj = int(obj)
        else:
            obj = float(obj)
    return obj
395bb2c1c03d2c41e552b405df4bceb4172fe7aa
702,777
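A usage sketch for decimal_to_num above, the sort of cleanup often applied to DynamoDB-style payloads (values chosen for illustration):

from decimal import Decimal

payload = {'count': Decimal('3'), 'ratios': [Decimal('0.5'), Decimal('2')]}
print(decimal_to_num(payload))  # {'count': 3, 'ratios': [0.5, 2]}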
def count_accuracy_raw(pred_corpus, target_corpus):
    """ Test accuracy, Raw accuracy """
    count_accu = 0
    total = 0
    pred_sents = pred_corpus.split('.')
    target_sents = target_corpus.split('.')
    for pred_sent, target_sent in zip(pred_sents, target_sents):
        pred_list = pred_sent.split(' ')
        targ_list = target_sent.split(' ')
        for pred_token, target_token in zip(pred_list, targ_list):
            total += 1
            if pred_token == target_token:
                count_accu += 1
    return count_accu, total
6935fd0bba49d0529be382c3c78f937618f28647
702,778
def get_callers_info(callers_file, callers_package, callers, res_dir):
    """
    Tries to create a map between the top callers and their associated files,
    using a best-effort approach, by parsing the package declaration, in case
    this is needed. If a file matches a caller in the dataset, it retrieves
    any required information for the caller, and stores it to a dictionary.
    This includes the package, class, method and file names.

    :type callers: list
    :param callers: a list containing the top callers of each cluster
    :type res_dir: string
    :param res_dir: the current session's results directory
    :return a dictionary which is the map between top callers and their
        associated files, containing any required information
    """
    # xml_dir = os.path.join(res_dir, 'xmlFiles')
    callers_info = {}
    for caller_id in range(len(callers)):
        caller = callers[int(caller_id)]
        package_class, method = caller.rsplit('.', 1)
        if callers_package[caller_id] != '':
            classname = package_class.rsplit(callers_package[caller_id] + '.', 1)[1]
        else:
            classname = package_class
        caller_info = {'id': caller_id,
                       'package': callers_package[caller_id],
                       'classname': classname,
                       'method': method,
                       'filename': callers_file[caller_id]}
        # get_caller_info(caller_info, xml_dir)
        callers_info[caller] = caller_info
    return callers_info
37fa8fcc47ba455e6bc47243f8334f2ab26e3962
702,779
def isNumber(n):
    """Returns True if 'n' is a number (made up only of digits)."""
    return all(ch in "0123456789" for ch in n)
40541c759357fe2706fb453947e55dabab513040
702,780
def file_content_to_list(file):
    """
    Append each line of the file to a list

    :param file: The file to transform into a list
    :return: The list
    """
    lst = []
    with open(file) as file_alias:
        for line in file_alias:
            lst.append(line)
    return lst
cee015f6e7121fc513c8944c61cfd225ee0dcf12
702,781
def add_slash(text: str):
    """returns the same text with slash at the end"""
    return text + '/'
a87c204dfc163f5ee814fbda92ad7a8368346893
702,782
import re


def __detect_str_type(data) -> str:
    """
    :param data: string to inspect
    :rtype: str
    """
    r = re.search("[^=]+=[^&]*&*", data)  # application/x-www-form-urlencoded pattern
    if r:
        return "application/x-www-form-urlencoded"
    else:
        return "text/plain"
6dda59aa570070538b54738b83fc69ba129637f0
702,783
def diffusion_coeff(t, sigma):
    r"""Compute the diffusion coefficient of our SDE.

    Args:
        t: A vector of time steps.
        sigma: The $\sigma$ in our SDE.

    Returns:
        The vector of diffusion coefficients.
    """
    return sigma**t
e0b1e1c76f7773a85562adb327c18863c715917a
702,785
from collections import OrderedDict


def get_form_errors(form):
    """
    Django form errors do not obey natural field order,
    this template tag returns non-field and field-specific errors

    :param form: the form instance
    """
    return {
        'non_field': form.non_field_errors(),
        'field_specific': OrderedDict(
            (field, form.errors[field.name])
            for field in form
            if field.name in form.errors
        )
    }
056597492d24dc406c9d952f5cb56c14d0a75fff
702,786
import torch


def log_sum_exp(tensor, dim=-1):
    """ Safe log-sum-exp operation """
    return torch.logsumexp(tensor, dim)
5b6154be4c12576941f7e8d97a26296149982e1b
702,787
from typing import List
from typing import Dict


def get_vrf_group(files_list: List[str]) -> Dict[str, List[str]]:
    """
    Group files by VRF name.
    """
    groups = {}
    for filename_path in files_list:
        filename_path = filename_path.replace("\\", "/")
        # print(filename_path)
        if "show" in filename_path or "display" in filename_path:
            continue
        # Example) "switch-001_192.168.252.201_20191028-142654_vrf_10.log"
        #          vrf_name = "_vrf_10.log"
        pos1 = filename_path.rfind("_")
        pos2 = filename_path[:pos1].rfind("_")
        vrf_name = filename_path[pos2:]
        # Group by vrf_name and store a list of file names in the list.
        # print(vrf_name)
        if vrf_name not in groups:
            groups[vrf_name] = []
        groups[vrf_name].append(filename_path)
    return groups
348f1c10f4bd054ca2f45f36b5420a971b52e4cf
702,788
def scr_total(bscr, scr_op):
    """
    This function simply adds the SCR_Op to the BSCR
    """
    return bscr + scr_op
7d1711f75abae59b79cf62f6e64daeb7e4c556eb
702,789
import re

import six


def load_tff_dat(fname, processor=None):
    """Read tff.dat or dff.dat files generated by the tff command

    Parameters
    ----------
    fname : file or str
        File, or filename
    processor : callable or None
        A final output processor, by default a tuple of tuples is returned

    Returns
    -------
    Whatever the processor returns, or a tuple of tuples
    """
    processor = processor or tuple

    def gen(fp):
        buff = []
        for line in fp:
            line = re.sub(r"\*{2,}", "nan", line)  # remove all ****...
            # new source?
            if buff and not line.startswith(" "):
                yield tuple(buff)
                buff = []
            buff.extend(line.strip().split())
        if buff:
            yield tuple(buff)

    if isinstance(fname, six.string_types):
        with open(fname) as fp:
            generator = gen(fp)
            return processor(generator)
    generator = gen(fname)
    return processor(generator)
55f9ba3915c2d31cb83b8ea26de996f8f29e5e43
702,790
from typing import List


def is_luhn(string: str) -> bool:
    """
    Perform Luhn validation on an input string.

    Algorithm:
    * Double every other digit starting from the 2nd last digit.
    * Subtract 9 if the number is greater than 9.
    * Sum the numbers.

    >>> test_cases = [79927398710, 79927398711, 79927398712, 79927398713,
    ...               79927398714, 79927398715, 79927398716, 79927398717,
    ...               79927398718, 79927398719]
    >>> test_cases = list(map(str, test_cases))
    >>> list(map(is_luhn, test_cases))
    [False, False, False, True, False, False, False, False, False, False]
    """
    check_digit: int
    _vector: List[str] = list(string)
    __vector, check_digit = _vector[:-1], int(_vector[-1])
    vector: List[int] = [*map(int, __vector)]
    vector.reverse()
    for idx, i in enumerate(vector):
        if idx & 1 == 0:
            doubled: int = vector[idx] * 2
            if doubled > 9:
                doubled -= 9
            check_digit += doubled
        else:
            check_digit += i
    return check_digit % 10 == 0
92253489a18efc902198d5eb3fb93a06a74a3246
702,791
import sqlite3


def handle_artist(command):
    """ Process the artist command """
    conn = sqlite3.connect('myjazzalbums.sqlite')
    cur = conn.cursor()
    if command[-1] == "?":
        artist_name = command[6:-1].strip().title()
    else:
        artist_name = command[6:].strip().title()
    if artist_name == "Kenny G":
        response = "Why the f$ck would Ian have Kenny G in his collection?"
    else:
        cur.execute("SELECT Album.title FROM Album JOIN Artist "
                    "ON Artist.id = Album.artist_id WHERE Artist.name = ?",
                    (artist_name,))
        count = 0
        response = 'Albums for *' + artist_name + '*:\n'
        for row in cur:
            response += row[0] + '\n'
            count = count + 1
        response = "I have " + str(count) + " " + response
    cur.close()
    return response
4395b4d3a25f3ea5e10accd938a51df342913e17
702,792
import glob


def patternMatch(pattern, dir='./'):
    """
    :pattern: A file pattern to match the desired output. Input to a glob,
        so use traditional unix wildcarding.
    :dir: The directory to search.
    :returns: list of matching files in the target directory
    """
    files = glob.glob(dir + pattern)
    return files
d5e9b1d531cdfa3ebca3baea2b8e273621df3357
702,793
def removeBottomMargin(image, padding):
    """Remove the bottom margin of width = padding from an image

    Args:
        image (PIL.Image.Image): A PIL Image
        padding (int): The padding in pixels

    Returns:
        PIL.Image.Image: A PIL Image
    """
    return image.crop((0, 0, image.width, image.height - padding))
69cd12d6c3ed0b857bae3f42c34e9754fa3620f3
702,794
def num(val):
    """Return val as an int, float, or bool, depending on what it most
    closely resembles."""
    if isinstance(val, (float, int)):
        return val
    elif val in ('True', 'False'):
        return val == 'True'
    elif isinstance(val, str):
        try:
            return int(val)
        except ValueError:
            pass
        try:
            return float(val)
        except ValueError:
            raise ValueError("Could not convert '{}' to either "
                             "int or float.".format(val))
    raise RuntimeError("Invalid value '{}' given to num.".format(val))
552219ed6e97013c0b367542f68ae2e5e9f98300
702,795
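A usage sketch for num above (illustrative inputs):

print(num("42"))    # 42 (int)
print(num("2.5"))   # 2.5 (float)
print(num("True"))  # True (bool)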
def fib(id):
    """
    id: index (zero-based)
    returns: Fibonacci number for the given index

    id:  0 1 2 3 4 5 6 7
    Fib: 0 1 1 2 3 5 8 13
    """
    if id < 0:
        return 0
    if id == 0:
        return 0
    if id == 1:
        return 1
    first = 0
    second = 1
    counter = 2
    fib_num = 0
    while counter <= id:
        fib_num = first + second
        first = second
        second = fib_num
        counter = counter + 1
    return fib_num
fc5c58c364417cdfd6c5276da644d259258af613
702,796
def flatten_list(unflattened_list):
    """
    Takes a list of iterables/non-iterables and outputs a list of
    non-iterables.
    """
    flattened_list = []
    for item in unflattened_list:
        if hasattr(item, '__iter__') and not isinstance(item, str):
            flattened_list.extend(item)
        else:
            flattened_list.append(item)
    return flattened_list
cbf7297bd10312a47487fd814575323881940ef5
702,797
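A usage sketch for flatten_list above; note that strings are treated as atoms, not iterables (illustrative input):

print(flatten_list([1, [2, 3], "ab", (4,)]))  # [1, 2, 3, 'ab', 4]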
def sector_code_map(industry_code):
    """
    Map a Guozheng (CN) industry classification code to a sector code.

    The 10 first-level Guozheng industries are converted into 11 sector
    groups, with real estate broken out separately.
    """
    if industry_code[:3] == 'C01':
        return 309
    if industry_code[:3] == 'C02':
        return 101
    if industry_code[:3] == 'C03':
        return 310
    if industry_code[:3] == 'C04':
        return 205
    if industry_code[:3] == 'C05':
        return 102
    if industry_code[:3] == 'C06':
        return 206
    if industry_code.startswith('C07'):
        if industry_code[:5] == 'C0703':
            return 104
        else:
            return 103
    if industry_code[:3] == 'C08':
        return 311
    if industry_code[:3] == 'C09':
        return 308
    if industry_code[:3] == 'C10':
        return 207
    return -1
73d55282c0e6228747d91b09b5ee733aecf4766c
702,798
def aten_transpose(mapper, graph, node):
    """Construct a PaddleLayer for matrix transposition.

    TorchScript example:
        %715 : Tensor = aten::transpose(%x.21, %704, %705)
    Argument meanings:
        %715 (Tensor): output, the transposed matrix.
        %x.21 (Tensor): the Tensor to be transposed.
        %704 (int): the first dimension of the transpose.
        %705 (int): the second dimension of the transpose.
    """
    scope_name = mapper.normalize_scope_name(node)
    output_name = mapper._get_outputs_name(node)[0]
    layer_outputs = [output_name]
    layer_inputs = {}
    layer_attrs = {}
    inputs_name, inputs_node = mapper._get_inputs_name(node)
    # get the list of outputs of the current node
    current_outputs = [output_name]
    # process input 0, i.e. %x.21
    mapper._check_input(graph, inputs_node[0], inputs_name[0],
                        current_outputs, scope_name)
    layer_inputs["x"] = inputs_name[0]
    # process input 1, i.e. %704
    mapper._check_input(graph, inputs_node[1], inputs_name[1],
                        current_outputs, scope_name)
    dim1 = inputs_name[1]
    # process input 2, i.e. %705
    mapper._check_input(graph, inputs_node[2], inputs_name[2],
                        current_outputs, scope_name)
    dim2 = inputs_name[2]
    # get the list of inputs of the current node
    current_inputs = list(layer_inputs.values())
    graph.add_layer(
        "prim.shape",
        inputs={"input": inputs_name[0]},
        outputs=[output_name + "_shape"],
        scope_name=scope_name)
    current_outputs.append(output_name + "_shape")
    graph.add_layer(
        "prim.len",
        inputs={"input": output_name + "_shape"},
        outputs=[output_name + "_len"],
        scope_name=scope_name)
    current_outputs.append(output_name + "_len")
    current_inputs.append(output_name + "_shape")
    graph.add_layer(
        "prim.len2list",
        inputs={"len": output_name + "_len"},
        outputs=[output_name + "_list"],
        scope_name=scope_name)
    current_outputs.append(output_name + "_list")
    current_inputs.append(output_name + "_len")
    graph.add_layer(
        "prim.check_dim",
        inputs={"len": output_name + "_len", "dim": dim1},
        outputs=[dim1 + "_new"],
        scope_name=scope_name)
    graph.add_layer(
        "prim.check_dim",
        inputs={"len": output_name + "_len", "dim": dim2},
        outputs=[dim2 + "_new"],
        scope_name=scope_name)
    graph.add_layer(
        "prim.replaceitem",
        inputs={
            "list": output_name + "_list",
            "index": dim1 + "_new",
            "item": dim2 + "_new"
        },
        outputs=[],
        scope_name=scope_name)
    graph.add_layer(
        "prim.replaceitem",
        inputs={
            "list": output_name + "_list",
            "index": dim2 + "_new",
            "item": dim1 + "_new"
        },
        outputs=[],
        scope_name=scope_name)
    graph.add_layer(
        "paddle.transpose",
        inputs=layer_inputs,
        outputs=layer_outputs,
        scope_name=scope_name,
        perm=output_name + "_list")
    return current_inputs, current_outputs
3922de1a23ce0725b4a8fa6d1f748ad1ad3bcf81
702,799
def get_syst(syst, *index):
    """Helper function to decode the systematic uncertainties
    Systematic could be
    - just a string
    - an object with index: obj[ibin]
    - a kind of function: func(ibin)
    """
    if isinstance(syst, str):
        return syst
    elif syst and hasattr(syst, '__getitem__'):
        return str(syst[index])
    elif syst and callable(syst):
        return str(syst(*index))
    elif syst:
        raise AttributeError("Invalid systematic %s/%s" % (syst, type(syst)))
    return ''
37b2b39245587da16345752e02759d2c94c93415
702,800
import weakref


def weak_arg(arg):
    """
    Create a weak reference to arg and wrap the function so that the
    dereferenced weakref is passed as the first argument. If arg has been
    deleted then the function is not called.
    """
    # Create the weak reference
    weak_arg = weakref.ref(arg)

    def decorator(function):
        # We need multiple wrappers so traits can find the number of
        # arguments. They all just deref the weak reference and then call
        # the function if it is not None.
        def wrapper0():
            arg = weak_arg()
            if arg is not None:
                return function(arg)

        def wrapper1(arg1):
            arg = weak_arg()
            if arg is not None:
                return function(arg, arg1)

        def wrapper2(arg1, arg2):
            arg = weak_arg()
            if arg is not None:
                return function(arg, arg1, arg2)

        def wrapper3(arg1, arg2, arg3):
            arg = weak_arg()
            if arg is not None:
                return function(arg, arg1, arg2, arg3)

        def wrapper4(arg1, arg2, arg3, arg4):
            arg = weak_arg()
            if arg is not None:
                return function(arg, arg1, arg2, arg3, arg4)

        def wrappern(*args):
            arg = weak_arg()
            if arg is not None:
                function(arg, *args)

        # Return the correct wrapper depending on the arg count
        # (function.func_code is Python 2; __code__ is the Python 3 name)
        args = function.__code__.co_argcount - 1
        if args == 0:
            return wrapper0
        elif args == 1:
            return wrapper1
        elif args == 2:
            return wrapper2
        elif args == 3:
            return wrapper3
        elif args == 4:
            return wrapper4
        else:
            return wrappern

    return decorator
90da143dada896fe7988b4f3003110a3b678b5ab
702,801
def dya(series, n=1):
    """Difference over n years, annualized"""
    return (series - series.shift(n * series.index.freq.periodicity)) / n
4f414beec19d9d6b5acfc87890b7ea2681f8d251
702,802
def find_projects(company_name, project_list):
    """returns list of projects associated with company_name

    :param company_name: name of company to return projects for
    :type company_name: str
    :param project_list: list of projects as dictionaries
    :type project_list: list
    :return: list
    """
    result = []
    for project in project_list:
        if project['company'] == company_name:
            result.append(project)
    return result
e2e193aa103bec6620fb17679ff02de92c3f299e
702,803
import logging


def log_to_stdout(level=15):
    """
    Adds stdout to the logging stream and sets the level to 15 by default
    """
    logger = logging.getLogger("fetch_data")
    # remove existing stream handlers (iterate over a copy of the list,
    # since we mutate it while iterating)
    for handler in list(logger.handlers):
        if isinstance(handler, logging.StreamHandler):
            logger.handlers.remove(handler)
    # add the new logger with the formatting
    logFormatter = logging.Formatter(
        "%(asctime)s [%(name)s] %(message)s", datefmt="%Y-%m-%d %H:%M:%S"
    )
    streamHandler = logging.StreamHandler()
    streamHandler.setFormatter(logFormatter)
    logger.addHandler(streamHandler)
    logger.setLevel(level)
    return logger
ec5f18ca6349664687621f298c17122b6733c8be
702,804
def join_lines(lines):
    """Joins `lines` with newlines and returns a single string.

    You would think you could do that with | join("\n") but you can't — see
    https://github.com/debops/ansible-sshkeys/issues/4
    """
    return ''.join("%s\n" % l for l in lines)
e0041b302e662e02b153bc7b0ff8b3cb558bb7a5
702,805
import shutil
import sys


def withprogressbar(func):
    """Decorates ``func`` to display a progress bar while running.

    The decorated function can yield values from 0 to 100 to
    display the progress.
    """
    def _func_with_progress(*args, **kwargs):
        max_width, _ = shutil.get_terminal_size()
        gen = func(*args, **kwargs)
        while True:
            try:
                progress = next(gen)
            except StopIteration as exc:
                sys.stdout.write('\n')
                return exc.value
            else:
                # Build the displayed message so we can compute
                # how much space is left for the progress bar itself.
                message = '[%s] {}%%'.format(progress)
                # Add 3 characters to cope for the %s and %%
                bar_width = max_width - len(message) + 3
                filled = int(round(bar_width / 100.0 * progress))
                spaceleft = bar_width - filled
                bar = '=' * filled + ' ' * spaceleft
                sys.stdout.write((message + '\r') % bar)
                sys.stdout.flush()

    return _func_with_progress
143fed7cbb36d6d1f01525967ed825d5fa0c6346
702,806
def health():
    """ Health check endpoint """
    return "Hello World!"
ef7938daeadd74361d954153c723dd136590962b
702,807
import math
import re
import sys


def validate_name(string, name_type):
    """ Validates the node & property names """
    if type(string) != str and math.isnan(string):
        return None
    match = None
    if name_type == 'node':
        match = re.search(r'[^a-zA-Z_]', string)
    elif name_type == 'property':
        match = re.search(r'[^a-zA-Z_0-9]', string)
    if match:
        sys.exit('Illegal character {0} found in node/property name {1}. '
                 'Only letters and underscores (plus digits for properties) '
                 'are allowed.'.format(match, string))
    return string.lower()
122ac61657d088a5dccbacc3bae8fc80183ce3d2
702,808
import sys


def is_indy_sdk_module_installed():
    """Check whether indy (indy-sdk) module is installed.

    Returns:
        bool: Whether indy (indy-sdk) is installed.
    """
    try:
        # Check if already imported
        if "indy" in sys.modules:
            return True
        # Try to import
        import indy  # noqa: F401

        return True
    except ModuleNotFoundError:
        # Not installed if import went wrong
        return False
8dc17e302c7a07482fdc43853d6a535a1ef2d847
702,809
def test_retrieve_and_encode_simple(test_client, test_collection_name):
    """Test retrieving documents and encoding them with vectors."""
    VECTOR_LENGTH = 100

    def fake_encode(x):
        return test_client.generate_vector(VECTOR_LENGTH)

    # with TempClientWithDocs(test_client, test_collection_name, 100) as client:
    test_client.insert_documents(test_collection_name,
                                 test_client.create_sample_documents(100))
    results = test_client.retrieve_and_encode(test_collection_name,
                                              models={'country': fake_encode})
    assert list(test_client.collection_schema(test_collection_name)[
        'country_vector_'].keys())[0] == 'vector'
    assert len(results['failed_document_ids']) == 0
    assert 'country_vector_' in test_client.collection_schema(test_collection_name)
    docs = test_client.retrieve_documents(test_collection_name)['documents']
    assert len(docs[0]['country_vector_']) == VECTOR_LENGTH
4fb6b1ea0278575ff53778dbefe8fb4f12a9abc2
702,810
import os
import json


def load_model_data(root_dir, model_name):
    """
    Used for models split in several files.
    Loads subscripts_dic, namespace and modules dictionaries

    Parameters
    ----------
    root_dir: str
        Path to the model file.
    model_name: str
        Name of the model without file type extension (e.g. "my_model").

    Returns
    -------
    namespace: dict
        Translation from original model element names (keys) to python
        safe function identifiers (values).
    subscripts: dict
        Dictionary describing the possible dimensions of the stock's
        subscripts.
    modules: dict
        Dictionary containing view (module) names as keys and a list of
        the corresponding variables as values.
    """
    with open(os.path.join(root_dir,
                           "_subscripts_" + model_name + ".json")) as subs:
        subscripts = json.load(subs)

    with open(os.path.join(root_dir,
                           "_namespace_" + model_name + ".json")) as names:
        namespace = json.load(names)

    # the _modules.json in the sketch_var folder shows to which module each
    # variable belongs
    with open(os.path.join(root_dir, "modules_" + model_name,
                           "_modules.json")) as mods:
        modules = json.load(mods)

    return namespace, subscripts, modules
f26a491003c43224e08daf361bf95a6be3756f36
702,811
def run():
    """Run example"""
    return 'running...'
702a6af69b98326b8a557929e06d2e8bc4308660
702,813
import os


def validate(args):
    """
    Check that the CLI arguments are valid.
    """
    if not args.source_path:
        print("Error: You need to specify a source path.")
        return False
    if not os.path.isdir(args.source_path):
        print("Error: Source path is not a folder. You can run autosub directly.")
        return False
    return True
5d140178fd803f882e273f36bdf59e73135550ef
702,814
def freq_correctionbis(cpt_matrice, beta):
    """
    Corrects the counts with equiprobabilities (pseudocounts)
    """
    for i in range(cpt_matrice.shape[0]):
        for j in range(cpt_matrice.shape[1]):
            cpt_matrice[i, j] = (cpt_matrice[i, j] + (1 / 20)) / (1 + beta)
    return cpt_matrice
570393b8df2a3a8bf675d7ce47950d8290680779
702,815
def scale_from_internal(vec, scaling_factor, scaling_offset):
    """Scale a parameter vector from internal scale to external one.

    Args:
        vec (np.ndarray): Internal parameter vector.
        scaling_factor (np.ndarray or None): If None, no scaling factor is used.
        scaling_offset (np.ndarray or None): If None, no scaling offset is used.

    Returns:
        np.ndarray: vec with external scale
    """
    if scaling_factor is not None:
        vec = vec * scaling_factor
    if scaling_offset is not None:
        vec = vec + scaling_offset
    return vec
c7f2471d2a7776f8756d709d0288163aab3594ae
702,816
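A usage sketch for scale_from_internal above (illustrative numpy values):

import numpy as np

internal = np.array([0.0, 1.0, 2.0])
print(scale_from_internal(internal,
                          np.array([10.0, 10.0, 10.0]),
                          np.array([5.0, 5.0, 5.0])))
# [ 5. 15. 25.]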
def filter_pre_string(_string: str, lines_to_cut: int) -> str:
    """
    Filter the xml out of html

    :param str _string:
    :param int lines_to_cut:
    """
    filtered_array = _string.splitlines()[lines_to_cut:]
    filtered_string = "".join(filtered_array)
    filtered_string = filtered_string.strip()
    return filtered_string
1bde68fbfa3f87360b47ece45b64782dcc3bcde6
702,818
def ssum(self, **kwargs):
    """Calculates and prints the sum of element table items.

    APDL Command: SSUM

    Notes
    -----
    Calculates and prints the tabular sum of each existing labeled result
    item [ETABLE] for the selected elements. If absolute values are
    requested [SABS,1], absolute values are used.
    """
    command = "SSUM,"
    return self.run(command, **kwargs)
299a14a2e21454a304d6d023d2b3a3709b24eb18
702,819
def get_nth_combination(
    iterable,
    *,
    items: int,
    index: int,
):
    """
    Credit to:
    https://docs.python.org/3/library/itertools.html#itertools-recipes

    Examples:
        >>> wallet = [1] * 5 + [5] * 2 + [10] * 5 + [20] * 3
        >>> get_nth_combination(wallet, items=3, index=454)
        (20, 20, 20)
        >>> get_nth_combination(wallet, items=3, index=-1)
        (20, 20, 20)
        >>> get_nth_combination(wallet, items=3, index=455)
        Traceback (most recent call last):
        ...
        IndexError: Index 455 out of bounds
        >>> get_nth_combination(wallet, items=3, index=-454)
        (1, 1, 1)
        >>> get_nth_combination(wallet, items=len(wallet), index=0)
        (1, 1, 1, 1, 1, 5, 5, 10, 10, 10, 10, 10, 20, 20, 20)
        >>> get_nth_combination(wallet, items=len(wallet), index=1)
        Traceback (most recent call last):
        ...
        IndexError: Index 1 out of bounds
    """
    space = tuple(iterable)
    n = len(space)
    if items < 1:
        msg = 'Argument must be positive'
        raise ValueError(msg)
    if items > n:
        msg = f'Sample space has {n} items; Argument items cannot exceed that'
        raise ValueError(msg)
    c = 1
    k = min(items, n - items)
    for i in range(1, k + 1):
        c = c * (n - k + i) // i
    orig_index = index
    if index < 0:
        index += c
    if index < 0 or index >= c:
        msg = f'Index {orig_index} out of bounds'
        raise IndexError(msg)
    result = []
    while items:
        c, n, items = c * items // n, n - 1, items - 1
        while index >= c:
            index -= c
            c, n = c * (n - items) // n, n - 1
        result.append(space[-(n + 1)])
    return tuple(result)
6ed186d260ca86c0f16d383576e69402d079ed9b
702,820
def produce_columns(df):
    """Reports columns for use in model."""
    columnsNames = []
    for i in df.columns:
        if i not in ('Survived', 'PassengerId'):
            columnsNames.append(i)
    return columnsNames
ce527118fc099ac05d20dbb981f5f3231e9070fa
702,821
def fixture_ref_6_2_3_5():
    """Reference for (load, bins) of 6, [2, 3, 5]."""
    ref = {
        "load": 6,
        "bins": [2, 3, 5],
        "solutions": {
            key: [(3, 3)]
            for key in ["length", "capacity", "combo"]
        }
    }
    return ref
7b7121e404b4daf3277a4ab6f33eb955cd6d47e7
702,822
def recreate_knob_from_optimizer_values(variables, opti_values):
    """ recreates knob values from a variable """
    knob_object = {}
    # create the knob object based on the position of the opti_values and
    # variables in their array
    for idx, val in enumerate(variables):
        knob_object[val] = opti_values[idx]
    return knob_object
8c253ea75f1dbb8c27cde21b2208b5386d75930e
702,824
import typing
import sys
import argparse
import multiprocessing


def parse_args(args: typing.Sequence[str] = sys.argv[1:]):
    """
    Parse the command-line arguments

    :param args: argument strings
    :return: the options dictionary
    """
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--input",
        required=True,
        help="The directory of the input precomputed volume"
    )
    parser.add_argument(
        "--input-format",
        default="blockfs",
        help="The precomputed volume input format. Default is blockfs"
    )
    parser.add_argument(
        "--output",
        required=True,
        help="The output directory for the blockfs precomputed volume"
    )
    parser.add_argument(
        "--power",
        type=float,
        default=0.09,
        help="The power of the exponent in the kernel, e**(-<power>*x)"
    )
    parser.add_argument(
        "--kernel-size",
        type=int,
        help="The kernel length. This must be an odd number. The default is "
             "the nearest odd integer where the kernel value is less than .01"
    )
    parser.add_argument(
        "--iterations",
        type=int,
        help="The number of iterations for the Richardson-Lucy loop",
        default=10
    )
    parser.add_argument(
        "--levels",
        type=int,
        default=5,
        help="# of levels in the resulting precomputed volume"
    )
    parser.add_argument(
        "--voxel-size",
        default="1.8,1.8,2.0",
        help="The voxel size in microns: three comma-separated numbers. "
             "Default is 1.8,1.8,2.0"
    )
    parser.add_argument(
        "--n-cores",
        type=int,
        default=multiprocessing.cpu_count(),
        help="# of processes used to perform the deconvolution"
    )
    parser.add_argument(
        "--n-writers",
        type=int,
        default=min(13, multiprocessing.cpu_count()),
        help="# of processes used when writing the blockfs volume"
    )
    parser.add_argument(
        "--use-gpu",
        action="store_true",
        help="Use GPU for deconvolution."
    )
    parser.add_argument(
        "--n-blocks-per-process",
        type=int,
        default=2,
        help="# of blockfs blocks on a side to process at the same time. "
             "A larger number amortizes the padding while a smaller number "
             "conserves memory."
    )
    return parser.parse_args(args)
6088425154379e77752ff4dfa52b86fbba8df429
702,825
def _create_validate(cls, validators):
    """ Create a new validate method with extra validator functions. """
    def validate(self, value):
        super(cls, self).validate(value)
        for validator in validators:
            validator(value)
    validate.__doc__ = validators[0].__doc__
    return validate
a3f8b1ef5255f1c2f359b82539aa911b6d136514
702,826
def _status(self):
    """status -> Returns the Shot status. None if no Status is set."""
    status = None
    tags = self.tags()
    for tag in tags:
        if tag.metadata().hasKey('tag.status'):
            status = tag.metadata().value('tag.status')
    return status
eb8fd85218f6e745f09e4984db8b6c74ba464dcb
702,827
def capped(value, minimum=None, maximum=None, key=None, none_ok=False):
    """
    Args:
        value: Value to cap
        minimum: If specified, value should not be lower than this minimum
        maximum: If specified, value should not be higher than this maximum
        key (str | None): Text identifying 'value' (ValueError is raised if
            provided and `value` is not within bounds)
        none_ok (bool): True if `None` value is considered OK

    Returns:
        `value` capped to `minimum` and `maximum` (if it is outside of those bounds)
    """
    if value is None:
        if none_ok:
            return None
        if key and not none_ok:
            raise ValueError("'None' is not acceptable for '%s'" % key)
        return minimum if minimum is not None else maximum
    if minimum is not None and value < minimum:
        if key:
            raise ValueError("'%s' value %s is lower than minimum %s"
                             % (key, value, minimum))
        return minimum
    if maximum is not None and value > maximum:
        if key:
            raise ValueError("'%s' value %s is greater than maximum %s"
                             % (key, value, maximum))
        return maximum
    return value
663a63041699f4e4f52886adbd49423bf52c0282
702,828
def row(ctx):
    """Get this cell's row."""
    return ctx["cell"].row
4cfc89daa3ca771359acd762d716316209ca0eb4
702,829
def above_the_line(x_array, x1, x2):
    """
    Return states above a specified line defined by (x1, x2).
    We assume that a state has only two coordinates.

    Parameters
    ----------
    x_array: `np.array`
        A 2-d matrix. Usually, an embedding for data points.
    x1: `np.array`
        A list or array of two entries.
    x2: `np.array`
        A list or array of two entries.

    Returns
    -------
    A boolean array.
    """
    return (x_array[:, 1] - x1[1]) > ((x2[1] - x1[1]) / (x2[0] - x1[0])) * (
        x_array[:, 0] - x1[0]
    )
d20b5d462b7254a93f7896b592ae25eae26075a7
702,831
import typing
import os
import re


def guess_track_title(fname: str) -> typing.Tuple[int, str]:
    """ Get the track number and title from a filename """
    basename, _ = os.path.splitext(fname)
    if match := re.match(r'([0-9]+)([^0-9]*)$', basename):
        return int(match.group(1)), match.group(2).strip().title()
    return 0, basename.title()
7a45243b33239cfc137318e7118f32e8b114a492
702,832
import torch


def _add_embedding_layer(model_1, model_2):
    """
    Returns an embedding layer with a weight matrix of the following
    structure: [MODEL_1 EMBEDDING MATRIX ; MODEL_2 EMBEDDING MATRIX]
    """
    result_layer = torch.nn.Embedding(
        model_1.num_embeddings, model_1.embedding_dim + model_2.embedding_dim
    )
    result_layer.weight = torch.nn.Parameter(
        torch.cat((model_1.weight.data, model_2.weight.data), dim=1)
    )
    return result_layer
2b4f4f3e36d56c57302cdcbf07c6cbbdb5165e11
702,833
def generate_key_lu_dict(
        dict_tuple_keys,
        unique_identifier,
        enduses,
        sectors,
        technologies
):
    """Generate look_up keys to position in 'load_profiles'

    Arguments
    ----------
    dict_tuple_keys : dict
        Already existing lu keys
    unique_identifier : string
        Unique identifier of load shape object
    enduses : list
        List with enduses
    sectors : list
        List with sectors
    technologies : list
        List with technologies

    Returns
    -------
    dict_tuple_keys : str
        Lookup position in dict
    """
    for enduse in enduses:
        for sector in sectors:
            for technology in technologies:
                dict_tuple_keys[(enduse, sector, technology)] = unique_identifier
    return dict_tuple_keys
adc2fd7357d16b3026ae7d0d8f363919b61f8525
702,834
def rdns_domain(network):
    """Transform :py:class:`netaddr.IPNetwork` object to rDNS zone name"""
    if network.prefixlen == 0:
        return "ip6.arpa" if network.version == 6 else "in-addr.arpa"
    if network.version == 4:
        return ".".join(map(str, reversed(
            network.ip.words[:network.prefixlen // 8]))) + ".in-addr.arpa"
    elif network.version == 6:
        return ".".join(map(str, reversed(list(
            "".join(hex(n)[2:].rjust(4, "0") for n in network.ip.words)
        )[:network.prefixlen // 4]))) + ".ip6.arpa"
b94656f270d39ac175efb8bc8a99af0d29dad7df
702,835
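A usage sketch for rdns_domain above, assuming the netaddr package is installed (illustrative documentation prefix):

from netaddr import IPNetwork

print(rdns_domain(IPNetwork('192.0.2.0/24')))  # 2.0.192.in-addr.arpa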
def to_undirected(graph):
    """Returns an undirected view of the graph `graph`.

    Identical to graph.to_undirected(as_view=True)
    Note that graph.to_undirected defaults to `as_view=False`
    while this function always provides a view.
    """
    return graph.to_undirected(as_view=True)
96ceb4e2d7dbe2a9c120b8e1ac7cad0ef2b2c6ae
702,836
import ctypes
import sys


def c_str(string: str) -> ctypes.c_char_p:
    """Create ctypes char * from a Python string."""
    if sys.version_info[0] > 2:
        py_str = lambda x: x.encode('utf-8')
    else:
        py_str = lambda x: x
    return ctypes.c_char_p(py_str(string))
e2c783d5d72eece66aef14245e2770bc8a43588b
702,837
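A usage sketch for c_str above; the resulting object carries the encoded bytes for C interop:

p = c_str("hello")
print(p.value)  # b'hello'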
def getText(rng):
    """
    Get the pure text that is included in a js range

    @param rng js range to get the text of
    @return string of the range's text
    """
    return rng.toString()
71c7c2eccb850ab1d807496d8033bb426b467492
702,838
def _chain_validator(*funcs):
    """Chain a series of validators."""
    def chained(value):
        for func in funcs:
            value = func(value)
        return value
    return chained
40082602f92a28160306bfff4d7b61703ea2e962
702,840
from typing import MutableMapping
from typing import Any


def remove_keys(
    _dict: MutableMapping[str, Any] | None, keys: list[str]
) -> MutableMapping[str, Any]:
    """Remove keys from a dictionary."""
    if not _dict:
        return {}
    new = dict(_dict)
    for key in keys:
        new.pop(key, None)
    return new
7a0ee8482eea69b0be7f7ecfd41355206adcf01c
702,841
import struct


def read64(f):
    """Read 8 bytes from a file and return as a 64-bit unsigned int
    (little endian).
    """
    return struct.unpack("<Q", f.read(8))[0]
4a055188bd9db074ca3807771d779eccb25e5484
702,843
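A usage sketch for read64 above with an in-memory stream (illustrative value):

import io
import struct

buf = io.BytesIO(struct.pack("<Q", 1024))
print(read64(buf))  # 1024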
import re


def contain_static(val):
    """
    Check if URL is a static resource file, i.e. if the URL pattern
    contains a known static file extension.
    """
    if re.match(r'^.*\.(jpg|jpeg|gif|png|css|js|ico|xml|rss|txt).*$',
                val, re.M | re.I):
        # Static file, return True
        return True
    else:
        # Not a static file, return False
        return False
9b69c0e8c69f9a97abbea82855d0c387de2a381a
702,844
def make_univariate(F, T):
    """
    Given a homogeneous bivariate polynomial `F(xi,xj)`, sitting inside a
    ring `K[x0,x1,x2]`, dehomogenise it into ring T (univariate), for later
    factoring.
    """
    assert F.is_homogeneous() and len(F.variables()) < 3
    R = F.base_ring()
    S = F.parent()
    x0, x1, x2 = S.gens()
    eta = T.gen()
    if F.degree(x0) > 0:
        Fa = F(eta, 1, 1)
    elif F.degree(x1) > 0:
        Fa = F(1, eta, 1)
    else:
        Fa = F(1, 1, eta)
    return Fa
0f2c230e9e19bcb61b687bcd7aff5ec6d5069347
702,845
import json
import re


def unsubChange(doc, API, session):
    """ Calculate a mailing list unsub request """
    diff = ""
    mls = {}
    with open("private/json/ml-modsubs.json") as f:
        mls = json.load(f)
    li = doc['listname']
    l, d = li.split('@', 1)
    d = d.replace(".apache.org", "")
    # Check that we can edit/add lists here
    if d not in session.user['canAdmin'].values():
        raise API.exception(403, "You cannot edit mailing lists for this sub-domain!")
    # Validate method
    if doc.get('action', '') not in ['unsub', 'ban']:
        raise API.exception(400, "Invalid action specified")
    # Validate target
    target = doc.get('target')
    if not re.match(r"^[^<>\"\s|\\&:;,]+@[^<>\"\s|\\&:;,]+$", target):
        raise API.exception(400, "Invalid email address supplied!")
    if li in mls:
        if target not in mls[li]['subscribers']:
            raise API.exception(404, "Email address not found in subscriber list!")
    else:
        raise API.exception(404, "Mailing list not found!")
    diff = "- Modify subscribers on %s\n" % li
    diff += "- Remove %s from the mailing list\n" % target
    if doc.get('action') == 'ban':
        diff += "- Prevent %s from subscribing to the list\n" % target
    return diff, doc
72b1e1d0886222b43ff0df4deccfbce5e4f55332
702,846
def get_min(statistical_series):
    """
    Get minimum value for each group

    :param statistical_series: Multiindex series
    :return: A series with minimum value for each group
    """
    return statistical_series.groupby(level=0).agg('min')
c032f2f834cfe298a6c9f98c9116aaf354db0960
702,847
import time


def now():
    """
    Get the current time function.

    :return: Time function.
    :rtype: function
    """
    if hasattr(time, 'monotonic'):
        return time.monotonic
    return time.time
a50542697bbf4fa78d942fb564749e4e26b37ff4
702,848
def to_roman(value: int, make_upper: bool = True) -> str:
    """
    The presence of 500 (D) and 50 (L), coupled with the special handling
    of 400, 900, 40, 90, 4 and 9, make table lookup seem like the best
    approach.
    """
    if value == 0:
        return 'Zero'
    if value > 3999:
        return f'{value:,}'
    thousands, value = divmod(value, 1000)
    hundreds, value = divmod(value, 100)
    tens, ones = divmod(value, 10)
    return_str = thousands * 'm'
    return_str += ['', 'c', 'cc', 'ccc', 'cd', 'd',
                   'dc', 'dcc', 'dccc', 'cm'][hundreds]
    return_str += ['', 'x', 'xx', 'xxx', 'xl', 'l',
                   'lx', 'lxx', 'lxxx', 'xc'][tens]
    return_str += ['', 'i', 'ii', 'iii', 'iv', 'v',
                   'vi', 'vii', 'viii', 'ix'][ones]
    return_str = return_str.upper() if make_upper else return_str.lower()
    return return_str
d74982f73848b2d64c421a10a2b3bed6c89a7c54
702,849
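A usage sketch for to_roman above (illustrative values):

print(to_roman(1994))                    # MCMXCIV
print(to_roman(1994, make_upper=False))  # mcmxciv
print(to_roman(4000))                    # 4,000 (out of Roman range)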
def get_daily_returns(port_val):
    """Get daily returns of a portfolio value dataframe.

    Args:
        port_val (dataframe): daily portfolio value

    Returns:
        daily_ret (dataframe): daily returns
    """
    daily_ret = port_val.copy()
    daily_ret[1:] = (port_val[1:] / port_val[:-1].values) - 1
    # .ix has been removed from pandas; .iloc is the positional equivalent
    daily_ret = daily_ret.iloc[1:]
    return daily_ret
8d1a2dd03c2a5992b97b5d8d4b00cc65ebe4caba
702,850
def add_sppc_args(parser):
    """Add args of SPP Container to app."""
    parser.add_argument(
        '--dist-name', type=str, default='ubuntu',
        help="Name of Linux distribution")
    parser.add_argument(
        '--dist-ver', type=str, default='latest',
        help="Version of Linux distribution")
    parser.add_argument(
        '--workdir', type=str,
        help="Path of directory in which the command is launched")
    parser.add_argument(
        '--name', type=str,
        help='Name of container')
    parser.add_argument(
        '-ci', '--container-image', type=str,
        help="Name of container image")
    parser.add_argument(
        '-fg', '--foreground', action='store_true',
        help="Run container as foreground mode")
    parser.add_argument(
        '--dry-run', action='store_true',
        help="Only print matrix, do not run, and exit")
    return parser
9458603e0ce6717858c2d844db85b159bff8714e
702,851
from typing import Any
from pathlib import Path


def get_object_filepath(object: Any) -> str:
    """ Get object's filepath. """
    path = Path(object.__code__.co_filename).absolute()
    try:
        filepath = "./" + path.relative_to(Path.cwd()).as_posix()
    except ValueError:
        filepath = path.as_posix()
    return filepath
de30939b8699e812e712b4e5eea32cf6fe80eb4d
702,853
def _depgrep_rel_disjunction_action(_s, _l, tokens):
    """
    Builds a lambda function representing a predicate on a tree node
    from the disjunction of several other such lambda functions.
    """
    # filter out the pipe
    tokens = [x for x in tokens if x != "|"]
    # print 'relation disjunction tokens: ', tokens
    if len(tokens) == 1:
        return tokens[0]
    elif len(tokens) == 2:
        return (lambda a, b: lambda n, m=None, el=None:
                a(n, m, el) or b(n, m, el))(tokens[0], tokens[1])
1daad27c573ff7e36083d422aacd9d232ecef32c
702,854
def merge(batch, results):
    """
    Merge clumped results files together
    """
    merger = batch.new_job(name='merge-results')
    merger.image('ubuntu:18.04')
    if results:
        merger.command(f'''
        head -n 1 {results[0]} > {merger.ofile}
        for result in {" ".join(results)}
        do
            tail -n +2 "$result" >> {merger.ofile}
        done
        sed -i '/^$/d' {merger.ofile}
        ''')
    return merger
92037b34ff8db44381789f15518b5f44949b26a2
702,856
import torch
from collections import OrderedDict


def torch_load_state_dict_without_module(ckp_file):
    """
    Load a model checkpoint saved with DataParallel, stripping the
    'module.' prefix from each key.
    """
    checkpoint = torch.load(ckp_file)
    state_dict = checkpoint['state_dict']
    new_state_dict = OrderedDict()
    for k, v in state_dict.items():
        if 'module.' in k:
            k = k[7:]  # remove 'module.' of dataparallel
        new_state_dict[k] = v
    return new_state_dict
72f265b2087bd40d328d8760cc9668c9e6ccacf6
702,857
def code_list_lengthener(code_list, parameter):
    """Ensures that code_list is long enough to accept an item in its
    parameter-th location"""
    while len(code_list) < parameter + 1:
        code_list.append(0)
    return code_list
fd9a2d498ca1c679f44b0937a902bd95a7a882c8
702,858
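A usage sketch for code_list_lengthener above, the kind of zero-padding an Intcode-style interpreter needs before writing past the end of its program (illustrative values):

program = [1, 2]
print(code_list_lengthener(program, 5))  # [1, 2, 0, 0, 0, 0]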
import unicodedata
import re


def unaccented_letters(s: str) -> str:
    """Return the letters of `s` with accents removed."""
    s = unicodedata.normalize('NFKD', s)
    s = re.sub(r'[^\w -]', '', s)
    return s
e75b8929f8bd800ad4c79ae5688dea1067c351c5
702,859
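A usage sketch for unaccented_letters above; NFKD splits each accented letter into a base letter plus a combining mark, which the regex then strips (illustrative input):

print(unaccented_letters("café naïve"))  # cafe naive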
import os
import re
from collections import defaultdict


def load_ud_english(fpath):
    """Load a file from the UD English corpus

    Parameters
    ----------
    fpath : str
        Path to UD corpus file ending in .conllu

    Returns
    -------
    A list containing, for each document, the index of its first sentence,
    along with the total number of sentences.
    """
    n = 1
    fname = os.path.split(fpath)[1]
    parses = defaultdict(list)
    sent_ids = []
    newdoc_ids = []
    for l in open(fpath):
        ident = fname + ' ' + str(n)
        if re.match(r'\# newdoc id', l):
            newdoc_ids.append(n)
            # newdoc_ids.append(l.split("=")[-1].strip())
        if re.match(r'^\d', l):
            l_split = l.strip().split()
            parses[ident].append(l_split)
        elif parses[ident]:
            sent_ids.append(ident)
            n += 1
    return newdoc_ids, len(sent_ids)
b2e6d83a3e1a5cbb806c30915287cd2717510019
702,860
def parse_mapping_file(fp):
    """Parse a mapping file, returning a dict from ENSP to HGNC identifiers."""
    ensp_to_hgnc = {}
    with open(fp, "r") as fh:
        for line in fh:
            line = line.rstrip()
            words = line.split()
            if len(words) >= 4:
                ensp_to_hgnc[words[3]] = words[0]
    return ensp_to_hgnc
e3c47d9a73cf19eddd9efc8b4fb7840f5bda156b
702,861
def sort_group(indx, column1, column2):
    """
    Parameters
    ----------
    indx : integer
    column1 : list data type (contains strings of SOC NAMES / WORK STATES
        which need to be ordered.)
    column2 : list data type (contains integers which denote numbers of
        certified applications.)

    Returns
    -------
    sort_group : list
        Returns a list where the entry at index 'indx' of column1 has been
        put in its proper place as per alphabetical ordering.

    Examples
    --------
    >>> t1 = ['aaa', 'a', 'az', 'ay', 'ab', 'aa', 'ac', 'd', 'b', 'c']
    >>> t2 = [7,6,5,5,5,5,5,4,3,3]
    >>> Bu = sort_group(2, t1, t2)
    >>> Bu
    ['aaa', 'a', 'aa', 'az', 'ay', 'ab', 'ac', 'd', 'b', 'c']

    The returned result is always a list of the same length as column1.
    """
    j = indx + 1
    check = True
    while check and (j < len(column2)):
        if column2[indx] == column2[j]:
            if column1[indx] > column1[j]:
                dum = column1[indx]
                column1[indx] = column1[j]
                column1[j] = dum
        if column2[indx] > column2[j]:
            check = False
        j += 1
    return column1
6bc153d78be6c40a1c3b7324784f45fb9f01fbd4
702,862
import argparse


def make_argument_parser() -> argparse.ArgumentParser:
    """Generic experiment parser.

    Generic parser takes the experiment yaml as the main argument, but has
    some options for reloading, etc. This parser can be easily extended
    using a wrapper method.

    Returns:
        argparse.parser
    """
    parser = argparse.ArgumentParser(
        formatter_class=lambda prog: argparse.HelpFormatter(
            prog, max_help_position=50, width=100))
    parser.add_argument(
        "-o", "--out_path", default=None,
        help=("Output path directory. All model results will go here. "
              "If a new directory, a new one will be created, as long "
              "as parent exists."))
    parser.add_argument(
        "-n", "--name", default=None,
        help=("Name of the experiment. If given, base name of output "
              "directory will be `--name`. If not given, name will be "
              "the base name of the `--out_path`"))
    parser.add_argument(
        "-r", "--reload", type=str, default=None,
        help=("Path to model to reload."))
    parser.add_argument(
        "-a", "--autoreload", default=False, action="store_true")
    parser.add_argument(
        "-R", "--networks_to_reload", type=str, nargs="+", default=None)
    parser.add_argument(
        "-L", "--load_networks", type=str, default=None,
        help=("Path to model to reload. Does not load args, info, etc"))
    parser.add_argument("-m", "--meta", type=str, default=None)
    parser.add_argument(
        "-c", "--config_file", default=None,
        help=("Configuration yaml file. See `exps/` for examples"))
    parser.add_argument(
        "-k", "--clean", action="store_true", default=False,
        help=("Cleans the output directory. This cannot be undone!"))
    parser.add_argument(
        "-v", "--verbosity", type=int, default=1,
        help="Verbosity of the logging. (0, 1, 2)")
    parser.add_argument("-d", "--device", type=int, default=0)
    return parser
2b532e06bf86e827fc52838b33fe350aaa0aaeeb
702,863
def iter_items(rows):
    """Group the values of each row (from the third column on) under the
    key in its first column."""
    member_load = {}
    for row in rows:
        try:
            member_load[row[0]].extend(list(row[2:]))
        except KeyError:
            member_load[row[0]] = list(row[2:])
    return member_load
16ee5bca38a66dd9c2ba8b3428f169d2c624af85
702,864
def waber(lumin, lambdaValue):
    """
    Weber's law, inspired by a physiology experiment.
    """
    # lambdaValue is normally set to 0.6
    w = lumin**lambdaValue
    # w = (255*np.clip(w,0,1)).astype('uint8')
    return w
d9c495dd656161a67120f1e15e447021617180ab
702,865
import os


def valid_engine_path(path):
    """ Check if the engine path is valid """
    path = os.path.normcase(path)
    # Check that path is actually there
    if not os.path.exists(path):
        return False
    # Check that it has an engine folder
    if not os.path.exists(os.path.join(path, 'engine')):
        return False
    # Check some specific engine folders
    if not os.path.exists(os.path.join(path, 'engine', 'Binaries')):
        return False
    if not os.path.exists(os.path.join(path, 'engine', 'Build')):
        return False
    if not os.path.exists(os.path.join(path, 'engine', 'Config')):
        return False
    if not os.path.exists(os.path.join(path, 'engine', 'Content')):
        return False
    return True
3a722675f244f2a88fdf33d7223e5dceccfeea54
702,866
def children(nodes):
    """
    Search and list all the nodes' children.

    :param nodes: a list with tree nodes
    :return: a list with the nodes' children
    """
    child = []
    for node in nodes:
        if node.left:
            child.append(node.left)
        if node.right:
            child.append(node.right)
    return child
19e60bbdceb60f85ffa9806e5ff61fd00030b368
702,867
def tensor_prod(a, b):
    """
    Returns the tensor product using lists
    """
    product = []
    # loop through first tensor/vector
    for a_ in a:
        row = []
        # multiply with each element of second tensor and add to row list
        for b_ in b:
            row.append(a_ * b_)
        # add row list to final product
        product.append(row)
    return product
c13ec6e0d3292d1124c567420d2b50b8d3a5501a
702,868
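A usage sketch for tensor_prod above; for two vectors this is the outer product (illustrative values):

print(tensor_prod([1, 2], [3, 4]))  # [[3, 4], [6, 8]]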
def _convert_version(value):
    """Extract 3 version numbers from 2 bytes"""
    # subversion bits 0-3, version bits 4-7, type bits 8-15
    type_ = value // 256
    value = value % 256
    version = value // 16
    subversion = value % 16
    return '{}.{}.{}'.format(type_, version, subversion)
2850b4729da445f2ee7007c469aaa91c2370e4dd
702,869
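A usage sketch for _convert_version above; 0x0123 packs type 1, version 2, subversion 3 (illustrative value):

print(_convert_version(0x0123))  # 1.2.3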
import argparse


def parse_args():
    """
    Parse command-line arguments.
    """
    parser = argparse.ArgumentParser('Extract user sessions from log.')
    parser.add_argument('input', help='an input file')
    parser.add_argument('output', help='an output file')
    parser.add_argument('-p', '--protocol', default='6', type=str,
                        help='extract sessions with a specified protocol')
    parser.add_argument('-t', '--maxtime', default=0.01, type=float,
                        help='largest time within a session allowed')
    parser.add_argument('-d', '--duration', default=0.01, type=float,
                        help='maximum session duration allowed')
    parser.add_argument('-n', '--ipnum', default=0, type=int,
                        help='number of destination IPs for which sessions '
                             'will be extracted')
    return parser.parse_args()
b07bc1fc60a73ac4f2089d12ae8ad34e106c1360
702,871
import glob


def get_lists_in_dir(dir_path):
    """
    Function to obtain a list of .jpg files in a directory.

    Parameters:
    - dir_path: directory for the training images (from camera output)
    """
    image_list = []
    for filename in glob.glob(dir_path + '/*.jpg'):
        image_list.append(filename)
    return image_list
c59701c5c8327569a5efe68c2751b0568de0498e
702,872