Dataset columns: content (string, lengths 35 to 416k); sha1 (string, length 40); id (int64, values 0 to 710k)
import base64 def hex_to_base64(hex_): """ Converts hex string to base64 """ return base64.b64encode(bytes.fromhex(hex_))
26f42b25c9e804bc1b786aadab033db104882f4b
706,891
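A quick usage sketch for hex_to_base64 above; the hex string is an illustrative input, not part of the dataset.

# Hypothetical input: hex for the ASCII bytes b"I'm".
encoded = hex_to_base64("49276d")
print(encoded)           # b'SSdt'
print(encoded.decode())  # SSdt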
def clean_tag(tag): """clean up tag.""" if tag is None: return None t = tag if isinstance(t, list): t = t[0] if isinstance(t, tuple): t = t[0] if t.startswith('#'): t = t[1:] t = t.strip() t = t.upper() t = t.replace('O', '0') t = t.replace('B', '8') return t
1d2709323c4d80f290701d5cdc3a993b4bac25d4
706,893
import math def hard_negative_mining(loss, labels, neg_pos_ratio=3): """ Limit the ratio of negatives to positives during training; by default three negatives are kept per positive. Args: loss (N, num_priors): the loss for each example. labels (N, num_priors): the labels. neg_pos_ratio: ratio of negatives to positives (num_negatives / num_positives) """ pos_mask = labels > 0 num_pos = pos_mask.long().sum(dim=1, keepdim=True) num_neg = num_pos * neg_pos_ratio loss[pos_mask] = -math.inf # negative infinity # two sorts give each element's rank in the descending loss order _, indexes = loss.sort(dim=1, descending=True) # descending sort, returns (values, indices) _, orders = indexes.sort(dim=1) neg_mask = orders < num_neg # positives were set to -inf, so the top num_neg ranks are the hardest negatives (background) return pos_mask | neg_mask
3b2e38ab2b0bbd9732fceafdfd023ea220b3c5eb
706,894
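A minimal sketch of how hard_negative_mining above might be called, assuming PyTorch tensors shaped (N, num_priors); the numbers are made up for illustration.

import torch

loss = torch.tensor([[0.9, 0.1, 0.5, 0.3, 0.8, 0.2]])  # per-prior loss for one image
labels = torch.tensor([[1, 0, 0, 2, 0, 0]])             # two positive priors
# pass a clone because the function overwrites the loss of positives in place
mask = hard_negative_mining(loss.clone(), labels, neg_pos_ratio=1)
# keeps both positives plus the 2 hardest negatives:
# tensor([[ True, False,  True,  True,  True, False]])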
def bbox_area(gt_boxes): """ gt_boxes: (K, 4) ndarray of float area: (k) """ K = gt_boxes.size(0) gt_boxes_area = ((gt_boxes[:,2] - gt_boxes[:,0] + 1) * (gt_boxes[:,3] - gt_boxes[:,1] + 1)).view(K) return gt_boxes_area
57ad16b8b339e4515dcd7e7126b9c6b35b6c3d8b
706,895
def compare_dicts(cloud1, cloud2): """ Compare the dicts containing cloud images or flavours """ if len(cloud1) != len(cloud2): return False for item in cloud1: if item in cloud2: if cloud1[item] != cloud2[item]: return False else: return False return True
4c13ed92da2cd40b543b75fac119b5da302717e3
706,896
import string def str2int(string_with_int): """ Collect digits from a string """ return int("".join([char for char in string_with_int if char in string.digits]) or 0)
86955812fa3b2e6af0b98a04a1516897ccf95c25
706,897
def find_routes(paths) -> list: """Return the unique routes as (node, node) tuples from each path list, e.g. [1, 2, 3] --> (1, 2), (2, 3).""" routes = [] for path in paths: for i in range(len(path)): try: route = (path[i], path[i + 1]) if route not in routes: routes.append(route) except IndexError: pass return routes
67fb8eb575dd45879f5e5b465a7886f2a2387b26
706,898
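An illustrative call to find_routes above, with made-up paths that share one leg.

paths = [[1, 2, 3], [2, 3, 4]]
print(find_routes(paths))  # [(1, 2), (2, 3), (3, 4)]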
def deploy_tester_contract( web3, contracts_manager, deploy_contract, contract_deployer_address, get_random_address, ): """Returns a function that can be used to deploy a named contract, using the contract manager to compile the bytecode and get the ABI""" def f(contract_name, libs=None, args=None): json_contract = contracts_manager.get_contract(contract_name) contract = deploy_contract( web3, contract_deployer_address, json_contract['abi'], json_contract['bin'], args, ) return contract return f
ee925e9632f3bfd66a843d336bd287c92543b2ed
706,899
def make_hashable_params(params): """ Checks to make sure that the parameters submitted are hashable. Args: params(dict): keyword parameters to convert. Returns: tuple: a hashable tuple built from the parameters. """ tuple_params = [] for key, value in params.items(): if isinstance(value, dict): dict_tuple = tuple([(key2, value2) for key2, value2 in value.items()]) tuple_params.append(dict_tuple) else: if isinstance(value, (list, set)): tuple_params.append((key, tuple(value))) else: tuple_params.append((key, value)) tuple_params = tuple(tuple_params) try: hash(tuple_params) except TypeError: raise TypeError('The values of keywords given to this class must be hashable.') return tuple_params
39d5de594b8caf776d2732e0e58b1c11127e5047
706,900
def mil(val): """convert mil to mm""" return float(val) * 0.0254
9071b0116a7062ef93d6bee56a08db2b9bec906a
706,901
def ask_number(question, low, high): """Ask for a number within the given range.""" response = None while type(response) != int: try: response = int(input(question)) while response not in range(low, high): response = int(input(question)) except ValueError: print("Value must be a number") return response
fdae37e6a0cd34d36b647a23f4a0f58cad46680a
706,902
def laplacian_operator(data): """ apply laplacian operator on data """ lap = [] lap.append(0.0) for index in range(1, len(data) - 1): lap.append((data[index + 1] + data[index - 1]) / 2.0 - data[index]) lap.append(0.0) return lap
3d7755cdc52352cc445d5942e34c09f65f3e11db
706,903
from pathlib import Path def input_file_path(directory: str, file_name: str) -> Path: """Given the string paths to the result directory, and the input file return the path to the file. 1. check if the input_file is an absolute path, and if so, return that. 2. if the input_file is a relative path, combine it with the result_directory and return that. The resultant path must exist and be a file, otherwise raise a FileNotFoundError. """ path_to_file = Path(file_name) if path_to_file.is_absolute() and path_to_file.is_file(): return path_to_file input_directory_path = Path(directory) path_to_file = input_directory_path / path_to_file if path_to_file.is_file(): return path_to_file.resolve() else: raise FileNotFoundError( 'did not find the input file using result_directory={directory}, input_file={input_file}'.format( directory=directory, input_file=file_name ) )
dd866a5f8b6f776238269844d64686f7fb28347c
706,904
import os def get_ps_lib_dirs(): """ Add directory to list as required """ polysync_install = os.path.join('/', 'usr', 'local', 'polysync') polysync_lib = os.path.join(polysync_install, 'lib') polysync_vendor = os.path.join(polysync_install, 'vendor', 'lib') return [ polysync_lib, polysync_vendor, ]
ce4745ef5dcdb4c00051eff6fae6082f98c90498
706,906
import math def workout_train_chunk_length(inp_len: int, resampling_factor: int = 1, num_encoders: int = 5, kernel: int = 8, stride: int = 2) -> int: """ Given inp_len, return the chunk size for training """ out_len = inp_len * resampling_factor for _ in range(num_encoders): out_len = math.ceil((out_len - kernel) / stride) + 1 for _ in range(num_encoders): out_len = (out_len - 1) * stride + kernel return math.ceil(out_len / resampling_factor)
a7e7f42aa9670f1bda98c588e50052db0f4eb90f
706,907
def ft32m3(ft3): """ft^3 -> m^3""" return 0.028316847*ft3
74f55f722c7e90be3fa2fc1f79f506c44bc6e9bc
706,908
def _calculate_target_matrix_dimension(m, kernel, paddings, strides): """ Calculate the target matrix dimension. Parameters ---------- m: ndarray 2d Matrix kernel: ndarray 2d Convolution kernel paddings: tuple Number of padding in (row, column) on one side. If you put 2 padding on the left and 2 padding on the right, specify 2. strides: tuple Step size in (row, column) Returns ------- out: tuple Tuple containing (number of rows, number of columns) Raises ------ ValueError If kernel size is greater than m in any axis after padding """ source_height = m.shape[0] source_width = m.shape[1] padding_row = paddings[0] padding_column = paddings[1] kernel_height = kernel.shape[0] kernel_width = kernel.shape[1] if kernel_height > (source_height + padding_row) or kernel_width > (source_width + padding_column): raise ValueError("Kernel size is larger than the matrix") row_stride = strides[0] col_stride = strides[1] # (source_height - kernel_height)/strides[0] is how many steps you can go down. # + 1 to include the start position. target_height = int((source_height + padding_row - kernel_height) / row_stride) + 1 target_width = int((source_width + padding_column - kernel_width) / col_stride) + 1 return (target_height, target_width)
77b5cabd7101b957a27fc422d1ed1715525400a0
706,909
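A small sketch of the shape arithmetic in _calculate_target_matrix_dimension above, assuming NumPy arrays; the sizes are arbitrary.

import numpy as np

m = np.zeros((5, 5))       # 5x5 input matrix
kernel = np.zeros((3, 3))  # 3x3 kernel
# no padding, stride 1 -> (5 - 3) / 1 + 1 = 3 along each axis
print(_calculate_target_matrix_dimension(m, kernel, paddings=(0, 0), strides=(1, 1)))  # (3, 3)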
def pretty_duration(seconds): """Return a human-readable string for the specified duration""" if seconds < 2: return '%d second' % seconds elif seconds < 120: return '%d seconds' % seconds elif seconds < 7200: return '%d minutes' % (seconds // 60) elif seconds < 48 * 3600: return '%d hours' % (seconds // 3600) else: return '%d days' % (seconds // (24 * 3600))
8e34addedeeb98e1e028fa9374fcc8c4f134a9f7
706,910
from typing import Counter def train(training_data): """Trains the model on a given data set. Parameters ---------- training_data Returns ------- """ counts = Counter(training_data) model = {} # sort counts by lowest occurrences, up to most frequent. # this allows higher frequencies to overwrite related # values in the model for pair, _ in counts.most_common()[:-len(counts)-1:-1]: word, tag = pair model[word] = tag return model
328901b090392097d22b21a948691787e0128d48
706,911
def get_ref_kmer(ref_seq, ref_name, k_len): """ Load reference kmers. """ ref_mer = [] ref_set = set() for i in range(len(ref_seq) - k_len + 1): kmer = ref_seq[i:(i + k_len)] if kmer in ref_set: raise ValueError( "%s found multiple times in reference %s, at pos. %d" % ( kmer, ref_name, i) ) ref_mer.append(kmer) ref_set.add(kmer) return ref_mer
72b75dccfba122a986d50e144dea62bfafe0fb50
706,912
import ipaddress def is_valid_ip(ip: str) -> bool: """ Args: ip: IP address Returns: True if the string represents an IPv4 or an IPv6 address, false otherwise. """ try: ipaddress.IPv4Address(ip) return True except ValueError: try: ipaddress.IPv6Address(ip) return True except ValueError: return False
aa1d3b19828dd8c3dceaaa8d9d1017cc16c1f73b
706,913
import torch def pick_best_batch_size_for_gpu(): """ Tries to pick a batch size that will fit in your GPU. These sizes aren't guaranteed to work, but they should give you a good shot. """ free, total = torch.cuda.mem_get_info() availableGb = free / (1024 ** 3) if availableGb > 14: return 16 elif availableGb > 10: return 8 elif availableGb > 7: return 4 return 1
31d970697b417b40f8ef5b41fdeacc0e378543a0
706,914
def tree_to_newick_rec(cur_node): """ This recursive function is a helper function to generate the Newick string of a tree. """ items = [] num_children = len(cur_node.descendants) for child_idx in range(num_children): s = '' sub_tree = tree_to_newick_rec(cur_node.descendants[child_idx]) if sub_tree != '': s += '(' + sub_tree + ')' s += cur_node.descendants[child_idx].name items.append(s) return ','.join(items)
751d46dbb4e3a5204900601164410b5bf7f0578b
706,915
def indexData_x(x, ukn_words): """ Map each word in the given data to a unique integer. A special index will be kept for "out-of-vocabulary" words. :param x: The data :return: Two dictionaries: one where words are keys and indexes values, another one "reversed" (keys->index, values->words) """ # Retrieve all words used in the data (with duplicates) all_text = [w for e in x for w in e] # Create a DETERMINISTIC set of all words used = set() words = [x for x in all_text if x not in used and (used.add(x) or True)] print("Number of entries: ",len(all_text)) print("Individual entries: ",len(words)) # Assign an integer index for each individual word word2ind = {word: index for index, word in enumerate(words, 2)} ind2word = {index: word for index, word in enumerate(words, 2)} # To deal with out-of-vocabulary words word2ind.update({ukn_words:1}) ind2word.update({1:ukn_words}) # The index '0' is kept free in both dictionaries return word2ind, ind2word
3f6ffd97d33400c3418b78ad3b383766cc07bee3
706,917
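A toy call to indexData_x above; the corpus and the "UNK" marker are invented for illustration.

x = [["the", "cat"], ["the", "dog"]]
word2ind, ind2word = indexData_x(x, "UNK")
# word2ind -> {'the': 2, 'cat': 3, 'dog': 4, 'UNK': 1}; index 0 is left free
# ind2word -> {2: 'the', 3: 'cat', 4: 'dog', 1: 'UNK'}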
def shimizu_mirioka(XYZ, t, a=0.75, b=0.45): """ The Shimizu-Morioka attractor. x0 = (0.1,0,0) """ x, y, z = XYZ x_dt = y y_dt = (1 - z) * x - a * y z_dt = x**2 - b * z return x_dt, y_dt, z_dt
60e5b52e1755de8bcc966364d828d47b05af3723
706,918
def pack_bidirectional_lstm_state(state, num_layers): """ Pack the hidden state of a BiLSTM s.t. the first dimension equals to the number of layers. """ assert (len(state) == 2 * num_layers) _, batch_size, hidden_dim = state.size() layers = state.view(num_layers, 2, batch_size, hidden_dim).transpose(1, 2).contiguous() state = layers.view(num_layers, batch_size, -1) return state
de102ce55deceb5ca7211def122dc2767c35cdd3
706,920
import time def convert_time(time_string): """ Input a time in HH:MM form and output a struct_time representing it """ return time.strptime(time_string, "%H:%M")
f34b46fe8cd242ee12a9768102486cba243d94df
706,921
from typing import Dict def _build_request_url( base: str, params_dict: Dict[str, str]) -> str: """Returns an URL combined from base and parameters :param base: base url :type base: str :param params_dict: dictionary of parameter names and values :type params_dict: Dict[str, str] :return: a complete url :rtype: str """ parameters = "&".join([f"{k}={v}" for k, v in params_dict.items()]) url = base + "?" + parameters return url
30e27cf55692884be408218403c2f94279516ad2
706,922
def get_players(picks): """Return the list of players in the team """ players = [] for rd in picks: play = list(rd.keys()) players = players+play players = list(set(players)) return players
79963bc19af662d44d4eaf29a04995ede331706c
706,923
def sub_vectors(a, b): """Subtracts two vectors. Args: a (tuple[int]): first position b (tuple[int]): second position Returns: tuple[int]: element wise subtraction Examples: >>> sub_vectors((1,4,6), (1,3,7)) (0, 1, -1) """ return tuple(a[i] - b[i] for i in range(3))
02c35bf46311142a3f3e90cd803d908c6ff63896
706,924
import re def text_pre_process(result): """ Cleans text recognized from an image: removes special characters, one- to two-character words, line breaks and whitespace. :param result: text recognized from the image :return: the preprocessed text """ copy = str(result) copy2 = copy.replace("\n", "") copy3 = re.sub('[^ㄱ-힗]', '', copy2) # re.sub('[^A-Za-z0-9]', '', copy2) result = re.sub('[-=+,#}/\{:^$.@*\※~&%ㆍ!『「』\\‘|\(\)\[_ ""\]\<\>`\'…》]', '', copy3) # shortword = re.compile(r'\W*\b\w{1,2}\b') # shortword.sub('', result) # text2 = re.sub(r'\d','',result) if result is not None and len(result) > 3: # print(result) return result
c9a25fb19a723d38eb19a8a086a2134369223ea1
706,925
import argparse def ParseCommandYAML(): """Function for parsing command line arguments for input to YAML HDIprep""" # if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument("--im", nargs='*') parser.add_argument("--pars") parser.add_argument("--out_dir") args = parser.parse_args() # Create a dictionary object to pass to the next function dict = {"im": args.im, "pars": args.pars, "out_dir": args.out_dir} # Print the dictionary object print(dict) # Return the dictionary return dict
3a56d16d960f59f0afd888120c19d12b0b6f25b3
706,927
import torch def x_gate(): """ Pauli x """ return torch.tensor([[0, 1], [1, 0]]) + 0j
736d72d832380ea5a1d6c4a840cb6aa0050638e5
706,928
def merge_dictionaries(default_dictionary, user_input_dictionary, path=None): """Merges user_input_dictionary into default dictionary; default values will be overwritten by users input.""" return {**default_dictionary, **user_input_dictionary}
ea600efcd69e920ae536fa2f22a4c883a71d8ad3
706,929
def get_fiber_protein_intake( nutrients_lower_lists, nutrients_middle_lists,nutrients_upper_lists): """Gets financial class-wise fiber and protein intake data.""" lower_fiber_prot = nutrients_lower_lists.map(lambda x: (x[1], x[3])) middle_fiber_prot = nutrients_middle_lists.map(lambda x: (x[1], x[3])) upper_fiber_prot = nutrients_upper_lists.map(lambda x: (x[1], x[3])) return lower_fiber_prot, middle_fiber_prot, upper_fiber_prot
990293236a10ed18960393b39dbfb46652fca51d
706,930
def _format_rest_url(host: str, append: str = "") -> str: """Return URL used for rest commands.""" return f"http://{host}:8001/api/v2/{append}"
1d5ace3919da004e648cb6c7d6d80fe72903c0e1
706,931
def is_correlated(corr_matrix, feature_pairs, rho_threshold=0.8): """ Returns dict where the key are the feature pairs and the items are booleans of whether the pair is linearly correlated above the given threshold. """ results = {} for pair in feature_pairs: f1, f2 = pair.split("__") corr = corr_matrix[f1][f2] results[pair] = round(corr, 3) >= rho_threshold return results
18afa0cc24f5d9205cde3c8ad23f70d73b5c395b
706,932
def find_password(liste, login): """Return the password for the given login from a list of (login, password) pairs, or None if the login is not found.""" for user in liste: if user[0] == login: return user[1] return None
8f61072a8b1cc34eb27c1665b1cd34aeb6630ce2
706,933
def get_bin_values(base_dataset, bin_value): """Gets the values to be used when sorting into bins for the given dataset, from the configured options.""" values = None if bin_value == "results": values = base_dataset.get_output() elif bin_value == "all": # We set all values to 0, assuming single bin will also set its value to 0. values = [0] * base_dataset.get_number_of_samples() else: raise Exception(f"Invalid bin value configured: {bin_value}") return values
cf2419066d6e642e65d9a8747081ebfee417ed64
706,934
async def get_temperatures(obj): """Get temperatures as read by the thermostat.""" return await obj["madoka"].temperatures.query()
b4643d9c40f6aa8953c598dd572d291948ef34a4
706,935
import math def acos(x): """Return the arc cosine of x, in radians.""" return math.acos(x)
0a8ca8f716f0ea54b558ca27021830480dac662d
706,936
def transaction_update_spents(txs, address): """ Update spent information for a list of transactions for a specific address. This method assumes the list of transactions is complete and up-to-date. It loops through all the transactions, updates all transaction outputs for the given address, checks if each output is spent and adds the spending transaction ID and index number to the outputs. The same list of transactions with updated outputs will be returned :param txs: Complete list of transactions for given address :type txs: list of Transaction :param address: Address string :type address: str :return list of Transaction: """ spend_list = {} for t in txs: for inp in t.inputs: if inp.address == address: spend_list.update({(inp.prev_txid.hex(), inp.output_n_int): t}) address_inputs = list(spend_list.keys()) for t in txs: for to in t.outputs: if to.address != address: continue spent = True if (t.txid, to.output_n) in address_inputs else False txs[txs.index(t)].outputs[to.output_n].spent = spent if spent: spending_tx = spend_list[(t.txid, to.output_n)] spending_index_n = \ [inp for inp in txs[txs.index(spending_tx)].inputs if inp.prev_txid.hex() == t.txid and inp.output_n_int == to.output_n][0].index_n txs[txs.index(t)].outputs[to.output_n].spending_txid = spending_tx.txid txs[txs.index(t)].outputs[to.output_n].spending_index_n = spending_index_n return txs
6ac33306cafd5c75b37e73c405fff4bcc732226f
706,937
def count_tilings(n: int) -> int: """Returns the number of unique ways to tile a row of length n >= 1.""" if n < 5: # handle recursive base case return 2**(n - 1) else: # place each tile at end of row and recurse on remainder return (count_tilings(n - 1) + count_tilings(n - 2) + count_tilings(n - 3) + count_tilings(n - 4))
70f9caa9a27c65c73862dd8c415d93f5a7122632
706,938
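A quick check of count_tilings above against its own base cases (tiles of length 1 to 4 on a row of length n).

print([count_tilings(n) for n in range(1, 8)])
# [1, 2, 4, 8, 15, 29, 56] -- the four-term recurrence takes over at n = 5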
import math def _meters_per_pixel(zoom, lat=0.0, tilesize=256): """ Return the pixel resolution for a given mercator tile zoom and latitude. Parameters ---------- zoom: int Mercator zoom level lat: float, optional Latitude in decimal degrees (default: 0) tilesize: int, optional Mercator tile size (default: 256). Returns ------- Pixel resolution in meters """ return (math.cos(lat * math.pi / 180.0) * 2 * math.pi * 6378137) / ( tilesize * 2 ** zoom )
467d23bd437f153345c67c8c1cab1a086fde4995
706,939
def manhattanDistance( xy1, xy2 ): """Returns the Manhattan distance between points xy1 and xy2""" return abs( xy1[0] - xy2[0] ) + abs( xy1[1] - xy2[1] )
ce0ee21237f253b1af33fbf088292405fd046fe3
706,940
def verifyIP(ip): """Verifies an IP is valid""" try: #Split ip and integer-ize it octets = [int(x) for x in ip.split('.')] except ValueError: return False #First verify length if len(octets) != 4: return False #Then check octet values for octet in octets: if octet < 0 or octet > 255: return False return True
72c373099a75adb2a1e776c863b6a2d1cb2698df
706,941
from datetime import datetime def get_datetime_now(t=None, fmt='%Y_%m%d_%H%M_%S'): """Return timestamp as a string; default: current time, format: YYYY_DDMM_hhmm_ss.""" if t is None: t = datetime.now() return t.strftime(fmt)
c4fc830b7ede9d6f52ee81c014c03bb2ef5552dc
706,942
def is_firstline(text, medicine, disease): """Detect if first-line treatment is mentioned with a medicine in a sentence. Use keyword matching to detect if the keywords "first-line treatment" or "first-or second-line treatment", medicine name, and disease name all appear in the sentence. Parameters ---------- text : str A single sentence. medicine : str A medicine's name. Returns ------- bool Return True if the medicine and first-line treatment are mentioned in the sentence, False otherwise. Examples -------- Import the module >>> from biomarker_nlp import biomarker_extraction Example >>> txt = "TECENTRIQ, in combination with carboplatin and etoposide, is indicated for the first-line treatment of adult patients with extensive-stage small cell lung cancer (ES-SCLC)." >>> medicine = "TECENTRIQ" >>> disease = "small cell lung cancer" >>> biomarker_extraction.is_firstline(text = txt, medicine = medicine, disease = disease) True """ text = text.lower() medicine = medicine.lower() disease = disease.lower() if medicine in text and ('first-line treatment' in text or 'first-or second-line treatment' in text) and disease in text: return True else: return False
c9f8a31c6089c4f7545780028ccb1a033372c284
706,943
def file_reader(file_name): """file_reader""" data = None with open(file_name, "r") as f: for line in f.readlines(): data = eval(line) f.close() return data
6d3d63840cc48ccfdd5beefedf0d3a60c0f44cf9
706,944
import os def dir_is_cachedir(path): """Determines whether the specified path is a cache directory (and therefore should potentially be excluded from the backup) according to the CACHEDIR.TAG protocol (http://www.brynosaurus.com/cachedir/spec.html). """ tag_contents = b'Signature: 8a477f597d28d172789f06886806bc55' tag_path = os.path.join(path, 'CACHEDIR.TAG') try: if os.path.exists(tag_path): with open(tag_path, 'rb') as tag_file: tag_data = tag_file.read(len(tag_contents)) if tag_data == tag_contents: return True except OSError: pass return False
b63f46ebafe6ff3c917325e19adfec551497ce68
706,945
def clean_str(string: str) -> str: """ Cleans strings for SQL insertion """ return string.replace('\n', ' ').replace("'", "’")
d3833293163114642b4762ee25ea7c8f850e9d54
706,946
import os def explode(req: str): """Returns the exploded dependency list for a requirements file. As requirements files can include other requirements files with the -r directive, it can be useful to see a flattened version of all the constraints. This method unrolls a requirement file and produces a list of strings for each constraint line in the order of inclusion. Args: req: path to a requirements file. Returns: list of lines of requirements """ res = [] d = os.path.dirname(req) with open(req) as f: for l in f.readlines(): l = l.rstrip("\n") l = l.lstrip(" ") if l.startswith("-r"): include = l.lstrip(" ").lstrip("-r").lstrip(" ") # assuming relative includes always res += explode(os.path.join(d, include)) elif l: res += [l] return res
1a4c389537dcc9e5abd34d19ed386c48d3d6ecc6
706,947
def get_output_attribute(out, attribute_name, cuda_device, reduction="sum"): """ This function handles processing/reduction of output for both DataParallel or non-DataParallel situations. For the case of multiple GPUs, This function will sum all values for a certain output attribute in various batches together. Parameters --------------------- :param out: Dictionary, output of model during forward pass, :param attribute_name: str, :param cuda_device: list or int :param reduction: (string, optional) reduction to apply to the output. Default: 'sum'. """ if isinstance(cuda_device, list): if reduction == "sum": return out[attribute_name].sum() elif reduction == "mean": return out[attribute_name].sum() / float(len(out[attribute_name])) else: raise ValueError("invalid reduction type argument") else: return out[attribute_name]
c09ff6a3dd4ae2371b1bbec12d4617e9ed6c6e1e
706,948
def get_ref_aidxs(df_fs): """Part of the hotfix for redundant FCGs. I did not record the occurrence id in the graphs, which was stupid. So now I need to use the df_fs to get the information instead. Needs to be used with fid col, which is defined in filter_out_fcgs_ffs_all. """ return {k: v for k, v in zip(df_fs['fid'], df_fs['_aidxf'])}
9b57d7297d96f6b711bb9d3c37f85a17c4ccacd5
706,949
def format_info(info): """ Print info neatly """ sec_width = 64 eq = ' = ' # find key width key_widths = [] for section, properties in info.items(): for prop_key, prop_val in properties.items(): if type(prop_val) is dict: key_widths.append(len(max(list(prop_val.keys()), key=len)) + 4) else: key_widths.append(len(prop_key)) key_width = max(key_widths) # format items msg = [] for section, properties in info.items(): n0 = (sec_width - 2 - len(section)) // 2 n1 = n0 if n0 * 2 + 2 + len(section) == sec_width else n0 + 1 msg.append('\n' + '=' * n0 + f' {section} ' + '=' * n1) for prop_key, prop_val in properties.items(): if type(prop_val) is dict: msg.append((prop_key + ' ').ljust(sec_width, '_')) for sub_key, sub_val in prop_val.items(): msg.append(' ' * 4 + sub_key.ljust(key_width - 4) + eq + str(sub_val)) else: msg.append(prop_key.ljust(key_width) + eq + str(prop_val)) msg.append('=' * (n0 + n1 + 2 + len(section))) return '\n'.join(msg)
9dd3a6ef15909230725f2be6eb698e7ca08a2d8b
706,950
import collections def _get_ordered_label_map(label_map): """Gets label_map as an OrderedDict instance with ids sorted.""" if not label_map: return label_map ordered_label_map = collections.OrderedDict() for idx in sorted(label_map.keys()): ordered_label_map[idx] = label_map[idx] return ordered_label_map
4c5e56789f57edda61409f0693c3bccb57ddc7cf
706,951
def eight_interp(x, a0, a1, a2, a3, a4, a5, a6, a7): """``Approximation degree = 8`` """ return ( a0 + a1 * x + a2 * (x ** 2) + a3 * (x ** 3) + a4 * (x ** 4) + a5 * (x ** 5) + a6 * (x ** 6) + a7 * (x ** 7) )
98be2259c9e0fae214234b635a3ff55608f707d1
706,952
def _splitaddr(addr): """ splits address into character and decimal :param addr: :return: """ col='';rown=0 for i in range(len(addr)): if addr[i].isdigit(): col = addr[:i] rown = int(addr[i:]) break elif i==len(addr)-1: col=addr return col,rown
6f4ef43ed926a468ae5ae22fc062fe2b2701a18a
706,953
def remoteness(N): """ Compute the remoteness of N. Parameters ---------- N : Nimber The nimber of interest. Returns ------- remote : int The remoteness of N. """ if N.n == 0: return 0 remotes = {remoteness(n) for n in N.left} if all(remote % 2 == 1 for remote in remotes): return 1 + max(remotes) else: return 1 + min(remote for remote in remotes if remote % 2 == 0)
6ea40df2a79a2188b3d7c9db69ee9038ec2e6462
706,954
def extendCorrespondingAtomsDictionary(names, str1, str2): """ extends the pairs based on list1 & list2 """ list1 = str1.split() list2 = str2.split() for i in range(1, len(list1)): names[list1[0]][list2[0]].append([list1[i], list2[i]]) names[list2[0]][list1[0]].append([list2[i], list1[i]]) return None
cb586be8dcf7a21af556b332cfedbdce0be6882a
706,955
def split_to_sentences(data): """ Split data by linebreak "\n" Args: data: str Returns: A list of sentences """ sentences = data.split('\n') # Additional cleaning (This part is already implemented) # - Remove leading and trailing spaces from each sentence # - Drop sentences if they are empty strings. sentences = [s.strip() for s in sentences] sentences = [s for s in sentences if len(s) > 0] return sentences
56540da88e982615e3874ab9f6fd22229a076565
706,956
def read_config_file(fp: str, mode='r', encoding='utf8', prefix='#') -> dict: """ Read a text file of key=value lines, skip blank lines and lines starting with prefix, and return the pairs as a dict. :param fp: path to the config file :param mode: file open mode :param encoding: file encoding :param prefix: prefix marking comment lines to ignore :return: dict of parameters """ with open(fp, mode, encoding=encoding) as f: ll = f.readlines() ll = [i for i in ll if all([i.strip(), i.startswith(prefix) == False])] params = {i.split('=')[0].strip(): i.split('=')[1].strip() for i in ll} print(params) return params
94e6130de22b05ca9dd6855206ec748e63dad8ad
706,957
def custom_address_validator(value, context): """ Address not required at all for this example, skip default (required) validation. """ return value
06ec3af3b6103c06be5fc9cf30d1af28bd072193
706,958
def get_trajectory_for_weight(simulation_object, weight): """ :param weight: :return: """ print(simulation_object.name+" - get trajectory for w=", weight) controls, features, _ = simulation_object.find_optimal_path(weight) weight = list(weight) features = list(features) return {"w": weight, "phi": features, "controls": controls}
e68827fc3631d4467ae1eb82b3c319a4e45d6a9b
706,959
import warnings def get_integer(val=None, name="value", min_value=0, default_value=0): """Returns integer value from input, with basic validation Parameters ---------- val : `float` or None, default None Value to convert to integer. name : `str`, default "value" What the value represents. min_value : `float`, default 0 Minimum allowed value. default_value : `float` , default 0 Value to be used if ``val`` is None. Returns ------- val : `int` Value parsed as an integer. """ if val is None: val = default_value try: orig = val val = int(val) except ValueError: raise ValueError(f"{name} must be an integer") else: if val != orig: warnings.warn(f"{name} converted to integer {val} from {orig}") if not val >= min_value: raise ValueError(f"{name} must be >= {min_value}") return val
9c967a415eaac58a4a4778239859d1f6d0a87820
706,960
import re def _get_variable_name(param_name): """Get the variable name from the tensor name.""" m = re.match("^(.*):\\d+$", param_name) if m is not None: param_name = m.group(1) return param_name
4f6258667383c80b584054af20ac9a61cf25381f
706,961
def get_mwis(input_tree): """Get the maximum weight independent set DP table for a path graph; the last entry is the best total weight. """ num_nodes = input_tree['num_nodes'] nodes = input_tree['nodes'] if num_nodes <= 0: return [] weights = [0, nodes[0][0]] for idx, node_pair in enumerate(nodes[1:], start=1): node_weight, node_idx = node_pair wis_prime = weights[idx] prime2_index = max(1, idx) - 1 wis_prime2 = weights[prime2_index] + node_weight weights.append(max(wis_prime, wis_prime2)) return weights
3df82615d1060756b1a4863fe168ea542dfed4f9
706,962
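A small illustrative input for get_mwis above; the nodes are hypothetical (weight, index) pairs.

path = {"num_nodes": 4, "nodes": [(1, 0), (4, 1), (5, 2), (4, 3)]}
print(get_mwis(path))  # [0, 1, 4, 6, 8] -- best total weight is 8 (the two weight-4 nodes)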
def trim_to_min_length(bits): """Ensures 'bits' have min number of leading zeroes. Assumes 'bits' is big-endian, and that it needs to be encoded in 5 bit blocks. """ bits = bits[:] # copy # make sure we can be split into 5 bit blocks while bits.len % 5 != 0: bits.prepend('0b0') # Get minimal length by trimming leading 5 bits at a time. while bits.startswith('0b00000'): if len(bits) == 5: break # v == 0 bits = bits[5:] return bits
d740ce27e0ebce30f382844a9810f7792c9b4669
706,963
def display(choices, slug): """ Get the display name for a form choice based on its slug. We need this function because we want to be able to store ACS data using the human-readable display name for each field, but in the code we want to reference the fields using their slugs, which are easier to change. :param choices: A list of tuples representing Django-style form choices. :param slug: The slug of the choice to select. :return: The display name for the given slug. """ for choice_slug, display_name in choices: if choice_slug == slug: return display_name raise NameError('No choice for slug {} in {}'.format(slug, str(choices)))
e177fa4596de8a9921d05216d51344e95dce89ab
706,964
def yes_or_no(question, default="no"): """ Returns True if question is answered with yes else False. default: by default False is returned if there is no input. """ answers = "yes|[no]" if default == "no" else "[yes]|no" prompt = "{} {}: ".format(question, answers) while True: answer = input(prompt).lower() if answer == '': answer = default if answer in ['no', 'n']: return False elif answer in ['yes', 'y']: return True
496137bcd3d99a3f0bcc5bb87ab3dc090f8fc414
706,965
def decode(encoded: list): """Problem 12: Decode a run-length encoded list. Parameters ---------- encoded : list The encoded input list Returns ------- list The decoded list Raises ------ TypeError If the given argument is not of `list` type """ if not isinstance(encoded, list): raise TypeError('The argument given is not of `list` type.') decoded = [] for x in encoded: if isinstance(x, list): decoded.extend(x[0] * [x[1]]) else: decoded.append(x) return decoded
8fb273140509f5a550074c6d85e485d2dc1c79d0
706,966
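An example run of decode above on a made-up run-length encoded list; pairs are [count, element] and bare items pass through unchanged.

print(decode([[3, 'a'], 'b', [2, 'c']]))  # ['a', 'a', 'a', 'b', 'c', 'c']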
import random def create_offset(set_point_value): """Return the set point value shifted by a random offset between -1.28 and 1.28.""" offset_value = random.randint(-128, 128) offset_value_incrementation = float(offset_value / 100) return set_point_value - offset_value_incrementation
8b41ce32d98edd87c2317a971d87f9b74c3f1b6c
706,967
import re def clean_url(str_text_raw): """Remove URLs and url_/email_/phone_ placeholders from the given text""" str_text = re.sub("url_\S+", "", str_text_raw) str_text = re.sub("email_\S+", "", str_text) str_text = re.sub("phone_\S+", "", str_text) return(re.sub("http[s]?://\S+", "", str_text))
f14d4647bad72ec08aa64f19bbdd2726eb47d63b
706,968
def calculate_prec_at_k(k, prediction, target): """ Calculating precision at k. """ best_k_pred = prediction.argsort()[:k] best_k_target = target.argsort()[:k] return len(set(best_k_pred).intersection(set(best_k_target))) / k
61637938078b938e90f6ada70888512a97435ca1
706,969
def get_ttl(cur): """Get the 'extract' table as lines of Turtle (the lines are returned as a list).""" # Get ttl lines cur.execute( """WITH literal(value, escaped) AS ( SELECT DISTINCT value, replace(replace(replace(value, '\\', '\\\\'), '"', '\\"'), ' ', '\\n') AS escaped FROM tmp_extract ) SELECT '@prefix ' || prefix || ': <' || base || '> .' FROM prefix UNION ALL SELECT DISTINCT subject || ' ' || predicate || ' ' || coalesce( object, '"' || escaped || '"^^' || datatype, '"' || escaped || '"@' || language, '"' || escaped || '"' ) || ' .' FROM tmp_extract LEFT JOIN literal ON tmp_extract.value = literal.value;""" ) lines = [] for row in cur.fetchall(): line = row[0] if not line: continue # Replace newlines line = line.replace("\n", "\\n") lines.append(line) return lines
454b843bfc47b5a6f11cc06ea881773421499eed
706,970
def geom_cooling(temp, k, alpha = 0.95): """Geometric temperature decreasing.""" return temp * alpha
4263e4cc8a5de21d94bc560e8ff364d8c07f97fd
706,971
import re def bytes_to_escaped_str(data, keep_spacing=False, escape_single_quotes=False): """ Take bytes and return a safe string that can be displayed to the user. Single quotes are always escaped, double quotes are never escaped: "'" + bytes_to_escaped_str(...) + "'" gives a valid Python string. Args: keep_spacing: If True, tabs and newlines will not be escaped. """ if not isinstance(data, bytes): raise ValueError("data must be bytes, but is {}".format(data.__class__.__name__)) # We always insert a double-quote here so that we get a single-quoted string back # https://stackoverflow.com/questions/29019340/why-does-python-use-different-quotes-for-representing-strings-depending-on-their ret = repr(b'"' + data).lstrip("b")[2:-1] if not escape_single_quotes: ret = re.sub(r"(?<!\\)(\\\\)*\\'", lambda m: (m.group(1) or "") + "'", ret) if keep_spacing: ret = re.sub( r"(?<!\\)(\\\\)*\\([nrt])", lambda m: (m.group(1) or "") + dict(n="\n", r="\r", t="\t")[m.group(2)], ret ) return ret
fe8aa0ed3a8e3f2c7a2cf1aaeebc555b7281bde7
706,972
def is_polindrom(string): """ This function checks whether the given string is a palindrome or not. """ for i,char in enumerate(string): if char != string[-i-1]: return False return True
94e3cdb68c538da7b18e4567dc62fb35a58ebebb
706,973
def int_div_test(equation, val): """ Comparison for the integer division binary search. :equation: Equation to test :val: Input to the division """ r1 = equation(val) if r1 == None: return None r2 = equation(val - 1) if r2 == None: return None if r1 == 1 and r2 == 0: return 0 elif r1 >= 1: return 1 else: return -1
16b9106ddb1fc7472339019926a891c6c1942d18
706,975
import re def clean_caption(text): """ Remove brackets with photographer names or locations at the end of some captions :param text: a photo caption :return: text cleaned """ text = str(text) text = re.sub(r'\s*\[.+?\]$', '.', text) text = re.sub(r'\s*\(photo.+?\)', '', text) return re.sub(r'-- --.+', '.', text).strip()
f07713de58c8304e437904914c78f89c795d9776
706,976
import math def score_mod(mod, word_count, mod_count, mod_match_unlabel): """Compute the score of a pattern""" p = word_count[mod] u = len(mod_match_unlabel[mod]) t = mod_count[mod] return (p / t) * math.log(u + 1, 2) * math.log(p + 1, 2)
1184800a2b6a2ebfbbbdcbcbf4a0d8f8cb261e98
706,977
import json def read_anno_content(anno_file: str): """Read anno content.""" with open(anno_file) as opened: content = json.load(opened) return content
208d5f92d479ebfc0aa1e93d26ca68d3ce2a1e7e
706,978
def hex_form(hash): """Returns the hash formatted in hexadecimal form""" final_hash = '' for i in range(len(hash)): final_hash += format(hash[i], '02x') return final_hash
67c1d376352517a9f368dfc56f03f1af3d45e128
706,979
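hex_form above expects a raw bytes digest; a quick sketch with hashlib using the standard "abc" test vector.

import hashlib

digest = hashlib.sha1(b"abc").digest()
print(hex_form(digest))  # a9993e364706816aba3e25717850c26c9cd0d89d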
def user_tickets(raffle_prize, user): """Return the tickets allocated to the user for this raffle prize""" return raffle_prize.allocated_tickets(user)
a29c578713664018f639088539f2404fc7a63171
706,980
def get_corners(n): """Returns corner numbers of layer n""" end = (2*n + 1) * (2*n + 1) return [end-m*n for m in range(0,8,2)]
8d78135f13675d01fc2b6736b7c1fb1e7cf3e5f5
706,981
from datetime import datetime def datetime_to_timestamp(dt, epoch=datetime(1970,1,1)): """takes a python datetime object and converts it to a Unix timestamp. This is a non-timezone-aware function. :param dt: datetime to convert to timestamp :param epoch: datetime, optional specification of start of epoch [default: 1/1/1970] :return: timestamp """ td = dt - epoch return (td.microseconds / 10**6 + td.seconds + td.days * 86400)
2fbd5b3d6a56bc04066f7aaa8d4bef7c87a42632
706,982
def connectivity_dict_builder(edge_list, as_edges=False): """Builds connectivity dictionary for each vertex (node) - a list of connected nodes for each node. Args: edge_list (list): a list describing the connectivity e.g. [('E7', 'N3', 'N6'), ('E2', 'N9', 'N4'), ...] as_edges (bool): whether to return connected vertices / nodes or edges Returns: (dict): connectivity dictionary, each node is a key and the value is a set of connected nodes e.g. {'N3': {'N6', 'N11', 'N7'}, 'N9': {'N4'}, etc} """ connectivity_dict = {} for b, n1, n2 in edge_list: n_set = connectivity_dict.get(n1,set()) n_set.add(b if as_edges else n2) connectivity_dict[n1] = n_set n_set = connectivity_dict.get(n2,set()) n_set.add(b if as_edges else n1) connectivity_dict[n2] = n_set return connectivity_dict
58f24c6465fa1aaccca92df4d06662b0ce1e1e77
706,983
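A minimal example for connectivity_dict_builder above; the edge and node names are invented, and set ordering in the output may vary.

edges = [("E1", "N1", "N2"), ("E2", "N2", "N3")]
print(connectivity_dict_builder(edges))
# {'N1': {'N2'}, 'N2': {'N1', 'N3'}, 'N3': {'N2'}}
print(connectivity_dict_builder(edges, as_edges=True))
# {'N1': {'E1'}, 'N2': {'E1', 'E2'}, 'N3': {'E2'}}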
import socket def init_socket(): """Returns a fresh socket""" return socket.socket()
429d790f3007a357d4a14d57066d890f14f42178
706,984
import os def get_file_size(path: str): """ Return the size of a file, reported by os.stat(). Args: path: File path. """ return os.path.getsize(path)
f6e7dc89c1fc046f1492bad43eae8c8a14e335af
706,985
import torch def predict(model, dataloader): """Returns: numpy arrays of true labels and predicted probabilities.""" device = torch.device("cuda" if torch.cuda.is_available() else "cpu") model.to(device) model.eval() labels = [] probs = [] for batch_idx, batch in enumerate(dataloader): inputs, label = batch inputs = inputs.to(device) label = label.to(device) labels.append(label) outputs = model(inputs) probs.append(torch.sigmoid(outputs[:, 1])) labels = torch.cat(labels).cpu().numpy() probs = torch.cat(probs).cpu().numpy() return labels, probs
1e4b6e1f72127174a8bdbc693665ace8cbe8e4af
706,986
def get_model_config(model): """Returns hyper-parameters for the given model""" if model == 'maml': return 0.1, 0.5, 5 if model == 'fomaml': return 0.1, 0.5, 100 return 0.1, 0.1, 100
dcdfb3c00026a172b22611ad3203a7c32d8e59d7
706,987
def find_longest_substring(s: str, k: int) -> str: """ Speed: ~O(N) Memory: ~O(1) :param s: :param k: :return: """ # longest substring (found) lss = "" # current longest substring c_lss = "" # current list of characters for the current longest substring c_c = [] i = 0 for i, c in enumerate(s): # current character is in list of characters of the current substring ? if c in c_c: # if yes, increase/update current substring c_lss += c else: # else # Can we add the new character in the current substring ? if len(c_c) < k: # if yes: increase/updating the current substring c_lss += c else: # else => compare the current result (substring) & start a new substring research # compare the current substring with the longest substring found as far # Current substring is larger ? if len(c_lss) > len(lss): # if yes: update the longest substring lss = c_lss # in any case => start a new substring research # first element is: the last character of the previous current substring c_c = [c_lss[-1]] c_lss = c_lss[-1] + c # Early exit: at this moment, can we found a larger substring ? if (len(s) - i + len(c_lss)) <= len(lss): break # add the new character in list of current character for substring c_c += [c] # perform a last comparaison for current substring if len(c_lss) > len(lss): lss = c_lss # print(len(s) - i - 1) return lss
78936d140ea1e54945c6b4dd849b38f0c5604a36
706,988
def _fixTool2(scModel,gopLoader): """ :param scModel: :param gopLoader: :return: @type scModel: ImageProjectModel """ def replace_tool(tool): return 'jtui' if 'MaskGenUI' in tool else tool modifier_tools = scModel.getGraph().getDataItem('modifier_tools') if modifier_tools is not None: scModel.getGraph().setDataItem('modifier_tools', [replace_tool(x) for x in modifier_tools]) creator_tool= scModel.getGraph().getDataItem('creator_tool') scModel.getGraph().setDataItem('creator_tool', replace_tool(creator_tool))
3eb3bf8a47514a28c2e699a2eeefb084f9f7923b
706,989
import re def name_convert_to_camel(name: str) -> str: """Convert a snake_case name to camelCase""" contents = re.findall('_[a-z]+', name) for content in set(contents): name = name.replace(content, content[1:].title()) return name
109a1035a3efa98861b6a419206823b1114268e2
706,990
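Example calls to name_convert_to_camel above; the identifiers are arbitrary.

print(name_convert_to_camel("user_name"))      # userName
print(name_convert_to_camel("long_var_name"))  # longVarName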
import decimal def as_decimal(dct): """Decodes the Decimal datatype.""" if '__Decimal__' in dct: return decimal.Decimal(dct['__Decimal__']) return dct
d25b3ff73d7559a9018666d5f2cd189e6503a268
706,991
def calc_recall(TP, FN): """ Calculate recall from TP and FN """ if TP + FN != 0: recall = TP / (TP + FN) else: recall = 0 return recall
8f3513e11f8adad111eee32740c271aad31fbe28
706,992
def make_segment(segment, discontinuity=False): """Create a playlist response for a segment.""" response = [] if discontinuity: response.append("#EXT-X-DISCONTINUITY") response.extend(["#EXTINF:10.0000,", f"./segment/{segment}.m4s"]), return "\n".join(response)
8419b100409934f902c751734c396bc72d8a6917
706,993
from typing import Any def from_dicts(key: str, *dicts, default: Any = None): """ Returns value of key in first matching dict. If no dict contains the key, the default value is returned. Return: Any """ for d in dicts: if key in d: return d[key] return default
508febc48fd22d3a23dc0500b0aa3824c99fdbc3
706,994
def time_in_words(h, m): """Hackerrank Problem: https://www.hackerrank.com/challenges/the-time-in-words/problem Given the time in numerals we may convert it into words, as shown below: ---------------------------------------------- | 5:00 | -> | five o' clock | | 5:01 | -> | one minute past five | | 5:10 | -> | ten minutes past five | | 5:15 | -> | quarter past five | | 5:30 | -> | half past five | | 5:40 | -> | twenty minutes to six | | 5:45 | -> | quarter to six | | 5:47 | -> | thirteen minutes to six | | 5:28 | -> | twenty eight minutes past five | ---------------------------------------------- At minutes = 0, use o' clock. For 1 <= minutes <= 30, use past, and for 30 < minutes use to. Note the space between the apostrophe and clock in o' clock. Write a program which prints the time in words for the input given in the format described. Args: h (int): hour of the day m (int): minutes after the hour Returns: str: string representation of the time """ time = ["one", "two", "three", "four", "five", "six", "seven", "eight", "nine", "ten", "eleven", "twelve", "thirteen", "fourteen", "fifteen", "sixteen", "seventeen", "eighteen", "nineteen", "twenty", "twenty one", "twenty two", "twenty three", "twenty four", "twenty five", "twenty six", "twenty seven", "twenty eight", "twenty nine"] # We check for a certain set of cases: # Case 1 - we're on the hour, so we use o' clock if m == 0: return "{0} o' clock".format(time[h-1]) # Case 2 - we're one minute after, so we use minute (versus minutes later on to describe the time) if m == 1: return "{0} minute past {1}".format(time[m-1], time[h-1]) # Case 3 - we're a quarter past the hour if m == 15: return "quarter past {0}".format(time[h-1]) # Case 4 - we're half past the hour if m == 30: return "half past {0}".format(time[h-1]) # Case 5 - we're a quarter to the next hour if m == 45: return "quarter to {0}".format(time[h]) # Case 6 - we check for minutes after the hour, which is until we hit minute 30 if m < 30: return "{0} minutes past {1}".format(time[m-1], time[h-1]) # Case 7 - this covers the cases where the minutes are after 30 so we're mintues to the next hour return "{0} minutes to {1}".format(time[59-m], time[h])
85f2247f01df36ef499105a9940be63eee189100
706,995
def majorityElement(nums): """Find the elements that appear more than ⌊n/3⌋ times; there can be at most two of them""" num1, num2 = -1, -1 count1, count2 = 0, 0 for i in range(len(nums)): curNum = nums[i] if curNum == num1: count1 += 1 elif curNum == num2: count2 += 1 elif count1 == 0: num1 = curNum count1 = 1 elif count2 == 0: num2 = curNum count2 = 1 else: count1 -= 1 count2 -= 1 count1, count2 = 0, 0 for n in nums: if n == num1: count1 += 1 elif n == num2: count2 += 1 print("num1: {}, count1: {}; num2: {}, count2: {}".format(num1, count1, num2, count2)) numLens = len(nums) ret = [] if count1 > numLens//3: ret.append(num1) if count2 > numLens//3: ret.append(num2) return ret
ef71fa445c3bc16bbaf79a1ab4e9548125e71b7b
706,996
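Two illustrative calls to majorityElement above (the function also prints its candidate counts before returning).

print(majorityElement([3, 2, 3]))                 # [3]
print(majorityElement([1, 1, 1, 3, 3, 2, 2, 2]))  # [1, 2]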