Columns: content (string, lengths 35 to 416k), sha1 (string, length 40), id (int64, 0 to 710k)
import math


def distance_on_unit_sphere(FoLat, FoLng, ToLat, ToLng):
    """Convert latitude and longitude to spherical coordinates in radians."""
    phi1 = math.radians(90.0 - FoLat)
    phi2 = math.radians(90.0 - ToLat)
    theta1 = math.radians(FoLng)
    theta2 = math.radians(ToLng)

    # Compute spherical distance from spherical coordinates.
    # For two locations in spherical coordinates (1, theta, phi) and
    # (1, theta', phi'):
    #   cosine(arc length) = sin phi sin phi' cos(theta - theta') + cos phi cos phi'
    #   distance = rho * arc length
    cos = (math.sin(phi1) * math.sin(phi2) * math.cos(theta1 - theta2)
           + math.cos(phi1) * math.cos(phi2))
    arc = math.acos(cos)

    # Remember to multiply arc by the radius of the earth in your favorite
    # set of units to get length.
    return arc
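A quick usage sketch for distance_on_unit_sphere above; the city coordinates and the 6371 km Earth radius are illustrative assumptions, not part of the record:

# Great-circle distance London -> Paris, scaled by Earth's radius in km.
arc = distance_on_unit_sphere(51.5074, -0.1278, 48.8566, 2.3522)
print(arc * 6371)  # roughly 343 km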
98c9294697e36c5b45cd165ba96529187f2750de
706,456
import pandas  # noqa


def check_pandas_support(caller_name):
    """Raise ImportError with detailed error message if pandas is not installed.

    Plot utilities like :func:`fetch_openml` should lazily import pandas and
    call this helper before any computation.

    Parameters
    ----------
    caller_name : str
        The name of the caller that requires pandas.
    """
    try:
        return pandas
    except ImportError as e:
        raise ImportError(
            "{} requires pandas.".format(caller_name)
        ) from e
f3d484bb3a5dbca43a81cca83b7343e1fcd7cbcf
706,457
def encode_line(line, vocab):
    """Given a string and a vocab dict, encodes the given string"""
    line = line.strip()
    sequence = [vocab.get(char, vocab['<UNK>']) for char in line]
    sequence_length = len(sequence)
    return sequence, sequence_length
feb14d86dd6c219d57cffc4cd9d90d16c4e9c987
706,458
import math


def distance(s1, s2):
    """
    Euclidean distance between two sequences. Supports different lengths.

    If the two series differ in length, compare the last element of the
    shortest series to the remaining elements in the longer series. This is
    compatible with Euclidean distance being used as an upper bound for DTW.

    :param s1: Sequence of numbers
    :param s2: Sequence of numbers
    :return: Euclidean distance
    """
    n = min(len(s1), len(s2))
    ub = 0
    for v1, v2 in zip(s1, s2):
        ub += (v1 - v2) ** 2
    # If the two series differ in length, compare the last element of the
    # shortest series to the remaining elements in the longer series
    if len(s1) > len(s2):
        v2 = s2[n - 1]
        for v1 in s1[n:]:
            ub += (v1 - v2) ** 2
    elif len(s1) < len(s2):
        v1 = s1[n - 1]
        for v2 in s2[n:]:
            ub += (v1 - v2) ** 2
    return math.sqrt(ub)
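A usage sketch for distance above, with illustrative values; equal-length inputs reduce to plain Euclidean distance, and the tail of a longer series is compared against the shorter one's last element:

print(distance([0, 0], [3, 4]))        # 5.0
print(distance([1, 2], [1, 2, 4, 5]))  # sqrt((4-2)**2 + (5-2)**2), about 3.606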
61c308da89b98b4bbde1bba690c86559fd5e1400
706,460
import re


def valid_attribute(attr_filter_key, attr_filter_val, hit):
    """Validates the hit according to a filter attribute."""
    if (attr_filter_key != "None") and (attr_filter_val != "None"):
        try:
            # If key for filtering is not correct or doesn't exist -> error
            # should be ignored
            hit_attrib_val = re.split(
                "; " + attr_filter_key + " ",
                hit[8])[1].split(';')[0].strip('"\'').rstrip('\"')
        except IndexError:
            # if key doesn't exist re.split will give error
            hit_attrib_val = "not.found"
        # If biotype of hit == attr_value from query -> continue annotation
        return attr_filter_val == hit_attrib_val
    else:
        return True
c7480008f24e011f0803d82f1243a5d00c5a4030
706,461
def mirror_notes(key_position: int) -> int:
    """
    Returns the mirrored value of the given key position.

    Arguments
    ---------
    key_position : int -> the key position

    Returns
    -------
    int -> the key position after mirroring
    """
    return 512 - key_position
03ad894eca67405bb79cbf6ea1ecef12b19958ed
706,462
def arithmetic_series(a: int, n: int, d: int = 1) -> int:
    """Returns the sum of the arithmetic sequence with parameters a, n, d.

    a: The first term in the sequence
    n: The total number of terms in the sequence
    d: The difference between any two terms in the sequence
    """
    return n * (2 * a + (n - 1) * d) // 2
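A quick check of arithmetic_series above on illustrative inputs:

print(arithmetic_series(1, 100))   # 1 + 2 + ... + 100 = 5050
print(arithmetic_series(2, 5, 3))  # 2 + 5 + 8 + 11 + 14 = 40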
168f0b07cbe6275ddb54c1a1390b41a0f340b0a6
706,463
def get_external_repos(gh):
    """
    Get all external repositories from the `repos.config` file
    """
    external_repos = []
    with open("repos.config") as f:
        content = f.readlines()
    content = [x.strip() for x in content]
    for entry in content:
        org_name, repo_name = entry.split('/')
        external_repos.append(gh.get_organization(org_name).get_repo(repo_name))
    return external_repos
a83515acd77c7ef9e30bf05d8d4478fa833ab5bc
706,465
def get_agent(runmode, name):  # noqa: E501
    """get_agent

     # noqa: E501

    :param runmode:
    :type runmode: str
    :param name:
    :type name: str

    :rtype: None
    """
    return 'do some magic!'
065302bb7793eff12973208db5f35f3494a83930
706,466
def find_splits(array1: list, array2: list) -> list:
    """Find the split points of the given array of events"""
    keys = set()
    for event in array1:
        keys.add(event["temporalRange"][0])
        keys.add(event["temporalRange"][1])
    for event in array2:
        keys.add(event["temporalRange"][0])
        keys.add(event["temporalRange"][1])
    return list(sorted(keys))
c52f696caddf35fa050621e7668eec06686cee14
706,467
def to_subtask_dict(subtask):
    """
    :rtype: ``dict``
    """
    result = {
        'id': subtask.id,
        'key': subtask.key,
        'summary': subtask.fields.summary
    }
    return result
5171d055cc693b1aa00976c063188a907a7390dc
706,468
def vectorize_with_similarities(text, vocab_tokens, vocab_token_to_index, vocab_matrix):
    """
    Generate a vector representation of a text string based on a word
    similarity matrix. The resulting vector has n positions, where n is the
    number of words or tokens in the full vocabulary. The value at each
    position indicates the maximum similarity between that corresponding word
    in the vocabulary and any of the words or tokens in the input text string,
    as given by the input similarity matrix. Therefore, this is similar to an
    n-grams approach but uses the similarity between non-identical words or
    tokens to make the vector semantically meaningful.

    Args:
        text (str): Any arbitrary text string.
        vocab_tokens (list of str): The words or tokens that make up the
            entire vocabulary.
        vocab_token_to_index (dict of str:int): Mapping between words in the
            vocabulary and an index in rows and columns of the matrix.
        vocab_matrix (numpy.array): A pairwise distance matrix holding the
            similarity values between all possible pairs of words in the
            vocabulary.

    Returns:
        numpy.Array: A numerical vector with length equal to the size of the
        vocabulary.
    """
    doc_tokens = [token for token in text.split() if token in vocab_tokens]
    vector = [max([vocab_matrix[vocab_token_to_index[vocab_token]][vocab_token_to_index[doc_token]]
                   for doc_token in doc_tokens])
              for vocab_token in vocab_tokens]
    return vector
5b843ffbfdefbf691fb5766bbe6772459568cf78
706,469
def is_word(s):
    """
    String `s` counts as a word if it has at least one letter.
    """
    for c in s:
        if c.isalpha():
            return True
    return False
524ed5cc506769bd8634a46d346617344485e5f7
706,470
def index_all_messages(empty_index):
    """
    Expected index of `initial_data` fixture when model.narrow = []
    """
    return dict(empty_index, **{'all_msg_ids': {537286, 537287, 537288}})
ea2c59a4de8e62d2293f87e26ead1b4c15f15a11
706,471
def Bern_to_Fierz_nunu(C, ddll):
    """From semileptonic Bern basis to Fierz semileptonic basis for Class V.
    C should be the corresponding leptonic Fierz basis and `ddll` should be
    of the form 'sbl_enu_tau', 'dbl_munu_e' etc."""
    ind = ddll.replace('l_', '').replace('nu_', '')
    return {
        'F' + ind + 'nu': C['nu1' + ind],
        'F' + ind + 'nup': C['nu1p' + ind],
    }
4f08f79d6614c8929c3f42096fac71b04bfe7b4b
706,472
import six


def format_host(host_tuple):
    """
    Format a host tuple to a string
    """
    if isinstance(host_tuple, (list, tuple)):
        if len(host_tuple) != 2:
            raise ValueError('host_tuple has unexpected length: %s' % host_tuple)
        return ':'.join([six.text_type(s) for s in host_tuple])
    elif isinstance(host_tuple, six.string_types):
        return host_tuple
    else:
        raise ValueError('host_tuple unexpected type: (%s) %s'
                         % (type(host_tuple), host_tuple))
f4822aec5143a99ccc52bb2657e1f42477c65400
706,473
def submission_parser(reddit_submission_object):
    """Parses a submission and returns selected parameters"""
    post_timestamp = reddit_submission_object.created_utc
    post_id = reddit_submission_object.id
    score = reddit_submission_object.score
    ups = reddit_submission_object.ups
    downs = reddit_submission_object.downs
    # post_body = np.nan
    thread_title = reddit_submission_object.title
    thread_url = reddit_submission_object.url
    subreddit = reddit_submission_object.subreddit.display_name
    return post_timestamp, post_id, score, ups, downs, thread_title, thread_url, subreddit
d2b406f38e799230474e918df91d55e48d27f385
706,474
import math


def fnCalculate_Bistatic_Coordinates(a, B):
    """
    Calculate the coordinates of the target in the bistatic plane
    A, B, C = angles in the triangle
    a, b, c = length of the side opposite the angle
    Created: 22 April 2017
    """
    u = a * math.cos(B)
    v = a * math.sin(B)
    return u, v
cc1dce6ef0506b987e42e3967cf36ea7b46a30d7
706,475
def location_parser(selected_variables, column):
    """
    Parse the location variable by creating a list of tuples.

    Remove the hyphen between the start/stop positions. Convert all elements
    to integers and create a list of tuples.

    Parameters:
        selected_variables (dataframe): The dataframe containing the location
            of the variables contained in the cps_selected_variables file
        column (character): The name of the column containing the start/stop
            positions

    Returns:
        selected_fields: A list of tuples containing the start/stop positions
    """
    fields = []
    for field in selected_variables[column]:
        field = field.split('-')
        field = [int(i) for i in field]
        fields.append(field)
    return fields
106f669269276c37652e92e62eb8c2c52dfe7637
706,476
import torch
import math


def get_qmf_bank(h, n_band):
    """
    Modulates an input prototype filter into a bank of cosine modulated filters

    Parameters
    ----------
    h: torch.Tensor
        prototype filter
    n_band: int
        number of sub-bands
    """
    k = torch.arange(n_band).reshape(-1, 1)
    N = h.shape[-1]
    t = torch.arange(-(N // 2), N // 2 + 1)

    p = (-1) ** k * math.pi / 4
    mod = torch.cos((2 * k + 1) * math.pi / (2 * n_band) * t + p)
    hk = 2 * h * mod

    return hk
87e8cf3b0d85a6717cce9dc09f7a0a3e3581e498
706,477
def action_to_upper(action):
    """
    action to upper receives an action in pddl_action_representation, and
    returns it in upper case.

    :param action: An action in PddlActionRepresentation
    :return: PddlActionRepresentation: The action in upper case
    """
    if action:
        action.name = action.name.upper()
        action.types = [type.upper() for type in action.types]
        action.predicates = [pred.upper() for pred in action.predicates]
        action.requirements = [req.upper() for req in action.requirements]
        action.action = action.action.upper()
    return action
e9266ad79d60a58bf61d6ce81284fa2accbb0b8d
706,478
def _get_param_combinations(lists):
    """Recursive function which generates a list of all possible parameter values"""
    if len(lists) == 1:
        list_p_1 = [[e] for e in lists[0]]
        return list_p_1
    list_p_n_minus_1 = _get_param_combinations(lists[1:])
    list_p_1 = [[e] for e in lists[0]]
    list_p_n = [p_1 + p_n_minus_1
                for p_1 in list_p_1
                for p_n_minus_1 in list_p_n_minus_1]
    return list_p_n
b4903bea79aebeabf3123f03de986058a06a21f4
706,479
def sanitize_tag(tag: str) -> str:
    """Clean tag by replacing empty spaces with underscore.

    Parameters
    ----------
    tag: str

    Returns
    -------
    str
        Cleaned tag

    Examples
    --------
    >>> sanitize_tag(" Machine Learning ")
    'Machine_Learning'
    """
    return tag.strip().replace(" ", "_")
40ac78846f03e8b57b5660dd246c8a15fed8e008
706,480
def find_named_variables(mapping):
    """Find correspondence between variable and relation and its attribute."""
    var_dictionary = dict()
    for relation_instance in mapping.lhs:
        for i, variable in enumerate(relation_instance.variables):
            name = relation_instance.relation.name
            field = relation_instance.relation.fields[i]
            if variable not in var_dictionary.keys():
                var_dictionary.update({variable: []})
                var_dictionary[variable].append((name, field))
            else:
                if (name, field) not in var_dictionary[variable]:
                    var_dictionary[variable].append((name, field))
    return var_dictionary
0b9a78ca94b25e7a91fe88f0f15f8a8d408cb2fd
706,481
def smoothed_epmi(matrix, alpha=0.75):
    """
    Performs smoothed epmi. See smoothed_ppmi for more info.

    Derived from this:

        #(w,c) / #(TOT)
        --------------
        (#(w) / #(TOT)) * (#(c)^a / #(TOT)^a)

        ==>

        #(w,c) / #(TOT)
        --------------
        (#(w) * #(c)^a) / #(TOT)^(a+1)

        ==>

        #(w,c)
        ----------
        (#(w) * #(c)^a) / #(TOT)^a

        ==>

        #(w,c) * #(TOT)^a
        ----------
        #(w) * #(c)^a
    """
    row_sum = matrix.sum(axis=1)
    col_sum = matrix.sum(axis=0).power(alpha)
    total = row_sum.sum(axis=0).power(alpha)[0, 0]
    inv_col_sum = 1 / col_sum  # shape (1,n)
    inv_row_sum = 1 / row_sum  # shape (n,1)
    inv_col_sum = inv_col_sum * total

    mat = matrix * inv_row_sum
    mat = mat * inv_col_sum
    return mat
e2f72c4169aee2f394445f42e4835f1b55f347c9
706,482
import six


def encode(input, errors='strict'):
    """
    convert from unicode text (with possible UTF-16 surrogates) to wtf-8
    encoded bytes. If this is a python narrow build this will actually
    produce UTF-16 encoded unicode text (e.g. with surrogates).
    """
    # method to convert surrogate pairs to unicode code points permitting
    # lone surrogate pairs (aka potentially ill-formed UTF-16)
    def to_code_point(it):
        hi = None
        try:
            while True:
                c = ord(next(it))
                if c >= 0xD800 and c <= 0xDBFF:
                    # high surrogate
                    hi = c
                    c = ord(next(it))
                    if c >= 0xDC00 and c <= 0xDFFF:
                        # paired
                        c = 0x10000 + ((hi - 0xD800) << 10) + (c - 0xDC00)
                    else:
                        yield hi
                    hi = None
                yield c
        except StopIteration:
            if hi is not None:
                yield hi

    buf = six.binary_type()
    for code in to_code_point(iter(input)):
        if 0 == (code & 0xFFFFFF80):
            # one byte: U+0000..U+007F; no trailing continuation byte
            buf += six.int2byte(code)
            continue
        elif 0 == (code & 0xFFFFF800):
            # two bytes: U+0080..U+07FF
            buf += six.int2byte(((code >> 6) & 0x1F) | 0xC0)
        elif 0 == (code & 0xFFFF0000):
            # three bytes: U+0800..U+FFFF
            buf += six.int2byte(((code >> 12) & 0x0F) | 0xE0)
            buf += six.int2byte(((code >> 6) & 0x3F) | 0x80)
        elif 0 == (code & 0xFFE00000):
            # four bytes: the mask admits the full 21-bit range, so code
            # points up to U+10FFFF (including plane 16) are encoded
            buf += six.int2byte(((code >> 18) & 0x07) | 0xF0)
            buf += six.int2byte(((code >> 12) & 0x3F) | 0x80)
            buf += six.int2byte(((code >> 6) & 0x3F) | 0x80)
        # shared final continuation byte for all multi-byte sequences
        buf += six.int2byte((code & 0x3F) | 0x80)
    return buf, len(buf)
525199690f384304a72176bd1eaeeb1b9cb30880
706,483
def concat(l1, l2):
    """
    Join two possibly None lists
    """
    if l1 is None:
        return l2
    if l2 is None:
        return l1
    return l1 + l2
9e87bead7eedc4c47f665808b9e0222437bc01b5
706,484
def skip():
    """ Decorator for marking test function that should not be executed."""
    def wrapper(fn):
        fn.__status__ = "skip"
        return fn
    return wrapper
0b966c306515073bfb52427b78c65822ee09a060
706,485
def imthresh(im, thresh):
    """
    Sets pixels in image below threshold value to 0

    Args:
        im (ndarray): image
        thresh (float): threshold

    Returns:
        ndarray: thresholded image
    """
    thresh_im = im.copy()
    thresh_im[thresh_im < thresh] = 0
    return thresh_im
180dc1eba6320c21273e50e4cf7b3f28c786b839
706,486
import re


def remove_links(txt: str):
    """ Remove weblinks from the text """
    pattern = r'[-a-zA-Z0-9@:%._\+~#=]{1,256}\.[a-zA-Z0-9()]{1,6}\b([-a-zA-Z0-9()@:%_\+.~#?&//=]*)'
    txt = re.sub(pattern, " ", txt)
    txt = re.sub('http|https', " ", txt)
    return txt
4ccaae84d12ab47e70482d15100ba2e60ef476e8
706,487
def is_generic_alias_of(to_check, type_def):
    """
    :param to_check: the type that is supposed to be a generic alias of
        ``type_def`` if this function returns ``True``.
    :param type_def: the type that is supposed to be a generic version of
        ``to_check`` if this function returns ``True``.
    :return: ``True`` if ``to_check`` is a generic alias of ``type_def``,
        ``False`` otherwise.
    """
    if isinstance(to_check, type) and issubclass(to_check, type_def):
        return True
    origin = getattr(to_check, "__origin__", None)
    if origin is not None:
        return issubclass(origin, type_def)
    return False
d09b255e9ff44a65565196dd6643564aea181433
706,489
def phedex_url(api=''):
    """Return Phedex URL for given API name"""
    return 'https://cmsweb.cern.ch/phedex/datasvc/json/prod/%s' % api
a642cd138d9be4945dcbd924c7b5c9892de36baa
706,490
import os


def get_autotune_level() -> int:
    """Get the autotune level.

    Returns:
        The autotune level.
    """
    return int(os.environ.get("BAGUA_AUTOTUNE", 0))
661fc4a7580fffdc7eef18ff7eb22e56ece2b468
706,491
def revcumsum(U):
    """
    Reverse cumulative sum for faster performance.
    """
    return U.flip(dims=[0]).cumsum(dim=0).flip(dims=[0])
da147820073f5be9d00b137e48a28d726516dcd0
706,492
import torch


def format_attn(attention_tuples: tuple):
    """
    Input: N tuples (N = layer num)
        Each tuple item is Tensor of shape Batch x num heads x from x to
    Output: Tensor of shape layer x from x to (averaged over heads)
    """
    # Combine tuples into large Tensor, then avg
    return torch.cat([l for l in attention_tuples], dim=0).mean(dim=1)
8d25d081992099835a21cdbefb406f378350f983
706,493
def total_curtailment_expression_rule(mod, g, tmp):
    """
    **Expression Name**: GenVar_Total_Curtailment_MW
    **Defined Over**: GEN_VAR_OPR_TMPS

    Available energy that was not delivered
    There's an adjustment for subhourly reserve provision:
    1) if downward reserves are provided, they will be called upon
    occasionally, so power provision will have to decrease and additional
    curtailment will be incurred;
    2) if upward reserves are provided (energy is being curtailed), they
    will be called upon occasionally, so power provision will have to
    increase and less curtailment will be incurred
    The subhourly adjustment here is a simple linear function of reserve

    Assume cap factors don't incorporate availability derates, so don't
    multiply capacity by Availability_Derate here (will count as curtailment).
    """
    return (
        mod.Capacity_MW[g, mod.period[tmp]]
        * mod.gen_var_cap_factor[g, tmp]
        - mod.GenVar_Provide_Power_MW[g, tmp]
        + mod.GenVar_Subhourly_Curtailment_MW[g, tmp]
        - mod.GenVar_Subhourly_Energy_Delivered_MW[g, tmp]
    )
9a1466dbbbc945b30c1df04dc86a2134b3d0659a
706,494
def reverse_string(string):
    """Solution to exercise C-4.16.

    Write a short recursive Python function that takes a character string s
    and outputs its reverse. For example, the reverse of "pots&pans" would
    be "snap&stop".
    """
    n = len(string)

    def recurse(idx):
        if idx == 0:
            return string[0]  # Base case, decremented to beginning of string
        return string[idx] + recurse(idx - 1)

    return recurse(n - 1)
6d4472fb9c042939020e8b819b4c9b705afd1e60
706,495
def _sp_sleep_for(t: int) -> str:
    """Return the subprocess cmd for sleeping for `t` seconds."""
    return 'python -c "import time; time.sleep({})"'.format(t)
20ac8022a2438ceb62123f534ba5911b7c560502
706,497
import hashlib


def sha256(firmware_filename, firmware_size=None):
    """Returns the sha256 hash of the firmware"""
    hasher = hashlib.sha256()
    # If firmware size is supplied, then we want a sha256 of the firmware
    # with its header
    if firmware_size is not None:
        hasher.update(b"\x00" + firmware_size.to_bytes(4, "little"))
    with open(firmware_filename, "rb", buffering=0) as file:
        while True:
            chunk = file.read(128)
            if not chunk:
                break
            hasher.update(chunk)
    return hasher.digest()
62fabc35796b9fe21ca2489b317550f93f6774ca
706,498
def is_serial_increased(old, new):
    """
    Return true if serial number was increased using RFC 1982 logic.
    """
    old, new = (int(n) for n in [old, new])
    diff = (new - old) % 2**32
    return 0 < diff < (2**31 - 1)
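A usage sketch for is_serial_increased above; the serial values are illustrative. RFC 1982 arithmetic is modulo 2**32, so a numerically smaller serial can still count as an increase:

print(is_serial_increased(2021010100, 2021010101))  # True: simple bump
print(is_serial_increased(4294967295, 10))          # True: wrapped past 2**32 - 1
print(is_serial_increased(2021010101, 2021010100))  # False: went backwards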
44a33a1c7e8caebe3b74284002c7c4be6ac29b40
706,499
import decimal


def split_amount(amount, splits, places=2):
    """Return list of ``splits`` amounts where sum of items equals ``amount``.

    >>> from decimal import Decimal
    >>> split_amount(Decimal('12'), 1)
    [Decimal('12.00')]
    >>> split_amount(Decimal('12'), 2)
    [Decimal('6.00'), Decimal('6.00')]

    Amounts have a max of ``places`` decimal places. Last amount in the list
    may not be the same as others (will always be lower than or equal to
    others).

    >>> split_amount(Decimal('100'), 3)
    [Decimal('33.34'), Decimal('33.34'), Decimal('33.32')]
    >>> split_amount(Decimal('100'), 3, 4)
    [Decimal('33.3334'), Decimal('33.3334'), Decimal('33.3332')]
    >>> split_amount(Decimal('12'), 7)  # Doctest: +ELLIPSIS
    [Decimal('1.72'), ..., Decimal('1.72'), ..., Decimal('1.68')]
    >>> split_amount(Decimal('12'), 17)  # Doctest: +ELLIPSIS
    [Decimal('0.71'), ..., Decimal('0.71'), Decimal('0.64')]
    """
    one = decimal.Decimal(10) ** -places
    amount = amount.quantize(one)
    with decimal.localcontext() as decimal_context:
        decimal_context.rounding = decimal.ROUND_UP
        upper_split = (amount / splits).quantize(one)
        splitted_amounts = [upper_split] * (splits - 1)
        lower_split = amount - sum(splitted_amounts)
        splitted_amounts.append(lower_split)
        return splitted_amounts
8c8a17ed9bbcab194550ea78a9b414f51ca5610d
706,500
from datetime import timedelta


def shift_compare_date(df, date_field, smaller_eq_than_days=1, compare_with_next=False):
    """
    ATTENTION: this DataFrame needs to be sorted!!!
    """
    if compare_with_next:
        s = (
            (df[date_field].shift(-1) - df[date_field])
            <= timedelta(days=smaller_eq_than_days)
        ) & (
            (df[date_field].shift(-1) - df[date_field]) > timedelta(days=0)
        )
    else:
        s = (
            (df[date_field] - df[date_field].shift(1))
            <= timedelta(days=smaller_eq_than_days)
        ) & (
            (df[date_field] - df[date_field].shift(1)) >= timedelta(days=0)
        )
    return s
56d4466f61cb6329ec1e365ad74f349d6043dd0a
706,501
def format_alleles(variant):
    """Gets a string representation of the variant's alleles.

    Args:
        variant: nucleus.genomics.v1.Variant.

    Returns:
        A string ref_bases/alt1,alt2 etc.
    """
    return '{}/{}'.format(variant.reference_bases,
                          ','.join(variant.alternate_bases))
775fe3e112ff0b7e73780600e0621a8695fa5ad0
706,502
def parse_dict(input_data):
    """Return a rules dict of the format:
    {
        'light red': [(1, 'bright white'), (2, 'muted yellow')],
        'dark orange': [(3, 'bright white'), (4, 'muted yellow')],
        'faded blue': [(0, 'bags')]
    }
    """
    bags = dict()
    for line in input_data.split('\n'):
        outer, inner = line.strip().split(' bags contain ')
        inner = [i.split(' ') for i in inner.split(", ")]
        if 'no' in inner[0]:
            bags[outer] = [(0, 'bags')]
        else:
            bags[outer] = [(int(i[0]), ' '.join(i[1:3])) for i in inner]
    return bags
a1aad66a16e4754c35c9b3518d5641096e393530
706,503
import argparse


def parse_args():
    """Parse command line arguments"""
    parser = argparse.ArgumentParser()
    parser.add_argument('-i', '--input-dir',
                        help="Directory which contains the input data",
                        required=True)
    parser.add_argument('-o', '--output-dir',
                        help="Directory which will hold the output data",
                        required=True)
    parser.add_argument('-p', '--num-processes', default=4,
                        help="Number of processes to spawn for file conversion")
    parser.add_argument('-c', '--compression', default=None,
                        help="Compression Type.")
    return parser.parse_args()
aeabf459569b5981eba66460ba0bbf15fb4c96f7
706,504
from typing import List
from typing import Dict


def _clean_empty_and_duplicate_authors_from_grobid_parse(authors: List[Dict]) -> List[Dict]:
    """
    Within affiliation, `location` is a dict with fields <settlement>,
    <region>, <country>, <postCode>, etc. Too much hassle, so just take the
    first one that's not empty.
    """
    # stripping empties
    clean_authors_list = []
    for author in authors:
        clean_first = author['first'].strip()
        clean_last = author['last'].strip()
        clean_middle = [m.strip() for m in author['middle']]
        clean_suffix = author['suffix'].strip()
        if clean_first or clean_last or clean_middle:
            author['first'] = clean_first
            author['last'] = clean_last
            author['middle'] = clean_middle
            author['suffix'] = clean_suffix
            clean_authors_list.append(author)
    # combining duplicates (preserve first occurrence of author name as position)
    key_to_author_blobs = {}
    ordered_keys_by_author_pos = []
    for author in clean_authors_list:
        key = (author['first'], author['last'], ' '.join(author['middle']),
               author['suffix'])
        if key not in key_to_author_blobs:
            key_to_author_blobs[key] = author
            ordered_keys_by_author_pos.append(key)
        else:
            if author['email']:
                key_to_author_blobs[key]['email'] = author['email']
            if author['affiliation'] and (author['affiliation']['institution']
                                          or author['affiliation']['laboratory']
                                          or author['affiliation']['location']):
                key_to_author_blobs[key]['affiliation'] = author['affiliation']
    dedup_authors_list = [key_to_author_blobs[key]
                          for key in ordered_keys_by_author_pos]
    return dedup_authors_list
5a02b877ee074270c544c7dbb06dd1ceab487e79
706,505
def get_followers(api, user_id):
    """Returns list of followers"""
    followers = []
    next_max_id = ''
    while next_max_id is not None:
        _ = api.getUserFollowers(user_id, maxid=next_max_id)
        followers.extend(api.LastJson.get('users', []))
        # absent once there are no more pages, which ends the loop
        next_max_id = api.LastJson.get('next_max_id')
    return followers
debfb11fe0b8b22232b82e9a8ea360a4d2a8cdc1
706,506
def map(v, ds, de, ts, te):
    """\
    Map the value v, in range [ds, de] to
    the corresponding value in range [ts, te]
    """
    d1 = de - ds
    d2 = te - ts
    v2 = v - ds
    r = v2 / d1
    return ts + d2 * r
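A usage sketch for map above (illustrative values; note the function shadows the builtin map in its module):

print(map(128, 0, 255, 0.0, 1.0))      # ~0.502: scale a byte to [0, 1]
print(map(0.25, 0.0, 1.0, -1.0, 1.0))  # -0.5: unit interval to [-1, 1]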
2c2ba49b2acc283ca25b07c10b7ad717ad6a280d
706,507
def get_Q_body(hs_type, Theta_SW_hs):
    """Casing heat loss of the heat source unit for hot-water heating (2)

    Args:
        hs_type(str): type of the heat source unit for hot-water heating
        Theta_SW_hs(ndarray): supply water temperature of the heat source
            unit for hot-water heating

    Returns:
        ndarray: casing heat loss of the heat source unit for hot-water heating
    """
    if hs_type in ['石油従来型暖房機', '石油従来型温水暖房機', '石油従来型給湯温水暖房機', '不明']:
        # (2a)
        return [234 * 3600 * 10 ** (-6)] * 24 * 365
    elif hs_type in ['石油潜熱回収型暖房機', '石油潜熱回収型温水暖房機', '石油潜熱回収型給湯温水暖房機']:
        # (2b)
        return (5.3928 * Theta_SW_hs - 71.903) * 3600 * 10 ** (-6)
    else:
        raise ValueError(hs_type)
60e35a31d9c9b2f5d77d3d6f1518b7a20484fad2
706,508
def merge(list_geo, npts=5):
    """
    merge a list of cad_geometries and update internal/external faces and
    connectivities

    Args:
        list_geo: a list of cad_geometries

    Returns:
        a cad_geometries
    """
    geo_f = list_geo[0]
    for geo in list_geo[1:]:
        geo_f = geo_f.merge(geo, npts=npts)
    return geo_f
70db1b52be8ae70d21f689c8f12e051d9c41cd64
706,510
import re


def ischapter_name(text_str):
    """Check whether the string is a chapter title."""
    if re.match(r'^第(.{1,9})([章节回卷集部篇])(\s*)(.*)', text_str):
        return True
    else:
        return False
c89a34408def2c2f9026045925212c2dde88a41d
706,511
import dill
import base64


def check_finished(worker, exec_id):
    """
    :param worker:
    :param exec_id:
    :return:
    """
    result = worker.status(exec_id)
    status = dill.loads(base64.b64decode(result.data))
    if status["status"] == "FAILED":
        raise Exception("Remote job execution failed")
    elif status["status"] == "INVALID ID":
        raise Exception("Invalid Id")
    elif status["status"] == "COMPLETED":
        return True, status
    else:
        return False, status
285090fd0fcdfce6964aa43f4af0fae836175ab1
706,512
def splitBinNum(binNum):
    """Split an alternate block number into latitude and longitude parts.

    Args:
        binNum (int): Alternative block number

    Returns:
        :tuple Tuple:
            1. (int) Latitude portion of the alternate block number.
               Example: ``614123`` => ``614``
            2. (int) Longitude portion of the alternate block number.
               Example: ``614123`` => ``123``
    """
    latBin = int(binNum / 1000)
    longBin = binNum - (latBin * 1000)
    return (latBin, longBin)
da9b9cc67d592e73da842f4b686c0d16985f3457
706,514
def split_in_pairs(s, padding="0"):
    """
    Takes a string and splits into an iterable of strings of two characters
    each. Made to break up a hex string into octets, so default is to pad an
    odd length string with a 0 in front. An alternative character may be
    specified as the second argument.
    """
    if not isinstance(padding, str) or len(padding) != 1:
        raise TypeError("Padding must be a single character.")
    s = padding + s if len(s) % 2 else s
    v = iter(s)
    return (a + b for a, b in zip(v, v))
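A usage sketch for split_in_pairs above with illustrative hex strings; an odd-length input gets the padding character prepended:

print(list(split_in_pairs("deadbeef")))  # ['de', 'ad', 'be', 'ef']
print(list(split_in_pairs("fff")))       # ['0f', 'ff']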
8807448bb8125c80fa78ba32f887a54ba9bab1dd
706,515
def has_global(node, name):
    """ check whether node has name in its globals list """
    return hasattr(node, "globals") and name in node.globals
7a2ef301cb25cba242d8544e2c191a537f63bf19
706,516
from typing import Iterable


def negate_objective(objective):
    """Take the negative of the given objective (converts a gain into a loss
    and vice versa)."""
    if isinstance(objective, Iterable):
        return list(map(negate_objective, objective))
    else:
        return -objective
e24877d00b7c84e04c0cb38b5facdba85694890f
706,517
def parse_plot_set(plot_set_string):
    """
    Given one of the string arguments to the --plot-sets option, parse out a
    data structure representing which conditions ought to be compared against
    each other, and what those comparison plots/tables should be called.

    The syntax of a plot set is [title:]condition[,condition[,condition...]].
    The first condition is the comparison baseline, when applicable.

    Returns a tuple of a plot set title, or None if unspecified, and a list
    of condition names.
    """
    colon_pos = plot_set_string.find(':')
    if colon_pos != -1:
        # Pull out the title before the colon
        title = plot_set_string[0:colon_pos]
        # And the rest of the specifier after it
        plot_set_string = plot_set_string[colon_pos + 1:]
    else:
        # No title given
        title = None
    # Return the title and condition list tuple
    return (title, plot_set_string.split(','))
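A usage sketch for parse_plot_set above; the titles and condition names are illustrative:

print(parse_plot_set("speed:baseline,fast,faster"))
# ('speed', ['baseline', 'fast', 'faster'])
print(parse_plot_set("baseline,fast"))
# (None, ['baseline', 'fast'])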
1df83681aa3110dfd9302bd7918f15dfbfa497ab
706,518
def add(num1, num2):
    """
    Adds two numbers

    >>> add(2,4)
    6
    """
    return num1 + num2
932981ca91c01817242e57e1be55c35441337fc4
706,519
def is_palindrome1(str):
    """
    Create slice with negative step and confirm equality with str.
    """
    return str[::-1] == str
39dbc19d0d73b956c9af24abc1babae18c816d73
706,520
def prior_min_field(field_name, field_value):
    """
    Creates the prior min field.

    :param field_name: prior name (field name initial)
    :param field_value: field initial properties
    :return: name of the min field, updated field properties
    """
    name = field_name
    value = field_value.copy()
    value.update({
        'label': 'Min',
        'required': False,
    })
    return name + '_min', value
9f331ee58e699318e678d881c0028486b746c05c
706,521
def checkpoint_save_config():
    """Fixture to create a config for saving attributes of a detector."""
    toolset = {
        "test_id": "Dummy_test",
        "saved_attributes": {
            "FeatureExtraction": [
                "dummy_dict",
                "dummy_list",
                "dummy_tuple",
                "dummy_tensor",
                "dummy_val",
            ],
        },
        "save_attributes": True,
        "attributes": {},
        "save_elementwise": True,
    }
    return toolset
6cb7e05a5eb680f6915fc58f40e72403787eea8b
706,522
def mean_zero_unit_variance(arr, mean_vector=None, std_vector=None, samples_in='row'):
    """
    Normalize input data to have zero mean and unit variance.

    Return the normalized data, the mean, and the calculated standard
    deviation which was used to normalize the data

    [normalized, meanvec, stddev] = mean_zero_unit_variance(data)
    or
    [normalized, meanvec, stddev] = mean_zero(data, mean_vector=provided_mean_vector)
    etc.
    """
    samplesIn = 1 if samples_in == 'col' else 0
    dimsIn = int(not samplesIn)
    nSamples = arr.shape[samplesIn]
    nDims = arr.shape[dimsIn]

    theshape = [1, 1]
    theshape[dimsIn] = nDims

    # explicit None checks: truthiness of a numpy array is ambiguous
    if mean_vector is None:
        mean_vector = arr.mean(axis=samplesIn).reshape(theshape)
    if std_vector is None:
        std_vector = arr.std(axis=samplesIn).reshape(theshape)
    # If you have a row with absolutely no information, you will divide by
    # zero. Hence...
    std_vector[std_vector < 1e-6] = 1
    norma = (arr - mean_vector) / std_vector
    return norma, mean_vector, std_vector
38a1ca262362b3f04aed06f3f0d21836eca8d5ad
706,523
def renorm_flux_lightcurve(flux, fluxerr, mu):
    """ Normalise flux light curves with distance modulus."""
    d = 10 ** (mu / 5 + 1)
    dsquared = d ** 2
    norm = 1e18
    # print('d**2', dsquared/norm)
    fluxout = flux * dsquared / norm
    fluxerrout = fluxerr * dsquared / norm
    return fluxout, fluxerrout
97f2606d54b106d2051983dfc29d942112e7a1e3
706,525
def is_valid_sudoku(board):
    """
    Checks if an input sudoku board is valid

    Algorithm: For all non-empty squares on board, if the value at that
    square is a number, check if that value already exists in that square's
    row, column, and minor square. If it does, return False.
    """
    cols = [set() for _ in range(9)]
    squares = [[set() for _ in range(3)] for x in range(3)]
    for row in range(9):
        rows = set()
        for col in range(9):
            if board[row][col] == ".":
                continue
            # Check row
            if board[row][col] in rows:
                return False
            else:
                rows.add(board[row][col])
            # Check col
            if board[row][col] in cols[col]:
                return False
            else:
                cols[col].add(board[row][col])
            # Check square
            if board[row][col] in squares[row // 3][col // 3]:
                return False
            else:
                squares[row // 3][col // 3].add(board[row][col])
    return True
001a02a47acbaa192215d985f3d743c42a9fb42b
706,526
def is_wrapped_exposed_object(obj):
    """
    Return True if ``obj`` is a Lua (lupa) wrapper for a BaseExposedObject
    instance
    """
    if not hasattr(obj, 'is_object') or not callable(obj.is_object):
        return False
    return bool(obj.is_object())
117a43f9dcc886dc88a77c2ace016b89e43b3c4c
706,527
def no_transform(image):
    """Pass through the original image without transformation.

    Returns a tuple with None to maintain compatibility with processes
    that evaluate the transform.
    """
    return (image, None)
25b45a5c77d3c2864ebc7a046e0f47b2fafb067b
706,528
def build_menu(buttons, n_cols, header_buttons=None, footer_buttons=None):
    """Builds a menu with the given style using the provided buttons

    :return: list of buttons
    """
    menu = [buttons[i:i + n_cols] for i in range(0, len(buttons), n_cols)]
    if header_buttons:
        menu.insert(0, [header_buttons])
    if footer_buttons:
        menu.append([footer_buttons])
    return menu
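A usage sketch for build_menu above; real callers would pass button objects, the string labels here are illustrative:

print(build_menu(['a', 'b', 'c', 'd', 'e'], 2, header_buttons='back'))
# [['back'], ['a', 'b'], ['c', 'd'], ['e']]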
f068ef9222b7e16cf19d901961f0315b2d6aebe3
706,529
import socket


def check_port_open(port: int) -> bool:
    """
    Check whether ``port`` is free.
    Part of the port_validation logic.
    """
    try:
        sock = socket.socket()
        sock.bind(("", port))
        sock.close()
        print(f"Port {port} is free")
        return True
    except OSError:
        print(f"Port {port} is in use")
        return False
76ba3ddd03bf1672b8b4ce5fd048561c3a9e78e8
706,530
def irpf(salario, base=12.5, prorrateo=0):
    """Takes the salary and the base rate, plus an optional parameter to
    prorate. If not given, the base defaults to 12.5."""
    if type(salario) == float and type(base) == float:
        if prorrateo == True:
            return (salario * (1 + 2 / 12)) * (base / 100)
        elif prorrateo == False:
            return salario * (base / 100)
    else:
        return None
b549e78f2cbd3227cc99d4ce7277a90058696895
706,531
import argparse


def str2bool(val):
    """enable default constant true arguments"""
    # https://stackoverflow.com/questions/15008758/parsing-boolean-values-with-argparse
    if isinstance(val, bool):
        return val
    elif val.lower() == 'true':
        return True
    elif val.lower() == 'false':
        return False
    else:
        raise argparse.ArgumentTypeError('Boolean value expected')
ca229cd53674c6e9a8f37c60909826bf50c6accb
706,532
def get_scalefactor(metadata):
    """Add scaling factors to the metadata dictionary

    :param metadata: dictionary with CZI or OME-TIFF metadata
    :type metadata: dict
    :return: dictionary with additional keys for scaling factors
    :rtype: dict
    """
    # set default scale factor to 1
    scalefactors = {'xy': 1.0,
                    'zx': 1.0
                    }
    try:
        # get the factor between XY scaling
        scalefactors['xy'] = metadata['XScale'] / metadata['YScale']
        # get the scalefactor between XZ scaling
        scalefactors['zx'] = metadata['ZScale'] / metadata['YScale']
    except KeyError as e:
        print('Key not found: ', e)
    return scalefactors
0619d5fa8f24008ddf4364a965268755c07d09c3
706,533
def NodeToString(xml_node):
    """Returns an XML string.

    Args:
        xml_node: xml.dom.Node object

    Returns:
        String containing XML
    """
    return xml_node.toxml()
043072bbb40f33947febedf967679e3e39931834
706,534
def difference(data, interval):
    """
    difference dataset

    parameters:
        data: dataset to be differenced
        interval: the interval between the two elements to be differenced.

    return:
        dataset: with the length = len(data) - interval
    """
    return [data[i] - data[i - interval] for i in range(interval, len(data))]
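A usage sketch for difference above on an illustrative series; interval=1 gives first-order differences, larger intervals give seasonal-style differences:

series = [1, 4, 9, 16, 25]
print(difference(series, 1))  # [3, 5, 7, 9]
print(difference(series, 2))  # [8, 12, 16]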
611f4ad36935000ae7dc16f76aef7cbb494b36ac
706,535
def add_two_values(value1, value2):
    """
    Adds two integers

    Arguments:
        value1: first integer value e.g. 10
        value2: second integer value e.g. 2
    """
    return value1 + value2
10f71fcbde9d859f094724c94568eee55a7b989a
706,537
def get_mongo_database(connection, database_name):
    """
    Access the database

    Args:
        connection (MongoClient): Mongo connection to the database
        database_name (str): database to be accessed

    Returns:
        Database: the Database object
    """
    try:
        return connection.get_database(database_name)
    except:
        return None
9299cbe0b697dec2e548fb5e26e2013214007575
706,538
def find_u_from_v(matrix, v, singular_value):
    """
    Finds the u column vector of the U matrix in the SVD UΣV^T.

    Parameters
    ----------
    matrix : numpy.ndarray
        Matrix for which the SVD is calculated
    v : numpy.ndarray
        A column vector of V matrix, it is the eigenvector of the Gramian
        of `matrix`.
    singular_value : float
        A singular value of `matrix` corresponding to the `v` vector.

    Returns
    -------
    numpy.ndarray
        u column vector of the U matrix in the SVD.
    """
    return matrix @ v / singular_value
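A sketch verifying find_u_from_v above against numpy's own SVD; the 2x2 matrix is illustrative, and taking v and sigma from the same decomposition keeps the signs consistent:

import numpy as np

A = np.array([[3.0, 0.0], [4.0, 5.0]])
U, S, Vt = np.linalg.svd(A)
print(np.allclose(find_u_from_v(A, Vt[0], S[0]), U[:, 0]))  # True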
ef2871c86bf7ddc4c42446a54230068282ad85df
706,539
from typing import List


def double(items: List[str]) -> List[str]:
    """
    Returns a new list that is the input list, repeated twice.
    """
    return items + items
9e4b6b9e84a80a9f5cbd512ca820274bb8cad924
706,540
def getwpinfo(id, wps):
    """Help function to create description of WP inputs."""
    try:
        wpmin = max([w for w in wps if 'loose' in w.lower()], key=lambda x: len(x))  # get loose WP with most 'V's
        wpmax = max([w for w in wps if 'tight' in w.lower()], key=lambda x: len(x))  # get tight WP with most 'V's
        info = f"{id} working point: {wpmin}-{wpmax}"
    except:
        info = f"{id} working point: {', '.join(wps)}"
    return info
0dcf6c205a1988227e23a77e169a9114f1fdf2cc
706,541
import inspect


def get_file_name(file_name):
    """
    Returns a Testsuite name
    """
    testsuite_stack = next(iter(list(filter(
        lambda x: file_name in x.filename.lower(), inspect.stack()))), None)
    if testsuite_stack:
        if '/' in testsuite_stack.filename:
            split_character = '/'
        else:
            split_character = '\\'
        return testsuite_stack.filename.split(split_character)[-1].split(".")[0]
    else:
        return ""
97172600d785339501f5e58e8aca6581a0a690e0
706,542
import torch


def track_edge_matrix_by_spt(batch_track_bbox, batch_track_frames, history_window_size=50):
    """
    :param batch_track_bbox: B, M, T, 4 (x, y, w, h)
    :return:
    """
    B, M, T, _ = batch_track_bbox.size()
    batch_track_xy = batch_track_bbox[:, :, :, :2]
    batch_track_wh = batch_track_bbox[:, :, :, 2:]
    batch_track_t = batch_track_frames[:, :, :, None]

    batch_track_diff_t = 1 - torch.abs(
        batch_track_t[:, :, :, None, :].expand(-1, -1, -1, T, -1)
        - batch_track_t[:, :, None, :, :].expand(-1, -1, T, -1, -1)
    ) / history_window_size
    batch_track_diff_xy = 1 - torch.abs(
        batch_track_xy[:, :, :, None, :].expand(-1, -1, -1, T, -1)
        - batch_track_xy[:, :, None, :, :].expand(-1, -1, T, -1, -1)
    )
    batch_track_diff_wh = 1 - torch.abs(
        batch_track_wh[:, :, :, None, :].expand(-1, -1, -1, T, -1)
        - batch_track_wh[:, :, None, :, :].expand(-1, -1, T, -1, -1)
    )

    # B, M, T, T, 5
    track_edge_matrix = torch.cat(
        [batch_track_diff_t, batch_track_diff_xy, batch_track_diff_wh], dim=-1)
    return track_edge_matrix
5303f401d925c26a1c18546ba371a2119a41ec3d
706,543
def _find_popular_codon(aa):
    """
    This function returns popular codon from a 4+ fold degenerative codon.

    :param aa: dictionary containing amino acid information.
    :return:
    """
    codons = [c[:2] for c in aa["codons"]]
    counts = []
    for i in range(len(codons)):
        pc = codons[i]
        count = 0
        for j in range(len(codons)):
            if codons[j] == pc:
                count += 1
        counts.append(count)
    # find index of the highest entry
    highest = 0
    for i in range(len(counts)):
        if counts[i] > counts[highest]:
            highest = i
    return aa["codons"][highest]
a555a9d42ea4dfa0260d9d4d2040de3c6fca69a0
706,545
import pathlib


def initialize_cluster_details(scale_version, cluster_name, username, password,
                               scale_profile_path, scale_replica_config):
    """ Initialize cluster details.

    :args: scale_version (string), cluster_name (string), username (string),
           password (string), scale_profile_path (string),
           scale_replica_config (bool)
    """
    cluster_details = {}
    cluster_details['scale_version'] = scale_version
    cluster_details['scale_cluster_clustername'] = cluster_name
    cluster_details['scale_service_gui_start'] = "True"
    cluster_details['scale_gui_admin_user'] = username
    cluster_details['scale_gui_admin_password'] = password
    cluster_details['scale_gui_admin_role'] = "Administrator"
    cluster_details['scale_sync_replication_config'] = scale_replica_config
    cluster_details['scale_cluster_profile_name'] = str(
        pathlib.PurePath(scale_profile_path).stem)
    cluster_details['scale_cluster_profile_dir_path'] = str(
        pathlib.PurePath(scale_profile_path).parent)
    return cluster_details
5508733e0bfbd20fb76ecaaf0df7f41675b0c5c8
706,546
def tolist(obj):
    """
    Convert given `obj` to list. If `obj` is not a list, return `[obj]`,
    else return `obj` itself.
    """
    if not isinstance(obj, list):
        return [obj]
    return obj
f511f4ebb86977b2db8646e692abc9840c2ae2d1
706,547
def score_matrix(motifs, k):
    """returns matrix score formed from motifs"""
    nucleotides = {'A': [0] * k, 'T': [0] * k, 'C': [0] * k, 'G': [0] * k}
    for motif in motifs:
        for index, nucleotide in enumerate(motif):
            nucleotides[nucleotide][index] = nucleotides[nucleotide][index] + 1
    i = 0
    matrix_score = 0
    while i < k:
        output = []
        column_score = 0
        for key in nucleotides:
            output.append(nucleotides[key][i])
        max_consumed = False
        max_item = max(output)
        for item in output:
            if item == max_item:
                if not max_consumed:
                    max_consumed = True
                    continue
                else:
                    column_score = column_score + item
            else:
                column_score = column_score + item
        matrix_score = matrix_score + column_score
        i = i + 1
    return matrix_score
ce9f7b770ce75d4e872da7b3c9b4fa3fbcd1e900
706,548
def khinalug_input_normal(field, text):
    """
    Prepare a string from one of the query fields for subsequent
    processing: replace common shortcuts with valid Khinalug characters.
    """
    if field not in ('wf', 'lex', 'lex2', 'trans_ru', 'trans_ru2'):
        return text
    text = text.replace('c1_', 'č̄')
    text = text.replace('c1\'', 'č̣')
    text = text.replace('7', 'ˁ')
    text = text.replace('g1', 'ǧ')
    text = text.replace('s1', 'š')
    text = text.replace('z1', 'ž')
    text = text.replace('c1', 'č')
    text = text.replace('j1', 'ǯ')
    text = text.replace('a1', 'ä')
    text = text.replace('u1', 'ü')
    text = text.replace('o1', 'ö')
    text = text.replace('i1', 'ı')
    text = text.replace('k_', 'k̄')
    text = text.replace('t_', 't̄')
    text = text.replace('q_', 'q̄')
    text = text.replace('c_', 'c̄')
    text = text.replace('c\'', 'c̣')
    text = text.replace('k\'', 'ḳ')
    text = text.replace('q\'', 'q̇')
    text = text.replace('x\'', 'x̣')
    text = text.replace('t\'', 'ṭ')
    text = text.replace('h\'', 'ḥ')
    return text
b9b9413ae461b6a03aa8c0db4396658dbe242c91
706,549
def _matches(o, pattern):
    """Match a pattern of types in a sequence."""
    if not len(o) == len(pattern):
        return False
    comps = zip(o, pattern)
    return all(isinstance(obj, kind) for obj, kind in comps)
e494016affa28e9018f337cb7184e96858701208
706,550
def receiver(signal, **kwargs):
    """
    A decorator for connecting receivers to signals. Used by passing in the
    signal and keyword arguments to connect::

        @receiver(signal_object, sender=sender)
        def signal_receiver(sender, **kwargs):
            ...
    """
    def _decorator(func):
        signal.connect(func, **kwargs)
        return func
    return _decorator
dbbde0855b2a657adaff9fa688aa158053e46579
706,551
def unnormalise_x_given_lims(x_in, lims):
    """
    Scales the input x (assumed to be between [-1, 1] for each dim) to the
    lims of the problem
    """
    # assert len(x_in) == len(lims)
    r = lims[:, 1] - lims[:, 0]
    x_orig = r * (x_in + 1) / 2 + lims[:, 0]
    return x_orig
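A usage sketch for unnormalise_x_given_lims above; the bounds are illustrative, with each row of lims holding [lower, upper] for one dimension:

import numpy as np

lims = np.array([[0.0, 10.0], [-5.0, 5.0]])
print(unnormalise_x_given_lims(np.array([0.0, 0.0]), lims))   # [5. 0.]
print(unnormalise_x_given_lims(np.array([-1.0, 1.0]), lims))  # [0. 5.]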
1d4cd35f45ab8594e297eb64e152a481c01905cd
706,553
def get_list_from(matrix):
    """
    Transforms capability matrix into list.
    """
    only_valuable = []
    counter = 1
    for row_number in range(matrix.shape[0]):
        only_valuable += matrix[row_number, counter::].tolist()
        counter += 1
    return only_valuable
bbfa52ff6a960d91d5aece948e9d416c3dcf0667
706,554
import math


def lead_angle(target_disp, target_speed, target_angle, bullet_speed):
    """
    Given the displacement, speed and direction of a moving target, and the
    speed of a projectile, returns the angle at which to fire in order to
    intercept the target. If no such angle exists (for example if the
    projectile is slower than the target), then None is returned.
    """
    # One can imagine the gun, target and point of target collision at some
    # time t forming a triangle, of which one side has length St*t where St
    # is the target speed, and another has length Sb*t where Sb is the
    # bullet speed. We can eliminate t by scaling all sides of the triangle
    # equally, leaving one side St and another Sb. This triangle can be
    # split into 2 right-angled triangles which share line A. Angle z can
    # then be calculated and length A found (A = sin(z)/St), and from this
    # angle a can be found (a = arcsin(A/Sb)), leading to the calculation
    # of the firing angle.

    # Check for situations with no solution
    if target_speed > bullet_speed:
        return None
    if target_disp[0] == 0 and target_disp[1] == 0:
        return None

    # Find angle to target
    ang_to_targ = math.atan2(target_disp[1], target_disp[0])

    # Calculate angle
    return math.asin(target_speed / bullet_speed * math.sin(
        ang_to_targ - target_angle - math.pi
    )) + ang_to_targ
fb5dfddf8b36d4e49df2d740b18f9aa97381d08f
706,555
from typing import Any
import sys


def toStr(s: Any) -> str:
    """
    Convert a given type to a default string

    :param s: item to convert to a string
    :return: converted string
    """
    return s.decode(sys.getdefaultencoding(), 'backslashreplace') \
        if hasattr(s, 'decode') else str(s)
10adab737ab909760215810b94743a15e39b9035
706,557
def file2bytes(filename: str) -> bytes:
    """
    Takes a filename and returns a byte string with the content of the file.
    """
    with open(filename, 'rb') as f:
        data = f.read()
    return data
f917a265c17895c917c3c340041586bef0c34dac
706,558
def mock_dataset(mocker, mock_mart, mart_datasets_response):
    """Returns an example dataset, built using a cached response."""
    mocker.patch.object(mock_mart, 'get', return_value=mart_datasets_response)
    return mock_mart.datasets['mmusculus_gene_ensembl']
bb9a8b828f0ac5bfa59b3faee0f9bcc22c7d954e
706,559
def info2lists(info, in_place=False):
    """
    Return info with:
    1) `packages` dict replaced by a 'packages' list with indexes removed
    2) `releases` dict replaced by a 'releases' list with indexes removed

    info2list(info2dicts(info)) == info
    """
    if 'packages' not in info and 'releases' not in info:
        return info
    if in_place:
        info_lists = info
    else:
        info_lists = info.copy()
    packages = info.get('packages')
    if packages:
        info_lists['packages'] = list(packages.values())
    releases = info.get('releases')
    if releases:
        info_lists['releases'] = list(releases.values())
    return info_lists
313fda757d386332e16a0a91bb4408fe3cb8c070
706,560
def is_chitoi(tiles):
    """
    Returns True if the hand satisfies chitoitsu.
    """
    unique_tiles = set(tiles)
    return (len(unique_tiles) == 7
            and all([tiles.count(tile) == 2 for tile in unique_tiles]))
c04149174bb779cd07616d4f419fc86531ab95dd
706,561
import itertools


def get_hpo_ancestors(hpo_db, hpo_id):
    """
    Get HPO terms higher up in the hierarchy.
    """
    h = hpo_db.hpo.find_one({'id': hpo_id})
    # print(hpo_id, h)
    if 'replaced_by' in h:
        # not primary id, replace with primary id and try again
        h = hpo_db.hpo.find_one({'id': h['replaced_by'][0]})
    hpo = [h]
    if 'is_a' not in h:
        return hpo
    for hpo_parent_id in h['is_a']:
        # p = hpo_db.hpo.find({'id': hpo_parent_id}):
        hpo += list(itertools.chain(get_hpo_ancestors(hpo_db, hpo_parent_id)))
    # remove duplicates
    hpo = {h['id'][0]: h for h in hpo}.values()
    return hpo
2ef2c968bc3001b97529ccd269884cefad7a899f
706,562
def space_boundaries_re(regex):
    """Wrap regex with space or end of string."""
    return rf"(?:^|\s)({regex})(?:\s|$)"
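A usage sketch for space_boundaries_re above; the token pattern and inputs are illustrative:

import re

pattern = space_boundaries_re(r"v\d+")
print(re.search(pattern, "release v2 shipped").group(1))  # v2
print(re.search(pattern, "releasev2 shipped"))            # None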
68861da6218165318b6a446c173b4906a93ef850
706,563
def flop_gemm(n, k):
    """# of + and * for matmat of nxn matrix with nxk matrix, with
    accumulation into the output."""
    return 2 * n**2 * k
b217b725e2ac27a47bc717789458fd20b4aa56c1
706,564
def index() -> str:
    """Rest endpoint to test whether the server is correctly working

    Returns:
        str: The default message string
    """
    return 'DeChainy server greets you :D'
ce0caeb9994924f8d6ea10462db2be48bbc126d0
706,565
from typing import AnyStr
from typing import List
import json


def load_json_samples(path: AnyStr) -> List[str]:
    """
    Loads samples from a json file

    :param path: Path to the target file
    :return: List of samples
    """
    with open(path, "r", encoding="utf-8") as file:
        samples = json.load(file)
    if isinstance(samples, list):
        return samples
    else:
        raise RuntimeError("File's content must be list-like")
b735e7265a31f6bc6d19381bfe9d0cbe26dcf170
706,566