content (string, lengths 35 – 416k) · sha1 (string, lengths 40 – 40) · id (int64, values 0 – 710k)
def _not_exhausted(last_fetched):
    """Check if the last fetched tasks were the last available."""
    return len(last_fetched) == 100
570cf94ba9c723cced8ec3a746f2ce070d780fd5
704,891
import os

def files_by_extension(root, extensions):
    """
    Returns a list of files that match the extensions given
    after crawling the root directory
    """
    assert os.path.isdir(root)
    file_list = []
    for roots, _, files in os.walk(root):
        for f in files:
            ext = os.path.splitext(f)[1][1:].strip().lower()
            if ext in extensions:
                file_list.append(os.path.join(roots, f))
    return file_list
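A minimal usage sketch for the function above (the path and extension set are illustrative, not from the dataset):

# collect all Python and text files under some existing directory
matches = files_by_extension("/tmp", {"py", "txt"})
# note: extensions are matched lowercase and without the leading dot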
637d8f2fc8d35f1f78e81c328541519b32b34d7b
704,892
import os
import argparse

def extant_file(x):
    """
    'Type' for argparse - checks that file exists but does not open.
    """
    if not os.path.exists(x):
        # Argparse uses the ArgumentTypeError to give a rejection message like:
        # error: argument input: x does not exist
        raise argparse.ArgumentTypeError("{0} does not exist".format(x))
    return x
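A hedged sketch of how this argparse "type" might be wired up (the argument name is illustrative):

parser = argparse.ArgumentParser()
parser.add_argument("input", type=extant_file, help="path to an existing file")
# argparse calls extant_file on the raw string and reports
# 'error: argument input: <path> does not exist' for missing paths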
2572516acbc1b6a661e4d85f36d8adb96f832d0f
704,894
def has_oxidation_states(comp):
    """Check if a composition object has oxidation states for each element

    Args:
        comp (Composition): Composition to check

    Returns:
        (boolean) Whether this composition object contains oxidation states
    """
    for el in comp.elements:
        if not hasattr(el, "oxi_state") or el.oxi_state is None:
            return False
    return True
702595070b588761142055bc1532ce26acd287fb
704,895
def check_Latitude(observation):
    """
    Validates that observation contains a valid Latitude value

    Returns:
    - assertion value: True if Latitude is valid, False otherwise
    - error message: empty if Latitude is valid, the error description otherwise
    """
    value = observation.get("Latitude")
    if isinstance(value, str):
        error = "Field `Latitude` is not a float or a null value. Current value: " + str(value)
        return False, error
    return True, ""
65582eea8a5c40a08054eb5b4889aa3bc6d0af68
704,896
from typing import Optional

def parse_opt_int(s: Optional[str]) -> Optional[int]:
    """
    parse_opt_int(s: Optional[str]) -> Optional[int]

    If s is a string, parse it for an integer value
    (raising a ValueError if it cannot be parsed correctly.)

    If s is None, return None.

    Otherwise, raise a TypeError.
    """
    if s is None:
        return None
    if isinstance(s, str):
        return int(s)
    raise TypeError(f"value must be a string or None: {type(s).__name__}")
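A few illustrative calls for the function above:

assert parse_opt_int(None) is None
assert parse_opt_int("42") == 42
# parse_opt_int("x") raises ValueError; parse_opt_int(42) raises TypeError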
91a102c8c8e6a6ee109e9c88c56d9a6959f1f838
704,897
def setup(hass, config):
    """Mock a successful setup."""
    return True
fd2977534aa8a165b49c4fbddc513c8f77b0588d
704,898
def getIdFromVpcArn(resources):
    """ given a vpc arn, strip off all but the id """
    vpcStr = 'vpc/'
    ids = []
    for resource in resources:
        if vpcStr in resource:
            index = resource.rfind(vpcStr)
            id = resource[index + len(vpcStr):]
            ids.append(id)
    return ids
c1c5e5aef145ee8d3a2072604a1f531ad9198385
704,899
def input_output_mapping():
    """Build a mapping dictionary from pfb input to output numbers."""
    # the polyphase filter bank maps inputs to outputs, which the MWA
    # correlator then records as the antenna indices.
    # the following is taken from mwa_build_lfiles/mwac_utils.c
    # inputs are mapped to outputs via pfb_mapper as follows
    # (from mwa_build_lfiles/antenna_mapping.h):
    # floor(index/4) + index%4 * 16 = input
    # for the first 64 outputs, pfb_mapper[output] = input
    pfb_mapper = [0, 16, 32, 48, 1, 17, 33, 49, 2, 18, 34, 50, 3, 19, 35, 51,
                  4, 20, 36, 52, 5, 21, 37, 53, 6, 22, 38, 54, 7, 23, 39, 55,
                  8, 24, 40, 56, 9, 25, 41, 57, 10, 26, 42, 58, 11, 27, 43, 59,
                  12, 28, 44, 60, 13, 29, 45, 61, 14, 30, 46, 62, 15, 31, 47, 63]
    # build a mapper for all 256 inputs
    pfb_inputs_to_outputs = {}
    for p in range(4):
        for i in range(64):
            pfb_inputs_to_outputs[pfb_mapper[i] + p * 64] = p * 64 + i
    return pfb_inputs_to_outputs
1c88c7aba95218a4ce4ae792b2001e4f912da178
704,900
import subprocess

def rexec(cmd):
    """
    executes shell command cmd with the output returned as a string
    when the command has finished.
    """
    try:
        output = subprocess.check_output(cmd.split(), stderr=subprocess.STDOUT)
    except subprocess.CalledProcessError as exc:
        return exc.output[:-1]
    else:
        return output[:-1]
79a7441c9798c53c2a9f064cec15d38adbe1dd6b
704,901
def get_attachment_file_upload_to(instance, filename):
    """
    Returns a valid upload path for the file of an attachment.
    """
    return instance.get_file_upload_to(filename)
e38c51a2ca947bebe1ed274c4265081c6b9e7c41
704,902
def handle_negations(tweet_tokens, lexicon_scores):
    """
    Handling of negations occurring in tweets
    -> shifts meaning of words
    -> if a negation was found the polarity of the following words will change

    Parameters
    ----------
    tweet_tokens : List
        list of tweet tokens that were already preprocessed (cleaning, etc.)
    lexicon_scores : List
        list of assigned sentiment scores per token (decoded as Integers)

    Returns
    -------
    new_scores : List
        list of sentiment scores (as Integers) after negation handling
    """
    # new score list (a copy, so the input list is not mutated)
    new_scores = list(lexicon_scores)

    # Words defining negations
    # taken from https://github.com/gkotsis/negation-detection/blob/master/negation_detection.py
    # and Kolchyna et al. (2015)
    negations_adverbs = ["no", "without", "nil", "not", "n't", "never", "none",
                         "neith", "nor", "non", "seldom", "rarely", "scarcely",
                         "barely", "hardly", "lack", "lacking", "lacks",
                         "neither", "cannot", "can't", "daren't", "doesn't",
                         "didn't", "hadn't", "wasn't", "won't", "without",
                         "hadnt", "haven't", "weren't"]
    negations_verbs = ["deny", "reject", "refuse", "subside", "retract", "non"]

    # find negations in the tweet_tokens list and change the scores of negative
    # and positive tokens immediately following the negation
    index = 0
    for ii in range(len(tweet_tokens)):
        token = tweet_tokens[index]
        if token in negations_adverbs or token in negations_verbs:
            # make sure that the end of the tweet isn't reached yet
            if index < len(tweet_tokens) - 1:
                # if the sentiment of the next token is positive change it to negative
                if lexicon_scores[index + 1] == 1:
                    new_scores[index + 1] = -1
                    index += 2
                # else change it to positive if it is negative
                elif lexicon_scores[index + 1] == -1:
                    new_scores[index + 1] = 1
                    index += 2
                # if neutral let it neutral -> go to the next token
                else:
                    index += 1
            else:
                break
        else:
            index += 1
        # exit the loop when all tokens have been checked
        if index >= len(tweet_tokens) - 1:
            break

    # return the new scores
    return new_scores
cca56e5fa1b611aa6adb2e74ab580fed49b923ee
704,904
def log_level(non_prod_value: str, prod_value: str) -> str:
    """
    Helper function for setting an appropriate log level in prod.
    """
    return prod_value
86f098cfe9137519da1d160c22dfc1f43303c546
704,905
import os

def partitioning_df(stats_df, plus_and_minus, tmp_dir, chunk_size=1000):
    """
    the first state for large files is very large. We split the first state
    in a separate file. Then all the other states are split into several files.
    """
    stats_df.sort_values(['prev_state', 'concept:name', 'state'], ascending=True, inplace=True)
    plus_and_minus.sort_values(['prev_state', 'concept:name', 'state'], ascending=True, inplace=True)

    unique_states = stats_df.groupby(['prev_state', 'concept:name', 'state']).size() \
        .reset_index().rename(columns={0: 'count'}).drop('count', axis=1)
    large_states = stats_df.groupby(['prev_state', 'concept:name', 'state']).relative_time.count()
    # separating large states from the others
    large_states = large_states[large_states > 1000].reset_index()  # ['prev_state', 'concept:name', 'state']
    idx = 0

    """large states separately"""
    for index, row in large_states.iterrows():
        res = stats_df.loc[(stats_df.state == row['state']) &
                           (stats_df.prev_state == row['prev_state']) &
                           (stats_df['concept:name'] == row['concept:name']), :]
        res.to_pickle(os.path.join(tmp_dir, 'stats_df_%s' % (idx)))
        plus_and_minus.loc[(plus_and_minus.state == row['state']) &
                           (plus_and_minus.prev_state == row['prev_state']) &
                           (plus_and_minus['concept:name'] == row['concept:name']), :] \
            .to_pickle(os.path.join(tmp_dir, 'plus_and_minus_%s' % (idx)))
        row_id = unique_states.index[(unique_states.state == row['state']) &
                                     (unique_states.prev_state == row['prev_state']) &
                                     (unique_states['concept:name'] == row['concept:name'])].tolist()[0]
        unique_states.drop(row_id, axis=0, inplace=True)
        idx += 1

    """splitting other states regularly"""
    max_index_of_large_states = idx
    for i in range(0, unique_states.shape[0], chunk_size):
        current_states = unique_states[i:i + chunk_size]
        res = stats_df.iloc[current_states.index]
        res.to_pickle(os.path.join(tmp_dir, 'stats_df_%s' % (idx)))
        plus_and_minus.iloc[current_states.index] \
            .to_pickle(os.path.join(tmp_dir, 'plus_and_minus_%s' % (idx)))
        idx += 1

    return idx, max_index_of_large_states  # number of chunks, first index of regular chunks
3a62c7abe0ac67b095351ea69bad642b3479a36e
704,906
def distinguishable_paths(path1, path2):
    """
    Checks if two model paths are distinguishable in a deterministic
    way, without looking forward or backtracking. The arguments are
    lists containing paths from the base group of the model to a couple
    of leaf elements. Returns `True` if there is a deterministic
    separation between paths, `False` if the paths are ambiguous.
    """
    e1, e2 = path1[-1], path2[-1]
    for k, e in enumerate(path1):
        if e not in path2:
            depth = k - 1
            break
    else:
        depth = 0

    if path1[depth].max_occurs == 0:
        return True

    univocal1 = univocal2 = True
    if path1[depth].model == 'sequence':
        idx1 = path1[depth].index(path1[depth + 1])
        idx2 = path2[depth].index(path2[depth + 1])
        before1 = any(not e.is_emptiable() for e in path1[depth][:idx1])
        after1 = before2 = any(not e.is_emptiable() for e in path1[depth][idx1 + 1:idx2])
        after2 = any(not e.is_emptiable() for e in path1[depth][idx2 + 1:])
    else:
        before1 = after1 = before2 = after2 = False

    for k in range(depth + 1, len(path1) - 1):
        univocal1 &= path1[k].is_univocal()
        idx = path1[k].index(path1[k + 1])
        if path1[k].model == 'sequence':
            before1 |= any(not e.is_emptiable() for e in path1[k][:idx])
            after1 |= any(not e.is_emptiable() for e in path1[k][idx + 1:])
        elif path1[k].model in ('all', 'choice'):
            if any(e.is_emptiable() for e in path1[k] if e is not path1[k][idx]):
                univocal1 = before1 = after1 = False
        else:
            if len(path1[k]) > 1 and all(e.is_emptiable() for e in path1[k]
                                         if e is not path1[k][idx]):
                univocal1 = before1 = after1 = False

    for k in range(depth + 1, len(path2) - 1):
        univocal2 &= path2[k].is_univocal()
        idx = path2[k].index(path2[k + 1])
        if path2[k].model == 'sequence':
            before2 |= any(not e.is_emptiable() for e in path2[k][:idx])
            after2 |= any(not e.is_emptiable() for e in path2[k][idx + 1:])
        elif path2[k].model in ('all', 'choice'):
            if any(e.is_emptiable() for e in path2[k] if e is not path2[k][idx]):
                univocal2 = before2 = after2 = False
        else:
            if len(path2[k]) > 1 and all(e.is_emptiable() for e in path2[k]
                                         if e is not path2[k][idx]):
                univocal2 = before2 = after2 = False

    if path1[depth].model != 'sequence':
        if before1 and before2:
            return True
        elif before1:
            return univocal1 and e1.is_univocal() or after1 or path1[depth].max_occurs == 1
        elif before2:
            return univocal2 and e2.is_univocal() or after2 or path2[depth].max_occurs == 1
        else:
            return False
    elif path1[depth].max_occurs == 1:
        return before2 or (before1 or univocal1) and (e1.is_univocal() or after1)
    else:
        return (before2 or (before1 or univocal1) and (e1.is_univocal() or after1)) and \
               (before1 or (before2 or univocal2) and (e2.is_univocal() or after2))
140f8f18f030df233490ef242504649f175f62c7
704,908
def read_to_ulens_in_intvls(read, intvls):
    """Extract the lengths of units within `intvls` from `read.units`."""
    return [unit.length for unit in read.units if unit.length in intvls]
11159bea8bbf0cb68f0e9a7355c82e93b430065d
704,909
def fillNaToNone(data):
    """Iterates through NA values and changes them to the string "None"

    Parameters:
        dataset (pd.Dataset): Both datasets

    Returns:
        data (pd.Dataset): Dataset with any NA values in the columns listed
        changed to "None"
    """
    columns = ["PoolQC", "MiscFeature", "Alley", "Fence", "FireplaceQu",
               "GarageType", "GarageFinish", "GarageQual", "GarageCond",
               "BsmtQual", "BsmtCond", "BsmtExposure", "BsmtFinType1",
               "BsmtFinType2", "MasVnrType"]
    for column in columns:
        data[column] = data[column].fillna("None")
    return data
2a6fc8008447abefd9f993b01606c1afc5aa5a8a
704,910
import re

def is_ld_block_defn_line(mdfl):
    """
    Parse GFM link definition lines of the form...
        [10]: https://www.google.com
        [11]: https://www.google.com "Title Info"
        [1a]: https://www.google.com "Title Info {}"
        [2b]: https://www.google.com "Title Info {biblio info}"
    Returns footnote id, url, title, biblio-info as a list
    """
    retval = re.findall(r"^\[([a-zA-Z0-9_-]*)\]:\s*(https?://\S*)\s*\"?([^{]*)([^\"]*)\"?$", mdfl)
    if not retval:
        return None
    ref_hdl = retval[0][0]
    ref_url = retval[0][1].strip()
    ref_tit = retval[0][2].strip().strip('"')
    ref_bib = retval[0][3].strip().strip('"{}')
    return [ref_hdl, ref_url, ref_tit, ref_bib]
0ebd01c0c05634ee33a320fa4c280ad575ee9b25
704,911
def shared_template(testconfig):
    """Shared template for hyperfoil test"""
    shared_template = testconfig.get('hyperfoil', {}).get('shared_template', {})
    return shared_template.to_dict()
160daa08699ae973d5cbbfe28b75f08ff3eb2f52
704,912
def find_merge_commit_in_prs(needle, prs):
    """Find the merge commit `needle` in the list of `prs`

    If found, returns the pr the merge commit comes from.
    If not found, return None
    """
    for pr in prs[::-1]:
        if pr['merge_commit'] is not None:
            if pr['merge_commit']['hash'] == needle[1][:12]:
                return pr
    return None
42320473aff84985e35cdf9024a64a18fe6f14f1
704,913
def create_valid_url(url: str) -> str:
    """
    Generate a video direct play url.
    """
    return url
a04a22ec64b346be83b020745aeb33f74ca90b74
704,914
import numpy

def circular_weight(angle):
    """This function utilizes the precomputed circular bezier function with
    a fit to a 10th order curve created by the following code block:

    .. code-block:: python

        x = numpy.arange(.5, 180, 0.5)
        y = []
        for i in x:
            y.append(bezier.find_circular_weight(i, tol=1e-12, max_iters=500))
        y = numpy.array(y)
        z = numpy.polyfit(x, y, 10)

    Parameters
    ----------
    angle : float
        enter the angle to be traversed in degrees

    Returns
    -------
    weight : float
        Weight value for calculating bezier control points that approximate
        a circular curve.
    """
    z = numpy.array([
        -2.45143082907626980583458614241573e-24,
        1.58856196152315352138612607918623e-21,
        -5.03264989277462933391916020538014e-19,
        8.57954915199159887348249578203777e-17,
        -1.09982713519619074150585319501519e-14,
        6.42175701661701683377126465867012e-13,
        -1.95012445981222027957307425487916e-10,
        6.98338125134285339870680633234242e-10,
        -1.27018636324842636571531492850617e-05,
        5.58069196465371404519196542326487e-08,
        6.66666581437823202449521886592265e-01
    ])
    p = numpy.poly1d(z)
    return p(angle)
4341173c3e3584fcddbe04c60f7dd43fe859ac89
704,915
import subprocess

def runprog(*args):
    """Runs specified program and args, returns (exitcode, stdout, stderr)."""
    p = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    out, err = p.communicate()
    return (p.returncode, out, err)
2146d967c961f4c9ed1f62288436a82cc7a62189
704,916
def encode(s, c):
    """
    s is the secret code
    c is the clear text
    """
    secret_code_list = list(s)
    clear_text_list = list(c)
    encoded_text_list = []
    count = 0
    for letter in clear_text_list:
        if letter == ' ':
            encoded_text_list.append(' ')
            continue
        encoded_letter = (ord(secret_code_list[count]) - ord('A') + ord(letter) - ord('A')) % 26 + ord('A')
        encoded_letter = chr(encoded_letter)
        count = (count + 1) % (len(secret_code_list))
        encoded_text_list.append(encoded_letter)
    encoded = ''.join(encoded_text_list)
    return encoded
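This is a Vigenère-style shift over uppercase A-Z; a worked example (key and plaintext are illustrative):

assert encode("KEY", "HELLO") == "RIJVS"  # H+K=R, E+E=I, L+Y=J, L+K=V, O+E=S
# spaces pass through unchanged and do not consume a key letter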
3af6297fb79b77c542b19789ccf9bc0668f6afd5
704,917
def update_board(position, board, player):
    """
    Update the board with the user input position if position not taken
    returns board, True=position taken or False=position not taken and board updated

    args:
        position (int 1-9, user input)
        board (np.array 2d)
        player ("X" or "O")
    """
    # make position 1-9 compatible with a 3x3 2d array indexed 0-8
    position = position - 1
    # logic to find row,col, uncomment 2 lines to print/test
    # print(position, 'int(/3) ', int(position/3))
    # print(position, '%3 ', position%3)
    # find position in array, obtain row/col index
    row = int(position/3)
    if position > 2:
        col = position % 3
    else:
        col = position
    # If position not taken, update board
    if board[row][col] == '-':
        board[row][col] = player
        return board, False
    # else position is taken, do not update board
    else:
        return board, True
eb53d24c4976499e6611c97757d0c33b4cb3254f
704,918
def tle_fmt_int(num, digits=5):
    """ Return an integer right-aligned string with DIGITS of precision,
    all blank if num=0. Ignores sign.
    """
    if num:
        num = abs(num)
    else:
        return " " * digits
    string_int = "{:>{DIGITS}d}".format(num, DIGITS=digits)
    return string_int
8db7938e7a88e68c4a22013b10debbc4f5a9ca72
704,919
def get_status():
    """Return classifier status."""
    return 'ok'
84aedac3659ac2321867b02d3f6e7acb523923a3
704,920
def marks(category, mark=None, category_marks=None, public=False):
    """Assign marks to a test or suite of tests, grouped by a category."""
    def decorator(test_item):
        if mark is None and category_marks is None:
            raise ValueError("One of mark or category_marks must be defined")
        test_item.__marks_category__ = category
        test_item.__marks_mark__ = mark
        test_item.__marks_category_marks__ = category_marks
        test_item.__marks_public__ = public
        return test_item
    return decorator
2d47a8df4f610dbc081dd57fce169e2f89b88ca4
704,922
import secrets

def get_random_ua():
    """return a random user-agent string from file"""
    # stop condition: file does not exist, not readable...
    # + file operation
    with open('headers.txt') as hbuffer:
        all = hbuffer.readlines()
        return secrets.choice(all).strip()
0d6a924c07bbad2398966bed590bf3307f5c475d
704,923
def underscore_to_camelcase(value):
    """
    Converts underscore notation (something_named_this)
    to camelcase notation (somethingNamedThis)

    >>> underscore_to_camelcase('country_code')
    'countryCode'
    >>> underscore_to_camelcase('country')
    'country'
    >>> underscore_to_camelcase('price_GBP')
    'priceGBP'
    >>> underscore_to_camelcase('recommended_horizontal_resolution')
    'recommendedHorizontalResolution'
    >>> underscore_to_camelcase('postal_or_zip_code')
    'postalOrZipCode'
    >>> underscore_to_camelcase('test_ABC_test')
    'testABCTest'
    """
    words = value.split('_')
    return '%s%s' % (words[0],
                     ''.join(x if x.isupper() else x.capitalize() for x in words[1:]))
94bb5c007d3b50112c62ca9b3e97c5bf4f155fff
704,925
def findCenter(S):
    """Find the approximate center atom of a structure.

    The center of the structure is the atom closest to (0.5, 0.5, 0.5)

    Returns the index of the atom.
    """
    best = -1
    bestd = len(S)
    center = [0.5, 0.5, 0.5]  # the canonical center
    for i in range(len(S)):
        d = S.lattice.dist(S[i].xyz, center)
        if d < bestd:
            bestd = d
            best = i
    return best
634945a5560b3791f3835f3da090decd1b06b933
704,926
def html_color_to_rgba(html_colour, alpha):
    """
    :param html_colour: Colour string like FF0088
    :param alpha: Alpha value (opacity)
    :return: RGBA semitransparent version of colour for use in css
    """
    html_colour = html_colour.upper()
    if html_colour[0] == '#':
        html_colour = html_colour[1:]
    r_str = html_colour[0:2]
    g_str = html_colour[2:4]
    b_str = html_colour[4:6]
    r = int(r_str, 16)
    g = int(g_str, 16)
    b = int(b_str, 16)
    return 'rgba(%s, %s, %s, %s)' % (r, g, b, alpha)
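A couple of illustrative calls (the leading '#' is optional and case is normalized):

assert html_color_to_rgba("#FF0088", 0.5) == "rgba(255, 0, 136, 0.5)"
assert html_color_to_rgba("ff0088", 1) == "rgba(255, 0, 136, 1)"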
4f28938aa89d62198cc3052a480e0e0744560a79
704,927
from collections import OrderedDict

def _assign_category_colors(uses, cmap, use_colors=None, assigned_colors=None):
    """Set a dictionary of nice colors for the use blocks.

    Options allow specifying pre-defined elements for some categories."""
    use_colors = OrderedDict() if use_colors is None else use_colors
    assigned_colors = (
        OrderedDict() if assigned_colors is None else assigned_colors
    )
    available_idxs = list(range(cmap.N))
    used_colors = set(use_colors.values())
    for cmap_idx in range(cmap.N):
        # Skip gray because it is confusing against background
        this_cmap = cmap(cmap_idx)
        if this_cmap[0] == this_cmap[1] and this_cmap[0] == this_cmap[2]:
            available_idxs.remove(cmap_idx)
            continue
        # If a color has been used already, don't reuse it for something else
        if cmap(cmap_idx) in used_colors:
            available_idxs.remove(cmap_idx)
    for use, cmap_idx in assigned_colors.items():
        if cmap_idx in available_idxs:
            available_idxs.remove(cmap_idx)
            use_colors[use] = cmap(cmap_idx)
        else:
            assert use_colors[use] == cmap(cmap_idx)
    for use in uses:
        if use not in use_colors:
            use_idx = available_idxs[0]
            use_colors[use] = cmap(use_idx)
            available_idxs.remove(use_idx)
    return use_colors
085d0ca707990c84cd51464ee8f65f90500b7060
704,928
import os

def create_list_of_file_lists(possible_storage_locations, dir_path_to_files, dir_name_appendix):
    """This function creates a list of the files within each of the file
    storage locations so the user can inspect them to look for comparison
    options."""
    # Output list of files
    files_to_compare = []
    for location in possible_storage_locations:
        location_filepath = dir_path_to_files + location + "_" + dir_name_appendix
        print("Filepath: ", location_filepath)
        os.chdir(location_filepath)
        all_files = os.listdir()
        for file in all_files:
            files_to_compare.append(file)
    return files_to_compare
11886751557e6994e3994f820dee21848584e265
704,929
from datetime import datetime

def _make_todays_date() -> str:
    """ build today's date as a standard format """
    return datetime.now().strftime("%a %d-%b")
fdb9bc420689081586ac19fe91a17ea871576d59
704,930
import re

def add_review_suggestion_flags(df, text_col, result_col='result_binary'):
    """
    attempt to add on some logical "manual review suggested" flags onto cases
    to reduce false positive/negative classifications.
    currently flags cases with "flora" in text, >=1 species capture, and
    currently classified as negative.
    inspired by some challenging to classify cases in our 2nd validation set.
    """
    # flora flag testing
    flora_bool1 = df[text_col].apply(lambda x: re.search(r'flora', str(x).lower()) is not None)
    flora_bool2 = df['species_capt'].apply(lambda x: len(x)) > 0
    flora_bool3 = df['result_num'] == 0
    flora_flag = (flora_bool1) & (flora_bool2) & (flora_bool3)
    df['flora_flag'] = 0
    df.loc[flora_flag, 'flora_flag'] = 1
    return df
e2083d65f54b82dd9eba19b6b2d32806e2cd086d
704,931
def number_of_fishers():
    """ Prompt the user for the number of fishermen entering the draw."""
    try:
        number = int(input("How many fishermen will enter the competition: "))
        return number
    except ValueError:
        print("Please enter an integer for the number of competing fishermen")
bd3ff25865d67851c8a1742a8cfa808a317716f0
704,932
def summarize_samples(samples, run_parallel):
    """Back compatibility for existing pipelines. Should be replaced with summary when ready.
    """
    return samples
20c742e751f9ea1f783572f031fe144baf73293e
704,933
def get_keywords(string):
    """Get keywords for a given string.

    Args:
        string (str): A string to get keywords for.

    Returns:
        (list): A list of keywords.
    """
    keywords = string.lower().split(' ')
    keywords = [x.strip() for x in keywords if x]
    keywords = list(set(keywords))
    return keywords
8d4e0781701dc3574583baf417c573967638e86f
704,934
def distancia(ponto1, ponto2):
    """ Computes the distance between two points """
    xdif = ponto2.getx() - ponto1.getx()
    ydif = ponto2.gety() - ponto1.gety()
    dif = (xdif**2 + ydif**2)**0.5
    return dif
36a980a1081133fb6496585c25cca5782ceef06d
704,935
import time

def foo(x, sleep_time):
    """Dummy function for the tests"""
    time.sleep(sleep_time)
    return [{"type": "objective", "name": "objective", "value": x}]
3d55a0b0776acec0badd10e38be724afc3015c2f
704,936
def bt_search(btree, key):
    """Search operation based on a binary search tree."""
    bt = btree
    while bt is not None:
        entry = bt.data
        if key < entry.key:
            bt = bt.left
        elif key > entry.key:
            bt = bt.right
        else:
            return entry.values
    return None
1b358087c10a4d0d6fe79b023340fafeafb81914
704,937
import os

def file_exists(filepath):
    """Check whether a file exists by given file path."""
    return os.path.isfile(filepath)
157caa4e5ce39243b46dda915808de79d7cf76c0
704,938
def prep_tweet_body(tweet_obj, args, processed_text):
    """ Format the incoming tweet

    Args:
        tweet_obj (dict): Tweet to preprocess.
        args (list): Various datafields to append to the object.
            0: subj_sent_check (bool): Check for subjectivity and sentiment.
            1: subjectivity (num): Subjectivity result.
            2: sentiment (dict): Sentiment result.
        processed_text (list): List of tokens and ngrams etc.

    Returns:
        dict: Tweet with formatted fields
    """
    subj_sent_check = args[0]
    result = tweet_obj
    if subj_sent_check:
        subjectivity = args[1]
        sentiment = args[2]
        result["subjectivity"] = subjectivity
        result["compound_score"] = sentiment["compound"]
        result["neg_score"] = sentiment["neg"]
        result["neu_score"] = sentiment["neu"]
        result["pos_score"] = sentiment["pos"]
        result["hs_keyword_count"] = len(processed_text[4])
        result["hs_keyword_matches"] = processed_text[4]
    result["tokens"] = processed_text[0]
    result["stopwords"] = processed_text[1]
    result["hashtags"] = processed_text[2]
    result["user_mentions"] = processed_text[3]
    result["unigrams"] = processed_text[5][0]
    result["bigrams"] = processed_text[5][1]
    result["trigrams"] = processed_text[5][2]
    result["quadgrams"] = processed_text[5][3]
    result["pentagrams"] = processed_text[5][4]
    result["stopword_ngrams"] = processed_text[6]
    result["ordered_tokens"] = processed_text[7]
    return result
9163d7bb10e3bb31849090d8ebfe4d00c19db2df
704,939
import time

def timedcall(fn, *args):
    """ Run a function and measure execution time.

    Arguments:
        fn    : function to be executed
        args  : arguments to function fn

    Return:
        dt    : execution time
        result: result of function

    Usage example:
        You want to time the function call "C = foo(A,B)".
        --> "T, C = timedcall(foo, A, B)"
    """
    t0 = time.time()
    result = fn(*args)
    t1 = time.time()
    dt = t1 - t0
    return dt, result
60779c4f4b63796995d722133c304edf519ecd8f
704,940
def tokuda_gap(i):
    """Returns the i^th Tokuda gap for Shellsort (starting with i=0).

    The first 20 terms of the sequence are:
    [1, 4, 9, 20, 46, 103, 233, 525, 1182, 2660, 5985, 13467, 30301, 68178,
     153401, 345152, 776591, 1747331, 3931496, 8845866, ...]

    h_i = ceil( (9*(9/4)**i-4)/5 ) for i>=0.

    If (9*(9/4)**i-4)/5 is not an integer, I believe this is the same as
    h_i = ((9**(i+1)>>(i<<1))-4)//5 + 1, and I believe the above should be
    non-integer valued for all i>0. (We have to explicitly return 1 when
    i=0, as the above formula would return 2.)
    """
    return 1 if i == 0 else ((9**(i+1) >> (i << 1)) - 4)//5 + 1
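A quick check of the integer formula against the sequence listed in the docstring:

assert [tokuda_gap(i) for i in range(6)] == [1, 4, 9, 20, 46, 103]
# 9**(i+1) >> (i<<1) computes floor(9 * (9/4)**i) using only integer shifts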
710633e924cb6e31a866683b91da6489c781ba4a
704,941
from typing import Dict
from typing import Pattern
import re

def get_xclock_hints() -> Dict[str, Pattern]:
    """Retrieves hints to match an xclock window."""
    return {"name": re.compile(r"^xclock$")}
99e1fe51b46cb5e101c2a1c86cf27b2b60c0a38e
704,942
import math

def fnCalculate_ReceivedPower(P_Tx, G_Tx, G_Rx, rho_Rx, rho_Tx, wavelength, RCS):
    """
    Calculate the received power at the bistatic radar receiver.
    equation 5 in "PERFORMANCE ASSESSMENT OF THE MULTIBEAM RADAR
    SENSOR BIRALES FOR SPACE SURVEILLANCE AND TRACKING"

    Note: ensure that the distances rho_Rx, rho_Tx, wavelength are
    converted to metres before passing into this function.

    Created on: 26 May 2017
    """
    denominator = (4*math.pi)**3 * (rho_Rx**2)*(rho_Tx**2)
    numerator = P_Tx*G_Tx*G_Rx*RCS*(wavelength**2)
    P_Rx = numerator/denominator
    return P_Rx
944fb485e9d9a3d2da130e4ddc415e63ab814380
704,943
from datetime import datetime

def tzdt(fulldate: str):
    """
    Converts an ISO 8601 full timestamp to a Python datetime.

    Parameters
    ----------
    fulldate: str
        ISO 8601 UTC timestamp, e.g. `2017-06-02T16:23:14.815Z`

    Returns
    -------
    :class:`datetime.datetime`
        Python datetime representing ISO timestamp.
    """
    if fulldate[-1] == "Z":
        fulldate = fulldate[0:-1] + "+0000"
    return datetime.strptime(fulldate, "%Y-%m-%dT%H:%M:%S.%f%z")
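An illustrative call using the timestamp from the docstring (the trailing "Z" is rewritten to "+0000" so %z can parse it):

dt = tzdt("2017-06-02T16:23:14.815Z")
assert dt.year == 2017 and dt.utcoffset().total_seconds() == 0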
e327c23f9aecf587432fa0170c8bcd3a9a534bd1
704,945
import math

def lat2y(latitude):
    """
    Translate a latitude coordinate to a projection on the y-axis, using
    spherical Mercator projection.

    :param latitude: float
    :return: float
    """
    return 180.0 / math.pi * (math.log(math.tan(math.pi / 4.0 + latitude * (math.pi / 180.0) / 2.0)))
59a0a111c22c99dd23e80ed64d6355b67ecffd42
704,946
def filenameValidator(text):
    """ TextEdit validator for filenames. """
    return not text or len(set(text) & set('\\/:*?"<>|')) == 0
435032f32080b52165756cf147830308537e292d
704,947
def makepdb(title, parm, traj):
    """ Make pdb file from first frame of a trajectory """
    cpptrajdic = {'title': title, 'parm': parm, 'traj': traj}
    cpptrajscript = """parm {parm}
trajin {traj} 0 1 1
center
rms first @CA,C,N
strip :WAT
strip :Na+
strip :Cl-
trajout {title}.pdb pdb
run
exit"""
    return cpptrajscript.format(**cpptrajdic)
8ca8c95adef74525ac6018146418dd5e2314ff94
704,948
import random

def secure_randint(min_value, max_value, system_random=None):
    """
    Return a random integer N such that a <= N <= b.

    Uses SystemRandom for generating random numbers.
    (which uses os.urandom(), which pulls from /dev/urandom)
    """
    if not system_random:
        system_random = random.SystemRandom()
    return system_random.randint(min_value, max_value)
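A short usage sketch (the bounds are illustrative):

value = secure_randint(1, 6)  # inclusive on both ends, like random.randint
assert 1 <= value <= 6
# a shared SystemRandom instance can be passed in to avoid re-creating one per call
rng = random.SystemRandom()
values = [secure_randint(0, 9, system_random=rng) for _ in range(5)]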
f4b61457c6e384e6185a5d22d95539001903670d
704,949
import time

def date():
    """ Return date string """
    return time.strftime("%B %d, %Y")
b26cf8a5012984bbd76f612b19f79a3c387b9d27
704,950
import math

def convert_weight(prob):
    """Convert probability to weight in WFST"""
    weight = -1.0 * math.log(10.0) * float(prob)
    return weight
d9f6c38fd2efa49ddd515878a0943f9c82d42e1a
704,951
import numpy

def retrieve_m_hf(eri):
    """Retrieves TDHF matrix directly."""
    d = eri.tdhf_diag()
    m = numpy.array([
        [d + 2 * eri["knmj"] - eri["knjm"], 2 * eri["kjmn"] - eri["kjnm"]],
        [- 2 * eri["mnkj"] + eri["mnjk"], - 2 * eri["mjkn"] + eri["mjnk"] - d],
    ])
    return m.transpose(0, 2, 1, 3).reshape(
        (m.shape[0] * m.shape[2], m.shape[1] * m.shape[3])
    )
ad407f0294f906125ef6b5ecd7f8300114afb4a5
704,952
def GetDiv(number):
    """Decompose a number into its prime factors"""
    listnum = []
    stepnum = 2
    while stepnum * stepnum <= number:
        if number % stepnum == 0:
            number //= stepnum
            listnum.append(stepnum)
        else:
            stepnum += 1
    if number > 1:
        listnum.append(number)
    return listnum
fbbd4b9e73ebe9af6ef6dcc0151b8d241adbb45d
704,953
def my_decorator(view_func):
    """Define a decorator"""
    def wrapper(request, *args, **kwargs):
        print('The decorator was called')
        return view_func(request, *args, **kwargs)
    return wrapper
1e857263d6627f1a2216e0c2573af5935ba58637
704,954
def CalculateMediationPEEffect(PointEstimate2, PointEstimate3):
    """Calculate derived effects from simple mediation model.

    Given parameter estimates from a simple mediation model, calculate the
    indirect effect, the total effect and the direct effect

    Parameters
    ----------
    PointEstimate2 : array
        This is an array of parameter estimates for the regression equation
        of A on B. With no covariates, this will be an array of length 1
    PointEstimate3 : array
        This is an array of parameter estimates for the regression equation
        of A and B on C. With no covariates, this will be an array of length 2

    Returns
    -------
    IE
        The indirect effect, parameter a times b
    TE
        The total effect, which is IE plus DE
    DE
        The direct effect, the effect of A on C, when B is in the model
    a
        The effect of A on B
    b
        The effect of B on C, when A is in the model
    """
    # Indirect Effect
    a = PointEstimate2[0]  # The model of B with A has one beta which is index 0
    b = PointEstimate3[1]  # The model of C with A and B has two betas, b has index = 1
    IE = a * b
    # Direct Effect
    DE = PointEstimate3[0]  # This is c'
    # Total Effect
    TE = DE + IE
    return IE, TE, DE, a, b
d2247985e46a78bc3333983e09a1030fd59f139d
704,957
def rgb_to_hex(red_component=None, green_component=None, blue_component=None):
    """Return color as #rrggbb for the given color tuple or component values.

    Can be called as
    TUPLE VERSION:
        rgb_to_hex(COLORS['white']) or rgb_to_hex((128, 63, 96))
    COMPONENT VERSION
        rgb_to_hex(64, 183, 22)
    """
    if isinstance(red_component, tuple):
        red_component, green_component, blue_component = red_component
    return '#{:02X}{:02X}{:02X}'.format(
        red_component, green_component, blue_component)
37f5216f7f22f82072db6980541a815d87d02ef3
704,958
def params(kernels, time, target, target_frame, observer, corr):
    """Input parameters from WGC API example."""
    return {
        'kernels': kernels,
        'times': time,
        'target': target,
        'target_frame': target_frame,
        'observer': observer,
        'aberration_correction': corr,
    }
d030ad459b294a268c8bc3a851a32495dcbf5c02
704,960
import json

def read_cities_db(fname="world-cities_json.json"):
    """Read a database file containing names of cities from different countries.

    Source: https://pkgstore.datahub.io/core/world-cities/world-cities_json/data/5b3dd46ad10990bca47b04b4739a02ba/world-cities_json.json
    """
    with open(fname) as f:
        world_cities = json.load(f)
    country_city_pairs = set()
    processed_sub_countries = []
    for city_record in world_cities:
        country = city_record['country'].lower()
        if country == "south korea":
            # See my comment above regarding the special handling of South Korea
            country = "korea"
        city = city_record['name'].lower()
        country_city_pairs.add((country, city))
        subcountry = city_record['subcountry'].lower() if city_record['subcountry'] is not None else None
        if subcountry is not None and subcountry not in processed_sub_countries:
            # Add (subcountry, country)
            processed_sub_countries.append(subcountry)
            country_city_pairs.add((country, subcountry))
    # People use these abbreviations, so we can't ignore them
    country_city_pairs.add(('united states', 'usa'))
    country_city_pairs.add(('united states', 'u.s.a.'))
    country_city_pairs.add(('united kingdom', 'uk'))
    country_city_pairs.add(('united kingdom', 'u.k.'))
    country_city_pairs.add(('china', 'prc'))
    country_city_pairs.add(('china', 'p.r.c.'))
    # Sort by longest city name first, because later we want to do long-string-match
    country_city_pairs = sorted(country_city_pairs, key=lambda pair: len(pair[1]), reverse=True)
    return country_city_pairs
1edb970e329e7781cebb61853a13a6f45d349250
704,961
def isRef(obj):
    """Return the '_REF' value if obj is a reference dict, else False."""
    if isinstance(obj, dict) and '_REF' in obj:
        return obj['_REF']
    else:
        return False
0f1ad92cfafff5dcbc9e90e8544956b05c3452ec
704,962
def int_to_binary(x, n):
    """Convert an integer into its binary representation

    Args:
        x (int): input integer
        n (int): minimum width of the output, padded with leading zeros

    Returns:
        (str) binary representation
    """
    if type(x) != int:
        raise ValueError('x must be an integer.')
    return format(x, 'b').zfill(n)
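A couple of illustrative calls:

assert int_to_binary(5, 8) == '00000101'
# int_to_binary('5', 8) raises ValueError because x must be an int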
c3d68a798f84988290bd4e845a5bcc015872b054
704,963
def convert_examples_to_features(examples, tokenizer, max_seq_length,
                                 max_program_length, is_training,
                                 op_list, op_list_size,
                                 const_list, const_list_size,
                                 verbose=True):
    """Converts a list of DropExamples into InputFeatures."""
    unique_id = 1000000000
    res = []
    for (example_index, example) in enumerate(examples):
        features = example.convert_single_example(
            is_training=is_training,
            tokenizer=tokenizer,
            max_seq_length=max_seq_length,
            max_program_length=max_program_length,
            op_list=op_list,
            op_list_size=op_list_size,
            const_list=const_list,
            const_list_size=const_list_size,
            cls_token=tokenizer.cls_token,
            sep_token=tokenizer.sep_token)
        for feature in features:
            feature.unique_id = unique_id
            feature.example_index = example_index
            res.append(feature)
            unique_id += 1
    return res
d7024a0ff97d94a5c2aa32e63230e972584fb1d2
704,965
def determina_putere(n):
    """
    Determine the first power of 2 larger than n

    :param (int) n: the number of IPs needed, read from the keyboard
    :return (int) putere: the suitable power of 2
    """
    putere = 1
    while 2**putere < n + 2:
        putere += 1
    return putere
85e2c1dcd2ea5d86b5db3c6ced28dd65e244c467
704,966
def parse_commands(log_content):
    """
    parse cwl commands from the line-by-line generator of log file content
    and returns the commands as a list of command line lists, each
    corresponding to a step run.
    """
    command_list = []
    command = []
    in_command = False
    line = next(log_content)
    while line:
        line = line.strip('\n')
        if '[job' in line and line.endswith('docker \\'):
            line = 'docker \\'  # remove the other stuff
            in_command = True
        if in_command:
            command.append(line.strip('\\').rstrip(' '))
            if not line.endswith('\\'):
                in_command = False
                command_list.append(command)
                command = []
        line = next(log_content)
    return command_list
dff555cd0ec84619425fc05e4c8892c603bcc994
704,968
import os

def sp_cpu(file):
    """Read single-point output for cpu time."""
    spe, program, data, cpu = None, None, [], None
    if os.path.exists(os.path.splitext(file)[0] + '.log'):
        with open(os.path.splitext(file)[0] + '.log') as f:
            data = f.readlines()
    elif os.path.exists(os.path.splitext(file)[0] + '.out'):
        with open(os.path.splitext(file)[0] + '.out') as f:
            data = f.readlines()
    else:
        raise ValueError("File {} does not exist".format(file))

    for line in data:
        if line.find("Gaussian") > -1:
            program = "Gaussian"
            break
        if line.find("* O   R   C   A *") > -1:
            program = "Orca"
            break
        if line.find("NWChem") > -1:
            program = "NWChem"
            break

    for line in data:
        if program == "Gaussian":
            if line.strip().startswith('SCF Done:'):
                spe = float(line.strip().split()[4])
            if line.strip().find("Job cpu time") > -1:
                days = int(line.split()[3])
                hours = int(line.split()[5])
                mins = int(line.split()[7])
                secs = 0
                msecs = int(float(line.split()[9]) * 1000.0)
                cpu = [days, hours, mins, secs, msecs]
        if program == "Orca":
            if line.strip().startswith('FINAL SINGLE POINT ENERGY'):
                spe = float(line.strip().split()[4])
            if line.strip().find("TOTAL RUN TIME") > -1:
                days = int(line.split()[3])
                hours = int(line.split()[5])
                mins = int(line.split()[7])
                secs = int(line.split()[9])
                msecs = float(line.split()[11])
                cpu = [days, hours, mins, secs, msecs]
        if program == "NWChem":
            if line.strip().startswith('Total DFT energy ='):
                spe = float(line.strip().split()[4])
            if line.strip().find("Total times") > -1:
                days = 0
                hours = 0
                mins = 0
                secs = float(line.split()[3][0:-1])
                msecs = 0
                cpu = [days, hours, mins, secs, msecs]

    return cpu
ca9cb22b0981b3a14eafdd2637eccbe448597432
704,970
def to_geojson(series):
    """Return a GeoJSON geometry collection from the series (must be in EPSG:4326).

    Did not use the builtin for the series since it introduces a lot of bloat.
    """
    return {
        "type": "GeometryCollection",
        "geometries": series.apply(lambda x: x.__geo_interface__).to_list(),
    }
2ebdc001ed7a6fb3ee6e6cac9fc7722e19518e20
704,971
def getConstraintWeightAttr(leader, constraint):
    """
    Return the weight attribute from a constraint that corresponds
    to a specific leader node.

    Args:
        leader (PyNode): A node that is one of the leaders of a constraint
        constraint (PyNode): A constraint node
    """
    for i, target in enumerate(constraint.getTargetList()):
        if leader == target:
            return constraint.getWeightAliasList()[i]
e53ef981f505f1c8fc21fff7b71605764d6da3e0
704,972
import os

def file_contains_exact_text(filename, text):
    """Returns True iff the file exists and it already contains the given text."""
    if not os.path.isfile(filename):
        return False
    with open(filename, "r") as infile:
        intext = infile.read()
    return text == intext
49bbb86c30a5df5d41e78cd64ddb58d44eaaf899
704,973
def sort_characters(text, alphabet):
    """Counting Sort"""
    dim = len(text)
    order = [0] * dim
    count = {char: 0 for char in alphabet}
    for char in text:
        count[char] += 1
    # prefix sums: count[c] becomes the number of characters <= c
    for j in range(1, len(alphabet)):
        count[alphabet[j]] += count[alphabet[j-1]]
    # walk the text backwards, placing each index at its sorted position
    for i, char in reversed(tuple(enumerate(text))):
        count[char] -= 1
        order[count[char]] = i
    return order
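A small worked example of this suffix-array-style character ordering (assuming `alphabet` is sorted and covers every character of `text`):

# indices of "abba" in stable sorted-character order: both 'a's, then both 'b's
assert sort_characters("abba", ["a", "b"]) == [0, 3, 1, 2]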
9beb0f28a7f1ffb892e1393522b94f12e873ba66
704,974
import requests

def get_api_result(url):
    """ Retrieve JSON data from API via a supplied URL """
    s = requests.Session()
    r = s.get(url)
    return r.json()
933bd000b2e352f950ec86f8b6f1470ff2b0ecbd
704,975
def fn(r):
    """
    Returns the number of fields based on their radial distance

    :param r: radial distance
    :return: number of fields at radial distance
    """
    return 4 * r + 4
5fa4a5e8f2304f907b9dd806281dc77a2152f431
704,976
import argparse

def parse_arguments():
    """ Parse the command line arguments """
    ap = argparse.ArgumentParser()
    ap.add_argument("-ann", "--annotations_path", required=True,
                    help="Path to the directory containing the annotation files or path to the single "
                         "annotation file.")
    ap.add_argument("-ann_type", "--annotations_type", required=False, default="voc",
                    help="Annotations type ('voc', 'coco').")
    ap.add_argument("-det", "--detections_dir_path", required=True,
                    help="Path to the '.pkl' file containing detections generated from class agnostic OD "
                         "method. The detections should be a dictionary with keys as 'image names' and "
                         "values as the tuple of predicted boxes & scores (i.e. ([boxes], [scores]))")
    ap.add_argument("-N", "--top_N_dets", required=False, type=int, default=50,
                    help="Maximum number of top N detections sorted by confidence to be used for metrics "
                         "calculations. Note that the script also reports average number of ")
    ap.add_argument("-iou", "--iou_thresh", required=False, type=float, default=0.5,
                    help="IOU threshold to be used for computing AP and Recall. Default is 0.5.")
    ap.add_argument("--extra_metrics", action='store_true',
                    help="Flag to decide if to evaluate AP-small, AP-medium and AP-large.")
    args = vars(ap.parse_args())
    return args
e564699cbc74fba69b2bba90372a0513a81ae84c
704,977
def transformToUTM(gdf, utm_crs, estimate=True, calculate_sindex=True):
    """Transform GeoDataFrame to UTM coordinate reference system.

    Arguments
    ---------
    gdf : :py:class:`geopandas.GeoDataFrame`
        :py:class:`geopandas.GeoDataFrame` to transform.
    utm_crs : str
        :py:class:`rasterio.crs.CRS` string for destination UTM CRS.
    estimate : bool, optional
        .. deprecated:: 0.2.0
            This argument is no longer used.
    calculate_sindex : bool, optional
        .. deprecated:: 0.2.0
            This argument is no longer used.

    Returns
    -------
    gdf : :py:class:`geopandas.GeoDataFrame`
        The input :py:class:`geopandas.GeoDataFrame` converted to
        `utm_crs` coordinate reference system.
    """
    gdf = gdf.to_crs(utm_crs)
    return gdf
02405ca581054b5d804c6e4eb49be96d0915e3de
704,978
import re

def remove_prohibited_characters(prompt_str: str) -> str:
    """ Remove prohibited characters. """
    prohibited_chars = ["[", "]", "<", ">", "#", "%", "$", ":", ";", "~",
                        "\r", " ", "\n"]
    result_str = prompt_str
    for ch in prohibited_chars:
        result_str = result_str.replace(ch, "")
    if "\x1b" in result_str:
        # for powerline.
        result_str = re.sub("\x1b.*h", "", result_str)
        result_str = re.sub("\x1b.*m", "", result_str)
    return result_str
8eabb923b5ee59656fb41164d14be0ba6e4535f4
704,979
import subprocess

def bzr_find_files(dirname):
    """Find versioned files using bzr, for use in 'setuptools.file_finders'
    entry point in setup.py."""
    cmd = 'bzr ls --versioned ' + dirname
    proc = subprocess.Popen(
        cmd.split(),
        stdin=subprocess.PIPE,
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE)
    stdout, _stderr = proc.communicate()
    return stdout.splitlines()
8bfc6975b3aaaabc3a955dbef92d96dcea15f518
704,980
def pyramid_sum(lower, upper, margin=0):
    """Returns the sum of the numbers from lower to upper,
    and outputs a trace of the arguments and return values
    on each call."""
    blanks = " " * margin
    print(blanks, lower, upper)  # Print the arguments
    if lower > upper:
        print(blanks, 0)  # Print the returned value
        return 0
    else:
        result = lower + pyramid_sum(lower + 1, upper, margin + 4)
        print(blanks, result)  # Print the returned value
        return result
751facb309f362c35257aab2b239a37b39a98a04
704,982
import unicodedata

def normalize_caseless(text):
    """Normalize a string as lowercase unicode KD form.

    The normal form KD (NFKD) will apply the compatibility decomposition,
    i.e. replace all compatibility characters with their equivalents.
    """
    return unicodedata.normalize("NFKD", text.casefold())
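A few illustrative calls showing why casefold plus NFKD is stronger than plain lower():

assert normalize_caseless("Straße") == "strasse"  # casefold expands ß
assert normalize_caseless("Ⅻ") == "xii"           # NFKD decomposes compatibility characters
assert normalize_caseless("HELLO") == normalize_caseless("hello")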
c26f8470ea6312cce7a97930999d489ee30eb692
704,983
def calculate_maximum_potential_edge_counts(channel_composition, N, max_ble_span):
    """Computes the maximum number of possible occurrences per potential edge type.

    Parameters
    ----------
    channel_composition : Dict[str, int]
        Channel composition description.
    N : int
        Number of BLEs in the cluster.
    max_ble_span : int
        Maximum BLE span in the pattern.

    Returns
    -------
    Dict[str, int]
        Maximum number of occurrences of each edge type.
    """
    back_dir = {'L': 'R', 'R': 'L', 'U': 'D', 'D': 'U'}
    counts = {}
    for src_ble in range(0, N):
        for sink_ble in range(max(0, src_ble - max_ble_span),
                              min(N - 1, src_ble + max_ble_span) + 1):
            for w_src in channel_composition:
                src_dirs = ('L', 'R')
                if w_src[0] == 'V':
                    src_dirs = ('U', 'D')
                for src_dir in src_dirs:
                    for w_sink in channel_composition:
                        sink_dirs = ('L', 'R')
                        if w_sink[0] == 'V':
                            sink_dirs = ('U', 'D')
                        for sink_dir in sink_dirs:
                            if sink_dir == back_dir[src_dir]:
                                continue
                            inc = channel_composition[w_src] * channel_composition[w_sink]
                            try:
                                counts[(w_src, w_sink)] += inc
                            except KeyError:
                                counts.update({(w_src, w_sink): inc})

    e_str = lambda e: "potential_edge__%s%s__%s%s" \
                      % (e[0], "_tap_0" if e[0][0] == 'V' else '',
                         e[1], "_tap_0" if e[1][0] == 'V' else '')

    return {e_str(e): counts[e] for e in counts}
55f891631bd109066735e9997cbb3dc35de8d21a
704,984
def g_logv(s):
    """read a logical variable

    :param str s:
    :return bool:
    """
    return s == '1' or s.lower() == 'yes' or s.lower() == 'true'
e9984eced79cccc09a465b07bfac5185db72a604
704,986
import os

def record_mode(request):
    """Manage compatibility with DD client libraries."""
    mode = os.getenv("RECORD", "false")
    if mode is not None:
        if mode == "none":
            request.config.option.disable_vcr = True
        else:
            setattr(
                request.config.option,
                "vcr_record",
                {"true": "all", "false": "none"}[mode],
            )
            request.config.option.disable_vcr = False
    return mode
a68af5c9449bf300fec18c51168e2f9096038b1f
704,987
import json

def try_to_replace_line_json(line, json_type, new_json, json_prefix=""):
    """Attempts to replace a JSON declaration if it's on the line.

    Parameters
    ----------
    line: str
        A line from a JavaScript code file. It's assumed that, if it declares
        a JSON, this declaration will only take up one line (i.e. it will be
        of the form
        "[whitespace?]var [JSON prefix?][JSON name] = {JSON contents};").
        If a replacement is made, everything on and after the { in this line
        will be replaced with the contents of the new JSON, followed by ";\n".
    json_type: str
        One of "rank", "sample", or "count". Other values will result in a
        ValueError being thrown.
    new_json: dict
        A JSON to try replacing the current variable declaration (if present)
        on the input line with.
    json_prefix: str (default value: "")
        An optional prefix that will be appended to any JSON names we try to
        replace. If this is anything but "", this *won't replace normal JSON
        lines* (e.g. "var rankPlotJSON = {") -- instead, this will only
        replace lines with the given prefix (e.g. if the prefix is "SST",
        then only JSON lines of the format "var SSTrankPlotJSON = {" will be
        replaced.

    Returns
    -------
    (line, replacement_made): str, bool
        If no replacement was made, replacement_made will be False and line
        will just equal the input line.
        If a replacement was made, replacement_made will be True and line
        will be equal to the new line with the JSON replaced.
    """
    prefixToReplace = ""
    if json_type == "rank":
        prefixToReplace = "var {}rankPlotJSON = {{"
    elif json_type == "sample":
        prefixToReplace = "var {}samplePlotJSON = {{"
    elif json_type == "count":
        prefixToReplace = "var {}countJSON = {{"
    else:
        raise ValueError(
            "Invalid json_type argument. Must be 'rank', "
            "'sample', or 'count'."
        )
    prefixToReplace = prefixToReplace.format(json_prefix)
    if line.lstrip().startswith(prefixToReplace):
        return (
            (
                line[: line.index("{")]
                + json.dumps(new_json, sort_keys=True)
                + ";\n"
            ),
            True,
        )
    return line, False
602897349b52be3f10a41cf90d211ad70a6d4cc2
704,988
def read(file_path, lines=False):
    """Returns contents of file either as a string or list of lines."""
    with open(file_path, 'r') as fp:
        if lines:
            return fp.readlines()
        return fp.read()
86b36dbc2792ac70bd9a71c74486643b3cdef690
704,989
def UnescapeUnderscores(s: str):
    """Reverses EscapeWithUnderscores."""
    i = 0
    r = ''
    while i < len(s):
        if s[i] == '_':
            j = s.find('_', i + 1)
            if j == -1:
                raise ValueError('Not a valid string escaped with `_`')
            ss = s[i + 1:j]
            if not ss:
                r += '_'
            else:
                r += chr(int(ss, 16))
            i = j + 1
        else:
            r += s[i]
            i += 1
    return r
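A couple of illustrative decodings (assuming the matching escape scheme encodes characters as underscore-delimited hex):

assert UnescapeUnderscores("a_62_c") == "abc"  # _62_ decodes hex 0x62 -> 'b'
assert UnescapeUnderscores("__") == "_"        # an empty escape is a literal underscore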
c793666527b37ee66f832e650e6c6aac47bc8a82
704,990
def filter_(stream_spec, filter_name, *args, **kwargs):
    """Alternate name for ``filter``, so as to not collide with the
    built-in python ``filter`` operator.
    """
    return filter(stream_spec, filter_name, *args, **kwargs)
0e55c8c6093fafed58ced08c757e6a489fcefa17
704,991
from typing import List
from typing import Dict

def parse_secrets(raw: List[str]) -> Dict[str, str]:
    """Parses secrets"""
    result: Dict[str, str] = {}
    for raw_secret in raw:
        keyval = raw_secret.split('=', 1)
        if len(keyval) != 2:
            raise ValueError(f'Invalid secret "{raw_secret}"')
        result[keyval[0]] = keyval[1]
    return result
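An illustrative call (keys and values are made up):

assert parse_secrets(["KEY=value", "TOKEN=a=b"]) == {"KEY": "value", "TOKEN": "a=b"}
# only the first '=' splits, so values may themselves contain '='
# parse_secrets(["oops"]) raises ValueError('Invalid secret "oops"')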
d209c954c75353c17f0bca561c3ad94fc26a9ad0
704,992
def get_state(initial, input_value=None):
    """Get new state, filling initial and optional input_value."""
    return {
        'last_position': None,
        'initial': [initial],
        'input': [input_value] if input_value is not None else [],
        'output': [],
    }
7520341debf6b7287a445be1a44e51bd5675472f
704,993
import operator

def predictkNNLabels(closest_neighbors, y_train):
    """Predicts the label of an individual point in X_test based on the labels
    of its nearest neighbour(s): sums up the total number of appearances of
    each label and returns the label that occurs the most.
    """
    labelPrediction = {}
    for i in range(len(closest_neighbors)):
        if y_train[closest_neighbors[i]][0] in labelPrediction:
            labelPrediction[y_train[closest_neighbors[i]][0]] += 1
        else:
            labelPrediction[y_train[closest_neighbors[i]][0]] = 1
    sortedLabelPrediction = sorted(labelPrediction.items(),
                                   key=operator.itemgetter(1), reverse=True)
    return sortedLabelPrediction[0][0]  # gives the most common label
aa7ce9383253230f2c0535e3e27e2f2442dec043
704,994
def cell_snippet(x, is_date=False):
    """create the proper cell snippet depending on the value type"""
    if type(x) == int:
        return {
            'userEnteredValue': {'numberValue': x},
            'userEnteredFormat': {
                'numberFormat': {
                    'type': 'NUMBER',
                    'pattern': '#,##0'
                }
            }
        }
    elif type(x) == float:
        return {
            'userEnteredValue': {'numberValue': x},
            'userEnteredFormat': {
                'numberFormat': {
                    'type': 'DATE' if is_date else 'NUMBER',
                    'pattern': 'yyyy/mm/dd hh:mm:ss' if is_date else '#,##0.00'
                }
            }
        }
    else:
        return {
            'userEnteredValue': {'stringValue': x}
        }
bc91279e5e9b4e9e6b853badf28081e0e4746549
704,995
def lowerUserList(inputList):
    """Lowercase user inputLists in case there are misspellings. (e.g. 3-6KB)"""
    # clean list
    loweredList = []
    for item in inputList:
        loweredItem = item.lower()
        loweredList.append(loweredItem)
    return loweredList
e5d55a39a98b741758c8b1e8306a4ee486c7a29d
704,996
import torch def get_disp_samples(max_dis, feature_map, stage_id=0, disprity_map=None, step=1, samp_num=9, sample_spa_size=None) : """function: get the sampled disparities args: max_dis: the maximum disparity; feature map: left or right feature map, N*C*H*W; disprity_map: if it is not the first stage, we need disparity map to be the sampling center in new cost volume, N*H*W; step: the step size between each samples, where -1 represents the traditional sampling from 0 to max_disp; samp_num: the total number of samples; return: the sampled disparities for each pixel, N*S*H*W; """ # print("disprity_map: {}".format(disprity_map.size())) batch_size, channels, height, width = feature_map.size() if disprity_map is None or step==-1 or stage_id==0 : disp_samples = torch.arange(max_dis, dtype=feature_map.dtype, device=feature_map.device).expand(batch_size,height,width,-1).permute(0,3,1,2) else : # # get the range only from one pixel # lower_bound = disprity_map-(samp_num/2)*step # upper_bound = disprity_map+(samp_num/2)*step # lower_bound = lower_bound.clamp_(min=0.0) # upper_bound = upper_bound.clamp_(max=max_dis) # get the range from the pixel and its neighbors if sample_spa_size is None : kernel_size = 3 if stage_id==1 else 5 if stage_id==2 else 7 else : kernel_size = sample_spa_size lower_bound = torch.abs( torch.max_pool2d(-disprity_map, kernel_size=kernel_size, stride=1, padding=int((kernel_size-1)//2)) ) upper_bound = torch.max_pool2d(disprity_map, kernel_size=kernel_size, stride=1, padding=int((kernel_size-1)//2) ) modified_disp_range = (samp_num*step - (upper_bound-lower_bound)).clamp(min=0) / 2 lower_bound = (lower_bound - modified_disp_range).clamp(min=0, max=max_dis) upper_bound = (upper_bound + modified_disp_range).clamp(min=0, max=max_dis) new_step = (upper_bound-lower_bound) / (samp_num-1) disp_samples = lower_bound.unsqueeze(1) + (torch.arange(0, samp_num, device=disprity_map.device, dtype=disprity_map.dtype, requires_grad=False).reshape(1, -1, 1, 1) * new_step.unsqueeze(1)) # disp_samples = [] # for i in np.arange(samp_num) : # disp_samples.append(lower_bound+i*step) # disp_samples = torch.stack(disp_samples,dim=1) # disp_samples = [] # for i in np.arange(-(samp_num//2)*step, samp_num//2*step, step) : # disp_samples.append(disprity_map+i) # disp_samples = torch.stack(disp_samples,dim=1) # disp_samples = disp_samples.clamp_(min=0,max=max_dis-1) # print("disp_samples: {}".format(disp_samples.size())) return disp_samples
d07bc06e69b0015f9604a91fe4455ef61bb3c505
704,998
def short_comment(x):
    """Ham comments are often short, such as 'cool video!'"""
    return len(x.text.split()) < 5
f8d91feb4549219275dd5bace104cd8d89b96748
704,999
import random
import string

def generate_random_name():
    """Generate a random name to use as a handle for a job."""
    return "".join(random.choice(string.ascii_lowercase) for j in range(8))
c793c77289e7813cfd679b23613a9b1cd38af941
705,000
def validate_index(n: int, ind: int, command: str):
    """
    Simple function to validate existence of index within the model repository.

    Args:
        n (int): length of indices
        ind (int): selected index
        command (str): name of command for "tailored" help message
    """
    # ensure index exists in indices
    if -n <= ind < n:
        return ind
    else:
        raise IndexError(f"Index {ind} does not exist... Run `kaos {command} list` again")
3aef711caef041d2f4aa1dfdf0b5135d9f626b3c
705,002
def wrap_ddp(cls):
    """Return wrapper class for the torch.DDP and apex.

    Delegate getattr to the inner module.
    """
    class _Wrap(cls):
        def __init__(self, *args, **kwargs):
            super().__init__(*args, **kwargs)

        def __getattr__(self, name):
            wrapped_module = super().__getattr__('module')
            if hasattr(wrapped_module, name):
                return getattr(wrapped_module, name)
            return super().__getattr__(name)

    return _Wrap
d8d4148a4e26e28bca76bbac7ecd0e0cea64b3ba
705,004
def mortgage_max(buyer_dsr):
    """
    Calculates maximum loan available to offer based on buyer's
    proposed downpayment and downpayment percent
    ...

    Returns
    -------
    loan : float
        Returns maximum available loan to be offered
    """
    downpayment_percent = 0
    min_downpayment = 0
    try:
        if buyer_dsr.home_price <= 500000:
            downpayment_percent = 0.05
        elif buyer_dsr.home_price > 500000 and buyer_dsr.home_price <= 1000000:
            downpayment_percent = 0.1
        else:
            downpayment_percent = 0.2
    except Exception:
        return None
    loan = buyer_dsr.downpayment / downpayment_percent
    return loan
c1fa429d61100dce20b5a4cbeaef365d39ad14af
705,005
import uuid
import base64
import os

def save_files(images):
    """
    Save encoded image to image file on local machine.

    Image is decoded and saved as JPEG file with UUID name
    in "imstore" directory.

    :param images: encoded image string (string)
    :return: file name of stored image (string)
    """
    filename = str(uuid.uuid1())
    img = base64.b64decode(images)
    filename = filename + '.jpg'
    pathname = os.path.join('imstore/', filename)
    with open(pathname, 'wb') as file:
        file.write(img)
    return filename
ba3e6f99570b540470fe687f69fc13ac803c9a39
705,006
def InterferenceDict(data_list):
    """Creates an interference double dict from a list of (lat, lng, interference) tuples."""
    if not isinstance(data_list, list):
        data_list = [data_list]
    result = {}
    for lat, lon, data in data_list:
        if lat not in result:
            result[lat] = {}
        result[lat][lon] = data
    return result
fae34ea1182c6f709691ef1bab72f1796d073a2a
705,007