Dataset columns: content — string (39 to 14.9k chars); sha1 — string (40 chars); id — int64 (0 to 710k).
import time


def seconds_since(t):
    """seconds_since returns the seconds since `t`.

    `t` is assumed to be a time in epoch seconds, since time.time()
    returns the current time in epoch seconds.

    Args:
        t (int) - a time in epoch seconds

    Returns:
        int: the number of seconds since `t`
    """
    return time.time() - t
db398fa2a18689c5ccd05d365948e9b5cd1f99d5
24,519
def intersect(box1, box2):
    """Calculate the intersection of two boxes."""
    b1_x0, b1_y0, b1_x1, b1_y1 = box1
    b2_x0, b2_y0, b2_x1, b2_y1 = box2
    x0 = max(b1_x0, b2_x0)
    y0 = max(b1_y0, b2_y0)
    x1 = min(b1_x1, b2_x1)
    y1 = min(b1_y1, b2_y1)
    if x0 > x1 or y0 > y1:
        # No intersection, return None
        return None
    return (x0, y0, x1, y1)
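A quick sanity check with hypothetical boxes, assuming the (x0, y0, x1, y1) corner convention:

overlapping = intersect((0, 0, 4, 4), (2, 2, 6, 6))  # -> (2, 2, 4, 4)
disjoint = intersect((0, 0, 1, 1), (5, 5, 6, 6))     # -> None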
70daa13f0f0e59bbb21f52a74434935a4424dc68
24,520
def donottrack(request):
    """
    Adds ``donottrack`` to the context, which is ``True`` if the
    ``HTTP_DNT`` header is ``'1'``, ``False`` otherwise.

    This context processor requires installation of the
    ``donottrack.middleware.DoNotTrackMiddleware``.

    Note that use of this context processor is not strictly necessary.
    (Though it is quite convenient.) If you are using the
    ``django.core.context_processors.request`` context processor, you have
    access to ``{{ request.donottrack }}``.
    """
    # We could just use the ``donottrack.utils.get_donottrack`` function
    # rather than rely on the middleware, but we want to require the
    # middleware so that the VARY header is properly patched to account
    # for DNT.
    try:
        return {'donottrack': request.donottrack}
    except AttributeError:
        raise AttributeError(
            "'WSGIRequest' object has no attribute 'donottrack'"
            " - 'donottrack.middleware.DoNotTrackMiddleware' must be in your"
            " MIDDLEWARE_CLASSES")
50dc0c60ed70137c9aa1473ae436e48ef859ae9a
24,522
def _sort_orders(orders):
    """Sort a list of possible orders that are to be tried so that the
    simplest ones are at the beginning.
    """
    def weight(p, d, q, P, D, Q):
        """Assigns a weight to a given model order, according to which the
        orders are sorted. It is only a simple heuristic that makes it so
        that the simplest models are sorted first.
        """
        cost = 0
        if P + D + Q:
            cost += 1000
        cost += p**2 + d**2 + q**2
        # Weight the seasonal terms. The original used d and q here,
        # which appears to be a typo for D and Q.
        cost += P**3 + 2*D**3 + 3*Q**3
        return cost

    orders = [(weight(*o), o) for o in orders]
    orders.sort()
    orders = [x[1] for x in orders]
    return orders
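A small illustration with hypothetical (p, d, q, P, D, Q) tuples; non-seasonal candidates sort ahead of seasonal ones:

candidates = [(2, 1, 2, 1, 0, 0), (0, 1, 0, 0, 0, 0), (1, 1, 1, 0, 0, 0)]
print(_sort_orders(candidates))
# [(0, 1, 0, 0, 0, 0), (1, 1, 1, 0, 0, 0), (2, 1, 2, 1, 0, 0)]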
a164aa850e44ddb59429436b78ea2cf0603a1aae
24,524
def ToGLExtensionString(extension_flag):
    """Returns the GL-type extension string of an extension flag."""
    if extension_flag == "oes_compressed_etc1_rgb8_texture":
        # Fixup inconsistency with rgb8, unfortunate.
        return "OES_compressed_ETC1_RGB8_texture"
    uppercase_words = ['img', 'ext', 'arb', 'chromium', 'oes', 'amd',
                       'bgra8888', 'egl', 'atc', 'etc1', 'angle']
    parts = extension_flag.split('_')
    return "_".join(
        [part.upper() if part in uppercase_words else part for part in parts])
78d767beba572291193c9819f885c8eb46650c1c
24,525
def uniqued(iterable):
    """Return unique list of items preserving order.

    >>> uniqued([3, 2, 1, 3, 2, 1, 0])
    [3, 2, 1, 0]
    """
    seen = set()
    add = seen.add
    return [i for i in iterable if i not in seen and not add(i)]
51bc142d6872a2e811724cd0371f982a390d8f06
24,528
def genKgris(k):
    """
    Compute a list of ``k`` shades of grey running from black to white.

    Parameter:
        k --> number of shades (>=2)

    The generated list must necessarily start with the colour black
    (0, 0, 0) and necessarily end with the colour white (255, 255, 255).
    The other values must be shades of grey uniformly distributed
    between black and white.

    ::

        EXAMPLES::
        >>> genKgris(2)
        [(0, 0, 0), (255, 255, 255)]
        >>> genKgris(3)
        [(0, 0, 0), (127, 127, 127), (255, 255, 255)]
        >>> genKgris(4)
        [(0, 0, 0), (85, 85, 85), (170, 170, 170), (255, 255, 255)]
    """
    coef = 255 // (k - 2 + 1)  # -2 (black and white) +1 (at least 1 element)
    # `teintes` holds the channel value of each shade, to avoid repetition.
    # It starts with black...
    teintes = [0]
    teintes += [n * coef for n in range(1, k - 1)]  # intermediate values
    # ...and ends with white.
    teintes += [255]
    return [(v, v, v) for v in teintes]
3e480d5bba5f60e3448392da97c7d7738f5decad
24,531
def fromRoman(r):
    """Return the numeric value of a valid roman numeral."""
    # Handle subtractions by trying to match two characters first.
    # If this fails, match only one character.
    romanMap = dict((('M', 1000), ('CM', 900), ('D', 500), ('CD', 400),
                     ('C', 100), ('XC', 90), ('L', 50), ('XL', 40),
                     ('X', 10), ('IX', 9), ('V', 5), ('IV', 4), ('I', 1)))
    num, i = 0, 0
    try:
        while i < len(r) - 1:
            if r[i:i+2] in romanMap:
                num += romanMap[r[i:i+2]]
                i += 2
            else:
                num += romanMap[r[i]]
                i += 1
        if i < len(r):
            num += romanMap[r[i]]
        return num
    except KeyError:
        raise ValueError('Bad roman numeral: ' + r)
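Two quick checks of the two-character subtraction handling:

print(fromRoman('XIV'))      # 14
print(fromRoman('MCMXCIV'))  # 1994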
b03c49bf1dfb3f72585ff2667cf9df2eea9e7a4c
24,532
import glob


def summfile(ifldpth, idx):
    """
    Find _x1dsum file ending with idx + '_x1dsum.fits'

    Parameters
    ----------
    ifldpth : str
        path to the files
    idx : str
        the file ends with idx + '_x1dsum.fits'
        e.g. idx = '10'

    Returns
    -------
    fname : str
        sum file name
    """
    fname = glob.glob(ifldpth + '*' + idx + '_x1dsum.fits')
    if len(fname) != 1:
        print('multiple or zero ' + idx + '_x1dsum files: ', len(fname))
        print(ifldpth)
    else:
        fname = fname[0]
    return fname
05f6778276487de8feb684dca41c326321335049
24,534
def get_hashable_cycle(cycle):
    """Cycle as a tuple in a deterministic order.

    Args
    ----
    cycle: list
        List of node labels in cycle.
    """
    # get index of minimum label in cycle.
    m = min(cycle)
    mi = cycle.index(m)
    mi_plus_1 = (mi + 1) if (mi < len(cycle) - 1) else 0
    if cycle[mi - 1] > cycle[mi_plus_1]:
        result = cycle[mi:] + cycle[:mi]
    else:
        result = list(reversed(cycle[:mi_plus_1])) + \
            list(reversed(cycle[mi_plus_1:]))
    return tuple(result)
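As a sanity check, rotations and reflections of the same cycle all map to one canonical tuple:

print(get_hashable_cycle([2, 3, 1]))  # (1, 2, 3)
print(get_hashable_cycle([3, 1, 2]))  # (1, 2, 3)
print(get_hashable_cycle([1, 3, 2]))  # (1, 2, 3)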
28e81992a4fc306151e8ac87426721f41d516beb
24,535
def get_susceptibility_matrix_index(age):
    """
    The age matrix is 16x16 and is split into 5-year age bands,
    so we can use whole division to quickly get the index.
    """
    if age >= 75:
        return 15
    else:
        return age // 5
6efb3665f8578820d4bd1c969538c3026f2bdcf9
24,541
def truncate(string, length=60):
    """Truncate the given string when it exceeds the given length."""
    return string[:length - 4] + '.' * 3 if len(string) > length else string
c3bfc6d78703830e23240e671bc5c29f1cdcb19d
24,543
from PIL import Image


def pilnew_imageobject(image_file: str):
    """Returns a PIL image object from a referenced image file.

    Args:
        image_file (str): Reference an existing image file.

    Returns:
        PIL.Image.Image: Returns a PIL image object
    """
    image_object = Image.open(image_file)
    return image_object
f80e8b2cb9a4a93f94fe1d3005c53caa54c00877
24,545
def string_handler(item):
    """
    Create a string out of an item if it isn't one already.

    Parameters:
        - item: The variable to make sure it is a string.

    Returns:
        The input as a string.
    """
    return str(item) if not isinstance(item, str) else item
dcd767d1e05bab1f2ce770347b578b627005acf4
24,546
import random


def random_word_swap(sentence, p):
    """Swaps words from a sentence with probability p."""
    def swap_word(new_words):
        idx_1 = random.randint(0, len(new_words) - 1)
        idx_2 = idx_1
        counter = 0
        while idx_2 == idx_1:
            idx_2 = random.randint(0, len(new_words) - 1)
            counter += 1
            if counter > 3:
                return new_words
        new_words[idx_1], new_words[idx_2] = new_words[idx_2], new_words[idx_1]
        return new_words

    words = sentence.split(' ')
    new_words = words.copy()
    n = int(p * len(words))
    for _ in range(n):
        new_words = swap_word(new_words)
    sentence = ' '.join(new_words)
    return sentence
f81467776a3c1e8bae3fdb411bd95f8c6440fea2
24,550
def is_array_type(rtype):
    """
    Test to see if return type parameter is a NumPy array.

    :param str rtype: Return type parameter.
    :return: *True* if return type parameter is a NumPy array, *False* if not.
    :rtype: bool
    """
    if rtype.lower() in ['ndarray', 'array', 'arr', 'np', 'a']:
        return True
    return False
6973a9e1830b89fea87c70e42df97bbfc39e49c2
24,554
def check_magnetic_atoms(in_path: str) -> list:
    """
    Args:
        in_path (str) - path for the prepared-to-siman structure reader
            POSCAR type file.
    Returns:
        magnetic_atoms (list) - list of magnetic atoms in prepared
            structures with "fake" atoms, e.g.
            >>> magnetic_atoms
            ['Po', 'Eu']
    """
    with open(in_path, 'r') as in_f:
        in_data = in_f.readlines()
        magnetic_atoms = in_data[5].split()[: 2]
    return magnetic_atoms
b0428fea0767688823b30cab14a11052fed0d149
24,560
import torch
from typing import Callable
from typing import Iterable
from typing import Tuple


def make_batcher(
    x: torch.Tensor, y: torch.Tensor, batch_size: int
) -> Callable[[], Iterable[Tuple[torch.Tensor, torch.Tensor]]]:
    """Returns a function that can be called to yield batches over x and y."""
    assert x.shape[0] == y.shape[0]
    idxes = range(0, x.shape[0], batch_size)

    def batcher() -> Iterable[Tuple[torch.Tensor, torch.Tensor]]:
        for start in idxes:
            end = start + batch_size
            yield x[start:end], y[start:end]

    return batcher
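A short usage sketch with small random tensors:

x = torch.randn(10, 3)
y = torch.randn(10)
batcher = make_batcher(x, y, batch_size=4)
for xb, yb in batcher():
    print(xb.shape, yb.shape)  # batches of 4, 4, then the remaining 2 rows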
2bcb36b9eca07f86aed565a437bdb2b9b5cb21f6
24,564
import re


def munge_subject_to_filename(subject):
    """Derive a suitable filename from a commit's subject"""
    if subject.endswith('.patch'):
        subject = subject[:-6]
    return re.sub(r'[^A-Za-z0-9-]+', '_', subject).strip('_').lower() + '.patch'
b67350f2f4003f04c234103a2e3f7f1bf34dedbf
24,567
def abstract_class(cls_):
    """Decorate a class, overriding __new__.

    Preventing a class from being instantiated similar to abc.ABCMeta
    but does not require an abstract method.
    """
    def __new__(cls, *args, **kwargs):
        if cls is cls_:
            raise TypeError(
                f'{cls.__name__} is an abstract class and may not be instantiated.')
        return object.__new__(cls)

    cls_.__new__ = __new__
    return cls_
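Usage: instantiating the decorated class raises, while subclasses instantiate normally (hypothetical class names):

@abstract_class
class Base:
    pass

class Child(Base):
    pass

Child()  # fine
Base()   # TypeError: Base is an abstract class and may not be instantiated.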
15e6f5472b3a6e29ad26fd9c705ea418e9917ceb
24,569
def is_upvoted(submission):
    """
    If a submission is upvoted and has comments, we assume the question
    is welcomed, and that there's no need for a template answer.
    """
    min_score = 3
    min_comment_count = 1
    return (
        submission.score > min_score
        and len(submission.comments) > min_comment_count
    )
90fe43e6cd681a15daa97dba039e7fa94ac617ca
24,572
def get_label(filename, labels=["head", "body", "arm", "tail", "leg", "ear"]):
    """
    If the filename contains a word from the list called labels,
    it returns a pair of name and id number.

    Examples:
        get_label('./raccoon/labels/head.ply', ["head", "leg"])
            returns ("head", 0)
        get_label('./raccoon/somefilename.ply', ["goose", "raccoon"])
            returns ("raccoon", 1)
    """
    for label in labels:
        if label.lower() in filename.lower():
            return (label, labels.index(label))
    return -1
    # raise Exception("There exists no label with " + filename + ". Provide the label of the contained file through the folder name or filename.")
6c83e714de6c0b4959524110ac8dbe2f5210485d
24,581
def _get_dropbox_path_from_dictionary(info_dict, account_type):
    """
    Returns the 'path' value under the account_type dictionary within the
    main dictionary
    """
    return info_dict[account_type]['path']
0869c08cd9d0f70d9dcdeb5bee843a6c9046d5bc
24,585
def binary_search(array, value):
    """Search for value in sorted array, using binary search

    Continually divide (sub-)array in half until value is found, or
    entire array has been searched. Iterative approach.

    Parameters
    ----------
    array : list
        List to search. Must be sorted.
    value : any
        Value to search for. Must be same type as elements in array.

    Returns
    -------
    bool
        True if array contains value, False otherwise
    """
    # Set starting indexes, which will be used to index sub-array as it shrinks
    low = 0
    high = len(array) - 1

    # Keep searching until low and high pointers overlap
    while low <= high:
        mid = (high + low) // 2
        # Check to see if dividing index (mid) equals value we're searching for
        if array[mid] == value:
            return True
        elif value < array[mid]:
            # -1 since don't want to check value at mid again (redundant).
            # The original moved `high` to `array[mid] - 1`, an index/value
            # mix-up; the pointer must move to `mid - 1`.
            high = mid - 1
        else:
            # +1 since don't want to check value at mid again (redundant)
            low = mid + 1
    return False
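A few checks, including the single-element case the original loop condition skipped:

data = [1, 3, 5, 7, 9, 11]
print(binary_search(data, 7))  # True
print(binary_search(data, 4))  # False
print(binary_search([5], 5))   # True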
08fc6be6571854a0003a7ccc354c397dfb791059
24,586
from typing import Tuple


def blend_colors(
    rgba: Tuple[int, int, int, float], rgb: Tuple[int, int, int]
) -> Tuple[int, int, int]:
    """Get the resulting RGB color of an RGBA color rendered over an RGB color."""
    # The original named channels 1 and 2 `blue` and `green` (swapped);
    # the computed values were in the right positions, only the names
    # were misleading.
    red = (rgba[0] * rgba[3]) + (rgb[0] * (1 - rgba[3]))
    green = (rgba[1] * rgba[3]) + (rgb[1] * (1 - rgba[3]))
    blue = (rgba[2] * rgba[3]) + (rgb[2] * (1 - rgba[3]))
    result_color = int(red), int(green), int(blue)
    return result_color
bf48b7193002a48aa8996c543c3bd5633ae06362
24,587
def stripnl(s):
    """replace newlines in a string with spaces"""
    return str(s).replace("\n", " ")
da0a46707b19b6faa054cd1e4dad96b9651b3f0b
24,589
from typing import List


def solution(board: List[List[int]]) -> int:
    """Find the largest square of 1s in the given 2D array.

    Args:
        board (List[List[int]]): 2D array whose elements are 1 or 0

    Returns:
        int: area of the largest square
    """
    row, column = len(board), len(board[0])
    global_max = 0
    for i in range(row):
        for j in range(column):
            if not (i and j):
                global_max = board[i][j] if board[i][j] > global_max else global_max
                continue
            if board[i][j]:
                near = [board[i - 1][j - 1], board[i - 1][j], board[i][j - 1]]
                board[i][j] = min(near) + 1
                global_max = board[i][j] if board[i][j] > global_max else global_max
    return global_max * global_max
a34b7decafc1dc79bce566335e4edba7900e7744
24,592
import re


def distribute(string, delim, groups=[r'\S+', r'\S+']):
    r"""Distributes one part of a string to other parts of that string
    (separated by a delimiter), returning a list of strings.

    Args:
        string: input string
        delim: regex matching delimiter between two parts of string to be
            distributed upon
        groups: list of regular expressions that match the parts of the
            string receiving the distributed portion of the string.
            defaults to [r'\S+', r'\S+'] (i.e. two blocks of non-whitespace)

    Returns:
        If delimiter and groups are found, returns a list of strings.
        If they are not found, returns a list containing just the
        original string.

    Examples:
        >>> distribute('hot spam/eggs', r'/')
        ['hot spam', 'hot eggs']
        >>> distribute('hot spam/eggs on toast', r'/')
        ['hot spam on toast', 'hot eggs on toast']
        >>> distribute('hot spam/eggs on toast', r'/', [r'\S+', r'\S+ on toast'])
        ['hot spam', 'hot eggs on toast']
    """
    output = []
    n = len(re.findall(delim, string)) + 1
    gps = groups + [groups[-1]] * (n - len(groups))
    rx = re.compile(delim.join([r'(' + group + r')' for group in gps]))
    re_match = rx.search(string)
    if re_match:
        output = [rx.sub(re_match.group(i), string) for i in range(1, n + 1)]
    else:
        return [string]
    return output
3b64474cabdf755bd41088e9e7f8d8a6e20adec4
24,593
import random


def generateColor(text):
    """Deterministically generates a colour for a given text."""
    random.seed(text)
    return '#%06X' % random.randint(0, 0xFFFFFF)
6577ba33642b68bdef3d1cec914078fa1d2b8b27
24,600
def unit_size(size):
    """Convert Byte size to KB/MB/GB/TB."""
    units = ['KB', 'MB', 'GB', 'TB']
    i = 0
    size = size / 1024
    while size >= 1024 and i < (len(units) - 1):
        i = i + 1
        size = size / 1024
    return '%.2f %s' % (size, units[i])
9f174d94cd7c3e57399f0689d67c35fe97da5e23
24,607
def tally_output(cons_file, output_path, file_stem):
    """Writes a tally file for easy graphing/analysis."""
    tally_file = "{}/{}.tally".format(output_path, file_stem)
    with open(cons_file, "r") as reader, open(tally_file, "w") as writer:
        for line in reader:
            if line.startswith('#'):
                writer.write(line.split('# ')[1])
    return tally_file
02c1b20a1e71482344e5ad8516a2f8ed86429a60
24,608
def project_is_connected(project, user):
    """
    Return True if the given project is connected (joined and authorized).
    """
    return project.is_joined(user)
7d513406f1ec66eb57d8f7a6f0dc5688d521f755
24,617
def filter_lower_case_keys(dict):
    """
    Filter dict to include only lower case keys.
    Used to skip HTTP response fields.

    :param dict: Dict with all capabilities parsed from the SSDP discovery.
    :return: Dict with lower case keys only.
    """
    return {key: value for key, value in dict.items() if key.islower()}
f441202f6ae66ab023431c42680f349169ea0f79
24,627
def supports_int(value: object) -> bool:  # noqa: E302
    """Check if an int-like object has been passed (:class:`~typing.SupportsInt`).

    Examples
    --------
    .. code:: python

        >>> from nanoutils import supports_int

        >>> supports_int(1.0)
        True

        >>> supports_int(1.5)
        False

        >>> supports_int(1)
        True

        >>> supports_int('1')
        True

        >>> supports_int('not a int')
        False

    Parameters
    ----------
    value : :class:`object`
        The to-be evaluated object.

    Returns
    -------
    :class:`bool`
        Whether the passed **value** is int-like.
    """
    # floats that can be exactly represented by an integer are also fine
    try:
        int(value)  # type: ignore
        return float(value).is_integer()  # type: ignore
    except Exception:
        return False
c7a920837a080030d1300c99951d887ed4917091
24,637
from typing import Callable
from typing import Dict
from typing import Any


def memoized(func: Callable) -> Callable:
    """Decorator that caches values returned by a function for future calls.

    The first time func is called with a particular sequence of arguments,
    the result is computed as normal. Any subsequent calls to func with the
    same sequence of arguments will then return the same value without
    requiring it to be recomputed.
    """
    memo: Dict[Any, Any] = {}

    def memo_func(*args: Any) -> Any:
        if args not in memo:
            memo[args] = func(*args)
        return memo[args]

    return memo_func
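A classic use, memoizing a recursive Fibonacci so each value is computed once:

@memoized
def fib(n: int) -> int:
    return n if n < 2 else fib(n - 1) + fib(n - 2)

print(fib(80))  # 23416728348467685, returned almost instantly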
fd348711215713aff3361cfc47609634a457de88
24,638
def intify_and_make_intron(junction):
    """Convert start and stop strings to ints and shorten the interval

    Interval is shortened to match the introns of SJ.out.tab files from STAR

    Parameters
    ----------
    junction : tuple
        (chrom, start, stop, strand) tuple of strings,
        e.g. ('chr1', '100', '200', '-')

    Returns
    -------
    intron : tuple
        (chrom, start, stop, strand) tuple of string, int, int, string.
        Adds 1 to original start and subtracts 1 from original stop

    >>> intify_and_make_intron(('chr1', '100', '200', '-'))
    ('chr1', 101, 199, '-')
    """
    chrom, start, stop, strand = junction
    start = int(start) + 1
    stop = int(stop) - 1
    return chrom, start, stop, strand
200a8504e0a28194ea70fe9e9702094b6ddeb1f1
24,640
def get_value_for_key_from_json(key, data_structure):
    """
    Given a data_structure return the *first* value of the key from the
    first dict.

    example:
        data_structure: [{"id": 1, "name": "name1"}, {"id": 2, "name": "name2"}]
        key: "id"
        return 1

    :param key: key of a dict of which to return the value
    :param data_structure: python data_structure
    :return: value of key in first dict found
    """
    if type(data_structure) == list:
        for v in data_structure:
            # print("recursively searching %s" % str(v))
            r = get_value_for_key_from_json(key, v)
            if r is not None:
                return r
        return None
    elif type(data_structure) == dict:
        if key in data_structure:
            return data_structure[key]
        else:
            for k in data_structure:
                v = data_structure[k]
                # print("recursively searching %s" % str(v))
                r = get_value_for_key_from_json(key, v)
                if r is not None:
                    return r
        return None
071d6c771d750bdef99a209ba537d0709c1d9582
24,641
import pickle


def predict(dataX):
    """Predict dependent variable from a features vector"""
    # load model
    model = pickle.load(open('model.pickle', 'rb'))
    # predict
    predY = model.predict(dataX.values.reshape(-1, dataX.shape[1]))
    return predY
85505d3435ad8593542851680aef7491058a0239
24,642
def format_pci_addr(pci_addr):
    """Pad a PCI address, e.g. 0:0:1.1 becomes 0000:00:01.1

    :param pci_addr: str
    :return pci_addr: str
    """
    domain, bus, slot_func = pci_addr.split(':')
    slot, func = slot_func.split('.')
    return '{}:{}:{}.{}'.format(domain.zfill(4), bus.zfill(2),
                                slot.zfill(2), func)
9da677c2f1ff832cfbe86f19dffcada1fb33003a
24,643
import re


def make_url_pattern(url: str, version: str) -> str:
    """Returns a regular expression for matching versioned download URLs.

    Args:
        url: Existing download URL for `version`.
        version: Version corresponding to `url` (must be a substring).

    Returns:
        Regular expression that matches URLs similar to `url`, where all
        instances of `version` are replaced by match groups.
    """
    replacement_temp = 'XXXXXXXXXXXXXXX'
    return re.escape(url.replace(version, replacement_temp)).replace(
        replacement_temp, '([^/"\']+)')
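For example, with a hypothetical download URL:

pattern = make_url_pattern('https://example.com/pkg/1.2.3/pkg-1.2.3.tar.gz', '1.2.3')
m = re.match(pattern, 'https://example.com/pkg/2.0.0/pkg-2.0.0.tar.gz')
print(m.group(1))  # '2.0.0'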
384a58bf8ef02b75f510fe5884445e297862f0ef
24,647
import re


def parse_not_to_prune(model, config):
    """Returns a list of names of modules not to prune in the model"""
    patterns = [re.compile(s) for s in config.not_to_prune]
    parsed_not_to_prune = []
    for name, module in model.named_modules():
        if type(module).__name__ in config.prune_layer_types:
            if any([p.search(name) for p in patterns]):
                parsed_not_to_prune.append(name)
    return parsed_not_to_prune
71e56a5c3d9a2502e8309225feeb127b8b98f7eb
24,648
def right(x):
    """Helper function: argument x must be a dot. Returns dot right of x."""
    return (x[0] + 1, x[1])
bbb9b16ddbecd8bb452d941e5a871c9799f2ca7a
24,651
def isMatch(peak, biomarker, tolerance):
    """Check if spectral peak matches protein biomarker

    Args:
        peak: Spectral peak obtained from experiment, float
        biomarker: An array of biomarker values
        tolerance: Maximal difference between experimental weight and
            theoretical one that could be considered a match. float

    Return:
        True / False
    """
    for each in biomarker:
        if abs(float(peak) - each) <= float(tolerance):
            return True
    return False
a81a67deca75a4d41c17707dc2e8e528c8fc3949
24,654
def create_stratum_name(stratification_name, stratum_name, joining_string="X"):
    """
    generate a name string to represent a particular stratum within a
    requested stratification

    :param stratification_name: str
        the "stratification" or rationale for implementing the current
        stratification process
    :param stratum_name: str
        name of the stratum within the stratification
    :param joining_string: str
        the character to add to the front to indicate that this string is
        the extension of the existing one
        in SUMMER, capitals are reserved for non-user-requested strings,
        in this case "X" is used as the default
    :return: str
        the composite string for the stratification
    """
    return joining_string + "%s_%s" % (stratification_name, str(stratum_name))
d778f2538c3e9c451bcafba287ef34fcc5bed07b
24,658
import socket


def get_node_address(node_name=''):
    """
    Return the IP address associated to the node's domain.
    This is by no means perfect and should not be relied upon
    aside from testing purpose.
    """
    return socket.gethostbyname(socket.getfqdn(node_name))
d3f8b5c39118cf05e195d82430f024ef27d01c21
24,659
def extract_muMax(df_Annotations):
    """
    Extracts the growth rate (Slope) for each sample.

    Parameters
    ----------
    df_Annotations : pandas.DataFrame
        The dataframe contains the results of a linear fit through the
        exponential growth phase.

    Returns
    -------
    df_mu : pandas.DataFrame
        A dataframe that shows the calculated maximal growth rate for
        each sample.
    """
    # Delete index name and transform
    df_Annotations.index.name = None
    df_mu = df_Annotations.T

    # Keep only sample name and slope
    df_mu.reset_index(inplace=True)
    df_mu = df_mu.rename(columns={"index": "samplename_OD"})
    df_mu = df_mu[["samplename_OD", "Slope"]]
    df_mu["samplename_OD"] = df_mu["samplename_OD"].str[:6]

    # Rename slope
    df_mu.rename(columns={"Slope": "mu[/h]"}, inplace=True)
    return df_mu
63ca33ddf29c0bfc1ee1af49ef5b6ad4f89829ff
24,663
def _parse_system_prop(line):
    """Returns the name and value of a system property
    (The line is preceded by @).

    Args:
        line (str): The definition of the system property.
    Returns:
        str, str: Pair of name, value.
    """
    return line[1:].split("#")[0].split("=")
f2822175a717f9ee96524438eaa610898f7fb613
24,664
def pentad_to_month_day(p):
    """
    Given a pentad number, return the month and day of the first day in
    the pentad

    :param p: pentad number from 1 to 73
    :type p: integer
    :return: month and day of the first day of the pentad
    :rtype: integer
    """
    assert 0 < p < 74, 'p outside allowed range 1-73 ' + str(p)
    m = [1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3,
         4, 4, 4, 4, 4, 4, 5, 5, 5, 5, 5, 5, 5, 6, 6, 6, 6, 6, 6,
         7, 7, 7, 7, 7, 7, 8, 8, 8, 8, 8, 8, 9, 9, 9, 9, 9, 9,
         10, 10, 10, 10, 10, 10, 11, 11, 11, 11, 11, 11,
         12, 12, 12, 12, 12, 12]
    d = [1, 6, 11, 16, 21, 26, 31, 5, 10, 15, 20, 25,
         2, 7, 12, 17, 22, 27, 1, 6, 11, 16, 21, 26,
         1, 6, 11, 16, 21, 26, 31, 5, 10, 15, 20, 25, 30,
         5, 10, 15, 20, 25, 30, 4, 9, 14, 19, 24, 29,
         3, 8, 13, 18, 23, 28, 3, 8, 13, 18, 23, 28,
         2, 7, 12, 17, 22, 27, 2, 7, 12, 17, 22, 27]
    return m[p - 1], d[p - 1]
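Quick checks at the boundaries and mid-year:

print(pentad_to_month_day(1))   # (1, 1)
print(pentad_to_month_day(37))  # (6, 30)
print(pentad_to_month_day(73))  # (12, 27)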
183c3954fe6ee003af28ca13d2a2207812d66483
24,665
def balance_from_utxos(utxos):
    """Return balance of a list of utxo.

    Args:
        utxos: A list of utxo with following format.
            [{
                txid: ''
                value: 0,
                vout: 0,
                scriptPubKey: '',
            }]
    Returns:
        The sum of the utxo values.
    """
    balance = 0
    if utxos:
        balance = sum(utxo['value'] for utxo in utxos)
    return balance
e20373db24483bb33189b396da44b2c453b717d9
24,678
from typing import Counter


def most_common_letter(s: str) -> str:
    """Returns the most common letter in a string."""
    return Counter(s).most_common(1)[0][0]
9cf0d1fb6790a2f5a72dff976423868c0dce9a89
24,685
def captcha_halfway(numbers):
    """Sum the digits that match the one half way around a cyclic string."""
    total = 0
    for i in range(int(len(numbers) / 2)):
        if numbers[i] == numbers[i + int(len(numbers) / 2)]:
            total += int(numbers[i])
    return total * 2
c772c194c038484c144337dd32d93ab74e1d9370
24,696
def get_all_regions(config):
    """Retrieve a set of all regions used by the declared integration tests."""
    regions = set()
    for feature in config.get("test-suites").values():
        for test in feature.values():
            for dimensions_config in test.values():
                for dimensions_group in dimensions_config:
                    regions.update(dimensions_group.get("regions", []))
    return regions
c2dfddc73601f3661f482cf0ed5547b9b8434689
24,697
def keras_name_to_tf_name_block(keras_name,
                                keras_block='block1a',
                                tf_block='blocks_0',
                                use_ema=True,
                                model_name_tf='efficientnet-b0'):
    """Mapping name in h5 to ckpt that belongs to a block.

    We map name keras_name that points to a weight in h5 file to a name
    of weight in ckpt file.

    Args:
        keras_name: str, the name of weight in the h5 file of keras
            implementation
        keras_block: str, the block name for keras implementation
            (e.g. 'block1a')
        tf_block: str, the block name for tf implementation
            (e.g. 'blocks_0')
        use_ema: Bool, use the ExponentialMovingAverage result in ckpt
            or not
        model_name_tf: str, the name of model in ckpt.

    Returns:
        String for the name of weight as in ckpt file.

    Raises:
        ValueError if keras_block does not show up in keras_name
    """
    if keras_block not in keras_name:
        raise ValueError('block name {} not found in {}'.format(
            keras_block, keras_name))

    # all blocks in the first group will not have expand conv and bn
    is_first_blocks = (keras_block[5] == '1')
    tf_name = [model_name_tf, tf_block]

    # depthwise conv
    if 'dwconv' in keras_name:
        tf_name.append('depthwise_conv2d')
        tf_name.append('depthwise_kernel')

    # conv layers
    if is_first_blocks:
        # first blocks only have one conv2d
        if 'project_conv' in keras_name:
            tf_name.append('conv2d')
            tf_name.append('kernel')
    else:
        if 'project_conv' in keras_name:
            tf_name.append('conv2d_1')
            tf_name.append('kernel')
        elif 'expand_conv' in keras_name:
            tf_name.append('conv2d')
            tf_name.append('kernel')

    # squeeze expansion layers
    if '_se_' in keras_name:
        if 'reduce' in keras_name:
            tf_name.append('se/conv2d')
        elif 'expand' in keras_name:
            tf_name.append('se/conv2d_1')
        if 'kernel' in keras_name:
            tf_name.append('kernel')
        elif 'bias' in keras_name:
            tf_name.append('bias')

    # batch normalization layers
    if 'bn' in keras_name:
        if is_first_blocks:
            if 'project' in keras_name:
                tf_name.append('tpu_batch_normalization_1')
            else:
                tf_name.append('tpu_batch_normalization')
        else:
            if 'project' in keras_name:
                tf_name.append('tpu_batch_normalization_2')
            elif 'expand' in keras_name:
                tf_name.append('tpu_batch_normalization')
            else:
                tf_name.append('tpu_batch_normalization_1')
        for x in ['moving_mean', 'moving_variance', 'beta', 'gamma']:
            if x in keras_name:
                tf_name.append(x)

    if use_ema:
        tf_name.append('ExponentialMovingAverage')
    return '/'.join(tf_name)
6a249f992be1c1232ba8415523b8408be5680133
24,699
def _format_version(name):
    """Formats the string name to be used in a --version flag."""
    return name.replace("-", "_")
9b0bb72d6cef2836dce1f6c0d167ba41ce5a487b
24,705
import math


def rho2_rho1(M, beta, gamma):
    """Density ratio across an oblique shock (eq. 4.8)

    :param <float> M: Mach # upstream
    :param <float> beta: Shock angle w.r.t initial flow direction (radians)
    :param <float> gamma: Specific heat ratio

    :return <float> Density ratio r2/r1
    """
    m1sb = M * math.sin(beta)
    n1 = (gamma + 1.0) * m1sb ** 2
    d1 = 2.0 + (gamma - 1.0) * m1sb ** 2
    return n1 / d1
20de69d2ac14100cd8c88e3f51eb14220052195b
24,706
import logging
import pathlib


def collector(name, fields, filepath, append=False, format_types=None,
              delimiter='|'):
    """
    Returns a function for collecting rows with fields :fields: (along with
    datetime information) in a CSV log file located at :filepath:.

    We often want to collect some data about choices we are making while
    processing and transforming data. This collector function provides a
    way to do that using python's logging standard library module.

    :name: the name to be given to the logger
    :fields: list of fields that you want to collect
    :filepath: target for the logfile
    :append: (default False) if True, will append to the given filepath.
        Default behavior is to overwrite it, with column headings in the
        first line.
    :format_types: optional dictionary from fields to format-string types
        (like 's' or '.6f') describing how fields should be formatted in
        the CSV. Any fields not included will default to 's'.
    :delimiter: the delimiter in the CSV. Defaults to '|' to avoid
        collisions.
    """
    if not format_types:
        format_types = dict()
    if 'asctime' not in fields:
        fields = ['asctime'] + fields

    logger = logging.Logger(name)

    pathlib.Path(filepath).parent.mkdir(parents=True, exist_ok=True)
    if not append:
        with open(filepath, 'w') as f:
            f.write(delimiter.join(fields) + '\n')

    handler = logging.FileHandler(filepath, mode='a')
    default_types = {field: 's' for field in fields}
    types = {**default_types, **format_types}
    formatted_fields = [f"%({field}){types[field]}" for field in fields]
    formatter = logging.Formatter(delimiter.join(formatted_fields),
                                  "%Y-%m-%d %H:%M:%S")
    handler.setFormatter(formatter)
    handler.setLevel(logging.INFO)
    logger.addHandler(handler)

    def collect(**kwargs):
        logger.info("Collected data point for {vtd_splits}", extra=kwargs)

    return collect
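A minimal usage sketch (hypothetical logger name, fields, and path); the formatter pulls the row values from the keyword arguments passed as `extra`:

collect = collector('choices', ['district', 'population'], '/tmp/choices.csv')
collect(district='D1', population=4200)
collect(district='D2', population=3900)
# /tmp/choices.csv now contains a header row plus one row per call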
0f6f9fd0df1e262d2b0d0a8d4965f8038722e649
24,717
def print_network(net, out_f=None):
    """
    Prints the number of learnable parameters.

    :param net: the network.
    :type net:
    :param out_f: file
    :type out_f:
    :return: number of learnable parameters.
    :rtype: int
    """
    num_params = 0
    for param in net.parameters():
        num_params += param.numel()
    if out_f is not None:
        out_f.write(net.__repr__() + "\n")
        out_f.write('Total number of parameters: %d\n' % num_params)
        out_f.flush()
    return num_params
9dcfd7da51eab385dacb90d35bfeb139ed785be1
24,720
def _skip_first_max_pooling(inputs):
    """Whether to skip the first max pooling layer in ResNet.

    For 128x128 inputs, we skip the first 3x3 2-stride max pooling layer.

    Args:
        inputs: The inputs passed to ResNet.

    Returns:
        Whether to skip the first max pooling layer in ResNet.
    """
    dims = inputs.shape.dims
    size = dims[1]  # dims is like [B, H, W, C]
    return size == 128
45425522c23191e014c9cb687baf1a5d123fc443
24,723
def is_hom(G, H, hom):
    """
    Check whether hom is a homomorphism from G to H.
    Works for both directed and undirected.
    """
    assert set(G.vertices()) == set(hom.keys())
    assert set(H.vertices()).issuperset(set(hom.values()))
    for e in G.edges():
        u, v = e[0], e[1]
        if not H.has_edge(hom[u], hom[v]):
            return False
    return True
256ed3b2b496ed72bcd4e23f6f28bcc742049e15
24,725
from typing import List
from typing import Any


def lazy_content(__list: List, /, nvals: int, ellipsis: Any = ...) -> List:
    """Return a list with at most `nvals` items from the input list.

    Extra values in the middle are replaced with `ellipsis`.

    Examples:
        >>> lazy_content([1, 2, 3, 4, 5], 3)
        [1, Ellipsis, 5]
    """
    if len(__list) <= nvals:
        return __list
    return (
        __list[: nvals // 2]
        + [ellipsis]
        + __list[len(__list) - abs(nvals - 1) // 2:]
    )
7af6fe6766c677b0bd9261c305e1e5301567045f
24,728
def clear_form(n_clicks):
    """Empty input textarea"""
    return ""
a1d72b827f61d14d898170129d8dee75adf2a88a
24,729
def wrap_to_pmh(x, to2):
    """Wrap x to [-to2/2, to2/2)."""
    to = to2 / 2
    return (x + to) % to2 - to
29a1ce74e903fb23c316ec841097bf571f561a40
24,735
def return_index(lbound, ubound, cells, position):
    """
    Given the position of a node on a 1D mesh, this function will return
    the corresponding index of that node in an array that holds the node
    positions.

    lbound: Lower bound of mesh domain.
    ubound: Upper bound of mesh domain.
    cells: Number of cells along axis in mesh domain.
    position: Position of mesh node to find corresponding index for.

    returns Integer
    """
    index = (position - lbound) * cells / (ubound - lbound)
    return int(index)
dcb0fb35f6ec8b5d7d948b951ce6677d27023a95
24,736
import huggingface_hub


def list_metrics(with_community_metrics=True, with_details=False):
    """List all the metric scripts available on the Hugging Face Hub.

    Args:
        with_community_metrics (:obj:`bool`, optional, default ``True``):
            Include the community provided metrics.
        with_details (:obj:`bool`, optional, default ``False``):
            Return the full details on the metrics instead of only the
            short name.

    Example:

    ```py
    >>> from datasets import list_metrics
    >>> list_metrics()
    ['accuracy', 'bertscore', 'bleu', 'bleurt', 'cer', 'chrf', ...]
    ```
    """
    metrics = huggingface_hub.list_metrics()
    if not with_community_metrics:
        metrics = [metric for metric in metrics if "/" not in metric.id]
    if not with_details:
        metrics = [metric.id for metric in metrics]
    return metrics
db833a683cb3a549ba040cd5ef3c020c7286624e
24,742
def pathCount(stairs: int):
    """number of unique ways to climb N stairs using 1 or 2 steps"""
    # we've reached the top
    if stairs == 0:
        return 1
    else:
        if stairs < 2:
            return pathCount(stairs - 1)
        return pathCount(stairs - 2) + pathCount(stairs - 1)
69992b4f1a0043935a34ae15a3370f78d0bbf274
24,744
def _split_comment(lineno, comment):
    """Return the multiline comment at lineno split into a list of
    comment line numbers and the accompanying comment line"""
    return [(lineno + index, line) for index, line
            in enumerate(comment.splitlines())]
07a4f160ce47d9391b90646cf8b43b18eb32ed14
24,751
def ts_path(path):
    """Make a URL path with a database and test suite embedded in them."""
    return "/api/db_<string:db>/v4/<string:ts>/" + path
c007887e752af8aa4be6936f1fceb3df873d9d61
24,752
def display_formatter(input_other_type):
    """
    Used by QAbstractItemModel data method for Qt.DisplayRole
    Format any input value to a string

    :param input_other_type:
    :return: str
    """
    return str(input_other_type)
8f5e619d98e666f3d6820ec119dd01b11d357555
24,753
def first_name(s):
    """
    Returns the first name in s

    Examples:
        first_name('Walker White') returns 'Walker'
        first_name('Walker  White') returns 'Walker'

    Parameter s: a name 'first-name last-name'
    Precondition: s is a string 'first-name last-name' with one or more
    blanks between the two names.
    """
    end_first = s.find(' ')
    return s[:end_first]
ca37032ac865981ff83d8f076b3c827334e2fc58
24,757
def replace_keys(d, old, new):
    """replace keys in a dict."""
    return {k.replace(old, new): v for k, v in d.items()}
44e44308741e37a3b499aac4518cff74f087a589
24,761
from typing import Union
import io


def _hash(fn, buffer: Union[io.StringIO, io.BytesIO]):
    """Partial function for generating checksum of buffer content."""
    buffer.seek(0)
    hashsum = fn()
    # Read in chunks; encode text chunks so StringIO buffers hash too.
    # The original b'' sentinel only terminated for BytesIO, which made
    # the StringIO case loop forever.
    while True:
        chunk = buffer.read(4096)
        if not chunk:
            break
        if isinstance(chunk, str):
            chunk = chunk.encode()
        hashsum.update(chunk)
    return hashsum.hexdigest()
c2b7b3ba1487273b7759d396d29e0aece15b5efa
24,766
import inspect


def _num_required_args(func):
    """Number of args for func

    >>> def foo(a, b, c=None):
    ...     return a + b + c

    >>> _num_required_args(foo)
    2

    >>> def bar(*args):
    ...     return sum(args)

    >>> print(_num_required_args(bar))
    None

    borrowed from: https://github.com/pytoolz/toolz
    """
    try:
        spec = inspect.getfullargspec(func)
        if spec.varargs:
            return None
        num_defaults = len(spec.defaults) if spec.defaults else 0
        return len(spec.args) - num_defaults
    except TypeError:
        return None
dacc2ed0165ea8bc1e4be45bf2a9477778d2fe45
24,768
def expand(values, index, padding=128):
    """
    Modify in place and return the list values[] by appending zeros to
    ensure that values[index] is not out of bounds. An error is raised
    if index is negative.
    """
    assert index >= 0, f"Oops: negative index in expand(values, index={index})"
    if index >= len(values):
        space_needed = index - len(values) + 1
        values.extend([0] * (space_needed + padding))
    return values
6f4d09244972e7af28b6119706a8095bedbc629d
24,769
import typing
import dataclasses


def dataclass_from_dict(cls: type, src: typing.Dict[str, typing.Any]) -> typing.Any:
    """Utility function to construct a dataclass object from dict"""
    field_types_lookup = {field.name: field.type
                          for field in dataclasses.fields(cls)}
    constructor_inputs = {}
    for field_name, value in src.items():
        if dataclasses.is_dataclass(field_types_lookup[field_name]):
            constructor_inputs[field_name] = dataclass_from_dict(
                field_types_lookup[field_name], value)
        else:
            constructor_inputs[field_name] = value
    return cls(**constructor_inputs)
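A usage sketch with hypothetical nested dataclasses:

@dataclasses.dataclass
class Point:
    x: int
    y: int


@dataclasses.dataclass
class Segment:
    start: Point
    end: Point


seg = dataclass_from_dict(Segment,
                          {'start': {'x': 0, 'y': 0}, 'end': {'x': 3, 'y': 4}})
print(seg.end)  # Point(x=3, y=4)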
f91ddb784d3a0ef4a2c5d78205f4a7908b79a1b3
24,772
from typing import Any


def pm_assert(
    condition: Any,
    exc: Any = Exception,
    context: Any = None,
    msg: str = "",
) -> Any:
    """
    Generic assertion that can be used anywhere

    @condition: A condition to assert is true
    @exc: Raise if @condition is False
    @context: The relevant data structures
    @msg: Any additional text to include
    """
    if not condition:
        raise exc(f"{msg}\n{context}")
    return condition
a99f0ed3f460f95c84a1391664cc2bc169f17201
24,775
def resize_halo_datasets(halos_dset, new_size, write_halo_props_cont, dtype):
    """
    Resizes the halo datasets

    Parameters
    -----------
    halos_dset: dictionary, required
    new_size: scalar integer, required
    write_halo_props_cont: boolean, required
        Controls if the individual halo properties are written as distinct
        datasets such that any given property for ALL halos is written
        contiguously (structure of arrays, SOA).
    dtype: numpy datatype

    Returns
    -------
    Returns ``True`` on successful completion
    """
    if write_halo_props_cont:
        for name in dtype.names:
            dset = halos_dset[name]
            dset.resize((new_size, ))
    else:
        halos_dset.resize((new_size, ))
    return True
a5461a776a0991eda04fc5d0e1d2a2a14e6e1f5f
24,780
def checksum(sentence):
    """Calculate and return checksum for given NMEA sentence"""
    crc = 0
    for c in sentence:
        crc = crc ^ ord(c)
        crc = crc & 0xFF
    return crc
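For example, over the payload of the commonly cited GPGGA sample sentence (the text between '$' and '*'):

payload = 'GPGGA,123519,4807.038,N,01131.000,E,1,08,0.9,545.4,M,46.9,M,,'
print('%02X' % checksum(payload))  # '47' for this well-known example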
841c35f11c5f4a46cfb62efc04379fa54772e739
24,784
def _apply_affine_scalar(i, j, k, affine_matrix):
    """
    Applies an affine matrix to the given coordinates. The 3 values i, j
    and k must be scalars. The affine matrix consists of a 3x3 rotation
    matrix and a 3x1 translation vector (plus the last row).

    Parameters
    ----------
    i, j, k: numeric scalars
        The source coordinates.
    affine_matrix: numpy 2D float array with shape (4, 4)
        The affine matrix

    Returns
    -------
    The coordinate vector after applying the matrix.
    """
    rotation = affine_matrix[:3, :3]
    translation = affine_matrix[:3, 3]
    return rotation.dot([i, j, k]) + translation
a548ac37cad242549e7f40ab799a2361dc2733ff
24,791
import re


def get_dict(post, key):
    """
    Extract from POST PHP-like arrays as dictionary.

    Example usage::

        <input type="text" name="option[key1]" value="Val 1">
        <input type="text" name="option[key2]" value="Val 2">

        options = get_dict(request.POST, 'option')
        options['key1'] is 'Val 1'
        options['key2'] is 'Val 2'
    """
    result = {}
    if post:
        patt = re.compile(r'^([a-zA-Z_]\w+)\[([a-zA-Z_\-][\w\-]*)\]$')
        for post_name, value in post.items():
            match = patt.match(post_name)
            if not match or not value:
                continue
            name = match.group(1)
            if name == key:
                k = match.group(2)
                result.update({k: value})
    return result
35649820407161432f5e3d01816376e67b19ea82
24,799
import json


def json_load(file):
    """load json data from file."""
    with open(file, 'r', encoding='utf8') as _file:
        data = json.load(_file)
    return data
9d2ef792a9d2b201a5608601057f4f76e7905662
24,804
from typing import Dict
from typing import Any


def severity(event: Dict[str, Any]) -> str:
    """Maps `score` of a Qualys event to `severity`.

    Possible `score` values:
        0  = Known Good [File/Process/Network]
        1  = Remediated [File/Process/Network]
        2  = Suspicious Low File event
        3  = Suspicious Low Process event
        4  = Suspicious Low Network event
        5  = Suspicious Medium File event
        6  = Suspicious Medium Process event
        7  = Suspicious Medium Network event
        8  = Malicious File event
        9  = Malicious Process event
        10 = Malicious Network event
    """
    if 'score' not in event:
        return 'Unknown'
    scores = {
        '0': 'None',
        '1': 'High',
        '2': 'Low',
        '3': 'Low',
        '4': 'Low',
        '5': 'Medium',
        '6': 'Medium',
        '7': 'Medium',
        '8': 'High',
        '9': 'High',
        '10': 'High',
    }
    score = event['score']
    return scores.get(score, 'Unknown')
4952bbf5b76f7d5f7ab16d1ef9f475a9a5f54582
24,805
def get_diff(df, column1, column2):
    """Get the difference between two column values.

    Args:
        df: Pandas DataFrame.
        column1: First column.
        column2: Second column.

    Returns:
        Difference of the two columns.

    Usage:
        df['item_quantity_vs_mean'] = get_diff(df, 'item_quantity',
                                               'item_code_item_quantity_mean')
    """
    return df[column1] - df[column2]
1fc5ec361cfdd28775257980c28b4924fdab4eeb
24,810
def int_to_bytes(number: int) -> bytes:
    """Convert integer to byte array in big endian format"""
    return number.to_bytes((number.bit_length() + 7) // 8, byteorder='big')
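Quick examples; note that 0 maps to the empty byte string under this formula:

print(int_to_bytes(255))     # b'\xff'
print(int_to_bytes(0x1234))  # b'\x124' (bytes 0x12, 0x34)
print(int_to_bytes(0))       # b'' (zero has bit_length 0)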
1158b63b71774c6202aa4c96dae54eca2fae2c0a
24,811
import random


def DEFAULTPOLICY(state):
    """
    random policy
    :param state: from this state, run simulation with random action
    :return: final reward after reaching the final state
    """
    while not state.terminal():  # simulate until terminal state
        random_action = random.randint(0, 3)
        state = state.next_state(random_action)
        # print(state)
    return state.reward()
99a281406c5a293a9040b2c772ca60fdc7b6a2eb
24,814
import hashlib
from pathlib import Path


def label_generator(predictions, processor, filename):
    """Generates a unique label for the image based on the predicted class,
    a hash, and the existing file extension.

    Parameters
    ----------
    predictions : np.array
        Output from model showing each target probability
    processor : keras.util
        Prediction utility unique to each model
    filename : str
        Path to image or image name

    Returns
    -------
    new_label : str
        New label consisting of predicted class plus a hash
    """
    # Hash predictions for always unique filename
    hashed = hashlib.sha1(predictions).hexdigest()
    # Get label from keras predictor
    label = processor(predictions, top=1)[0][0][1]
    # Capture original image suffix
    suffix = "".join(Path(filename).suffixes)
    new_label = f"{label}_{hashed}{suffix}"
    return new_label
8baba1aad6ad9a7f4d69b499a76b1b4079e79cb5
24,817
import re


def parse_csl_item_note(note):
    """
    Return the dictionary of key-value pairs encoded in a CSL JSON note.

    Extracts both forms (line-entry and braced-entry) of key-value pairs
    from "cheater syntax"
    https://github.com/Juris-M/citeproc-js-docs/blob/93d7991d42b4a96b74b7281f38e168e365847e40/csl-json/markup.rst#cheater-syntax-for-odd-fields
    """
    note = str(note)
    line_matches = re.findall(
        r'^(?P<key>[A-Z]+|[-_a-z]+): *(?P<value>.+?) *$', note, re.MULTILINE)
    braced_matches = re.findall(
        r'{:(?P<key>[A-Z]+|[-_a-z]+): *(?P<value>.+?) *}', note)
    return dict(line_matches + braced_matches)
2a9076c646cd3efff12a4f2bbc3d639e2104a5b3
24,822
def field_is_required(field):
    """
    Returns true if the field is required (e.g. not nullable)
    """
    return getattr(field, 'required', False)
15eaf261ef0316c06272b1c38e0ec65bd16e77e6
24,834
from datetime import datetime


def date_to_datetime(value):
    """Get datetime value converted from a date or datetime object

    :param date/datetime value: a date or datetime value to convert
    :return: datetime; input value converted to datetime

    >>> from datetime import date, datetime
    >>> from pyams_utils.date import date_to_datetime
    >>> value = date(2016, 11, 15)
    >>> date_to_datetime(value)
    datetime.datetime(2016, 11, 15, 0, 0)
    >>> value = datetime(2016, 11, 15, 10, 13, 12)
    >>> value
    datetime.datetime(2016, 11, 15, 10, 13, 12)
    >>> date_to_datetime(value) is value
    True
    >>> date_to_datetime(None) is None
    True
    """
    if not value:
        return None
    if isinstance(value, datetime):
        return value
    return datetime(value.year, value.month, value.day)
5ba2d15d8169d923b87181545eb910f1715ac4c2
24,836
def filter_set_options(options):
    """Filters out options that are not set."""
    return {key: value for key, value in options.items() if value}
e6605af08626189a501895973c59d6dd9956cb55
24,838
def not_in_dict_or_none(dict, key):
    """
    Check if a key is missing from a map or its value is None

    :param dict: map to look for key
    :param key: key to find
    :return: True if key is not in dict or dict[key] is None
    """
    return key not in dict or dict[key] is None
2bc3f2194b82e978ab8edb2ffaac7a88a58e9c9e
24,841
def to_digits_base10(n):
    """
    Return the digits of a number in base 10.
    """
    # Special-case zero, which the loop below would map to an empty list.
    if n == 0:
        return [0]
    digits = []
    remaining = n
    while remaining > 0:
        digit = remaining % 10
        remaining = (remaining - digit) // 10
        digits.append(digit)
    return digits[::-1]
5acc6a2ef1e10bc3142371944232c7d1bcad3a32
24,843
def pbc(rnew, rold):
    """
    Periodic boundary conditions for an msd calculation

    Args:
        rnew (:py:attr:`float`, optional): New atomic position
        rold (:py:attr:`float`, optional): Previous atomic position

    Returns:
        cross (:py:attr:`bool`, optional): Has the atom crossed a PBC?
        rnew (:py:attr:`float`, optional): New position
    """
    shift = abs(rold - rnew)
    shift = round(shift, 0)
    shift = int(shift)
    cross = False
    if shift < 2:
        if rnew - rold > 0.5:
            rnew = rnew - 1.0
            cross = True
        elif -(rnew - rold) > 0.5:
            rnew = rnew + 1.0
            cross = True
    else:
        if rnew - rold > 0.5:
            rnew = rnew - shift
            cross = True
        elif -(rnew - rold) > 0.5:
            rnew = rnew + shift
            cross = True
    return cross, rnew
67260f98371fbb95d2eca5958f75d80c76b89371
24,847
import requests


def submit_task(task: dict, username: str, password: str) -> str:
    """
    Submits a task using the AppEEARS API.

    Parameters
    ----------
    task: dictionary following the AppEEARS API task object format
    username: Earthdata username
    password: Earthdata password

    Returns
    -------
    Task ID

    Notes
    -----
    For more information about the task object convention and all the
    properties that can be specified, check the documentation:
    https://lpdaacsvc.cr.usgs.gov/appeears/api/#task-object
    """
    api_url = "https://lpdaacsvc.cr.usgs.gov/appeears/api"
    try:
        # get authorization token and build headers
        r = requests.post(f"{api_url}/login", auth=(username, password))
        r.raise_for_status()
        token = r.json()["token"]
        headers = {"Authorization": f"Bearer {token}"}

        # submit the task and logout to dispose of the authentication
        r = requests.post(f"{api_url}/task", json=task, headers=headers)
        requests.post(f"{api_url}/logout", headers=headers)
        return r.json()["task_id"]
    except requests.HTTPError as err:
        raise Exception(f"Error submitting task. {err}")
fd75b86b5258f9b4b0abd02f8fb289a7467149df
24,848
def compare_int(entry, num):
    """Return True if the integer matches the line entry, False otherwise."""
    return int(entry) == num
e779829b0d9a8343d3c48e6a66542e8e6ee62494
24,851
def _prepare_params(params):
    """return params as SmashGG friendly query string"""
    query_string = ''
    if len(params) == 0:
        return query_string
    prefix = '?expand[]='
    query_string = prefix + '&expand[]='.join(params)
    return query_string
9fc0573961d50536ee28ae576ac030197eae0cf2
24,852
def load_settings(path='api/settings'):
    """Loads the user settings located under `api/`"""
    with open(path, 'r', encoding='utf-8') as file:
        return {
            key.strip(): value.strip()
            for key, value in (line.split('=') for line in file if line.strip())
        }
1a89c3f80f0a8b4013429fbb060b453f5f447aa5
24,853
def get_mask(source, source_lengths):
    """
    Args:
        source: [B, C, T]
        source_lengths: [B]
    Returns:
        mask: [B, 1, T]
    """
    B, _, T = source.size()
    mask = source.new_ones((B, 1, T))
    for i in range(B):
        mask[i, :, source_lengths[i]:] = 0
    return mask
8466ff5113ca22488b4218f86c43bfea248197d1
24,855
import glob


def get_all_html_files(directory):
    """Returns list of html files located in the directory"""
    return glob.glob(directory + "/*.html")
5e92a8b4fc52ea63e5c65c5eb7b2487556b08a3d
24,857
import csv


def load_csv(csv_filename):
    """Load csv file generated by ```generate_training_testing_csv.py```
    and parse contents into ingredients and labels lists

    Parameters
    ----------
    csv_filename : str
        Name of csv file

    Returns
    -------
    list[str]
        List of ingredient strings
    list[dict]
        List of dictionaries, each dictionary the ingredient labels
    """
    labels, ingredients = [], []
    with open(csv_filename, 'r') as f:
        reader = csv.reader(f)
        next(reader)  # skip first row
        for row in reader:
            ingredients.append(row[0])
            labels.append({'quantity': row[1].strip(),
                           'unit': row[2].strip(),
                           'item': row[3].strip(),
                           'comment': row[4].strip()})
    return ingredients, labels
704151f36424f9e72ecb1d0dce9f8f7e8f77c1f1
24,870
def largest_common_substring(query, target, max_overhang):
    """Return the largest common substring between `query` and `target`.

    Find the longest substring of query that is contained in target.

    If the common substring is too much smaller than `query` False is
    returned, else the location `(start, end)` of the substring in
    `target` is returned.

    Parameters:
    -----------
    query (str)
        The sequence to be found in target (minus some overhangs possibly).
    target (str)
        The sequence in which to find `query`.
    max_overhang
        Maximal size allowed for the flanking regions of `query` that
        would not be contained in `target`.

    Examples
    --------
    >>> seqA = '-----oooooooo'
    >>> seqB = 'oooooo-----tttt'
    >>> largest_common_substring(seqA, seqA, 80)  # == (0, 12)
    >>> largest_common_substring(seqA, seqB, 80)  # == (5, 11)

    Notes:
    ------
    This is intended for finding whether `query` can be extracted from
    `target` using PCR. See the PcrExtractionStation implementation in
    DnaSupplier.py.
    """
    # The trick here is to start with the central region of "query".
    # This region is initially as small as max_overhang allows, and it is
    # progressively expanded on the sides
    max_overhang = min(max_overhang, int(len(query) / 2))
    start, end = max_overhang, len(query) - max_overhang
    if query[start:end] not in target:
        return False
    while (start >= 0) and (query[start:end] in target):
        start -= 1
    start += 1
    while (end < len(query)) and (query[start:end] in target):
        end += 1
    end -= 1
    return start, end
4e0e1e1ee9d5d37fe5e56601fcedab66621ac9fb
24,871