Dataset columns:
content: string (length 39 to 14.9k)
sha1: string (length 40)
id: int64 (0 to 710k)
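Each row below is one record with these three fields: the flattened source of a Python function (content), a 40-character hex digest (sha1), and an integer identifier (id). As a minimal sketch, and assuming the sha1 field is the hex SHA-1 digest of the raw content string (an assumption, not stated in the dump), a record could be checked like this; the function name is hypothetical:

import hashlib

def verify_record(content: str, sha1_hex: str) -> bool:
    # Hash the raw content and compare it to the record's sha1 field.
    # Assumes UTF-8 encoding; both sides are lowercase hex, so compare directly.
    return hashlib.sha1(content.encode("utf-8")).hexdigest() == sha1_hex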
from pathlib import Path def get_and_mark_bundle_cache_version(bundle_base: str, *, previously_bundled: bool) -> int: """ Check and return the bundle cache version. The marker filename is `.bundle_cache_version`. :param str bundle_base: The bundle directory :param bool previously_bundled: true if the user has previously used this workspace to build a bundle :returns: the cache layout version to use """ marker_path = Path(bundle_base) / '.bundle_cache_version' bundle_cache_version = 2 if previously_bundled: bundle_cache_version = 1 if marker_path.is_file(): bundle_cache_version = \ int(marker_path.read_text().rstrip()) marker_path.write_text(str(bundle_cache_version) + '\n') return bundle_cache_version
74ab1584b60ffb77dbb3709d01ea4df00a448ea4
11,986
def isinrectbnd(x: int, y: int, xmin: int, ymin: int, xmax: int, ymax: int) -> bool: """Checks if the x and y values lie within the rectangular area defined by xmin, ymin and xmax, ymax Args: x, y: (x,y) coordinates to test xmin, ymin: min (x, y) bounds xmax, ymax: max (x, y) bounds Returns: boolean value True -> (x, y) is in bounds False -> (x, y) is out of bounds """ return (x < xmax and y < ymax) and \ (x > xmin and y > ymin)
3f67de8669a258a554a8754786579517a07bc321
11,987
def remove_prefix(string: str, prefix: str): """ Removes a prefix from a string if present. Args: string (`str`): The string to remove the prefix from. prefix (`str`): The prefix to remove. Returns: The string without the prefix. """ return string[len(prefix) :] if string.startswith(prefix) else string[:]
598e1b9b863d342e757e54cf94035da63e3ace1f
11,990
def get_params(opt_over, net, net_input, downsampler=None): """ Returns parameters that we want to optimize over. :param opt_over: comma separated list, e.g. "net,input" or "net" :param net: network :param net_input: torch.Tensor that stores input `z` :param downsampler: :return: """ opt_over_list = opt_over.split(',') params = [] for opt in opt_over_list: if opt == 'net': params += [x for x in net.parameters()] elif opt == 'down': assert downsampler is not None params = [x for x in downsampler.parameters()] elif opt == 'input': net_input.requires_grad = True params += [net_input] else: assert False, 'what is it?' return params
b8a3c26b5307c0ba584e3841a2d98f337c618bf8
11,991
from typing import Optional from typing import Any def nested_get(dct: dict, *keys: str) -> Optional[Any]: """Multi-level get helper function.""" for key in keys: dct = dct.get(key, {}) return dct if dct else None
cd881389157d67365793240e1e2e0b39f4bc1726
11,995
def only_moto(request): """Return True if only moto ports are to be used for mock services.""" return request.config.option.only_moto
1ab211925a411d4999a22e77d819da88b4477ed6
11,996
def find(sub_str, target_str): """[summary] Substring search Arguments: sub_str {str} -- substring target_str {str} -- target string Returns: bool -- if substring is found in target string """ return sub_str in target_str # the in operator uses the Boyer–Moore algorithm: worst case O(mn), best case O(n/m) # certainly faster than re.search()
d26e4ad79eaf913d81126d7647036b857de5ed6d
11,997
def check_faces_in_caption(photo): """Checks if all faces are mentioned in the caption.""" comment = photo.comment if photo.getfaces() and not comment: return False for face in photo.getfaces(): parts = face.split(" ") # Look for the full name or just the first name. if (comment.find(face) == -1 and (len(parts) <= 1 or comment.find(parts[0]) == -1)): return False return True
eed03439df84a1ddd4cb7bcbb269af7c60adfcb5
11,998
def LfromS(seq): """ Compute Schroder function value L from a given sequence. This performs the calculation by plodding through the algorithm given in the paper. Humans can easily recognize the relation between elements of S and length of runs of 0s or 1s in the binary representation of L. Knowing that, there probably is a slicker way to code LfromS(). Args: seq (list of int): Sequence - see paper for details Returns: value of L, real in range 0.0 .. 1.0 Note that overflows are no problem - seq may contain "large" values like ten. Note also there's no check on the length of seq, or if it's empty. """ Lambda = 1.0 L = Lambda/2 for c in reversed(seq): L = L/(2**c) L = Lambda - L L = Lambda-L return L
6adf8c0b6f8e12d7e6a79efcb38c628ef0b29031
11,999
from typing import Dict import pathlib def dir_parser(path_to_dir: str) -> Dict[str, Dict[str, str]]: """ Parses the given directory, and returns the path, stem and suffix for files. """ files = pathlib.Path(path_to_dir).resolve().glob("*.*") files_data = {} for file in files: files_data[file.stem] = { "suffix": file.suffix, "path": file.as_posix(), } return files_data
3b6c0ac172ad863470d492aa99713fab3ecf5d99
12,011
def filter_data(data): """ Filters data for years after 2010 and data originating from Syria. Inputs: data (pd.DataFrame): Input data with column "Year" and "Origin" Returns: data (pd.DataFrame): Filtered data """ if 'Origin' in data.columns: data = data[(data.Year > 2010) & (data.Origin.str.contains('Syria'))] else: data = data[(data.Year > 2010)] return data
2daf6fbd815fcea9dd3c2712363cd99cdca62a34
12,013
def strrep(strg,x): """ The function returns a string with n repetitions; str, int -> str """ return strg*x
6e3d59666778af9db781a11e6f48b61010e7de4d
12,014
def get_positions(structure): """Wrapper to get the positions from different structure classes""" try: # ASE structure return structure.get_scaled_positions() except AttributeError: try: # diffpy structure return structure.xyz except AttributeError: raise ValueError("Unable to get positions from structure")
6428139131de02b577925be9499668642a11a69c
12,017
from typing import List def parse_comp_internal( comp_rules: str, top_delim: str, bottom_delim: str, rule_start: str ) -> List[str]: """ Do the heavy handling to parse out specific sub-sections of rules. :param comp_rules: Rules to parse :param top_delim: Section to parse :param bottom_delim: Section to cut away :param rule_start: What rules to pull that rule from :return: List of asked for components """ # Keyword actions are found in section XXX comp_rules = comp_rules.split(top_delim)[2].split(bottom_delim)[0] # Windows line endings... yuck valid_line_segments = comp_rules.split("\r\n") # XXX.1 is just a description of what rule XXX includes. # XXX.2 starts the action for _most_ sections keyword_index = 2 return_list: List[str] = [] for line in valid_line_segments: # Keywords are defined as "XXX.# Name" # We will want to ignore subset lines like "XXX.#a" if f"{rule_start}.{keyword_index}" in line: # Break the line into "Rule Number | Keyword" keyword = line.split(" ", 1)[1].lower() return_list.append(keyword) # Get next keyword, so we can pass over the non-relevant lines keyword_index += 1 return sorted(return_list)
5563e1eb7bbe9d738f550a220a733c6c2d11b509
12,019
def fully_connected(input, params): """Creates a fully connected layer with bias (without activation). Args: input (:obj:`tf.Tensor`): The input values. params (:obj:`tuple` of (:obj:`tf.Variable`, :obj:`tf.Variable`)): A tuple of (`weights`, `bias`). Probably obtained by :meth:`fully_connected_params`. Returns: :obj:`tf.Tensor`: The output values. """ weights, bias = params return input @ weights + bias
77815acfe17674bc20035900b75d8e4ddc982855
12,022
def GetIslandSlug(island_url: str) -> str: """Takes an island URL and converts it into the island slug Args: island_url (str): island URL Returns: str: island slug """ return island_url.replace("https://www.jianshu.com/g/", "")
71e1b609e5bd3c703a4d5d0789fddff117969e69
12,023
def get_base_classification(x: str) -> str: """ Obtains the base classification for a given node label. Args: x: The label from which to obtain the base classification. Returns: The base classification. """ return x.split('_', 1)[0]
8122d435af8ac6aef43faab349ee98dca75469d4
12,025
def check_individuals(ped_individuals, vcf_individuals): """ Check if the individuals from ped file is in vcf file Arguments: ped_individuals (iterator): An iterator with strings vcf_individuals (iterator): An iterator with strings Returns: bool: if the individuals exists """ for individual in ped_individuals: if individual not in vcf_individuals: raise IOError("Individuals in PED file must exist in VCF file") # Raise proper exception here return True
e65b24390c8cebff7870e46790cf1c0e9b2d37c6
12,026
import dill def load(filename): """ Load an instance of a bayesloop study class that was saved using the bayesloop.save() function. Args: filename(str): Path + filename to stored bayesloop study Returns: Study instance """ with open(filename, 'rb') as f: S = dill.load(f) print('+ Successfully loaded study.') return S
19ce91d2a4bb552362bd8f8ab67194e1241356d1
12,027
from typing import List def load_grid_from_string(grid_str: str) -> List[List[int]]: """Returns a grid by loading a grid passed as a string.""" array = [int(cell) for cell in grid_str.split()] grid = [[int(cell) for cell in array[i : i + 3]] for i in range(0, len(array), 3)] return grid
c3b9a91c9298b54226f5ef532d1948a41dc67eac
12,032
def fixbackslash(value): """Replace backslashes '\' in encoded polylines for Google Maps overlay.""" return value.replace('\\','\\\\')
20a1e6132c379049e949f50e413c66cf5e67e7dc
12,037
def _filter_tuples(diced_str, to_remove): """ Returns *diced_str* with all of the tuples containing any elements of the *to_remove* iterable filtered out. This is used to drop search terms from the diced_str once they've been matched. For example: # start with the output of the _dice doctest >>> p = [('a', 'b', 'c'), ('a', 'b'), ('b', 'c'), ('a',), ('b',), ('c',)] >>> _filter_tuples(p, ("a")) [('b', 'c'), ('b',), ('c',)] >>> _filter_tuples(p, ("b", "c")) [('a',)] """ # true if the tupl does not contain # any of the elements in *to_remove* def _func(tupl): for x in to_remove: if x in tupl: return False return True return filter(_func, diced_str)
625ca421e3b1ec3dd9f5187fe994ee095eff8d30
12,038
def print_xm_info(xm_dict, name_re): """Print a dictionary of xmethods.""" def get_status_string(m): if not m.enabled: return " [disabled]" else: return "" if not xm_dict: return for locus_str in xm_dict: if not xm_dict[locus_str]: continue print ("Xmethods in %s:" % locus_str) for matcher in xm_dict[locus_str]: print (" %s%s" % (matcher.name, get_status_string(matcher))) if not matcher.methods: continue for m in matcher.methods: if name_re is None or name_re.match(m.name): print (" %s%s" % (m.name, get_status_string(m)))
e2564a5fcb7dc435c3ba1fa71fe82532d2b5083e
12,040
def format_multitask_preds(preds): """ Input format: list of dicts (one per task, named with task_name), each having a 'predictions' list containing dictionaries that represent predictions for each sample. Prediction score is represented by the field {task_name}_score in each of those dicts. Output format: a list of lists of dictionaries, where now each dictionary include scores for all the tasks that were included in the input. """ out = [] score_names = [f"{task['task']}_score" for task in preds[1:]] first_task = preds[0] for sentence_idx, sentence in enumerate(first_task["predictions"]): out_sent = [] for token_idx, token in enumerate(sentence): for task, score in zip(preds[1:], score_names): token[score] = task["predictions"][sentence_idx][token_idx][score] out_sent.append(token) out.append(out_sent) return out
470b23f6a5cc6b8e48ce0becfafd62104e016de8
12,041
def issue_statuses(metric, last_measurement) -> list[dict]: """Return the metric's issue statuses.""" last_issue_statuses = last_measurement.get("issue_status", []) return [status for status in last_issue_statuses if status["issue_id"] in metric.get("issue_ids", [])]
23d2434727fa1b7a2e06fb621347270efd0627fc
12,050
def heroUnit( headline="", tagline="", buttonStyle="primary", buttonText="", buttonHref="#" ): """ *Generate a heroUnit - TBS style* **Key Arguments:** - ``headline`` -- the headline text - ``tagline`` -- the tagline text for below the headline - ``buttonStyle`` -- the style of the button to be used - ``buttonText`` -- the text for the button - ``buttonHref`` -- the anchor link for the button **Return:** - ``heroUnit`` -- the heroUnit """ heroUnit = """ <div class="hero-unit" id=" "> <h1>%(headline)s</h1> <p>%(tagline)s</p> <p> <a href="%(buttonHref)s" class="btn btn-%(buttonStyle)s btn-large"> %(buttonText)s </a> </p> </div>""" % locals() return heroUnit
42826a5d63023b1a87062205255f80a2c85dd0f6
12,052
def build_lines_data(lines) -> bytes: """ Builds the byte string from given lines to send to the server :param lines: :return bytes: """ result = [] for line in lines: result.append(f"{(len(line) + 5 ):04x}".encode()) result.append(line) result.append(b"\n") result.append(b"0000") return b''.join(result)
f5fd900c606b2e44454bbc15cab61e2b34809fab
12,058
def find_folder_on_path(path, target='.git', **kwargs): """ Given a path, find the repo root. The root is considered the top-level folder containing the '.git' folder. This method will traverse up the tree, searching each level. If no `.git` folder is found, None is returned. Otherwise the parent folder path of the `.git` folder is returned. # Parameters path:pathlib.Path - the path to search for the repo root. target:str - The name of the folder to search for. The parent of this folder is considered the root folder. - Default - '.git' - We'll use the .git folder to identify the root folder. # Return If the 'target' is found, the parent of 'target' is returned. Otherwise None is returned. """ # construct the search list, we want to search the path and its # parents. search = [path] + list(path.parents) for p in search: if p.joinpath(target).exists(): return p return None
317a061210bca432d128a82a8cc7e27b6aa4feee
12,059
def get_camera_serial_and_firmware(console_text): """ Scrapes console text for serial and firmware information of all connected realsense devices. Args: console_text (str): input console text containing all connected device information Returns: Array[dic]: One array item for each connected device. """ camera_data = [] for line in console_text.split("\n"): if "serial" in line and "firmware" in line: serial_num = None firmware_num = None for item in line.split(","): if "serial" in item and "update" not in item: # there are two items which have the word serial in them serial_num = item.split(" ")[-1] elif "firmware" in item: firmware_num = item.split(" ")[-1] camera_data.append( {"serial": serial_num, "firmware": firmware_num}) return camera_data
9426522f3815d7e73c12b5081c6308733836528d
12,063
def _get_config_parameter(config, section, parameter_name, default_value): """ Get the parameter if present in the configuration otherwise returns default value. :param config the configuration parser :param section the name of the section :param parameter_name: the name of the parameter :param default_value: the default to propose the user :return: """ return config.get(section, parameter_name) if config.has_option(section, parameter_name) else default_value
2431c37eab3396b79f4f9e649a7cc25fc27208dc
12,065
def as_escaped_unicode_literal( text: str ) -> str: """Convert the given ``text`` into a string of escaped Unicode hexadecimal. Args: text (:obj:`str`): The string to convert. :rtype: :obj:`str` A string with each character of the given ``text`` converted into an escaped Python literal. Example: >>> from flutils.strutils import as_escaped_unicode_literal >>> t = '1.★ 🛑' >>> as_escaped_unicode_literal(t) '\\\\x31\\\\x2e\\\\u2605\\\\x20\\\\U0001f6d1' """ out = '' for c in text: c_hex = hex(ord(c))[2:] c_len = len(c_hex) if c_len in (1, 2): out += '\\x{:0>2}'.format(c_hex) elif c_len in (3, 4): out += '\\u{:0>4}'.format(c_hex) else: out += '\\U{:0>8}'.format(c_hex) return out
2fc72bb8875023d86561552e9c64e55f5a99dfc1
12,069
def compute_top_minus_index_spread(mean_returns, market_index): """ Computes the difference between the mean returns of the top quantile and the market index. Parameters ---------- mean_returns : pd.DataFrame DataFrame of mean period wise returns by quantile. MultiIndex containing date and quantile. See mean_return_by_quantile. market_index: pd.DataFrame Returns ------- mean_return_difference : pd.Series """ mean_return_difference = mean_returns.xs(mean_returns.index.levels[0].max(), level='factor_quantile')-market_index # tmp=pd.merge(mean_returns.xs(mean_returns.index.levels[0].max(),level='factor_quantile'), # ) return mean_return_difference
8da2a1c0d316bd78f5f6efdc93dfdac6ee81a2e7
12,070
def pytest_exception_interact(node, call, report): """ Set a different exit code on uncaught exceptions. """ global unhandled_exit exctype, value, traceback = call.excinfo._excinfo if exctype == AssertionError: return report unhandled_exit = node.config.getoption('--unhandled-exc-exit-code')
0206a848c9324fe40fe1875e18a28f1b0e8f9def
12,072
def rivers_with_station(stations): """For a list of MonitoringStation objects, return a set of rivers which have a monitoring station""" rivers = [] for station in stations: rivers.append(station.river) return set(rivers)
a9af1a64acf0b5dbc89cecad83c3f654e77e74a8
12,080
def calculate_tax(income): """Implement the code required to make this function work. Write a function `calculate_tax` that receives a number (`income`) and calculates how much Federal tax is due, according to the following table: | Income | Tax Percentage | | ------------- | ------------- | | <= $50,000 | 15% | | <= $75,000 | 25% | | <= $100,000 | 30% | | > $100,000 | 35% | Example: income = 30000 # $30,000 is less than $50,000 calculate_tax(income) # $30,000 * 0.15 = 4500 = $4,500 income = 80000 # $80,000 is more than $75,000 but less than $100,000 calculate_tax(income) # $80,000 * 0.30 = 24000 = $24,000 income = 210000 # $210,000 is more than $100,000 calculate_tax(income) # $210,000 * 0.35 = 73500 = $73,500 """ tax = 0 if income <= 50000: tax += (income *.15) return tax elif income >= 50000 and income <= 75000: tax += (income * .25) return tax elif income >= 75000 and income <= 100000: tax += (income * .30) return tax elif income >= 100000: tax += (income * .35) return tax
fce9ad9ac9d88b1821a1772b6cc5bb9259ba7ae5
12,081
import collections def frequency(input_list): """ Finds the occurrences of elements in a list. Works with elements that are not numbers as well. :param input_list: list or tuple >>> frequency([1, 2, 2, 3, 3, 3, 4, 4, 4, 4]) Counter({4: 4, 3: 3, 2: 2, 1: 1}) """ return collections.Counter(input_list)
417dcd493da11f72609e4c4aa3c4edfb35e921aa
12,083
def array_to_dict(result_array, regions): """Convert an array with regions to dict with region as key Arguments --------- result_array : array Results in region_array regions : list List with all regions (order is the same) Returns -------- result_dict : dict reg, value """ result_dict = {} for reg_array_nr, region in enumerate(regions): result_dict[region] = result_array[reg_array_nr] return result_dict
263e69179983eec525bba9f79f1ed02539e1d647
12,084
def string_from_uuid(uuid_obj): """Returns standard hexadecimal string for uuid; same as str(uuid_obj). arguments: uuid_obj (uuid.UUID object): the uuid which is required in hexadecimal string format returns: string (36 characters: 32 lowercase hexadecimal digits and 4 hyphens) """ return str(uuid_obj)
903f3b257854900f2b534658dbfa4e504812c67b
12,087
def create_setWeight(col: str, weight: str, lemex: str): """ :return str: format of 'setweight(col, weight, lemex)' \n Check postgresql text search setweight function to see the syntax """ return """setweight(%s, '%s', '%s')""" % (col, weight, lemex)
965b25a5ccb8f3c68ecc3742a512dd7c28dac0a4
12,091
from functools import reduce def find_common_set_of_column_names(dfs): """Returns a sorted list of common columns names.""" cols = [set(df.columns) for df in dfs] return sorted(list(reduce(set.intersection, map(set, cols))))
77cf55ace97fbab3ad2e32c3656f5df883b2db7b
12,097
def clean_up_whitespace_in_template_output(text): """Remove some excess whitespace from using Django templates for YAML.""" ret = [] for line in text.split("\n"): # Truly empty lines are likely intentional, so keep them if not line: ret.append("") continue # If the line consists entirely of trailing whitespace, it is likely an # artifact of template tag formatting, so drop it. line = line.rstrip() if not line: continue ret.append(line) text = "\n".join(ret) if not text.endswith("\n"): text += "\n" return text
8428e5575f0a731d79ae796d3f20408e33a344e6
12,105
def mixed_radix_to_base_10(x, b): """Convert the `mixed radix`_ integer with digits `x` and bases `b` to base 10. Args: x (list): a list of digits ordered by increasing place values b (list): a list of bases corresponding to the digits Examples: Generally, the base 10 representation of the mixed radix number :math:`x_n\ldots x_1` where :math:`x_i` is a digit in place value :math:`i` with base :math:`b_i` is .. math:: \sum_{i=1}^nx_i\prod_{j=i+1}^nb_j = x_n + b_nx_{n-1} + b_nb_{n-1}x_{n-2} + \cdots + b_n\cdots b_2x_1 Convert 111 with bases :math:`(b_1,b_2,b_3)=(2,3,4)` to base 10: >>> from fem.discrete.combinatorics import mixed_radix_to_base_10 >>> mixed_radix_to_base_10([1,1,1], [2,3,4]) 17 .. _mixed radix: https://en.wikipedia.org/wiki/Mixed_radix """ res = x[0] for i in range(1, len(x)): res *= b[i] res += x[i] return res
a821ca5ee4a720a9c445c98b2bcd2905bd6d87cb
12,107
def apply_mask_v2(mask: str, address: str) -> str: """ DOCTEST >>> apply_mask_v2(mask='000000000000000000000000000000X1001X', address='000000000000000000000000000000101010') '000000000000000000000000000000X1101X' """ output = "".join([address[i] if mask[i] == "0" else mask[i] for i, _ in enumerate(mask)]) return output
1865b59317ed394bea4ea658f1acfd4277521b59
12,109
def _first_true_index(bools): """ Given a pandas Series of bools, returns the index of the first occurrence of `True`. **Index-based, NOT location-based** I.e., say x = pd.Series({0: False, 2: False, 4: True}), then _first_true_index(x) will return index 4, not the positional index 2. If no value in `bools` is True, returns -1. """ if not bools.any(): return -1 i_true = bools.idxmax() return i_true
4d0cc2d2d3f53f5c73653aeafce7b6128a99d204
12,110
def sum_fibs(limit): """Sums even Fibonacci numbers upto `limit` Args: limit (int) : The limit the sum will be calculated to Returns: int : The sum of even Fibonacci numbers upto `limit` """ previous = 0 current = 1 future = 1 result = 0 while True: future = current + previous if future < limit: if future % 2 == 0: result += future else: break previous = current current = future return result
7e515e46880240f670bfd47fb54ac72768aac841
12,111
def _parse_to_last_comment(comments): """Unpack to get the last comment (hence the -1) or give '' when there is none""" return [(c[-1]['comment'] if hasattr(c, '__len__') else '') for c in comments]
ecde5f3d6df3278c5ac1e600241c1afd2f553a1b
12,116
def converte_int_char(inteiro): """Converts an integer to a character according to the ASCII table, offsetting the integer value as stated in the exercise and using the chr() function, so that: 0 = '_', 1 = 'a', 2 = 'b', ..., 26 = 'z' and 27 = '.' Args: inteiro (int): integer to be converted to a character Returns: [str]: character corresponding to the integer """ if inteiro == 27: return '.' elif inteiro == 0: return '_' return chr(inteiro + 96)
e7550dec4aa3e4a82729728befcb566e9d8315d0
12,117
def create_iterable_dataset(torch_transforms_module, pipeline_results): """ Create a PyTorch iterable dataset that loads samples from pipeline results. :param torch_transforms_module: The imported torch.transforms module. :param pipeline_results: Pipeline results iterator. :return: Dataset that has valid PyTorch images saved as tensors and density maps. """ class PipelineDataset: def __init__(self): self.images_and_density_maps = pipeline_results self.image_transform = torch_transforms_module.Compose([ torch_transforms_module.ToTensor() ]) def __iter__(self): for image, density_map in self.images_and_density_maps: yield self.image_transform(image.copy().astype("float32")), density_map.copy().astype("float32") return PipelineDataset()
1de2f8be910da07e1a63e177f59d9bf0467edbe1
12,118
def dtype(request, device): """Run a test case in single and double precision.""" return request.param
8196d76b2edd4bd66253b675f4636b77208f7617
12,119
def get_attr_info(key, convention, normalized): """Get information about the MMD fields. Input ===== key: str MMD element to check convention: str e.g., acdd or acdd_ext normalized: dict a normalized version of the mmd_elements dict (keys are, e.g., 'personnel>organisation>acdd' or 'personnel>organisation>separator') Returns ======= required: int if it is required repetition: str ('yes' or 'no') if repetition is allowed repetition_str: str a longer string representation for use in the DMH (basically a comment) separator: str sign for separating elements that can be repeated (e.g., ',' or ';') default: a default value elements that are required but missing in the netcdf file """ max_occurs_key = key.replace(convention, 'maxOccurs') if max_occurs_key in normalized.keys(): max_occurs = normalized[max_occurs_key] else: max_occurs = '' repetition_allowed = 'yes' if max_occurs not in ['0', '1'] else 'no' min_occurs_key = key.replace(convention, 'minOccurs') if min_occurs_key in normalized.keys(): required = int(normalized[min_occurs_key]) else: required = 0 separator_key = key.replace(convention, 'separator') if separator_key in normalized.keys(): separator = normalized[separator_key] else: separator = '' default_key = key.replace(convention, 'default') if default_key in normalized.keys(): default = normalized[default_key] else: default = '' repetition_key = key.replace(convention, 'repetition') if repetition_key in normalized.keys(): repetition_str = normalized[repetition_key] else: repetition_str = '' return required, repetition_allowed, repetition_str, separator, default
53e6d389935fead65173c3fd66d3608daa9c7ebc
12,122
def toggle_doors(doors, skip): """Toggle every skip door in doors. doors is an array of elements where a false value represents a closed door and a true value represents an open door. If skip is 1, toggle door #1, #2, #3... If skip is 2, toggle door #2, #4, #6... Returns doors """ for i in range(skip-1, len(doors), skip): if doors[i]: doors[i] = 0 else: doors[i] = 1 return doors
7631bbc860c4bfa9a7c236a07ebfdb1092bd351c
12,126
def get_insert_cmd(insert_fields, table, update_cmd=None): """ Creates the insert command to use with an insertion to a PostgreSQL database, given the table and the fields to be inserted Parameters ---------- insert_fields: list of strs Fields of data that will be inserted table: str Name of the table being inserted into update_cmd: Update command to use when there is a conflict on the ID and event keys. If `None`, then defaults to "DO NOTHING" and no update is performed """ if update_cmd is None: update_cmd = "DO NOTHING" insert_str = ','.join(insert_fields) insert_cmd = f"INSERT INTO {table} ({insert_str}) VALUES %s ON CONFLICT (id,event) {update_cmd}" json_array_fields = {'urls', 'description_urls'} template_strs = [] for f in insert_fields: if f in json_array_fields: s = f"%({f})s::jsonb[]" else: s = f"%({f})s" template_strs.append(s) template_str = ','.join(template_strs) template = f"({template_str})" return insert_cmd,template
d026911c0574bc2f8a52ede5bbb72b48d81272f8
12,132
def next_power_2(x: int) -> int: """Return the smallest power of 2 greater than or equal to x""" return 1 << (x-1).bit_length()
f5361250c1a6ef8228adcfd0aeab478484e7239b
12,137
def security_group_exists(self, sg_id=None, name=None): """ Checks if a security group already exists on this connection, by name or by ID. :param boto.ec2.EC2Connection self: Current connection. :param string sg_id: ID of the security group to check. Default : None. :param string name: Name of the security group to check. Default : None. :return: True if the security group is present, False otherwise :rtype: bool """ if sg_id: return sg_id in [sg.id for sg in self.get_all_security_groups()] elif name: return name in [sg.name for sg in self.get_all_security_groups()]
042cd05a550b139b3441613269f59a27ad028ea1
12,138
def to_int(x, error=0): """Convert argument to int.""" try: return int(x) except (ValueError, TypeError): return error
2c363a1d9125e396a76007d9986748b98130e1ab
12,142
def apply_format(data_frame, column_names, format_method): """Apply a formatting function to a DataFrame column and return. Simplify applying format modifications to the data stored in columns of `data_frame`. Check if the parameters are of the right type, apply `format_method` to the columns of `data_frame` whose labels are passed in `column names`. Return the DataFrame with the applied changes. Parameters ---------- data_frame : pandas.DataFrame DataFrame containing the data to be modified. column_names : list List of string labels of columns in `data_frame` to be modified. format_method : function Function to be applied to the columns of `data_frame`, whose labels are listed in `column_names`. Returns ------- data_frame : pandas.DataFrame The passed in DataFrame with the formatting changes applied to its columns. See Also -------- pandas.apply Examples -------- >>> data = pd.read_csv("data.csv") >>> print(data[['Wage']][0:3]) #print first few lines Wage 0 €565K 1 €405K 2 €290K >>> data = apply_format(data, ['Wage'], money_format) >>> print(data[['Wage']][0:3]) Wage 0 565 1 405 2 290 """ for column in column_names: if isinstance(column, str) and (column in data_frame) and callable(format_method): data_frame.loc[:, column] = data_frame[column].apply(format_method) return data_frame
af8f09d57e1f48da79c576ae542bfc5cc6cd837b
12,143
def safe_execute(default, exception, function, *args): """ Inline Try/Except Parameters ---------- default : Object value returned in case of failure exception : Exception type of exception you want to catch function : function the function to execute args argument(s) of the function >>> def foo(x,y):return x/y >>> safe_execute("What did you expect !",ZeroDivisionError,foo,12,0) 'What did you expect !' >>> safe_execute("What did you expect !",ZeroDivisionError,foo,12,3) 4 """ try: return function(*args) except exception: return default
aae61410c96741985cc2e03786bbc5c69ad80fa8
12,149
def linear(x: float, target: float, span: float, symmetric = False) -> float: """Create a linearly sloped reward space. Args: x (float): Value to evaluate the reward space at. target (float): The value s.t. when x == target, this function returns 1. span (float): The value s.t. when x >= target + span, this function returns 0. symmetric (bool, optional): If true, then this function works if x is over or under target. Defaults to False. Returns: float: A value between 0 and 1. x == target evaluates to 1, x >= target + span evaluates to 0. Every value in between is evaluated as the linear interpolation between `target` and `span`. """ if span == 0: return 1. if x == target else 0. x_delta = x - target if abs(x_delta) > abs(span): return 0. ratio = x_delta / span if not symmetric and ratio < 0: return 0 return 1 - abs(ratio)
b443d42ec2686830668d1287db9a8e8fda3f5df1
12,154
def first_non_empty(items): """ Return first non empty item from the list. If nothing is found, we just pick the first item. If there is no item, we return the whole form (defaulting to []). """ if items: for it in items: if it: return it return items[0] else: return items
93dce672b5a28c094b8916c535aac2ae3622e890
12,159
def add_2_numbers(a, b): """ Assume your function returns the addition of 2 numbers """ return a + b
fbc3a89fb16334594914ce9cdd388d240effc7ea
12,160
def dst_main_directory(library: str) -> str: """ Main directory for report files resulting from the reconciliation process. """ return f"./files/{library}"
cea4ea7bcd2c37fd97302cb02185ac2090413a4d
12,172
import torch def compute_scores(users_embeds: torch.Tensor, items_embeds: torch.Tensor, items_bias: torch.Tensor) -> torch.Tensor: """ Args: users_embeds(torch.Tensor): shape (batch_size, items_total, embed_dim) items_embeds(torch.Tensor): shape (items_total, embed_dim) items_bias(torch.Tensor): shape (items_total) Returns: scores(torch.Tensor): shape (batch_size, items_total) """ scores = (users_embeds * items_embeds).sum(-1) + items_bias return scores
8528963a23efef270b467ec6f039b5a8733d3b4f
12,176
def extract_fplus_include_file(code_line): """ Extracts the included file path from an include statement """ return ( code_line .replace("#include <fplus/", "") .replace("#include \"", "") .replace("\"", "") .replace(">", "")[:-1] )
c969fcb633332b05aec93f2998065d34b2d99c2f
12,178
def split_byte(x): """Split byte into groups of bits: (2 bits, 3 bits, 3 bits)""" return x >> 6, x >> 3 & 7, x & 7
7743fdf78c201dce66803ae0eb62cbdf58cccc7d
12,183
def _is_transition_allowed(from_tag: str, from_entity: str, to_tag: str, to_entity: str): """ Whether a BIO transition is allowed. For example: "B-Per" -> "I-Per" is allowed, while "B-Per" -> "I-Loc" or "O" -> "I-Per" is not allowed :param from_tag: The tag that the transition originates from. For example, if the label is ``I-PER``, the ``from_tag`` is ``I``. :param from_entity: The entity corresponding to the ``from_tag``. For example, if the label is ``I-PER``, the ``from_entity`` is ``PER``. :param to_tag: The tag that the transition leads to. For example, if the label is ``I-PER``, the ``to_tag`` is ``I``. :param to_entity: The entity corresponding to the ``to_tag``. For example, if the label is ``I-PER``, the ``to_entity`` is ``PER``. :return: True: the transition is allowed; False: the transition is not allowed. """ if to_tag == "START" or from_tag == "END": # Cannot transition into START or from END return False if from_tag == "START": return to_tag in ('O', 'B') if to_tag == "END": return from_tag in ('O', 'B', 'I') return any([ # Can always transition to O or B-x to_tag in ('O', 'B'), # Can only transition to I-x from B-x or I-x to_tag == 'I' and from_tag in ('B', 'I') and from_entity == to_entity ])
dbbef187e9444eb11b95b4a0bf84d29ccf604bcd
12,187
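As an illustrative note on the record above (not part of the dataset), the BIO transition check behaves as follows, assuming the function has been defined in the current session:

>>> _is_transition_allowed("B", "Per", "I", "Per")   # B-Per -> I-Per
True
>>> _is_transition_allowed("B", "Per", "I", "Loc")   # B-Per -> I-Loc
False
>>> _is_transition_allowed("O", "", "I", "Per")      # O -> I-Per
False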
def _next_power_of_2(x): """ Calculate the closest power of 2, greater than the given x. :param x: positive integer :return: int - the closest power of 2, greater than the given x. """ return 1 if x == 0 else 2**(x - 1).bit_length()
616c01f6aacb7442ce1b2afbcac35b26c8f79701
12,191
def set_bdev_options(client, bdev_io_pool_size=None, bdev_io_cache_size=None): """Set parameters for the bdev subsystem. Args: bdev_io_pool_size: number of bdev_io structures in shared buffer pool (optional) bdev_io_cache_size: maximum number of bdev_io structures cached per thread (optional) """ params = {} if bdev_io_pool_size: params['bdev_io_pool_size'] = bdev_io_pool_size if bdev_io_cache_size: params['bdev_io_cache_size'] = bdev_io_cache_size return client.call('set_bdev_options', params)
1eaa1403a45845a3d742438cb9ff5f8b408f0684
12,197
def getGeolocalisationFromJson(image): """Get geolocalisation data of an image in the database Parameters: image (json): image from the validation data Returns: float: lng (degree) float: lat (degree) float: alt (degree) float: azimuth (degree) float: tilt (degree) float: roll (degree) float: focal (pixel) array: gcps int: image width int: image height """ lng = float(image['lng']) lat = float(image['lat']) alt = float(image['alt']) azimuth = float(image['azimuth'])%360 tilt= float(image['tilt'])%360 roll = float(image['roll'])%360 focal = float(image['focal']) gcps = image['gcp_json'] width = float(image['width']) height = float(image['height']) return lng, lat, alt, azimuth, tilt, roll, focal, gcps, width, height
4ea02780b4254dfb03ac335ca1214a7b7d6bf521
12,203
def fixquotes(u): """ Given a unicode string, replaces "smart" quotes, ellipses, etc. with ASCII equivalents. """ if not u: return u # Double quotes u = u.replace('\u201c', '"').replace('\u201d', '"') # Single quotes u = u.replace('\u2018', "'").replace('\u2019', "'") # Em dash u = u.replace('\u2014', '--') # Ellipses u = u.replace('\u2026', '...') return u
28f8ff0068b28ae0cca60ac850c89403bc347346
12,207
def render_hunspell_word_error( data, fields=["filename", "word", "line_number", "word_line_index"], sep=":", ): """Renders a misspelled word data dictionary. This function allows a convenient way to render each misspelled word data dictionary as a string, which can be useful to print in the context of spell checker command line interfaces. Args: data (dict): Misspelled word data, as it is yielded by the method :py:meth:`hunspellcheck.HunspellChecker.check`. fields (list): List of fields to include in the response. sep (str): Separator string between each field value. Returns: str: Misspelled word data as a string. """ values = [] for field in fields: value = data.get(field) if value is not None: values.append(str(value)) return (sep).join(values)
9bbefd0b998abe25d0a977adfe69ef51185cde37
12,209
def int2lehex(value, width): """ Convert an unsigned integer to a little endian ASCII hex string. Args: value (int): value width (int): byte width Returns: string: ASCII hex string """ return value.to_bytes(width, byteorder='little').hex()
1ce9bb9447236c36bb906560c65ffd8e058c5aa4
12,211
import socket import struct def ip4_from_int(ip): """Convert :py:class:`int` to IPv4 string :param ip: int representing an IPv4 :type ip: int :return: IP in dot-decimal notation :rtype: str """ return socket.inet_ntoa(struct.pack(">L", ip))
94091bd650cf15bb216e82072478e73180c0027c
12,214
def overlap(start_1, end_1, start_2, end_2): """Return the `range` covered by two sets of coordinates. The coordinates should be supplied inclusive, that is, the end coordinates are included in the region. The `range` returned will be exclusive, in keeping with the correct usage of that type. Parameters ---------- start_1, start_2 : int The start coordinates of each region. end_1, end_2 : int The end coordinates of each region. Returns ------- range The `range` covered by both regions. If there is no overlap then start of the range will be equal to or greater than the end, thus having zero or negative length. """ return range(max(start_1, start_2), min(end_1, end_2) + 1)
0bf76a98feaf94fffa2a13eb74f2a16e4fafe350
12,215
def issubset(a, b): """Determines if all elements of a exist in b Args: a: sequence a b: sequence b Returns: bool: True or False """ return set(a).issubset(set(b))
39e3c974cb2f3bc3ecfe17589292646f7a1a3383
12,217
def random_explorer_player(bot, state): """ Least visited random player. Will prefer moving to a position it’s never seen before. """ if not bot.turn in state: # initialize bot state[bot.turn] = { 'visited': [] } if bot.position in state[bot.turn]['visited']: state[bot.turn]['visited'].remove(bot.position) state[bot.turn]['visited'].insert(0, bot.position) # possible candidates positions = bot.legal_positions[:] # go through all visited positions and remove them # from our candidate list for pos in state[bot.turn]['visited']: if len(positions) == 1: # only one position left, we’ll take it return positions[0] if len(positions) == 0: return bot.position if pos in positions: positions.remove(pos) # more than one move left return bot.random.choice(positions)
95b05d749b9fa7994ddb0b1e6adae5ef5c3b25ea
12,219
def pop(obj, key=0, *args, **kwargs): """Pop an element from a mutable collection. Parameters ---------- obj : dict or list Collection key : str or int Key or index default : optional Default value. Raise error if not provided. Returns ------- elem Popped element """ if isinstance(obj, dict): return obj.pop(key, *args, **kwargs) else: try: val = obj[key] del obj[key] return val except: if len(args) > 0: return args[0] else: return kwargs.get('default')
71222b35f52a1ee118a352596caefebe9b7070fa
12,224
import re def normalize_text(text): """ Strips formatting spaces/tabs, carriage returns and trailing whitespace. """ text = re.sub(r"[ \t]+", " ", text) text = re.sub(r"\r", "", text) # Remove trailing whitespace at the end of each line. text = re.sub(r"[ \t]+\n", "\n", text) # Remove whitespace at the end of the text. text = text.rstrip() return text
aec04cc84aa91e16ca0f8ac18530813a6de3c187
12,230
def standardize_sizes(sizes): """ Removes trailing ones from a list. Parameters ---------- sizes: List A list of integers. """ while (sizes[-1] == 1) and len(sizes)>2: sizes = sizes[0:-1] return sizes
8af9a1589d2cf1fe4f906839daa88de5bf9987c8
12,234
def qpm_to_bpm(quarter_note_tempo, numerator, denominator): """Converts from quarter notes per minute to beats per minute. Parameters ---------- quarter_note_tempo : float Quarter note tempo. numerator : int Numerator of time signature. denominator : int Denominator of time signature. Returns ------- bpm : float Tempo in beats per minute. """ if not (isinstance(quarter_note_tempo, (int, float)) and quarter_note_tempo > 0): raise ValueError( 'Quarter notes per minute must be an int or float ' 'greater than 0, but {} was supplied'.format(quarter_note_tempo)) if not (isinstance(numerator, int) and numerator > 0): raise ValueError( 'Time signature numerator must be an int greater than 0, but {} ' 'was supplied.'.format(numerator)) if not (isinstance(denominator, int) and denominator > 0): raise ValueError( 'Time signature denominator must be an int greater than 0, but {} ' 'was supplied.'.format(denominator)) # denominator is whole, half, quarter, eighth, sixteenth or 32nd note if denominator in [1, 2, 4, 8, 16, 32]: # simple triple if numerator == 3: return quarter_note_tempo * denominator / 4.0 # compound meter 6/8*n, 9/8*n, 12/8*n... elif numerator % 3 == 0: return quarter_note_tempo / 3.0 * denominator / 4.0 # strongly assume two eighths equal a beat else: return quarter_note_tempo * denominator / 4.0 else: return quarter_note_tempo
e32614978b3632255963e84bb48c8f1fb14e82d1
12,236
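As an illustrative note on the record above (not part of the dataset), a few calls show the simple versus compound meter branches, assuming qpm_to_bpm has been defined in the current session:

>>> qpm_to_bpm(120, 4, 4)   # simple meter: the beat is the quarter note
120.0
>>> qpm_to_bpm(120, 6, 8)   # compound meter: the beat is the dotted quarter
80.0
>>> qpm_to_bpm(120, 3, 4)   # simple triple meter
120.0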
def format_review(__, data): """Returns a formatted line showing the review state and its reference tags.""" return ( "<li>[{state}] <a href='{artist_tag}/{album_tag}.html'>" "{artist} - {album}</a></li>\n" ).format(**data)
ab13b83c7ec317b23a7bdcc8e1d205fe2b77afbe
12,241
def get_field_locs(working_data, fields): """ Finds fields index locations. Parameters ---------- working_data: list-like the working data fields: list-like the field names Outputs ------- field_locs: dict field-index pairs """ field_locs = {x:i for i,x in enumerate(working_data) if x in fields} not_found = set(fields) - set(field_locs) if not_found: raise Exception(f'Missing fields {not_found}') return field_locs
884c55211e9129222763b04c2e5cfc9ea4e371a3
12,243
def add_category_to_first(column, new_category): """Adds a new category to a pd.Categorical object Keyword arguments: column: The pd.Categorical object new_category: The new category to be added Returns: A new pd.Categorical object that is almost the same as the given one, except for a new category is added (if it is not already included in the original object). The new category is added to first in the categories list. """ if column.dtype.name != "category": raise Exception("Object is not a pandas.Categorical") if new_category in column.cat.categories: raise Exception("%s is already in categories list" % new_category) column = column.copy() column = column.cat.add_categories(new_category) cat = column.cat.categories.tolist() cat = cat[0:-1] cat.insert(0, new_category) column = column.cat.reorder_categories(cat) return column
78c2bb09e439bd941653e599f1c872e858195239
12,244
from pathlib import Path def x2char_star(what_to_convert, convert_all=False): """ Converts `what_to_convert` to whatever the platform understand as char*. For python2, if this is unicode we turn it into a string. If this is python3 and what you pass is a `str` we convert it into `bytes`. If `convert_all` is passed we will also convert non string types, so `1` will be `b'1'` and `True` will be true """ if isinstance(what_to_convert, Path): return str(what_to_convert).encode() elif isinstance(what_to_convert, bytes): return what_to_convert elif isinstance(what_to_convert, str): return what_to_convert.encode() elif convert_all: if isinstance(what_to_convert, bool): return str(what_to_convert).lower().encode() return repr(what_to_convert).encode() else: return what_to_convert
a612063372d22e5ef310356c74981b6e5f8f12bd
12,246
def require_one_of(_return=False, **kwargs): """ Validator that raises :exc:`TypeError` unless one and only one parameter is not ``None``. Use this inside functions that take multiple parameters, but allow only one of them to be specified:: def my_func(this=None, that=None, other=None): # Require one and only one of `this` or `that` require_one_of(this=this, that=that) # If we need to know which parameter was passed in: param, value = require_one_of(True, this=this, that=that) # Carry on with function logic pass :param _return: Return the matching parameter :param kwargs: Parameters, of which one and only one is mandatory :return: If `_return`, matching parameter name and value :rtype: tuple :raises TypeError: If the count of parameters that aren't ``None`` is not 1 """ # Two ways to count number of non-None parameters: # # 1. sum([1 if v is not None else 0 for v in kwargs.values()]) # # Using a list comprehension instead of a generator comprehension as the # parameter to `sum` is faster on both Python 2 and 3. # # 2. len(kwargs) - kwargs.values().count(None) # # This is 2x faster than the first method under Python 2.7. Unfortunately, # it doesn't work in Python 3 because `kwargs.values()` is a view that doesn't # have a `count` method. It needs to be cast into a tuple/list first, but # remains faster despite the cast's slowdown. Tuples are faster than lists. count = len(kwargs) - tuple(kwargs.values()).count(None) if count == 0: raise TypeError( "One of these parameters is required: " + ', '.join(kwargs.keys()) ) if count != 1: raise TypeError( "Only one of these parameters is allowed: " + ', '.join(kwargs.keys()) ) if _return: keys, values = zip(*((k, 1 if v is not None else 0) for k, v in kwargs.items())) k = keys[values.index(1)] return k, kwargs[k]
294bbb73136fd722a3b998881ef491977c2d0639
12,249
def delDotPrefix(string): """Delete dot prefix to file extension if present""" return string[1:] if string.find(".") == 0 else string
792d4f72dcbc641de5c3c11f68a78c73fa0d9ecd
12,253
from typing import TextIO from typing import Optional def next_line(fh: TextIO) -> Optional[str]: """ Return the next line from a filehandle """ try: return next(fh).rstrip() except StopIteration: return None
92077359720b6ac72637943d86c7ab6250158194
12,255
def dectobin(x): """Convert Decimal to Binary. Input is a positive integer and output is a string.""" ans = '' while x > 1: tmp = x % 2 ans = str(tmp) + ans x = x // 2 ans = str(x) + ans return ans
21a6c0c161088f1e73afe87707503667b4651c87
12,257
def drop_field(expr, field, *fields): """Drop a field or fields from a tabular expression. Parameters ---------- expr : Expr A tabular expression to drop columns from. *fields The names of the fields to drop. Returns ------- dropped : Expr The new tabular expression with some columns missing. Raises ------ TypeError Raised when ``expr`` is not tabular. ValueError Raised when a column is not in the fields of ``expr``. See Also -------- :func:`blaze.expr.expressions.projection` """ to_remove = set((field,)).union(fields) new_fields = [] for field in expr.fields: if field not in to_remove: new_fields.append(field) else: to_remove.remove(field) if to_remove: raise ValueError( 'fields %r were not in the fields of expr (%r)' % ( sorted(to_remove), expr.fields ), ) return expr[new_fields]
3007a46d3a0a44f47e10ead1eb0ab3a0ff5be44c
12,259
def check_index_in_list(name_list, index): """ Returns whether or not given index exists in a list of names For example: name_list: ['a', 'b'] 0 exists, 'a', so we return True 1 exists, 'b', so we return True -1 exists, 'b', so we return True 2 does not exist, so we return False :param name_list: list(str), name split in components :param index: int, positive or negative index number :return: bool, Whether given index exists or not in names list """ list_length = len(name_list) if index < 0: check_length = abs(index) else: check_length = index + 1 if check_length > list_length: return False return True
efedd7511377e29cc28ea4212271a4b6b59cb3b2
12,269
import enum def pascal_case(value): """Convert strings or enums to PascalCase""" if isinstance(value, enum.Enum): value = value.name return value.title().replace('_', '')
6e994650755968a3f73b345e1f3e040f0f211aa9
12,273
def format_timedelta(td): """ Format a timedelta object with the format {hours}h {minutes}m. """ if td is None: return '0h 0m' seconds = td.total_seconds() hours, seconds = divmod(seconds, 3600) minutes = seconds / 60 return f'{int(hours)}h {int(minutes)}m'
e0b30250eef25db9b905e9e6d6c82a41b742112b
12,274
def common_text(stringlist, kind='prefix'): """ For a list of strings find the common prefix or suffix; returns None if the common substring is the entire first string or if kind is not 'prefix' or 'suffix' :param stringlist: a list of strings to test :param kind: string, either 'prefix' or 'suffix' :return: """ substring = stringlist[0] if kind == 'prefix': # loop around strings in list (except first) for _str in stringlist[1:]: # while substring is not equal in first and Nth shorten while _str[:len(substring)] != substring and len(substring) != 0: substring = substring[:len(substring)-1] # test for blank string if len(substring) == 0: break elif kind == 'suffix': # loop around strings in list (except first) for _str in stringlist[1:]: # while substring is not equal in first and Nth shorten while _str[-len(substring):] != substring and len(substring) != 0: substring = substring[1:] # test for blank string if len(substring) == 0: break # if prefix or suffix is the same as all in list return None - there # is no prefix if substring == stringlist[0]: return None # else return the substring else: return substring
9a85120006abb5483f3a0afd4f8df63f547eb817
12,282
import copy def merge_dicts(dict1, dict2, make_copy=True): """ Recursively merges dict2 into dict1. If a key is present in both and both values are dicts, they are merged recursively; otherwise the value from dict2 overwrites the value in dict1. """ if make_copy: dict1 = copy.copy(dict1) for key, value in dict2.iteritems(): if isinstance(dict1.get(key), dict) and isinstance(value, dict): value = merge_dicts(dict1[key], value, make_copy=False) dict1[key] = value return dict1
4da004649b0abacb5213d5a13ea44e2300c88bd3
12,291
def layout_bounding_boxes(canvas_x, canvas_y, canvas_width, line_height, space_widths, y_space, sizes): """Layout token bounding boxes on canvas with greedy wrapping. Args: canvas_x: x coordinate of top left of canvas canvas_y: y coordinate of top left of canvas canvas_width: width of canvas line_height: height for each line space_widths: width for space between each word y_space: extra space between lines sizes: list of width,height tuples of sizes for each word Returns: boxes: 4-tuple bounding box for each word line_breaks: token index for each line break line_poss: x,y positions starting each line """ cur_x, cur_y = canvas_x, canvas_y cur_size = 0 cur_line = 0 boxes = [] line_breaks = [] line_poss = [] line_poss.append((cur_x, cur_y)) while cur_size < len(sizes): sz = sizes[cur_size] if cur_x + sz[0] > canvas_width + canvas_x: cur_line += 1 cur_y = canvas_y - cur_line * (y_space + line_height) cur_x = canvas_x line_poss.append((cur_x, cur_y)) line_breaks.append(cur_size) else: boxes.append((cur_x, cur_y, sz[0], sz[1])) cur_x += sz[0] if cur_size < len(space_widths): cur_x += space_widths[cur_size] cur_size += 1 return boxes, line_breaks, line_poss
057eb7976849444efa27d51d2ec1209e0f95eaa8
12,297
def _sanitize_bin_name(name): """ Sanitize a package name so we can use it in starlark function names """ return name.replace("-", "_")
37e9a09cf60f83c087734b74ccf0ba7d3c46fea6
12,308
def _line(x, a, b): """ Dummy function used for line fitting :param x: independent variable :param a, b: slope and intercept """ return a*x + b
e883689fb39c51064b1f1e0a34d1ab03cc11efb9
12,309
def normalise_line_tail(line): """Replace white-space characters at the end of the line with a newline character""" return line.rstrip() + '\n'
25c22c0c39a73d5b9a449f202433f77bd0e1bb3b
12,313
import codecs def load_vocabulary(vocabulary_path, reverse=False): """Load vocabulary from file. We assume the vocabulary is stored one-item-per-line, so a file: d c will result in a vocabulary {"d": 0, "c": 1}, and this function may also return the reversed-vocabulary [0, 1]. Args: vocabulary_path: path to the file containing the vocabulary. reverse: flag managing what type of vocabulary to return. Returns: the vocabulary (a dictionary mapping string to integers), or if set reverse to True the reversed vocabulary (a list, which reverses the vocabulary mapping). Raises: ValueError: if the provided vocabulary_path does not exist. """ rev_vocab = [] with codecs.open(vocabulary_path, "r", "utf-8") as vocab_file: rev_vocab.extend(vocab_file.readlines()) rev_vocab = [line.strip() for line in rev_vocab] if reverse: return rev_vocab else: return dict([(x, y) for (y, x) in enumerate(rev_vocab)])
0bbe55a64657f53a66df7f55b3e85dcb579593a5
12,317
def alignments_to_report(alignments): """Determine which alignments should be reported and used to call variants. In the simplest and best case, there is only a single alignment to consider. If there is more than one alignment, determine which ones are interpretable as a variant, and of these return the alignment(s) with the optimal score. """ if len(alignments) <= 1: return alignments scrtbl = [aln for aln in alignments if aln.vartype is not None] if len(scrtbl) == 0: finallist = alignments else: finallist = scrtbl bestscore = max([aln.score for aln in finallist]) aligns2report = [aln for aln in finallist if aln.score == bestscore] return aligns2report
0a8adfb3146ffee4ac8272e5e515fb75ad2f13b4
12,318
def tag_repr(tag): """String of tag value as (0xgggg, 0xeeee)""" return "(0x{group:04x}, 0x{elem:04x})".format(group=tag.group, elem=tag.element)
058f40824c85b834ce759ae1d01275c718a438c6
12,321