Dataset columns:
content — string (lengths 35 to 416k)
sha1 — string (length 40)
id — int64 (0 to 710k)
def OpenCredentials(cred_path: str):
    """
    Opens and parses an AWS credentials file.

    :param cred_path: Path to the file containing the credentials
    :return: A dict containing the credentials
    """
    with open(cred_path) as file:
        # Expects a two-line CSV: a header line of keys, then a line of values.
        keys, values = map(lambda s: s.strip().split(','), file)
        credentials = dict(zip(keys, values))
    return credentials
2f224a92b6c3999a45f6d73bb90504663614a1ac
705,465
def pkcs7_unpad_strict(data, block_size=16):
    """Same as `pkcs7_unpad`, but throw exception on incorrect padding.

    Mostly used to showcase the padding oracle attack.
    """
    pad = data[-1]
    if ord(pad) < 1 or ord(pad) > block_size:
        raise Exception('Invalid padding length')
    for i in range(2, ord(pad) + 1):
        if data[-i] != pad:
            raise Exception('Invalid padding character')
    return data[:-ord(pad)]
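A minimal usage sketch (assuming str input, which is what the ord() calls above imply):

# Valid two-byte PKCS#7 padding is stripped:
pkcs7_unpad_strict('YELLOW SUBMARINE\x02\x02')      # -> 'YELLOW SUBMARINE'
# Inconsistent padding bytes raise — exactly what a padding oracle leaks:
pkcs7_unpad_strict('ICE ICE BABY\x01\x02\x03\x04')  # raises Exception('Invalid padding character')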
0cb7c2d66c30de8bac54ca714dfa05a29d4f0cbd
705,466
def _remove_trailing_string(content, trailing):
    """
    Strip trailing component `trailing` from `content` if it exists.
    Used when generating names from view classes.
    """
    if content.endswith(trailing) and content != trailing:
        return content[:-len(trailing)]
    return content
775bafba5ea518e03499c9351b74ac472c265c9a
705,467
def find_loop_size(public_key, subject=7):
    """
    To transform a subject number, start with the value 1. Then, a number of
    times called the loop size, perform the following steps:

    - Set the value to itself multiplied by the subject number.
    - Set the value to the remainder after dividing the value by 20201227.

    After the desired loop size, the subject number 7 is transformed into
    the public key itself.
    """
    loops = 0
    value = 1
    while value != public_key:
        loops += 1
        value *= subject
        value = value % 20201227
    return loops
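A quick check against the published Advent of Code 2020 (day 25) example, where the card and door public keys have loop sizes 8 and 11:

assert find_loop_size(5764801) == 8
assert find_loop_size(17807724) == 11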
831f5f3e9867b06640493226fa35a89251f5aad5
705,468
def make_user_variable(
        id_name, cluster_name, w_name, d_name, y_tree_name, y_name,
        x_name_ord, x_name_unord, x_name_always_in_ord, z_name_list,
        x_name_always_in_unord, z_name_split_ord, z_name_split_unord,
        z_name_mgate, z_name_amgate, x_name_remain_ord, x_name_remain_unord,
        x_balance_name_ord, x_balance_name_unord):
    """Put variable names in dictionary."""
    def check_none(name):
        if name is None:
            return []
        return name
    variable_dict = {
        'id_name': check_none(id_name),
        'cluster_name': check_none(cluster_name),
        'w_name': check_none(w_name),
        'd_name': check_none(d_name),
        'y_tree_name': check_none(y_tree_name),
        'y_name': check_none(y_name),
        'x_name_ord': check_none(x_name_ord),
        'x_name_unord': check_none(x_name_unord),
        'x_name_always_in_ord': check_none(x_name_always_in_ord),
        'z_name_list': check_none(z_name_list),
        'x_name_always_in_unord': check_none(x_name_always_in_unord),
        'z_name_ord': check_none(z_name_split_ord),
        'z_name_unord': check_none(z_name_split_unord),
        'z_name_mgate': check_none(z_name_mgate),
        'z_name_amgate': check_none(z_name_amgate),
        'x_name_remain_ord': check_none(x_name_remain_ord),
        'x_name_remain_unord': check_none(x_name_remain_unord),
        'x_balance_name_ord': check_none(x_balance_name_ord),
        'x_balance_name_unord': check_none(x_balance_name_unord),
    }
    return variable_dict
d7f9f85a75df28e1db7f3dee71d625bbe99c6106
705,469
import re


def get_skip_report_step_by_index(skip_report_list):
    """Parse the missed step from a skip report.

    Based on the index within the skip report file (each line a report),
    the missed step for this entry gets extracted. In case no step could
    be found, the whole entry could not be parsed, or no report for this
    index exists, the step is 'None'.
    """
    def extract_step(index):
        skip_report_entry = (
            skip_report_list[index] if index < len(skip_report_list) else ""
        )
        step_findings = re.findall(
            "^([0-9]+),0x[0-9,a-f]+,[0-9,-]+ [0-9,:]+$",
            skip_report_entry.strip()
        )
        step = int(step_findings[0]) if len(step_findings) == 1 else None
        return step
    return extract_step
7aa46050702aba07902ceec586175fce2226e1e3
705,470
def _prefix_with_swift_module(path, resource_info):
    """Prepends a path with the resource info's Swift module, if set.

    Args:
      path: The path to prepend.
      resource_info: The resource info struct.

    Returns:
      The path with the Swift module name prepended if it was set, or just
      the path itself if there was no module name.
    """
    swift_module = resource_info.swift_module
    if swift_module:
        return swift_module + "-" + path
    return path
f2a12f59a3c30c09fa20d65b806779ad47f49b90
705,471
def parse_repeating_time_interval_to_days(date_str):
    """Parses a string holding a repeating time interval, as specified by
    the ISO 8601 standard, into the number of days that interval
    represents. Returns 0 if the interval is invalid.
    """
    intervals = {'Y': 365, 'M': 30, 'W': 7, 'D': 1, 'H': 0, 'S': 0}
    if date_str.find('R/P') != 0:  # Malformed periodicity
        return 0
    date_str = date_str.strip('R/P')
    days = 0
    index = 0
    for interval in intervals:
        value_end = date_str.find(interval)
        if value_end < 0:
            continue
        try:
            days += int(float(date_str[index:value_end]) * intervals[interval])
        except ValueError:
            # Invalid accrualPeriodicity value, treated as 0
            continue
        index = value_end
    # If the number of days came out lower, round it up to 1
    return max(days, 1)
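A couple of illustrative calls (the values follow directly from the day counts in `intervals`):

parse_repeating_time_interval_to_days('R/P1Y')  # -> 365
parse_repeating_time_interval_to_days('R/P6M')  # -> 180
parse_repeating_time_interval_to_days('P1Y')    # -> 0 (missing the 'R/' prefix)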
c417c0fc971ae0c94f651634ef5fb27f1accff24
705,472
def split_rdd(rdd):
    """
    Separate an RDD into two weighted RDDs: train (70%) and test (30%).

    :param rdd: the RDD to split
    """
    SPLIT_WEIGHT = 0.7
    (rdd_train, rdd_test) = rdd.randomSplit([SPLIT_WEIGHT, 1 - SPLIT_WEIGHT])
    return rdd_train, rdd_test
082439fb41108da171610dc3d03ab1f8f9f021c5
705,474
def QuickSort(A, l, r):
    """
    Arguments:
    A -- total number list
    l -- left index of input list
    r -- right index of input list

    Returns:
    ASorted -- sorted list
    cpNum -- Number of comparisons
    """
    # Number of comparisons
    cpNum = r - l
    # Base case
    if cpNum == 0:
        return [A[l]], 0
    elif cpNum < 0:
        return [], 0
    # Partition part
    A[l], A[r] = A[r], A[l]  # Swap the first and the last element
    p = A[l]
    i = l + 1
    for j in range(l + 1, r + 1):
        if A[j] < p:
            A[j], A[i] = A[i], A[j]
            i += 1
    A[l], A[i-1] = A[i-1], A[l]
    # Recursion call
    ALeft, cpNumLeft = QuickSort(A, l, i-2)
    ARight, cpNumRight = QuickSort(A, i, r)
    ASorted = ALeft + [p] + ARight
    cpNum = cpNum + cpNumLeft + cpNumRight
    return ASorted, cpNum
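Example usage on a small list (note QuickSort mutates A in place while building the sorted copy, so pass indices covering the whole list):

A = [3, 1, 2]
sorted_list, comparisons = QuickSort(A, 0, len(A) - 1)
# sorted_list == [1, 2, 3]; comparisons == 2 for this input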
26092d222d93d8b931ab6f2d5c539ac5c9e00b2f
705,475
from collections import Counter


def checksum(input):
    """Checksum by counting items that have duplicates and/or triplicates
    and multiplying."""
    checksum_twos = 0
    checksum_threes = 0
    for id in input:
        c = [v for k, v in Counter(id).items()]
        if 2 in c:
            checksum_twos += 1
        if 3 in c:
            checksum_threes += 1
    return checksum_threes * checksum_twos
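Checked against the Advent of Code 2018 (day 2) sample, where four IDs contain a doubled letter and three contain a tripled letter, giving 4 * 3 = 12:

ids = ["abcdef", "bababc", "abbcde", "abcccd", "aabcdd", "abcdee", "ababab"]
assert checksum(ids) == 12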
8ba72e795b5868a852ce0c9c234ca35088057538
705,476
def extract_kernel_version(kernel_img_path):
    """
    Extracts the kernel version out of the given image path.

    The extraction logic is designed to closely mimic the way the Zipl
    configuration to BLS conversion script works, so that it is possible
    to identify possible issues with kernel images.

    :param str kernel_img_path: The path to the kernel image.
    :returns: Extracted kernel version from the given path
    :rtype: str
    """
    # Mimic the bash substitution used in the conversion script, see:
    # https://github.com/ibm-s390-linux/s390-tools/blob/b5604850ab66f862850568a37404faa647b5c098/scripts/zipl-switch-to-blscfg#L168
    if 'vmlinuz-' in kernel_img_path:
        fragments = kernel_img_path.rsplit('/vmlinuz-', 1)
        return fragments[1] if len(fragments) > 1 else fragments[0]
    fragments = kernel_img_path.rsplit('/', 1)
    return fragments[1] if len(fragments) > 1 else fragments[0]
2f75b220ff3e68b8c2ae2a046b7c604a786b05b8
705,477
def concat_strings(string_list):
    """
    Concatenate all the strings in possibly-nested string_list.

    @param list[str]|str string_list: a list of strings
    @rtype: str

    >>> list_ = (["The", "cow", "goes", "moo", "!"])
    >>> concat_strings(list_)
    'The cow goes moo !'
    >>> list_ = (["This", "sentence", "is actually", \
                  "constructed", ["from", ["other"], "smaller"], "strings"])
    >>> concat_strings(list_)
    'This sentence is actually constructed from other smaller strings'
    """
    if isinstance(string_list, str):
        # string_list is a str
        return string_list
    else:
        return " ".join([concat_strings(elem) for elem in string_list])
bbeb884e2cd4c689ce6e61c147558c993acc5f09
705,478
def match(input_character, final_answer):
    """
    :param input_character: str, allow users to input a string that will be
           verified whether there are any matches with the final answer.
    :param final_answer: str, the final answer.
    :return: str, return the matching result that could consist of '-' and letters.
    """
    result = ""
    for f in final_answer:
        if f == input_character:
            result += input_character
        else:
            result += '-'
    if final_answer.find(input_character) != -1:
        print('You are correct!')
    else:
        print('There is no ' + input_character + '\'s in the word.')
    return result
4323cd2eefa00126baad11576cdc9a29fe94ec0b
705,479
import argparse


def create_parser(args):
    """Function which adds the command line arguments required for the
    cyclomatic complexity report parser."""
    # Create the parser
    cyclo_parser = argparse.ArgumentParser(
        description='cyclomatic complexity gate Parser')
    # Add the arguments
    cyclo_parser.add_argument('--cyclo', metavar='--c', type=int,
                              help='cyclo benchmark')
    return cyclo_parser.parse_args(args)
46ddafdf458c20d323bc86974525f91320a33ae3
705,480
import calendar
import datetime


def get_month_day_range(date):
    """
    For a date 'date' returns the start and end date for the month of 'date'.

    Month with 31 days:
    >>> date = datetime.date(2011, 7, 27)
    >>> get_month_day_range(date)
    (datetime.date(2011, 7, 1), datetime.date(2011, 7, 31))

    Month with 28 days:
    >>> date = datetime.date(2011, 2, 15)
    >>> get_month_day_range(date)
    (datetime.date(2011, 2, 1), datetime.date(2011, 2, 28))
    """
    first_day = date.replace(day=1)
    last_day = date.replace(day=calendar.monthrange(date.year, date.month)[1])
    return first_day, last_day
610ff43b0e637afba780119c76181c6ff033a299
705,481
def leia_dinheiro(msg):
    """
    -> Reads a value typed by the user and checks that it is a valid
    numeric value.

    :param msg: Message shown to the user
    :return: The value typed by the user, once it is valid
    """
    while True:
        num = input(msg).strip().replace(',', '.')  # Replace commas with dots
        # Drop at most one dot before the digit check, so inputs such as
        # '1.2.3' are rejected instead of crashing float() below.
        if num.replace('.', '', 1).isdigit():
            num = float(num)
            break
        else:
            print(f'\033[1;31mERRO! \"{num}\" não é um preço válido.\033[m')
    return num
aa8e21243009af1fde6d6c5e9cb611acff36369e
705,482
def _octet_bits(o):
    """
    Get the bits of an octet.

    :param o: The octets.
    :return: The bits as a list in LSB-to-MSB order.
    :rtype: list
    """
    if not isinstance(o, int):
        raise TypeError("o should be an int")
    if not (0 <= o <= 255):
        raise ValueError("o should be between 0 and 255 inclusive")
    bits = [0] * 8
    for i in range(8):
        if 1 == o & 1:
            bits[i] = 1
        o = o >> 1
    return bits
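For example, 10 is 0b1010, so its bits in LSB-to-MSB order come out as:

_octet_bits(10)  # -> [0, 1, 0, 1, 0, 0, 0, 0]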
f472a2ab65702e59439b7693260abf040d4e7742
705,483
import resource


def __limit_less(lim1, lim2):
    """Helper function for comparing two rlimit values, handling
    "unlimited" correctly.

    Params:
        lim1 (integer): first rlimit
        lim2 (integer): second rlimit

    Returns:
        true if lim1 <= lim2
    """
    if lim2 == resource.RLIM_INFINITY:
        return True
    if lim1 == resource.RLIM_INFINITY:
        return False
    return lim1 <= lim2
8c8faebd4cc1eecfbd8e0a73b16b2bee0a433572
705,484
from typing import List


def getswarmlocations() -> List[str]:
    """
    Returns the sorted, deduplicated list of locations where a swarm can
    happen, read from commands/data/swarmlocations.csv.

    :return: list of lowercased location names
    """
    with open("commands/data/swarmlocations.csv") as fh:
        swarmlocationlist = fh.read().split(",")
    swarmlocationlist = [location.lower() for location in swarmlocationlist]
    swarmlocationlist = list(set(swarmlocationlist))  # remove duplicates just in case
    swarmlocationlist.sort()
    return swarmlocationlist
3a7db13c8a0176a4cc9a9dda38b23041639917a9
705,485
import os
import json
import logging


def get_project_id():
    """
    Gets the project ID.

    It defaults to the project declared in the environment variable
    PROJECT, but if it can't find it there it will try looking for a
    service account and take the project ID from there.

    Returns:
        The project ID, or None if it cannot be determined.
    """
    service_acc_address = os.environ.get('GOOGLE_APPLICATION_CREDENTIALS', None)
    if service_acc_address:
        with open(service_acc_address, 'r') as f:
            service_acc = f.read()
        service_acc_project_id = json.loads(service_acc)['project_id']
    else:
        service_acc_project_id = None
    project_id = os.environ.get('PROJECT', service_acc_project_id)
    if service_acc_project_id is not None and project_id != service_acc_project_id:
        logging.critical("Warning: the project in ENV VAR PROJECT is "
                         "not the same as your service account project")
    return project_id
ce06f65be2fa70898a71f10d4f848afdf944da48
705,486
def val_err_str(val: float, err: float) -> str:
    """
    Get a float representation of a value/error pair and create a string
    representation:

    12.345 +/- 1.23  --> 12.3(12)
    12.345 +/- 0.012 --> 12.345(12)
    12345 +/- 654    --> 12340(650)

    :param val: float representing the value
    :param err: float representing the error in the value
    :return: a string representation of the value/error pair
    """
    err_sig_figs = 2  # future upgrade path is to allow user to set this
    dps = 2 - err_sig_figs
    if err < 10:
        while err < 10.:
            err *= 10
            dps += 1
        err = round(err, 0)
    else:  # err > 10
        while err > 100.:
            err /= 10
            dps -= 1
        err = round(err, 0) * 10 ** (-dps)
    val = round(val, dps)
    return f"{val:.{max(0, dps)}f}({err:.0f})"
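Example calls matching the docstring:

val_err_str(12.345, 1.23)  # -> '12.3(12)'
val_err_str(12345, 654)    # -> '12340(650)'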
5b759ff8e6996704edb7f6b68f6cb7e307593c9e
705,487
def port_name(name, nr=0):
    """Map node output number to name."""
    return name + ":" + str(nr)
a82e0b9940fa6b7f11f1a11fbd8a1b9b1a57c07b
705,488
def _stringify(values):
    """internal method: used to convert values to a string suitable for
    an xml attribute"""
    if isinstance(values, (list, tuple)):
        return " ".join([str(x) for x in values])
    elif isinstance(values, bool):
        return "1" if values else "0"
    else:
        return str(values)
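For instance:

_stringify([1, 2, 3])  # -> '1 2 3'
_stringify(True)       # -> '1'
_stringify(4.5)        # -> '4.5'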
a8f3c290ef949a254ca5dca9744ff3f4c602c4d2
705,489
def xml_safe(s):
    """Returns the XML-safe version of a given string."""
    new_string = s.replace("&", "&amp;").replace("<", "&lt;")
    new_string = new_string.replace("\r", "").replace("\n", "<br/>")
    return new_string
166bf2b78441b4f22bf3a89f8be56efb756fe72f
705,490
from typing import List
from typing import Dict


def group_by_author(commits: List[dict]) -> Dict[str, List[dict]]:
    """Group GitHub commit objects by their author."""
    grouped: Dict[str, List[dict]] = {}
    for commit in commits:
        name = commit["author"]["login"]
        if name not in grouped:
            grouped[name] = []
        grouped[name].append(commit)
    return grouped
239c523317dc8876017d4b61bc2ad8887444085e
705,491
def non_empty_string(value):
    """Must be a non-empty non-blank string"""
    return bool(value) and bool(value.strip())
707d6c39a52b1ec0e317d156e74fef78170739d9
705,493
import pwd


def get_uid_from_user(user):
    """Return UID from user name

    Looks up UID matching the supplied user name; returns None if no
    matching name can be found.

    NB returned UID will be an integer.
    """
    try:
        return pwd.getpwnam(str(user)).pw_uid
    except KeyError:
        return None
dd4f6f839f985b923199b438216c567e1e84327d
705,494
from pathlib import Path


def get_base_folder():
    """Return the base folder of ProfileQC."""
    return Path(__file__).parent
e0a49bbbe018333dd107466a5178c5579327edc1
705,495
def u16le_list_to_byte_list(data):
    """! @brief Convert a halfword array into a byte array"""
    byteData = []
    for h in data:
        byteData.extend([h & 0xff, (h >> 8) & 0xff])
    return byteData
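The halfwords are emitted little-endian, low byte first:

u16le_list_to_byte_list([0x1234, 0xABCD])  # -> [0x34, 0x12, 0xCD, 0xAB]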
6e4dd1fe69a24f135d0dfa38d5d0ba109ad24b9e
705,496
def process_IBM_strings(string):
    """
    Format all the IBM strings in the same way, creating a single
    space-separated string of lowercase characters.

    :param string: the raw string to normalize
    :return: the lowercased, whitespace-normalized string
    """
    parts = string.split()
    result = str(parts[0].lower())
    for part in parts[1:]:
        result += " " + str(part.lower())
    return result
72216b014a18c72d4dec9ec54f24f13de0d46583
705,497
from typing import Dict
from typing import Any


def get_context() -> Dict[str, Any]:
    """
    Retrieve the current Server Context.

    Returns:
        - Dict[str, Any]: the current context
    """
    ctx = _context.get()  # type: ignore
    if ctx is not None:
        assert isinstance(ctx, dict)
        return ctx.copy()
    else:
        return {}
dad971abb645fa7c194db5cd9ce45e7c38166f31
705,498
import pandas as pd


def kiinteisto_alueiksi(kiinteisto):
    """
    kiinteisto: kiinteisto/property register

    An artificial property/constituency division is made for the
    regionalization of postal codes. A brute-force distribution is used,
    where the relative number of residential properties in the
    constituency is divided into postcodes. Generally, constituencies are
    smaller than postcode areas. The paid property data
    (kiinteistorekisteri) also includes the number of apartments per
    property; with it the division would be more accurate. In various
    inspections, the division seemed competent.

    Returns:
        kiintosuus: the estimate of shares
    """
    # New kiint and kiintpnrot dataframes, with municipality, constituency
    # area and postcode
    kiint = kiinteisto[kiinteisto['Käyttötarkoitus'] == 1].reset_index().groupby(
        ['Kuntanumero', 'Alue', 'Postinumero'], as_index=False).count()
    kiint = kiint[['Alue', 'Postinumero', 'index']]
    kiintpnrot = kiint.reset_index().groupby(
        ['Postinumero', 'Alue'], as_index=False).sum()[['Alue', 'Postinumero', 'index']]
    kiintalueet = kiint.reset_index().groupby(
        ['Alue'], as_index=False).sum()[['Alue', 'index']]
    # Join them by constituency area
    kiintosuus = pd.merge(kiintpnrot, kiintalueet, how='inner', on='Alue',
                          left_index=False, right_index=False,
                          suffixes=('_x', '_y'), copy=True, indicator=False,
                          validate=None)
    # Brute-force calculation of the share of areas based on the number of
    # properties
    kiintosuus['Osuus'] = kiintosuus['index_x'] / kiintosuus['index_y']
    kiintosuus = kiintosuus[['Alue', 'Postinumero', 'Osuus']]
    return kiintosuus
fee095ccc4cb82b735c2d314a96ab20bf0790a9a
705,499
def optional_observation_map(env, inner_obs):
    """
    If the env implements the `observation` function (i.e. if one of the
    wrappers is an ObservationWrapper), call that `observation`
    transformation on the observation produced by the inner environment.
    """
    if hasattr(env, 'observation'):
        return env.observation(inner_obs)
    else:
        return inner_obs
b1b57e74e498e520df80a310f95d1c79799a517d
705,500
def RunMetadataLabels(run_metadata):
    """Returns all labels in run_metadata."""
    labels = []
    for dev_stats in run_metadata.step_stats.dev_stats:
        for node_stats in dev_stats.node_stats:
            labels.append(node_stats.timeline_label)
    return labels
277745263c75c4c6037f8b7a26b9421699bec3a5
705,501
import torch


def step(x, b):
    """
    The step function for the ideal quantization function in the test stage.
    """
    y = torch.zeros_like(x)
    mask = torch.gt(x - b, 0.0)
    y[mask] = 1.0
    return y
bac5dd8cbaa4da41219f03a85e086dd3bdd1e554
705,502
import pickle
import time


def dump_ensure_space(file, value, fun_err=None):
    """
    Only dump value if there is enough space on disk. If there is not
    enough space, retry until there is.

    Note: this method is less efficient and slower than a simple dump.

    >>> with open("test_ensure_space.tmp", "wb") as f:
    ...     dump_ensure_space(f, "test_value")

    :param file: file where dump
    :param value: value to dump
    :param fun_err: event previous to sleep if error, with params:
                    times_waiting: times retrying until now
                    time_to_retry: time to next retry in seconds
                    err: msg error
    :return: None
    """
    if fun_err is None:
        def fun_err_default(_, __, ___):
            return None
        fun_err = fun_err_default
    times_waiting = 0
    retry = True
    while retry:
        try:
            pickle.dump(value, file, pickle.HIGHEST_PROTOCOL)
            retry = False
        except IOError as err:
            if "No space left on device" in str(err):
                retry = True
                times_waiting += 1
                time_to_retry = 0.1 * times_waiting
                if time_to_retry > 3600:
                    time_to_retry = 3600
                fun_err(times_waiting, time_to_retry, err)
                time.sleep(time_to_retry)
            else:
                raise
622ed232a3e747e55004ab28225418fc3c6570ef
705,503
def generate_output_file_name(input_file_name):
    """
    Generates an output file name from input file name.

    :type input_file_name: str
    """
    assert isinstance(input_file_name, str)
    output_file_name = input_file_name + ".gen.ipynb"
    return output_file_name
e638d676048e062711ca1a09d88a12d76fb9239d
705,505
import string


def getApproximateArialStringWidth(st: str) -> float:
    """Calculate rough width of a word in a variable width font.
    By https://stackoverflow.com/users/234270/speedplane

    Args:
        st (str): The string you need a width for

    Returns:
        float: The rough width in picas

    To make sure that the names will fit in the space, at the given font
    size etc., if the space can fit 13 M chars across, then
    getApproximateArialStringWidth("M"*13) gives 10.53, so the space is
    10 picas wide, and we can exclude wider names.
    """
    size = 0  # in milli-inches
    for s in st:
        if s in "lij|' ":
            size += 37
        elif s in "![]fI.,:;/\\t":
            size += 50
        elif s in '`-(){}r"':
            size += 60
        elif s in "*^zcsJkvxy":
            size += 85
        elif s in "aebdhnopqug#$L+<>=?_~FZT" + string.digits:
            size += 95
        elif s in "BSPEAKVXY&UwNRCHD":
            size += 112
        elif s in "QGOMm%W@":
            size += 135
        else:
            size += 50
    return size * 6 / 1000.0  # convert to picas
d37cc49e4ffd347ddace5de1d420bc8c3c37b615
705,507
def prepare_text(input_string):
    """Converts an input string into a list containing strings.

    Parameters
    ----------
    input_string : string
        String to convert to a list of strings.

    Returns
    -------
    out_list : list
        List containing the input string.
    """
    # Converting the string into lower case
    temp_string = input_string.lower()
    # Splitting up the lower-cased string on whitespace
    out_list = temp_string.split()
    return out_list
ddf060728127380ef3ec689f7ee8104b9c12ebea
705,508
def fill_bin_content(ax, sens, energy_bin, gb, tb):
    """
    Annotate each (gammaness, theta2) bin of an axis with its sensitivity.

    Parameters
    ----------
    ax: axis to annotate
    sens: sensitivity array indexed by [energy_bin][gammaness][theta2]
    energy_bin: index of the energy bin to annotate
    gb: number of gammaness bins
    tb: number of theta2 bins

    Returns
    -------
    ax: the annotated axis
    """
    for i in range(gb):
        for j in range(tb):
            theta2 = 0.005 + 0.005 / 2 + ((0.05 - 0.005) / tb) * j
            gammaness = 0.1 / 2 + (1 / gb) * i
            ax.text(theta2, gammaness,
                    "%.2f %%" % sens[energy_bin][i][j],
                    ha="center", va="center", color="w", size=8)
    return ax
aa2121697429d330da3ec18f08f36248e3f57152
705,509
import os


def get_unique_filepath(stem):
    """NOT thread-safe!

    Return stem or stem# where # is the smallest positive integer for
    which the path does not exist.

    Useful for temp dirs where the client code wants an obvious ordering.
    """
    fp = stem
    if os.path.exists(stem):
        n = 1
        fp = stem + str(n)
        while os.path.exists(fp):
            n += 1
            fp = stem + str(n)
    return fp
29f853bcb1df4bd2b989948ad2b7b8985bff83e9
705,510
import ast
from typing import Set


def all_statements(tree: ast.AST) -> Set[ast.stmt]:
    """
    Return the set of all ast.stmt nodes in a tree.
    """
    return {node for node in ast.walk(tree) if isinstance(node, ast.stmt)}
9f7cc367f01ec3bb90869879e79eb9cbe6636820
705,511
def _fi18n(text):
    """Used to fake translations to ensure pygettext retrieves all the
    strings we want to translate.

    Outside of the aforementioned use case, this is exceptionally useless,
    since this just returns the given input string without any
    modifications made.
    """
    return text
e505b58f4ff1e64c07b4496f69bee8b6e86b5129
705,512
def is_required_version(version, specified_version):
    """Check to see if there's a hard requirement for version number
    provided in the Pipfile.
    """
    # Certain packages may be defined with multiple values.
    if isinstance(specified_version, dict):
        specified_version = specified_version.get("version", "")
    if specified_version.startswith("=="):
        return version.strip() == specified_version.split("==")[1].strip()
    return True
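Example behaviour — only '==' pins are treated as hard requirements:

is_required_version("1.2.3", "==1.2.3")               # -> True
is_required_version("1.2.4", "==1.2.3")               # -> False
is_required_version("1.2.4", ">=1.0")                 # -> True (no hard pin)
is_required_version("1.2.3", {"version": "==1.2.3"})  # -> True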
6c8bfe0fe77f7a7d14e1ca2dd8005a8d82d0998c
705,513
def key(i):
    """
    Helper method to generate a meaningful key.
    """
    return 'key{}'.format(i)
04658ebead9581ff97406111c9b85e361ee49ff8
705,514
def mark(symbol):
    """Wrap the symbol's result in a tuple where the first element is
    `symbol`.

    Used where the information about "which branch of the grammar was
    used" must be propagated upwards for further checks.
    """
    def mark_action(x):
        return (symbol, x)
    return mark_action << symbol
3180c96d4d2a68df2909f23a544879918016fb37
705,515
def validate_search_inputs(row_id, search_column, search_value):
    """Function that determines if row_id, search_column and search_value
    are defined correctly"""
    return_value = {
        "valid": True,
        "msg": None
    }
    a_search_var_defined = bool(search_column or search_value)
    if row_id and a_search_var_defined:
        return_value["valid"] = False
        return_value["msg"] = "Only 'row_id' or the 'search_column and search_value' pair can be defined"
    elif not row_id and not a_search_var_defined:
        return_value["valid"] = False
        return_value["msg"] = "You must define either 'row_id' or the 'search_column and search_value' pair"
    return return_value
ce85ce1b973beab6b0476dfc05edc594fac8c420
705,517
import os


def avi_common_argument_spec():
    """
    Returns common arguments for all Avi modules
    :return: dict
    """
    credentials_spec = dict(
        controller=dict(default=os.environ.get('AVI_CONTROLLER', '')),
        username=dict(default=os.environ.get('AVI_USERNAME', '')),
        password=dict(default=os.environ.get('AVI_PASSWORD', ''), no_log=True),
        api_version=dict(default='16.4.4', type='str'),
        tenant=dict(default='admin'),
        tenant_uuid=dict(default='', type='str'),
        port=dict(type='int'),
        token=dict(default='', type='str', no_log=True),
        timeout=dict(default=300, type='int'),
        session_id=dict(default='', type='str', no_log=True),
        csrftoken=dict(default='', type='str', no_log=True))
    return dict(
        controller=dict(default=os.environ.get('AVI_CONTROLLER', '')),
        username=dict(default=os.environ.get('AVI_USERNAME', '')),
        password=dict(default=os.environ.get('AVI_PASSWORD', ''), no_log=True),
        tenant=dict(default='admin'),
        tenant_uuid=dict(default=''),
        api_version=dict(default='16.4.4', type='str'),
        avi_credentials=dict(default=None, type='dict', options=credentials_spec),
        api_context=dict(type='dict'),
        avi_disable_session_cache_as_fact=dict(default=False, type='bool'))
dfac1913e3b5af435ce8e9e8b53bf2d0d00aad11
705,518
import re


def find_first_in_register_stop(seq):
    """
    Find first stop codon on lowercase seq that starts at an index that is
    divisible by three.
    """
    # Compile regexes for stop codons
    regex_stop = re.compile('(taa|tag|tga)')
    # Stop codon iterator
    stop_iterator = regex_stop.finditer(seq)
    # Find next stop codon that is in register
    for stop in stop_iterator:
        if stop.end() % 3 == 0:
            return stop.end()
    # Return -1 if we failed to find a stop codon
    return -1
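For example, with an open reading frame ending in 'taa':

find_first_in_register_stop('atgtaa')  # -> 6 ('taa' ends at index 6, divisible by 3)
find_first_in_register_stop('ataacc')  # -> -1 ('taa' ends at index 4, out of register)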
56741828c42ecf0cb96044d03c8d1b6bc4994e01
705,519
def find_parents(candidate, branches):
    """Find the parent genres of a given genre, ordered from the closest
    to the farthest parent.
    """
    for branch in branches:
        try:
            idx = branch.index(candidate.lower())
            return list(reversed(branch[:idx + 1]))
        except ValueError:
            continue
    return [candidate]
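A small illustration with a single hypothetical branch, ordered root-to-leaf:

branches = [['rock', 'hard rock', 'metal']]
find_parents('metal', branches)  # -> ['metal', 'hard rock', 'rock']
find_parents('jazz', branches)   # -> ['jazz'] (unknown genre falls through)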
17934d9ee1d3098cc3d08f38d9e3c387df6b7c19
705,520
def max_sum_naive(arr: list, length: int, index: int, prev_max: int) -> int:
    """
    We can either take or leave the current number depending on the
    previous max number.
    """
    if index >= length:
        return 0
    cur_max = 0
    if arr[index] > prev_max:
        cur_max = arr[index] + max_sum_naive(arr, length, index + 1, arr[index])
    return max(cur_max, max_sum_naive(arr, length, index + 1, prev_max))
644b5cb294e78a10add253cad96d3c3e2c3d67d7
705,521
def _table_row(line):
    """
    Return all elements of a data line.

    Return all elements of a data line. Simply splits it.

    Parameters
    ----------
    line: string
        A stats line.

    Returns
    -------
    list of strings
        A list of strings, containing the data on the line, split at
        white space.
    """
    return line.split()
dc5d76db80059b0da257b45f12513d75c2765d55
705,522
def range2d(range_x, range_y):
    """Creates a 2D range."""
    range_x = list(range_x)
    return [(x, y) for y in range_y for x in range_x]
ca33799a277f0f72e99836e81a7ffc98b191fc37
705,523
import os


def filename_fixture():
    """The name of the cities csv file for testing"""
    return os.path.join('tests', 'fixtures', 'cities.csv')
d96b38d2ab616de9297526712521207c656593ea
705,524
import json


def is_json(payload):
    """Check if a payload is valid JSON."""
    try:
        json.loads(payload)
    except (TypeError, ValueError):
        return False
    else:
        return True
a02499ffd0a890fa4697f1002c5deb0fc894cac0
705,525
def _seconds_to_hours(time):
    """Convert time: seconds to hours"""
    return time / 3600.0
d6abd9144882587833601e64d5c2226446f1bbdc
705,526
import operator


def process_fuel(context):
    """
    Reformats fuel consumed.
    """
    fuel = {
        0: 'Petrol',
        1: 'Diesel'
    }
    data = []
    totals = []
    for index, type in enumerate(context['Fuel']):
        litresSold = operator.sub(type.closing_meter, type.opening_meter)
        total = operator.mul(litresSold, type.unit_price)
        totals.append(total)
        data.append([{
            'type': fuel[index],
            'opening_meter': type.opening_meter,
            'closing_meter': type.closing_meter,
            'unit_price': type.unit_price,
            'litresSold': litresSold,
            'total': total
        }])
    return {'data': data, 'total': totals}
fea31cb306417cf1dfcef8859ed2585c2903849b
705,527
import click


def generate_list_display(object, attrs):
    """Generate a display string for an object based on some attributes.

    Args:
        object: An object which has specific attributes.
        attrs: An iterable of strings containing attributes to get from
            the above object.

    Returns:
        A string containing a list display of the object with respect to
        the passed in attributes.
    """
    return "\n".join(
        click.style(attr, bold=True) + ": %s" % getattr(object, attr)
        for attr in attrs
    )
17c876261bede0c38d91b4bd3e7b0048616f8cbf
705,528
def discover_fields(layout):
    """Discover all fields defined in a layout object.

    This is used to avoid defining the field list in two places -- the
    layout object is instead inspected to determine the list.
    """
    fields = []
    try:
        comps = list(layout)
    except TypeError:
        return fields
    for comp in comps:
        if isinstance(comp, str):
            fields.append(comp)
        else:
            fields.extend(discover_fields(comp))
    return fields
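For example, with strings standing in for field names and nesting standing in for layout components:

discover_fields(['name', ['email', 'phone']])  # -> ['name', 'email', 'phone']
discover_fields(42)                            # -> [] (not iterable)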
359a6ed1d23e1c56a699895e8c15a93bce353750
705,529
def collocations_table_exist(con):
    """Return True if the collocations table exists"""
    query = con.query(
        "select 1 from information_schema.tables "
        "where table_name='collocations'")
    return bool(list(query.dictresult()))
9ffa05f698056d9fab6bb9651427b6bc64f414ea
705,530
from bs4 import BeautifulSoup
import re


def ftp_profile(publish_settings):
    """Takes PublishSettings, extracts ftp user, password, and host"""
    soup = BeautifulSoup(publish_settings, 'html.parser')
    profiles = soup.find_all('publishprofile')
    ftp_profile = [profile for profile in profiles
                   if profile['publishmethod'] == 'FTP'][0]
    matches = re.search('ftp://(.+)/site/wwwroot', ftp_profile['publishurl'])
    host = matches.group(1) if matches else ''
    username = ftp_profile['username'].replace("\\$", "%5C%24")
    password = ftp_profile['userpwd']
    return host, username, password, ftp_profile['publishurl']
003218e6d58d01afcbf062a14e68294d0033b8af
705,531
def _temp_dict_file_name():
    """Name of the expected python dictionary as a json file from
    run_external_python().

    .. versionadded:: 9.1
    """
    return '__shared_dictionary__'
94f33562d775b041387b477d838a5efadfe38f00
705,532
def get_list_from_file(filename):
    """
    Returns a list of containers stored in a file (one on each line)
    """
    with open(filename) as fh:
        return [_ for _ in fh.read().splitlines() if _]
8d9a271aa4adea81f62bf74bb1d3c308870f1baf
705,533
def accumulator(init, update):
    """
    Generic accumulator function.

    .. code-block:: python

        # Simplest Form
        >>> a = 'this' + ' '
        >>> b = 'that'
        >>> c = functools.reduce(accumulator, [b], a)
        >>> c
        'this that'

        # The type of the initial value determines output type.
        >>> a = 5
        >>> b = 'Hello'
        >>> c = functools.reduce(accumulator, [b], a)
        >>> c
        10

    :param init: Initial Value
    :param update: Value to accumulate
    :return: Combined Values
    """
    return (
        init + len(update) if isinstance(init, int)
        else init + update
    )
6a4962932c8dba4d5c01aa8936787b1332a6323f
705,534
def himmelblau(individual):
    r"""The Himmelblau's function is multimodal with 4 defined minimums in
    :math:`[-6, 6]^2`.

    .. list-table::
       :widths: 10 50
       :stub-columns: 1

       * - Type
         - minimization
       * - Range
         - :math:`x_i \in [-6, 6]`
       * - Global optima
         - :math:`\mathbf{x}_1 = (3.0, 2.0)`, :math:`f(\mathbf{x}_1) = 0`\n
           :math:`\mathbf{x}_2 = (-2.805118, 3.131312)`, :math:`f(\mathbf{x}_2) = 0`\n
           :math:`\mathbf{x}_3 = (-3.779310, -3.283186)`, :math:`f(\mathbf{x}_3) = 0`\n
           :math:`\mathbf{x}_4 = (3.584428, -1.848126)`, :math:`f(\mathbf{x}_4) = 0`\n
       * - Function
         - :math:`f(x_1, x_2) = (x_1^2 + x_2 - 11)^2 + (x_1 + x_2^2 - 7)^2`

    .. plot:: code/benchmarks/himmelblau.py
       :width: 67 %
    """
    return (individual[0] * individual[0] + individual[1] - 11)**2 + \
        (individual[0] + individual[1] * individual[1] - 7)**2,
2fcf348e01f33a54d847dfc7f9a225ed043e36a4
705,536
def _recursive_pairwise_outer_join(
        dataframes_to_merge, on, lsuffix, rsuffix, npartitions, shuffle):
    """
    Schedule the merging of a list of dataframes in a pairwise method.
    This is a recursive function that results in a much more efficient
    scheduling of merges than a simple loop from:

    [A] [B] [C] [D] -> [AB] [C] [D] -> [ABC] [D] -> [ABCD]

    to:

    [A] [B] [C] [D] -> [AB] [CD] -> [ABCD]

    Note that either way, n-1 merges are still required, but using a
    pairwise reduction it can be completed in parallel.

    :param dataframes_to_merge: A list of Dask dataframes to be merged
        together on their index
    :return: A single Dask Dataframe, comprised of the pairwise-merges of
        all provided dataframes
    """
    number_of_dataframes_to_merge = len(dataframes_to_merge)
    merge_options = {
        "on": on,
        "lsuffix": lsuffix,
        "rsuffix": rsuffix,
        "npartitions": npartitions,
        "shuffle": shuffle,
    }
    # Base case 1: just return the provided dataframe and merge with `left`
    if number_of_dataframes_to_merge == 1:
        return dataframes_to_merge[0]
    # Base case 2: merge the two provided dataframes to be merged with `left`
    if number_of_dataframes_to_merge == 2:
        merged_ddf = dataframes_to_merge[0].join(
            dataframes_to_merge[1], how="outer", **merge_options
        )
        return merged_ddf
    # Recursive case: split the list of dfs into two ~even sizes and
    # continue down
    else:
        middle_index = number_of_dataframes_to_merge // 2
        merged_ddf = _recursive_pairwise_outer_join(
            [
                _recursive_pairwise_outer_join(
                    dataframes_to_merge[:middle_index], **merge_options
                ),
                _recursive_pairwise_outer_join(
                    dataframes_to_merge[middle_index:], **merge_options
                ),
            ],
            **merge_options,
        )
        return merged_ddf
7d65d01cce313ed0517fd685045978dee6d7cb08
705,537
def signum(x):
    """Calculate signum.

    :param x:
    :return:
    """
    if x > 0:
        return 1.0
    if x < 0:
        return -1.0
    if x == 0:
        return 0
0f8e67eb8fa3267ec341d17440270ce68ca8b446
705,538
import re


def is_date(word):
    """
    is_date()

    Purpose: Checks if word is a date.

    @param word. A string.
    @return the matched object if it is a date, otherwise None.

    >>> is_date('2015-03-1') is not None
    True
    >>> is_date('2014-02-19') is not None
    True
    >>> is_date('03-27-1995') is not None
    True
    >>> is_date('201') is not None
    False
    >>> is_date('0') is not None
    False
    """
    regex = r'^(\d\d\d\d-\d\d-\d|\d\d?-\d\d?-\d\d\d\d?|\d\d\d\d-\d\d?-\d\d?)$'
    return re.search(regex, word)
004bef4ac50f3ebd859cb35086c6e820f4c6e231
705,539
def forestvar(z_in):
    """Return intrinsic variance of LyaF variance for weighting.

    This estimate is roughly from McDonald et al 2006.

    Parameters
    ----------
    z_in : float or ndarray

    Returns
    -------
    fvar : float or ndarray
        Variance
    """
    fvar = 0.065 * ((1. + z_in) / (1. + 2.25))**3.8
    # Return
    return fvar
d3523510ee29b0cc12138da93001635f5ffe6a11
705,540
def adjust_age_groups(age_labels):
    """
    For each pair of columns to aggregate, takes the first number of the
    first element and the last number of the last element.
    For instance: ["0-4", "5-10"] -> ["0-10"]
    """
    i = 0
    new_age_labels = []
    label = ""
    for element in age_labels:
        if i % 2 == 0:
            label += element.split('-')[0]
            i += 1
        elif i % 2 == 1:
            label = label + '-' + element.split('-')[-1]
            new_age_labels.append(label)
            label = ""
            i += 1
    # Make the last age group open-ended, based on its first number plus "+"
    new_age_labels[-1] = new_age_labels[-1].split("-")[0] + "+"
    return new_age_labels
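For example, pairing four five-year groups and opening up the last one:

adjust_age_groups(["0-4", "5-9", "10-14", "15-19"])  # -> ['0-9', '10+']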
521a2f6779ae8fa3f3a53801e0f935844245cffc
705,541
def exploration_function(q_space, x_space, index_, action_space_n, k):
    """returns exploration value"""
    x_value = float('-inf')
    for i in range(action_space_n):
        x_value = max(x_value, q_space[index_][i] + k / (1 + x_space[index_][i]))
    # print("q={}, q+x_bonus={}".format(max(q_space[index_]), x_value))
    return x_value
9c6f1aa2943436d75c9a7735b4efa2c44c8a08d1
705,542
import sqlite3


def user_has_registered(userID):
    """Checks if a particular user has been registered in database"""
    database = sqlite3.connect("users.db")
    cursor = database.cursor()
    # Use a parameterized query instead of string interpolation to avoid
    # SQL injection.
    cursor.execute("SELECT user_id FROM profile WHERE user_id = ?", (userID,))
    result = cursor.fetchone()
    database.close()
    if result is None:
        return False
    return True
e98f83b272a52828638f276575596489bebe1fcf
705,543
def key_gen(**kwargs):
    """
    Key generator for linux. Determines key based on parameters supplied
    in kwargs.

    Keyword Parameters:
    @keyword geounit1: portable_id of a geounit
    @keyword geounit2: portable_id of a geounit
    @keyword region: region abbreviation
    """
    if 'geounit1' in kwargs and 'geounit2' in kwargs:
        return 'adj:geounit1:%s:geounit2:%s' % (kwargs['geounit1'],
                                                kwargs['geounit2'])
    if 'region' in kwargs:
        return 'adj:region:%s' % kwargs['region']
02426fbf49e7a4d85094896546980828e2c6bc20
705,544
import math


def lafferty_wyatt_point(lowedge, highedge, expo_slope):
    """calculates the l-w point for a bin where the true distribution is
    an exponential characterized by expo_slope.
    """
    rhs = math.exp(expo_slope * highedge) - math.exp(expo_slope * lowedge)
    rhs /= expo_slope
    rhs /= (highedge - lowedge)
    return math.log(rhs) / expo_slope
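A quick numeric check: for a falling exponential with slope -1 over the bin [0, 1], rhs = 1 - e**-1 ≈ 0.6321, so the Lafferty-Wyatt point lands at about 0.4587 rather than at the bin centre:

lafferty_wyatt_point(0.0, 1.0, -1.0)  # -> ~0.4587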
326acddc1926f1a142f34e8cff9109554ec850d3
705,546
def check_list(data):
    """Check if data is a list; if it is not a list, return it as [data]."""
    if type(data) is not list:
        return [data]
    else:
        return data
00ae7a857c3f969ca435928edf98ed5bb36c1c34
705,548
import base64


def encode_base64(filename):
    """Encode image to string.

    Args:
        filename: image file path.

    Returns:
        a base64-encoded string.
    """
    with open(filename, "rb") as f:
        bs64 = base64.b64encode(f.read()).decode()
    return bs64
9eab28ec1cb9619411ea28a9640a2fa8b02e61a3
705,549
def Doxyfile_emitter(target, source, env):
    """
    Modify the target and source lists to use the defaults if nothing
    else has been specified.

    Dependencies on external HTML documentation references are also
    appended to the source list.
    """
    doxyfile_template = env.File(env['DOXYFILE_FILE'])
    source.insert(0, doxyfile_template)
    return target, source
41928a8c837d7eb00d6b4a4a2f407e2d75217620
705,550
def _clean_annotated_text(text):
    """Cleans text from the format that it was presented to annotators in
    the S.M.A.R.T data annotation tool.

    Splits the title from the abstract text and strips any trailing
    whitespace.

    Returns:
        title (str): The project title
        text (str): The project abstract
    """
    text = text.split('=====')
    title = text[1].strip()
    abstract = text[-1].strip()
    return title, abstract
356cdf893225c41d303e83f1cf2f3418544c76ae
705,551
from typing import Dict


def swim_for_a_day(life_counts: Dict[int, int]):
    """Process the shoal, decrement the life_counts: any that get to -1
    have procreated in the last day, their offspring are created with
    8 day life_counts, whilst they get reset to 6 days… and are added to
    the count of any fish that moved down from 7 days.
    """
    new_counts = {d - 1: p for d, p in life_counts.items()}
    if -1 in new_counts.keys():
        new_counts[8] = new_counts[-1]
        new_counts[6] = new_counts[-1] + new_counts.get(6, 0)
        del new_counts[-1]
    return new_counts
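One simulated day on a tiny shoal (the lanternfish counting trick: the dict is keyed by days-to-spawn, not by individual fish):

swim_for_a_day({0: 1, 2: 2})  # -> {1: 2, 8: 1, 6: 1}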
3d5d3f48942a5a1f4eba3100e903df592d933e23
705,552
def page_not_found(error):
    """Generic Error Message"""
    return "Unable to find Distill."
ed764c2c2814487c33f9945b17b85a234ae45645
705,553
import itertools
import re


def _apply_constraints(password_hash, size, is_non_alphanumeric):
    """
    Fiddle with the password a bit after hashing it so that it will get
    through most website filters. We require one upper and lower case and
    one digit, and we look at the user's password to determine if there
    should be at least one non-alphanumeric character or not.
    """
    starting_size = 0 if size < 4 else size - 4
    result = password_hash[:starting_size]
    extras = itertools.chain((ord(ch) for ch in password_hash[starting_size:]),
                             itertools.repeat(0))
    extra_chars = (chr(ch) for ch in extras)

    def next_between(start, end):
        interval = ord(end) - ord(start) + 1
        offset = next(extras) % interval
        return chr(ord(start) + offset)

    chars_ranges = (("A", "Z"), ("a", "z"), ("0", "9"))
    for first, last in chars_ranges:
        any_of_chars = re.compile("[{}-{}]".format(first, last))
        if any_of_chars.search(result):
            result += next(extra_chars)
        else:
            result += next_between(first, last)
    non_word = re.compile(r"\W")
    if non_word.search(result) and is_non_alphanumeric:
        result += next(extra_chars)
    else:
        result += "+"
    while non_word.search(result) and not is_non_alphanumeric:
        result = non_word.sub(next_between("A", "Z"), result, 1)
    flip_place = next(extras) % len(result)
    result = result[flip_place:] + result[:flip_place]
    return result.strip("\x00")
8757c3197052fb1606a95dfa417a13ba833cdb43
705,555
import argparse


def get_arguments():
    """Parsing the arguments"""
    parser = argparse.ArgumentParser(description="", usage='''
______________________________________________________________________

  BiG-MAP map: maps the reads to the predicted MGCs
______________________________________________________________________

Generic command:
    python3 BiG-MAP.map.py {-I1 [mate-1s] -I2 [mate-2s] | -U [samples]}
        -O [outdir] -F [family] [Options*]

Maps the metagenomic/metatranscriptomic reads to the fasta reference
file and outputs RPKM read counts in .csv and BIOM format. Use the
BiG-MAP_process conda environment.

Data inputs: either paired or unpaired
    -I1   Provide the mate 1s of the paired metagenomic and/or
          metatranscriptomic samples here. These samples should be
          provided in fastq-format (.fastq, .fq, .fq.gz). Also, this can
          be a space separated list from the command line.
    -I2   Provide the mate 2s of the paired metagenomic and/or
          metatranscriptomic samples here. These samples should be
          provided in fastq-format (.fastq, .fq, .fq.gz). Also, this can
          be a space separated list from the command line.
    -U    Provide the unpaired metagenomic/metatranscriptomic samples
          here. These samples should be provided in fastq-format
          (.fastq, .fq, .fq.gz). Also, this can be a space separated
          list from the command line.

File inputs: either separated or pickled:
    -F    Directory with all the output files from the family module
    -P    Input files are in pickled format (named:
          BiG-MAP.[name].pickle). The format of the pickled file: fasta
          file, GCF json file, and optionally a bed file and/or
          BiG-SCAPE GCF dictionary.

Obligatory arguments:
    -O    Name of the output directory for where the output files are
          going to be written. Default = current folder (.)

Options:
    -b    Outputs the resulting read counts in biom format (v1.0) as
          well. This will be useful to analyze the results in
          BiG-MAP.analyse. Therefore, it is important to include the
          metadata here as well: this metagenomical data should be in
          the same format as the example metadata
    -f    Input files are in fasta format (.fna, .fa, .fasta):
          True/False. Default = False.
    -s    Bowtie2 setting: END-TO-END mode: very-fast, fast, sensitive,
          very-sensitive; LOCAL mode: very-fast-local, fast-local,
          sensitive-local, very-sensitive-local. Default = fast
    -a    Output read average values across GCFs instead of summed
          counts: True/False. Default = False.
    -th   Number of used threads in the bowtie2 mapping step.
          Default = 6
______________________________________________________________________
''')
    parser.add_argument("-O", "--outdir", help=argparse.SUPPRESS, required=True)
    parser.add_argument("-I1", "--fastq1", nargs='+', help=argparse.SUPPRESS,
                        required=False)
    parser.add_argument("-I2", "--fastq2", nargs='+', help=argparse.SUPPRESS,
                        required=False)
    parser.add_argument("-U", "--U_fastq", nargs='+', help=argparse.SUPPRESS,
                        required=False)
    parser.add_argument("-F", "--family", help=argparse.SUPPRESS, required=False)
    parser.add_argument("-P", "--pickle_file", help=argparse.SUPPRESS,
                        required=False)
    parser.add_argument("-b", "--biom_output", help=argparse.SUPPRESS,
                        type=str, required=False)
    parser.add_argument("-f", "--fasta", help=argparse.SUPPRESS, type=str,
                        required=False, default=False)
    parser.add_argument("-a", "--average", help=argparse.SUPPRESS, type=str,
                        required=False, default=False)
    parser.add_argument("-s", "--bowtie2_setting", help=argparse.SUPPRESS,
                        type=str, required=False, default="fast")
    parser.add_argument("-th", "--threads", help=argparse.SUPPRESS, type=int,
                        required=False, default=6)
    return (parser, parser.parse_args())
79173e468d640457f7374ebd5aa23d7da9f7684c
705,556
def replace_ensembl_ids(expression_df, gene_id_mapping):
    """
    Replaces ensembl gene ids with hgnc symbols

    Arguments
    ---------
    expression_df: df
        gene expression data matrix (sample x gene)
    gene_id_mapping: df
        Dataframe mapping ensembl ids (used in DE_stats_file) to hgnc
        symbols, used in Crow et. al.

    NOTE:
    -----
    This function is deprecated due to large memory usage: when
    `expression_df` is a large dataframe, manipulating it inside the
    memory becomes very slow (and sometimes even impossible) due to
    large memory consumption. The same functionality has been refactored
    into `get_renamed_columns()` and `map_recount2_data()` functions in
    this module. THIS FUNCTION IS KEPT AS A REFERENCE ONLY.
    """
    # Some columns are duplicates, for example:
    # (ENSG00000223773.7, ENSG00000223773) --> CD99P1
    # (ENSG00000124334.17, ENSG00000124334) --> IL9R
    # We keep the first occurrence of duplicated ensembl ids
    updated_mapping = gene_id_mapping.loc[
        ~gene_id_mapping.index.duplicated(keep="first")
    ]
    # The same ensembl id can be mapped to two different gene symbols
    # (CCL3L1, CCL3L3):
    # ENSG00000187510.7  ENSG00000187510  C12orf74
    # ENSG00000187510.7  ENSG00000187510  PLEKHG7
    # Manually mapping them based on what is found on the ensembl site
    manual_mapping = {
        "ENSG00000187510.7": "PLEKHG7",
        "ENSG00000230417.11": "LINC00595",
        "ENSG00000255374.3": "TAS2R45",
        "ENSG00000276085.1": "CCL3L1",
    }
    # Apply manual mappings to `updated_mapping`
    for ensembl_id, gene_symbol in manual_mapping.items():
        updated_mapping.loc[ensembl_id].hgnc_symbol = gene_symbol
    # Remove paralogs.
    # Some ensembl ids are paralogs (for example, "geneA" and "geneA_PAR_Y").
    # They map to the same hgnc symbol. Homologous sequences are paralogous
    # if they were separated by a gene duplication event: if a gene in an
    # organism is duplicated to occupy two different positions in the same
    # genome, then the two copies are paralogous.
    updated_expression_df = expression_df.iloc[
        :, ~expression_df.columns.str.contains("PAR_Y")
    ]
    # Replace ensembl ids with gene symbol
    updated_expression_df.columns = updated_expression_df.columns.map(
        updated_mapping["hgnc_symbol"]
    )
    # Remove columns whose mapped ensembl id is an empty string
    updated_expression_df = updated_expression_df.iloc[
        :, updated_expression_df.columns != ""
    ]
    # Remove columns whose mapped ensembl id is `NaN`
    updated_expression_df = updated_expression_df.iloc[
        :, updated_expression_df.columns.notnull()
    ]
    return updated_expression_df
db21341c337481f897da47e482a6667b3e4b9c8e
705,557
def ssl_allowed(fn):
    """
    Decorator - marks a route as allowing ssl, but not requiring it.
    It can be served over http and https.

    NOTE: This must go BEFORE the route!
    """
    fn.ssl_allowed = True
    return fn
d8a22ed69a356189bca69a08516fd0a1187e4866
705,558
import argparse


def get_parser():
    """Return a parser."""
    parser = argparse.ArgumentParser("cli")
    parser.add_argument('registryimage',
                        help="registry/image:tag - tag is optional")
    # Username and password come last to make them optional later
    parser.add_argument('username', help='username')
    parser.add_argument('password', help='password')
    return parser
e7bf6233cce887bf3fff1a1360ddef2134fe3eb6
705,559
def format_date(date):
    """
    Format date for creation of Twitter URL and Facebook API.

    Format a datetime object to a string in the form of '%Y-%m-%d',
    e.g. '2018-01-21'.

    Parameters
    ----------
    date : datetime
        date to be formatted

    Returns
    -------
    str
        date in string representation
    """
    return date.strftime('%Y-%m-%d')
d76e81613d2c3b06623cadb30d706c537555ad51
705,560
import base64


def basic_token(username, password):
    """Generate the Authorization token for Resource Orchestrator (SO-ub container).

    Args:
        username (str): the SO-ub username
        password (str): the SO-ub password

    Returns:
        str: the Basic token
    """
    if not isinstance(username, str):
        raise TypeError("The given type of username is `{}`. Expected str.".format(type(username)))
    if not isinstance(password, str):
        raise TypeError("The given type of password is `{}`. Expected str.".format(type(password)))
    credentials = str.encode(username + ":" + password)
    return bytes.decode(base64.b64encode(credentials))
054fccad28d1c18a34d630a664742f77e15ee4fe
705,561
import csv


def read_alias(alias_csv_path):
    """Reads alias.csv at the specified path.

    Then returns a dict mapping from alias to monster id.
    """
    with open(alias_csv_path) as alias_csv:
        return {
            alias: int(monster_id)
            for alias, monster_id in csv.reader(alias_csv)
        }
3a3818b81a916b4dd18ca7cab5fbcbe1b4050d03
705,562
def get_nodes_str(name, nodes):
    """Helper function to dump nodes as a list of names."""
    nodes_str = " %s nodes = %d\n" % (name, len(nodes))
    nodes_str += " " + ", ".join(map(lambda x: x._name, nodes)) + "\n"
    return nodes_str
cafb9fd0aa202c2172aede97eabbf829dc9a1b53
705,563
def fibonacci_thrid(n):
    """Compute the Fibonacci sequence, version 3."""
    return n if n < 2 else fibonacci_thrid(n - 2) + fibonacci_thrid(n - 1)
b98251e9bd4ec507933338738c2b65faea8700b2
705,564
def is_collision_ray_cell(map_obj, cell):
    """
    cell : cell r, c index from left bottom.
    """
    idx = cell[0] + map_obj.mapdim[0] * cell[1]
    if ((cell[0] < 0) or (cell[1] < 0)
            or (cell[0] >= map_obj.mapdim[0]) or (cell[1] >= map_obj.mapdim[1])):
        return True
    # elif (map_obj.map is not None) and map_obj.map[cell[0], cell[1]] == 1:
    elif (map_obj.map is not None) and map_obj.map_linear[idx] == 1:
        return True
    else:
        return False
6eaf38710843c4c4e82e8411db9f1e1d97fb1710
705,566
def transform_resource_name(ctx, param, value):
    """Callback to transform resource_name into title case."""
    if value is not None:
        return value.title()
    return value
b708c3318b731d652a7acad216093c96bc18fe2e
705,567
def extrema(im):
    """
    Return the minimum and maximum of an image.

    Arguments:
    im      image whose extrema are to be found
    """
    return [im.min(), im.max()]
303d9c50cca91c3e73341d7b40195aceb02aef7a
705,568
def _create_statement(name, colnames):
    """Create table if not exists foo (...).

    Note: Every type is numeric. Table name and column names are all
    lowercased.
    """
    # Every col is numeric; this may not be so elegant but is simple to
    # handle. If you want to change this, think again.
    schema = ', '.join([col + ' ' + 'numeric' for col in colnames])
    return "create table if not exists %s (%s)" % (name, schema)
53c7fc9486274645c5dc7dea2257fda3cf496f9e
705,569
def binary_or(a: int, b: int):
    """
    Take in 2 integers, convert them to binary, and return a binary number
    that is the result of a binary or operation on the integers provided.

    >>> binary_or(25, 32)
    '0b111001'
    >>> binary_or(37, 50)
    '0b110111'
    >>> binary_or(21, 30)
    '0b11111'
    >>> binary_or(58, 73)
    '0b1111011'
    >>> binary_or(0, 255)
    '0b11111111'
    >>> binary_or(0, 256)
    '0b100000000'
    >>> binary_or(0, -1)
    Traceback (most recent call last):
        ...
    ValueError: the value of both input must be positive
    >>> binary_or(0, 1.1)
    Traceback (most recent call last):
        ...
    TypeError: 'float' object cannot be interpreted as an integer
    >>> binary_or("0", "1")
    Traceback (most recent call last):
        ...
    TypeError: '<' not supported between instances of 'str' and 'int'
    """
    if a < 0 or b < 0:
        raise ValueError("the value of both input must be positive")
    a_binary = str(bin(a))[2:]  # remove the leading "0b"
    b_binary = str(bin(b))[2:]
    max_len = max(len(a_binary), len(b_binary))
    return "0b" + "".join(
        str(int("1" in (char_a, char_b)))
        for char_a, char_b in zip(a_binary.zfill(max_len), b_binary.zfill(max_len))
    )
514fa4a02b778dfa91c4097bb8916522339cda33
705,570
def tempConvert(temp, unit):
    """Convert Fahrenheit to Celsius."""
    if unit == 'F':
        celsius = (temp - 32) * 5 / 9
        return celsius
    else:
        return temp
224c7b5bd72ff5d209bfaf2b10d94cc24ac8681d
705,571
import fnmatch


def allowed_file(filename, allowed_exts):
    """
    The validator for blueimp that limits which file extensions are allowed.

    Args:
        filename (str): a filepath
        allowed_exts (str): set of allowed file extensions

    Returns:
        bool: True if extension is an allowed file type, False otherwise
    """
    allowed_extensions = ["*." + str(e) for e in list(allowed_exts)]
    for ext in allowed_extensions:
        if fnmatch.fnmatch(filename.lower(), ext):
            return True
    return False
af23f6017ffa76e5402800a77cf794a2c1bce330
705,572
import os


def return_file_size(file_path):
    """Takes the final size of the pre-processed file; this number will
    be used in the rendering process.
    """
    size = os.path.getsize(file_path)
    return size
dbda1f7cbdbff81588636a52d72be3ab0a0269e9
705,573
import os


def get_file_size(file):
    """Get file size.

    Args:
        file (str): Input file.

    Returns:
        int: Return size of the file in bytes.
    """
    return os.stat(file).st_size
2838f88bd36445b5fe5bda5e4637080116f24cd9
705,574